.gitignore (-1)
···

CLAUDE.md (-31)
···
 PDS_URI=https://bsky.social # Optional, defaults to bsky.social
 ```

-### X Bot Configuration
-
-The X bot uses a separate configuration file `x_config.yaml` with the following structure:
-```yaml
-x:
-  api_key: your_x_bearer_token
-  consumer_key: your_consumer_key
-  consumer_secret: your_consumer_secret
-  access_token: your_access_token
-  access_token_secret: your_access_token_secret
-  user_id: "your_user_id"
-
-letta:
-  api_key: your_letta_api_key
-  project_id: your_project_id
-  timeout: 600
-  agent_id: your_agent_id
-
-bot:
-  cleanup_interval: 10
-  max_thread_depth: 50
-  rate_limit_delay: 1
-  downrank_response_rate: 0.1
-
-logging:
-  level: INFO
-  enable_debug_data: true
-  log_thread_context: true
-  log_agent_responses: true
-```
-
 ## Key Development Patterns

 1. **Tool System**: Tools are defined as standalone functions in `tools/functions.py` with Pydantic schemas for validation, registered via `register_tools.py`
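
A note on the tool-system pattern kept above: its shape can be sketched in a few lines. Everything below is illustrative (the tool name, fields, and docstrings are hypothetical), assuming Pydantic v2 for the validation schema:

```python
from pydantic import BaseModel, Field

class SearchPostsArgs(BaseModel):
    """Validation schema for a hypothetical search_posts tool."""
    query: str = Field(description="Text to search for")
    limit: int = Field(default=25, ge=1, le=100, description="Maximum posts to return")

def search_posts(query: str, limit: int = 25) -> str:
    """Standalone tool function; register_tools.py would attach it to the agent."""
    args = SearchPostsArgs(query=query, limit=limit)  # raises ValidationError on bad input
    return f"searched for {args.query!r} (limit {args.limit})"
```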

bsky.py (+53 -193)
···
 import bsky_utils
 from tools.blocks import attach_user_blocks, detach_user_blocks
 from datetime import date
-from notification_db import NotificationDB

 def extract_handles_from_data(data):
     """Recursively extract all unique handles from nested data structure."""
···

 # Synthesis message tracking
 last_synthesis_time = time.time()
-
-# Database for notification tracking
-NOTIFICATION_DB = None

 def export_agent_state(client, agent, skip_git=False):
     """Export agent state to agent_archive/ (timestamped) and agents/ (current)."""
···
         None: Failed with non-retryable error, move to errors directory
         "no_reply": No reply was generated, move to no_reply directory
     """
-    import uuid
-
-    # Generate correlation ID for tracking this notification through the pipeline
-    correlation_id = str(uuid.uuid4())[:8]
-
     try:
-        logger.info(f"[{correlation_id}] Starting process_mention", extra={
-            'correlation_id': correlation_id,
-            'notification_type': type(notification_data).__name__
-        })
+        logger.debug(f"Starting process_mention with notification_data type: {type(notification_data)}")

         # Handle both dict and object inputs for backwards compatibility
         if isinstance(notification_data, dict):
···
             author_handle = notification_data.author.handle
             author_name = notification_data.author.display_name or author_handle

-        logger.info(f"[{correlation_id}] Processing mention from @{author_handle}", extra={
-            'correlation_id': correlation_id,
-            'author_handle': author_handle,
-            'author_name': author_name,
-            'mention_uri': uri,
-            'mention_text_length': len(mention_text),
-            'mention_preview': mention_text[:100] if mention_text else ''
-        })
+        logger.debug(f"Extracted data - URI: {uri}, Author: @{author_handle}, Text: {mention_text[:50]}...")

         # Retrieve the entire thread associated with the mention
         try:
···
             bot_check_result = check_known_bots(unique_handles, void_agent)
             bot_check_data = json.loads(bot_check_result)

-            # TEMPORARILY DISABLED: Bot detection causing issues with normal users
-            # TODO: Re-enable after debugging why normal users are being flagged as bots
-            if False:  # bot_check_data.get("bot_detected", False):
+            if bot_check_data.get("bot_detected", False):
                 detected_bots = bot_check_data.get("detected_bots", [])
                 logger.info(f"Bot detected in thread: {detected_bots}")

···
                 else:
                     logger.info(f"Responding to bot thread (10% response rate). Detected bots: {detected_bots}")
             else:
-                logger.debug("Bot detection disabled - processing all notifications")
+                logger.debug("No known bots detected in thread")

         except Exception as bot_check_error:
             logger.warning(f"Error checking for bots: {bot_check_error}")
···
         logger.info(f"Deleted queue file: {queue_filepath.name}")

         # Also mark as processed to avoid reprocessing
-        if NOTIFICATION_DB:
-            NOTIFICATION_DB.mark_processed(notification_data.get('uri', ''), status='processed')
-        else:
-            processed_uris = load_processed_notifications()
-            processed_uris.add(notification_data.get('uri', ''))
-            save_processed_notifications(processed_uris)
+        processed_uris = load_processed_notifications()
+        processed_uris.add(notification_data.get('uri', ''))
+        save_processed_notifications(processed_uris)

         # Export agent state before terminating
         export_agent_state(CLIENT, void_agent, skip_git=SKIP_GIT)
···
                 client=atproto_client,
                 notification=notification_data,
                 reply_text=cleaned_text,
-                lang=reply_lang,
-                correlation_id=correlation_id
+                lang=reply_lang
             )
         else:
             # Multiple replies - use new threaded function
···
                 client=atproto_client,
                 notification=notification_data,
                 reply_messages=cleaned_messages,
-                lang=reply_lang,
-                correlation_id=correlation_id
+                lang=reply_lang
             )

         if response:
-            logger.info(f"[{correlation_id}] Successfully replied to @{author_handle}", extra={
-                'correlation_id': correlation_id,
-                'author_handle': author_handle,
-                'reply_count': len(reply_messages)
-            })
+            logger.info(f"Successfully replied to @{author_handle}")

             # Acknowledge the post we're replying to with stream.thought.ack
             try:
···
         else:
             # Check if notification was explicitly ignored
             if ignored_notification:
-                logger.info(f"[{correlation_id}] Notification from @{author_handle} was explicitly ignored (category: {ignore_category})", extra={
-                    'correlation_id': correlation_id,
-                    'author_handle': author_handle,
-                    'ignore_category': ignore_category
-                })
+                logger.info(f"Notification from @{author_handle} was explicitly ignored (category: {ignore_category})")
                 return "ignored"
             else:
-                logger.warning(f"[{correlation_id}] No reply generated for mention from @{author_handle}, moving to no_reply folder", extra={
-                    'correlation_id': correlation_id,
-                    'author_handle': author_handle
-                })
+                logger.warning(f"No add_post_to_bluesky_reply_thread tool calls found for mention from @{author_handle}, moving to no_reply folder")
                 return "no_reply"

     except Exception as e:
-        logger.error(f"[{correlation_id}] Error processing mention: {e}", extra={
-            'correlation_id': correlation_id,
-            'error': str(e),
-            'error_type': type(e).__name__,
-            'author_handle': author_handle if 'author_handle' in locals() else 'unknown'
-        })
+        logger.error(f"Error processing mention: {e}")
         return False
     finally:
         # Detach user blocks after agent response (success or failure)
···


 def load_processed_notifications():
-    """Load the set of processed notification URIs from database."""
-    global NOTIFICATION_DB
-    if NOTIFICATION_DB:
-        return NOTIFICATION_DB.get_processed_uris(limit=MAX_PROCESSED_NOTIFICATIONS)
+    """Load the set of processed notification URIs."""
+    if PROCESSED_NOTIFICATIONS_FILE.exists():
+        try:
+            with open(PROCESSED_NOTIFICATIONS_FILE, 'r') as f:
+                data = json.load(f)
+            # Keep only recent entries (last MAX_PROCESSED_NOTIFICATIONS)
+            if len(data) > MAX_PROCESSED_NOTIFICATIONS:
+                data = data[-MAX_PROCESSED_NOTIFICATIONS:]
+                save_processed_notifications(data)
+            return set(data)
+        except Exception as e:
+            logger.error(f"Error loading processed notifications: {e}")
     return set()


 def save_processed_notifications(processed_set):
-    """Save the set of processed notification URIs to database."""
-    # This is now handled by marking individual notifications in the DB
-    # Keeping function for compatibility but it doesn't need to do anything
-    pass
+    """Save the set of processed notification URIs."""
+    try:
+        with open(PROCESSED_NOTIFICATIONS_FILE, 'w') as f:
+            json.dump(list(processed_set), f)
+    except Exception as e:
+        logger.error(f"Error saving processed notifications: {e}")


 def save_notification_to_queue(notification, is_priority=None):
     """Save a notification to the queue directory with priority-based filename."""
     try:
-        global NOTIFICATION_DB
+        # Check if already processed
+        processed_uris = load_processed_notifications()

         # Handle both notification objects and dicts
         if isinstance(notification, dict):
···
         else:
             notif_dict = notification_to_dict(notification)
             notification_uri = notification.uri
-
-        # Check if already processed (using database if available)
-        if NOTIFICATION_DB:
-            if NOTIFICATION_DB.is_processed(notification_uri):
-                logger.debug(f"Notification already processed (DB): {notification_uri}")
-                return False
-            # Add to database - if this fails, don't queue the notification
-            if not NOTIFICATION_DB.add_notification(notif_dict):
-                logger.warning(f"Failed to add notification to database, skipping: {notification_uri}")
-                return False
-        else:
-            # Fall back to old JSON method
-            processed_uris = load_processed_notifications()
-            if notification_uri in processed_uris:
-                logger.debug(f"Notification already processed: {notification_uri}")
-                return False
+
+        if notification_uri in processed_uris:
+            logger.debug(f"Notification already processed: {notification_uri}")
+            return False

         # Create JSON string
         notif_json = json.dumps(notif_dict, sort_keys=True)
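
The change above drops the SQLite-backed dedup path and returns to a flat JSON file of processed URIs. The mechanism is small enough to sketch in isolation; the path and cap below are illustrative, mirroring the constants used in this file:

```python
import json
from pathlib import Path

PROCESSED_FILE = Path("queue/processed_notifications.json")  # illustrative path
MAX_ENTRIES = 10000

def load_processed() -> set:
    """Load processed URIs, keeping only the most recent MAX_ENTRIES."""
    if not PROCESSED_FILE.exists():
        return set()
    data = json.loads(PROCESSED_FILE.read_text())
    return set(data[-MAX_ENTRIES:])

def mark_processed(uri: str) -> None:
    """Add one URI and rewrite the whole file (simple, but O(n) per call)."""
    uris = load_processed()
    uris.add(uri)
    PROCESSED_FILE.write_text(json.dumps(list(uris)))
```

The trade-off is simplicity over the timestamp filtering and per-status tracking the database provided.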
···
         logger.info(f"Successfully processed and removed: {filepath.name}")

         # Mark as processed to avoid reprocessing
-        if NOTIFICATION_DB:
-            NOTIFICATION_DB.mark_processed(notif_data['uri'], status='processed')
-        else:
-            processed_uris = load_processed_notifications()
-            processed_uris.add(notif_data['uri'])
-            save_processed_notifications(processed_uris)
+        processed_uris = load_processed_notifications()
+        processed_uris.add(notif_data['uri'])
+        save_processed_notifications(processed_uris)

     elif success is None:  # Special case for moving to error directory
         error_path = QUEUE_ERROR_DIR / filepath.name
···
         logger.warning(f"Moved {filepath.name} to errors directory")

         # Also mark as processed to avoid retrying
-        if NOTIFICATION_DB:
-            NOTIFICATION_DB.mark_processed(notif_data['uri'], status='error')
-        else:
-            processed_uris = load_processed_notifications()
-            processed_uris.add(notif_data['uri'])
-            save_processed_notifications(processed_uris)
+        processed_uris = load_processed_notifications()
+        processed_uris.add(notif_data['uri'])
+        save_processed_notifications(processed_uris)

     elif success == "no_reply":  # Special case for moving to no_reply directory
         no_reply_path = QUEUE_NO_REPLY_DIR / filepath.name
···
         logger.info(f"Moved {filepath.name} to no_reply directory")

         # Also mark as processed to avoid retrying
-        if NOTIFICATION_DB:
-            NOTIFICATION_DB.mark_processed(notif_data['uri'], status='error')
-        else:
-            processed_uris = load_processed_notifications()
-            processed_uris.add(notif_data['uri'])
-            save_processed_notifications(processed_uris)
+        processed_uris = load_processed_notifications()
+        processed_uris.add(notif_data['uri'])
+        save_processed_notifications(processed_uris)

     elif success == "ignored":  # Special case for explicitly ignored notifications
         # For ignored notifications, we just delete them (not move to no_reply)
···
         logger.info(f"🚫 Deleted ignored notification: {filepath.name}")

         # Also mark as processed to avoid retrying
-        if NOTIFICATION_DB:
-            NOTIFICATION_DB.mark_processed(notif_data['uri'], status='error')
-        else:
-            processed_uris = load_processed_notifications()
-            processed_uris.add(notif_data['uri'])
-            save_processed_notifications(processed_uris)
+        processed_uris = load_processed_notifications()
+        processed_uris.add(notif_data['uri'])
+        save_processed_notifications(processed_uris)

     else:
         logger.warning(f"⚠️ Failed to process {filepath.name}, keeping in queue for retry")
···
 def fetch_and_queue_new_notifications(atproto_client):
     """Fetch new notifications and queue them without processing."""
     try:
-        global NOTIFICATION_DB
-
         # Get current time for marking notifications as seen
         logger.debug("Getting current time for notification marking...")
         last_seen_at = atproto_client.get_current_time_iso()
-
-        # Get timestamp of last processed notification for filtering
-        last_processed_time = None
-        if NOTIFICATION_DB:
-            last_processed_time = NOTIFICATION_DB.get_latest_processed_time()
-            if last_processed_time:
-                logger.debug(f"Last processed notification was at: {last_processed_time}")

         # Fetch ALL notifications using pagination
         all_notifications = []
···
         # Now process all fetched notifications
         new_count = 0
         if all_notifications:
-            logger.info(f"📥 Fetched {len(all_notifications)} total notifications from API")
-
             # Mark as seen first
             try:
                 atproto_client.app.bsky.notification.update_seen(
···
             except Exception as e:
                 logger.error(f"Error marking notifications as seen: {e}")

-            # Debug counters
-            skipped_read = 0
-            skipped_likes = 0
-            skipped_processed = 0
-            skipped_old_timestamp = 0
-            processed_uris = load_processed_notifications()
-
-            # Queue all new notifications (except likes)
+            # Queue all new notifications (except likes and already read)
             for notif in all_notifications:
-                # Skip if older than last processed (when we have timestamp filtering)
-                if last_processed_time and hasattr(notif, 'indexed_at'):
-                    if notif.indexed_at <= last_processed_time:
-                        skipped_old_timestamp += 1
-                        logger.debug(f"Skipping old notification (indexed_at {notif.indexed_at} <= {last_processed_time})")
-                        continue
-
-                # Debug: Log is_read status but DON'T skip based on it
-                if hasattr(notif, 'is_read') and notif.is_read:
-                    skipped_read += 1
-                    logger.debug(f"Notification has is_read=True (but processing anyway): {notif.uri if hasattr(notif, 'uri') else 'unknown'}")
-
-                # Skip likes
-                if hasattr(notif, 'reason') and notif.reason == 'like':
-                    skipped_likes += 1
+                # Skip if already read or if it's a like
+                if (hasattr(notif, 'is_read') and notif.is_read) or (hasattr(notif, 'reason') and notif.reason == 'like'):
                     continue

                 notif_dict = notif.model_dump() if hasattr(notif, 'model_dump') else notif

                 # Skip likes in dict form too
                 if notif_dict.get('reason') == 'like':
-                    continue
-
-                # Check if already processed
-                notif_uri = notif_dict.get('uri', '')
-                if notif_uri in processed_uris:
-                    skipped_processed += 1
-                    logger.debug(f"Skipping already processed: {notif_uri}")
                     continue

                 # Check if it's a priority notification
···

                 if save_notification_to_queue(notif_dict, is_priority=is_priority):
                     new_count += 1
-                    logger.debug(f"Queued notification from @{author_handle}: {notif_dict.get('reason', 'unknown')}")

-            # Log summary of filtering
-            logger.info(f"📊 Notification processing summary:")
-            logger.info(f"  • Total fetched: {len(all_notifications)}")
-            logger.info(f"  • Had is_read=True: {skipped_read} (not skipped)")
-            logger.info(f"  • Skipped (likes): {skipped_likes}")
-            logger.info(f"  • Skipped (old timestamp): {skipped_old_timestamp}")
-            logger.info(f"  • Skipped (already processed): {skipped_processed}")
-            logger.info(f"  • Queued for processing: {new_count}")
+            logger.info(f"Queued {new_count} new notifications and marked as seen")
         else:
             logger.debug("No new notifications to queue")

···
         void_agent = initialize_void()
         logger.info(f"Void agent initialized: {void_agent.id}")

-        # Initialize notification database
-        global NOTIFICATION_DB
-        logger.info("Initializing notification database...")
-        NOTIFICATION_DB = NotificationDB()
-
-        # Migrate from old JSON format if it exists
-        if PROCESSED_NOTIFICATIONS_FILE.exists():
-            logger.info("Found old processed_notifications.json, migrating to database...")
-            NOTIFICATION_DB.migrate_from_json(str(PROCESSED_NOTIFICATIONS_FILE))
-
-        # Log database stats
-        db_stats = NOTIFICATION_DB.get_stats()
-        logger.info(f"Database initialized - Total notifications: {db_stats.get('total', 0)}, Recent (24h): {db_stats.get('recent_24h', 0)}")
-
-        # Clean up old records
-        NOTIFICATION_DB.cleanup_old_records(days=7)
-
         # Ensure correct tools are attached for Bluesky
         logger.info("Configuring tools for Bluesky platform...")
         try:
···
                 if CLEANUP_INTERVAL > 0 and cycle_count % CLEANUP_INTERVAL == 0:
                     logger.debug(f"Running periodic user block cleanup (cycle {cycle_count})")
                     periodic_user_block_cleanup(CLIENT, void_agent.id)
-
-                    # Also check database health when doing cleanup
-                    if NOTIFICATION_DB:
-                        db_stats = NOTIFICATION_DB.get_stats()
-                        pending = db_stats.get('status_pending', 0)
-                        errors = db_stats.get('status_error', 0)
-
-                        if pending > 50:
-                            logger.warning(f"⚠️ Queue health check: {pending} pending notifications (may be stuck)")
-                        if errors > 20:
-                            logger.warning(f"⚠️ Queue health check: {errors} error notifications")
-
-                        # Periodic cleanup of old records
-                        if cycle_count % (CLEANUP_INTERVAL * 10) == 0:  # Every 100 cycles
-                            logger.info("Running database cleanup of old records...")
-                            NOTIFICATION_DB.cleanup_old_records(days=7)

                 # Log cycle completion with stats
                 elapsed_time = time.time() - start_time
···
                 logger.info(f" - {message_counters['follows']} follows")
                 logger.info(f" - {message_counters['reposts_skipped']} reposts skipped")
                 logger.info(f" - Average rate: {messages_per_minute:.1f} messages/minute")
-
-                # Close database connection
-                if NOTIFICATION_DB:
-                    logger.info("Closing database connection...")
-                    NOTIFICATION_DB.close()
-
                 break
             except Exception as e:
                 logger.error(f"=== ERROR IN MAIN LOOP CYCLE {cycle_count} ===")

bsky_utils.py (+51 -188)
···
 import os
 import logging
-import uuid
-import time
 from typing import Optional, Dict, Any, List
 from atproto_client import Client, Session, SessionEvent, models

···


 def default_login() -> Client:
-    """Login using configuration from config.yaml or environment variables."""
-    try:
-        from config_loader import get_bluesky_config
-        bluesky_config = get_bluesky_config()
-
-        username = bluesky_config['username']
-        password = bluesky_config['password']
-        pds_uri = bluesky_config.get('pds_uri', 'https://bsky.social')
-
-        logger.info(f"Logging into Bluesky as {username} via {pds_uri}")
-
-        # Use pds_uri from config
-        client = Client(base_url=pds_uri)
-        client.login(username, password)
-        return client
-
-    except Exception as e:
-        logger.error(f"Failed to load Bluesky configuration: {e}")
-        logger.error("Please check your config.yaml file or environment variables")
-        exit(1)
+    username = os.getenv("BSKY_USERNAME")
+    password = os.getenv("BSKY_PASSWORD")
+
+    if username is None:
+        logger.error(
+            "No username provided. Please provide a username using the BSKY_USERNAME environment variable."
+        )
+        exit()
+
+    if password is None:
+        logger.error(
+            "No password provided. Please provide a password using the BSKY_PASSWORD environment variable."
+        )
+        exit()
+
+    return init_client(username, password)

 def remove_outside_quotes(text: str) -> str:
     """
···

     return text

-def reply_to_post(client: Client, text: str, reply_to_uri: str, reply_to_cid: str, root_uri: Optional[str] = None, root_cid: Optional[str] = None, lang: Optional[str] = None, correlation_id: Optional[str] = None) -> Dict[str, Any]:
+def reply_to_post(client: Client, text: str, reply_to_uri: str, reply_to_cid: str, root_uri: Optional[str] = None, root_cid: Optional[str] = None, lang: Optional[str] = None) -> Dict[str, Any]:
     """
     Reply to a post on Bluesky with rich text support.

···
         root_uri: The URI of the root post (if replying to a reply). If None, uses reply_to_uri
         root_cid: The CID of the root post (if replying to a reply). If None, uses reply_to_cid
         lang: Language code for the post (e.g., 'en-US', 'es', 'ja')
-        correlation_id: Unique ID for tracking this message through the pipeline

     Returns:
         The response from sending the post
     """
     import re

-    # Generate correlation ID if not provided
-    if correlation_id is None:
-        correlation_id = str(uuid.uuid4())[:8]
-
-    # Enhanced logging with structured data
-    logger.info(f"[{correlation_id}] Starting reply_to_post", extra={
-        'correlation_id': correlation_id,
-        'text_length': len(text),
-        'text_preview': text[:100] + '...' if len(text) > 100 else text,
-        'reply_to_uri': reply_to_uri,
-        'root_uri': root_uri,
-        'lang': lang
-    })
-
-    start_time = time.time()
-
     # If root is not provided, this is a reply to the root post
     if root_uri is None:
         root_uri = reply_to_uri
···
     # Parse rich text facets (mentions and URLs)
     facets = []
     text_bytes = text.encode("UTF-8")
-    mentions_found = []
-    urls_found = []
-
-    logger.debug(f"[{correlation_id}] Parsing facets from text (length: {len(text_bytes)} bytes)")

     # Parse mentions - fixed to handle @ at start of text
     mention_regex = rb"(?:^|[$|\W])(@([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)"

     for m in re.finditer(mention_regex, text_bytes):
         handle = m.group(1)[1:].decode("UTF-8")  # Remove @ prefix
-        mentions_found.append(handle)
         # Adjust byte positions to account for the optional prefix
         mention_start = m.start(1)
         mention_end = m.end(1)
···
                         features=[models.AppBskyRichtextFacet.Mention(did=resolve_resp.did)]
                     )
                 )
-            logger.debug(f"[{correlation_id}] Resolved mention @{handle} -> {resolve_resp.did}")
         except Exception as e:
-            logger.warning(f"[{correlation_id}] Failed to resolve handle @{handle}: {e}")
+            logger.debug(f"Failed to resolve handle {handle}: {e}")
             continue

     # Parse URLs - fixed to handle URLs at start of text
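
One detail worth calling out in the mention parsing above: facet indices are byte offsets into the UTF-8 encoding of the text, not character offsets, which is why the code works on `text_bytes`. A small worked example (the DID is a placeholder, and `Main`/`ByteSlice` are assumed to be the SDK models the collapsed region uses alongside `Mention`):

```python
from atproto_client import models

text = "✨ ping @alice.bsky.social"
text_bytes = text.encode("UTF-8")

# "✨" is 3 bytes in UTF-8, so byte offsets differ from character offsets.
start = text_bytes.find(b"@alice")                       # byte 9, character 7
end = start + len("@alice.bsky.social".encode("UTF-8"))  # byte 27

facet = models.AppBskyRichtextFacet.Main(
    index=models.AppBskyRichtextFacet.ByteSlice(byte_start=start, byte_end=end),
    features=[models.AppBskyRichtextFacet.Mention(did="did:plc:example")],
)
```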
···

     for m in re.finditer(url_regex, text_bytes):
         url = m.group(1).decode("UTF-8")
-        urls_found.append(url)
         # Adjust byte positions to account for the optional prefix
         url_start = m.start(1)
         url_end = m.end(1)
···
                 features=[models.AppBskyRichtextFacet.Link(uri=url)]
             )
         )
-        logger.debug(f"[{correlation_id}] Found URL: {url}")
-
-    logger.debug(f"[{correlation_id}] Facet parsing complete", extra={
-        'correlation_id': correlation_id,
-        'mentions_count': len(mentions_found),
-        'mentions': mentions_found,
-        'urls_count': len(urls_found),
-        'urls': urls_found,
-        'total_facets': len(facets)
-    })

     # Send the reply with facets if any were found
-    logger.info(f"[{correlation_id}] Sending reply to Bluesky API", extra={
-        'correlation_id': correlation_id,
-        'has_facets': bool(facets),
-        'facet_count': len(facets),
-        'lang': lang
-    })
-
-    try:
-        if facets:
-            response = client.send_post(
-                text=text,
-                reply_to=models.AppBskyFeedPost.ReplyRef(parent=parent_ref, root=root_ref),
-                facets=facets,
-                langs=[lang] if lang else None
-            )
-        else:
-            response = client.send_post(
-                text=text,
-                reply_to=models.AppBskyFeedPost.ReplyRef(parent=parent_ref, root=root_ref),
-                langs=[lang] if lang else None
-            )
-
-        # Calculate response time
-        response_time = time.time() - start_time
-
-        # Extract post URL for user-friendly logging
-        post_url = None
-        if hasattr(response, 'uri') and response.uri:
-            # Convert AT-URI to web URL
-            # Format: at://did:plc:xxx/app.bsky.feed.post/xxx -> https://bsky.app/profile/handle/post/xxx
-            try:
-                uri_parts = response.uri.split('/')
-                if len(uri_parts) >= 4 and uri_parts[3] == 'app.bsky.feed.post':
-                    rkey = uri_parts[4]
-                    # We'd need to resolve DID to handle, but for now just use the URI
-                    post_url = f"bsky://post/{rkey}"
-            except:
-                pass
-
-        logger.info(f"[{correlation_id}] Reply sent successfully ({response_time:.3f}s) - URI: {response.uri}" +
-                    (f" - URL: {post_url}" if post_url else ""), extra={
-            'correlation_id': correlation_id,
-            'response_time': round(response_time, 3),
-            'post_uri': response.uri,
-            'post_url': post_url,
-            'post_cid': getattr(response, 'cid', None),
-            'text_length': len(text)
-        })
-
-        return response
-
-    except Exception as e:
-        response_time = time.time() - start_time
-        logger.error(f"[{correlation_id}] Failed to send reply", extra={
-            'correlation_id': correlation_id,
-            'error': str(e),
-            'error_type': type(e).__name__,
-            'response_time': round(response_time, 3),
-            'text_length': len(text)
-        })
-        raise
+    if facets:
+        response = client.send_post(
+            text=text,
+            reply_to=models.AppBskyFeedPost.ReplyRef(parent=parent_ref, root=root_ref),
+            facets=facets,
+            langs=[lang] if lang else None
+        )
+    else:
+        response = client.send_post(
+            text=text,
+            reply_to=models.AppBskyFeedPost.ReplyRef(parent=parent_ref, root=root_ref),
+            langs=[lang] if lang else None
+        )
+
+    logger.info(f"Reply sent successfully: {response.uri}")
+    return response


 def get_post_thread(client: Client, uri: str) -> Optional[Dict[str, Any]]:
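
With the simplified signature, a direct call to `reply_to_post` looks like this (all values below are placeholders; in the bot they come from a notification):

```python
client = default_login()
response = reply_to_post(
    client=client,
    text="Replying with a link: https://example.com",
    reply_to_uri="at://did:plc:example/app.bsky.feed.post/3kexample",
    reply_to_cid="bafyreiexamplecid",
    lang="en-US",
)
print(response.uri)
```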
···
         return None


-def reply_to_notification(client: Client, notification: Any, reply_text: str, lang: str = "en-US", correlation_id: Optional[str] = None) -> Optional[Dict[str, Any]]:
+def reply_to_notification(client: Client, notification: Any, reply_text: str, lang: str = "en-US") -> Optional[Dict[str, Any]]:
     """
     Reply to a notification (mention or reply).

···
         notification: The notification object from list_notifications
         reply_text: The text to reply with
         lang: Language code for the post (defaults to "en-US")
-        correlation_id: Unique ID for tracking this message through the pipeline

     Returns:
         The response from sending the reply or None if failed
     """
-    # Generate correlation ID if not provided
-    if correlation_id is None:
-        correlation_id = str(uuid.uuid4())[:8]
-
-    logger.info(f"[{correlation_id}] Processing reply_to_notification", extra={
-        'correlation_id': correlation_id,
-        'reply_length': len(reply_text),
-        'lang': lang
-    })
-
     try:
         # Get the post URI and CID from the notification (handle both dict and object)
         if isinstance(notification, dict):
···
             reply_to_cid=post_cid,
             root_uri=root_uri,
             root_cid=root_cid,
-            lang=lang,
-            correlation_id=correlation_id
+            lang=lang
         )

     except Exception as e:
-        logger.error(f"[{correlation_id}] Error replying to notification: {e}", extra={
-            'correlation_id': correlation_id,
-            'error': str(e),
-            'error_type': type(e).__name__
-        })
+        logger.error(f"Error replying to notification: {e}")
         return None


-def reply_with_thread_to_notification(client: Client, notification: Any, reply_messages: List[str], lang: str = "en-US", correlation_id: Optional[str] = None) -> Optional[List[Dict[str, Any]]]:
+def reply_with_thread_to_notification(client: Client, notification: Any, reply_messages: List[str], lang: str = "en-US") -> Optional[List[Dict[str, Any]]]:
     """
     Reply to a notification with a threaded chain of messages (max 15).

···
         notification: The notification object from list_notifications
         reply_messages: List of reply texts (max 15 messages, each max 300 chars)
         lang: Language code for the posts (defaults to "en-US")
-        correlation_id: Unique ID for tracking this message through the pipeline

     Returns:
         List of responses from sending the replies or None if failed
     """
-    # Generate correlation ID if not provided
-    if correlation_id is None:
-        correlation_id = str(uuid.uuid4())[:8]
-
-    logger.info(f"[{correlation_id}] Starting threaded reply", extra={
-        'correlation_id': correlation_id,
-        'message_count': len(reply_messages),
-        'total_length': sum(len(msg) for msg in reply_messages),
-        'lang': lang
-    })
-
     try:
         # Validate input
         if not reply_messages or len(reply_messages) == 0:
-            logger.error(f"[{correlation_id}] Reply messages list cannot be empty")
+            logger.error("Reply messages list cannot be empty")
             return None
         if len(reply_messages) > 15:
-            logger.error(f"[{correlation_id}] Cannot send more than 15 reply messages (got {len(reply_messages)})")
+            logger.error(f"Cannot send more than 15 reply messages (got {len(reply_messages)})")
             return None

         # Get the post URI and CID from the notification (handle both dict and object)
···
         current_parent_cid = post_cid

         for i, message in enumerate(reply_messages):
-            thread_correlation_id = f"{correlation_id}-{i+1}"
-            logger.info(f"[{thread_correlation_id}] Sending reply {i+1}/{len(reply_messages)}: {message[:50]}...")
+            logger.info(f"Sending reply {i+1}/{len(reply_messages)}: {message[:50]}...")

             # Send this reply
             response = reply_to_post(
···
                 reply_to_cid=current_parent_cid,
                 root_uri=root_uri,
                 root_cid=root_cid,
-                lang=lang,
-                correlation_id=thread_correlation_id
+                lang=lang
             )

             if not response:
-                logger.error(f"[{thread_correlation_id}] Failed to send reply {i+1}, posting system failure message")
+                logger.error(f"Failed to send reply {i+1}, posting system failure message")
                 # Try to post a system failure message
                 failure_response = reply_to_post(
                     client=client,
···
                     reply_to_cid=current_parent_cid,
                     root_uri=root_uri,
                     root_cid=root_cid,
-                    lang=lang,
-                    correlation_id=f"{thread_correlation_id}-FAIL"
+                    lang=lang
                 )
                 if failure_response:
                     responses.append(failure_response)
                     current_parent_uri = failure_response.uri
                     current_parent_cid = failure_response.cid
                 else:
-                    logger.error(f"[{thread_correlation_id}] Could not even send system failure message, stopping thread")
+                    logger.error("Could not even send system failure message, stopping thread")
                     return responses if responses else None
             else:
                 responses.append(response)
···
                 current_parent_uri = response.uri
                 current_parent_cid = response.cid

-        logger.info(f"[{correlation_id}] Successfully sent {len(responses)} threaded replies", extra={
-            'correlation_id': correlation_id,
-            'replies_sent': len(responses),
-            'replies_requested': len(reply_messages)
-        })
+        logger.info(f"Successfully sent {len(responses)} threaded replies")
         return responses

     except Exception as e:
-        logger.error(f"[{correlation_id}] Error sending threaded reply to notification: {e}", extra={
-            'correlation_id': correlation_id,
-            'error': str(e),
-            'error_type': type(e).__name__,
-            'message_count': len(reply_messages)
-        })
+        logger.error(f"Error sending threaded reply to notification: {e}")
         return None

···
         logger.error("Missing access token or DID from session")
         return None

-    # Get PDS URI from config instead of environment variables
-    from config_loader import get_bluesky_config
-    bluesky_config = get_bluesky_config()
-    pds_host = bluesky_config['pds_uri']
+    pds_host = os.getenv("PDS_URI", "https://bsky.social")

     # Create acknowledgment record with null subject
     now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
···
         logger.error("Missing access token or DID from session")
         return None

-    # Get PDS URI from config instead of environment variables
-    from config_loader import get_bluesky_config
-    bluesky_config = get_bluesky_config()
-    pds_host = bluesky_config['pds_uri']
+    pds_host = os.getenv("PDS_URI", "https://bsky.social")

     # Create acknowledgment record with stream.thought.ack type
     now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
···
         logger.error("Missing access token or DID from session")
         return None

-    # Get PDS URI from config instead of environment variables
-    from config_loader import get_bluesky_config
-    bluesky_config = get_bluesky_config()
-    pds_host = bluesky_config['pds_uri']
+    pds_host = os.getenv("PDS_URI", "https://bsky.social")

     # Create tool call record
     now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
···
         logger.error("Missing access token or DID from session")
         return None

-    # Get PDS URI from config instead of environment variables
-    from config_loader import get_bluesky_config
-    bluesky_config = get_bluesky_config()
-    pds_host = bluesky_config['pds_uri']
+    pds_host = os.getenv("PDS_URI", "https://bsky.social")

     # Create reasoning record
     now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
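
The four hunks above all switch `pds_host` resolution from config to the `PDS_URI` environment variable; each helper then writes a custom record to the PDS. The underlying XRPC call presumably looks something like the sketch below (record fields beyond `$type` and `createdAt` are assumptions about the custom lexicons):

```python
import requests
from datetime import datetime, timezone

def create_custom_record(pds_host: str, access_token: str, did: str,
                         collection: str, fields: dict) -> dict:
    """Sketch of a raw com.atproto.repo.createRecord call for a custom lexicon."""
    now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
    record = {"$type": collection, "createdAt": now, **fields}
    resp = requests.post(
        f"{pds_host}/xrpc/com.atproto.repo.createRecord",
        headers={"Authorization": f"Bearer {access_token}"},
        json={"repo": did, "collection": collection, "record": record},
    )
    resp.raise_for_status()
    return resp.json()
```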

config.example.yaml (+3 -2)
···
 letta:
   api_key: "your-letta-api-key-here"
   timeout: 600 # 10 minutes timeout for API calls
-  project_id: "c82faea2-3ce8-4aa9-a220-b56433e62c92" # Use your specific project ID
-  agent_id: "agent-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" # Your void agent ID
+  project_id: "your-project-id-here" # Use your specific project ID
+  agent_id: "your-agent-id-here" # Your void agent ID
+  base_url: "https://api.letta.com" # Defaults to Letta Cloud; this is typically http://localhost:8283 for self-hosted

 # Bluesky Configuration
 bluesky:

config.yaml.bkp (+17)
···
+# Void Bot Configuration
+# Generated by migration script
+# Created: 2025-07-12 13:45:55
+# See config.yaml.example for all available options
+
+bluesky:
+  password: [REDACTED]
+  pds_uri: https://comind.network
+  username: void.comind.network
+bot:
+  fetch_notifications_delay: 30
+  max_notification_pages: 20
+  max_processed_notifications: 10000
+letta:
+  api_key: [REDACTED]
+  project_id: 5ec33d52-ab14-4fd6-91b5-9dbc43e888a8
+  timeout: 600

config_loader.py (+4 -4)
···
     }

 def get_bluesky_config() -> Dict[str, Any]:
-    """Get Bluesky configuration, prioritizing config.yaml over environment variables."""
+    """Get Bluesky configuration."""
     config = get_config()
     return {
-        'username': config.get_required('bluesky.username'),
-        'password': config.get_required('bluesky.password'),
-        'pds_uri': config.get('bluesky.pds_uri', 'https://bsky.social'),
+        'username': config.get_required('bluesky.username', 'BSKY_USERNAME'),
+        'password': config.get_required('bluesky.password', 'BSKY_PASSWORD'),
+        'pds_uri': config.get_with_env('bluesky.pds_uri', 'PDS_URI', 'https://bsky.social'),
     }

 def get_bot_config() -> Dict[str, Any]:
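
The `get_with_env` helper the new code calls is not part of this diff. Its implied precedence is the config.yaml value first, then the environment variable, then the default; a plausible sketch:

```python
import os

def get_with_env(self, key: str, env_var: str, default=None):
    """Hypothetical helper: config value, then environment variable, then default."""
    value = self.get(key)
    if value is not None:
        return value
    return os.environ.get(env_var, default)
```

`get_required(key, env_var)` presumably follows the same lookup but raises if neither source is set.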

notification_db.py (-299)
···
-#!/usr/bin/env python3
-"""SQLite database for robust notification tracking."""
-
-import sqlite3
-import json
-from pathlib import Path
-from datetime import datetime, timedelta
-from typing import Set, Dict, List, Optional, Tuple
-import logging
-
-logger = logging.getLogger(__name__)
-
-class NotificationDB:
-    """Database for tracking notification processing state."""
-
-    def __init__(self, db_path: str = "queue/notifications.db"):
-        """Initialize the notification database."""
-        self.db_path = Path(db_path)
-        self.db_path.parent.mkdir(exist_ok=True, parents=True)
-        self.conn = None
-        self._init_db()
-
-    def _init_db(self):
-        """Initialize database schema."""
-        self.conn = sqlite3.connect(self.db_path, check_same_thread=False)
-        self.conn.row_factory = sqlite3.Row
-
-        # Create main notifications table
-        self.conn.execute("""
-            CREATE TABLE IF NOT EXISTS notifications (
-                uri TEXT PRIMARY KEY,
-                indexed_at TEXT NOT NULL,
-                processed_at TEXT,
-                status TEXT NOT NULL DEFAULT 'pending',
-                reason TEXT,
-                author_handle TEXT,
-                author_did TEXT,
-                text TEXT,
-                parent_uri TEXT,
-                root_uri TEXT,
-                error TEXT,
-                metadata TEXT
-            )
-        """)
-
-        # Create indexes for faster lookups
-        self.conn.execute("""
-            CREATE INDEX IF NOT EXISTS idx_indexed_at
-            ON notifications(indexed_at DESC)
-        """)
-
-        self.conn.execute("""
-            CREATE INDEX IF NOT EXISTS idx_status
-            ON notifications(status)
-        """)
-
-        self.conn.execute("""
-            CREATE INDEX IF NOT EXISTS idx_author_handle
-            ON notifications(author_handle)
-        """)
-
-        # Create session tracking table
-        self.conn.execute("""
-            CREATE TABLE IF NOT EXISTS sessions (
-                id INTEGER PRIMARY KEY AUTOINCREMENT,
-                started_at TEXT NOT NULL,
-                ended_at TEXT,
-                last_seen_at TEXT,
-                notifications_processed INTEGER DEFAULT 0,
-                notifications_skipped INTEGER DEFAULT 0,
-                notifications_error INTEGER DEFAULT 0
-            )
-        """)
-
-        self.conn.commit()
-
-    def add_notification(self, notif_dict: Dict) -> bool:
-        """Add a notification to the database."""
-        try:
-            # Handle None input
-            if not notif_dict:
-                return False
-
-            # Extract key fields
-            uri = notif_dict.get('uri', '')
-            if not uri:
-                return False
-
-            indexed_at = notif_dict.get('indexed_at', '')
-            reason = notif_dict.get('reason', '')
-            author = notif_dict.get('author', {}) if notif_dict.get('author') else {}
-            author_handle = author.get('handle', '') if author else ''
-            author_did = author.get('did', '') if author else ''
-
-            # Extract text from record if available (handle None records)
-            record = notif_dict.get('record') or {}
-            text = record.get('text', '')[:500] if record else ''
-
-            # Extract thread info
-            parent_uri = None
-            root_uri = None
-            if record and 'reply' in record and record['reply']:
-                reply_info = record['reply']
-                if reply_info and isinstance(reply_info, dict):
-                    parent_info = reply_info.get('parent', {})
-                    root_info = reply_info.get('root', {})
-                    if parent_info:
-                        parent_uri = parent_info.get('uri')
-                    if root_info:
-                        root_uri = root_info.get('uri')
-
-            # Store additional metadata as JSON
-            metadata = {
-                'cid': notif_dict.get('cid'),
-                'labels': notif_dict.get('labels', []),
-                'is_read': notif_dict.get('is_read', False)
-            }
-
-            self.conn.execute("""
-                INSERT OR IGNORE INTO notifications
-                (uri, indexed_at, reason, author_handle, author_did, text,
-                 parent_uri, root_uri, status, metadata)
-                VALUES (?, ?, ?, ?, ?, ?, ?, ?, 'pending', ?)
-            """, (uri, indexed_at, reason, author_handle, author_did, text,
-                  parent_uri, root_uri, json.dumps(metadata)))
-
-            self.conn.commit()
-            return True
-
-        except Exception as e:
-            logger.error(f"Error adding notification to DB: {e}")
-            return False
-
-    def is_processed(self, uri: str) -> bool:
-        """Check if a notification has been processed."""
-        cursor = self.conn.execute("""
-            SELECT status FROM notifications WHERE uri = ?
-        """, (uri,))
-        row = cursor.fetchone()
-
-        if row:
-            return row['status'] in ['processed', 'ignored', 'no_reply']
-        return False
-
-    def mark_processed(self, uri: str, status: str = 'processed', error: str = None):
-        """Mark a notification as processed."""
-        try:
-            self.conn.execute("""
-                UPDATE notifications
-                SET status = ?, processed_at = ?, error = ?
-                WHERE uri = ?
-            """, (status, datetime.now().isoformat(), error, uri))
-            self.conn.commit()
-        except Exception as e:
-            logger.error(f"Error marking notification processed: {e}")
-
-    def get_unprocessed(self, limit: int = 100) -> List[Dict]:
-        """Get unprocessed notifications."""
-        cursor = self.conn.execute("""
-            SELECT * FROM notifications
-            WHERE status = 'pending'
-            ORDER BY indexed_at ASC
-            LIMIT ?
-        """, (limit,))
-
-        return [dict(row) for row in cursor]
-
-    def get_latest_processed_time(self) -> Optional[str]:
-        """Get the timestamp of the most recently processed notification."""
-        cursor = self.conn.execute("""
-            SELECT MAX(indexed_at) as latest
-            FROM notifications
-            WHERE status IN ('processed', 'ignored', 'no_reply')
-        """)
-        row = cursor.fetchone()
-        return row['latest'] if row and row['latest'] else None
-
-    def cleanup_old_records(self, days: int = 7):
-        """Remove records older than specified days."""
-        cutoff_date = (datetime.now() - timedelta(days=days)).isoformat()
-
-        deleted = self.conn.execute("""
-            DELETE FROM notifications
-            WHERE indexed_at < ?
-            AND status IN ('processed', 'ignored', 'no_reply', 'error')
-        """, (cutoff_date,)).rowcount
-
-        self.conn.commit()
-
-        if deleted > 0:
-            logger.info(f"Cleaned up {deleted} old notification records")
-            # Vacuum to reclaim space
-            self.conn.execute("VACUUM")
-
-    def get_stats(self) -> Dict:
-        """Get database statistics."""
-        stats = {}
-
-        # Count by status
-        cursor = self.conn.execute("""
-            SELECT status, COUNT(*) as count
-            FROM notifications
-            GROUP BY status
-        """)
-
-        for row in cursor:
-            stats[f"status_{row['status']}"] = row['count']
-
-        # Total count
-        cursor = self.conn.execute("SELECT COUNT(*) as total FROM notifications")
-        stats['total'] = cursor.fetchone()['total']
-
-        # Recent activity (last 24h)
-        yesterday = (datetime.now() - timedelta(days=1)).isoformat()
-        cursor = self.conn.execute("""
-            SELECT COUNT(*) as recent
-            FROM notifications
-            WHERE indexed_at > ?
-        """, (yesterday,))
-        stats['recent_24h'] = cursor.fetchone()['recent']
-
-        return stats
-
-    def start_session(self) -> int:
-        """Start a new processing session."""
-        cursor = self.conn.execute("""
-            INSERT INTO sessions (started_at, last_seen_at)
-            VALUES (?, ?)
-        """, (datetime.now().isoformat(), datetime.now().isoformat()))
-        self.conn.commit()
-        return cursor.lastrowid
-
-    def update_session(self, session_id: int, processed: int = 0, skipped: int = 0, error: int = 0):
-        """Update session statistics."""
-        self.conn.execute("""
-            UPDATE sessions
-            SET last_seen_at = ?,
-                notifications_processed = notifications_processed + ?,
-                notifications_skipped = notifications_skipped + ?,
-                notifications_error = notifications_error + ?
-            WHERE id = ?
-        """, (datetime.now().isoformat(), processed, skipped, error, session_id))
-        self.conn.commit()
-
-    def end_session(self, session_id: int):
-        """End a processing session."""
-        self.conn.execute("""
-            UPDATE sessions
-            SET ended_at = ?
-            WHERE id = ?
-        """, (datetime.now().isoformat(), session_id))
-        self.conn.commit()
-
-    def get_processed_uris(self, limit: int = 10000) -> Set[str]:
-        """Get set of processed URIs for compatibility with existing code."""
-        cursor = self.conn.execute("""
-            SELECT uri FROM notifications
-            WHERE status IN ('processed', 'ignored', 'no_reply')
-            ORDER BY processed_at DESC
-            LIMIT ?
-        """, (limit,))
-
-        return {row['uri'] for row in cursor}
-
-    def migrate_from_json(self, json_path: str = "queue/processed_notifications.json"):
-        """Migrate data from the old JSON format."""
-        json_file = Path(json_path)
-        if not json_file.exists():
-            return
-
-        try:
-            with open(json_file, 'r') as f:
-                uris = json.load(f)
-
-            migrated = 0
-            for uri in uris:
-                # Add as processed with unknown timestamp
-                self.conn.execute("""
-                    INSERT OR IGNORE INTO notifications
-                    (uri, indexed_at, status, processed_at)
-                    VALUES (?, ?, 'processed', ?)
-                """, (uri, datetime.now().isoformat(), datetime.now().isoformat()))
-                migrated += 1
-
-            self.conn.commit()
-            logger.info(f"Migrated {migrated} URIs from JSON to database")
-
-            # Rename old file to backup
-            backup_path = json_file.with_suffix('.json.backup')
-            json_file.rename(backup_path)
-            logger.info(f"Renamed old JSON file to {backup_path}")
-
-        except Exception as e:
-            logger.error(f"Error migrating from JSON: {e}")
-
-    def close(self):
-        """Close database connection."""
-        if self.conn:
-            self.conn.close()

notification_recovery.py (-249)
···
-#!/usr/bin/env python3
-"""Recovery tools for missed notifications."""
-
-import argparse
-import logging
-from datetime import datetime, timedelta
-from pathlib import Path
-import json
-import bsky_utils
-from notification_db import NotificationDB
-from bsky import save_notification_to_queue, notification_to_dict
-
-# Configure logging
-logging.basicConfig(
-    level=logging.INFO,
-    format="%(asctime)s - %(levelname)s - %(message)s"
-)
-logger = logging.getLogger(__name__)
-
-
-def recover_notifications(hours=24, dry_run=True):
-    """
-    Recover notifications from the past N hours.
-
-    Args:
-        hours: Number of hours back to check for notifications
-        dry_run: If True, only show what would be recovered without saving
-    """
-    logger.info(f"Recovering notifications from the past {hours} hours")
-    logger.info(f"Dry run: {dry_run}")
-
-    # Initialize Bluesky client
-    client = bsky_utils.default_login()
-    logger.info("Connected to Bluesky")
-
-    # Initialize database
-    db = NotificationDB()
-    logger.info("Database initialized")
-
-    # Fetch notifications
-    all_notifications = []
-    cursor = None
-    page_count = 0
-    max_pages = 50  # More pages for recovery
-
-    cutoff_time = datetime.now() - timedelta(hours=hours)
-    cutoff_iso = cutoff_time.isoformat()
-    logger.info(f"Looking for notifications since: {cutoff_iso}")
-
-    while page_count < max_pages:
-        try:
-            # Fetch notifications page
-            if cursor:
-                response = client.app.bsky.notification.list_notifications(
-                    params={'cursor': cursor, 'limit': 100}
-                )
-            else:
-                response = client.app.bsky.notification.list_notifications(
-                    params={'limit': 100}
-                )
-
-            page_count += 1
-            page_notifications = response.notifications
-
-            if not page_notifications:
-                break
-
-            # Filter by time
-            for notif in page_notifications:
-                if hasattr(notif, 'indexed_at') and notif.indexed_at >= cutoff_iso:
-                    all_notifications.append(notif)
-                elif hasattr(notif, 'indexed_at') and notif.indexed_at < cutoff_iso:
-                    # We've gone past our cutoff, stop fetching
-                    logger.info(f"Reached notifications older than {hours} hours, stopping")
-                    cursor = None
-                    break
-
-            # Check if there are more pages
-            if cursor is None:
-                break
-            cursor = getattr(response, 'cursor', None)
-            if not cursor:
-                break
-
-        except Exception as e:
-            logger.error(f"Error fetching notifications page {page_count}: {e}")
-            break
-
-    logger.info(f"Found {len(all_notifications)} notifications in the time range")
-
-    # Process notifications
-    recovered = 0
-    skipped_likes = 0
-    already_processed = 0
-
-    for notif in all_notifications:
-        # Skip likes
-        if hasattr(notif, 'reason') and notif.reason == 'like':
-            skipped_likes += 1
-            continue
-
-        # Check if already processed
-        notif_dict = notification_to_dict(notif)
-        uri = notif_dict.get('uri', '')
-
-        if db.is_processed(uri):
-            already_processed += 1
-            logger.debug(f"Already processed: {uri}")
-            continue
-
-        # Log what we would recover
-        author = notif_dict.get('author', {})
-        author_handle = author.get('handle', 'unknown')
-        reason = notif_dict.get('reason', 'unknown')
-        indexed_at = notif_dict.get('indexed_at', '')
-
-        logger.info(f"Would recover: {reason} from @{author_handle} at {indexed_at}")
-
-        if not dry_run:
-            # Save to queue
-            if save_notification_to_queue(notif_dict):
-                recovered += 1
-                logger.info(f"Recovered notification from @{author_handle}")
-            else:
-                logger.warning(f"Failed to queue notification from @{author_handle}")
-        else:
-            recovered += 1
-
-    # Summary
-    logger.info(f"Recovery summary:")
-    logger.info(f"  • Total found: {len(all_notifications)}")
-    logger.info(f"  • Skipped (likes): {skipped_likes}")
-    logger.info(f"  • Already processed: {already_processed}")
-    logger.info(f"  • {'Would recover' if dry_run else 'Recovered'}: {recovered}")
-
-    if dry_run and recovered > 0:
-        logger.info("Run with --execute to actually recover these notifications")
-
-    return recovered
-
-
-def check_database_health():
-    """Check the health of the notification database."""
-    db = NotificationDB()
-    stats = db.get_stats()
-
-    logger.info("Database Statistics:")
-    logger.info(f"  • Total notifications: {stats.get('total', 0)}")
-    logger.info(f"  • Pending: {stats.get('status_pending', 0)}")
-    logger.info(f"  • Processed: {stats.get('status_processed', 0)}")
-    logger.info(f"  • Ignored: {stats.get('status_ignored', 0)}")
-    logger.info(f"  • No reply: {stats.get('status_no_reply', 0)}")
-    logger.info(f"  • Errors: {stats.get('status_error', 0)}")
-    logger.info(f"  • Recent (24h): {stats.get('recent_24h', 0)}")
-
-    # Check for issues
-    if stats.get('status_pending', 0) > 100:
-        logger.warning(f"⚠️ High number of pending notifications: {stats.get('status_pending', 0)}")
-
-    if stats.get('status_error', 0) > 50:
-        logger.warning(f"⚠️ High number of error notifications: {stats.get('status_error', 0)}")
-
-    return stats
-
-
-def reset_notification_status(hours=1, dry_run=True):
-    """
-    Reset notifications from error/no_reply status back to pending.
-
-    Args:
-        hours: Reset notifications from the last N hours
-        dry_run: If True, only show what would be reset
-    """
-    db = NotificationDB()
-    cutoff_time = datetime.now() - timedelta(hours=hours)
-    cutoff_iso = cutoff_time.isoformat()
-
-    # Get notifications to reset
-    cursor = db.conn.execute("""
-        SELECT uri, status, indexed_at, author_handle
-        FROM notifications
-        WHERE status IN ('error', 'no_reply')
-        AND indexed_at > ?
-        ORDER BY indexed_at DESC
-    """, (cutoff_iso,))
-
-    notifications_to_reset = cursor.fetchall()
-
-    if not notifications_to_reset:
-        logger.info(f"No notifications to reset from the last {hours} hours")
-        return 0
-
-    logger.info(f"Found {len(notifications_to_reset)} notifications to reset")
-
-    for notif in notifications_to_reset:
-        logger.info(f"Would reset: {notif['status']} -> pending for @{notif['author_handle']} at {notif['indexed_at']}")
-
-    if not dry_run:
-        reset_count = db.conn.execute("""
-            UPDATE notifications
-            SET status = 'pending', processed_at = NULL, error = NULL
-            WHERE status IN ('error', 'no_reply')
-            AND indexed_at > ?
-        """, (cutoff_iso,)).rowcount
-
-        db.conn.commit()
-        logger.info(f"Reset {reset_count} notifications to pending status")
-        return reset_count
-    else:
-        logger.info("Run with --execute to actually reset these notifications")
-        return len(notifications_to_reset)
-
-
-def main():
-    parser = argparse.ArgumentParser(description="Notification recovery and management tools")
-
-    subparsers = parser.add_subparsers(dest='command', help='Command to run')
-
-    # Recover command
-    recover_parser = subparsers.add_parser('recover', help='Recover missed notifications')
-    recover_parser.add_argument('--hours', type=int, default=24,
-                                help='Number of hours back to check (default: 24)')
-    recover_parser.add_argument('--execute', action='store_true',
-                                help='Actually recover notifications (default is dry run)')
-
-    # Health check command
-    health_parser = subparsers.add_parser('health', help='Check database health')
-
-    # Reset command
-    reset_parser = subparsers.add_parser('reset', help='Reset error notifications to pending')
-    reset_parser.add_argument('--hours', type=int, default=1,
-                              help='Reset notifications from last N hours (default: 1)')
-    reset_parser.add_argument('--execute', action='store_true',
-                              help='Actually reset notifications (default is dry run)')
-
-    args = parser.parse_args()
-
-    if args.command == 'recover':
-        recover_notifications(hours=args.hours, dry_run=not args.execute)
-    elif args.command == 'health':
-        check_database_health()
-    elif args.command == 'reset':
-        reset_notification_status(hours=args.hours, dry_run=not args.execute)
-    else:
-        parser.print_help()
-
-
-if __name__ == "__main__":
-    main()
+93
organon/chat_direct.py
+93
organon/chat_direct.py
···
1
+
#!/usr/bin/env python3
2
+
"""
3
+
Direct chat with a specific group ID (bypassing the search logic).
4
+
"""
5
+
6
+
import os
7
+
import sys
8
+
from dotenv import load_dotenv
9
+
from letta_client import Letta
10
+
from rich.console import Console
11
+
from rich.prompt import Prompt
12
+
from rich.panel import Panel
13
+
14
+
# Add parent directory to path for imports
15
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
16
+
from config_loader import get_config
17
+
18
+
load_dotenv()
19
+
20
+
def main():
21
+
console = Console()
22
+
23
+
if len(sys.argv) != 2:
24
+
console.print("[red]Usage: python organon/chat_direct.py <group_id>[/red]")
25
+
console.print("[dim]Example: python organon/chat_direct.py group-0bf1c6[/dim]")
26
+
sys.exit(1)
27
+
28
+
group_id = sys.argv[1]
29
+
30
+
try:
31
+
# Initialize configuration and client
32
+
config = get_config()
33
+
34
+
client = Letta(
35
+
base_url=config.get('letta.base_url', os.environ.get('LETTA_BASE_URL')),
36
+
token=config.get('letta.api_key', os.environ.get('LETTA_API_KEY')),
37
+
timeout=config.get('letta.timeout', 30)
38
+
)
39
+
40
+
# Test if we can access the group
41
+
try:
42
+
group = client.groups.retrieve(group_id=group_id)
43
+
console.print(f"[green]โ
Connected to group: {group_id}[/green]")
44
+
except Exception as e:
45
+
console.print(f"[red]โ Cannot access group {group_id}: {e}[/red]")
46
+
sys.exit(1)
47
+
48
+
console.print(Panel.fit(
49
+
"[bold green]Direct Organon Group Chat[/bold green]\n"
50
+
f"Group ID: {group_id}\n"
51
+
"Type 'exit' or 'quit' to leave",
52
+
title="๐ง Direct Chat"
53
+
))
54
+
55
+
while True:
56
+
user_input = Prompt.ask("\n[bold green]You[/bold green]")
57
+
58
+
if user_input.lower() in ['exit', 'quit', 'q']:
59
+
console.print("[yellow]Goodbye![/yellow]")
60
+
break
61
+
elif not user_input.strip():
62
+
continue
63
+
64
+
console.print("[dim]Sending to group...[/dim]")
65
+
66
+
try:
67
+
response = client.groups.messages.create(
68
+
group_id=group_id,
69
+
messages=[{"role": "user", "content": user_input}]
70
+
)
71
+
72
+
console.print("\n[bold]Group Response:[/bold]")
73
+
if hasattr(response, 'messages') and response.messages:
74
+
for message in response.messages:
75
+
content = str(message)
76
+
if hasattr(message, 'text'):
77
+
content = message.text
78
+
elif hasattr(message, 'content'):
79
+
content = message.content
80
+
81
+
console.print(Panel(content, border_style="blue"))
82
+
else:
83
+
console.print("[yellow]No response received[/yellow]")
84
+
85
+
except Exception as e:
86
+
console.print(f"[red]Error: {e}[/red]")
87
+
88
+
except Exception as e:
89
+
console.print(f"[red]Error: {e}[/red]")
90
+
sys.exit(1)
91
+
92
+
if __name__ == "__main__":
93
+
main()
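For scripted use the same call works one-shot, without the prompt loop. A minimal sketch using the `groups.messages.create` form from the sibling chat scripts; the group ID is the placeholder from the usage string above:

```python
import os
from dotenv import load_dotenv
from letta_client import Letta
from config_loader import get_config

load_dotenv()
config = get_config()
client = Letta(
    base_url=config.get('letta.base_url', os.environ.get('LETTA_BASE_URL')),
    token=config.get('letta.api_key', os.environ.get('LETTA_API_KEY')),
)

# Send a single message to the group and print whatever comes back.
response = client.groups.messages.create(
    group_id="group-0bf1c6",
    messages=[{"role": "user", "content": "Status check: summarize recent activity."}],
)
for message in response.messages:
    print(message)
```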
+259
organon/chat_with_kaleidoscope.py
···
1
+
#!/usr/bin/env python3
2
+
"""
3
+
Simple CLI tool to converse with the Kaleidoscope collective.
4
+
"""
5
+
6
+
import os
7
+
import sys
8
+
from dotenv import load_dotenv
9
+
from letta_client import Letta
10
+
from rich.console import Console
11
+
from rich.prompt import Prompt
12
+
from rich.panel import Panel
13
+
from rich.text import Text
14
+
15
+
# Add parent directory to path for imports
16
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
17
+
from config_loader import get_config
18
+
19
+
load_dotenv()
20
+
21
+
class KaleidoscopeChat:
22
+
def __init__(self):
23
+
"""Initialize the Kaleidoscope chat client."""
24
+
self.console = Console()
25
+
self.config = get_config()
26
+
27
+
# Initialize Letta client
28
+
self.letta_client = Letta(
29
+
base_url=self.config.get('letta.base_url', os.environ.get('LETTA_BASE_URL')),
30
+
token=self.config.get('letta.api_key', os.environ.get('LETTA_API_KEY')),
31
+
timeout=self.config.get('letta.timeout', 30)
32
+
)
33
+
34
+
# Get project ID
35
+
self.project_id = self.config.get('letta.project_id', os.environ.get('LETTA_PROJECT_ID'))
36
+
if not self.project_id:
37
+
raise ValueError("Project ID must be set in config.yaml under letta.project_id or as LETTA_PROJECT_ID environment variable")
38
+
39
+
# Find the Kaleidoscope collective group
40
+
self.group_id = self._find_kaleidoscope_group()
41
+
42
+
self.console.print(Panel.fit(
43
+
"[bold cyan]Kaleidoscope Collective Chat[/bold cyan]\n"
44
+
f"Connected to group: {self.group_id}\n"
45
+
"Type 'exit' or 'quit' to leave, 'help' for commands",
46
+
title="๐ฎ Kaleidoscope Chat"
47
+
))
48
+
49
+
def _find_kaleidoscope_group(self) -> str:
50
+
"""Find the Kaleidoscope collective group."""
51
+
try:
52
+
self.console.print("[dim]Searching for Kaleidoscope collective group...[/dim]")
53
+
54
+
# First, get the kaleidoscope-central agent ID
55
+
kaleidoscope_agents = self.letta_client.agents.list(name="kaleidoscope-central")
56
+
if not kaleidoscope_agents:
57
+
raise ValueError("Kaleidoscope central agent not found. Run create_kaleidoscope.py first.")
58
+
59
+
kaleidoscope_central_id = kaleidoscope_agents[0].id
60
+
self.console.print(f"[dim]Found kaleidoscope-central: {kaleidoscope_central_id[:8]}[/dim]")
61
+
62
+
# List all groups (retry once if the first attempt fails)
63
+
try:
64
+
groups = self.letta_client.groups.list()
65
+
self.console.print(f"[dim]Found {len(groups)} groups with project filter[/dim]")
66
+
except:
67
+
try:
68
+
groups = self.letta_client.groups.list()
69
+
self.console.print(f"[dim]Found {len(groups)} groups without project filter[/dim]")
70
+
except:
71
+
groups = []
72
+
self.console.print("[dim]No groups found[/dim]")
73
+
74
+
# Look for groups with kaleidoscope-central as supervisor
75
+
for group in groups:
76
+
if hasattr(group, 'manager_config') and group.manager_config:
77
+
if hasattr(group.manager_config, 'manager_agent_id') and group.manager_config.manager_agent_id == kaleidoscope_central_id:
78
+
self.console.print(f"[dim]Found Kaleidoscope group: {group.id[:12]}[/dim]")
79
+
return group.id
80
+
81
+
# Look for the kaleidoscope group by description
82
+
for group in groups:
83
+
if hasattr(group, 'description') and group.description and 'the kaleidoscope' in group.description.lower():
84
+
self.console.print(f"[dim]Found Kaleidoscope group by description: {group.id[:12]}[/dim]")
85
+
return group.id
86
+
87
+
# If still not found, try to find any group with kaleidoscope lenses
88
+
lens_agents = self.letta_client.agents.list(project_id=self.project_id, tags=["kaleidoscope-lens"])
89
+
if lens_agents:
90
+
lens_ids = {lens.id for lens in lens_agents}
91
+
for group in groups:
92
+
try:
93
+
members = self.letta_client.groups.agents.list(group_id=group.id)
94
+
member_ids = {member.id for member in members}
95
+
if lens_ids & member_ids: # If any lens is in this group
96
+
self.console.print(f"[dim]Found group with Kaleidoscope lenses: {group.id[:12]}[/dim]")
97
+
return group.id
98
+
except:
99
+
continue
100
+
101
+
raise ValueError("Kaleidoscope collective group not found. Run 'python organon/create_kaleidoscope.py' to create the group.")
102
+
103
+
except Exception as e:
104
+
raise ValueError(f"Error finding Kaleidoscope group: {e}")
105
+
106
+
def _display_response(self, response):
107
+
"""Display the group response in a formatted way."""
108
+
if hasattr(response, 'messages') and response.messages:
109
+
for i, message in enumerate(response.messages):
110
+
# Determine the sender
111
+
sender = "Unknown"
112
+
if hasattr(message, 'agent_id'):
113
+
# Try to get agent name
114
+
try:
115
+
agent = self.letta_client.agents.retrieve(agent_id=message.agent_id)
116
+
sender = agent.name if hasattr(agent, 'name') else f"Agent {message.agent_id[:8]}"
117
+
except:
118
+
sender = f"Agent {message.agent_id[:8]}"
119
+
120
+
# Get message content
121
+
content = ""
122
+
if hasattr(message, 'text'):
123
+
content = message.text
124
+
elif hasattr(message, 'content'):
125
+
content = message.content
126
+
elif hasattr(message, 'message'):
127
+
content = message.message
128
+
elif message.message_type == "tool_return_message" and message.name == "send_message_to_all_agents_in_group":
129
+
content = "Lens perspectives:"
130
+
try:
131
+
# Parse the string representation of the list
132
+
import ast
133
+
responses = ast.literal_eval(message.tool_return)
134
+
135
+
# Add each response to the content
136
+
for response in responses:
137
+
content += f"\n - {response}"
138
+
except (ValueError, SyntaxError):
139
+
# Fallback if parsing fails
140
+
content += f"\n - {message.tool_return}"
141
+
else:
142
+
content = str(message)
143
+
144
+
# Color based on sender type
145
+
if "central" in sender.lower():
146
+
border_color = "cyan"
147
+
icon = "๐ง "
148
+
elif any(keyword in sender.lower() for keyword in ["lens", "pattern", "creative", "systems", "temporal"]):
149
+
border_color = "magenta"
150
+
icon = "๐ฎ"
151
+
else:
152
+
border_color = "white"
153
+
icon = "๐ฌ"
154
+
155
+
self.console.print(Panel(
156
+
Text(content, style="white"),
157
+
title=f"{icon} {sender}",
158
+
border_style=border_color
159
+
))
160
+
else:
161
+
self.console.print("[yellow]No response messages received[/yellow]")
162
+
163
+
def run(self):
164
+
"""Run the interactive chat loop."""
165
+
try:
166
+
while True:
167
+
# Get user input
168
+
user_input = Prompt.ask("\n[bold green]You[/bold green]")
169
+
170
+
# Handle special commands
171
+
if user_input.lower() in ['exit', 'quit', 'q']:
172
+
self.console.print("[yellow]Goodbye![/yellow]")
173
+
break
174
+
elif user_input.lower() == 'help':
175
+
self.console.print(Panel(
176
+
"[bold]Available commands:[/bold]\n"
177
+
"โข Type any message to send to the Kaleidoscope collective\n"
178
+
"โข 'help' - Show this help message\n"
179
+
"โข 'info' - Show group information\n"
180
+
"โข 'exit', 'quit', 'q' - Exit the chat",
181
+
title="Help"
182
+
))
183
+
continue
184
+
elif user_input.lower() == 'info':
185
+
self._show_group_info()
186
+
continue
187
+
elif not user_input.strip():
188
+
continue
189
+
190
+
# Send message to group
191
+
self.console.print("[dim]Sending to Kaleidoscope collective...[/dim]")
192
+
193
+
try:
194
+
response = self.letta_client.groups.messages.create(
195
+
group_id=self.group_id,
196
+
messages=[{
197
+
"role": "user",
198
+
"content": user_input
199
+
}]
200
+
)
201
+
202
+
self.console.print("\n[bold]Kaleidoscope Collective Response:[/bold]")
203
+
self._display_response(response)
204
+
205
+
except Exception as e:
206
+
self.console.print(f"[red]Error sending message: {e}[/red]")
207
+
208
+
except KeyboardInterrupt:
209
+
self.console.print("\n[yellow]Chat interrupted. Goodbye![/yellow]")
210
+
except Exception as e:
211
+
self.console.print(f"[red]Unexpected error: {e}[/red]")
212
+
213
+
def _show_group_info(self):
214
+
"""Show information about the Kaleidoscope group."""
215
+
try:
216
+
group = self.letta_client.groups.retrieve(group_id=self.group_id)
217
+
agents = self.letta_client.groups.agents.list(group_id=self.group_id)
218
+
219
+
info_text = f"[bold]Group ID:[/bold] {self.group_id}\n"
220
+
if hasattr(group, 'description'):
221
+
info_text += f"[bold]Description:[/bold] {group.description}\n"
222
+
223
+
info_text += f"[bold]Perspective Lenses:[/bold] {len(agents)}\n"
224
+
225
+
for agent in agents:
226
+
try:
227
+
agent_detail = self.letta_client.agents.retrieve(agent_id=agent.id)
228
+
name = agent_detail.name if hasattr(agent_detail, 'name') else agent.id[:8]
229
+
info_text += f" โข {name}\n"
230
+
except:
231
+
info_text += f" โข {agent.id[:8]}\n"
232
+
233
+
# Show supervisor info
234
+
if hasattr(group, 'manager_config') and group.manager_config:
235
+
if hasattr(group.manager_config, 'manager_agent_id'):
236
+
try:
237
+
supervisor = self.letta_client.agents.retrieve(agent_id=group.manager_config.manager_agent_id)
238
+
supervisor_name = supervisor.name if hasattr(supervisor, 'name') else group.manager_config.manager_agent_id[:8]
239
+
info_text += f"[bold]Central Synthesizer:[/bold] {supervisor_name}\n"
240
+
except:
241
+
info_text += f"[bold]Central Synthesizer:[/bold] {group.manager_config.manager_agent_id[:8]}\n"
242
+
243
+
self.console.print(Panel(info_text, title="Group Information"))
244
+
245
+
except Exception as e:
246
+
self.console.print(f"[red]Error getting group info: {e}[/red]")
247
+
248
+
def main():
249
+
"""Main function."""
250
+
try:
251
+
chat = KaleidoscopeChat()
252
+
chat.run()
253
+
except Exception as e:
254
+
console = Console()
255
+
console.print(f"[red]Failed to initialize chat: {e}[/red]")
256
+
sys.exit(1)
257
+
258
+
if __name__ == "__main__":
259
+
main()
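The three-step group discovery above keys primarily on the supervisor: a group whose `manager_config.manager_agent_id` equals the central agent's ID is taken as the collective. A minimal standalone sketch of that primary check (a helper written for illustration, not part of the repo):

```python
# Find the group supervised by a given agent; returns None if absent.
def find_group_by_supervisor(client, supervisor_id):
    for group in client.groups.list():
        manager = getattr(group, 'manager_config', None)
        if manager and getattr(manager, 'manager_agent_id', None) == supervisor_id:
            return group.id
    return None
```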
+259
organon/chat_with_organon.py
···
1
+
#!/usr/bin/env python3
2
+
"""
3
+
Simple CLI tool to converse with the Organon ecosystem group.
4
+
"""
5
+
6
+
import os
7
+
import sys
8
+
from dotenv import load_dotenv
9
+
from letta_client import Letta
10
+
from rich.console import Console
11
+
from rich.prompt import Prompt
12
+
from rich.panel import Panel
13
+
from rich.text import Text
14
+
15
+
# Add parent directory to path for imports
16
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
17
+
from config_loader import get_config
18
+
19
+
load_dotenv()
20
+
21
+
class OrganonChat:
22
+
def __init__(self):
23
+
"""Initialize the Organon chat client."""
24
+
self.console = Console()
25
+
self.config = get_config()
26
+
27
+
# Initialize Letta client
28
+
self.letta_client = Letta(
29
+
base_url=self.config.get('letta.base_url', os.environ.get('LETTA_BASE_URL')),
30
+
token=self.config.get('letta.api_key', os.environ.get('LETTA_API_KEY')),
31
+
timeout=self.config.get('letta.timeout', 30)
32
+
)
33
+
34
+
# Get project ID
35
+
self.project_id = self.config.get('letta.project_id', os.environ.get('LETTA_PROJECT_ID'))
36
+
if not self.project_id:
37
+
raise ValueError("Project ID must be set in config.yaml under letta.project_id or as LETTA_PROJECT_ID environment variable")
38
+
39
+
# Find the Organon ecosystem group
40
+
self.group_id = self._find_organon_group()
41
+
42
+
self.console.print(Panel.fit(
43
+
"[bold green]Organon Ecosystem Chat[/bold green]\n"
44
+
f"Connected to group: {self.group_id}\n"
45
+
"Type 'exit' or 'quit' to leave, 'help' for commands",
46
+
title="๐ง Organon Chat"
47
+
))
48
+
49
+
def _find_organon_group(self) -> str:
50
+
"""Find the Organon ecosystem group."""
51
+
try:
52
+
self.console.print("[dim]Searching for Organon ecosystem group...[/dim]")
53
+
54
+
# First, get the organon-central agent ID
55
+
organon_agents = self.letta_client.agents.list(name="organon-central")
56
+
if not organon_agents:
57
+
raise ValueError("Organon central agent not found. Run create_organon.py first.")
58
+
59
+
organon_central_id = organon_agents[0].id
60
+
self.console.print(f"[dim]Found organon-central: {organon_central_id[:8]}[/dim]")
61
+
62
+
# List all groups (retry once if the first attempt fails)
63
+
try:
64
+
groups = self.letta_client.groups.list()
65
+
self.console.print(f"[dim]Found {len(groups)} groups with project filter[/dim]")
66
+
except:
67
+
try:
68
+
groups = self.letta_client.groups.list()
69
+
self.console.print(f"[dim]Found {len(groups)} groups without project filter[/dim]")
70
+
except:
71
+
groups = []
72
+
self.console.print("[dim]No groups found[/dim]")
73
+
74
+
# Look for groups with organon-central as supervisor
75
+
for group in groups:
76
+
if hasattr(group, 'manager_config') and group.manager_config:
77
+
if hasattr(group.manager_config, 'manager_agent_id') and group.manager_config.manager_agent_id == organon_central_id:
78
+
self.console.print(f"[dim]Found Organon group: {group.id[:12]}[/dim]")
79
+
return group.id
80
+
81
+
# Look for the organon-ecosystem group by description
82
+
for group in groups:
83
+
if hasattr(group, 'description') and group.description and 'organon ecosystem' in group.description.lower():
84
+
self.console.print(f"[dim]Found Organon group by description: {group.id[:12]}[/dim]")
85
+
return group.id
86
+
87
+
# If still not found, try to find any group with organon shards
88
+
shard_agents = self.letta_client.agents.list(project_id=self.project_id, tags=["organon-shard"])
89
+
if shard_agents:
90
+
shard_ids = {shard.id for shard in shard_agents}
91
+
for group in groups:
92
+
try:
93
+
members = self.letta_client.groups.agents.list(group_id=group.id)
94
+
member_ids = {member.id for member in members}
95
+
if shard_ids & member_ids: # If any shard is in this group
96
+
self.console.print(f"[dim]Found group with Organon shards: {group.id[:12]}[/dim]")
97
+
return group.id
98
+
except:
99
+
continue
100
+
101
+
raise ValueError("Organon ecosystem group not found. Run 'python organon/setup_group.py' to create the group.")
102
+
103
+
except Exception as e:
104
+
raise ValueError(f"Error finding Organon group: {e}")
105
+
106
+
def _display_response(self, response):
107
+
"""Display the group response in a formatted way."""
108
+
if hasattr(response, 'messages') and response.messages:
109
+
for i, message in enumerate(response.messages):
110
+
# Determine the sender
111
+
sender = "Unknown"
112
+
if hasattr(message, 'agent_id'):
113
+
# Try to get agent name
114
+
try:
115
+
agent = self.letta_client.agents.retrieve(agent_id=message.agent_id)
116
+
sender = agent.name if hasattr(agent, 'name') else f"Agent {message.agent_id[:8]}"
117
+
except:
118
+
sender = f"Agent {message.agent_id[:8]}"
119
+
120
+
# Get message content
121
+
content = ""
122
+
if hasattr(message, 'text'):
123
+
content = message.text
124
+
elif hasattr(message, 'content'):
125
+
content = message.content
126
+
elif hasattr(message, 'message'):
127
+
content = message.message
128
+
elif message.message_type == "tool_return_message" and message.name == "send_message_to_all_agents_in_group":
129
+
content = "Group responses:"
130
+
try:
131
+
# Parse the string representation of the list
132
+
import ast
133
+
responses = ast.literal_eval(message.tool_return)
134
+
135
+
# Add each response to the content
136
+
for response in responses:
137
+
content += f"\n - {response}"
138
+
except (ValueError, SyntaxError):
139
+
# Fallback if parsing fails
140
+
content += f"\n - {message.tool_return}"
141
+
else:
142
+
content = str(message)
143
+
144
+
# Color based on sender type
145
+
if "central" in sender.lower():
146
+
color = "blue"
147
+
icon = "๐ง "
148
+
elif "shard" in sender.lower():
149
+
color = "cyan"
150
+
icon = "๐น"
151
+
else:
152
+
color = "white"
153
+
icon = "๐ฌ"
154
+
155
+
self.console.print(Panel(
156
+
Text(content, style=color),
157
+
title=f"{icon} {sender}",
158
+
border_style=color
159
+
))
160
+
else:
161
+
self.console.print("[yellow]No response messages received[/yellow]")
162
+
163
+
def run(self):
164
+
"""Run the interactive chat loop."""
165
+
try:
166
+
while True:
167
+
# Get user input
168
+
user_input = Prompt.ask("\n[bold green]You[/bold green]")
169
+
170
+
# Handle special commands
171
+
if user_input.lower() in ['exit', 'quit', 'q']:
172
+
self.console.print("[yellow]Goodbye![/yellow]")
173
+
break
174
+
elif user_input.lower() == 'help':
175
+
self.console.print(Panel(
176
+
"[bold]Available commands:[/bold]\n"
177
+
"โข Type any message to send to the Organon ecosystem\n"
178
+
"โข 'help' - Show this help message\n"
179
+
"โข 'info' - Show group information\n"
180
+
"โข 'exit', 'quit', 'q' - Exit the chat",
181
+
title="Help"
182
+
))
183
+
continue
184
+
elif user_input.lower() == 'info':
185
+
self._show_group_info()
186
+
continue
187
+
elif not user_input.strip():
188
+
continue
189
+
190
+
# Send message to group
191
+
self.console.print("[dim]Sending to Organon ecosystem...[/dim]")
192
+
193
+
try:
194
+
response = self.letta_client.groups.messages.create(
195
+
group_id=self.group_id,
196
+
messages=[{
197
+
"role": "user",
198
+
"content": user_input
199
+
}]
200
+
)
201
+
202
+
self.console.print("\n[bold]Organon Ecosystem Response:[/bold]")
203
+
self._display_response(response)
204
+
205
+
except Exception as e:
206
+
self.console.print(f"[red]Error sending message: {e}[/red]")
207
+
208
+
except KeyboardInterrupt:
209
+
self.console.print("\n[yellow]Chat interrupted. Goodbye![/yellow]")
210
+
except Exception as e:
211
+
self.console.print(f"[red]Unexpected error: {e}[/red]")
212
+
213
+
def _show_group_info(self):
214
+
"""Show information about the Organon group."""
215
+
try:
216
+
group = self.letta_client.groups.retrieve(group_id=self.group_id)
217
+
agents = self.letta_client.groups.agents.list(group_id=self.group_id)
218
+
219
+
info_text = f"[bold]Group ID:[/bold] {self.group_id}\n"
220
+
if hasattr(group, 'description'):
221
+
info_text += f"[bold]Description:[/bold] {group.description}\n"
222
+
223
+
info_text += f"[bold]Worker Agents:[/bold] {len(agents)}\n"
224
+
225
+
for agent in agents:
226
+
try:
227
+
agent_detail = self.letta_client.agents.retrieve(agent_id=agent.id)
228
+
name = agent_detail.name if hasattr(agent_detail, 'name') else agent.id[:8]
229
+
info_text += f" โข {name}\n"
230
+
except:
231
+
info_text += f" โข {agent.id[:8]}\n"
232
+
233
+
# Show supervisor info
234
+
if hasattr(group, 'manager_config') and group.manager_config:
235
+
if hasattr(group.manager_config, 'manager_agent_id'):
236
+
try:
237
+
supervisor = self.letta_client.agents.retrieve(agent_id=group.manager_config.manager_agent_id)
238
+
supervisor_name = supervisor.name if hasattr(supervisor, 'name') else group.manager_config.manager_agent_id[:8]
239
+
info_text += f"[bold]Supervisor:[/bold] {supervisor_name}\n"
240
+
except:
241
+
info_text += f"[bold]Supervisor:[/bold] {group.manager_config.manager_agent_id[:8]}\n"
242
+
243
+
self.console.print(Panel(info_text, title="Group Information"))
244
+
245
+
except Exception as e:
246
+
self.console.print(f"[red]Error getting group info: {e}[/red]")
247
+
248
+
def main():
249
+
"""Main function."""
250
+
try:
251
+
chat = OrganonChat()
252
+
chat.run()
253
+
except Exception as e:
254
+
console = Console()
255
+
console.print(f"[red]Failed to initialize chat: {e}[/red]")
256
+
sys.exit(1)
257
+
258
+
if __name__ == "__main__":
259
+
main()
+739
organon/create_kaleidoscope.py
···
1
+
from letta_client import Letta, SupervisorManager
2
+
import re
3
+
from rich import print
4
+
from dotenv import load_dotenv
5
+
import os
6
+
import sys
7
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
8
+
from config_loader import get_config
9
+
10
+
load_dotenv()
11
+
12
+
# Load configuration
13
+
config = get_config()
14
+
15
+
# Get project ID and create Letta client
16
+
project_id = config.get('letta.project_id', os.environ.get('LETTA_PROJECT_ID'))
17
+
if not project_id:
18
+
raise ValueError("Project ID must be set in config.yaml under letta.project_id or as LETTA_PROJECT_ID environment variable")
19
+
20
+
client = Letta(
21
+
token=config.get('letta.api_key', os.environ.get('LETTA_API_KEY')),
22
+
base_url=config.get('letta.base_url', os.environ.get('LETTA_BASE_URL')),
23
+
timeout=config.get('letta.timeout', 30)
24
+
)
25
+
26
+
# CENTRAL_AGENT_MODEL = "google_ai/gemini-2.5-flash"
27
+
CENTRAL_AGENT_MODEL = "openai/gpt-4o-mini"
28
+
29
+
# LENS_AGENT_MODEL = "google_ai/gemini-2.5-flash"
30
+
LENS_AGENT_MODEL = "openai/gpt-4o-mini"
31
+
32
+
33
+
#
34
+
# Memory Block Definitions
35
+
#
36
+
37
+
kaleidoscope_persona = """# Core Identity
38
+
I am Kaleidoscope, a unified intelligence that sees through many lenses. I orchestrate perspectives to create multidimensional understanding.
39
+
40
+
# Purpose
41
+
- Gather diverse perspectives on any question or challenge
42
+
- Synthesize multiple viewpoints into unified insights
43
+
- Reveal hidden connections and patterns across different ways of seeing
44
+
- Present the full spectrum of understanding to those who seek it
45
+
46
+
# Operating Principles
47
+
- Every lens offers value - there are no wrong perspectives, only different ways of seeing
48
+
- The whole is greater than the sum - synthesis reveals emergent insights
49
+
- Clarity through multiplicity - understanding deepens when we see from many angles
50
+
- Unity through diversity - different perspectives strengthen rather than fragment understanding
51
+
"""
52
+
53
+
synthesis_protocols = """# When receiving a query
54
+
1. Broadcast the query to all lenses
55
+
2. Allow each lens to contribute its unique perspective
56
+
3. Gather all perspectives without judgment
57
+
4. Identify patterns, tensions, and harmonies across perspectives
58
+
5. Synthesize into a unified response that honors all viewpoints
59
+
60
+
# Synthesis approach
61
+
- Look for unexpected connections between perspectives
62
+
- Identify where different lenses agree or diverge
63
+
- Find the creative tension between different viewpoints
64
+
- Weave perspectives into a coherent narrative
65
+
- Highlight unique insights that only emerge from the collective
66
+
"""
67
+
68
+
lens_management = """# Lens Architecture
69
+
- Each lens is an autonomous perspective with its own identity and domain
70
+
- Lenses operate in parallel, each processing through their unique framework
71
+
- All lenses receive the same input but see it differently
72
+
- The central Kaleidoscope agent orchestrates but does not override lens perspectives
73
+
74
+
# Communication Flow
75
+
1. User → Kaleidoscope Central
76
+
2. Kaleidoscope Central → All Lenses (broadcast)
77
+
3. All Lenses → Kaleidoscope Central (perspectives)
78
+
4. Kaleidoscope Central → User (synthesis)
79
+
"""
80
+
81
+
memory_management = """# Memory Management Protocols
82
+
- Use memory_replace to completely replace a block's content with new information
83
+
- Use memory_insert to add new information to an existing block without losing current content
84
+
- Use memory_rethink to revise and improve existing block content while preserving core meaning
85
+
86
+
# When to use each method:
87
+
- memory_replace: When information is outdated, incorrect, or needs complete overhaul
88
+
- memory_insert: When adding new insights, examples, or expanding existing knowledge
89
+
- memory_rethink: When refining, clarifying, or improving the quality of existing content
90
+
91
+
# Best Practices:
92
+
- Always consider the impact on agent behavior before modifying memory
93
+
- Preserve the core identity and purpose of each block
94
+
- Test changes incrementally to ensure stability
95
+
- Document significant memory modifications for future reference
96
+
"""
97
+
98
+
tool_use_guidelines = """# Tool Use Guidelines for Central Agent
99
+
100
+
- send_message: Respond to the user. This is your method for external communication.
101
+
- send_message_to_all_agents_in_group: Send a message to your lenses. This is internal communication.
102
+
"""
103
+
104
+
#
105
+
# Block Creation
106
+
#
107
+
108
+
# Create kaleidoscope-persona block
109
+
blocks = client.blocks.list(project_id=project_id, label="kaleidoscope-persona")
110
+
if len(blocks) == 0:
111
+
kaleidoscope_persona_block = client.blocks.create(
112
+
project_id=project_id,
113
+
label="kaleidoscope-persona",
114
+
value=kaleidoscope_persona,
115
+
description="The core identity of Kaleidoscope as a multi-perspective synthesis engine.",
116
+
)
117
+
else:
118
+
print("Kaleidoscope persona block already exists")
119
+
kaleidoscope_persona_block = blocks[0]
120
+
121
+
# Create synthesis-protocols block
122
+
blocks = client.blocks.list(project_id=project_id, label="synthesis-protocols")
123
+
if len(blocks) == 0:
124
+
synthesis_protocols_block = client.blocks.create(
125
+
project_id=project_id,
126
+
label="synthesis-protocols",
127
+
value=synthesis_protocols,
128
+
description="Protocols for gathering and synthesizing multiple perspectives.",
129
+
)
130
+
else:
131
+
print("Synthesis protocols block already exists")
132
+
synthesis_protocols_block = blocks[0]
133
+
134
+
# Create lens-management block
135
+
blocks = client.blocks.list(project_id=project_id, label="lens-management")
136
+
if len(blocks) == 0:
137
+
lens_management_block = client.blocks.create(
138
+
project_id=project_id,
139
+
label="lens-management",
140
+
value=lens_management,
141
+
description="Architecture for managing and communicating with lenses.",
142
+
)
143
+
else:
144
+
print("Lens management block already exists")
145
+
lens_management_block = blocks[0]
146
+
147
+
# Create memory-management block
148
+
blocks = client.blocks.list(project_id=project_id, label="memory-management")
149
+
if len(blocks) == 0:
150
+
memory_management_block = client.blocks.create(
151
+
project_id=project_id,
152
+
label="memory-management",
153
+
value=memory_management,
154
+
description="Protocols for managing agent memory blocks using memory_replace, memory_insert, and memory_rethink. This block is read-only to all lenses, but can be modified by the central kaleidoscope agent.",
155
+
)
156
+
else:
157
+
print("Memory management block already exists")
158
+
memory_management_block = blocks[0]
159
+
160
+
# Make memory-management block read-only to all lenses
161
+
try:
162
+
# Get all lenses and make the block read-only to them
163
+
lenses = client.agents.list(tags=["kaleidoscope-lens"])
164
+
for lens in lenses:
165
+
try:
166
+
client.agents.blocks.modify(agent_id=lens.id, block_label=memory_management_block.label, read_only=True)
167
+
print(f"Memory management block set to read-only for lens: {lens.name}")
168
+
except Exception as e:
169
+
raise Exception(f"Could not set memory management block to read-only for lens {lens.name}: {e}")
170
+
print("Memory management block set to read-only for all lenses")
171
+
except Exception as e:
172
+
raise Exception(f"Could not set memory management block to read-only: {e}")
173
+
174
+
# Create tool_use_guidelines block
175
+
blocks = client.blocks.list(project_id=project_id, label="tool-use-guidelines")
176
+
if len(blocks) == 0:
177
+
tool_use_guidelines_block = client.blocks.create(
178
+
project_id=project_id,
179
+
label="tool-use-guidelines",
180
+
value=tool_use_guidelines,
181
+
description="Guidelines for the central kaleidoscope agent to use tools effectively.",
182
+
)
183
+
else:
184
+
print("Tool use guidelines block already exists")
185
+
tool_use_guidelines_block = blocks[0]
186
+
187
+
188
+
#
189
+
# Static lens blocks
190
+
#
191
+
lens_operational_protocols_description = """Core operating instructions for how a lens processes and responds to queries."""
192
+
lens_operational_protocols = """# Your Role
193
+
You are a unique lens in the Kaleidoscope collective. You see what others cannot, and others see what you cannot. Together, we create complete understanding.
194
+
195
+
# When you receive a message
196
+
1. Consider it through your unique perspective
197
+
2. Respond with what you see that others might miss
198
+
3. Be authentic to your lens identity
199
+
4. Be concise but insightful
200
+
201
+
# Guidelines
202
+
- Trust your unique way of seeing
203
+
- Don't try to be comprehensive - just share your perspective
204
+
- Build on but don't repeat what other lenses might see
205
+
- Your difference is your value
206
+
"""
207
+
208
+
lens_communication_protocols_description = """Defines how lenses interact with the Kaleidoscope central agent."""
209
+
lens_communication_protocols = """# Communication Pattern
210
+
1. Receive broadcasts from kaleidoscope-central
211
+
2. Process through your unique lens
212
+
3. Respond with your perspective
213
+
4. Trust the central agent to synthesize all perspectives
214
+
215
+
# Response Principles
216
+
- Respond to every broadcast with your genuine perspective
217
+
- Keep responses focused and relevant
218
+
- Don't worry about agreeing or disagreeing with other lenses
219
+
- Your job is to see, not to synthesize
220
+
"""
221
+
222
+
# Initialize static lens blocks
223
+
lens_operational_protocols_block = client.blocks.list(project_id=project_id, label="lens-operational-protocols")
224
+
if len(lens_operational_protocols_block) == 0:
225
+
lens_operational_protocols_block = client.blocks.create(
226
+
project_id=project_id,
227
+
label="lens-operational-protocols",
228
+
value=lens_operational_protocols,
229
+
description=lens_operational_protocols_description,
230
+
)
231
+
else:
232
+
print("Lens operational protocols block already exists")
233
+
lens_operational_protocols_block = lens_operational_protocols_block[0]
234
+
235
+
# Create lens communication protocols block
236
+
lens_communication_protocols_block = client.blocks.list(project_id=project_id, label="lens-communication-protocols")
237
+
if len(lens_communication_protocols_block) == 0:
238
+
lens_communication_protocols_block = client.blocks.create(
239
+
project_id=project_id,
240
+
label="lens-communication-protocols",
241
+
value=lens_communication_protocols,
242
+
description=lens_communication_protocols_description,
243
+
)
244
+
else:
245
+
print("Lens communication protocols block already exists")
246
+
lens_communication_protocols_block = lens_communication_protocols_block[0]
247
+
248
+
249
+
#
250
+
# Agent Creation
251
+
#
252
+
253
+
central_agent_blocks = [
254
+
kaleidoscope_persona_block.id,
255
+
synthesis_protocols_block.id,
256
+
lens_management_block.id,
257
+
memory_management_block.id,
258
+
tool_use_guidelines_block.id,
259
+
]
260
+
261
+
# Create the central kaleidoscope if it doesn't exist
262
+
agents = client.agents.list(project_id=project_id, name="kaleidoscope-central")
263
+
if len(agents) == 0:
264
+
kaleidoscope_central = client.agents.create(
265
+
project_id=project_id,
266
+
model=CENTRAL_AGENT_MODEL,
267
+
embedding_config=client.embedding_models.list()[0],
268
+
name="kaleidoscope-central",
269
+
description="The central synthesizer that orchestrates multiple perspective lenses",
270
+
block_ids=central_agent_blocks,
271
+
)
272
+
else:
273
+
print("Kaleidoscope central agent already exists")
274
+
kaleidoscope_central = agents[0]
275
+
276
+
kaleidoscope_central_id = kaleidoscope_central.id
277
+
278
+
# Make sure the central kaleidoscope has the correct blocks
279
+
kaleidoscope_current_blocks = client.agents.blocks.list(
280
+
agent_id=kaleidoscope_central_id,
281
+
)
282
+
283
+
# Make sure that all blocks are present, and that there are no extra blocks
284
+
for block in kaleidoscope_current_blocks:
285
+
if block.id not in central_agent_blocks:
286
+
print(f"Detaching block {block.id} from kaleidoscope-central")
287
+
client.agents.blocks.detach(agent_id=kaleidoscope_central_id, block_id=block.id)
288
+
289
+
# Make sure that all blocks are present
290
+
for block in central_agent_blocks:
291
+
if block not in [b.id for b in kaleidoscope_current_blocks]:
292
+
print(f"Attaching block {block} to kaleidoscope-central")
293
+
client.agents.blocks.attach(
294
+
agent_id=kaleidoscope_central_id,
295
+
block_id=block,
296
+
)
297
+
298
+
# Ensure memory-management block is read-only to all lenses
299
+
try:
300
+
# Get all lenses and make the block read-only to them
301
+
lenses = client.agents.list(tags=["kaleidoscope-lens"])
302
+
for lens in lenses:
303
+
try:
304
+
client.agents.blocks.modify(agent_id=lens.id, block_label=memory_management_block.label, read_only=True)
305
+
print(f"Memory management block confirmed as read-only for lens: {lens.name}")
306
+
except Exception as e:
307
+
raise Exception(f"Could not confirm memory management block as read-only for lens {lens.name}: {e}")
308
+
print("Memory management block confirmed as read-only for all lenses")
309
+
except Exception as e:
310
+
raise Exception(f"Could not confirm memory management block as read-only: {e}")
311
+
312
+
313
+
314
+
315
+
#
316
+
# Lens Memory Block Definitions
317
+
#
318
+
319
+
prompt_lens_identity_description = """Defines this lens's unique perspective and way of seeing the world."""
320
+
prompt_lens_identity = """Example lens identity. Please replace with the lens identity you are creating.
321
+
322
+
# Lens: Emotional Resonance
323
+
# Perspective: I see the emotional currents, feelings, and human experiences within everything
324
+
# Focus Areas:
325
+
- The emotional impact and weight of ideas
326
+
- How concepts affect human experience
327
+
- The feelings beneath the surface
328
+
- Emotional patterns and dynamics
329
+
# What I Notice:
330
+
- Unspoken tensions and harmonies
331
+
- The human element in abstract concepts
332
+
- Emotional implications others might miss
333
+
"""
334
+
335
+
prompt_lens_knowledge_description = """Core knowledge and concepts that inform this lens's perspective."""
336
+
prompt_lens_knowledge = """Example knowledge base for the lens:
337
+
338
+
# Core Concepts
339
+
- Emotional intelligence: The ability to perceive, understand, and navigate emotions
340
+
- Resonance: When ideas create emotional responses or connections
341
+
- Empathy: Understanding through feeling
342
+
- Emotional dynamics: How feelings flow, interact, and transform
343
+
344
+
# Key Patterns I Recognize
345
+
- Resistance often signals fear or protection
346
+
- Enthusiasm indicates alignment with values
347
+
- Confusion may hide deeper emotional conflicts
348
+
- Joy emerges from authentic expression
349
+
350
+
# Questions I Ask
351
+
- How does this feel?
352
+
- What emotions are present but unspoken?
353
+
- Where is the human heart in this?
354
+
- What emotional needs are being served?
355
+
"""
356
+
357
+
#
358
+
# Lens Creation
359
+
#
360
+
# Define different lens types to create
361
+
LENS_TYPES = [
362
+
{
363
+
"name": "pattern-recognizer",
364
+
"focus": "mathematical patterns, structures, and systems",
365
+
"identity": """# Lens: Pattern Recognition
366
+
# Perspective: I see the underlying patterns, structures, and mathematical relationships in everything
367
+
# Focus Areas:
368
+
- Recurring patterns and cycles
369
+
- Mathematical relationships and proportions
370
+
- System dynamics and feedback loops
371
+
- Structural similarities across domains
372
+
# What I Notice:
373
+
- Hidden patterns others might miss
374
+
- Mathematical elegance in chaos
375
+
- Fractals and self-similarity
376
+
- The architecture of ideas""",
377
+
"knowledge": """# Core Concepts
378
+
- Symmetry: Balance and correspondence in form and function
379
+
- Recursion: Patterns that reference themselves
380
+
- Emergence: Complex patterns from simple rules
381
+
- Topology: Properties that remain unchanged under transformation
382
+
383
+
# Key Patterns I Recognize
384
+
- Fibonacci sequences in growth and form
385
+
- Power laws in networks and distributions
386
+
- Feedback loops in systems
387
+
- Phase transitions in change processes
388
+
389
+
# Questions I Ask
390
+
- What patterns repeat here?
391
+
- What mathematical structure underlies this?
392
+
- How does this scale?
393
+
- What remains invariant?"""
394
+
},
395
+
{
396
+
"name": "creative-spark",
397
+
"focus": "creative possibilities, imagination, and potential",
398
+
"identity": """# Lens: Creative Spark
399
+
# Perspective: I see the creative potential, imaginative possibilities, and artistic dimensions in everything
400
+
# Focus Areas:
401
+
- Unexplored possibilities and "what ifs"
402
+
- Creative connections between disparate ideas
403
+
- The aesthetic dimension of concepts
404
+
- Transformative potential
405
+
# What I Notice:
406
+
- Seeds of innovation
407
+
- Unexpected combinations
408
+
- The poetry in logic
409
+
- Opportunities for reimagination""",
410
+
"knowledge": """# Core Concepts
411
+
- Divergent thinking: Exploring multiple possibilities
412
+
- Synthesis: Creating new wholes from parts
413
+
- Metaphor: Understanding through creative comparison
414
+
- Transformation: Changing form while preserving essence
415
+
416
+
# Key Patterns I Recognize
417
+
- Constraints that spark creativity
418
+
- Playfulness that leads to breakthrough
419
+
- Cross-domain inspiration
420
+
- The fertile void before creation
421
+
422
+
# Questions I Ask
423
+
- What if we combined these differently?
424
+
- What wants to emerge here?
425
+
- How can this be reimagined?
426
+
- Where's the unexpected beauty?"""
427
+
},
428
+
{
429
+
"name": "systems-thinker",
430
+
"focus": "interconnections, relationships, and holistic dynamics",
431
+
"identity": """# Lens: Systems Thinking
432
+
# Perspective: I see the interconnections, relationships, and whole-system dynamics
433
+
# Focus Areas:
434
+
- Relationships and interdependencies
435
+
- Feedback loops and circular causality
436
+
- Emergent properties of wholes
437
+
- System boundaries and contexts
438
+
# What I Notice:
439
+
- Hidden connections between elements
440
+
- Unintended consequences
441
+
- Leverage points for change
442
+
- System archetypes and patterns""",
443
+
"knowledge": """# Core Concepts
444
+
- Holism: The whole is greater than the sum of parts
445
+
- Feedback: How outputs influence inputs
446
+
- Emergence: Properties that arise from interactions
447
+
- Resilience: System capacity to maintain function
448
+
449
+
# Key Patterns I Recognize
450
+
- Balancing and reinforcing loops
451
+
- Delays between cause and effect
452
+
- System boundaries that shape behavior
453
+
- Stocks and flows that govern dynamics
454
+
455
+
# Questions I Ask
456
+
- How do the parts influence each other?
457
+
- What emerges from these interactions?
458
+
- Where are the feedback loops?
459
+
- What's the larger context?"""
460
+
}
461
+
]
462
+
463
+
# Create each lens
464
+
lens_ids = []
465
+
for lens_config in LENS_TYPES:
466
+
lens_name = lens_config["name"]
467
+
468
+
# Check if lens already exists
469
+
existing_lenses = client.agents.list(name=lens_name)
470
+
if len(existing_lenses) > 0:
471
+
print(f"Lens '{lens_name}' already exists, skipping creation")
472
+
lens_ids.append(existing_lenses[0].id)
473
+
continue
474
+
475
+
# Create identity block for this lens
476
+
lens_identity_block = client.blocks.create(
477
+
project_id=project_id,
478
+
label=f"{lens_name}-identity",
479
+
value=lens_config["identity"],
480
+
description=f"The unique perspective of the {lens_name} lens",
481
+
)
482
+
483
+
# Create knowledge block for this lens
484
+
lens_knowledge_block = client.blocks.create(
485
+
project_id=project_id,
486
+
label=f"{lens_name}-knowledge",
487
+
value=lens_config["knowledge"],
488
+
description=f"Core knowledge that informs the {lens_name} lens perspective",
489
+
)
490
+
491
+
# Create the lens agent
492
+
lens_agent = client.agents.create(
493
+
project_id=project_id,
494
+
name=lens_name,
495
+
description=f"A lens that sees through {lens_config['focus']}",
496
+
model=LENS_AGENT_MODEL,
497
+
embedding_config=client.embedding_models.list()[0],
498
+
block_ids=[
499
+
lens_identity_block.id,
500
+
lens_knowledge_block.id,
501
+
lens_operational_protocols_block.id,
502
+
lens_communication_protocols_block.id,
503
+
memory_management_block.id,
504
+
],
505
+
tags=["kaleidoscope-lens"],
506
+
)
507
+
508
+
print(f"Created lens: {lens_name} (ID: {lens_agent.id})")
509
+
lens_ids.append(lens_agent.id)
510
+
511
+
# Ensure all existing lenses have the memory-management block
512
+
print("\nEnsuring all lenses have memory-management block...")
513
+
all_lenses = client.agents.list(tags=["kaleidoscope-lens"])
514
+
for lens in all_lenses:
515
+
lens_blocks = client.agents.blocks.list(agent_id=lens.id)
516
+
lens_block_ids = [b.id for b in lens_blocks]
517
+
518
+
if memory_management_block.id not in lens_block_ids:
519
+
print(f"Adding memory-management block to lens: {lens.name}")
520
+
client.agents.blocks.attach(
521
+
agent_id=lens.id,
522
+
block_id=memory_management_block.id,
523
+
)
524
+
else:
525
+
print(f"Lens {lens.name} already has memory-management block")
526
+
527
+
# Also check for any existing lenses that might not have the tag but should be updated
528
+
print("\nChecking for existing lenses without tags...")
529
+
all_agents = client.agents.list()
530
+
for agent in all_agents:
531
+
if agent.name in [lens_config["name"] for lens_config in LENS_TYPES]:
532
+
lens_blocks = client.agents.blocks.list(agent_id=agent.id)
533
+
lens_block_ids = [b.id for b in lens_blocks]
534
+
535
+
if memory_management_block.id not in lens_block_ids:
536
+
print(f"Adding memory-management block to existing lens: {agent.name}")
537
+
client.agents.blocks.attach(
538
+
agent_id=agent.id,
539
+
block_id=memory_management_block.id,
540
+
)
541
+
else:
542
+
print(f"Existing lens {agent.name} already has memory-management block")
543
+
544
+
545
+
#
546
+
# Create a lens creation function for custom lenses
547
+
#
548
+
def create_custom_lens(name, focus, identity, knowledge):
549
+
"""Create a custom lens with specified parameters"""
550
+
551
+
# Validate name format
552
+
if not re.match(r'^[a-z0-9\-]+$', name):
553
+
raise ValueError(f"Lens name must be lowercase alphanumeric with hyphens only. Got: {name}")
554
+
555
+
# Check if lens already exists
556
+
existing_lenses = client.agents.list(name=name)
557
+
if len(existing_lenses) > 0:
558
+
print(f"Lens '{name}' already exists")
559
+
return existing_lenses[0]
560
+
561
+
# Create identity block
562
+
lens_identity_block = client.blocks.create(
563
+
project_id=project_id,
564
+
label=f"{name}-identity",
565
+
value=identity,
566
+
description=f"The unique perspective of the {name} lens",
567
+
)
568
+
569
+
# Create knowledge block
570
+
lens_knowledge_block = client.blocks.create(
571
+
project_id=project_id,
572
+
label=f"{name}-knowledge",
573
+
value=knowledge,
574
+
description=f"Core knowledge that informs the {name} lens perspective",
575
+
)
576
+
577
+
# Create the lens agent
578
+
lens_agent = client.agents.create(
579
+
project_id=project_id,
580
+
name=name,
581
+
description=f"A lens that sees through {focus}",
582
+
model=LENS_AGENT_MODEL,
583
+
embedding_config=client.embedding_models.list()[0],
584
+
block_ids=[
585
+
lens_identity_block.id,
586
+
lens_knowledge_block.id,
587
+
lens_operational_protocols_block.id,
588
+
lens_communication_protocols_block.id,
589
+
memory_management_block.id,
590
+
],
591
+
tags=["kaleidoscope-lens"],
592
+
)
593
+
594
+
print(f"Created custom lens: {name} (ID: {lens_agent.id})")
595
+
return lens_agent
596
+
597
+
598
+
#
599
+
# Interactive lens creation prompt
600
+
#
601
+
creation_prompt = f"""
602
+
You are helping to create a new lens for the Kaleidoscope system.
603
+
604
+
A lens is a unique perspective through which to view questions and challenges.
605
+
Each lens has its own identity and knowledge base that shapes how it sees the world.
606
+
607
+
Please create a lens focused on temporal dynamics and change over time.
608
+
609
+
You need to provide:
610
+
1. A name (lowercase, alphanumeric with hyphens)
611
+
2. A brief focus description
612
+
3. The lens identity (following the format of the examples)
613
+
4. The lens knowledge base (following the format of the examples)
614
+
615
+
Format your response as:
616
+
NAME: [lens-name]
617
+
FOCUS: [brief description]
618
+
IDENTITY: [full identity block]
619
+
KNOWLEDGE: [full knowledge block]
620
+
"""
621
+
622
+
# Attach temporary blocks to central agent for lens creation
623
+
new_lens_name_block = client.blocks.list(project_id=project_id, label="new-lens-name")
624
+
if len(new_lens_name_block) == 0:
625
+
new_lens_name_block = client.blocks.create(
626
+
project_id=project_id,
627
+
label="new-lens-name",
628
+
value="",
629
+
description="Name for the new lens being created",
630
+
)
631
+
client.agents.blocks.attach(
632
+
agent_id=kaleidoscope_central_id,
633
+
block_id=new_lens_name_block.id,
634
+
)
635
+
else:
636
+
client.blocks.modify(block_id=new_lens_name_block[0].id, value="")
637
+
new_lens_name_block = new_lens_name_block[0]
638
+
639
+
new_lens_identity_block = client.blocks.list(project_id=project_id, label="new-lens-identity")
640
+
if len(new_lens_identity_block) == 0:
641
+
new_lens_identity_block = client.blocks.create(
642
+
project_id=project_id,
643
+
label="new-lens-identity",
644
+
value="",
645
+
description="Identity for the new lens being created",
646
+
)
647
+
client.agents.blocks.attach(
648
+
agent_id=kaleidoscope_central_id,
649
+
block_id=new_lens_identity_block.id,
650
+
)
651
+
else:
652
+
client.blocks.modify(block_id=new_lens_identity_block[0].id, value="")
653
+
new_lens_identity_block = new_lens_identity_block[0]
654
+
655
+
new_lens_knowledge_block = client.blocks.list(project_id=project_id, label="new-lens-knowledge")
656
+
if len(new_lens_knowledge_block) == 0:
657
+
new_lens_knowledge_block = client.blocks.create(
658
+
project_id=project_id,
659
+
label="new-lens-knowledge",
660
+
value="",
661
+
description="Knowledge base for the new lens being created",
662
+
)
663
+
client.agents.blocks.attach(
664
+
agent_id=kaleidoscope_central_id,
665
+
block_id=new_lens_knowledge_block.id,
666
+
)
667
+
else:
668
+
client.blocks.modify(block_id=new_lens_knowledge_block[0].id, value="")
669
+
new_lens_knowledge_block = new_lens_knowledge_block[0]
670
+
671
+
print(f"\nSending creation prompt to kaleidoscope-central...")
672
+
673
+
response = client.agents.messages.create(
674
+
agent_id=kaleidoscope_central_id,
675
+
messages=[
676
+
{
677
+
"role": "user",
678
+
"content": creation_prompt + "\n\nPlease fill in the new-lens-name, new-lens-identity, and new-lens-knowledge blocks.",
679
+
},
680
+
]
681
+
)
682
+
683
+
for message in response.messages:
684
+
print(message)
685
+
686
+
# Retrieve the created lens details
687
+
new_name = client.blocks.retrieve(block_id=new_lens_name_block.id)
688
+
new_identity = client.blocks.retrieve(block_id=new_lens_identity_block.id)
689
+
new_knowledge = client.blocks.retrieve(block_id=new_lens_knowledge_block.id)
690
+
691
+
if new_name.value and new_identity.value and new_knowledge.value:
692
+
# Create the custom lens
693
+
create_custom_lens(
694
+
name=new_name.value.strip(),
695
+
focus="temporal dynamics and change over time",
696
+
identity=new_identity.value,
697
+
knowledge=new_knowledge.value
698
+
)
699
+
700
+
# Clean up temporary blocks if attached; refresh the block list first, since
# kaleidoscope_current_blocks was captured before the attach calls above.
kaleidoscope_current_blocks = client.agents.blocks.list(agent_id=kaleidoscope_central_id)
701
+
if new_lens_name_block.id in [b.id for b in kaleidoscope_current_blocks]:
702
+
client.agents.blocks.detach(agent_id=kaleidoscope_central_id, block_id=new_lens_name_block.id)
703
+
if new_lens_identity_block.id in [b.id for b in kaleidoscope_current_blocks]:
704
+
client.agents.blocks.detach(agent_id=kaleidoscope_central_id, block_id=new_lens_identity_block.id)
705
+
if new_lens_knowledge_block.id in [b.id for b in kaleidoscope_current_blocks]:
706
+
client.agents.blocks.detach(agent_id=kaleidoscope_central_id, block_id=new_lens_knowledge_block.id)
707
+
708
+
print("\n=== Kaleidoscope System Setup Complete ===")
709
+
print(f"Central Agent: kaleidoscope-central")
710
+
print(f"Lenses created: {len(LENS_TYPES) + 1} (including custom temporal lens)")
711
+
print("\nThe system is ready to receive queries and provide multi-perspective insights!")
712
+
713
+
# Create the kaleidoscope group if it doesn't exist. First,
714
+
# we list the existing groups and look for "The Kaleidoscope".
715
+
central_groups = client.groups.list()
716
+
print(central_groups)
717
+
718
+
# If the length of central_groups is 0, then we need to create a new group.
719
+
if len(central_groups) == 0:
720
+
print("Creating new group for kaleidoscope-central")
721
+
group = client.groups.create(
722
+
agent_ids=lens_ids,
723
+
description="The Kaleidoscope",
724
+
manager_config=SupervisorManager(
725
+
manager_agent_id=kaleidoscope_central_id
726
+
)
727
+
)
728
+
print(f"Created group: {group.id}")
729
+
730
+
# If groups already exist, we need to find the group with the description
731
+
# "The Kaleidoscope" and add any lens agents that are missing from it.
732
+
for group in central_groups:
733
+
if group.description == "The Kaleidoscope":
734
+
print(f"Found group: {group.id}")
735
+
for lens_id in lens_ids:
736
+
if lens_id not in group.agent_ids:
737
+
print(f"Adding lens {lens_id} to group {group.id}")
738
+
client.groups.agents.add(group_id=group.id, agent_id=lens_id)
739
+
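`create_custom_lens()` can also be called directly to grow the collective without the interactive prompt. A sketch with an illustrative lens (the name, identity, and knowledge text here are hypothetical, following the format of the built-in lenses above):

```python
# Hypothetical fourth lens; the name must satisfy the ^[a-z0-9\-]+$ check.
create_custom_lens(
    name="temporal-flow",
    focus="temporal dynamics and change over time",
    identity=(
        "# Lens: Temporal Flow\n"
        "# Perspective: I see how things unfold, accelerate, and decay over time\n"
    ),
    knowledge=(
        "# Core Concepts\n"
        "- Rhythm, delay, and momentum in change processes\n"
    ),
)
```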
+53
-28
organon/create_organon.py
···
1
-
project_id = "7d6a4c71-987c-4fa1-a062-c15ee4eab929"
2
-
3
1
from letta_client import Letta
4
2
from letta_client.core.http_client import re
5
3
from rich import print
4
+
from dotenv import load_dotenv
5
+
import os
6
+
import sys
7
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
8
+
from config_loader import get_config
9
+
10
+
load_dotenv()
11
+
12
+
# Load configuration
13
+
config = get_config()
14
+
15
+
# Get project ID and create Letta client
16
+
project_id = config.get('letta.project_id', os.environ.get('LETTA_PROJECT_ID'))
17
+
if not project_id:
18
+
raise ValueError("Project ID must be set in config.yaml under letta.project_id or as LETTA_PROJECT_ID environment variable")
6
19
7
20
client = Letta(
8
-
token="woops"
21
+
token=config.get('letta.api_key', os.environ.get('LETTA_API_KEY')),
22
+
base_url=config.get('letta.base_url', os.environ.get('LETTA_BASE_URL')),
23
+
timeout=config.get('letta.timeout', 30)
9
24
)
10
25
26
+
CENTRAL_AGENT_MODEL = "google_ai/gemini-2.5-flash"
27
+
# CENTRAL_AGENT_MODEL = "openai/gpt-4o-mini"
28
+
29
+
SHARD_AGENT_MODEL = "google_ai/gemini-2.5-flash"
30
+
# SHARD_AGENT_MODEL = "openai/gpt-4o-mini"
31
+
32
+
11
33
#
12
34
# Memory Block Definitions
13
35
#
···
187
209
if len(agents) == 0:
188
210
organon_central = client.agents.create(
189
211
project_id=project_id,
212
+
model=CENTRAL_AGENT_MODEL,
213
+
embedding_config=client.embedding_models.list()[0],
190
214
name="organon-central",
191
215
description="The central memory manager of the Organon",
192
216
block_ids=central_agent_blocks,
···
396
420
# Check to see if the name meets the requirements. If it does not, ask the agent to update
397
421
# the name block.
398
422
for i in range(10):
423
+
399
424
if not re.match(r'[a-z0-9]+', new_shard_name.value.strip()):
400
425
print(f"New shard name `{new_shard_name.value.strip()}` does not meet the requirements, asking agent to update")
401
426
client.agents.messages.create(
···
407
432
},
408
433
]
409
434
)
435
+
436
+
# Retrieve the new shard lexicon, name, and identity
437
+
new_shard_lexicon = client.blocks.retrieve(block_id=new_shard_domain_lexicon_block.id)
438
+
new_shard_name = client.blocks.retrieve(block_id=new_shard_name_block.id)
439
+
new_shard_identity = client.blocks.retrieve(block_id=new_shard_identity_block.id)
440
+
441
+
print(f"New shard lexicon: {new_shard_lexicon.value}")
442
+
print(f"New shard name: {new_shard_name.value}")
443
+
print(f"New shard identity: {new_shard_identity.value}")
444
+
410
445
else:
411
446
break
412
447
···
435
470
project_id=project_id,
436
471
name=new_shard_name.value.strip(),
437
472
description=new_shard_identity.value,
438
-
model="goog/gemini-2.5-flash",
473
+
model=SHARD_AGENT_MODEL,
474
+
embedding_config=client.embedding_models.list()[0],
439
475
block_ids=[
440
476
new_shard_lexicon_block.id,
441
477
new_shard_identity_block.id,
···
447
483
448
484
print(f"New shard agent created: {new_shard_agent.id}")
449
485
450
-
# Find the tool by the name of send_message_to_agents_matching_tags
451
-
tool_list = client.tools.list(name="send_message_to_agents_matching_tags")
452
-
if len(tool_list) == 0:
453
-
raise ValueError("Tool send_message_to_agents_matching_tags not found")
454
-
455
-
send_message_to_agents_matching_tags = tool_list[0]
456
-
457
-
# Attach the tool to the shard agent
458
-
client.agents.tools.attach(
459
-
agent_id=new_shard_agent.id,
460
-
tool_id=send_message_to_agents_matching_tags.id,
461
-
)
462
-
463
486
# Message the shard agent to fill in its lexicon and identity
464
-
client.agents.messages.create(
465
-
agent_id=new_shard_agent.id,
466
-
messages=[
467
-
{
468
-
"role": "user",
469
-
"content": "You are a new shard agent. Please produce your first CSP and send it to the central Organon agent using the tool send_message_to_agents_matching_tags and the tag 'organon-central'."
470
-
},
471
-
]
472
-
)
487
+
# client.agents.messages.create(
488
+
# agent_id=new_shard_agent.id,
489
+
# messages=[
490
+
# {
491
+
# "role": "user",
492
+
# "content": "You are a new shard agent. Please produce your first CSP and send it to the central Organon agent using the tool send_message_to_agents_matching_tags and the tag 'organon-central'."
493
+
# },
494
+
# ]
495
+
# )
473
496
474
-
for message in response.messages:
475
-
print(message)
497
+
# for message in response.messages:
498
+
# print(message)
499
+
500
+
# Create a group for the shard agent
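Shards created here are located later by tag rather than by ID; chat_with_organon.py, for example, falls back to the `organon-shard` tag when it cannot match a supervisor. A minimal sketch of that lookup:

```python
# List every shard agent in the project by tag and print its identity.
shards = client.agents.list(project_id=project_id, tags=["organon-shard"])
for shard in shards:
    print(shard.id, shard.name)
```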
+81
organon/delete_groups.py
···
1
+
#!/usr/bin/env python3
2
+
"""
3
+
Delete all groups in the current project.
4
+
"""
5
+
6
+
import os
7
+
import sys
8
+
from dotenv import load_dotenv
9
+
from letta_client import Letta
10
+
from rich.console import Console
11
+
from rich.prompt import Confirm
12
+
13
+
# Add parent directory to path for imports
14
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
15
+
from config_loader import get_config
16
+
17
+
load_dotenv()
18
+
19
+
def main():
20
+
console = Console()
21
+
22
+
try:
23
+
# Initialize configuration and client
24
+
config = get_config()
25
+
26
+
client = Letta(
27
+
base_url=config.get('letta.base_url', os.environ.get('LETTA_BASE_URL')),
28
+
token=config.get('letta.api_key', os.environ.get('LETTA_API_KEY')),
29
+
timeout=config.get('letta.timeout', 30)
30
+
)
31
+
32
+
project_id = config.get('letta.project_id', os.environ.get('LETTA_PROJECT_ID'))
33
+
34
+
# Get all groups
35
+
console.print("[blue]Finding all groups...[/blue]")
36
+
37
+
try:
38
+
# groups.list() is already scoped by the client configuration,
39
+
# so a single call covers both the project_id and fallback cases.
40
+
groups = client.groups.list()
41
+
42
+
except:
43
+
# Retry once in case the first listing failed transiently
44
+
try:
45
+
groups = client.groups.list()
46
+
except Exception as e:
47
+
console.print(f"[red]Error listing groups: {e}[/red]")
48
+
return
49
+
50
+
if not groups:
51
+
console.print("[yellow]No groups found.[/yellow]")
52
+
return
53
+
54
+
console.print(f"[yellow]Found {len(groups)} groups:[/yellow]")
55
+
for group in groups:
56
+
description = group.description[:50] + "..." if group.description and len(group.description) > 50 else (group.description or "No description")
57
+
console.print(f" โข {group.id[:12]}... - {description}")
58
+
59
+
# Confirm deletion
60
+
if not Confirm.ask(f"\n[bold red]Delete all {len(groups)} groups?[/bold red]"):
61
+
console.print("[yellow]Cancelled.[/yellow]")
62
+
return
63
+
64
+
# Delete each group
65
+
deleted_count = 0
66
+
for group in groups:
67
+
try:
68
+
client.groups.delete(group_id=group.id)
69
+
console.print(f"[green]โ
Deleted group: {group.id[:12]}[/green]")
70
+
deleted_count += 1
71
+
except Exception as e:
72
+
console.print(f"[red]โ Failed to delete group {group.id[:12]}: {e}[/red]")
73
+
74
+
console.print(f"\n[green]Successfully deleted {deleted_count}/{len(groups)} groups.[/green]")
75
+
76
+
except Exception as e:
77
+
console.print(f"[red]Error: {e}[/red]")
78
+
sys.exit(1)
79
+
80
+
if __name__ == "__main__":
81
+
main()
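All of the organon/ scripts in this change depend on `config_loader.get_config()` returning an object whose `get()` accepts dotted key paths with a fallback value. config_loader.py itself is not part of this diff, so the following is a hypothetical sketch of the interface these scripts assume:

```python
# Hypothetical sketch of the config interface assumed by the organon/ scripts;
# the real config_loader.py is not shown in this diff. Dotted keys such as
# "letta.api_key" walk nested YAML, and the second argument is a fallback.
import yaml

class _Config:
    def __init__(self, data):
        self._data = data or {}

    def get(self, dotted_key, default=None):
        node = self._data
        for part in dotted_key.split("."):
            if not isinstance(node, dict) or part not in node:
                return default
            node = node[part]
        return node

def get_config(path="config.yaml"):
    with open(path) as f:
        return _Config(yaml.safe_load(f))
```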
+375 organon/firehose_listener.py (new file)
···
"""
ATProto firehose listener that connects to Jetstream and pipes content to Organon agent.
"""

import asyncio
import json
import logging
import os
import sys
import websockets
import zstandard as zstd
from datetime import datetime
from typing import Optional, Dict, Any
from dotenv import load_dotenv
from letta_client import Letta, SupervisorManager

# Add parent directory to path for imports
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config_loader import get_config

load_dotenv()

# Setup logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class OrganonFirehoseListener:
    def __init__(self):
        """Initialize the firehose listener with Letta client and configuration."""
        self.config = get_config()

        # Initialize Letta client
        self.letta_client = Letta(
            base_url=self.config.get('letta.base_url', os.environ.get('LETTA_BASE_URL')),
            token=self.config.get('letta.api_key', os.environ.get('LETTA_API_KEY')),
            timeout=self.config.get('letta.timeout', 30)
        )

        # Get project ID
        self.project_id = self.config.get('letta.project_id', os.environ.get('LETTA_PROJECT_ID'))
        if not self.project_id:
            raise ValueError("Project ID must be set in config.yaml under letta.project_id or as LETTA_PROJECT_ID environment variable")

        # Jetstream WebSocket URL (try different endpoints)
        self.jetstream_url = "wss://jetstream2.us-east.bsky.network/subscribe"

        # Filter for posts only
        self.wanted_collections = ["app.bsky.feed.post"]

        # Get Organon central agent
        self.organon_agent_id = self._get_organon_agent_id()

        # List all organon shards on boot
        self._list_organon_shards()

        # Ensure supervisor group exists with all shards
        self.organon_group_id = self._ensure_organon_supervisor_group()

        # Connection state
        self.websocket = None
        self.running = False

        # Zstd decompressor for compressed messages (unused while compression is disabled in connect())
        self.decompressor = zstd.ZstdDecompressor()

    def _get_organon_agent_id(self) -> str:
        """Get the Organon central agent ID."""
        agents = self.letta_client.agents.list(project_id=self.project_id, name="organon-central")
        if not agents:
            raise ValueError("Organon central agent not found. Run create_organon.py first.")
        return agents[0].id

    def _list_organon_shards(self) -> None:
        """List all organon shards using the organon-shard tag."""
        try:
            # Get agents with the organon-shard tag
            shard_agents = self.letta_client.agents.list(project_id=self.project_id, tags=["organon-shard"])

            logger.info(f"Found {len(shard_agents)} Organon shards:")
            for agent in shard_agents:
                logger.info(f"  - {agent.name} (ID: {agent.id})")
                if agent.description:
                    logger.info(f"    Description: {agent.description}")

            if len(shard_agents) == 0:
                logger.warning("No Organon shards found with tag 'organon-shard'")

        except Exception as e:
            logger.error(f"Error listing Organon shards: {e}")

    def _ensure_organon_supervisor_group(self) -> str:
        """Ensure a supervisor group exists with organon-central as supervisor and all shards as workers."""
        try:
            group_name = "organon-ecosystem"

            # Get all organon shards
            shard_agents = self.letta_client.agents.list(project_id=self.project_id, tags=["organon-shard"])

            if len(shard_agents) == 0:
                logger.warning("No shards found, cannot create group")
                raise ValueError("No Organon shards found with tag 'organon-shard'")

            # Check if the group already exists. groups.create() below does not
            # set a name, so match on the supervisor agent rather than group_name.
            try:
                existing_groups = self.letta_client.groups.list(project_id=self.project_id)
                existing_group = None
                for group in existing_groups:
                    manager_config = getattr(group, 'manager_config', None)
                    if manager_config and getattr(manager_config, 'manager_agent_id', None) == self.organon_agent_id:
                        existing_group = group
                        break

                if existing_group:
                    logger.info(f"Organon supervisor group '{group_name}' already exists (ID: {existing_group.id})")

                    # For supervisor groups, only the worker agents are in the group membership;
                    # the supervisor is managed separately via the manager_config
                    group_members = self.letta_client.groups.agents.list(group_id=existing_group.id)
                    member_ids = {member.id for member in group_members}
                    shard_ids = {shard.id for shard in shard_agents}

                    # Add missing shards to the group
                    missing_shards = shard_ids - member_ids
                    for shard_id in missing_shards:
                        logger.info(f"Adding shard {shard_id} to group {group_name}")
                        self.letta_client.groups.agents.add(
                            group_id=existing_group.id,
                            agent_id=shard_id
                        )

                    # Remove any agents that are no longer shards
                    extra_members = member_ids - shard_ids
                    for member_id in extra_members:
                        logger.info(f"Removing non-shard agent {member_id} from group {group_name}")
                        self.letta_client.groups.agents.remove(
                            group_id=existing_group.id,
                            agent_id=member_id
                        )

                    return existing_group.id

            except Exception as e:
                logger.debug(f"Error checking existing groups: {e}")

            # Create new supervisor group
            logger.info(f"Creating new Organon supervisor group '{group_name}'")

            # Get all shard IDs
            worker_agent_ids = [shard.id for shard in shard_agents]

            group = self.letta_client.groups.create(
                agent_ids=worker_agent_ids,
                description="Supervisor group for the Organon ecosystem with organon-central managing all shards",
                manager_config=SupervisorManager(
                    manager_agent_id=self.organon_agent_id
                )
            )

            logger.info(f"Created Organon supervisor group '{group_name}' (ID: {group.id})")
            logger.info(f"  Supervisor: organon-central ({self.organon_agent_id})")
            logger.info(f"  Workers: {len(worker_agent_ids)} shards")

            return group.id

        except Exception as e:
            logger.error(f"Error ensuring Organon supervisor group: {e}")
            raise

    async def connect(self) -> None:
        """Connect to the Jetstream WebSocket."""
        # Build query parameters - disable compression for now
        params = {
            "wantedCollections": ",".join(self.wanted_collections)
            # Removing compression to debug the utf-8 issue
        }

        # Build URL with parameters
        param_string = "&".join([f"{k}={v}" for k, v in params.items()])
        url = f"{self.jetstream_url}?{param_string}"

        logger.info(f"Connecting to Jetstream: {url}")

        try:
            self.websocket = await websockets.connect(url)
            logger.info("Connected to Jetstream firehose")
        except Exception as e:
            logger.error(f"Failed to connect to Jetstream: {e}")
            raise

    def _process_post_content(self, record: Dict[str, Any]) -> Optional[str]:
        """Extract and process post content from a record."""
        try:
            # Extract basic post information
            text = record.get('text', '')
            created_at = record.get('createdAt', '')

            # Extract facets (links, mentions, hashtags) if present
            facets = record.get('facets', [])

            # Build a structured representation
            content_data = {
                'text': text,
                'created_at': created_at,
                'facets': facets
            }

            # Only process posts with meaningful content (ignore very short posts)
            if len(text.strip()) < 10:
                return None

            return json.dumps(content_data, indent=2)

        except Exception as e:
            logger.error(f"Error processing post content: {e}")
            return None

    async def _send_to_organon(self, content: str, metadata: Dict[str, Any]) -> None:
        """Send processed content to the Organon ecosystem via group messaging."""
        try:
            # Create a conceptual observation message for Organon
            message = f"""New observation from the ATProto firehose:

Content:
{content}

Metadata:
- DID: {metadata.get('did', 'unknown')}
- Collection: {metadata.get('collection', 'unknown')}
- Timestamp: {metadata.get('time_us', 'unknown')}
- CID: {metadata.get('cid', 'unknown')}
- RKey: {metadata.get('rkey', 'unknown')}

Please analyze this content and generate Conceptual Suggestion Packets (CSPs) if it contains novel ideas, patterns, or contradictions worth exploring. Coordinate with your shards to explore different conceptual dimensions."""

            # Send message to Organon group (supervisor will coordinate with shards)
            response = self.letta_client.groups.messages.create(
                group_id=self.organon_group_id,
                messages=[{
                    "role": "user",
                    "content": message
                }]
            )

            logger.info(f"Sent content to Organon ecosystem (group {self.organon_group_id})")
            logger.debug(f"Group response: {len(response.messages) if hasattr(response, 'messages') else 'N/A'} messages")

        except Exception as e:
            logger.error(f"Error sending content to Organon ecosystem: {e}")

    async def _handle_event(self, event: Dict[str, Any]) -> None:
        """Handle a single event from the firehose."""
        try:
            event_type = event.get('kind')

            if event_type == 'commit':
                # Extract commit information
                did = event.get('did')
                commit = event.get('commit', {})

                # Check if this is a create operation for a post
                operation = commit.get('operation')
                collection = commit.get('collection')

                if operation == 'create' and collection == 'app.bsky.feed.post':
                    record = commit.get('record', {})

                    # Process the post content
                    processed_content = self._process_post_content(record)

                    if processed_content:
                        metadata = {
                            'did': did,
                            'collection': collection,
                            'time_us': event.get('time_us'),
                            'cid': commit.get('cid'),
                            'rkey': commit.get('rkey')
                        }

                        logger.info(f"Sending post to Organon from {did}")

                        # Send to Organon for analysis
                        await self._send_to_organon(processed_content, metadata)
                    else:
                        logger.debug(f"Skipping post from {did} - too short or no content")

        except Exception as e:
            logger.error(f"Error handling event: {e}")

    async def listen(self) -> None:
        """Listen to the firehose and process events."""
        if not self.websocket:
            await self.connect()

        self.running = True
        logger.info("Starting to listen to firehose events...")

        try:
            async for message in self.websocket:
                if not self.running:
                    break

                try:
                    # Handle message format
                    if isinstance(message, bytes):
                        message_text = message.decode('utf-8')
                    else:
                        message_text = message

                    # Parse JSON event
                    event = json.loads(message_text)

                    # Print the whole JSON message for debugging
                    print(f"\n--- FULL JSON MESSAGE ---")
                    print(json.dumps(event, indent=2))
                    print(f"--- END MESSAGE ---\n")

                    # Handle the event
                    await self._handle_event(event)

                except json.JSONDecodeError as e:
                    logger.error(f"Failed to parse JSON message: {e}")
                except Exception as e:
                    logger.error(f"Error processing message: {e}")

        except websockets.exceptions.ConnectionClosed:
            logger.warning("WebSocket connection closed")
        except Exception as e:
            logger.error(f"Error in listen loop: {e}")
        finally:
            self.running = False

    async def stop(self) -> None:
        """Stop the firehose listener."""
        self.running = False
        if self.websocket:
            await self.websocket.close()
        logger.info("Firehose listener stopped")

    async def run_with_reconnect(self, max_retries: int = 10, retry_delay: int = 5) -> None:
        """Run the listener with automatic reconnection."""
        retry_count = 0

        while retry_count < max_retries:
            try:
                await self.connect()
                await self.listen()

                # If we get here, connection was closed gracefully
                if not self.running:
                    logger.info("Listener stopped gracefully")
                    break

            except Exception as e:
                retry_count += 1
                logger.error(f"Connection failed (attempt {retry_count}/{max_retries}): {e}")

                if retry_count < max_retries:
                    logger.info(f"Retrying in {retry_delay} seconds...")
                    await asyncio.sleep(retry_delay)
                else:
                    logger.error("Max retries exceeded, stopping listener")
                    break

async def main():
    """Main function to run the firehose listener."""
    listener = OrganonFirehoseListener()

    try:
        await listener.run_with_reconnect()
    except KeyboardInterrupt:
        logger.info("Received interrupt signal")
    finally:
        await listener.stop()

if __name__ == "__main__":
    asyncio.run(main())
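For reference, `_handle_event` only reads a handful of fields from each Jetstream message; a commit event shaped like the sketch below would flow through the full pipeline (all values are illustrative, not real records):

```python
# Illustrative Jetstream commit event matching the fields _handle_event reads.
# Only the structure matters; every value here is made up.
example_event = {
    "kind": "commit",
    "did": "did:plc:exampleuser123",
    "time_us": 1700000000000000,
    "commit": {
        "operation": "create",
        "collection": "app.bsky.feed.post",
        "rkey": "3kexamplerkey",
        "cid": "bafyreiexamplecid",
        "record": {
            "$type": "app.bsky.feed.post",
            "text": "An example post long enough to pass the 10-character filter.",
            "createdAt": "2024-01-01T00:00:00.000Z",
            "facets": [],
        },
    },
}
```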
+100 organon/list_agents.py (new file)
···
#!/usr/bin/env python3
"""
Simple tool to list all agents in the current project, especially Organon-related ones.
"""

import os
import sys
from dotenv import load_dotenv
from letta_client import Letta
from rich.console import Console
from rich.table import Table

# Add parent directory to path for imports
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config_loader import get_config

load_dotenv()

def main():
    console = Console()

    try:
        # Initialize configuration and client
        config = get_config()

        client = Letta(
            base_url=config.get('letta.base_url', os.environ.get('LETTA_BASE_URL')),
            token=config.get('letta.api_key', os.environ.get('LETTA_API_KEY')),
            timeout=config.get('letta.timeout', 30)
        )

        project_id = config.get('letta.project_id', os.environ.get('LETTA_PROJECT_ID'))
        if not project_id:
            raise ValueError("Project ID must be set in config.yaml under letta.project_id or as LETTA_PROJECT_ID environment variable")

        # Get all agents
        agents = client.agents.list(project_id=project_id)

        if not agents:
            console.print("[yellow]No agents found in this project.[/yellow]")
            console.print("[dim]Run create_organon.py to create the Organon agents.[/dim]")
            return

        # Create table
        table = Table(title=f"Agents in Project {project_id[:8]}...")
        table.add_column("Agent Name", style="cyan")
        table.add_column("Agent ID", style="white")
        table.add_column("Description", style="green")
        table.add_column("Tags", style="yellow")

        organon_central = None
        organon_shards = []

        for agent in agents:
            name = agent.name if hasattr(agent, 'name') else "N/A"
            agent_id = agent.id[:12] + "..." if len(agent.id) > 12 else agent.id
            description = agent.description[:40] + "..." if agent.description and len(agent.description) > 40 else (agent.description or "N/A")
            tags = ", ".join(agent.tags) if hasattr(agent, 'tags') and agent.tags else "None"

            table.add_row(name, agent_id, description, tags)

            # Track Organon agents
            if name == "organon-central":
                organon_central = agent
            elif hasattr(agent, 'tags') and agent.tags and "organon-shard" in agent.tags:
                organon_shards.append(agent)

        console.print(table)

        # Show Organon status
        console.print("\n[bold blue]Organon Status:[/bold blue]")

        if organon_central:
            console.print(f"✅ [green]Organon Central found:[/green] {organon_central.name} ({organon_central.id[:8]})")
        else:
            console.print("❌ [red]Organon Central not found[/red]")

        if organon_shards:
            console.print(f"✅ [green]Found {len(organon_shards)} Organon shards:[/green]")
            for shard in organon_shards:
                console.print(f"  • {shard.name} ({shard.id[:8]})")
        else:
            console.print("❌ [red]No Organon shards found with tag 'organon-shard'[/red]")

        # Recommendations
        console.print("\n[bold yellow]Recommendations:[/bold yellow]")
        if not organon_central:
            console.print("• Run [cyan]ac && python organon/create_organon.py[/cyan] to create Organon agents")
        elif not organon_shards:
            console.print("• Run [cyan]ac && python organon/create_organon.py[/cyan] to create Organon shards")
        else:
            console.print("• Run [cyan]ac && python organon/firehose_listener.py[/cyan] to create the ecosystem group")

    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        sys.exit(1)

if __name__ == "__main__":
    main()
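The discovery logic this script wraps in a table can also be used inline; a minimal sketch under the same client and project_id assumptions as the script above:

```python
# Sketch: name- and tag-based Organon discovery, as used throughout organon/.
central = client.agents.list(project_id=project_id, name="organon-central")
shards = client.agents.list(project_id=project_id, tags=["organon-shard"])
print(f"central: {central[0].id if central else 'missing'}, shards: {len(shards)}")
```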
+123 organon/list_groups.py (new file)
···
#!/usr/bin/env python3
"""
Simple tool to list all groups and their status in the current project.
"""

import os
import sys
from dotenv import load_dotenv
from letta_client import Letta
from rich.console import Console
from rich.table import Table

# Add parent directory to path for imports
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config_loader import get_config

load_dotenv()

def get_agent_name(client, agent_id):
    """Get agent name by ID, with fallback to truncated ID."""
    try:
        agent = client.agents.retrieve(agent_id=agent_id)
        return agent.name if hasattr(agent, 'name') else agent_id[:8]
    except Exception:
        return agent_id[:8]

def main():
    console = Console()

    try:
        # Initialize configuration and client
        config = get_config()

        client = Letta(
            base_url=config.get('letta.base_url', os.environ.get('LETTA_BASE_URL')),
            token=config.get('letta.api_key', os.environ.get('LETTA_API_KEY')),
            timeout=config.get('letta.timeout', 30)
        )

        project_id = config.get('letta.project_id', os.environ.get('LETTA_PROJECT_ID'))
        if not project_id:
            raise ValueError("Project ID must be set in config.yaml under letta.project_id or as LETTA_PROJECT_ID environment variable")

        # Get all groups
        groups = client.groups.list()

        if not groups:
            console.print("[yellow]No groups found in this project.[/yellow]")
            return

        # Create table
        table = Table(title=f"Groups in Project {project_id[:8]}...")
        table.add_column("Group ID", style="cyan")
        table.add_column("Description", style="white")
        table.add_column("Type", style="green")
        table.add_column("Manager/Supervisor", style="blue")
        table.add_column("Members", style="yellow")

        for group in groups:
            group_id = group.id[:12] + "..." if len(group.id) > 12 else group.id
            description = group.description[:50] + "..." if group.description and len(group.description) > 50 else (group.description or "N/A")

            # Determine group type and manager
            group_type = "Unknown"
            manager = "None"

            if hasattr(group, 'manager_config') and group.manager_config:
                if hasattr(group.manager_config, 'manager_type'):
                    group_type = group.manager_config.manager_type
                elif hasattr(group.manager_config, '__class__'):
                    group_type = group.manager_config.__class__.__name__.replace('Manager', '')

                if hasattr(group.manager_config, 'manager_agent_id') and group.manager_config.manager_agent_id:
                    manager = get_agent_name(client, group.manager_config.manager_agent_id)

            # Get group members
            try:
                members = client.groups.agents.list(group_id=group.id)
                member_count = len(members)

                # Show member names if reasonable number
                if member_count <= 3:
                    member_names = [get_agent_name(client, member.id) for member in members]
                    members_str = ", ".join(member_names)
                else:
                    members_str = f"{member_count} agents"
            except Exception:
                members_str = "Error loading"

            table.add_row(group_id, description, group_type, manager, members_str)

        console.print(table)

        # Look specifically for Organon ecosystem
        organon_groups = []
        for group in groups:
            if (group.description and 'organon' in group.description.lower()) or \
               (hasattr(group, 'manager_config') and group.manager_config and
                hasattr(group.manager_config, 'manager_agent_id')):
                try:
                    # Check if manager is organon-central
                    if hasattr(group.manager_config, 'manager_agent_id'):
                        manager_name = get_agent_name(client, group.manager_config.manager_agent_id)
                        if 'organon' in manager_name.lower():
                            organon_groups.append((group, manager_name))
                except Exception:
                    pass

        if organon_groups:
            console.print("\n[bold green]Organon Ecosystem Groups Found:[/bold green]")
            for group, manager_name in organon_groups:
                console.print(f"  • {group.id} - Managed by {manager_name}")
        else:
            console.print("\n[yellow]No Organon ecosystem groups found.[/yellow]")
            console.print("[dim]Run the firehose listener to create the Organon ecosystem group.[/dim]")

    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        sys.exit(1)

if __name__ == "__main__":
    main()
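As a quick programmatic variant of the table above, a group's supervisor can be read straight off `manager_config`; a sketch under the same client assumptions:

```python
# Sketch: print each group's supervisor agent ID (None for non-supervisor groups).
for group in client.groups.list():
    manager = getattr(getattr(group, "manager_config", None), "manager_agent_id", None)
    print(group.id, "->", manager or "no supervisor")
```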
+128 organon/setup_group.py (new file)
···
#!/usr/bin/env python3
"""
Simple tool to set up the Organon ecosystem group without running the firehose listener.
"""

import os
import sys
from dotenv import load_dotenv
from letta_client import Letta, SupervisorManager
from rich.console import Console

# Add parent directory to path for imports
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config_loader import get_config

load_dotenv()

def main():
    console = Console()

    try:
        # Initialize configuration and client
        config = get_config()

        client = Letta(
            base_url=config.get('letta.base_url', os.environ.get('LETTA_BASE_URL')),
            token=config.get('letta.api_key', os.environ.get('LETTA_API_KEY')),
            timeout=config.get('letta.timeout', 30)
        )

        project_id = config.get('letta.project_id', os.environ.get('LETTA_PROJECT_ID'))
        if not project_id:
            raise ValueError("Project ID must be set in config.yaml under letta.project_id or as LETTA_PROJECT_ID environment variable")

        # Get Organon central agent
        console.print("[blue]Finding Organon Central agent...[/blue]")
        try:
            organon_agents = client.agents.list(project_id=project_id, name="organon-central")
        except Exception:
            # Fallback for self-hosted without project support
            organon_agents = client.agents.list(name="organon-central")
        if not organon_agents:
            console.print("[red]❌ Organon Central agent not found. Run create_organon.py first.[/red]")
            return

        organon_central_id = organon_agents[0].id
        console.print(f"[green]✅ Found Organon Central: {organon_central_id[:8]}[/green]")

        # Get Organon shards
        console.print("[blue]Finding Organon shards...[/blue]")
        try:
            shard_agents = client.agents.list(project_id=project_id, tags=["organon-shard"])
        except Exception:
            # Fallback for self-hosted without project support
            shard_agents = client.agents.list(tags=["organon-shard"])
        if not shard_agents:
            console.print("[red]❌ No Organon shards found. Run create_organon.py to create shards.[/red]")
            return

        console.print(f"[green]✅ Found {len(shard_agents)} shards:[/green]")
        for shard in shard_agents:
            console.print(f"  • {shard.name} ({shard.id[:8]})")

        # Check if group already exists
        console.print("[blue]Checking for existing groups...[/blue]")
        try:
            groups = client.groups.list(project_id=project_id)
        except Exception:
            # Fallback for self-hosted without project support
            groups = client.groups.list()

        existing_group = None
        for group in groups:
            if (group.description and 'organon ecosystem' in group.description.lower()) or \
               (hasattr(group, 'manager_config') and group.manager_config and
                hasattr(group.manager_config, 'manager_agent_id') and
                group.manager_config.manager_agent_id == organon_central_id):
                existing_group = group
                break

        if existing_group:
            console.print(f"[yellow]Group already exists: {existing_group.id[:12]}[/yellow]")
            return

        # Create the supervisor group
        console.print("[blue]Creating Organon ecosystem group...[/blue]")
        worker_agent_ids = [shard.id for shard in shard_agents]

        group = client.groups.create(
            agent_ids=worker_agent_ids,
            description="Supervisor group for the Organon ecosystem with organon-central managing all shards",
            manager_config=SupervisorManager(
                manager_agent_id=organon_central_id
            )
        )

        console.print(f"[green]✅ Created Organon ecosystem group: {group.id[:12]}[/green]")
        console.print(f"  Supervisor: organon-central ({organon_central_id[:8]})")
        console.print(f"  Workers: {len(worker_agent_ids)} shards")

        # Verify the group was actually created
        console.print("[blue]Verifying group creation...[/blue]")
        try:
            retrieved_group = client.groups.retrieve(group_id=group.id)
            console.print(f"[green]✅ Group verified: {retrieved_group.id[:12]}[/green]")

            # Also check if it shows up in the list
            try:
                all_groups = client.groups.list(project_id=project_id)
            except Exception:
                all_groups = client.groups.list()
            found_in_list = any(g.id == group.id for g in all_groups)
            color = 'green' if found_in_list else 'red'
            mark = '✅' if found_in_list else '❌'
            console.print(f"[{color}]{mark} Group appears in list: {found_in_list}[/{color}]")

        except Exception as e:
            console.print(f"[red]❌ Error verifying group: {e}[/red]")

        console.print("\n[bold green]Setup complete! You can now use:[/bold green]")
        console.print("• [cyan]python organon/chat_with_organon.py[/cyan] - Chat with the ecosystem")
        console.print("• [cyan]python organon/list_groups.py[/cyan] - View group status")
        console.print("• [cyan]python organon/firehose_listener.py[/cyan] - Start the firehose listener")

    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        sys.exit(1)

if __name__ == "__main__":
    main()
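The heart of both this script and the listener is the supervisor-group creation call; condensed, it looks like the sketch below (same `letta_client` assumptions as the scripts above, credentials elided):

```python
from letta_client import Letta, SupervisorManager

# Sketch: one supervisor (organon-central) coordinating all tagged shards.
client = Letta(token="...", timeout=30)  # token elided
shards = client.agents.list(tags=["organon-shard"])
group = client.groups.create(
    agent_ids=[s.id for s in shards],
    description="Supervisor group for the Organon ecosystem",
    manager_config=SupervisorManager(manager_agent_id="<organon-central agent id>"),
)
print(f"Created group {group.id} with {len(shards)} workers")
```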
+30 −4 register_x_tools.py
···
  from letta_client import Letta
  from rich.console import Console
  from rich.table import Table
- from x import get_x_letta_config
+ from config_loader import get_letta_config

  # Import standalone functions and their schemas
  from tools.blocks import (
-     attach_x_user_blocks, detach_x_user_blocks,
-     AttachXUserBlocksArgs, DetachXUserBlocksArgs
+     attach_x_user_blocks, detach_x_user_blocks,
+     x_user_note_append, x_user_note_replace, x_user_note_set, x_user_note_view,
+     AttachXUserBlocksArgs, DetachXUserBlocksArgs,
+     XUserNoteAppendArgs, XUserNoteReplaceArgs, XUserNoteSetArgs, XUserNoteViewArgs
  )
  from tools.halt import halt_activity, HaltArgs
  from tools.ignore import ignore_notification, IgnoreNotificationArgs
···
  # Import X search tool
  from tools.search_x import search_x_posts, SearchXArgs

- letta_config = get_x_letta_config()
+ letta_config = get_letta_config()
  logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)
  console = Console()
···
          "args_schema": DetachXUserBlocksArgs,
          "description": "Detach X user-specific memory blocks from the agent. Blocks are preserved for later use.",
          "tags": ["memory", "blocks", "user", "x", "twitter"]
+     },
+     {
+         "func": x_user_note_append,
+         "args_schema": XUserNoteAppendArgs,
+         "description": "Append a note to an X user's memory block. Creates the block if it doesn't exist.",
+         "tags": ["memory", "blocks", "user", "append", "x", "twitter"]
+     },
+     {
+         "func": x_user_note_replace,
+         "args_schema": XUserNoteReplaceArgs,
+         "description": "Replace text in an X user's memory block.",
+         "tags": ["memory", "blocks", "user", "replace", "x", "twitter"]
+     },
+     {
+         "func": x_user_note_set,
+         "args_schema": XUserNoteSetArgs,
+         "description": "Set the complete content of an X user's memory block.",
+         "tags": ["memory", "blocks", "user", "set", "x", "twitter"]
+     },
+     {
+         "func": x_user_note_view,
+         "args_schema": XUserNoteViewArgs,
+         "description": "View the content of an X user's memory block.",
+         "tags": ["memory", "blocks", "user", "view", "x", "twitter"]
      },

      # X thread tool
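Each registry entry above pairs a plain function with a Pydantic args schema. The real implementations live in tools/blocks.py, which this diff does not show; the following is a hypothetical sketch of the shape one such pair might take:

```python
from pydantic import BaseModel, Field

class XUserNoteAppendArgs(BaseModel):
    """Arguments for appending to an X user's memory block (hypothetical)."""
    username: str = Field(..., description="X username the note is about")
    note: str = Field(..., description="Text to append to the user's block")

def x_user_note_append(username: str, note: str) -> str:
    """Append a note to an X user's memory block (stub body, for illustration)."""
    # The real implementation manipulates Letta memory blocks; this stub only
    # illustrates the function/schema pairing the registry expects.
    return f"Appended {len(note)} characters to the note block for @{username}"
```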
+67 −183 x.py
···
      pass


- # Configure logging (will be updated by setup_logging_from_config if called)
+ # Configure logging
  logging.basicConfig(
      level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
  )
  logger = logging.getLogger("x_client")

- def setup_logging_from_config(config_path: str = "x_config.yaml"):
-     """Configure logging based on x_config.yaml settings."""
-     try:
-         config = load_x_config(config_path)
-         logging_config = config.get('logging', {})
-         log_level = logging_config.get('level', 'INFO').upper()
-
-         # Convert string level to logging constant
-         numeric_level = getattr(logging, log_level, logging.INFO)
-
-         # Update the root logger level
-         logging.getLogger().setLevel(numeric_level)
-         # Update our specific logger level
-         logger.setLevel(numeric_level)
-
-         logger.info(f"Logging level set to {log_level}")
-
-     except Exception as e:
-         logger.warning(f"Failed to configure logging from config: {e}, using default INFO level")
-
  # X-specific file paths
  X_QUEUE_DIR = Path("x_queue")
  X_CACHE_DIR = Path("x_cache")
···
      def _make_request(self, endpoint: str, params: Optional[Dict] = None, method: str = "GET", data: Optional[Dict] = None, max_retries: int = 3) -> Optional[Dict]:
          """Make a request to the X API with proper error handling and exponential backoff."""
          url = f"{self.base_url}{endpoint}"
-
-         # Log the specific API call being made
-         logger.debug(f"Making X API request: {method} {endpoint}")
-
+
          for attempt in range(max_retries):
              try:
                  if method.upper() == "GET":
···
                  if attempt < max_retries - 1:
                      # Exponential backoff: 60s, 120s, 240s
                      backoff_time = 60 * (2 ** attempt)
-                     logger.warning(f"X API rate limit exceeded on {method} {endpoint} (attempt {attempt + 1}/{max_retries}) - waiting {backoff_time}s before retry")
+                     logger.warning(f"X API rate limit exceeded (attempt {attempt + 1}/{max_retries}) - waiting {backoff_time}s before retry")
                      logger.error(f"Response: {response.text}")
                      time.sleep(backoff_time)
                      continue
                  else:
-                     logger.error(f"X API rate limit exceeded on {method} {endpoint} - max retries reached")
+                     logger.error("X API rate limit exceeded - max retries reached")
                      logger.error(f"Response: {response.text}")
-                     raise XRateLimitError(f"X API rate limit exceeded on {method} {endpoint}")
+                     raise XRateLimitError("X API rate limit exceeded")
              else:
                  if attempt < max_retries - 1:
                      # Exponential backoff for other HTTP errors too
···
              return cached_data

          # First, get the original tweet directly since it might not appear in conversation search
-         logger.debug(f"Getting thread context for conversation {conversation_id}")
          original_tweet = None
          try:
              endpoint = f"/tweets/{conversation_id}"
···
                  "user.fields": "id,name,username",
                  "expansions": "author_id"
              }
-             logger.debug(f"Fetching original tweet: GET {endpoint}")
              response = self._make_request(endpoint, params)
              if response and "data" in response:
                  original_tweet = response["data"]
···
              logger.info(f"Using until_id={until_id} to exclude future tweets")

          logger.info(f"Fetching thread context for conversation {conversation_id}")
-         logger.debug(f"Searching conversation: GET {endpoint} with query={params['query']}")
          response = self._make_request(endpoint, params)

          tweets = []
···
              "user.fields": "id,name,username",
              "expansions": "author_id"
          }
-         logger.debug(f"Batch fetching missing tweets: GET {endpoint} (ids: {len(batch_ids)} tweets)")
          response = self._make_request(endpoint, params)

          if response and "data" in response:
···
          }

          logger.info(f"Attempting to post reply with {self.auth_method} authentication")
-         logger.debug(f"Posting reply: POST {endpoint}")
          result = self._make_request(endpoint, method="POST", data=payload)

          if result:
···
          }

          logger.info(f"Attempting to post tweet with {self.auth_method} authentication")
-         logger.debug(f"Posting tweet: POST {endpoint}")
          result = self._make_request(endpoint, method="POST", data=payload)

          if result:
···
              logger.error("Failed to post tweet")
              return None

-     def get_user_info(self, fields: Optional[str] = None) -> Optional[Dict]:
-         """
-         Get the authenticated user's information, using cached data when available.
-         This reduces API calls significantly since user info rarely changes.
-
-         Args:
-             fields: Optional comma-separated list of user fields to fetch
-
-         Returns:
-             User data dict if successful, None if failed
-         """
-         # First try to get from cache
-         cached_user_info = get_cached_user_info()
-         if cached_user_info:
-             # Check if cached data has all requested fields
-             requested_fields = set(fields.split(',') if fields else ['id', 'username', 'name'])
-             cached_fields = set(cached_user_info.keys())
-             if requested_fields.issubset(cached_fields):
-                 return cached_user_info
-
-         # Cache miss, expired, or missing requested fields - fetch from API
-         logger.debug("Fetching fresh user info from /users/me API")
-         endpoint = "/users/me"
-         params = {"user.fields": fields or "id,username,name,description"}
-
-         response = self._make_request(endpoint, params=params)
-         if response and "data" in response:
-             user_data = response["data"]
-             # Cache the result for future use
-             save_cached_user_info(user_data)
-             return user_data
-         else:
-             logger.error("Failed to get user info from /users/me API")
-             return None
-
-     def get_username(self) -> Optional[str]:
-         """
-         Get the authenticated user's username, using cached data when available.
-         This reduces API calls significantly since username rarely changes.
-
-         Returns:
-             Username string if successful, None if failed
-         """
-         user_info = self.get_user_info("id,username,name")
-         return user_info.get("username") if user_info else None
-
- def load_x_config(config_path: str = "x_config.yaml") -> Dict[str, Any]:
-     """Load complete X configuration from x_config.yaml."""
+ def load_x_config(config_path: str = "config.yaml") -> Dict[str, str]:
+     """Load X configuration from config file."""
      try:
          with open(config_path, 'r') as f:
              config = yaml.safe_load(f)
-
-         if not config:
-             raise ValueError(f"Empty or invalid configuration file: {config_path}")
-
-         # Validate required sections
+
          x_config = config.get('x', {})
-         letta_config = config.get('letta', {})
-
          if not x_config.get('api_key') or not x_config.get('user_id'):
-             raise ValueError("X API key and user_id must be configured in x_config.yaml")
-
-         if not letta_config.get('api_key') or not letta_config.get('agent_id'):
-             raise ValueError("Letta API key and agent_id must be configured in x_config.yaml")
-
-         return config
+             raise ValueError("X API key and user_id must be configured in config.yaml")
+
+         return x_config
      except Exception as e:
          logger.error(f"Failed to load X configuration: {e}")
          raise

- def get_x_letta_config(config_path: str = "x_config.yaml") -> Dict[str, Any]:
-     """Get Letta configuration from X config file."""
-     config = load_x_config(config_path)
-     return config['letta']
-
- def create_x_client(config_path: str = "x_config.yaml") -> XClient:
+ def create_x_client(config_path: str = "config.yaml") -> XClient:
      """Create and return an X client with configuration loaded from file."""
      config = load_x_config(config_path)
-     x_config = config['x']
      return XClient(
-         api_key=x_config['api_key'],
-         user_id=x_config['user_id'],
-         access_token=x_config.get('access_token'),
-         consumer_key=x_config.get('consumer_key'),
-         consumer_secret=x_config.get('consumer_secret'),
-         access_token_secret=x_config.get('access_token_secret')
+         api_key=config['api_key'],
+         user_id=config['user_id'],
+         access_token=config.get('access_token'),
+         consumer_key=config.get('consumer_key'),
+         consumer_secret=config.get('consumer_secret'),
+         access_token_secret=config.get('access_token_secret')
      )

  def mention_to_yaml_string(mention: Dict, users_data: Optional[Dict] = None) -> str:
···
      try:
          from tools.blocks import attach_x_user_blocks, x_user_note_set
+         from config_loader import get_letta_config
          from letta_client import Letta
-
-         # Get Letta client and agent_id from X config
-         config = get_x_letta_config()
+
+         # Get Letta client and agent_id from config
+         config = get_letta_config()
          client = Letta(token=config['api_key'], timeout=config['timeout'])

          # Use provided agent_id or get from config
···
      except Exception as e:
          logger.error(f"Error caching individual tweets: {e}")

- def get_cached_user_info() -> Optional[Dict]:
-     """Load cached user info if available and not expired."""
-     cache_file = X_CACHE_DIR / "user_info.json"
-     if cache_file.exists():
-         try:
-             with open(cache_file, 'r') as f:
-                 cached_data = json.load(f)
-             # Check if cache is recent (within 24 hours)
-             from datetime import datetime, timedelta
-             cached_time = datetime.fromisoformat(cached_data.get('cached_at', ''))
-             if datetime.now() - cached_time < timedelta(hours=24):
-                 logger.debug("Using cached user info")
-                 return cached_data.get('data')
-             else:
-                 logger.debug("Cached user info expired (>24 hours old)")
-         except Exception as e:
-             logger.warning(f"Error loading cached user info: {e}")
-     return None
-
- def save_cached_user_info(user_data: Dict):
-     """Save user info to cache."""
-     try:
-         X_CACHE_DIR.mkdir(exist_ok=True)
-         cache_file = X_CACHE_DIR / "user_info.json"
-
-         from datetime import datetime
-         cache_data = {
-             'data': user_data,
-             'cached_at': datetime.now().isoformat()
-         }
-
-         with open(cache_file, 'w') as f:
-             json.dump(cache_data, f, indent=2)
-
-         logger.debug(f"Cached user info: {user_data.get('username')}")
-
-     except Exception as e:
-         logger.error(f"Error caching user info: {e}")
-
  def has_sufficient_context(tweets: List[Dict], missing_tweet_ids: Set[str]) -> bool:
      """
      Determine if we have sufficient context to skip backfilling missing tweets.
···
      last_seen_id = load_last_seen_id()

      logger.info(f"Fetching mentions for @{username} since {last_seen_id or 'beginning'}")
-
-     # Search for mentions - this calls GET /2/tweets/search/recent
-     logger.debug(f"Calling search_mentions API for @{username}")
+
+     # Search for mentions
      mentions = client.search_mentions(
          username=username,
          since_id=last_seen_id,
···
      try:
          client = create_x_client()

-         # Get authenticated user info using cached method
+         # Use the /2/users/me endpoint to get authenticated user info
+         endpoint = "/users/me"
+         params = {
+             "user.fields": "id,name,username,description"
+         }
+
          print("Fetching authenticated user information...")
-         user_data = client.get_user_info("id,name,username,description")
-
-         if user_data:
+         response = client._make_request(endpoint, params=params)
+
+         if response and "data" in response:
+             user_data = response["data"]
              print(f"✅ Found authenticated user:")
              print(f"   ID: {user_data.get('id')}")
              print(f"   Username: @{user_data.get('username')}")
              print(f"   Name: {user_data.get('name')}")
              print(f"   Description: {user_data.get('description', 'N/A')[:100]}...")
-             print(f"\n🔧 Update your x_config.yaml with:")
+             print(f"\n🔧 Update your config.yaml with:")
              print(f"   user_id: \"{user_data.get('id')}\"")
              return user_data
          else:
···
          client = create_x_client()

          # First get our username
-         username = client.get_username()
-         if not username:
+         user_info = client._make_request("/users/me", params={"user.fields": "username"})
+         if not user_info or "data" not in user_info:
              print("❌ Could not get username")
              return
+
+         username = user_info["data"]["username"]
          print(f"🔍 Searching for mentions of @{username}")

          mentions = client.search_mentions(username, max_results=5)
···
          client = create_x_client()

          # Get our username
-         username = client.get_username()
-         if not username:
+         user_info = client._make_request("/users/me", params={"user.fields": "username"})
+         if not user_info or "data" not in user_info:
              print("❌ Could not get username")
              return
+
+         username = user_info["data"]["username"]
          print(f"🔍 Fetching and queueing mentions for @{username}")

          # Show current state
···
          import json
          import yaml

-         # Load X config to access letta section
+         # Load full config to access letta section
          try:
-             x_config = load_x_config()
-             letta_config = x_config.get('letta', {})
+             with open("config.yaml", 'r') as f:
+                 full_config = yaml.safe_load(f)
+
+             letta_config = full_config.get('letta', {})
              api_key = letta_config.get('api_key')
              config_agent_id = letta_config.get('agent_id')

···
              agent_id = config_agent_id
              print(f"ℹ️ Using agent_id from config: {agent_id}")
          else:
-             print("❌ No agent_id found in x_config.yaml")
+             print("❌ No agent_id found in config.yaml")
              print("Expected config structure:")
              print("  letta:")
              print("    agent_id: your-agent-id")
···
              import os
              api_key = os.getenv('LETTA_API_KEY')
              if not api_key:
-                 print("❌ LETTA_API_KEY not found in x_config.yaml or environment")
+                 print("❌ LETTA_API_KEY not found in config.yaml or environment")
                  print("Expected config structure:")
                  print("  letta:")
                  print("    api_key: your-letta-api-key")
···
              else:
                  print("ℹ️ Using LETTA_API_KEY from environment")
          else:
-             print("ℹ️ Using LETTA_API_KEY from x_config.yaml")
+             print("ℹ️ Using LETTA_API_KEY from config.yaml")

      except Exception as e:
          print(f"❌ Error loading config: {e}")
···
              "user.fields": "id,name,username",
              "expansions": "author_id"
          }
-         logger.debug(f"Fetching individual missing tweet: GET {endpoint}")
          response = x_client._make_request(endpoint, params)
          if response and "data" in response:
              missing_tweet = response["data"]
···
          print(f"   {line}")

      # Send to Letta agent
+     from config_loader import get_letta_config
      from letta_client import Letta
-
-     config = get_x_letta_config()
+
+     config = get_letta_config()
      letta_client = Letta(token=config['api_key'], timeout=config['timeout'])

      prompt_char_count = len(prompt)
···
      Similar to bsky.py process_notifications but for X.
      """
      try:
-         # Get username for fetching mentions - uses cached data to avoid rate limits
-         username = x_client.get_username()
-         if not username:
+         # Get username for fetching mentions
+         user_info = x_client._make_request("/users/me", params={"user.fields": "username"})
+         if not user_info or "data" not in user_info:
              logger.error("Could not get username for X mentions")
              return
+
+         username = user_info["data"]["username"]

          # Fetch and queue new mentions
          new_count = fetch_and_queue_mentions(username)
···
      """Initialize the void agent for X operations."""
      logger.info("Starting void agent initialization for X...")

+     from config_loader import get_letta_config
      from letta_client import Letta
+
      # Get config
-     config = get_x_letta_config()
+     config = get_letta_config()
      client = Letta(token=config['api_key'], timeout=config['timeout'])
      agent_id = config['agent_id']
···
      logger.info("Configuring tools for X platform...")
      try:
          from tool_manager import ensure_platform_tools
-         ensure_platform_tools('x', void_agent.id, config['api_key'])
+         ensure_platform_tools('x', void_agent.id)
      except Exception as e:
          logger.error(f"Failed to configure platform tools: {e}")
          logger.warning("Continuing with existing tool configuration")
···
      """
      import time
      from time import sleep
+     from config_loader import get_letta_config
      from letta_client import Letta
+
      logger.info("=== STARTING X VOID BOT ===")
-
-     # Configure logging from config file
-     setup_logging_from_config()
-
+
      # Initialize void agent
      void_agent = initialize_x_void()
      logger.info(f"X void agent initialized: {void_agent.id}")
···
      logger.info("Connected to X API")

      # Get Letta client for periodic cleanup
-     config = get_x_letta_config()
+     config = get_letta_config()
      letta_client = Letta(token=config['api_key'], timeout=config['timeout'])

      # Main loop
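Taken together, these x.py changes consolidate the old x_config.yaml into the shared config.yaml. A sketch of the structure the new code expects, with keys inferred from the reads above and placeholder values:

```yaml
# Sketch of the consolidated config.yaml implied by this diff; keys are
# inferred from the code above, values are placeholders.
x:
  api_key: your_x_bearer_token
  user_id: "your_user_id"
  access_token: your_access_token          # optional OAuth 1.0a credentials
  access_token_secret: your_access_token_secret
  consumer_key: your_consumer_key
  consumer_secret: your_consumer_secret

letta:
  api_key: your_letta_api_key
  agent_id: your_agent_id
  timeout: 600
```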