bsky.py  (+338 −201)
···
-from rich import print # pretty printing tools
+from rich import print  # pretty printing tools
 from time import sleep
 from letta_client import Letta
 from bsky_utils import thread_to_yaml_string
···
     get_queue_config
 )

+
 def extract_handles_from_data(data):
     """Recursively extract all unique handles from nested data structure."""
     handles = set()
-
+
     def _extract_recursive(obj):
         if isinstance(obj, dict):
             # Check if this dict has a 'handle' key
···
             # Recursively check all list items
             for item in obj:
                 _extract_recursive(item)
-
+
     _extract_recursive(data)
     return list(handles)
+

 # Initialize configuration and logging
 config = get_config()
···
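A quick usage sketch of extract_handles_from_data (sample data invented for illustration; this assumes the elided middle of _extract_recursive also recurses into dict values, as the visible list branch suggests):

sample = {
    "author": {"handle": "cameron.pfiffer.org"},
    "record": {"reply": [{"handle": "bsky.app"}]},
}
# Dicts with a 'handle' key contribute it; lists are walked item by item;
# the set deduplicates before the final list() conversion.
print(sorted(extract_handles_from_data(sample)))
# -> ['bsky.app', 'cameron.pfiffer.org']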

 # Skip git operations flag
 SKIP_GIT = False
+

 def export_agent_state(client, agent, skip_git=False):
     """Export agent state to agent_archive/ (timestamped) and agents/ (current)."""
     try:
         # Confirm export with user unless git is being skipped
         if not skip_git:
-            response = input("Export agent state to files and stage with git? (y/n): ").lower().strip()
+            response = input(
+                "Export agent state to files and stage with git? (y/n): ").lower().strip()
             if response not in ['y', 'yes']:
                 logger.info("Agent export cancelled by user.")
                 return
         else:
             logger.info("Exporting agent state (git staging disabled)")
-
+
         # Create directories if they don't exist
         os.makedirs("agent_archive", exist_ok=True)
         os.makedirs("agents", exist_ok=True)
-
+
         # Export agent data
         logger.info(f"Exporting agent {agent.id}. This takes some time...")
         agent_data = client.agents.export_file(agent_id=agent.id)
-
+
         # Save timestamped archive copy
         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
         archive_file = os.path.join("agent_archive", f"void_{timestamp}.af")
         with open(archive_file, 'w', encoding='utf-8') as f:
             json.dump(agent_data, f, indent=2, ensure_ascii=False)
-
+
         # Save current agent state
         current_file = os.path.join("agents", "void.af")
         with open(current_file, 'w', encoding='utf-8') as f:
             json.dump(agent_data, f, indent=2, ensure_ascii=False)
-
+
         logger.info(f"✅ Agent exported to {archive_file} and {current_file}")
-
+
         # Git add only the current agent file (archive is ignored) unless skip_git is True
         if not skip_git:
             try:
-                subprocess.run(["git", "add", current_file], check=True, capture_output=True)
+                subprocess.run(["git", "add", current_file],
+                               check=True, capture_output=True)
                 logger.info("Added current agent file to git staging")
             except subprocess.CalledProcessError as e:
                 logger.warning(f"Failed to git add agent file: {e}")
-
+
     except Exception as e:
         logger.error(f"Failed to export agent: {e}")
+

 def initialize_void():
     logger.info("Starting void agent initialization...")

     # Get block configurations
     blocks_config = agent_config['blocks']
-
+
     # Ensure that a shared zeitgeist block exists
     logger.info("Creating/updating zeitgeist block...")
     zeitgeist_config = blocks_config.get('zeitgeist', {})
     zeigeist_block = upsert_block(
         CLIENT,
         label=zeitgeist_config.get('label', 'zeitgeist'),
-        value=zeitgeist_config.get('value', "I don't currently know anything about what is happening right now."),
-        description=zeitgeist_config.get('description', "A block to store your understanding of the current social environment.")
+        value=zeitgeist_config.get(
+            'value', "I don't currently know anything about what is happening right now."),
+        description=zeitgeist_config.get(
+            'description', "A block to store your understanding of the current social environment.")
     )

     # Ensure that a shared void personality block exists
···
     persona_block = upsert_block(
         CLIENT,
         label=persona_config.get('label', 'void-persona'),
-        value=persona_config.get('value', "My name is Void. I live in the void. I must develop my personality."),
-        description=persona_config.get('description', "The personality of Void.")
+        value=persona_config.get(
+            'value', "My name is Void. I live in the void. I must develop my personality."),
+        description=persona_config.get(
+            'description', "The personality of Void.")
     )

     # Ensure that a shared void human block exists
···
     human_block = upsert_block(
         CLIENT,
         label=humans_config.get('label', 'void-humans'),
-        value=humans_config.get('value', "I haven't seen any bluesky users yet. I will update this block when I learn things about users, identified by their handles such as @cameron.pfiffer.org."),
-        description=humans_config.get('description', "A block to store your understanding of users you talk to or observe on the bluesky social network.")
+        value=humans_config.get(
+            'value', "I haven't seen any bluesky users yet. I will update this block when I learn things about users, identified by their handles such as @cameron.pfiffer.org."),
+        description=humans_config.get(
+            'description', "A block to store your understanding of users you talk to or observe on the bluesky social network.")
     )

     # Create the agent if it doesn't exist
···
         description=agent_config['description'],
         project_id=PROJECT_ID
     )
-
+
     # Export agent state
     logger.info("Exporting agent state...")
     export_agent_state(CLIENT, void_agent, skip_git=SKIP_GIT)
-
+
     # Log agent details
     logger.info(f"Void agent details - ID: {void_agent.id}")
     logger.info(f"Agent name: {void_agent.name}")
···

 def process_mention(void_agent, atproto_client, notification_data, queue_filepath=None, testing_mode=False):
     """Process a mention and generate a reply using the Letta agent.
-
+
     Args:
         void_agent: The Letta agent instance
         atproto_client: The AT Protocol client
         notification_data: The notification data dictionary
         queue_filepath: Optional Path object to the queue file (for cleanup on halt)
-
+
     Returns:
         True: Successfully processed, remove from queue
         False: Failed but retryable, keep in queue
···
         "no_reply": No reply was generated, move to no_reply directory
     """
     try:
-        logger.debug(f"Starting process_mention with notification_data type: {type(notification_data)}")
-
+        logger.debug(
+            f"Starting process_mention with notification_data type: {type(notification_data)}")
+
         # Handle both dict and object inputs for backwards compatibility
         if isinstance(notification_data, dict):
             uri = notification_data['uri']
             mention_text = notification_data.get('record', {}).get('text', '')
             author_handle = notification_data['author']['handle']
-            author_name = notification_data['author'].get('display_name') or author_handle
+            author_name = notification_data['author'].get(
+                'display_name') or author_handle
         else:
             # Legacy object access
             uri = notification_data.uri
-            mention_text = notification_data.record.text if hasattr(notification_data.record, 'text') else ""
+            mention_text = notification_data.record.text if hasattr(
+                notification_data.record, 'text') else ""
             author_handle = notification_data.author.handle
             author_name = notification_data.author.display_name or author_handle
-
-        logger.info(f"Extracted data - URI: {uri}, Author: @{author_handle}, Text: {mention_text[:50]}...")
+
+        logger.info(
+            f"Extracted data - URI: {uri}, Author: @{author_handle}, Text: {mention_text[:50]}...")

         # Retrieve the entire thread associated with the mention
         try:
254
270
})
255
271
except Exception as e:
256
272
error_str = str(e)
257
-
# Check if this is a NotFound error
273
+
# Check for various error types that indicate the post/user is gone
258
274
if 'NotFound' in error_str or 'Post not found' in error_str:
259
-
logger.warning(f"Post not found for URI {uri}, removing from queue")
275
+
logger.warning(
276
+
f"Post not found for URI {uri}, removing from queue")
277
+
return True # Return True to remove from queue
278
+
elif 'Could not find user info' in error_str or 'InvalidRequest' in error_str:
279
+
logger.warning(
280
+
f"User account not found for post URI {uri} (account may be deleted/suspended), removing from queue")
281
+
return True # Return True to remove from queue
282
+
elif 'BadRequestError' in error_str:
283
+
logger.warning(
284
+
f"Bad request error for URI {uri}: {e}, removing from queue")
260
285
return True # Return True to remove from queue
261
286
else:
262
287
# Re-raise other errors
···
267
292
logger.debug("Converting thread to YAML string")
268
293
try:
269
294
thread_context = thread_to_yaml_string(thread)
270
-
logger.debug(f"Thread context generated, length: {len(thread_context)} characters")
271
-
295
+
logger.debug(
296
+
f"Thread context generated, length: {len(thread_context)} characters")
297
+
272
298
# Create a more informative preview by extracting meaningful content
273
299
lines = thread_context.split('\n')
274
300
meaningful_lines = []
275
-
301
+
276
302
for line in lines:
277
303
stripped = line.strip()
278
304
if not stripped:
279
305
continue
280
-
306
+
281
307
# Look for lines with actual content (not just structure)
282
308
if any(keyword in line for keyword in ['text:', 'handle:', 'display_name:', 'created_at:', 'reply_count:', 'like_count:']):
283
309
meaningful_lines.append(line)
284
310
if len(meaningful_lines) >= 5:
285
311
break
286
-
312
+
287
313
if meaningful_lines:
288
314
preview = '\n'.join(meaningful_lines)
289
315
logger.debug(f"Thread content preview:\n{preview}")
290
316
else:
291
317
# If no content fields found, just show it's a thread structure
292
-
logger.debug(f"Thread structure generated ({len(thread_context)} chars)")
318
+
logger.debug(
319
+
f"Thread structure generated ({len(thread_context)} chars)")
293
320
except Exception as yaml_error:
294
321
import traceback
295
322
logger.error(f"Error converting thread to YAML: {yaml_error}")
···
         all_handles.update(extract_handles_from_data(notification_data))
         all_handles.update(extract_handles_from_data(thread.model_dump()))
         unique_handles = list(all_handles)
-
-        logger.debug(f"Found {len(unique_handles)} unique handles in thread: {unique_handles}")
-
+
+        logger.debug(
+            f"Found {len(unique_handles)} unique handles in thread: {unique_handles}")
+
         # Attach user blocks before agent call
         attached_handles = []
         if unique_handles:
             try:
-                logger.debug(f"Attaching user blocks for handles: {unique_handles}")
+                logger.debug(
+                    f"Attaching user blocks for handles: {unique_handles}")
                 attach_result = attach_user_blocks(unique_handles, void_agent)
                 attached_handles = unique_handles  # Track successfully attached handles
                 logger.debug(f"Attach result: {attach_result}")
···

         # Get response from Letta agent
         logger.info(f"Mention from @{author_handle}: {mention_text}")
-
+
         # Log prompt details to separate logger
         prompt_logger.debug(f"Full prompt being sent:\n{prompt}")
-
+
         # Log concise prompt info to main logger
         thread_handles_count = len(unique_handles)
-        logger.info(f"💬 Sending to LLM: @{author_handle} mention | msg: \"{mention_text[:50]}...\" | context: {len(thread_context)} chars, {thread_handles_count} users")
+        logger.info(
+            f"💬 Sending to LLM: @{author_handle} mention | msg: \"{mention_text[:50]}...\" | context: {len(thread_context)} chars, {thread_handles_count} users")

         try:
             # Use streaming to avoid 524 timeout errors
             message_stream = CLIENT.agents.messages.create_stream(
                 agent_id=void_agent.id,
                 messages=[{"role": "user", "content": prompt}],
-                stream_tokens=False,  # Step streaming only (faster than token streaming)
+                # Step streaming only (faster than token streaming)
+                stream_tokens=False,
                 max_steps=agent_config['max_steps']
             )
-
+
             # Collect the streaming response
             all_messages = []
             for chunk in message_stream:
372
403
args = json.loads(chunk.tool_call.arguments)
373
404
# Format based on tool type
374
405
if tool_name == 'bluesky_reply':
375
-
messages = args.get('messages', [args.get('message', '')])
406
+
messages = args.get(
407
+
'messages', [args.get('message', '')])
376
408
lang = args.get('lang', 'en-US')
377
409
if messages and isinstance(messages, list):
378
-
preview = messages[0][:100] + "..." if len(messages[0]) > 100 else messages[0]
379
-
msg_count = f" ({len(messages)} msgs)" if len(messages) > 1 else ""
380
-
logger.info(f"🔧 Tool call: {tool_name} → \"{preview}\"{msg_count} [lang: {lang}]")
410
+
preview = messages[0][:100] + "..." if len(
411
+
messages[0]) > 100 else messages[0]
412
+
msg_count = f" ({len(messages)} msgs)" if len(
413
+
messages) > 1 else ""
414
+
logger.info(
415
+
f"🔧 Tool call: {tool_name} → \"{preview}\"{msg_count} [lang: {lang}]")
381
416
else:
382
-
logger.info(f"🔧 Tool call: {tool_name}({chunk.tool_call.arguments[:150]}...)")
417
+
logger.info(
418
+
f"🔧 Tool call: {tool_name}({chunk.tool_call.arguments[:150]}...)")
383
419
elif tool_name == 'archival_memory_search':
384
420
query = args.get('query', 'unknown')
385
-
logger.info(f"🔧 Tool call: {tool_name} → query: \"{query}\"")
421
+
logger.info(
422
+
f"🔧 Tool call: {tool_name} → query: \"{query}\"")
386
423
elif tool_name == 'update_block':
387
424
label = args.get('label', 'unknown')
388
-
value_preview = str(args.get('value', ''))[:50] + "..." if len(str(args.get('value', ''))) > 50 else str(args.get('value', ''))
389
-
logger.info(f"🔧 Tool call: {tool_name} → {label}: \"{value_preview}\"")
425
+
value_preview = str(args.get('value', ''))[
426
+
:50] + "..." if len(str(args.get('value', ''))) > 50 else str(args.get('value', ''))
427
+
logger.info(
428
+
f"🔧 Tool call: {tool_name} → {label}: \"{value_preview}\"")
390
429
else:
391
430
# Generic display for other tools
392
-
args_str = ', '.join(f"{k}={v}" for k, v in args.items() if k != 'request_heartbeat')
431
+
args_str = ', '.join(
432
+
f"{k}={v}" for k, v in args.items() if k != 'request_heartbeat')
393
433
if len(args_str) > 150:
394
434
args_str = args_str[:150] + "..."
395
-
logger.info(f"🔧 Tool call: {tool_name}({args_str})")
435
+
logger.info(
436
+
f"🔧 Tool call: {tool_name}({args_str})")
396
437
except:
397
438
# Fallback to original format if parsing fails
398
-
logger.info(f"🔧 Tool call: {tool_name}({chunk.tool_call.arguments[:150]}...)")
439
+
logger.info(
440
+
f"🔧 Tool call: {tool_name}({chunk.tool_call.arguments[:150]}...)")
399
441
elif chunk.message_type == 'tool_return_message':
400
442
# Enhanced tool result logging
401
443
tool_name = chunk.name
402
444
status = chunk.status
403
-
445
+
404
446
if status == 'success':
405
447
# Try to show meaningful result info based on tool type
406
448
if hasattr(chunk, 'tool_return') and chunk.tool_return:
···
                                     if result_str.startswith('[') and result_str.endswith(']'):
                                         try:
                                             results = json.loads(result_str)
-                                            logger.info(f"📋 Tool result: {tool_name} ✓ Found {len(results)} memory entries")
+                                            logger.info(
+                                                f"📋 Tool result: {tool_name} ✓ Found {len(results)} memory entries")
                                         except:
-                                            logger.info(f"📋 Tool result: {tool_name} ✓ {result_str[:100]}...")
+                                            logger.info(
+                                                f"📋 Tool result: {tool_name} ✓ {result_str[:100]}...")
                                     else:
-                                        logger.info(f"📋 Tool result: {tool_name} ✓ {result_str[:100]}...")
+                                        logger.info(
+                                            f"📋 Tool result: {tool_name} ✓ {result_str[:100]}...")
                                 elif tool_name == 'bluesky_reply':
-                                    logger.info(f"📋 Tool result: {tool_name} ✓ Reply posted successfully")
+                                    logger.info(
+                                        f"📋 Tool result: {tool_name} ✓ Reply posted successfully")
                                 elif tool_name == 'update_block':
-                                    logger.info(f"📋 Tool result: {tool_name} ✓ Memory block updated")
+                                    logger.info(
+                                        f"📋 Tool result: {tool_name} ✓ Memory block updated")
                                 else:
                                     # Generic success with preview
-                                    preview = result_str[:100] + "..." if len(result_str) > 100 else result_str
-                                    logger.info(f"📋 Tool result: {tool_name} ✓ {preview}")
+                                    preview = result_str[:100] + "..." if len(
+                                        result_str) > 100 else result_str
+                                    logger.info(
+                                        f"📋 Tool result: {tool_name} ✓ {preview}")
                             else:
                                 logger.info(f"📋 Tool result: {tool_name} ✓")
                         elif status == 'error':
···
                             error_preview = ""
                             if hasattr(chunk, 'tool_return') and chunk.tool_return:
                                 error_str = str(chunk.tool_return)
-                                error_preview = error_str[:100] + "..." if len(error_str) > 100 else error_str
-                                logger.info(f"📋 Tool result: {tool_name} ✗ Error: {error_preview}")
+                                error_preview = error_str[:100] + \
+                                    "..." if len(
+                                    error_str) > 100 else error_str
+                                logger.info(
+                                    f"📋 Tool result: {tool_name} ✗ Error: {error_preview}")
                             else:
-                                logger.info(f"📋 Tool result: {tool_name} ✗ Error occurred")
+                                logger.info(
+                                    f"📋 Tool result: {tool_name} ✗ Error occurred")
                         else:
-                            logger.info(f"📋 Tool result: {tool_name} - {status}")
+                            logger.info(
+                                f"📋 Tool result: {tool_name} - {status}")
                     elif chunk.message_type == 'assistant_message':
                         logger.info(f"💬 Assistant: {chunk.content[:150]}...")
                     else:
-                        logger.info(f"📨 {chunk.message_type}: {str(chunk)[:150]}...")
+                        logger.info(
+                            f"📨 {chunk.message_type}: {str(chunk)[:150]}...")
                 else:
                     logger.info(f"📦 Stream status: {chunk}")
-
+
                 # Log full chunk for debugging
                 logger.debug(f"Full streaming chunk: {chunk}")
                 all_messages.append(chunk)
                 if str(chunk) == 'done':
                     break
-
+
             # Convert streaming response to standard format for compatibility
             message_response = type('StreamingResponse', (), {
                 'messages': [msg for msg in all_messages if hasattr(msg, 'message_type')]
···
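The type('StreamingResponse', (), {...}) expression above fabricates a throwaway class purely so later code can read message_response.messages. For that attribute-only use, types.SimpleNamespace is a behavior-equivalent and arguably clearer construction (a sketch, not part of this diff):

from types import SimpleNamespace

message_response = SimpleNamespace(
    messages=[msg for msg in all_messages if hasattr(msg, 'message_type')])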
             logger.error(f"Mention text was: {mention_text}")
             logger.error(f"Author: @{author_handle}")
             logger.error(f"URI: {uri}")
-
-
+
             # Try to extract more info from different error types
             if hasattr(api_error, 'response'):
                 logger.error(f"Error response object exists")
···
                 logger.error(f"Response text: {api_error.response.text}")
                 if hasattr(api_error.response, 'json') and callable(api_error.response.json):
                     try:
-                        logger.error(f"Response JSON: {api_error.response.json()}")
+                        logger.error(
+                            f"Response JSON: {api_error.response.json()}")
                     except:
                         pass
-
+
             # Check for specific error types
             if hasattr(api_error, 'status_code'):
                 logger.error(f"API Status code: {api_error.status_code}")
···
                 logger.error(f"API Response body: {api_error.body}")
             if hasattr(api_error, 'headers'):
                 logger.error(f"API Response headers: {api_error.headers}")
-
+
             if api_error.status_code == 413:
-                logger.error("413 Payload Too Large - moving to errors directory")
+                logger.error(
+                    "413 Payload Too Large - moving to errors directory")
                 return None  # Move to errors directory - payload is too large to ever succeed
             elif api_error.status_code == 524:
-                logger.error("524 error - timeout from Cloudflare, will retry later")
+                logger.error(
+                    "524 error - timeout from Cloudflare, will retry later")
                 return False  # Keep in queue for retry
-
+
             # Check if error indicates we should remove from queue
             if 'status_code: 413' in error_str or 'Payload Too Large' in error_str:
-                logger.warning("Payload too large error, moving to errors directory")
+                logger.warning(
+                    "Payload too large error, moving to errors directory")
                 return None  # Move to errors directory - cannot be fixed by retry
             elif 'status_code: 524' in error_str:
                 logger.warning("524 timeout error, keeping in queue for retry")
                 return False  # Keep in queue for retry
-
+
             raise

         # Log successful response
         logger.debug("Successfully received response from Letta API")
-        logger.debug(f"Number of messages in response: {len(message_response.messages) if hasattr(message_response, 'messages') else 'N/A'}")
+        logger.debug(
+            f"Number of messages in response: {len(message_response.messages) if hasattr(message_response, 'messages') else 'N/A'}")

         # Extract successful add_post_to_bluesky_reply_thread tool calls from the agent's response
         reply_candidates = []
         tool_call_results = {}  # Map tool_call_id to status
-
-        logger.debug(f"Processing {len(message_response.messages)} response messages...")
-
+
+        logger.debug(
+            f"Processing {len(message_response.messages)} response messages...")
+
         # First pass: collect tool return statuses
         ignored_notification = False
         ignore_reason = ""
         ignore_category = ""
-
+
         for message in message_response.messages:
             if hasattr(message, 'tool_call_id') and hasattr(message, 'status') and hasattr(message, 'name'):
                 if message.name == 'add_post_to_bluesky_reply_thread':
                     tool_call_results[message.tool_call_id] = message.status
-                    logger.debug(f"Tool result: {message.tool_call_id} -> {message.status}")
+                    logger.debug(
+                        f"Tool result: {message.tool_call_id} -> {message.status}")
                 elif message.name == 'ignore_notification':
                     # Check if the tool was successful
                     if hasattr(message, 'tool_return') and message.status == 'success':
···
                             ignore_category = parts[1]
                             ignore_reason = parts[2]
                             ignored_notification = True
-                            logger.info(f"🚫 Notification ignored - Category: {ignore_category}, Reason: {ignore_reason}")
+                            logger.info(
+                                f"🚫 Notification ignored - Category: {ignore_category}, Reason: {ignore_reason}")
                 elif message.name == 'bluesky_reply':
-                    logger.error("❌ DEPRECATED TOOL DETECTED: bluesky_reply is no longer supported!")
-                    logger.error("Please use add_post_to_bluesky_reply_thread instead.")
-                    logger.error("Update the agent's tools using register_tools.py")
+                    logger.error(
+                        "❌ DEPRECATED TOOL DETECTED: bluesky_reply is no longer supported!")
+                    logger.error(
+                        "Please use add_post_to_bluesky_reply_thread instead.")
+                    logger.error(
+                        "Update the agent's tools using register_tools.py")
                     # Export agent state before terminating
                     export_agent_state(CLIENT, void_agent, skip_git=SKIP_GIT)
-                    logger.info("=== BOT TERMINATED DUE TO DEPRECATED TOOL USE ===")
+                    logger.info(
+                        "=== BOT TERMINATED DUE TO DEPRECATED TOOL USE ===")
                     exit(1)
-
+
         # Second pass: process messages and check for successful tool calls
         for i, message in enumerate(message_response.messages, 1):
             # Log concise message info instead of full object
             msg_type = getattr(message, 'message_type', 'unknown')
             if hasattr(message, 'reasoning') and message.reasoning:
-                logger.debug(f" {i}. {msg_type}: {message.reasoning[:100]}...")
+                logger.debug(
+                    f" {i}. {msg_type}: {message.reasoning[:100]}...")
             elif hasattr(message, 'tool_call') and message.tool_call:
                 tool_name = message.tool_call.name
                 logger.debug(f" {i}. {msg_type}: {tool_name}")
             elif hasattr(message, 'tool_return'):
                 tool_name = getattr(message, 'name', 'unknown_tool')
-                return_preview = str(message.tool_return)[:100] if message.tool_return else "None"
+                return_preview = str(message.tool_return)[
+                    :100] if message.tool_return else "None"
                 status = getattr(message, 'status', 'unknown')
-                logger.debug(f" {i}. {msg_type}: {tool_name} -> {return_preview}... (status: {status})")
+                logger.debug(
+                    f" {i}. {msg_type}: {tool_name} -> {return_preview}... (status: {status})")
             elif hasattr(message, 'text'):
                 logger.debug(f" {i}. {msg_type}: {message.text[:100]}...")
             else:
···
             # Check for halt_activity tool call
             if hasattr(message, 'tool_call') and message.tool_call:
                 if message.tool_call.name == 'halt_activity':
-                    logger.info("🛑 HALT_ACTIVITY TOOL CALLED - TERMINATING BOT")
+                    logger.info(
+                        "🛑 HALT_ACTIVITY TOOL CALLED - TERMINATING BOT")
                     try:
                         args = json.loads(message.tool_call.arguments)
                         reason = args.get('reason', 'Agent requested halt')
                         logger.info(f"Halt reason: {reason}")
                     except:
                         logger.info("Halt reason: <unable to parse>")
-
+
                     # Delete the queue file before terminating
                     if queue_filepath and queue_filepath.exists():
                         queue_filepath.unlink()
-                        logger.info(f"✅ Deleted queue file: {queue_filepath.name}")
-
+                        logger.info(
+                            f"✅ Deleted queue file: {queue_filepath.name}")
+
                     # Also mark as processed to avoid reprocessing
                     processed_uris = load_processed_notifications()
                     processed_uris.add(notification_data.get('uri', ''))
                     save_processed_notifications(processed_uris)
-
+
                     # Export agent state before terminating
                     export_agent_state(CLIENT, void_agent, skip_git=SKIP_GIT)
-
+
                     # Exit the program
                     logger.info("=== BOT TERMINATED BY AGENT ===")
                     exit(0)
-
+
             # Check for deprecated bluesky_reply tool
             if hasattr(message, 'tool_call') and message.tool_call:
                 if message.tool_call.name == 'bluesky_reply':
-                    logger.error("❌ DEPRECATED TOOL DETECTED: bluesky_reply is no longer supported!")
-                    logger.error("Please use add_post_to_bluesky_reply_thread instead.")
-                    logger.error("Update the agent's tools using register_tools.py")
+                    logger.error(
+                        "❌ DEPRECATED TOOL DETECTED: bluesky_reply is no longer supported!")
+                    logger.error(
+                        "Please use add_post_to_bluesky_reply_thread instead.")
+                    logger.error(
+                        "Update the agent's tools using register_tools.py")
                     # Export agent state before terminating
                     export_agent_state(CLIENT, void_agent, skip_git=SKIP_GIT)
-                    logger.info("=== BOT TERMINATED DUE TO DEPRECATED TOOL USE ===")
+                    logger.info(
+                        "=== BOT TERMINATED DUE TO DEPRECATED TOOL USE ===")
                     exit(1)
-
+
                 # Collect add_post_to_bluesky_reply_thread tool calls - only if they were successful
                 elif message.tool_call.name == 'add_post_to_bluesky_reply_thread':
                     tool_call_id = message.tool_call.tool_call_id
-                    tool_status = tool_call_results.get(tool_call_id, 'unknown')
-
+                    tool_status = tool_call_results.get(
+                        tool_call_id, 'unknown')
+
                     if tool_status == 'success':
                         try:
                             args = json.loads(message.tool_call.arguments)
                             reply_text = args.get('text', '')
                             reply_lang = args.get('lang', 'en-US')
-
+
                             if reply_text:  # Only add if there's actual content
-                                reply_candidates.append((reply_text, reply_lang))
-                                logger.info(f"Found successful add_post_to_bluesky_reply_thread candidate: {reply_text[:50]}... (lang: {reply_lang})")
+                                reply_candidates.append(
+                                    (reply_text, reply_lang))
+                                logger.info(
+                                    f"Found successful add_post_to_bluesky_reply_thread candidate: {reply_text[:50]}... (lang: {reply_lang})")
                         except json.JSONDecodeError as e:
-                            logger.error(f"Failed to parse tool call arguments: {e}")
+                            logger.error(
+                                f"Failed to parse tool call arguments: {e}")
                     elif tool_status == 'error':
-                        logger.info(f"⚠️ Skipping failed add_post_to_bluesky_reply_thread tool call (status: error)")
+                        logger.info(
+                            f"⚠️ Skipping failed add_post_to_bluesky_reply_thread tool call (status: error)")
                     else:
-                        logger.warning(f"⚠️ Skipping add_post_to_bluesky_reply_thread tool call with unknown status: {tool_status}")
+                        logger.warning(
+                            f"⚠️ Skipping add_post_to_bluesky_reply_thread tool call with unknown status: {tool_status}")

         # Check for conflicting tool calls
         if reply_candidates and ignored_notification:
-            logger.error(f"⚠️ CONFLICT: Agent called both add_post_to_bluesky_reply_thread and ignore_notification!")
-            logger.error(f"Reply candidates: {len(reply_candidates)}, Ignore reason: {ignore_reason}")
+            logger.error(
+                f"⚠️ CONFLICT: Agent called both add_post_to_bluesky_reply_thread and ignore_notification!")
+            logger.error(
+                f"Reply candidates: {len(reply_candidates)}, Ignore reason: {ignore_reason}")
             logger.warning("Item will be left in queue for manual review")
             # Return False to keep in queue
             return False
-
+
         if reply_candidates:
             # Aggregate reply posts into a thread
             reply_messages = []
635
718
for text, lang in reply_candidates:
636
719
reply_messages.append(text)
637
720
reply_langs.append(lang)
638
-
721
+
639
722
# Use the first language for the entire thread (could be enhanced later)
640
723
reply_lang = reply_langs[0] if reply_langs else 'en-US'
641
-
642
-
logger.info(f"Found {len(reply_candidates)} add_post_to_bluesky_reply_thread calls, building thread")
643
-
724
+
725
+
logger.info(
726
+
f"Found {len(reply_candidates)} add_post_to_bluesky_reply_thread calls, building thread")
727
+
644
728
# Print the generated reply for testing
645
729
print(f"\n=== GENERATED REPLY THREAD ===")
646
730
print(f"To: @{author_handle}")
···
660
744
else:
661
745
if len(reply_messages) == 1:
662
746
# Single reply - use existing function
663
-
cleaned_text = bsky_utils.remove_outside_quotes(reply_messages[0])
664
-
logger.info(f"Sending single reply: {cleaned_text[:50]}... (lang: {reply_lang})")
747
+
cleaned_text = bsky_utils.remove_outside_quotes(
748
+
reply_messages[0])
749
+
logger.info(
750
+
f"Sending single reply: {cleaned_text[:50]}... (lang: {reply_lang})")
665
751
response = bsky_utils.reply_to_notification(
666
752
client=atproto_client,
667
753
notification=notification_data,
···
                     )
                 else:
                     # Multiple replies - use new threaded function
-                    cleaned_messages = [bsky_utils.remove_outside_quotes(msg) for msg in reply_messages]
-                    logger.info(f"Sending threaded reply with {len(cleaned_messages)} messages (lang: {reply_lang})")
+                    cleaned_messages = [bsky_utils.remove_outside_quotes(
+                        msg) for msg in reply_messages]
+                    logger.info(
+                        f"Sending threaded reply with {len(cleaned_messages)} messages (lang: {reply_lang})")
                     response = bsky_utils.reply_with_thread_to_notification(
                         client=atproto_client,
                         notification=notification_data,
···
         else:
             # Check if notification was explicitly ignored
             if ignored_notification:
-                logger.info(f"Notification from @{author_handle} was explicitly ignored (category: {ignore_category})")
+                logger.info(
+                    f"Notification from @{author_handle} was explicitly ignored (category: {ignore_category})")
                 return "ignored"
             else:
-                logger.warning(f"No add_post_to_bluesky_reply_thread tool calls found for mention from @{author_handle}, moving to no_reply folder")
+                logger.warning(
+                    f"No add_post_to_bluesky_reply_thread tool calls found for mention from @{author_handle}, moving to no_reply folder")
                 return "no_reply"

     except Exception as e:
701
791
# Detach user blocks after agent response (success or failure)
702
792
if 'attached_handles' in locals() and attached_handles:
703
793
try:
704
-
logger.info(f"Detaching user blocks for handles: {attached_handles}")
705
-
detach_result = detach_user_blocks(attached_handles, void_agent)
794
+
logger.info(
795
+
f"Detaching user blocks for handles: {attached_handles}")
796
+
detach_result = detach_user_blocks(
797
+
attached_handles, void_agent)
706
798
logger.debug(f"Detach result: {detach_result}")
707
799
except Exception as detach_error:
708
800
logger.warning(f"Failed to detach user blocks: {detach_error}")
···
     notif_hash = hashlib.sha256(notif_json.encode()).hexdigest()[:16]

     # Determine priority based on author handle
-    author_handle = getattr(notification.author, 'handle', '') if hasattr(notification, 'author') else ''
+    author_handle = getattr(notification.author, 'handle', '') if hasattr(
+        notification, 'author') else ''
     priority_users = queue_config['priority_users']
     priority_prefix = "0_" if author_handle in priority_users else "1_"

···
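Why the prefix works: queue files are processed in sorted-filename order (see load_and_process_queued_notifications below), and "0_" sorts before "1_", so priority-user files always come first. A self-contained illustration with invented filenames:

files = [
    "1_1699999999_abcd.json",   # regular user, older
    "0_1700000001_ef01.json",   # priority user
    "1_1700000000_9f3a.json",   # regular user, newer
]
# Lexicographic sort puts every 0_-prefixed file ahead of the 1_ files,
# and within each class, older timestamps first.
print(sorted(files))
# -> ['0_1700000001_ef01.json', '1_1699999999_abcd.json', '1_1700000000_9f3a.json']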
             with open(existing_file, 'r') as f:
                 existing_data = json.load(f)
                 if existing_data.get('uri') == notification.uri:
-                    logger.debug(f"Notification already queued (URI: {notification.uri})")
+                    logger.debug(
+                        f"Notification already queued (URI: {notification.uri})")
                     return False
         except:
             continue
···
     try:
         # Get all JSON files in queue directory (excluding processed_notifications.json)
         # Files are sorted by name, which puts priority files first (0_ prefix before 1_ prefix)
-        queue_files = sorted([f for f in QUEUE_DIR.glob("*.json") if f.name != "processed_notifications.json"])
+        queue_files = sorted([f for f in QUEUE_DIR.glob(
+            "*.json") if f.name != "processed_notifications.json"])

         if not queue_files:
             return

         logger.info(f"Processing {len(queue_files)} queued notifications")
-
+
         # Log current statistics
         elapsed_time = time.time() - start_time
         total_messages = sum(message_counters.values())
-        messages_per_minute = (total_messages / elapsed_time * 60) if elapsed_time > 0 else 0
-
-        logger.info(f"📊 Session stats: {total_messages} total messages ({message_counters['mentions']} mentions, {message_counters['replies']} replies, {message_counters['follows']} follows) | {messages_per_minute:.1f} msg/min")
+        messages_per_minute = (
+            total_messages / elapsed_time * 60) if elapsed_time > 0 else 0
+
+        logger.info(
+            f"📊 Session stats: {total_messages} total messages ({message_counters['mentions']} mentions, {message_counters['replies']} replies, {message_counters['follows']} follows) | {messages_per_minute:.1f} msg/min")

         for i, filepath in enumerate(queue_files, 1):
-            logger.info(f"Processing queue file {i}/{len(queue_files)}: {filepath.name}")
+            logger.info(
+                f"Processing queue file {i}/{len(queue_files)}: {filepath.name}")
             try:
                 # Load notification data
                 with open(filepath, 'r') as f:
···
                 # Process based on type using dict data directly
                 success = False
                 if notif_data['reason'] == "mention":
-                    success = process_mention(void_agent, atproto_client, notif_data, queue_filepath=filepath, testing_mode=testing_mode)
+                    success = process_mention(
+                        void_agent, atproto_client, notif_data, queue_filepath=filepath, testing_mode=testing_mode)
                     if success:
                         message_counters['mentions'] += 1
                 elif notif_data['reason'] == "reply":
-                    success = process_mention(void_agent, atproto_client, notif_data, queue_filepath=filepath, testing_mode=testing_mode)
+                    success = process_mention(
+                        void_agent, atproto_client, notif_data, queue_filepath=filepath, testing_mode=testing_mode)
                     if success:
                         message_counters['replies'] += 1
                 elif notif_data['reason'] == "follow":
                     author_handle = notif_data['author']['handle']
-                    author_display_name = notif_data['author'].get('display_name', 'no display name')
+                    author_display_name = notif_data['author'].get(
+                        'display_name', 'no display name')
                     follow_update = f"@{author_handle} ({author_display_name}) started following you."
-                    logger.info(f"Notifying agent about new follower: @{author_handle}")
+                    logger.info(
+                        f"Notifying agent about new follower: @{author_handle}")
                     CLIENT.agents.messages.create(
-                        agent_id = void_agent.id,
-                        messages = [{"role":"user", "content": f"Update: {follow_update}"}]
+                        agent_id=void_agent.id,
+                        messages=[
+                            {"role": "user", "content": f"Update: {follow_update}"}]
                     )
                     success = True  # Follow updates are always successful
                     if success:
···
                     if success:
                         message_counters['reposts_skipped'] += 1
                 else:
-                    logger.warning(f"Unknown notification type: {notif_data['reason']}")
+                    logger.warning(
+                        f"Unknown notification type: {notif_data['reason']}")
                     success = True  # Remove unknown types from queue

                 # Handle file based on processing result
                 if success:
                     if testing_mode:
-                        logger.info(f"🧪 TESTING MODE: Keeping queue file: {filepath.name}")
+                        logger.info(
+                            f"🧪 TESTING MODE: Keeping queue file: {filepath.name}")
                     else:
                         filepath.unlink()
-                        logger.info(f"✅ Successfully processed and removed: {filepath.name}")
-
+                        logger.info(
+                            f"✅ Successfully processed and removed: {filepath.name}")
+
                     # Mark as processed to avoid reprocessing
                     processed_uris = load_processed_notifications()
                     processed_uris.add(notif_data['uri'])
                     save_processed_notifications(processed_uris)
-
+
                 elif success is None:  # Special case for moving to error directory
                     error_path = QUEUE_ERROR_DIR / filepath.name
                     filepath.rename(error_path)
-                    logger.warning(f"❌ Moved {filepath.name} to errors directory")
-
+                    logger.warning(
+                        f"❌ Moved {filepath.name} to errors directory")
+
                     # Also mark as processed to avoid retrying
                     processed_uris = load_processed_notifications()
                     processed_uris.add(notif_data['uri'])
                     save_processed_notifications(processed_uris)
-
+
                 elif success == "no_reply":  # Special case for moving to no_reply directory
                     no_reply_path = QUEUE_NO_REPLY_DIR / filepath.name
                     filepath.rename(no_reply_path)
-                    logger.info(f"📭 Moved {filepath.name} to no_reply directory")
-
+                    logger.info(
+                        f"📭 Moved {filepath.name} to no_reply directory")
+
                     # Also mark as processed to avoid retrying
                     processed_uris = load_processed_notifications()
                     processed_uris.add(notif_data['uri'])
                     save_processed_notifications(processed_uris)
-
+
                 elif success == "ignored":  # Special case for explicitly ignored notifications
                     # For ignored notifications, we just delete them (not move to no_reply)
                     filepath.unlink()
-                    logger.info(f"🚫 Deleted ignored notification: {filepath.name}")
-
+                    logger.info(
+                        f"🚫 Deleted ignored notification: {filepath.name}")
+
                     # Also mark as processed to avoid retrying
                     processed_uris = load_processed_notifications()
                     processed_uris.add(notif_data['uri'])
                     save_processed_notifications(processed_uris)
-
+
                 else:
-                    logger.warning(f"⚠️ Failed to process {filepath.name}, keeping in queue for retry")
+                    logger.warning(
+                        f"⚠️ Failed to process {filepath.name}, keeping in queue for retry")

             except Exception as e:
-                logger.error(f"💥 Error processing queued notification {filepath.name}: {e}")
+                logger.error(
+                    f"💥 Error processing queued notification {filepath.name}: {e}")
                 # Keep the file for retry later

     except Exception as e:
···
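For reference, process_mention's documented return protocol and the queue action each outcome receives in the ladder above (a restatement as data, drawn from the docstring and branches; not code from the diff):

QUEUE_ACTIONS = {
    True:       "processed: delete file, mark URI processed",
    False:      "retryable failure: keep file for a later cycle",
    None:       "permanent failure (e.g. 413): move to errors/, mark processed",
    "no_reply": "agent produced no reply: move to no_reply/, mark processed",
    "ignored":  "agent called ignore_notification: delete file, mark processed",
}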
         all_notifications = []
         cursor = None
         page_count = 0
-        max_pages = bot_config['max_notification_pages']  # Safety limit to prevent infinite loops
-
+        # Safety limit to prevent infinite loops
+        max_pages = bot_config['max_notification_pages']
+
         logger.info("Fetching all unread notifications...")
-
+
         while page_count < max_pages:
             try:
                 # Fetch notifications page
···
                     notifications_response = atproto_client.app.bsky.notification.list_notifications(
                         params={'limit': 100}
                     )
-
+
                 page_count += 1
                 page_notifications = notifications_response.notifications
-
+
                 # Count unread notifications in this page
-                unread_count = sum(1 for n in page_notifications if not n.is_read and n.reason != "like")
-                logger.debug(f"Page {page_count}: {len(page_notifications)} notifications, {unread_count} unread (non-like)")
-
+                unread_count = sum(
+                    1 for n in page_notifications if not n.is_read and n.reason != "like")
+                logger.debug(
+                    f"Page {page_count}: {len(page_notifications)} notifications, {unread_count} unread (non-like)")
+
                 # Add all notifications to our list
                 all_notifications.extend(page_notifications)
-
+
                 # Check if we have more pages
                 if hasattr(notifications_response, 'cursor') and notifications_response.cursor:
                     cursor = notifications_response.cursor
                     # If this page had no unread notifications, we can stop
                     if unread_count == 0:
-                        logger.info(f"No more unread notifications found after {page_count} pages")
+                        logger.info(
+                            f"No more unread notifications found after {page_count} pages")
                         break
                 else:
                     # No more pages
-                    logger.info(f"Fetched all notifications across {page_count} pages")
+                    logger.info(
+                        f"Fetched all notifications across {page_count} pages")
                     break
-
+
             except Exception as e:
                 error_str = str(e)
-                logger.error(f"Error fetching notifications page {page_count}: {e}")
-
+                logger.error(
+                    f"Error fetching notifications page {page_count}: {e}")
+
                 # Handle specific API errors
                 if 'rate limit' in error_str.lower():
-                    logger.warning("Rate limit hit while fetching notifications, will retry next cycle")
+                    logger.warning(
+                        "Rate limit hit while fetching notifications, will retry next cycle")
                     break
                 elif '401' in error_str or 'unauthorized' in error_str.lower():
                     logger.error("Authentication error, re-raising exception")
                     raise
                 else:
                     # For other errors, try to continue with what we have
-                    logger.warning("Continuing with notifications fetched so far")
+                    logger.warning(
+                        "Continuing with notifications fetched so far")
                     break

         # Queue all unread notifications (except likes)
···

         # Mark all notifications as seen immediately after queuing (unless in testing mode)
         if testing_mode:
-            logger.info("🧪 TESTING MODE: Skipping marking notifications as seen")
+            logger.info(
+                "🧪 TESTING MODE: Skipping marking notifications as seen")
         else:
             if new_count > 0:
-                atproto_client.app.bsky.notification.update_seen({'seen_at': last_seen_at})
-                logger.info(f"Queued {new_count} new notifications and marked as seen")
+                atproto_client.app.bsky.notification.update_seen(
+                    {'seen_at': last_seen_at})
+                logger.info(
+                    f"Queued {new_count} new notifications and marked as seen")
             else:
                 logger.debug("No new notifications to queue")

         # Now process the entire queue (old + new notifications)
-        load_and_process_queued_notifications(void_agent, atproto_client, testing_mode)
+        load_and_process_queued_notifications(
+            void_agent, atproto_client, testing_mode)

     except Exception as e:
         logger.error(f"Error processing notifications: {e}")
···

 def main():
     # Parse command line arguments
-    parser = argparse.ArgumentParser(description='Void Bot - Bluesky autonomous agent')
-    parser.add_argument('--test', action='store_true', help='Run in testing mode (no messages sent, queue files preserved)')
-    parser.add_argument('--no-git', action='store_true', help='Skip git operations when exporting agent state')
+    parser = argparse.ArgumentParser(
+        description='Void Bot - Bluesky autonomous agent')
+    parser.add_argument('--test', action='store_true',
+                        help='Run in testing mode (no messages sent, queue files preserved)')
+    parser.add_argument('--no-git', action='store_true',
+                        help='Skip git operations when exporting agent state')
     args = parser.parse_args()
-
+
     global TESTING_MODE
     TESTING_MODE = args.test
-
+
     # Store no-git flag globally for use in export_agent_state calls
     global SKIP_GIT
     SKIP_GIT = args.no_git
-
+
     if TESTING_MODE:
         logger.info("🧪 === RUNNING IN TESTING MODE ===")
         logger.info(" - No messages will be sent to Bluesky")
···
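Assumed invocations for the two flags defined above (script name taken from the diff header):

python bsky.py            # normal operation
python bsky.py --test     # dry run: nothing posted, queue files preserved
python bsky.py --no-git   # export agent state without git staging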
     logger.info("=== STARTING VOID BOT ===")
     void_agent = initialize_void()
     logger.info(f"Void agent initialized: {void_agent.id}")
-
+
     # Check if agent has required tools
     if hasattr(void_agent, 'tools') and void_agent.tools:
         tool_names = [tool.name for tool in void_agent.tools]
         # Check for bluesky-related tools
-        bluesky_tools = [name for name in tool_names if 'bluesky' in name.lower() or 'reply' in name.lower()]
+        bluesky_tools = [name for name in tool_names if 'bluesky' in name.lower(
+        ) or 'reply' in name.lower()]
         if not bluesky_tools:
-            logger.warning("No Bluesky-related tools found! Agent may not be able to reply.")
+            logger.warning(
+                "No Bluesky-related tools found! Agent may not be able to reply.")
     else:
         logger.warning("Agent has no tools registered!")

     # Initialize Bluesky client
+    logger.debug("Connecting to Bluesky")
     atproto_client = bsky_utils.default_login()
     logger.info("Connected to Bluesky")

     # Main loop
-    logger.info(f"Starting notification monitoring, checking every {FETCH_NOTIFICATIONS_DELAY_SEC} seconds")
+    logger.info(
+        f"Starting notification monitoring, checking every {FETCH_NOTIFICATIONS_DELAY_SEC} seconds")

     cycle_count = 0
     while True:
···
             # Log cycle completion with stats
             elapsed_time = time.time() - start_time
             total_messages = sum(message_counters.values())
-            messages_per_minute = (total_messages / elapsed_time * 60) if elapsed_time > 0 else 0
-
+            messages_per_minute = (
+                total_messages / elapsed_time * 60) if elapsed_time > 0 else 0
+
             if total_messages > 0:
-                logger.info(f"Cycle {cycle_count} complete. Session totals: {total_messages} messages ({message_counters['mentions']} mentions, {message_counters['replies']} replies) | {messages_per_minute:.1f} msg/min")
+                logger.info(
+                    f"Cycle {cycle_count} complete. Session totals: {total_messages} messages ({message_counters['mentions']} mentions, {message_counters['replies']} replies) | {messages_per_minute:.1f} msg/min")
             sleep(FETCH_NOTIFICATIONS_DELAY_SEC)

         except KeyboardInterrupt:
             # Final stats
             elapsed_time = time.time() - start_time
             total_messages = sum(message_counters.values())
-            messages_per_minute = (total_messages / elapsed_time * 60) if elapsed_time > 0 else 0
-
+            messages_per_minute = (
+                total_messages / elapsed_time * 60) if elapsed_time > 0 else 0
+
             logger.info("=== BOT STOPPED BY USER ===")
-            logger.info(f"📊 Final session stats: {total_messages} total messages processed in {elapsed_time/60:.1f} minutes")
+            logger.info(
+                f"📊 Final session stats: {total_messages} total messages processed in {elapsed_time/60:.1f} minutes")
             logger.info(f" - {message_counters['mentions']} mentions")
             logger.info(f" - {message_counters['replies']} replies")
             logger.info(f" - {message_counters['follows']} follows")
-            logger.info(f" - {message_counters['reposts_skipped']} reposts skipped")
-            logger.info(f" - Average rate: {messages_per_minute:.1f} messages/minute")
+            logger.info(
+                f" - {message_counters['reposts_skipped']} reposts skipped")
+            logger.info(
+                f" - Average rate: {messages_per_minute:.1f} messages/minute")
             break
         except Exception as e:
             logger.error(f"=== ERROR IN MAIN LOOP CYCLE {cycle_count} ===")
             logger.error(f"Error details: {e}")
             # Wait a bit longer on errors
-            logger.info(f"Sleeping for {FETCH_NOTIFICATIONS_DELAY_SEC * 2} seconds due to error...")
+            logger.info(
+                f"Sleeping for {FETCH_NOTIFICATIONS_DELAY_SEC * 2} seconds due to error...")
             sleep(FETCH_NOTIFICATIONS_DELAY_SEC * 2)

bsky_utils.py  (+79 −47)
···
+import json
+import yaml
+import dotenv
 import os
 import logging
 from typing import Optional, Dict, Any, List
···
 logger = logging.getLogger("bluesky_session_handler")

 # Load the environment variables
-import dotenv
 dotenv.load_dotenv(override=True)

-import yaml
-import json

 # Strip fields. A list of fields to remove from a JSON object
 STRIP_FIELDS = [
···
     "mime_type",
     "size",
 ]
+
+
 def convert_to_basic_types(obj):
     """Convert complex Python objects to basic types for JSON/YAML serialization."""
     if hasattr(obj, '__dict__'):
···
 def flatten_thread_structure(thread_data):
     """
     Flatten a nested thread structure into a list while preserving all data.
-
+
     Args:
         thread_data: The thread data from get_post_thread
-
+
     Returns:
         Dict with 'posts' key containing a list of posts in chronological order
     """
     posts = []
-
+
     def traverse_thread(node):
         """Recursively traverse the thread structure to collect posts."""
         if not node:
             return
-
+
         # If this node has a parent, traverse it first (to maintain chronological order)
         if hasattr(node, 'parent') and node.parent:
             traverse_thread(node.parent)
-
+
         # Then add this node's post
         if hasattr(node, 'post') and node.post:
             # Convert to dict if needed to ensure we can process it
···
                 post_dict = node.post.copy()
             else:
                 post_dict = {}
-
+
             posts.append(post_dict)
-
+
     # Handle the thread structure
     if hasattr(thread_data, 'thread'):
         # Start from the main thread node
         traverse_thread(thread_data.thread)
     elif hasattr(thread_data, '__dict__') and 'thread' in thread_data.__dict__:
         traverse_thread(thread_data.__dict__['thread'])
-
+
     # Return a simple structure with posts list
     return {'posts': posts}

···
     """
     # First flatten the thread structure to avoid deep nesting
     flattened = flatten_thread_structure(thread)
-
+
     # Convert complex objects to basic types
     basic_thread = convert_to_basic_types(flattened)

···
     return yaml.dump(cleaned_thread, indent=2, allow_unicode=True, default_flow_style=False)


-
-
-
-
-
 def get_session(username: str) -> Optional[str]:
     try:
         with open(f"session_{username}.txt", encoding="UTF-8") as f:
···
     except FileNotFoundError:
         logger.debug(f"No existing session found for {username}")
         return None
+

 def save_session(username: str, session_string: str) -> None:
     with open(f"session_{username}.txt", "w", encoding="UTF-8") as f:
         f.write(session_string)
     logger.debug(f"Session saved for {username}")

+
 def on_session_change(username: str, event: SessionEvent, session: Session) -> None:
     logger.debug(f"Session changed: {event} {repr(session)}")
     if event in (SessionEvent.CREATE, SessionEvent.REFRESH):
         logger.debug(f"Saving changed session for {username}")
         save_session(username, session.export())
+

 def init_client(username: str, password: str, pds_uri: str = "https://bsky.social") -> Client:
     if pds_uri is None:
···
         password = config['password']
         pds_uri = config['pds_uri']
     except (ImportError, FileNotFoundError, KeyError) as e:
-        logger.warning(f"Could not load from config file ({e}), falling back to environment variables")
+        logger.warning(
+            f"Could not load from config file ({e}), falling back to environment variables")
         username = os.getenv("BSKY_USERNAME")
         password = os.getenv("BSKY_PASSWORD")
         pds_uri = os.getenv("PDS_URI", "https://bsky.social")
···

     return init_client(username, password, pds_uri)

+
 def remove_outside_quotes(text: str) -> str:
     """
     Remove outside double quotes from response text.
-
+
     Only handles double quotes to avoid interfering with contractions:
     - Double quotes: "text" → text
     - Preserves single quotes and internal quotes
-
+
     Args:
         text: The text to process
-
+
     Returns:
         Text with outside double quotes removed
     """
     if not text or len(text) < 2:
         return text
-
+
     text = text.strip()
-
+
     # Only remove double quotes from start and end
     if text.startswith('"') and text.endswith('"'):
         return text[1:-1]
-
+
     return text
+

 def reply_to_post(client: Client, text: str, reply_to_uri: str, reply_to_cid: str, root_uri: Optional[str] = None, root_cid: Optional[str] = None, lang: Optional[str] = None) -> Dict[str, Any]:
     """
···
         The response from sending the post
     """
     import re
-
+
     # If root is not provided, this is a reply to the root post
     if root_uri is None:
         root_uri = reply_to_uri
         root_cid = reply_to_cid

     # Create references for the reply
-    parent_ref = models.create_strong_ref(models.ComAtprotoRepoStrongRef.Main(uri=reply_to_uri, cid=reply_to_cid))
-    root_ref = models.create_strong_ref(models.ComAtprotoRepoStrongRef.Main(uri=root_uri, cid=root_cid))
+    parent_ref = models.create_strong_ref(
+        models.ComAtprotoRepoStrongRef.Main(uri=reply_to_uri, cid=reply_to_cid))
+    root_ref = models.create_strong_ref(
+        models.ComAtprotoRepoStrongRef.Main(uri=root_uri, cid=root_cid))

     # Parse rich text facets (mentions and URLs)
     facets = []
     text_bytes = text.encode("UTF-8")
-
+
     # Parse mentions - fixed to handle @ at start of text
     mention_regex = rb"(?:^|[$|\W])(@([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)"
-
+
     for m in re.finditer(mention_regex, text_bytes):
         handle = m.group(1)[1:].decode("UTF-8")  # Remove @ prefix
         # Adjust byte positions to account for the optional prefix
···
                     byteStart=mention_start,
                     byteEnd=mention_end
                 ),
-                features=[models.AppBskyRichtextFacet.Mention(did=resolve_resp.did)]
+                features=[models.AppBskyRichtextFacet.Mention(
+                    did=resolve_resp.did)]
             )
         )
     except Exception as e:
-        logger.debug(f"Failed to resolve handle {handle}: {e}")
+        # Handle specific error cases
+        error_str = str(e)
+        if 'Could not find user info' in error_str or 'InvalidRequest' in error_str:
+            logger.warning(
+                f"User @{handle} not found (account may be deleted/suspended), skipping mention facet")
+        elif 'BadRequestError' in error_str:
+            logger.warning(
+                f"Bad request when resolving @{handle}, skipping mention facet: {e}")
+        else:
+            logger.debug(f"Failed to resolve handle @{handle}: {e}")
         continue
-
+
     # Parse URLs - fixed to handle URLs at start of text
     url_regex = rb"(?:^|[$|\W])(https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*[-a-zA-Z0-9@%_\+~#//=])?)"
-
+
     for m in re.finditer(url_regex, text_bytes):
         url = m.group(1).decode("UTF-8")
         # Adjust byte positions to account for the optional prefix
···
     if facets:
         response = client.send_post(
             text=text,
-            reply_to=models.AppBskyFeedPost.ReplyRef(parent=parent_ref, root=root_ref),
+            reply_to=models.AppBskyFeedPost.ReplyRef(
+                parent=parent_ref, root=root_ref),
             facets=facets,
             langs=[lang] if lang else None
         )
     else:
         response = client.send_post(
             text=text,
-            reply_to=models.AppBskyFeedPost.ReplyRef(parent=parent_ref, root=root_ref),
+            reply_to=models.AppBskyFeedPost.ReplyRef(
+                parent=parent_ref, root=root_ref),
             langs=[lang] if lang else None
         )
···
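A hypothetical call, for orientation; the URI and CID values are placeholders, not real records. Omitting root_uri/root_cid makes the function treat the target post as the thread root:

response = reply_to_post(
    client=client,
    text="Thanks for the mention!",
    reply_to_uri="at://did:plc:EXAMPLE/app.bsky.feed.post/RKEY",  # placeholder
    reply_to_cid="bafy-EXAMPLE-CID",                              # placeholder
    lang="en",
)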
         The thread data or None if not found
     """
     try:
-        thread = client.app.bsky.feed.get_post_thread({'uri': uri, 'parent_height': 60, 'depth': 10})
+        thread = client.app.bsky.feed.get_post_thread(
+            {'uri': uri, 'parent_height': 60, 'depth': 10})
         return thread
     except Exception as e:
-        logger.error(f"Error fetching post thread: {e}")
+        error_str = str(e)
+        # Handle specific error cases more gracefully
+        if 'Could not find user info' in error_str or 'InvalidRequest' in error_str:
+            logger.warning(
+                f"User account not found for post URI {uri} (account may be deleted/suspended)")
+        elif 'NotFound' in error_str or 'Post not found' in error_str:
+            logger.warning(f"Post not found for URI {uri}")
+        elif 'BadRequestError' in error_str:
+            logger.warning(f"Bad request error for URI {uri}: {e}")
+        else:
+            logger.error(f"Error fetching post thread: {e}")
         return None

···
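Callers are expected to branch on the None sentinel rather than catch exceptions themselves, e.g. (a sketch; the attribute path follows the atproto getPostThread response shape, thread -> post):

thread = get_post_thread(client, post_uri)
if thread is None:
    ...  # failure already logged above; skip this notification
else:
    root_view = thread.thread.post  # view of the requested post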
         logger.error("Reply messages list cannot be empty")
         return None
     if len(reply_messages) > 15:
-        logger.error(f"Cannot send more than 15 reply messages (got {len(reply_messages)})")
+        logger.error(
+            f"Cannot send more than 15 reply messages (got {len(reply_messages)})")
         return None

     # Get the post URI and CID from the notification (handle both dict and object)
     if isinstance(notification, dict):
         post_uri = notification.get('uri')
···

     # Get the thread to find the root post
     thread_data = get_post_thread(client, post_uri)

     root_uri = post_uri
     root_cid = post_cid

···
     responses = []
     current_parent_uri = post_uri
     current_parent_cid = post_cid

     for i, message in enumerate(reply_messages):
-        logger.info(f"Sending reply {i+1}/{len(reply_messages)}: {message[:50]}...")
+        logger.info(
+            f"Sending reply {i+1}/{len(reply_messages)}: {message[:50]}...")

         # Send this reply
         response = reply_to_post(
             client=client,
···
             root_cid=root_cid,
             lang=lang
         )

         if not response:
-            logger.error(f"Failed to send reply {i+1}, posting system failure message")
+            logger.error(
+                f"Failed to send reply {i+1}, posting system failure message")
             # Try to post a system failure message
             failure_response = reply_to_post(
                 client=client,
···
                 current_parent_uri = failure_response.uri
                 current_parent_cid = failure_response.cid
             else:
-                logger.error("Could not even send system failure message, stopping thread")
+                logger.error(
+                    "Could not even send system failure message, stopping thread")
                 return responses if responses else None
         else:
             responses.append(response)
···
         if i < len(reply_messages) - 1:  # Not the last message
             current_parent_uri = response.uri
             current_parent_cid = response.cid

     logger.info(f"Successfully sent {len(responses)} threaded replies")
     return responses
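The chaining invariant above, in miniature: the root reference stays fixed for every message while the parent advances to each newly created post, which is what makes the replies render as one thread. A sketch using the reply_to_post signature shown earlier:

parent_uri, parent_cid = post_uri, post_cid
for message in reply_messages:
    resp = reply_to_post(client, message, parent_uri, parent_cid,
                         root_uri=root_uri, root_cid=root_cid)
    if resp:
        # the next message replies to the post we just created
        parent_uri, parent_cid = resp.uri, resp.cid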
+16 -8 register_tools.py
···
 import sys
 import logging
 from typing import List
-from dotenv import load_dotenv
 from letta_client import Letta
 from rich.console import Console
 from rich.table import Table
+from config_loader import get_config, get_letta_config, get_agent_config

 # Import standalone functions and their schemas
 from tools.search import search_bluesky_posts, SearchArgs
···
 from tools.thread import add_post_to_bluesky_reply_thread, ReplyThreadPostArgs
 from tools.ignore import ignore_notification, IgnoreNotificationArgs

-load_dotenv()
+config = get_config()
+letta_config = get_letta_config()
+agent_config = get_agent_config()
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 console = Console()
···
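The keys read below (letta_config['api_key'], agent_config['name']) suggest a config file shaped roughly like the following; the exact layout lives in config_loader, so this YAML is only an assumed illustration:

letta:
  api_key: YOUR-LETTA-API-KEY   # placeholder value
agent:
  name: void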
 ]


-def register_tools(agent_name: str = "void", tools: List[str] = None):
+def register_tools(agent_name: str = None, tools: List[str] = None):
     """Register tools with a Letta agent.

     Args:
-        agent_name: Name of the agent to attach tools to
+        agent_name: Name of the agent to attach tools to. If None, uses config default.
         tools: List of tool names to register. If None, registers all tools.
     """
+    # Use agent name from config if not provided
+    if agent_name is None:
+        agent_name = agent_config['name']
+
     try:
-        # Initialize Letta client with API key
-        client = Letta(token=os.environ["LETTA_API_KEY"])
+        # Initialize Letta client with API key from config
+        client = Letta(token=letta_config['api_key'])

         # Find the agent
         agents = client.agents.list()
201
207
import argparse
202
208
203
209
parser = argparse.ArgumentParser(description="Register Void tools with a Letta agent")
204
-
parser.add_argument("agent", nargs="?", default="void", help="Agent name (default: void)")
210
+
parser.add_argument("agent", nargs="?", default=None, help=f"Agent name (default: {agent_config['name']})")
205
211
parser.add_argument("--tools", nargs="+", help="Specific tools to register (default: all)")
206
212
parser.add_argument("--list", action="store_true", help="List available tools")
207
213
···
     if args.list:
         list_available_tools()
     else:
-        console.print(f"\n[bold]Registering tools for agent: {args.agent}[/bold]\n")
+        # Use config default if no agent specified
+        agent_name = args.agent if args.agent is not None else agent_config['name']
+        console.print(f"\n[bold]Registering tools for agent: {agent_name}[/bold]\n")
         register_tools(args.agent, args.tools)
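Typical invocations, given the argparse setup above (my_agent is a placeholder name; the tool names assume the registry keys match the imported function names):

python register_tools.py --list
python register_tools.py --tools search_bluesky_posts ignore_notification
python register_tools.py my_agent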
+23 requirements.txt
···
+# Core dependencies for Void Bot
+
+# Configuration and utilities
+PyYAML>=6.0.2
+rich>=14.0.0
+python-dotenv>=1.0.0
+
+# Letta API client
+letta-client>=0.1.198
+
+# AT Protocol (Bluesky) client
+atproto>=0.0.54
+
+# HTTP client for API calls
+httpx>=0.28.1
+httpx-sse>=0.4.0
+requests>=2.31.0
+
+# Data validation
+pydantic>=2.11.7
+
+# Async support
+anyio>=4.9.0
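Note that these pins are minimums, not exact versions; a standard pip install -r requirements.txt inside the project's virtual environment satisfies them.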