bsky.py  (+105 -20)
···
 import subprocess
 from pathlib import Path
 from datetime import datetime
+from collections import defaultdict
+import time

 from utils import (
     upsert_block,
···
     level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
 )
 logger = logging.getLogger("void_bot")
-logger.setLevel(logging.INFO)
+logger.setLevel(logging.DEBUG)

-# Set httpx logging to DEBUG to reduce noise
-logging.getLogger("httpx").setLevel(logging.DEBUG)
+# Create a separate logger for prompts (set to WARNING to hide by default)
+prompt_logger = logging.getLogger("void_bot.prompts")
+prompt_logger.setLevel(logging.WARNING)  # Change to DEBUG if you want to see prompts
+
+# Disable httpx logging completely
+logging.getLogger("httpx").setLevel(logging.CRITICAL)


 # Create a client with extended timeout for LLM operations
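Note on the logging change above: `void_bot.prompts` is a child of the `void_bot` logger, so giving it its own WARNING level filters the verbose prompt dumps even while the parent stays at DEBUG. A minimal standalone sketch of that behavior, using only the standard-library `logging` module:

```python
import logging

logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger("void_bot")
logger.setLevel(logging.DEBUG)

# Child logger: its own level is checked first, so DEBUG records are dropped here
prompt_logger = logging.getLogger("void_bot.prompts")
prompt_logger.setLevel(logging.WARNING)

logger.debug("emitted: parent logger is at DEBUG")
prompt_logger.debug("suppressed: child logger is at WARNING")

prompt_logger.setLevel(logging.DEBUG)  # one-line toggle to audit full prompts
prompt_logger.debug("now emitted")
```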
···

 # Maximum number of processed notifications to track
 MAX_PROCESSED_NOTIFICATIONS = 10000
+
+# Message tracking counters
+message_counters = defaultdict(int)
+start_time = time.time()

 def export_agent_state(client, agent):
     """Export agent state to agent_archive/ (timestamped) and agents/ (current)."""
···
         logger.error(f"Failed to export agent: {e}")

 def initialize_void():
+    logger.info("Starting void agent initialization...")

     # Ensure that a shared zeitgeist block exists
+    logger.info("Creating/updating zeitgeist block...")
     zeigeist_block = upsert_block(
         CLIENT,
         label = "zeitgeist",
···
     )

     # Ensure that a shared void personality block exists
+    logger.info("Creating/updating void-persona block...")
     persona_block = upsert_block(
         CLIENT,
         label = "void-persona",
···
     )

     # Ensure that a shared void human block exists
+    logger.info("Creating/updating void-humans block...")
     human_block = upsert_block(
         CLIENT,
         label = "void-humans",
···
     )

     # Create the agent if it doesn't exist
+    logger.info("Creating/updating void agent...")
     void_agent = upsert_agent(
         CLIENT,
         name = "void",
···
     )

     # Export agent state
+    logger.info("Exporting agent state...")
     export_agent_state(CLIENT, void_agent)

     # Log agent details
···
         logger.error(f"Thread attributes: {thread.__dict__}")
         # Try to continue with a simple context
         thread_context = f"Error processing thread context: {str(yaml_error)}"
-
-    # print(thread_context)

     # Create a prompt for the Letta agent with thread context
     prompt = f"""You received a mention on Bluesky from @{author_handle} ({author_name or author_handle}).
···

     # Get response from Letta agent
     logger.info(f"Mention from @{author_handle}: {mention_text}")
-    logger.debug(f"Prompt being sent: {prompt}")

-    # Log the exact parameters being sent to Letta
-    logger.debug(f"Calling Letta API with agent_id: {void_agent.id}")
-    logger.debug(f"Message content length: {len(prompt)} characters")
+    # Log prompt details to separate logger
+    prompt_logger.debug(f"Full prompt being sent:\n{prompt}")
+
+    # Log concise prompt info to main logger
+    thread_handles_count = len(unique_handles)
+    logger.info(f"💬 Sending to LLM: @{author_handle} mention | msg: \"{mention_text[:50]}...\" | context: {len(thread_context)} chars, {thread_handles_count} users")

     try:
         message_response = CLIENT.agents.messages.create(
···

         # Extract the reply text from the agent's response
         reply_text = ""
-        for message in message_response.messages:
-            print(message)
+        logger.debug(f"Processing {len(message_response.messages)} response messages...")
+
+        for i, message in enumerate(message_response.messages, 1):
+            # Log concise message info instead of full object
+            msg_type = getattr(message, 'message_type', 'unknown')
+            if hasattr(message, 'reasoning') and message.reasoning:
+                logger.debug(f"  {i}. {msg_type}: {message.reasoning[:100]}...")
+            elif hasattr(message, 'tool_call') and message.tool_call:
+                tool_name = message.tool_call.name
+                logger.debug(f"  {i}. {msg_type}: {tool_name}")
+            elif hasattr(message, 'tool_return'):
+                tool_name = getattr(message, 'name', 'unknown_tool')
+                return_preview = str(message.tool_return)[:100] if message.tool_return else "None"
+                logger.debug(f"  {i}. {msg_type}: {tool_name} -> {return_preview}...")
+            elif hasattr(message, 'text'):
+                logger.debug(f"  {i}. {msg_type}: {message.text[:100]}...")
+            else:
+                logger.debug(f"  {i}. {msg_type}: <no content>")

             # Check if this is a ToolCallMessage with bluesky_reply tool
             if hasattr(message, 'tool_call') and message.tool_call:
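The loop above replaces a raw `print(message)` with duck-typed inspection: each Letta response message is summarized by whichever of `reasoning`, `tool_call`, `tool_return`, or `text` it carries. A self-contained sketch of the same dispatch pattern; the `SimpleNamespace` stand-ins are illustrative only, and the attribute names are assumed from the patch rather than the Letta SDK docs:

```python
from types import SimpleNamespace

# Hypothetical stand-ins for Letta SDK message objects
messages = [
    SimpleNamespace(message_type="reasoning_message", reasoning="deciding how to reply..."),
    SimpleNamespace(message_type="tool_call_message",
                    tool_call=SimpleNamespace(name="bluesky_reply")),
]

for i, message in enumerate(messages, 1):
    msg_type = getattr(message, 'message_type', 'unknown')
    if getattr(message, 'reasoning', None):
        print(f"  {i}. {msg_type}: {message.reasoning[:100]}")
    elif getattr(message, 'tool_call', None):
        print(f"  {i}. {msg_type}: {message.tool_call.name}")
    else:
        print(f"  {i}. {msg_type}: <no content>")
```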
···

 def load_and_process_queued_notifications(void_agent, atproto_client):
     """Load and process all notifications from the queue."""
+    logger.info("Loading queued notifications from disk...")
     try:
         # Get all JSON files in queue directory (excluding processed_notifications.json)
         queue_files = sorted([f for f in QUEUE_DIR.glob("*.json") if f.name != "processed_notifications.json"])

         if not queue_files:
-            logger.debug("No queued notifications to process")
+            logger.info("No queued notifications found")
             return

         logger.info(f"Processing {len(queue_files)} queued notifications")
+
+        # Log current statistics
+        elapsed_time = time.time() - start_time
+        total_messages = sum(message_counters.values())
+        messages_per_minute = (total_messages / elapsed_time * 60) if elapsed_time > 0 else 0
+
+        logger.info(f"📊 Session stats: {total_messages} total messages ({message_counters['mentions']} mentions, {message_counters['replies']} replies, {message_counters['follows']} follows) | {messages_per_minute:.1f} msg/min")

-        for filepath in queue_files:
+        for i, filepath in enumerate(queue_files, 1):
+            logger.info(f"Processing queue file {i}/{len(queue_files)}: {filepath.name}")
             try:
                 # Load notification data
                 with open(filepath, 'r') as f:
                     notif_data = json.load(f)

                 # Process based on type using dict data directly
+                logger.info(f"Processing {notif_data['reason']} from @{notif_data['author']['handle']}")
                 success = False
                 if notif_data['reason'] == "mention":
                     success = process_mention(void_agent, atproto_client, notif_data)
+                    if success:
+                        message_counters['mentions'] += 1
                 elif notif_data['reason'] == "reply":
                     success = process_mention(void_agent, atproto_client, notif_data)
+                    if success:
+                        message_counters['replies'] += 1
                 elif notif_data['reason'] == "follow":
                     author_handle = notif_data['author']['handle']
                     author_display_name = notif_data['author'].get('display_name', 'no display name')
                     follow_update = f"@{author_handle} ({author_display_name}) started following you."
+                    logger.info(f"Notifying agent about new follower: @{author_handle}")
                     CLIENT.agents.messages.create(
                         agent_id = void_agent.id,
                         messages = [{"role":"user", "content": f"Update: {follow_update}"}]
                     )
                     success = True  # Follow updates are always successful
+                    if success:
+                        message_counters['follows'] += 1
                 elif notif_data['reason'] == "repost":
                     logger.info(f"Skipping repost notification from @{notif_data['author']['handle']}")
                     success = True  # Skip reposts but mark as successful to remove from queue
+                    if success:
+                        message_counters['reposts_skipped'] += 1
                 else:
                     logger.warning(f"Unknown notification type: {notif_data['reason']}")
                     success = True  # Remove unknown types from queue
···
                 # Handle file based on processing result
                 if success:
                     filepath.unlink()
-                    logger.info(f"Processed and removed: {filepath.name}")
+                    logger.info(f"✅ Successfully processed and removed: {filepath.name}")

                     # Mark as processed to avoid reprocessing
                     processed_uris = load_processed_notifications()
···
                 elif success is None:  # Special case for moving to error directory
                     error_path = QUEUE_ERROR_DIR / filepath.name
                     filepath.rename(error_path)
-                    logger.warning(f"Moved {filepath.name} to errors directory")
+                    logger.warning(f"❌ Moved {filepath.name} to errors directory")

                     # Also mark as processed to avoid retrying
                     processed_uris = load_processed_notifications()
···
                     save_processed_notifications(processed_uris)

                 else:
-                    logger.warning(f"Failed to process {filepath.name}, keeping in queue for retry")
+                    logger.warning(f"⚠️ Failed to process {filepath.name}, keeping in queue for retry")

             except Exception as e:
-                logger.error(f"Error processing queued notification {filepath.name}: {e}")
+                logger.error(f"💥 Error processing queued notification {filepath.name}: {e}")
                 # Keep the file for retry later

     except Exception as e:
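The function above settles each queue file three ways: `True` deletes it, `None` parks it in the errors directory so it is never retried, and `False` leaves it in place for the next pass. A condensed restatement of that contract; the paths are hypothetical, for illustration only:

```python
from pathlib import Path

QUEUE_ERROR_DIR = Path("queue/errors")  # assumed location

def settle_queue_file(filepath: Path, success):
    """Mirror the patch's three-way outcome handling for one queue file."""
    if success:
        filepath.unlink()                                 # processed: drop from queue
    elif success is None:
        filepath.rename(QUEUE_ERROR_DIR / filepath.name)  # poison message: park it
    # else: leave the file in place so the next cycle retries it
```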
···

 def process_notifications(void_agent, atproto_client):
     """Fetch new notifications, queue them, and process the queue."""
+    logger.info("Starting notification processing cycle...")
     try:
         # First, process any existing queued notifications
+        logger.info("Processing existing queued notifications...")
         load_and_process_queued_notifications(void_agent, atproto_client)

         # Get current time for marking notifications as seen
+        logger.debug("Getting current time for notification marking...")
         last_seen_at = atproto_client.get_current_time_iso()

         # Fetch ALL notifications using pagination
+        logger.info("Beginning notification fetch with pagination...")
         all_notifications = []
         cursor = None
         page_count = 0
···
                 break

         # Queue all unread notifications (except likes)
+        logger.info("Queuing unread notifications...")
         new_count = 0
         for notification in all_notifications:
             if not notification.is_read and notification.reason != "like":
···
             logger.debug("No new notifications to queue")

         # Process the queue (including any newly added notifications)
+        logger.info("Processing notification queue after fetching...")
         load_and_process_queued_notifications(void_agent, atproto_client)

     except Exception as e:
···

 def main():
     """Main bot loop that continuously monitors for notifications."""
+    global start_time
+    start_time = time.time()
+    logger.info("=== STARTING VOID BOT ===")
     logger.info("Initializing Void bot...")

     # Initialize the Letta agent
+    logger.info("Calling initialize_void()...")
     void_agent = initialize_void()
     logger.info(f"Void agent initialized: {void_agent.id}")

···
         logger.warning("Agent has no tools registered!")

     # Initialize Bluesky client
+    logger.info("Connecting to Bluesky...")
     atproto_client = bsky_utils.default_login()
     logger.info("Connected to Bluesky")

     # Main loop
+    logger.info(f"=== ENTERING MAIN LOOP ===")
     logger.info(f"Starting notification monitoring, checking every {FETCH_NOTIFICATIONS_DELAY_SEC} seconds")

+    cycle_count = 0
     while True:
         try:
+            cycle_count += 1
+            logger.info(f"=== MAIN LOOP CYCLE {cycle_count} ===")
             process_notifications(void_agent, atproto_client)
-            logger.debug("Sleeping, no notifications were detected")
+            # Log cycle completion with stats
+            elapsed_time = time.time() - start_time
+            total_messages = sum(message_counters.values())
+            messages_per_minute = (total_messages / elapsed_time * 60) if elapsed_time > 0 else 0
+
+            logger.info(f"Cycle {cycle_count} complete. Session totals: {total_messages} messages ({message_counters['mentions']} mentions, {message_counters['replies']} replies) | {messages_per_minute:.1f} msg/min")
+            logger.info(f"Sleeping for {FETCH_NOTIFICATIONS_DELAY_SEC} seconds...")
             sleep(FETCH_NOTIFICATIONS_DELAY_SEC)

         except KeyboardInterrupt:
-            logger.info("Bot stopped by user")
+            # Final stats
+            elapsed_time = time.time() - start_time
+            total_messages = sum(message_counters.values())
+            messages_per_minute = (total_messages / elapsed_time * 60) if elapsed_time > 0 else 0
+
+            logger.info("=== BOT STOPPED BY USER ===")
+            logger.info(f"📊 Final session stats: {total_messages} total messages processed in {elapsed_time/60:.1f} minutes")
+            logger.info(f"  - {message_counters['mentions']} mentions")
+            logger.info(f"  - {message_counters['replies']} replies")
+            logger.info(f"  - {message_counters['follows']} follows")
+            logger.info(f"  - {message_counters['reposts_skipped']} reposts skipped")
+            logger.info(f"  - Average rate: {messages_per_minute:.1f} messages/minute")
             break
         except Exception as e:
-            logger.error(f"Error in main loop: {e}")
+            logger.error(f"=== ERROR IN MAIN LOOP CYCLE {cycle_count} ===")
+            logger.error(f"Error details: {e}")
             # Wait a bit longer on errors
+            logger.info(f"Sleeping for {FETCH_NOTIFICATIONS_DELAY_SEC * 2} seconds due to error...")
             sleep(FETCH_NOTIFICATIONS_DELAY_SEC * 2)

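Taken together, the main loop now counts cycles, logs per-cycle throughput, and prints final session stats on Ctrl-C. Worth noting: the error path sleeps a fixed `2 * FETCH_NOTIFICATIONS_DELAY_SEC`, not an exponential back-off. A skeleton of the control flow; the delay value is assumed for illustration, since the real constant is defined elsewhere in the file:

```python
from time import sleep

FETCH_NOTIFICATIONS_DELAY_SEC = 30  # assumed for illustration

cycle_count = 0
while True:
    try:
        cycle_count += 1
        # process_notifications(...) would run here
        sleep(FETCH_NOTIFICATIONS_DELAY_SEC)
    except KeyboardInterrupt:
        break  # final session stats are logged before exiting
    except Exception:
        sleep(FETCH_NOTIFICATIONS_DELAY_SEC * 2)  # fixed 2x delay, not exponential
```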