a digital person for bluesky

Enhance display formatting and improve notification processing

- Add Rich panel formatting for all logs when the --rich flag is used (a standalone sketch of the pattern follows this list)
- Add --reasoning flag to control reasoning display and log level
- Add --simple-logs flag for cleaner log format without timestamps
- Fix archival memory search display with proper Python literal parsing
- Add interleaved notification checking during queue processing
- Filter out and auto-delete like notifications to improve performance
- Remove emojis from log messages for cleaner output
- Add pretty-printed panels for mentions, reasoning, tool calls, and results
- Improve error handling for various notification types
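
The panel-with-fallback pattern behind most of these changes can be sketched in isolation. This is a minimal illustration (the function name, logger name, and handle are invented; only the `rich` package is assumed), not the bot's actual helper:

```python
import logging
from rich.console import Console
from rich.panel import Panel

logging.basicConfig(level=logging.INFO)
console = Console()

def emit(message, title=None, rich_enabled=True):
    """Render a bordered panel when Rich output is enabled; else fall back to a log record."""
    if rich_enabled and title:
        console.print(Panel(message, title=title, title_align="left",
                            border_style="cyan", padding=(0, 1)))
    else:
        logging.getLogger("demo").info(message)

emit("why is the sky blue?", title="MENTION FROM @example.bsky.social")
emit("why is the sky blue?", rich_enabled=False)  # plain log line instead of a panel
```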

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

Changed files
+372 -115
bsky.py
```diff
 from rich import print  # pretty printing tools
+from rich.console import Console
+from rich.table import Table
+from rich.panel import Panel
+from rich.text import Text
 from time import sleep
 from letta_client import Letta
 from bsky_utils import thread_to_yaml_string
···
        _extract_recursive(data)
    return list(handles)

-# Configure logging
-logging.basicConfig(
-    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-)
-logger = logging.getLogger("void_bot")
-logger.setLevel(logging.INFO)
+# Logging will be configured after argument parsing
+logger = None
+prompt_logger = None
+console = None
+USE_RICH = False
+SHOW_REASONING = False
+last_archival_query = "archival memory search"

-# Create a separate logger for prompts (set to WARNING to hide by default)
-prompt_logger = logging.getLogger("void_bot.prompts")
-prompt_logger.setLevel(logging.WARNING)  # Change to DEBUG if you want to see prompts
-
-# Disable httpx logging completely
-logging.getLogger("httpx").setLevel(logging.CRITICAL)
+def log_with_panel(message, title=None, border_color="white"):
+    """Log a message with Rich panel if USE_RICH is enabled, otherwise use regular logger"""
+    if USE_RICH and console:
+        if title:
+            panel = Panel(
+                message,
+                title=title,
+                title_align="left",
+                border_style=border_color,
+                padding=(0, 1)
+            )
+            console.print(panel)
+        else:
+            console.print(message)
+    else:
+        logger.info(message)


 # Create a client with extended timeout for LLM operations
···
 # Notification check delay
 FETCH_NOTIFICATIONS_DELAY_SEC = 30
+
+# Check for new notifications every N queue items
+CHECK_NEW_NOTIFICATIONS_EVERY_N_ITEMS = 5

 # Queue directory
 QUEUE_DIR = Path("queue")
```
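
The new `CHECK_NEW_NOTIFICATIONS_EVERY_N_ITEMS` constant drives the interleaved re-check that appears later in the queue loop. A tiny illustration of the trigger arithmetic (values are illustrative):

```python
# Which 1-indexed queue positions trigger a re-check under the
# `i % CHECK_NEW_NOTIFICATIONS_EVERY_N_ITEMS == 0 and i > 1` guard used below.
N = 5
print([i for i in range(1, 16) if i % N == 0 and i > 1])  # [5, 10, 15]
# The `i > 1` clause only matters when N == 1; for N >= 2 it is already implied.
```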
```diff
···
    with open(current_file, 'w', encoding='utf-8') as f:
        json.dump(agent_data, f, indent=2, ensure_ascii=False)

-    logger.info(f"✅ Agent exported to {archive_file} and {current_file}")
+    logger.info(f"Agent exported to {archive_file} and {current_file}")

    # Git add only the current agent file (archive is ignored) unless skip_git is True
    if not skip_git:
···
    author_handle = notification_data.author.handle
    author_name = notification_data.author.display_name or author_handle

-    logger.info(f"Extracted data - URI: {uri}, Author: @{author_handle}, Text: {mention_text[:50]}...")
+    logger.debug(f"Extracted data - URI: {uri}, Author: @{author_handle}, Text: {mention_text[:50]}...")

    # Retrieve the entire thread associated with the mention
    try:
···
        # Continue without user blocks rather than failing completely

    # Get response from Letta agent
-    logger.info(f"Mention from @{author_handle}: {mention_text}")
+    if USE_RICH:
+        # Create and print Rich panel directly
+        mention_panel = Panel(
+            mention_text,
+            title=f"MENTION FROM @{author_handle}",
+            title_align="left",
+            border_style="cyan",
+            padding=(0, 1)
+        )
+        console.print(mention_panel)
+    else:
+        # Simple text format when Rich is disabled
+        print(f"\n{'='*60}")
+        print(f"MENTION FROM @{author_handle}")
+        print('='*60)
+        print(f"{mention_text}")
+        print('='*60 + "\n")

    # Log prompt details to separate logger
    prompt_logger.debug(f"Full prompt being sent:\n{prompt}")

    # Log concise prompt info to main logger
    thread_handles_count = len(unique_handles)
-    logger.info(f"💬 Sending to LLM: @{author_handle} mention | msg: \"{mention_text[:50]}...\" | context: {len(thread_context)} chars, {thread_handles_count} users")
+    logger.debug(f"Sending to LLM: @{author_handle} mention | msg: \"{mention_text[:50]}...\" | context: {len(thread_context)} chars, {thread_handles_count} users")

    try:
        # Use streaming to avoid 524 timeout errors
···
            if hasattr(chunk, 'message_type'):
                if chunk.message_type == 'reasoning_message':
                    # Show full reasoning without truncation
-                    logger.info(f"🧠 Reasoning: {chunk.reasoning}")
+                    if SHOW_REASONING and USE_RICH:
+                        # Create and print Rich panel for reasoning
+                        reasoning_panel = Panel(
+                            chunk.reasoning,
+                            title="Reasoning",
+                            title_align="left",
+                            border_style="yellow",
+                            padding=(0, 1)
+                        )
+                        console.print(reasoning_panel)
+                    elif SHOW_REASONING:
+                        # Simple text format when Rich is disabled but reasoning is enabled
+                        print(f"\n{'='*60}")
+                        print("Reasoning")
+                        print('='*60)
+                        print(f"{chunk.reasoning}")
+                        print('='*60 + "\n")
+                    else:
+                        # Default log format (only when --reasoning is used due to log level)
+                        if USE_RICH:
+                            reasoning_panel = Panel(
+                                chunk.reasoning,
+                                title="Reasoning",
+                                title_align="left",
+                                border_style="yellow",
+                                padding=(0, 1)
+                            )
+                            console.print(reasoning_panel)
+                        else:
+                            logger.info(f"Reasoning: {chunk.reasoning}")
```
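
The reasoning branch above collapses to a small decision table: when `--rich` is active, the same yellow panel is rendered whether or not `--reasoning` is set, so the two flags only diverge on the non-Rich path. A condensed, illustrative restatement (not code from the commit):

```python
# Condensed view of the reasoning-display branch above.
def reasoning_style(use_rich, show_reasoning):
    if use_rich:
        return "Rich panel, yellow border"
    if show_reasoning:
        return "plain '=' banner via print()"
    return "logger.info() line"

# Panel output does not depend on SHOW_REASONING once USE_RICH is set:
assert reasoning_style(True, False) == reasoning_style(True, True)
```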
```diff
                elif chunk.message_type == 'tool_call_message':
                    # Parse tool arguments for better display
                    tool_name = chunk.tool_call.name
···
                        if messages and isinstance(messages, list):
                            preview = messages[0][:100] + "..." if len(messages[0]) > 100 else messages[0]
                            msg_count = f" ({len(messages)} msgs)" if len(messages) > 1 else ""
-                            logger.info(f"🔧 Tool call: {tool_name} → \"{preview}\"{msg_count} [lang: {lang}]")
+                            if USE_RICH:
+                                tool_panel = Panel(
+                                    f"\"{preview}\"{msg_count} [lang: {lang}]",
+                                    title=f"Tool call: {tool_name}",
+                                    title_align="left",
+                                    border_style="blue",
+                                    padding=(0, 1)
+                                )
+                                console.print(tool_panel)
+                            else:
+                                logger.info(f"Tool call: {tool_name} → \"{preview}\"{msg_count} [lang: {lang}]")
                        else:
-                            logger.info(f"🔧 Tool call: {tool_name}({chunk.tool_call.arguments[:150]}...)")
+                            log_with_panel(chunk.tool_call.arguments[:150] + "...", f"Tool call: {tool_name}", "blue")
                    elif tool_name == 'archival_memory_search':
                        query = args.get('query', 'unknown')
-                        logger.info(f"🔧 Tool call: {tool_name} → query: \"{query}\"")
+                        global last_archival_query
+                        last_archival_query = query
+                        if USE_RICH:
+                            tool_panel = Panel(
+                                f"query: \"{query}\"",
+                                title=f"Tool call: {tool_name}",
+                                title_align="left",
+                                border_style="blue",
+                                padding=(0, 1)
+                            )
+                            console.print(tool_panel)
+                        else:
+                            log_with_panel(f"query: \"{query}\"", f"Tool call: {tool_name}", "blue")
                    elif tool_name == 'update_block':
                        label = args.get('label', 'unknown')
                        value_preview = str(args.get('value', ''))[:50] + "..." if len(str(args.get('value', ''))) > 50 else str(args.get('value', ''))
-                        logger.info(f"🔧 Tool call: {tool_name} → {label}: \"{value_preview}\"")
+                        log_with_panel(f"{label}: \"{value_preview}\"", f"Tool call: {tool_name}", "blue")
                    else:
                        # Generic display for other tools
                        args_str = ', '.join(f"{k}={v}" for k, v in args.items() if k != 'request_heartbeat')
                        if len(args_str) > 150:
                            args_str = args_str[:150] + "..."
-                        logger.info(f"🔧 Tool call: {tool_name}({args_str})")
+                        log_with_panel(args_str, f"Tool call: {tool_name}", "blue")
                except:
                    # Fallback to original format if parsing fails
-                    logger.info(f"🔧 Tool call: {tool_name}({chunk.tool_call.arguments[:150]}...)")
+                    log_with_panel(chunk.tool_call.arguments[:150] + "...", f"Tool call: {tool_name}", "blue")
logger.error(f"Error formatting archival memory results: {e}") 541 + log_with_panel(result_str[:100] + "...", f"Tool result: {tool_name} ✓", "green") 409 542 elif tool_name == 'bluesky_reply': 410 - logger.info(f"📋 Tool result: {tool_name} ✓ Reply posted successfully") 543 + log_with_panel("Reply posted successfully", f"Tool result: {tool_name} ✓", "green") 411 544 elif tool_name == 'update_block': 412 - logger.info(f"📋 Tool result: {tool_name} ✓ Memory block updated") 545 + log_with_panel("Memory block updated", f"Tool result: {tool_name} ✓", "green") 413 546 else: 414 547 # Generic success with preview 415 548 preview = result_str[:100] + "..." if len(result_str) > 100 else result_str 416 - logger.info(f"📋 Tool result: {tool_name} ✓ {preview}") 549 + log_with_panel(preview, f"Tool result: {tool_name} ✓", "green") 417 550 else: 418 - logger.info(f"📋 Tool result: {tool_name} ✓") 551 + log_with_panel("Success", f"Tool result: {tool_name} ✓", "green") 419 552 elif status == 'error': 420 553 # Show error details 421 554 error_preview = "" 422 555 if hasattr(chunk, 'tool_return') and chunk.tool_return: 423 556 error_str = str(chunk.tool_return) 424 557 error_preview = error_str[:100] + "..." if len(error_str) > 100 else error_str 425 - logger.info(f"📋 Tool result: {tool_name} ✗ Error: {error_preview}") 558 + log_with_panel(f"Error: {error_preview}", f"Tool result: {tool_name} ✗", "red") 426 559 else: 427 - logger.info(f"📋 Tool result: {tool_name} ✗ Error occurred") 560 + log_with_panel("Error occurred", f"Tool result: {tool_name} ✗", "red") 428 561 else: 429 - logger.info(f"📋 Tool result: {tool_name} - {status}") 562 + logger.info(f"Tool result: {tool_name} - {status}") 430 563 elif chunk.message_type == 'assistant_message': 431 - logger.info(f"💬 Assistant: {chunk.content[:150]}...") 564 + if USE_RICH: 565 + # Create and print Rich panel directly 566 + response_panel = Panel( 567 + chunk.content, 568 + title="Assistant Response", 569 + title_align="left", 570 + border_style="green", 571 + padding=(0, 1) 572 + ) 573 + console.print(response_panel) 574 + else: 575 + # Simple text format when Rich is disabled 576 + print(f"\n{'='*60}") 577 + print("Assistant Response") 578 + print('='*60) 579 + print(f"{chunk.content}") 580 + print('='*60 + "\n") 432 581 else: 433 - logger.info(f"📨 {chunk.message_type}: {str(chunk)[:150]}...") 582 + logger.info(f"{chunk.message_type}: {str(chunk)[:150]}...") 434 583 else: 435 584 logger.info(f"📦 Stream status: {chunk}") 436 585 ··· 524 673 ignored_notification = True 525 674 logger.info(f"🚫 Notification ignored - Category: {ignore_category}, Reason: {ignore_reason}") 526 675 elif message.name == 'bluesky_reply': 527 - logger.error("❌ DEPRECATED TOOL DETECTED: bluesky_reply is no longer supported!") 676 + logger.error("DEPRECATED TOOL DETECTED: bluesky_reply is no longer supported!") 528 677 logger.error("Please use add_post_to_bluesky_reply_thread instead.") 529 678 logger.error("Update the agent's tools using register_tools.py") 530 679 # Export agent state before terminating ··· 565 714 # Delete the queue file before terminating 566 715 if queue_filepath and queue_filepath.exists(): 567 716 queue_filepath.unlink() 568 - logger.info(f"✅ Deleted queue file: {queue_filepath.name}") 717 + logger.info(f"Deleted queue file: {queue_filepath.name}") 569 718 570 719 # Also mark as processed to avoid reprocessing 571 720 processed_uris = load_processed_notifications() ··· 582 731 # Check for deprecated bluesky_reply tool 583 732 if hasattr(message, 'tool_call') and 
```diff
                        elif tool_name == 'bluesky_reply':
-                            logger.info(f"📋 Tool result: {tool_name} ✓ Reply posted successfully")
+                            log_with_panel("Reply posted successfully", f"Tool result: {tool_name} ✓", "green")
                        elif tool_name == 'update_block':
-                            logger.info(f"📋 Tool result: {tool_name} ✓ Memory block updated")
+                            log_with_panel("Memory block updated", f"Tool result: {tool_name} ✓", "green")
                        else:
                            # Generic success with preview
                            preview = result_str[:100] + "..." if len(result_str) > 100 else result_str
-                            logger.info(f"📋 Tool result: {tool_name} ✓ {preview}")
+                            log_with_panel(preview, f"Tool result: {tool_name} ✓", "green")
                    else:
-                        logger.info(f"📋 Tool result: {tool_name} ✓")
+                        log_with_panel("Success", f"Tool result: {tool_name} ✓", "green")
                elif status == 'error':
                    # Show error details
                    error_preview = ""
                    if hasattr(chunk, 'tool_return') and chunk.tool_return:
                        error_str = str(chunk.tool_return)
                        error_preview = error_str[:100] + "..." if len(error_str) > 100 else error_str
-                        logger.info(f"📋 Tool result: {tool_name} ✗ Error: {error_preview}")
+                        log_with_panel(f"Error: {error_preview}", f"Tool result: {tool_name} ✗", "red")
                    else:
-                        logger.info(f"📋 Tool result: {tool_name} ✗ Error occurred")
+                        log_with_panel("Error occurred", f"Tool result: {tool_name} ✗", "red")
                else:
-                    logger.info(f"📋 Tool result: {tool_name} - {status}")
+                    logger.info(f"Tool result: {tool_name} - {status}")
            elif chunk.message_type == 'assistant_message':
-                logger.info(f"💬 Assistant: {chunk.content[:150]}...")
+                if USE_RICH:
+                    # Create and print Rich panel directly
+                    response_panel = Panel(
+                        chunk.content,
+                        title="Assistant Response",
+                        title_align="left",
+                        border_style="green",
+                        padding=(0, 1)
+                    )
+                    console.print(response_panel)
+                else:
+                    # Simple text format when Rich is disabled
+                    print(f"\n{'='*60}")
+                    print("Assistant Response")
+                    print('='*60)
+                    print(f"{chunk.content}")
+                    print('='*60 + "\n")
            else:
-                logger.info(f"📨 {chunk.message_type}: {str(chunk)[:150]}...")
+                logger.info(f"{chunk.message_type}: {str(chunk)[:150]}...")
        else:
            logger.info(f"📦 Stream status: {chunk}")

···
            ignored_notification = True
            logger.info(f"🚫 Notification ignored - Category: {ignore_category}, Reason: {ignore_reason}")
        elif message.name == 'bluesky_reply':
-            logger.error("❌ DEPRECATED TOOL DETECTED: bluesky_reply is no longer supported!")
+            logger.error("DEPRECATED TOOL DETECTED: bluesky_reply is no longer supported!")
            logger.error("Please use add_post_to_bluesky_reply_thread instead.")
            logger.error("Update the agent's tools using register_tools.py")
            # Export agent state before terminating
···
    # Delete the queue file before terminating
    if queue_filepath and queue_filepath.exists():
        queue_filepath.unlink()
-        logger.info(f"✅ Deleted queue file: {queue_filepath.name}")
+        logger.info(f"Deleted queue file: {queue_filepath.name}")

    # Also mark as processed to avoid reprocessing
    processed_uris = load_processed_notifications()
···
    # Check for deprecated bluesky_reply tool
    if hasattr(message, 'tool_call') and message.tool_call:
        if message.tool_call.name == 'bluesky_reply':
-            logger.error("❌ DEPRECATED TOOL DETECTED: bluesky_reply is no longer supported!")
+            logger.error("DEPRECATED TOOL DETECTED: bluesky_reply is no longer supported!")
            logger.error("Please use add_post_to_bluesky_reply_thread instead.")
            logger.error("Update the agent's tools using register_tools.py")
            # Export agent state before terminating
···

    # Send the reply(s) with language (unless in testing mode)
    if testing_mode:
-        logger.info("🧪 TESTING MODE: Skipping actual Bluesky post")
+        logger.info("TESTING MODE: Skipping actual Bluesky post")
        response = True  # Simulate success
    else:
        if len(reply_messages) == 1:
···
        logger.error(f"Error saving processed notifications: {e}")


-def save_notification_to_queue(notification):
+def save_notification_to_queue(notification, is_priority=None):
    """Save a notification to the queue directory with priority-based filename."""
    try:
        # Check if already processed
        processed_uris = load_processed_notifications()
-        if notification.uri in processed_uris:
-            logger.debug(f"Notification already processed: {notification.uri}")
+
+        # Handle both notification objects and dicts
+        if isinstance(notification, dict):
+            notif_dict = notification
+            notification_uri = notification.get('uri')
+        else:
+            notif_dict = notification_to_dict(notification)
+            notification_uri = notification.uri
+
+        if notification_uri in processed_uris:
+            logger.debug(f"Notification already processed: {notification_uri}")
            return False
-
-        # Convert notification to dict
-        notif_dict = notification_to_dict(notification)

        # Create JSON string
        notif_json = json.dumps(notif_dict, sort_keys=True)
···
        # Generate hash for filename (to avoid duplicates)
        notif_hash = hashlib.sha256(notif_json.encode()).hexdigest()[:16]

-        # Determine priority based on author handle
-        author_handle = getattr(notification.author, 'handle', '') if hasattr(notification, 'author') else ''
-        priority_prefix = "0_" if author_handle == "cameron.pfiffer.org" else "1_"
+        # Determine priority based on author handle or explicit priority
+        if is_priority is not None:
+            priority_prefix = "0_" if is_priority else "1_"
+        else:
+            if isinstance(notification, dict):
+                author_handle = notification.get('author', {}).get('handle', '')
+            else:
+                author_handle = getattr(notification.author, 'handle', '') if hasattr(notification, 'author') else ''
+            priority_prefix = "0_" if author_handle == "cameron.pfiffer.org" else "1_"

        # Create filename with priority, timestamp and hash
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-        filename = f"{priority_prefix}{timestamp}_{notification.reason}_{notif_hash}.json"
+        reason = notif_dict.get('reason', 'unknown')
+        filename = f"{priority_prefix}{timestamp}_{reason}_{notif_hash}.json"
        filepath = QUEUE_DIR / filename

        # Check if this notification URI is already in the queue
···
            try:
                with open(existing_file, 'r') as f:
                    existing_data = json.load(f)
-                if existing_data.get('uri') == notification.uri:
-                    logger.debug(f"Notification already queued (URI: {notification.uri})")
+                if existing_data.get('uri') == notification_uri:
+                    logger.debug(f"Notification already queued (URI: {notification_uri})")
                    return False
            except:
                continue
```
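
Priority ordering relies on plain lexicographic filename sort: a `0_` prefix sorts ahead of `1_`, so the `sorted()` call over the queue directory surfaces priority items first with no extra bookkeeping. A quick demonstration with made-up filenames:

```python
from datetime import datetime

# Made-up filenames: "0_" sorts ahead of "1_" lexicographically, so a plain
# sorted() over the queue directory processes priority notifications first.
ts = datetime(2025, 6, 1, 12, 0, 0).strftime("%Y%m%d_%H%M%S")
names = [f"1_{ts}_reply_aaaa1111.json", f"0_{ts}_mention_bbbb2222.json"]
print(sorted(names)[0])  # 0_20250601_120000_mention_bbbb2222.json
```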
logger.debug(f"Notification already queued (URI: {notification_uri})") 782 944 return False 783 945 except: 784 946 continue ··· 801 963 try: 802 964 # Get all JSON files in queue directory (excluding processed_notifications.json) 803 965 # Files are sorted by name, which puts priority files first (0_ prefix before 1_ prefix) 804 - queue_files = sorted([f for f in QUEUE_DIR.glob("*.json") if f.name != "processed_notifications.json"]) 966 + all_queue_files = sorted([f for f in QUEUE_DIR.glob("*.json") if f.name != "processed_notifications.json"]) 967 + 968 + # Filter out and delete like notifications immediately 969 + queue_files = [] 970 + likes_deleted = 0 971 + 972 + for filepath in all_queue_files: 973 + try: 974 + with open(filepath, 'r') as f: 975 + notif_data = json.load(f) 976 + 977 + # If it's a like, delete it immediately and don't process 978 + if notif_data.get('reason') == 'like': 979 + filepath.unlink() 980 + likes_deleted += 1 981 + logger.debug(f"Deleted like notification: {filepath.name}") 982 + else: 983 + queue_files.append(filepath) 984 + except Exception as e: 985 + logger.warning(f"Error checking notification file {filepath.name}: {e}") 986 + queue_files.append(filepath) # Keep it in case it's valid 987 + 988 + if likes_deleted > 0: 989 + logger.info(f"Deleted {likes_deleted} like notifications from queue") 805 990 806 991 if not queue_files: 807 992 return ··· 813 998 total_messages = sum(message_counters.values()) 814 999 messages_per_minute = (total_messages / elapsed_time * 60) if elapsed_time > 0 else 0 815 1000 816 - logger.info(f"📊 Session stats: {total_messages} total messages ({message_counters['mentions']} mentions, {message_counters['replies']} replies, {message_counters['follows']} follows) | {messages_per_minute:.1f} msg/min") 1001 + logger.info(f"Session stats: {total_messages} total messages ({message_counters['mentions']} mentions, {message_counters['replies']} replies, {message_counters['follows']} follows) | {messages_per_minute:.1f} msg/min") 817 1002 818 1003 for i, filepath in enumerate(queue_files, 1): 1004 + # Check for new notifications periodically during queue processing 1005 + if i % CHECK_NEW_NOTIFICATIONS_EVERY_N_ITEMS == 0 and i > 1: 1006 + logger.info(f"🔄 Checking for new notifications (processed {i-1}/{len(queue_files)} queue items)") 1007 + try: 1008 + # Fetch and queue new notifications without processing them 1009 + new_count = fetch_and_queue_new_notifications(atproto_client) 1010 + 1011 + if new_count > 0: 1012 + logger.info(f"Added {new_count} new notifications to queue") 1013 + # Reload the queue files to include the new items 1014 + updated_queue_files = sorted([f for f in QUEUE_DIR.glob("*.json") if f.name != "processed_notifications.json"]) 1015 + queue_files = updated_queue_files 1016 + logger.info(f"Queue updated: now {len(queue_files)} total items") 1017 + except Exception as e: 1018 + logger.error(f"Error checking for new notifications: {e}") 1019 + 819 1020 logger.info(f"Processing queue file {i}/{len(queue_files)}: {filepath.name}") 820 1021 try: 821 1022 # Load notification data ··· 849 1050 success = True # Skip reposts but mark as successful to remove from queue 850 1051 if success: 851 1052 message_counters['reposts_skipped'] += 1 1053 + elif notif_data['reason'] == "like": 1054 + # Skip likes silently 1055 + success = True # Skip likes but mark as successful to remove from queue 1056 + if success: 1057 + message_counters.setdefault('likes_skipped', 0) 1058 + message_counters['likes_skipped'] += 1 852 1059 else: 853 
1060 logger.warning(f"Unknown notification type: {notif_data['reason']}") 854 1061 success = True # Remove unknown types from queue ··· 856 1063 # Handle file based on processing result 857 1064 if success: 858 1065 if testing_mode: 859 - logger.info(f"🧪 TESTING MODE: Keeping queue file: {filepath.name}") 1066 + logger.info(f"TESTING MODE: Keeping queue file: {filepath.name}") 860 1067 else: 861 1068 filepath.unlink() 862 - logger.info(f"✅ Successfully processed and removed: {filepath.name}") 1069 + logger.info(f"Successfully processed and removed: {filepath.name}") 863 1070 864 1071 # Mark as processed to avoid reprocessing 865 1072 processed_uris = load_processed_notifications() ··· 869 1076 elif success is None: # Special case for moving to error directory 870 1077 error_path = QUEUE_ERROR_DIR / filepath.name 871 1078 filepath.rename(error_path) 872 - logger.warning(f"❌ Moved {filepath.name} to errors directory") 1079 + logger.warning(f"Moved {filepath.name} to errors directory") 873 1080 874 1081 # Also mark as processed to avoid retrying 875 1082 processed_uris = load_processed_notifications() ··· 879 1086 elif success == "no_reply": # Special case for moving to no_reply directory 880 1087 no_reply_path = QUEUE_NO_REPLY_DIR / filepath.name 881 1088 filepath.rename(no_reply_path) 882 - logger.info(f"📭 Moved {filepath.name} to no_reply directory") 1089 + logger.info(f"Moved {filepath.name} to no_reply directory") 883 1090 884 1091 # Also mark as processed to avoid retrying 885 1092 processed_uris = load_processed_notifications() ··· 907 1114 logger.error(f"Error loading queued notifications: {e}") 908 1115 909 1116 910 - def process_notifications(void_agent, atproto_client, testing_mode=False): 911 - """Fetch new notifications, queue them, and process the queue.""" 1117 + def fetch_and_queue_new_notifications(atproto_client): 1118 + """Fetch new notifications and queue them without processing.""" 912 1119 try: 913 1120 # Get current time for marking notifications as seen 914 1121 logger.debug("Getting current time for notification marking...") 915 1122 last_seen_at = atproto_client.get_current_time_iso() 916 1123 917 - # Fetch ALL notifications using pagination first 918 - logger.info("Beginning notification fetch with pagination...") 1124 + # Fetch ALL notifications using pagination 919 1125 all_notifications = [] 920 1126 cursor = None 921 1127 page_count = 0 922 1128 max_pages = 20 # Safety limit to prevent infinite loops 923 - 924 - logger.info("Fetching all unread notifications...") 925 1129 926 1130 while page_count < max_pages: 927 1131 try: ··· 938 1142 page_count += 1 939 1143 page_notifications = notifications_response.notifications 940 1144 941 - # Count unread notifications in this page 942 - unread_count = sum(1 for n in page_notifications if not n.is_read and n.reason != "like") 943 - logger.debug(f"Page {page_count}: {len(page_notifications)} notifications, {unread_count} unread (non-like)") 1145 + if not page_notifications: 1146 + break 944 1147 945 - # Add all notifications to our list 946 1148 all_notifications.extend(page_notifications) 947 1149 948 - # Check if we have more pages 949 - if hasattr(notifications_response, 'cursor') and notifications_response.cursor: 950 - cursor = notifications_response.cursor 951 - # If this page had no unread notifications, we can stop 952 - if unread_count == 0: 953 - logger.info(f"No more unread notifications found after {page_count} pages") 954 - break 955 - else: 956 - # No more pages 957 - logger.info(f"Fetched all notifications 
            except Exception as e:
-                error_str = str(e)
                logger.error(f"Error fetching notifications page {page_count}: {e}")
+                break
+
+        # Now process all fetched notifications
+        new_count = 0
+        if all_notifications:
+            # Mark as seen first
+            try:
+                atproto_client.app.bsky.notification.update_seen(
+                    data={'seenAt': last_seen_at}
+                )
+                logger.debug(f"Marked {len(all_notifications)} notifications as seen at {last_seen_at}")
+            except Exception as e:
+                logger.error(f"Error marking notifications as seen: {e}")
+
+            # Queue all new notifications (except likes and already read)
+            for notif in all_notifications:
+                # Skip if already read or if it's a like
+                if (hasattr(notif, 'is_read') and notif.is_read) or (hasattr(notif, 'reason') and notif.reason == 'like'):
+                    continue
+
+                notif_dict = notif.model_dump() if hasattr(notif, 'model_dump') else notif

-                # Handle specific API errors
-                if 'rate limit' in error_str.lower():
-                    logger.warning("Rate limit hit while fetching notifications, will retry next cycle")
-                    break
-                elif '401' in error_str or 'unauthorized' in error_str.lower():
-                    logger.error("Authentication error, re-raising exception")
-                    raise
-                else:
-                    # For other errors, try to continue with what we have
-                    logger.warning("Continuing with notifications fetched so far")
-                    break
-
-        # Queue all unread notifications (except likes)
-        logger.info("Queuing unread notifications...")
-        new_count = 0
-        for notification in all_notifications:
-            if not notification.is_read and notification.reason != "like":
-                if save_notification_to_queue(notification):
+
+                # Skip likes in dict form too
+                if notif_dict.get('reason') == 'like':
+                    continue
+
+                # Check if it's a priority notification
+                is_priority = False
+                if notif_dict.get('reason') == 'mention':
+                    # Get the mention text to check for priority keywords
+                    record = notif_dict.get('record', {})
+                    text = record.get('text', '')
+                    if any(keyword in text.lower() for keyword in ['urgent', 'priority', 'important', 'emergency']):
+                        is_priority = True
+
+                if save_notification_to_queue(notif_dict, is_priority=is_priority):
                    new_count += 1
+
+            logger.info(f"Queued {new_count} new notifications and marked as seen")
+        else:
+            logger.debug("No new notifications to queue")
+
+        return new_count
+
+    except Exception as e:
+        logger.error(f"Error fetching and queueing notifications: {e}")
+        return 0

-        # Mark all notifications as seen immediately after queuing (unless in testing mode)
-        if testing_mode:
-            logger.info("🧪 TESTING MODE: Skipping marking notifications as seen")
-        else:
-            if new_count > 0:
-                atproto_client.app.bsky.notification.update_seen({'seen_at': last_seen_at})
-                logger.info(f"Queued {new_count} new notifications and marked as seen")
-            else:
-                logger.debug("No new notifications to queue")
```
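
Stripped of the Bluesky specifics, `fetch_and_queue_new_notifications` follows a standard cursor-pagination loop with a hard page cap. An illustrative skeleton (`fetch_page` is a stand-in for the listNotifications call and must return an `(items, next_cursor)` pair):

```python
# Skeleton of the cursor-pagination loop above (illustrative, not the bot's code).
def fetch_all(fetch_page, max_pages=20):
    items, cursor = [], None
    for _ in range(max_pages):      # hard cap guards against runaway pagination
        page, cursor = fetch_page(cursor)
        if not page:                # empty page: nothing left to read
            break
        items.extend(page)
        if not cursor:              # missing cursor: this was the last page
            break
    return items

# Example with a canned two-page source:
pages = {None: ([1, 2], "p2"), "p2": ([3], None)}
print(fetch_all(lambda c: pages[c]))  # [1, 2, 3]
```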
```diff
+
+def process_notifications(void_agent, atproto_client, testing_mode=False):
+    """Fetch new notifications, queue them, and process the queue."""
+    try:
+        # Fetch and queue new notifications
+        new_count = fetch_and_queue_new_notifications(atproto_client)
+
+        if new_count > 0:
+            logger.info(f"Found {new_count} new notifications to process")

        # Now process the entire queue (old + new notifications)
        load_and_process_queued_notifications(void_agent, atproto_client, testing_mode)
···
    parser = argparse.ArgumentParser(description='Void Bot - Bluesky autonomous agent')
    parser.add_argument('--test', action='store_true', help='Run in testing mode (no messages sent, queue files preserved)')
    parser.add_argument('--no-git', action='store_true', help='Skip git operations when exporting agent state')
+    parser.add_argument('--simple-logs', action='store_true', help='Use simplified log format (void - LEVEL - message)')
+    parser.add_argument('--rich', action='store_true', help='Enable Rich formatting for archival memory display')
+    parser.add_argument('--reasoning', action='store_true', help='Display reasoning in panels and set reasoning log level to INFO')
    args = parser.parse_args()

-    global TESTING_MODE
+    # Configure logging based on command line arguments
+    if args.simple_logs:
+        log_format = "void - %(levelname)s - %(message)s"
+    else:
+        log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+
+    # Reset logging configuration and apply new format
+    for handler in logging.root.handlers[:]:
+        logging.root.removeHandler(handler)
+    logging.basicConfig(level=logging.INFO, format=log_format, force=True)
+
+    global logger, prompt_logger, console
+    logger = logging.getLogger("void_bot")
+    logger.setLevel(logging.INFO)
+
+    # Create a separate logger for prompts (set to WARNING to hide by default)
+    prompt_logger = logging.getLogger("void_bot.prompts")
+    if args.reasoning:
+        prompt_logger.setLevel(logging.INFO)  # Show reasoning when --reasoning is used
+    else:
+        prompt_logger.setLevel(logging.WARNING)  # Hide by default
+
+    # Disable httpx logging completely
+    logging.getLogger("httpx").setLevel(logging.CRITICAL)
+
+    # Create Rich console for pretty printing
+    console = Console()
```
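
One observation on the reconfiguration above: since Python 3.8, `logging.basicConfig(force=True)` already removes and closes any handlers attached to the root logger, so the manual `removeHandler` loop is redundant, though harmless. A minimal equivalent:

```python
import logging

# force=True (Python 3.8+) removes and closes existing root handlers before
# applying the new configuration, making a manual removeHandler loop unnecessary.
logging.basicConfig(level=logging.INFO,
                    format="void - %(levelname)s - %(message)s",
                    force=True)
logging.getLogger("void_bot").info("simple-logs style record")
```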
{message_counters['follows']} follows")