a digital person for bluesky

Implement streaming for Letta API calls to prevent 524 timeouts

- Replace synchronous agents.messages.create with create_stream
- Add step streaming with max_steps=100 for better responsiveness (see the sketch after this list)
- Increase client timeout from 5 to 10 minutes
- Add condensed, emoji-tagged info-level logging for streaming chunks
- Add debug-level logging for full chunk details
- Maintain backward compatibility with existing response handling
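
The core call pattern, condensed (a sketch only: it assumes the `letta_client` package name for the Letta SDK and uses a placeholder agent id in place of `void_agent.id`; every call and parameter mirrors the diff below):

```python
import os

from letta_client import Letta  # assumed package name for the Letta SDK

client = Letta(
    token=os.environ["LETTA_API_KEY"],
    timeout=600,  # 10-minute client timeout, matching this commit
)

stream = client.agents.messages.create_stream(
    agent_id="agent-123",  # placeholder; the real code passes void_agent.id
    messages=[{"role": "user", "content": "hello"}],
    stream_tokens=False,  # step streaming only: one chunk per completed agent step
    max_steps=100,
)

# Chunks arrive as each step finishes, so the HTTP response starts flowing
# immediately and Cloudflare never sees a long-idle request to kill with a 524.
for chunk in stream:
    if str(chunk) == "done":  # the stream's terminal status marker
        break
```

Per the in-line comment in the diff, step streaming (stream_tokens=False) was chosen over token streaming for speed; chunks still arrive frequently enough to keep the connection alive.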

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

Changed files
bsky.py (+36 -4)
```diff
@@ -58,7 +58,7 @@
 # Create a client with extended timeout for LLM operations
 CLIENT = Letta(
     token=os.environ["LETTA_API_KEY"],
-    timeout=300  # 5 minutes timeout for API calls
+    timeout=600  # 10 minutes timeout for API calls - higher than Cloudflare's 524 timeout
 )
 
 # Use the "Bluesky" project
@@ -313,10 +313,42 @@
     logger.info(f"💬 Sending to LLM: @{author_handle} mention | msg: \"{mention_text[:50]}...\" | context: {len(thread_context)} chars, {thread_handles_count} users")
 
     try:
-        message_response = CLIENT.agents.messages.create(
-            agent_id = void_agent.id,
-            messages = [{"role":"user", "content": prompt}]
+        # Use streaming to avoid 524 timeout errors
+        message_stream = CLIENT.agents.messages.create_stream(
+            agent_id=void_agent.id,
+            messages=[{"role": "user", "content": prompt}],
+            stream_tokens=False,  # Step streaming only (faster than token streaming)
+            max_steps=100
         )
+
+        # Collect the streaming response
+        all_messages = []
+        for chunk in message_stream:
+            # Log condensed chunk info
+            if hasattr(chunk, 'message_type'):
+                if chunk.message_type == 'reasoning_message':
+                    logger.info(f"🧠 Reasoning: {chunk.reasoning[:100]}...")
+                elif chunk.message_type == 'tool_call_message':
+                    logger.info(f"🔧 Tool call: {chunk.tool_call.name}({chunk.tool_call.arguments[:50]}...)")
+                elif chunk.message_type == 'tool_return_message':
+                    logger.info(f"📋 Tool result: {chunk.name} - {chunk.status}")
+                elif chunk.message_type == 'assistant_message':
+                    logger.info(f"💬 Assistant: {chunk.content[:100]}...")
+                else:
+                    logger.info(f"📨 {chunk.message_type}: {str(chunk)[:100]}...")
+            else:
+                logger.info(f"📦 Stream status: {chunk}")
+
+            # Log full chunk for debugging
+            logger.debug(f"Full streaming chunk: {chunk}")
+            all_messages.append(chunk)
+            if str(chunk) == 'done':
+                break
+
+        # Convert streaming response to standard format for compatibility
+        message_response = type('StreamingResponse', (), {
+            'messages': [msg for msg in all_messages if hasattr(msg, 'message_type')]
+        })()
     except Exception as api_error:
         import traceback
         error_str = str(api_error)
```
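
One non-obvious piece of the hunk above is the compatibility shim: `type('StreamingResponse', (), {...})()` creates a class on the fly and instantiates it, yielding an object with a `.messages` attribute shaped like the old synchronous response, so downstream handling needs no changes. A minimal, self-contained illustration of the idiom (the values here are made up):

```python
from types import SimpleNamespace

# type(name, bases, namespace) builds a class dynamically; the dict entries
# become class attributes, and the trailing () instantiates it.
shim = type('StreamingResponse', (), {'messages': ['msg1', 'msg2']})()
assert shim.messages == ['msg1', 'msg2']

# types.SimpleNamespace is the more idiomatic way to get the same duck-typed object:
shim = SimpleNamespace(messages=['msg1', 'msg2'])
assert shim.messages == ['msg1', 'msg2']
```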