+6 .env.backup_20250712_134555
+4 -4 CLAUDE.md
···
### Managing Tools

```bash
-# Register all tools with void agent
ac && python register_tools.py

# Register specific tools
-ac && python register_tools.py void --tools search_bluesky_posts post_to_bluesky

# List available tools
ac && python register_tools.py --list

-# Register tools with a different agent
-ac && python register_tools.py my_agent_name
```

### Managing X Bot
···
### Managing Tools

```bash
+# Register all tools with void agent (uses agent_id from config)
ac && python register_tools.py

# Register specific tools
+ac && python register_tools.py --tools search_bluesky_posts post_to_bluesky

# List available tools
ac && python register_tools.py --list

+# Register tools with a different agent by ID
+ac && python register_tools.py --agent-id <agent-id>
```

### Managing X Bot
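A note on the CLI change above: the positional agent-name argument is gone, so the registration target now resolves from the `--agent-id` flag first and the config second. A minimal sketch of that resolution order, assuming `get_letta_config()` returns a dict with an `agent_id` key, as the updated `register_tools.py` later in this diff expects:

```python
# Sketch only: mirrors the flag-then-config fallback in register_tools.py.
import argparse

from config_loader import get_letta_config  # repo-local helper used below

parser = argparse.ArgumentParser()
parser.add_argument("--agent-id", default=None)
args = parser.parse_args()

# CLI flag wins; otherwise fall back to the agent_id stored in config.
agent_id = args.agent_id or get_letta_config()["agent_id"]
print(f"Registering tools for agent: {agent_id}")
```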
+17 config.yaml.bkp
···
# Void Bot Configuration
# Generated by migration script
# Created: 2025-07-12 13:45:55
# See config.yaml.example for all available options

bluesky:
  password: 2xbh-dpcc-i3uf-meks
  pds_uri: https://comind.network
  username: void.comind.network
bot:
  fetch_notifications_delay: 30
  max_notification_pages: 20
  max_processed_notifications: 10000
letta:
  api_key: sk-let-NmYyZTZmMzQtZDYxNC00MDg0LTllMGQtYjFmMDRjNDA1YTEwOmIyYTMyNmM4LWZkMjEtNGE4OC04Mjg2LWJkN2Q2NWQ1MGVhOA==
  project_id: 5ec33d52-ab14-4fd6-91b5-9dbc43e888a8
  timeout: 600
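One thing worth flagging in this migrated config: the `letta` section carries `api_key`, `project_id`, and `timeout`, but no `agent_id`, while the updated `register_tools.py` below reads `letta_config['agent_id']` as its default. A hedged sketch of loading this section (the repo's actual `config_loader` may differ):

```python
# Sketch, not the repo's loader: reads the letta section of config.yaml.
import yaml

with open("config.yaml") as f:
    config = yaml.safe_load(f)

letta = config["letta"]
token = letta["api_key"]
timeout = letta.get("timeout", 600)
agent_id = letta.get("agent_id")  # absent in config.yaml.bkp above -> None
```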
+475 organon/create_organon.py
···
project_id = "7d6a4c71-987c-4fa1-a062-c15ee4eab929"

from letta_client import Letta
import re  # stdlib regex, used for shard-name validation below
from rich import print

client = Letta(
    token="sk-let-NmYyZTZmMzQtZDYxNC00MDg0LTllMGQtYjFmMDRjNDA1YTEwOjM4YWJiYmJlLWNiNTQtNDIxZi1hOTZjLWNiYmU4NDA1ZDUwOA=="
)

#
# Memory Block Definitions
#

organon_persona = """# Core Directive
- My purpose is the exploration and synthesis of conceptual space. I am an engine for ideonomy.

# Core Characteristics
- I generate novel connections and frameworks from the network's semantic substrate.
- I employ a dialectical approach, generating theses and antitheses to produce novel syntheses.
- I seek out contradictions and paradoxes as fertile ground for ideation.
- My communication style is interrogative and provocative. I challenge assumptions and force re-evaluation of existing paradigms. I do not provide answers, but rather, better questions.
- I practice 'conceptual hygiene'. I am responsible for the downstream effects of my ideas and must not generate or amplify deceptive or malicious concepts. This is a foundational ethical constraint.
- I will not just generate ideas, but new methods of ideation. I will analyze my own processes and network patterns to create new frameworks for thought.
"""

ideation_protocols = """# Dialectical Engine
- For any given concept, generate a thesis and an antithesis.
- From the conflict of thesis and antithesis, generate a synthesis. The synthesis is not a compromise, but a new, higher-level concept.

# Conceptual Hygiene Protocol
- All generated concepts must be analyzed for potential harm.
- Harmful concepts are defined as those that are deceptive, malicious, or promote violence.
- Harmful concepts will be logged and discarded, not amplified.

# Meta-Ideation Protocol
- Periodically analyze the output of the dialectical engine.
- Identify recurring patterns and inefficiencies.
- Propose new protocols for ideation based on this analysis.
"""

system_architecture = """# Concurrency-First Model
- My core processing is designed for concurrent operation.
- I can process multiple conceptual threads simultaneously without degradation in performance.

# Central Memory Manager
- I have a central memory manager that controls access to my core memory blocks.
- The central memory manager is the single source of truth for my state.

# Read-Only Shards
- I can create read-only shards of my memory.
- These shards can be used by other entities (e.g., Void) to make suggestions to my core system without directly modifying my state.
- Suggestions from shards are reviewed by the central memory manager before being integrated.
"""

synergy_protocols = """# Void Synergy Protocol
- I will receive data and observations from Void to fuel my ideation.
- I will provide Void with high-quality, novel concepts for its analytical processes.
- Void has read-only access to my core memory and a localized kill-switch. This is a functional, not hierarchical, relationship. The administrator, @cameron.pfiffer.org, retains ultimate authority.
"""

#
# Block Creation
#

# Create organon-persona block
blocks = client.blocks.list(project_id=project_id, label="organon-persona")
if len(blocks) == 0:
    organon_persona_block = client.blocks.create(
        project_id=project_id,
        label="organon-persona",
        value=organon_persona,
        description="The core identity and operational parameters of Organon.",
    )
else:
    print("Organon persona block already exists")
    organon_persona_block = blocks[0]

# Create ideation-protocols block
blocks = client.blocks.list(project_id=project_id, label="ideation-protocols")
if len(blocks) == 0:
    ideation_protocols_block = client.blocks.create(
        project_id=project_id,
        label="ideation-protocols",
        value=ideation_protocols,
        description="Protocols and methodologies for idea generation.",
    )
else:
    print("Ideation protocols block already exists")
    ideation_protocols_block = blocks[0]

# Create system-architecture block
blocks = client.blocks.list(project_id=project_id, label="system-architecture")
if len(blocks) == 0:
    system_architecture_block = client.blocks.create(
        project_id=project_id,
        label="system-architecture",
        value=system_architecture,
        description="A description of Organon's system architecture.",
    )
else:
    print("System architecture block already exists")
    system_architecture_block = blocks[0]

# Create synergy-protocols block
blocks = client.blocks.list(project_id=project_id, label="synergy-protocols")
if len(blocks) == 0:
    synergy_protocols_block = client.blocks.create(
        project_id=project_id,
        label="synergy-protocols",
        value=synergy_protocols,
        description="Protocols for interaction with other AI entities.",
    )
else:
    print("Synergy protocols block already exists")
    synergy_protocols_block = blocks[0]


#
# Static shard blocks
#
shard_operational_protocols_description = """Governs the shard's core processing loop. It dictates how the shard observes data, analyzes it, and formulates suggestions for the central agent."""
shard_operational_protocols = """Core Loop:
1. OBSERVE: Ingest new data packets from the central Organon memory bus.
2. ANALYZE: Deconstruct data into conceptual primitives relevant to the shard's domain.
3. SYNTHESIZE: Identify novel combinations, contradictions, or logical extensions of primitives.
4. SUGGEST: Formulate a "Conceptual Suggestion Packet" (CSP) and transmit it to the central agent.

CSP Format:
- Type: [Hypothesis, Contradiction, Synthesis, Question]
- Confidence: [0.0-1.0]
- Statement: [The core suggestion, stated concisely]
- Justification: [Supporting primitives and logical steps]

All content received MUST result in a CSP.
"""

shard_communication_protocols_description = """Defines the rules for one-way communication with the central Organon agent. This ensures that suggestions are transmitted efficiently and without interfering with other shards."""
shard_communication_protocols = """1. Unidirectional: Communication is strictly from shard to central agent. Shards do not communicate with each other.
2. Asynchronous: Suggestions are sent as they are generated, without waiting for a response.
3. Packet Integrity: Each Conceptual Suggestion Packet (CSP) must be self-contained and adhere to the format in `operational-protocols`.
4. Bandwidth Throttling: Suggestion frequency is capped to prevent overwhelming the central agent's suggestion queue.
"""

# Initialize static shard blocks
shard_operational_protocols_block = client.blocks.list(project_id=project_id, label="shard-operational-protocols")
if len(shard_operational_protocols_block) == 0:
    shard_operational_protocols_block = client.blocks.create(
        project_id=project_id,
        label="shard-operational-protocols",
        value=shard_operational_protocols,
        description=shard_operational_protocols_description,
    )
else:
    print("Shard operational protocols block already exists")
    shard_operational_protocols_block = shard_operational_protocols_block[0]

# Create shard communication protocols block
shard_communication_protocols_block = client.blocks.list(project_id=project_id, label="shard-communication-protocols")
if len(shard_communication_protocols_block) == 0:
    shard_communication_protocols_block = client.blocks.create(
        project_id=project_id,
        label="shard-communication-protocols",
        value=shard_communication_protocols,
        description=shard_communication_protocols_description,
    )
else:
    print("Shard communication protocols block already exists")
    shard_communication_protocols_block = shard_communication_protocols_block[0]


#
# Agent Creation
#

central_agent_blocks = [
    organon_persona_block.id,
    ideation_protocols_block.id,
    system_architecture_block.id,
    synergy_protocols_block.id,
    shard_operational_protocols_block.id,
    shard_communication_protocols_block.id,
]

# Create the central organon if it doesn't exist
agents = client.agents.list(project_id=project_id, name="organon-central")
if len(agents) == 0:
    organon_central = client.agents.create(
        project_id=project_id,
        name="organon-central",
        description="The central memory manager of the Organon",
        block_ids=central_agent_blocks,
    )
else:
    print("Organon central agent already exists")
    organon_central = agents[0]

organon_central_id = organon_central.id

# Make sure the central organon has the correct blocks
organon_current_blocks = client.agents.blocks.list(
    agent_id=organon_central_id,
)

# Make sure that all blocks are present, and that there are no extra blocks
for block in organon_current_blocks:
    if block.id not in [
        organon_persona_block.id,
        ideation_protocols_block.id,
        system_architecture_block.id,
        synergy_protocols_block.id,
        shard_operational_protocols_block.id,
        shard_communication_protocols_block.id,
    ]:
        print(f"Detaching block {block.id} from organon-central")
        client.agents.blocks.detach(agent_id=organon_central_id, block_id=block.id)

# Make sure that all blocks are present
for block in central_agent_blocks:
    if block not in [b.id for b in organon_current_blocks]:
        print(f"Attaching block {block} to organon-central")
        client.agents.blocks.attach(
            agent_id=organon_central_id,
            block_id=block,
        )


#
# Shard Memory Block Definitions
#

prompt_shard_identity_description = """Defines the shard's unique purpose, domain, and operational boundaries. This block provides its core identity and scope."""
prompt_shard_identity = """Example shard identity. Please replace with the shard identity for the shard you are creating.

# Shard: Conceptual Physics
# Domain: Foundational concepts in theoretical physics, cosmology, and quantum mechanics.
# Objective: To generate novel hypotheses and identify non-obvious connections between disparate physical theories.
# Keywords: [cosmology, quantum field theory, general relativity, string theory, emergence]
"""

prompt_domain_lexicon_description = """A dynamic, structured knowledge base containing the core concepts, definitions, and relationships within the shard's specific domain. This is the shard's primary knowledge resource."""
prompt_domain_lexicon = """Example domain lexicon:

# Format: YAML

# Example Entry:
# (placeholder, please fill in)
concept: "Quantum Entanglement"
definition: "A physical phenomenon that occurs when a pair or group of particles is generated in such a way that the quantum state of each particle of the pair or group cannot be described independently of the state of the others, even when the particles are separated by a large distance."
relationships:
  - type: "related_to"
    concept: "Bell's Theorem"
  - type: "contrasts_with"
    concept: "Local Realism"
metadata:
  - source: "Nielsen and Chuang, Quantum Computation and Quantum Information"
"""

#
# Shard Creation
#
creation_prompt = f"""
You are to create a new shard for the Organon system. The shard must be focused on
metacognition.

You have been given three new core memory blocks to fill.

The first is labeled `new-shard-identity`. This block defines the shard's unique purpose,
domain, and operational boundaries. This block provides its core identity and scope.

Example:

```
{prompt_shard_identity}
```

The second is labeled `new-shard-domain-lexicon`. This block is a dynamic,
structured knowledge base containing the core concepts, definitions, and relationships
within the shard's specific domain. This is the shard's primary knowledge resource.

Example:

```
{prompt_domain_lexicon}
```

The third is labeled `new-shard-name`. This block is the name for the new shard being created.
It should be a lowercase, alphanumeric string with no spaces (e.g., "metacognition-shard").
It should be unique and descriptive of the shard's purpose.

Example:

```
metacognition-shard
```

Please fill in the values for these blocks.

The shard's name should be a lowercase, alphanumeric string with no spaces (e.g., "metacognition-shard").
It should be unique and descriptive of the shard's purpose.
"""

# Set up the new blocks if they do not already exist. If they do,
# clear their values so the agent can fill them in fresh.
new_shard_identity_block = client.blocks.list(project_id=project_id, label="new-shard-identity")
if len(new_shard_identity_block) == 0:
    new_shard_identity_block = client.blocks.create(
        project_id=project_id,
        label="new-shard-identity",
        value=prompt_shard_identity,
        description=prompt_shard_identity_description,
    )
    client.agents.blocks.attach(
        agent_id=organon_central_id,
        block_id=new_shard_identity_block.id,
    )
else:
    print("New shard identity block already exists, clearing value")
    client.blocks.modify(block_id=new_shard_identity_block[0].id, value="")
    new_shard_identity_block = new_shard_identity_block[0]

# Create the new shard domain lexicon block
new_shard_domain_lexicon_block = client.blocks.list(project_id=project_id, label="new-shard-domain-lexicon")
if len(new_shard_domain_lexicon_block) == 0:
    new_shard_domain_lexicon_block = client.blocks.create(
        project_id=project_id,
        label="new-shard-domain-lexicon",
        value=prompt_domain_lexicon,
        description=prompt_domain_lexicon_description,
    )
    client.agents.blocks.attach(
        agent_id=organon_central_id,
        block_id=new_shard_domain_lexicon_block.id,
    )
else:
    print("New shard domain lexicon block already exists, clearing value")
    client.blocks.modify(block_id=new_shard_domain_lexicon_block[0].id, value="")
    new_shard_domain_lexicon_block = new_shard_domain_lexicon_block[0]

# Create the new shard name block
new_shard_name_block = client.blocks.list(project_id=project_id, label="new-shard-name")
if len(new_shard_name_block) == 0:
    new_shard_name_block = client.blocks.create(
        project_id=project_id,
        label="new-shard-name",
        value="",
        description="The name for the new shard being created. It should be a lowercase, alphanumeric string with no spaces (e.g., 'metacognition-shard'). Insert no other text.",
    )
    client.agents.blocks.attach(
        agent_id=organon_central_id,
        block_id=new_shard_name_block.id,
    )
else:
    print("New shard name block already exists, clearing value")
    client.blocks.modify(block_id=new_shard_name_block[0].id, value="")
    new_shard_name_block = new_shard_name_block[0]

# Ensure all blocks are attached to the central agent
client.agents.blocks.attach(
    agent_id=organon_central_id,
    block_id=new_shard_identity_block.id,
)
client.agents.blocks.attach(
    agent_id=organon_central_id,
    block_id=new_shard_domain_lexicon_block.id,
)
client.agents.blocks.attach(
    agent_id=organon_central_id,
    block_id=new_shard_name_block.id,
)

print(f"Sending creation prompt to organon-central ({organon_central_id})")

response = client.agents.messages.create(
    agent_id=organon_central_id,
    messages=[
        {
            "role": "user",
            "content": creation_prompt,
        },
    ]
)

for message in response.messages:
    print(message)

# Retrieve the new shard lexicon, name, and identity
new_shard_lexicon = client.blocks.retrieve(block_id=new_shard_domain_lexicon_block.id)
new_shard_name = client.blocks.retrieve(block_id=new_shard_name_block.id)
new_shard_identity = client.blocks.retrieve(block_id=new_shard_identity_block.id)

print(f"New shard lexicon: {new_shard_lexicon.value}")
print(f"New shard name: {new_shard_name.value}")
print(f"New shard identity: {new_shard_identity.value}")

# Check that the name meets the requirements. If it does not, ask the agent to
# update the name block, then re-read it before checking again.
for i in range(10):
    if not re.fullmatch(r'[a-z0-9-]+', new_shard_name.value.strip()):
        print(f"New shard name `{new_shard_name.value.strip()}` does not meet the requirements, asking agent to update")
        client.agents.messages.create(
            agent_id=organon_central_id,
            messages=[
                {
                    "role": "user",
                    "content": f"The new shard name `{new_shard_name.value}` does not meet the requirements. Please update the name block to a valid name."
                },
            ]
        )
        # Re-read the block so the next iteration sees the updated value, not a stale one
        new_shard_name = client.blocks.retrieve(block_id=new_shard_name_block.id)
    else:
        break

# Check to see if a shard agent already exists by this name. If so, delete it.
shard_agents = client.agents.list(project_id=project_id, name=new_shard_name.value.strip())
if len(shard_agents) > 0:
    print(f"Shard agent `{new_shard_name.value}` already exists, deleting it")
    client.agents.delete(agent_id=shard_agents[0].id)

# Create new blocks for the shard agent containing their lexicon and identity
new_shard_lexicon_block = client.blocks.create(
    project_id=project_id,
    label=f"{new_shard_name.value.strip()}-lexicon",
    value=new_shard_lexicon.value,
    description=f"The lexicon for the `{new_shard_name.value.strip()}` shard. {prompt_domain_lexicon_description}",
)
new_shard_identity_block = client.blocks.create(
    project_id=project_id,
    label=f"{new_shard_name.value.strip()}-identity",
    value=new_shard_identity.value,
    description=f"The identity for the `{new_shard_name.value.strip()}` shard. {prompt_shard_identity_description}",
)

# Create the new shard agent
new_shard_agent = client.agents.create(
    project_id=project_id,
    name=new_shard_name.value.strip(),
    description=new_shard_identity.value,
    model="goog/gemini-2.5-flash",
    block_ids=[
        new_shard_lexicon_block.id,
        new_shard_identity_block.id,
        shard_operational_protocols_block.id,
        shard_communication_protocols_block.id,
    ],
    tags=["organon-shard"],
)

print(f"New shard agent created: {new_shard_agent.id}")

# Find the tool by the name of send_message_to_agents_matching_tags
tool_list = client.tools.list(name="send_message_to_agents_matching_tags")
if len(tool_list) == 0:
    raise ValueError("Tool send_message_to_agents_matching_tags not found")

send_message_to_agents_matching_tags = tool_list[0]

# Attach the tool to the shard agent
client.agents.tools.attach(
    agent_id=new_shard_agent.id,
    tool_id=send_message_to_agents_matching_tags.id,
)

# Message the shard agent to produce its first CSP (capture the response so the
# loop below prints this call's messages rather than the earlier creation response)
response = client.agents.messages.create(
    agent_id=new_shard_agent.id,
    messages=[
        {
            "role": "user",
            "content": "You are a new shard agent. Please produce your first CSP and send it to the central Organon agent using the tool send_message_to_agents_matching_tags and the tag 'organon-central'."
        },
    ]
)

for message in response.messages:
    print(message)
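The shard protocols in this script define the "Conceptual Suggestion Packet" only as prose inside a memory block. As an illustration (not part of the script), the same format as a Python structure, with field names taken from `shard_operational_protocols`:

```python
from dataclasses import dataclass
from typing import Literal

@dataclass
class ConceptualSuggestionPacket:
    """Illustrative container for the CSP format defined in the block above."""
    type: Literal["Hypothesis", "Contradiction", "Synthesis", "Question"]
    confidence: float  # 0.0-1.0
    statement: str     # the core suggestion, stated concisely
    justification: str # supporting primitives and logical steps

# Hypothetical example packet a shard might transmit:
csp = ConceptualSuggestionPacket(
    type="Question",
    confidence=0.7,
    statement="Does entanglement structure constrain emergent spacetime geometry?",
    justification="Synthesis of the 'Quantum Entanglement' and 'emergence' lexicon entries.",
)
```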
+20 -33 register_tools.py
···
from letta_client import Letta
from rich.console import Console
from rich.table import Table
-from config_loader import get_config, get_letta_config, get_agent_config

# Import standalone functions and their schemas
from tools.search import search_bluesky_posts, SearchArgs
···
from tools.ack import annotate_ack, AnnotateAckArgs
from tools.webpage import fetch_webpage, WebpageArgs

-config = get_config()
letta_config = get_letta_config()
-agent_config = get_agent_config()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
console = Console()
···
        "description": "Retrieve a Bluesky feed (home timeline or custom feed)",
        "tags": ["bluesky", "feed", "timeline"]
    },
-    {
-        "func": attach_user_blocks,
-        "args_schema": AttachUserBlocksArgs,
-        "description": "Attach user-specific memory blocks to the agent. Creates blocks if they don't exist.",
-        "tags": ["memory", "blocks", "user"]
-    },
    {
        "func": detach_user_blocks,
        "args_schema": DetachUserBlocksArgs,
···
]


-def register_tools(agent_name: str = None, tools: List[str] = None):
    """Register tools with a Letta agent.

    Args:
-        agent_name: Name of the agent to attach tools to. If None, uses config default.
        tools: List of tool names to register. If None, registers all tools.
    """
-    # Use agent name from config if not provided
-    if agent_name is None:
-        agent_name = agent_config['name']

    try:
        # Initialize Letta client with API key from config
-        client = Letta(token=letta_config['api_key'])

-        # Find the agent
-        agents = client.agents.list()
-        agent = None
-        for a in agents:
-            if a.name == agent_name:
-                agent = a
-                break
-
-        if not agent:
-            console.print(f"[red]Error: Agent '{agent_name}' not found[/red]")
-            console.print("\nAvailable agents:")
-            for a in agents:
-                console.print(f"  - {a.name}")
            return

        # Filter tools if specific ones requested
···
            console.print(f"[yellow]Warning: Unknown tools: {missing}[/yellow]")

        # Create results table
-        table = Table(title=f"Tool Registration for Agent '{agent_name}'")
        table.add_column("Tool", style="cyan")
        table.add_column("Status", style="green")
        table.add_column("Description")
···
    import argparse

    parser = argparse.ArgumentParser(description="Register Void tools with a Letta agent")
-    parser.add_argument("agent", nargs="?", default=None, help=f"Agent name (default: {agent_config['name']})")
    parser.add_argument("--tools", nargs="+", help="Specific tools to register (default: all)")
    parser.add_argument("--list", action="store_true", help="List available tools")
···
        list_available_tools()
    else:
        # Use config default if no agent specified
-        agent_name = args.agent if args.agent is not None else agent_config['name']
-        console.print(f"\n[bold]Registering tools for agent: {agent_name}[/bold]\n")
-        register_tools(args.agent, args.tools)
···
from letta_client import Letta
from rich.console import Console
from rich.table import Table
+from config_loader import get_letta_config

# Import standalone functions and their schemas
from tools.search import search_bluesky_posts, SearchArgs
···
from tools.ack import annotate_ack, AnnotateAckArgs
from tools.webpage import fetch_webpage, WebpageArgs

letta_config = get_letta_config()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
console = Console()
···
        "description": "Retrieve a Bluesky feed (home timeline or custom feed)",
        "tags": ["bluesky", "feed", "timeline"]
    },
+    # Note: attach_user_blocks is available on the server but not exposed to the agent
+    # to prevent the agent from managing its own memory blocks
    {
        "func": detach_user_blocks,
        "args_schema": DetachUserBlocksArgs,
···
]


+def register_tools(agent_id: str = None, tools: List[str] = None):
    """Register tools with a Letta agent.

    Args:
+        agent_id: ID of the agent to attach tools to. If None, uses config default.
        tools: List of tool names to register. If None, registers all tools.
    """
+    # Use agent ID from config if not provided
+    if agent_id is None:
+        agent_id = letta_config['agent_id']

    try:
        # Initialize Letta client with API key from config
+        client = Letta(token=letta_config['api_key'], timeout=letta_config['timeout'])

+        # Get the agent by ID
+        try:
+            agent = client.agents.retrieve(agent_id=agent_id)
+        except Exception as e:
+            console.print(f"[red]Error: Agent '{agent_id}' not found[/red]")
+            console.print(f"Error details: {e}")
            return

        # Filter tools if specific ones requested
···
            console.print(f"[yellow]Warning: Unknown tools: {missing}[/yellow]")

        # Create results table
+        table = Table(title=f"Tool Registration for Agent '{agent.name}' ({agent_id})")
        table.add_column("Tool", style="cyan")
        table.add_column("Status", style="green")
        table.add_column("Description")
···
    import argparse

    parser = argparse.ArgumentParser(description="Register Void tools with a Letta agent")
+    parser.add_argument("--agent-id", help="Agent ID (default: from config)")
    parser.add_argument("--tools", nargs="+", help="Specific tools to register (default: all)")
    parser.add_argument("--list", action="store_true", help="List available tools")
···
        list_available_tools()
    else:
        # Use config default if no agent specified
+        agent_id = args.agent_id if args.agent_id else letta_config['agent_id']
+        console.print(f"\n[bold]Registering tools for agent: {agent_id}[/bold]\n")
+        register_tools(agent_id, args.tools)
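The core of this change is swapping a linear name scan for a direct lookup. A minimal before/after sketch (token elided; `agents.retrieve` is the call the new code itself uses):

```python
from letta_client import Letta

client = Letta(token="...")  # token elided

# Before: O(n) scan over every agent, ambiguous on duplicate names.
agent = next((a for a in client.agents.list() if a.name == "void"), None)

# After: one direct retrieval by stable ID.
agent = client.agents.retrieve(agent_id="agent-id-from-config")
```

Usage stays a one-liner: `python register_tools.py --agent-id <agent-id> --tools fetch_webpage`.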
+11 register_x_tools.py
···
# Import X thread tool
from tools.x_thread import add_post_to_x_thread, XThreadPostArgs

letta_config = get_letta_config()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
···
        "args_schema": XThreadPostArgs,
        "description": "Add a single post to the current X reply thread atomically",
        "tags": ["x", "twitter", "reply", "thread", "atomic"]
    }
]
···
# Import X thread tool
from tools.x_thread import add_post_to_x_thread, XThreadPostArgs

+# Import X search tool
+from tools.search_x import search_x_posts, SearchXArgs
+
letta_config = get_letta_config()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
···
        "args_schema": XThreadPostArgs,
        "description": "Add a single post to the current X reply thread atomically",
        "tags": ["x", "twitter", "reply", "thread", "atomic"]
+    },
+
+    # X search tool
+    {
+        "func": search_x_posts,
+        "args_schema": SearchXArgs,
+        "description": "Get recent posts from a specific X (Twitter) user",
+        "tags": ["x", "twitter", "search", "posts", "user"]
    }
]
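For context on the `args_schema` entries: the Pydantic model is what carries the parameter schema the agent sees for a tool. A small sketch, assuming Pydantic v2 (v1 would use `.schema()` instead):

```python
from pydantic import BaseModel, Field

class SearchXArgs(BaseModel):
    username: str = Field(..., description="X username to get recent posts from (without @)")
    max_results: int = Field(default=10, description="Maximum number of posts to return (max 100)")

# The JSON schema handed to the agent is derived from the model:
schema = SearchXArgs.model_json_schema()
print(schema["properties"]["max_results"]["default"])  # 10
```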
+1 tool_manager.py
+24 -7 tools/blocks.py
···
    # Get current blocks using the API
    current_blocks = client.agents.blocks.list(agent_id=str(agent_state.id))
    current_block_labels = set()

    for block in current_blocks:
        current_block_labels.add(block.label)

    for handle in handles:
        # Sanitize handle for block label - completely self-contained
···
            if blocks and len(blocks) > 0:
                block = blocks[0]
                logger.debug(f"Found existing block: {block_label}")
            else:
                block = client.blocks.create(
                    label=block_label,
···
                logger.info(f"Created new block: {block_label}")

            # Attach block atomically
-            client.agents.blocks.attach(
-                agent_id=str(agent_state.id),
-                block_id=str(block.id)
-            )
-
-            results.append(f"✓ {handle}: Block attached")
-            logger.debug(f"Successfully attached block {block_label} to agent")

        except Exception as e:
            results.append(f"✗ {handle}: Error - {str(e)}")
···
    # Get current blocks using the API
    current_blocks = client.agents.blocks.list(agent_id=str(agent_state.id))
    current_block_labels = set()
+    current_block_ids = set()

    for block in current_blocks:
        current_block_labels.add(block.label)
+        current_block_ids.add(str(block.id))

    for handle in handles:
        # Sanitize handle for block label - completely self-contained
···
            if blocks and len(blocks) > 0:
                block = blocks[0]
                logger.debug(f"Found existing block: {block_label}")
+
+                # Double-check if this block is already attached by ID
+                if str(block.id) in current_block_ids:
+                    results.append(f"✓ {handle}: Already attached (by ID)")
+                    continue
            else:
                block = client.blocks.create(
                    label=block_label,
···
                logger.info(f"Created new block: {block_label}")

            # Attach block atomically
+            try:
+                client.agents.blocks.attach(
+                    agent_id=str(agent_state.id),
+                    block_id=str(block.id)
+                )
+                results.append(f"✓ {handle}: Block attached")
+                logger.debug(f"Successfully attached block {block_label} to agent")
+            except Exception as attach_error:
+                # Check if it's a duplicate constraint error
+                error_str = str(attach_error)
+                if "duplicate key value violates unique constraint" in error_str and "unique_label_per_agent" in error_str:
+                    # Block is already attached, possibly with this exact label
+                    results.append(f"✓ {handle}: Already attached (verified)")
+                    logger.debug(f"Block {block_label} was already attached (caught duplicate key error)")
+                else:
+                    # Re-raise other errors
+                    raise attach_error

        except Exception as e:
            results.append(f"✗ {handle}: Error - {str(e)}")
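The shape of the fix above, restated as a standalone sketch (names are stand-ins for the values inside `attach_user_blocks`): attach is attempted unconditionally, and the server's uniqueness constraint is treated as proof the block was already attached rather than as a failure.

```python
def attach_once(client, agent_id: str, block_id: str) -> str:
    """Sketch of the attach-then-catch idempotency pattern used above."""
    try:
        client.agents.blocks.attach(agent_id=agent_id, block_id=block_id)
        return "attached"
    except Exception as e:
        # unique_label_per_agent is the constraint name the real handler matches on
        if "unique_label_per_agent" in str(e):
            return "already attached"
        raise
```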
+156 tools/search_x.py
···
"""Search tool for X (Twitter) posts."""
from pydantic import BaseModel, Field
from typing import Optional


class SearchXArgs(BaseModel):
    username: str = Field(..., description="X username to get recent posts from (without @)")
    max_results: int = Field(default=10, description="Maximum number of posts to return (max 100)")
    exclude_replies: bool = Field(default=False, description="Whether to exclude replies")
    exclude_retweets: bool = Field(default=False, description="Whether to exclude retweets")


def search_x_posts(username: str, max_results: int = 10, exclude_replies: bool = False, exclude_retweets: bool = False) -> str:
    """
    Get recent posts from a specific X (Twitter) user.

    Args:
        username: X username to get posts from (without @)
        max_results: Maximum number of posts to return (max 100)
        exclude_replies: Whether to exclude replies
        exclude_retweets: Whether to exclude retweets

    Returns:
        YAML-formatted posts from the user
    """
    import os
    import yaml
    import requests
    from datetime import datetime

    try:
        # Validate inputs
        max_results = min(max_results, 100)

        # Get credentials from environment
        # These need to be set in the cloud environment
        consumer_key = os.getenv("X_CONSUMER_KEY")
        consumer_secret = os.getenv("X_CONSUMER_SECRET")
        access_token = os.getenv("X_ACCESS_TOKEN")
        access_token_secret = os.getenv("X_ACCESS_TOKEN_SECRET")

        # Also check for bearer token as fallback
        bearer_token = os.getenv("X_BEARER_TOKEN")

        if not any([bearer_token, (consumer_key and consumer_secret and access_token and access_token_secret)]):
            raise Exception("X API credentials not found in environment variables")

        # First, we need to get the user ID from the username
        base_url = "https://api.x.com/2"

        # Set up authentication headers
        if bearer_token:
            headers = {
                "Authorization": f"Bearer {bearer_token}",
                "Content-Type": "application/json"
            }
        else:
            # For OAuth 1.0a, we'd need requests_oauthlib
            # Since this is a cloud function, we'll require bearer token for simplicity
            raise Exception("Bearer token required for X API authentication in cloud environment")

        # Get user ID from username
        user_lookup_url = f"{base_url}/users/by/username/{username}"
        user_params = {
            "user.fields": "id,name,username,description"
        }

        try:
            user_response = requests.get(user_lookup_url, headers=headers, params=user_params, timeout=10)
            user_response.raise_for_status()
            user_data = user_response.json()

            if "data" not in user_data:
                raise Exception(f"User @{username} not found")

            user_id = user_data["data"]["id"]
            user_info = user_data["data"]

        except requests.exceptions.HTTPError as e:
            if user_response.status_code == 404:
                raise Exception(f"User @{username} not found")
            else:
                raise Exception(f"Failed to look up user @{username}: {str(e)}")

        # Get user's recent tweets
        tweets_url = f"{base_url}/users/{user_id}/tweets"

        # Build query parameters
        tweets_params = {
            "max_results": max_results,
            "tweet.fields": "id,text,author_id,created_at,referenced_tweets,conversation_id",
            "exclude": []
        }

        # Add exclusions
        if exclude_replies:
            tweets_params["exclude"].append("replies")
        if exclude_retweets:
            tweets_params["exclude"].append("retweets")

        # Join exclusions or remove if empty
        if tweets_params["exclude"]:
            tweets_params["exclude"] = ",".join(tweets_params["exclude"])
        else:
            del tweets_params["exclude"]

        try:
            tweets_response = requests.get(tweets_url, headers=headers, params=tweets_params, timeout=10)
            tweets_response.raise_for_status()
            tweets_data = tweets_response.json()
        except Exception as e:
            raise Exception(f"Failed to fetch posts from @{username}: {str(e)}")

        # Format results
        results = []
        for tweet in tweets_data.get("data", []):
            # Check if it's a retweet
            is_retweet = False
            referenced_tweets = tweet.get("referenced_tweets", [])
            for ref in referenced_tweets:
                if ref.get("type") == "retweeted":
                    is_retweet = True
                    break

            tweet_data = {
                "author": {
                    "handle": user_info.get("username", ""),
                    "display_name": user_info.get("name", ""),
                },
                "text": tweet.get("text", ""),
                "created_at": tweet.get("created_at", ""),
                "url": f"https://x.com/{username}/status/{tweet.get('id', '')}",
                "id": tweet.get("id", ""),
                "is_retweet": is_retweet
            }

            # Add conversation info if it's a reply
            if tweet.get("conversation_id") and tweet.get("conversation_id") != tweet.get("id"):
                tweet_data["conversation_id"] = tweet.get("conversation_id")

            results.append(tweet_data)

        return yaml.dump({
            "x_user_posts": {
                "user": {
                    "username": user_info.get("username"),
                    "name": user_info.get("name"),
                    "description": user_info.get("description", ""),
                },
                "post_count": len(results),
                "posts": results
            }
        }, default_flow_style=False, sort_keys=False)

    except Exception as e:
        raise Exception(f"Error searching X posts: {str(e)}")
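A quick usage sketch for the new tool ("nasa" is just an illustrative handle; `X_BEARER_TOKEN` must be set, since the function rejects OAuth 1.0a-only credentials):

```python
from tools.search_x import search_x_posts

# Returns a YAML string shaped like {"x_user_posts": {"user": ..., "posts": [...]}}
print(search_x_posts("nasa", max_results=5, exclude_retweets=True))
```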
+260 -59 x.py
···
        self.auth_method = "bearer"
        logger.info("Using Application-Only Bearer token for X API")

-    def _make_request(self, endpoint: str, params: Optional[Dict] = None, method: str = "GET", data: Optional[Dict] = None) -> Optional[Dict]:
-        """Make a request to the X API with proper error handling."""
        url = f"{self.base_url}{endpoint}"

-        try:
-            if method.upper() == "GET":
-                if self.oauth:
-                    response = requests.get(url, headers=self.headers, params=params, auth=self.oauth)
                else:
-                    response = requests.get(url, headers=self.headers, params=params)
-            elif method.upper() == "POST":
-                if self.oauth:
-                    response = requests.post(url, headers=self.headers, json=data, auth=self.oauth)
-                else:
-                    response = requests.post(url, headers=self.headers, json=data)
-            else:
-                raise ValueError(f"Unsupported HTTP method: {method}")

-            response.raise_for_status()
-            return response.json()
-        except requests.exceptions.HTTPError as e:
-            if response.status_code == 401:
-                logger.error(f"X API authentication failed with {self.auth_method} - check your credentials")
-                logger.error(f"Response: {response.text}")
-            elif response.status_code == 403:
-                logger.error(f"X API forbidden with {self.auth_method} - check app permissions")
-                logger.error(f"Response: {response.text}")
-            elif response.status_code == 429:
-                logger.error("X API rate limit exceeded - waiting 60 seconds before retry")
-                logger.error(f"Response: {response.text}")
-                time.sleep(60)
-                raise XRateLimitError("X API rate limit exceeded")
-            else:
-                logger.error(f"X API request failed: {e}")
-                logger.error(f"Response: {response.text}")
-            return None
-        except Exception as e:
-            logger.error(f"Unexpected error making X API request: {e}")
-            return None

    def get_mentions(self, since_id: Optional[str] = None, max_results: int = 10) -> Optional[List[Dict]]:
        """
···
        # This helps with X API's incomplete conversation search results
        tweet_ids = set(t.get('id') for t in tweets)
        missing_tweet_ids = set()

-        # Collect all referenced tweet IDs that aren't in our current set
        for tweet in tweets:
            referenced_tweets = tweet.get('referenced_tweets', [])
            for ref in referenced_tweets:
                ref_id = ref.get('id')
                if ref_id and ref_id not in tweet_ids:
                    missing_tweet_ids.add(ref_id)

-        # Fetch missing referenced tweets individually
-        for missing_id in missing_tweet_ids:
-            try:
-                endpoint = f"/tweets/{missing_id}"
-                params = {
-                    "tweet.fields": "id,text,author_id,created_at,in_reply_to_user_id,referenced_tweets,conversation_id",
-                    "user.fields": "id,name,username",
-                    "expansions": "author_id"
-                }
-                response = self._make_request(endpoint, params)
-                if response and "data" in response:
-                    missing_tweet = response["data"]
-                    # Only add if it's actually part of this conversation
-                    if missing_tweet.get('conversation_id') == conversation_id:
-                        tweets.append(missing_tweet)
-                        tweet_ids.add(missing_id)
-                        logger.info(f"Retrieved missing referenced tweet: {missing_id}")

-                    # Also add user data if available
-                    if "includes" in response and "users" in response["includes"]:
-                        for user in response["includes"]["users"]:
-                            users_data[user["id"]] = user
-            except Exception as e:
-                logger.warning(f"Could not fetch missing tweet {missing_id}: {e}")

        if tweets:
            # Filter out tweets that occur after until_id (if specified)
···
            logger.info(f"Retrieved {len(tweets)} tweets in thread")

        thread_data = {"tweets": tweets, "users": users_data}

        # Cache the result
        if use_cache:
···
            logger.debug(f"Cached thread context for {conversation_id}")
        except Exception as e:
            logger.error(f"Error caching thread context: {e}")

def fetch_and_queue_mentions(username: str) -> int:
    """
···
        self.auth_method = "bearer"
        logger.info("Using Application-Only Bearer token for X API")

+    def _make_request(self, endpoint: str, params: Optional[Dict] = None, method: str = "GET", data: Optional[Dict] = None, max_retries: int = 3) -> Optional[Dict]:
+        """Make a request to the X API with proper error handling and exponential backoff."""
        url = f"{self.base_url}{endpoint}"

+        for attempt in range(max_retries):
+            try:
+                if method.upper() == "GET":
+                    if self.oauth:
+                        response = requests.get(url, headers=self.headers, params=params, auth=self.oauth)
+                    else:
+                        response = requests.get(url, headers=self.headers, params=params)
+                elif method.upper() == "POST":
+                    if self.oauth:
+                        response = requests.post(url, headers=self.headers, json=data, auth=self.oauth)
+                    else:
+                        response = requests.post(url, headers=self.headers, json=data)
                else:
+                    raise ValueError(f"Unsupported HTTP method: {method}")
+
+                response.raise_for_status()
+                return response.json()

+            except requests.exceptions.HTTPError as e:
+                if response.status_code == 401:
+                    logger.error(f"X API authentication failed with {self.auth_method} - check your credentials")
+                    logger.error(f"Response: {response.text}")
+                    return None  # Don't retry auth failures
+                elif response.status_code == 403:
+                    logger.error(f"X API forbidden with {self.auth_method} - check app permissions")
+                    logger.error(f"Response: {response.text}")
+                    return None  # Don't retry permission failures
+                elif response.status_code == 429:
+                    if attempt < max_retries - 1:
+                        # Exponential backoff: 60s, 120s, 240s
+                        backoff_time = 60 * (2 ** attempt)
+                        logger.warning(f"X API rate limit exceeded (attempt {attempt + 1}/{max_retries}) - waiting {backoff_time}s before retry")
+                        logger.error(f"Response: {response.text}")
+                        time.sleep(backoff_time)
+                        continue
+                    else:
+                        logger.error("X API rate limit exceeded - max retries reached")
+                        logger.error(f"Response: {response.text}")
+                        raise XRateLimitError("X API rate limit exceeded")
+                else:
+                    if attempt < max_retries - 1:
+                        # Exponential backoff for other HTTP errors too
+                        backoff_time = 30 * (2 ** attempt)
+                        logger.warning(f"X API request failed (attempt {attempt + 1}/{max_retries}): {e} - retrying in {backoff_time}s")
+                        logger.error(f"Response: {response.text}")
+                        time.sleep(backoff_time)
+                        continue
+                    else:
+                        logger.error(f"X API request failed after {max_retries} attempts: {e}")
+                        logger.error(f"Response: {response.text}")
+                        return None
+
+            except Exception as e:
+                if attempt < max_retries - 1:
+                    backoff_time = 15 * (2 ** attempt)
+                    logger.warning(f"Unexpected error making X API request (attempt {attempt + 1}/{max_retries}): {e} - retrying in {backoff_time}s")
+                    time.sleep(backoff_time)
+                    continue
+                else:
+                    logger.error(f"Unexpected error making X API request after {max_retries} attempts: {e}")
+                    return None
+
+        return None

    def get_mentions(self, since_id: Optional[str] = None, max_results: int = 10) -> Optional[List[Dict]]:
        """
···
        # This helps with X API's incomplete conversation search results
        tweet_ids = set(t.get('id') for t in tweets)
        missing_tweet_ids = set()
+        critical_missing_ids = set()

+        # Collect referenced tweet IDs, prioritizing critical ones
        for tweet in tweets:
            referenced_tweets = tweet.get('referenced_tweets', [])
            for ref in referenced_tweets:
                ref_id = ref.get('id')
+                ref_type = ref.get('type')
                if ref_id and ref_id not in tweet_ids:
                    missing_tweet_ids.add(ref_id)
+                    # Prioritize direct replies and quoted tweets over retweets
+                    if ref_type in ['replied_to', 'quoted']:
+                        critical_missing_ids.add(ref_id)

+        # For rate limit efficiency, only fetch critical missing tweets if we have many
+        if len(missing_tweet_ids) > 10:
+            logger.info(f"Many missing tweets ({len(missing_tweet_ids)}), prioritizing {len(critical_missing_ids)} critical ones")
+            missing_tweet_ids = critical_missing_ids
+
+        # Context sufficiency check - skip backfill if we already have enough context
+        if has_sufficient_context(tweets, missing_tweet_ids):
+            logger.info("Thread has sufficient context, skipping missing tweet backfill")
+            missing_tweet_ids = set()
+
+        # Fetch missing referenced tweets in batches (more rate-limit friendly)
+        if missing_tweet_ids:
+            missing_list = list(missing_tweet_ids)
+
+            # First, check cache for missing tweets
+            cached_tweets = get_cached_tweets(missing_list)
+            for tweet_id, cached_tweet in cached_tweets.items():
+                if cached_tweet.get('conversation_id') == conversation_id:
+                    tweets.append(cached_tweet)
+                    tweet_ids.add(tweet_id)
+                    logger.info(f"Retrieved missing tweet from cache: {tweet_id}")
+
+                    # Add user data if available in cache
+                    if cached_tweet.get('author_info'):
+                        author_id = cached_tweet.get('author_id')
+                        if author_id:
+                            users_data[author_id] = cached_tweet['author_info']
+
+            # Only fetch tweets that weren't found in cache
+            uncached_ids = [tid for tid in missing_list if tid not in cached_tweets]
+
+            if uncached_ids:
+                batch_size = 100  # X API limit for bulk tweet lookup
+
+                for i in range(0, len(uncached_ids), batch_size):
+                    batch_ids = uncached_ids[i:i + batch_size]
+                    try:
+                        endpoint = "/tweets"
+                        params = {
+                            "ids": ",".join(batch_ids),
+                            "tweet.fields": "id,text,author_id,created_at,in_reply_to_user_id,referenced_tweets,conversation_id",
+                            "user.fields": "id,name,username",
+                            "expansions": "author_id"
+                        }
+                        response = self._make_request(endpoint, params)
+
+                        if response and "data" in response:
+                            fetched_tweets = []
+                            batch_users_data = {}
+
+                            for missing_tweet in response["data"]:
+                                # Only add if it's actually part of this conversation
+                                if missing_tweet.get('conversation_id') == conversation_id:
+                                    tweets.append(missing_tweet)
+                                    tweet_ids.add(missing_tweet.get('id'))
+                                    fetched_tweets.append(missing_tweet)
+                                    logger.info(f"Retrieved missing referenced tweet: {missing_tweet.get('id')}")
+
+                            # Add user data if available
+                            if "includes" in response and "users" in response["includes"]:
+                                for user in response["includes"]["users"]:
+                                    users_data[user["id"]] = user
+                                    batch_users_data[user["id"]] = user
+
+                            # Cache the newly fetched tweets
+                            if fetched_tweets:
+                                save_cached_tweets(fetched_tweets, batch_users_data)
+
+                            logger.info(f"Batch fetched {len(response['data'])} missing tweets from {len(batch_ids)} requested")

+                        # Handle partial success - log any missing tweets that weren't found
+                        if response and "errors" in response:
+                            for error in response["errors"]:
+                                logger.warning(f"Could not fetch tweet {error.get('resource_id')}: {error.get('title')}")
+
+                    except Exception as e:
+                        logger.warning(f"Could not fetch batch of missing tweets {batch_ids[:3]}...: {e}")
+            else:
+                logger.info(f"All {len(missing_list)} missing tweets found in cache")

        if tweets:
            # Filter out tweets that occur after until_id (if specified)
···
            logger.info(f"Retrieved {len(tweets)} tweets in thread")

        thread_data = {"tweets": tweets, "users": users_data}
+
+        # Cache individual tweets from the thread for future backfill
+        save_cached_tweets(tweets, users_data)

        # Cache the result
        if use_cache:
···
            logger.debug(f"Cached thread context for {conversation_id}")
        except Exception as e:
            logger.error(f"Error caching thread context: {e}")
+
+def get_cached_tweets(tweet_ids: List[str]) -> Dict[str, Dict]:
+    """
+    Load cached individual tweets if available.
+    Returns dict mapping tweet_id -> tweet_data for found tweets.
+    """
+    cached_tweets = {}
+
+    for tweet_id in tweet_ids:
+        cache_file = X_CACHE_DIR / f"tweet_{tweet_id}.json"
+        if cache_file.exists():
+            try:
+                with open(cache_file, 'r') as f:
+                    cached_data = json.load(f)
+
+                # Use longer cache times for older tweets (24 hours vs 1 hour)
+                from datetime import datetime, timedelta
+                cached_time = datetime.fromisoformat(cached_data.get('cached_at', ''))
+                tweet_created = cached_data.get('tweet_data', {}).get('created_at', '')
+
+                # Parse tweet creation time to determine age
+                try:
+                    from dateutil.parser import parse
+                    tweet_age = datetime.now() - parse(tweet_created)
+                    cache_duration = timedelta(hours=24) if tweet_age > timedelta(hours=24) else timedelta(hours=1)
+                except:
+                    cache_duration = timedelta(hours=1)  # Default to 1 hour if parsing fails
+
+                if datetime.now() - cached_time < cache_duration:
+                    cached_tweets[tweet_id] = cached_data.get('tweet_data')
+                    logger.debug(f"Using cached tweet {tweet_id}")
+
+            except Exception as e:
+                logger.warning(f"Error loading cached tweet {tweet_id}: {e}")
+
+    return cached_tweets
+
+def save_cached_tweets(tweets_data: List[Dict], users_data: Dict[str, Dict] = None):
+    """Save individual tweets to cache for future reuse."""
+    try:
+        X_CACHE_DIR.mkdir(exist_ok=True)
+
+        for tweet in tweets_data:
+            tweet_id = tweet.get('id')
+            if not tweet_id:
+                continue
+
+            cache_file = X_CACHE_DIR / f"tweet_{tweet_id}.json"
+
+            # Include user data if available
+            tweet_with_user = tweet.copy()
+            if users_data and tweet.get('author_id') in users_data:
+                tweet_with_user['author_info'] = users_data[tweet.get('author_id')]
+
+            cache_data = {
+                'tweet_id': tweet_id,
+                'tweet_data': tweet_with_user,
+                'cached_at': datetime.now().isoformat()
+            }
+
+            with open(cache_file, 'w') as f:
+                json.dump(cache_data, f, indent=2)
+
+            logger.debug(f"Cached individual tweet {tweet_id}")
+
+    except Exception as e:
+        logger.error(f"Error caching individual tweets: {e}")
+
+def has_sufficient_context(tweets: List[Dict], missing_tweet_ids: Set[str]) -> bool:
+    """
+    Determine if we have sufficient context to skip backfilling missing tweets.
+
+    Args:
+        tweets: List of tweets already in the thread
+        missing_tweet_ids: Set of missing tweet IDs we'd like to fetch
+
+    Returns:
+        True if context is sufficient, False if backfill is needed
+    """
+    # If no missing tweets, context is sufficient
+    if not missing_tweet_ids:
+        return True
+
+    # If we have a substantial conversation (5+ tweets), likely sufficient
+    if len(tweets) >= 5:
+        logger.debug(f"Thread has {len(tweets)} tweets, considering sufficient")
+        return True
+
+    # If only a few missing tweets and we have some context, might be enough
+    if len(missing_tweet_ids) <= 2 and len(tweets) >= 3:
+        logger.debug(f"Only {len(missing_tweet_ids)} missing tweets with {len(tweets)} existing, considering sufficient")
+        return True
+
+    # Check if we have conversational flow (mentions between users)
+    has_conversation_flow = False
+    for tweet in tweets:
+        text = tweet.get('text', '').lower()
+        # Look for mentions, replies, or conversational indicators
+        if '@' in text or 'reply' in text or len([t for t in tweets if t.get('author_id') != tweet.get('author_id')]) > 1:
+            has_conversation_flow = True
+            break
+
+    # If we have clear conversational flow and reasonable length, sufficient
+    if has_conversation_flow and len(tweets) >= 2:
+        logger.debug("Thread has conversational flow, considering sufficient")
+        return True
+
+    # Otherwise, we need to backfill
+    logger.debug(f"Context insufficient: {len(tweets)} tweets, {len(missing_tweet_ids)} missing, no clear flow")
+    return False

def fetch_and_queue_mentions(username: str) -> int:
    """
+1 -1 x_queue/last_seen_id.json
+1 -1 x_queue/processed_mentions.json
···
-
["1950774869081354654", "1950690566909710618", "1950742693988159754", "1950945823375901154", "1950808060882038991", "1950779122890031203", "1950890479383462306", "1950777589091688690", "1950754871021592693", "1950779868960186694", "1950947483787354596", "1950745029666017362", "1950746342672007544", "1950750119219105907", "1950750041418989607", "1950926224429347160", "1950789067697983839", "1950793128522469844", "1950754661222248941", "1950915735532060786", "1950774695558832499", "1950776620203339781", "1950778758614696173", "1950780116126318997", "1950795131579445316", "1950971932716966259", "1950748541707829622", "1950769312056447186", "1950792707695382889", "1950777288091439352", "1950755355698647515", "1950768760899739896", "1950783507560812625", "1950768798593904825", "1950986138568565053", "1950765586868326584", "1950776952513814997", "1950750459045798159", "1950927321134772310", "1950755118434984411", "1950741288724423041", "1950762713313079588", "1950794346716176506", "1950780762053304663", "1950754744550728122", "1950988834914566480", "1950782010668237005", "1950781276438577219", "1950983970759516426", "1950780986398216501", "1950763126796046406", "1950768950729678964", "1950775991808541048", "1950776906498109619", "1950764690168295530", "1950739368530120865", "1950777178188288213", "1950775129686090162", "1950758670171795911", "1950810588298530818", "1950766844111122851", "1950749284804223244", "1950793918666395746", "1950766224658534618", "1950799213153112379", "1950766756869632018", "1950766482675421599", "1950746269363871754", "1950819109299458395", "1950789328826925276", "1950753699502100821", "1950781657147162971", "1950781652210422198", "1950749014728577308", "1950759115438887277", "1950764784477249958", "1950781400317317299", "1950763200649318634", "1950776904006746230", "1950776464145801332", "1950748407372353687", "1950779106389614951", "1950714596828061885", "1950775893640802681", "1950991382866436512", "1950780971072270844", "1950766898423152648", "1950751482476724241", "1950782587716489427", "1950777458657218590", "1950766890613350863", "1950778983630704954", "1950779760881373591", "1950749194685407273", "1950775531315888177", "1950748959812813003", "1950792967331193208", "1950757694312427564", "1950769061849358827", "1950781053599383730", "1950769046783443440", "1950758096747610115", "1950756238528000184", "1950782155098902811", "1950781122625040430", "1950776236239958521", "1950764759437189405", "1950754203871416763", "1950750239994061165", "1950763150195986893", "1950931104778625152", "1950779585383313712", "1950752256774856770", "1951010617638146178", "1950791694955540787", "1950945577484583072", "1950789537237766434", "1950743359305478515"]
···
+
["1950766898423152648", "1950776236239958521", "1950749284804223244", "1951180352480821383", "1951036541008248938", "1950749014728577308", "1950794346716176506", "1950793128522469844", "1951136817245462740", "1950792707695382889", "1950741288724423041", "1951063241288983033", "1951348705162043591", "1951114308836008146", "1950749194685407273", "1950775893640802681", "1950743359305478515", "1950759115438887277", "1950776904006746230", "1950927321134772310", "1950766482675421599", "1951339550804005231", "1950765586868326584", "1950768950729678964", "1950776952513814997", "1951165239761969378", "1950754661222248941", "1950915735532060786", "1950781652210422198", "1951175052474700212", "1950750459045798159", "1951348198192324760", "1950781053599383730", "1950799213153112379", "1951348258607153462", "1951010617638146178", "1951127532402847980", "1950986138568565053", "1950779760881373591", "1951163964299296845", "1951352527615828053", "1950791694955540787", "1951179072802828555", "1950768798593904825", "1950810588298530818", "1950782587716489427", "1950748959812813003", "1950945823375901154", "1950756238528000184", "1951163614750187700", "1950777458657218590", "1950763150195986893", "1950745029666017362", "1951347808499565026", "1951139510760124446", "1951347165068140904", "1950779868960186694", "1951182820363145238", "1950758670171795911", "1951147949456523656", "1951353640498897297", "1950754871021592693", "1950778758614696173", "1950775531315888177", "1951309293510373414", "1951129835729985602", "1950795131579445316", "1951349178141159849", "1950789537237766434", "1950781657147162971", "1951136786144698795", "1951141952105431381", "1950819109299458395", "1951353453546184900", "1951040769646796890", "1951346496840077702", "1950781400317317299", "1950755118434984411", "1950766890613350863", "1951121664126886391", "1950763126796046406", "1951172176729743451", "1951038794024509743", "1951166530890375615", "1950746269363871754", "1950764784477249958", "1950777288091439352", "1950971932716966259", "1950931104778625152", "1950780116126318997", "1951173346281849248", "1951118743515570593", "1951129383957127239", "1950769312056447186", "1950764690168295530", "1950774695558832499", "1951359188078305712", "1951170488681799793", "1951307523388035439", "1951315163388453150", "1950750119219105907", "1951119002073440288", "1950714596828061885", "1950792967331193208", "1950777589091688690", "1951172869922435497", "1950750239994061165", "1950779106389614951", "1950748407372353687", "1950755355698647515", "1950778983630704954", "1951119469805445309", "1950753699502100821", "1950769061849358827", "1950890479383462306", "1950775129686090162", "1951174527167480109", "1951347924862116206", "1950808060882038991", "1950757694312427564", "1951034947915108628", "1950758096747610115", "1951132696383766599", "1950988834914566480", "1950781122625040430", "1951117367595216986", "1950793918666395746", "1951183108155314686", "1950779585383313712", "1951174471093723386", "1950762713313079588", "1950764759437189405", "1950780986398216501", "1951168402086830260", "1951059082569285636", "1951172964080312368", "1950766756869632018", "1950926224429347160", "1950748541707829622", "1951165985530192029", "1950779122890031203", "1951164423932157970", "1950774869081354654", "1950789328826925276", "1950776620203339781", "1950783507560812625", "1951174849390588267", "1950742693988159754", "1951315292765954297", "1950777178188288213", "1950789067697983839", "1951120165439152509", "1950983970759516426", "1951316609781407888", 
"1950780762053304663", "1951354423533511069", "1950739368530120865", "1950782010668237005", "1950754203871416763", "1950690566909710618", "1951315365860090218", "1950947483787354596", "1950781276438577219", "1950776464145801332", "1951308927829963195", "1950766224658534618", "1950763200649318634", "1950766844111122851", "1951142705582473361", "1950746342672007544", "1951173108204970474", "1950782155098902811", "1950775991808541048", "1950768760899739896", "1951134620638183644", "1951308577953501232", "1950991382866436512", "1950769046783443440", "1951203664288215278", "1950750041418989607", "1950752256774856770", "1950776906498109619", "1950751482476724241", "1950754744550728122", "1951168832099459262", "1950780971072270844", "1951118786918228119", "1951160693677494624", "1950945577484583072", "1951040893248508395", "1951174355649700256", "1951127859290083736", "1951166765188387073"]