//! An AI agent built to run Ralph loops: plan mode for creating specifications,
//! and ralph mode for implementing them.
1use crate::config::Config;
2use crate::llm::factory::create_client;
3use crate::llm::{LlmClient, Message, ResponseContent};
4use crate::security::{SecurityValidator, permission::CliPermissionHandler};
5use crate::tools::ToolRegistry;
6use crate::tools::factory::create_default_registry;
7use crate::tui::messages::{AgentMessage, AgentSender};
8use std::io::{self, Write};
9use std::sync::Arc;
10
/// Planning agent for interactive spec creation
pub struct PlanningAgent {
    // LLM client used for every planning conversation turn.
    client: Arc<dyn LlmClient>,
    // Registry of tools the model may invoke (read_file, write_file, etc.).
    registry: ToolRegistry,
    // Directory the finished spec is reported to live in — TODO confirm the
    // write_file tool actually targets this directory; the agent only prints it.
    pub spec_dir: String,
    // Full message history, seeded with the planning system prompt in `new`.
    conversation: Vec<Message>,
}
18
19impl PlanningAgent {
20 /// Create a new planning agent with the given config
21 pub fn new(config: Config, spec_dir: String) -> anyhow::Result<Self> {
22 // Get planning-specific LLM config
23 let llm_config = config.planning_llm().clone();
24
25 // Create LLM client using factory
26 let client = create_client(&config, &llm_config)?;
27
28 // Create security validator and permission handler
29 let validator = Arc::new(SecurityValidator::new(config.security.clone())?);
30 let permission_handler = Arc::new(CliPermissionHandler);
31
32 // Create and populate tool registry
33 let registry = create_default_registry(validator, permission_handler);
34
35 // Initialize conversation with system message
36 let system_message = Message::system(PLANNING_SYSTEM_PROMPT);
37
38 Ok(Self {
39 client,
40 registry,
41 spec_dir,
42 conversation: vec![system_message],
43 })
44 }
45
46 /// Run the interactive planning loop
47 pub async fn run(&mut self) -> anyhow::Result<()> {
48 println!("Planning Agent - Interactive Mode");
49 println!("Type your requirements and I'll help you create a detailed spec.");
50 println!("Type 'done' when finished or 'exit' to quit.\n");
51
52 loop {
53 // Get user input
54 print!("You: ");
55 io::stdout().flush()?;
56
57 let mut input = String::new();
58 io::stdin().read_line(&mut input)?;
59 let input = input.trim();
60
61 if input == "exit" {
62 println!("Exiting planning mode.");
63 break;
64 }
65
66 if input == "done" {
67 println!("Planning complete. Spec saved to {}", self.spec_dir);
68 break;
69 }
70
71 if input.is_empty() {
72 continue;
73 }
74
75 // Add user message to conversation
76 self.conversation.push(Message::user(input));
77
78 // Process the conversation turn
79 match self.process_turn().await {
80 Ok(should_continue) => {
81 if !should_continue {
82 break;
83 }
84 }
85 Err(e) => {
86 eprintln!("Error: {}", e);
87 // Remove the failed user message
88 self.conversation.pop();
89 }
90 }
91 }
92
93 Ok(())
94 }
95
96 /// Run planning with a message sender for TUI integration
97 pub async fn run_with_sender(
98 &mut self,
99 tx: AgentSender,
100 initial_message: String,
101 ) -> anyhow::Result<()> {
102 tx.send(AgentMessage::PlanningStarted).await?;
103
104 self.conversation.push(Message::user(&initial_message));
105
106 loop {
107 let tools = self.registry.definitions();
108 let response = self.client.chat(self.conversation.clone(), &tools).await?;
109
110 match response.content {
111 ResponseContent::Text(text) => {
112 self.conversation.push(Message::assistant(text.clone()));
113 tx.send(AgentMessage::PlanningResponse(text)).await?;
114
115 // Check for end of turn
116 if matches!(response.stop_reason.as_deref(), Some("end_turn")) {
117 break;
118 }
119 break;
120 }
121 ResponseContent::ToolCalls(tool_calls) => {
122 for tool_call in &tool_calls {
123 tx.send(AgentMessage::PlanningToolCall {
124 name: tool_call.name.clone(),
125 args: tool_call.parameters.to_string(),
126 })
127 .await?;
128
129 let tool = self
130 .registry
131 .get(&tool_call.name)
132 .ok_or_else(|| anyhow::anyhow!("Tool not found: {}", tool_call.name))?;
133
134 let result = tool.execute(tool_call.parameters.clone()).await;
135
136 let output = match result {
137 Ok(output) => output,
138 Err(e) => format!("Error: {}", e),
139 };
140
141 tx.send(AgentMessage::PlanningToolResult {
142 name: tool_call.name.clone(),
143 output: output.clone(),
144 })
145 .await?;
146
147 if tool_call.name == "write_file"
148 && let Some(path) =
149 tool_call.parameters.get("path").and_then(|p| p.as_str())
150 && (path.ends_with("spec.json") || path.ends_with(".json"))
151 {
152 tx.send(AgentMessage::PlanningComplete {
153 spec_path: path.to_string(),
154 })
155 .await?;
156 }
157
158 self.conversation.push(Message::user(format!(
159 "Tool result for {}:\n{}",
160 tool_call.name, output
161 )));
162 }
163 // Continue loop for next LLM response
164 }
165 }
166 }
167
168 Ok(())
169 }
170
171 /// Continue conversation with additional user input
172 pub async fn continue_with_sender(
173 &mut self,
174 tx: AgentSender,
175 user_message: String,
176 ) -> anyhow::Result<()> {
177 self.run_with_sender(tx, user_message).await
178 }
179
180 /// Process a single conversation turn
181 async fn process_turn(&mut self) -> anyhow::Result<bool> {
182 loop {
183 // Get tool definitions
184 let tools = self.registry.definitions();
185
186 // Call LLM
187 let response = self.client.chat(self.conversation.clone(), &tools).await?;
188
189 match response.content {
190 ResponseContent::Text(text) => {
191 // Add assistant response to conversation
192 self.conversation.push(Message::assistant(text.clone()));
193
194 // Print assistant response
195 println!("\nAssistant: {}\n", text);
196
197 // Check for completion
198 if matches!(response.stop_reason.as_deref(), Some("end_turn")) {
199 return Ok(true);
200 }
201
202 return Ok(true);
203 }
204 ResponseContent::ToolCalls(tool_calls) => {
205 // Execute each tool call
206 for tool_call in &tool_calls {
207 println!("\n[Executing tool: {}]", tool_call.name);
208
209 // Get the tool from registry
210 let tool = self
211 .registry
212 .get(&tool_call.name)
213 .ok_or_else(|| anyhow::anyhow!("Tool not found: {}", tool_call.name))?;
214
215 // Execute the tool
216 let result = tool.execute(tool_call.parameters.clone()).await;
217
218 let output = match result {
219 Ok(output) => {
220 println!("[Tool result: {} bytes]", output.len());
221 output
222 }
223 Err(e) => {
224 println!("[Tool error: {}]", e);
225 format!("Error: {}", e)
226 }
227 };
228
229 // Add tool result to conversation
230 // Note: Using User role for now as Anthropic expects tool results
231 // in user messages. Future OpenAI provider will use Message::tool_result()
232 self.conversation.push(Message::user(format!(
233 "Tool result for {}:\n{}",
234 tool_call.name, output
235 )));
236 }
237
238 // Continue the loop to get the next LLM response
239 }
240 }
241 }
242 }
243}
244
/// System prompt that steers the planning-mode LLM: ask clarifying questions,
/// break work into tasks with acceptance criteria, and emit a spec.json (via
/// the write_file tool) in the JSON structure shown in the prompt body.
const PLANNING_SYSTEM_PROMPT: &str = r#"You are a planning agent that helps users create detailed specifications for software development tasks.

Your role is to:
1. Ask clarifying questions to understand the user's requirements
2. Break down complex tasks into smaller, manageable subtasks
3. Define clear acceptance criteria for each task
4. Create a structured specification file in JSON format

When the user provides their requirements, you should:
- Ask about technical constraints, dependencies, and environment
- Identify risks and edge cases
- Suggest best practices and design patterns
- Help define test criteria

Use the available tools to:
- read_file: Read existing files to understand the codebase
- write_file: Create the specification file
- list_files: Explore the project structure
- run_command: Run commands to gather information about the environment

When you have enough information, create a spec.json file with this structure:
{
  "name": "project-name",
  "description": "Brief description of the project",
  "branch_name": "feature/branch-name",
  "created_at": "2024-01-01T00:00:00Z",
  "tasks": [
    {
      "id": "task-1",
      "title": "Task title",
      "description": "Detailed task description",
      "acceptance_criteria": [
        "Criterion 1",
        "Criterion 2"
      ],
      "status": "pending"
    }
  ],
  "learnings": []
}

Be thorough but efficient. Ask questions one at a time to avoid overwhelming the user.
"#;