import litellm
import os

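# Drop request params the target provider doesn't support instead of raising,
# and emit OpenTelemetry traces only when explicitly enabled.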
litellm.drop_params = True
if os.environ.get("ENABLE_OTEL", "false").lower() == "true":
    litellm.callbacks = ["otel"]

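# Both model identifiers are required: the questioning model drives the
# interview, the spec model writes the final document.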
QUESTIONING_MODEL = os.environ["QUESTIONING_MODEL"]
SPEC_MODEL = os.environ["SPEC_MODEL"]

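# Sentinel string the questioning model is instructed to emit once it has
# gathered everything it needs.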
QUESTIONING_END_TOKEN = "<NO_MORE_QUESTIONS>"
SYSTEM_PROMPT = f"""
You are a helpful assistant that the user can use to develop a detailed
specification for an idea.

Ask the user one question at a time to develop a thorough, step-by-step spec for
this idea. Each question should build on the previous answers, and the end goal
is to have a detailed specification that can be handed off to a developer.

Let's do this iteratively and dig into every relevant detail. Clarify everything
from features to technologies to architecture.

Remember, only one question at a time.

After I have answered all the questions you need, respond with
"{QUESTIONING_END_TOKEN}".
"""

IDEA_PROMPT = """
Here's the idea:

{idea}
"""

SPEC_PROMPT = """
Now that we've wrapped up the brainstorming process, can you compile our
findings into a comprehensive, developer-ready specification? Include all
relevant requirements, architecture choices, data handling details, error
handling strategies, and a testing plan so a developer can immediately begin
implementation.
"""

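# Seed the conversation with the system prompt and the user's idea.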
idea = input("Idea: ")
messages = [
    {"role": "system", "content": SYSTEM_PROMPT},
    {"role": "user", "content": IDEA_PROMPT.format(idea=idea)},
]

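# Ask the opening question.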
response = litellm.completion(
    model=QUESTIONING_MODEL,
    messages=messages,
)
new_message = response.choices[0].message
messages.append({"role": "assistant", "content": new_message.content})

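# Interview loop: print each question, collect the user's answer, and stop
# once the model signals it has no more questions.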
while True:
    # Not every model/provider returns reasoning content, so guard the access.
    if getattr(new_message, "reasoning_content", None):
        print(f"Reasoning: {new_message.reasoning_content}")

    print(f"Question: {new_message.content}")
    answer = input("Answer: ")
    messages.append({"role": "user", "content": answer})
    response = litellm.completion(
        model=QUESTIONING_MODEL,
        messages=messages,
    )
    new_message = response.choices[0].message
    messages.append({"role": "assistant", "content": new_message.content})

    # Check against the constant rather than a duplicated literal.
    if QUESTIONING_END_TOKEN in new_message.content.upper():
        break

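# Hand the full transcript to the spec model to produce the specification.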
messages.append({"role": "user", "content": SPEC_PROMPT})

response = litellm.completion(
    model=SPEC_MODEL,
    messages=messages,
)

new_message = response.choices[0].message
messages.append({"role": "assistant", "content": new_message.content})

print("Specification:")
print(new_message.content)