Reference implementation for the Phoenix Architecture. Work in progress.
Source: aicoding.leaflet.pub/ — tags: ai, coding, crazy
1/**
2 * OpenAI (GPT) LLM Provider.
3 *
4 * Uses the Chat Completions API via native fetch.
5 * Requires OPENAI_API_KEY env var.
6 */
7
8import type { LLMProvider, GenerateOptions } from './provider.js';
9
// Chat Completions endpoint; the whole provider speaks only this API.
const API_URL = 'https://api.openai.com/v1/chat/completions';
11
12export class OpenAIProvider implements LLMProvider {
13 readonly name = 'openai';
14 readonly model: string;
15 private apiKey: string;
16
17 constructor(apiKey: string, model: string) {
18 this.apiKey = apiKey;
19 this.model = model;
20 }
21
22 async generate(prompt: string, options?: GenerateOptions): Promise<string> {
23 const messages: Array<{ role: string; content: string }> = [];
24
25 if (options?.system) {
26 messages.push({ role: 'system', content: options.system });
27 }
28 messages.push({ role: 'user', content: prompt });
29
30 const body: Record<string, unknown> = {
31 model: this.model,
32 messages,
33 max_tokens: options?.maxTokens ?? 8192,
34 };
35
36 if (options?.temperature !== undefined) {
37 body.temperature = options.temperature;
38 }
39
40 const res = await fetch(API_URL, {
41 method: 'POST',
42 headers: {
43 'Content-Type': 'application/json',
44 'Authorization': `Bearer ${this.apiKey}`,
45 },
46 body: JSON.stringify(body),
47 });
48
49 if (!res.ok) {
50 const text = await res.text();
51 throw new Error(`OpenAI API error ${res.status}: ${text}`);
52 }
53
54 const data = await res.json() as {
55 choices: Array<{ message: { content: string } }>;
56 };
57
58 if (!data.choices?.length) {
59 throw new Error('OpenAI returned no choices');
60 }
61
62 return data.choices[0].message.content;
63 }
64}