Monorepo for Aesthetic.Computer (aesthetic.computer).
1// Ask, 23.05.16.13.49
// A Vercel edge function that handles OpenAI text-prediction APIs.
3
4import { corsHeaders } from "../help.mjs";
5// import { count } from "openai-gpt-token-counter";
6// ^ Would require moving to a different runtime. 23.05.29.18.02
7
// Treat any NODE_ENV other than an explicit "development" as production,
// which enables the origin allowlist check in `handler` below.
const prod = process.env.NODE_ENV !== "development";
9
10export default async function handler(req) {
11 const headers = corsHeaders(req);
12 const allowedOrigins = [
13 "https://aesthetic.computer",
14 "https://botce.ac",
15 "https://chat-system.aesthetic.computer",
16 ];
17 const origin = req.headers.get("Origin");
18
19 // Allow requests in development environment or if the origin is in the allowed list
20 if (prod && !allowedOrigins.includes(origin)) {
21 return new Response("Access denied.", {
22 status: 403,
23 headers: { "Content-Type": "text/plain", ...headers },
24 });
25 }
26
27 if (req.method === "GET") {
28 return new Response("Wrong method.", {
29 headers: { "Content-Type": "text/plain", ...headers },
30 });
31 }
32
33 if (req.method === "OPTIONS") {
34 return new Response("Success!", {
35 headers: { "Content-Type": "text/plain", ...headers },
36 });
37 }
38
39 if (req.method === "POST") {
40 const body = await req.json();
41 let { messages, hint } = body;
42
43 console.log("🧠 Processing:", body);
44
45 try {
46 messages = messages?.map((message) => {
47 return { role: message.by, content: message.text };
48 });
49
50 // ❤️🔥 TODO: Measure max token size for conversation history.
51
52 // Defaults
53 let temperature = 1;
54 let top_p = 1; // Maximum: 1
55 let max_tokens = 256;
56
57 // Tweak for "character" dialogical output.
58 if (hint.startsWith("character")) {
59 temperature = 1;
60 top_p = 0.5;
61 max_tokens = 256;
62 }
63
64 // const model = hint.split(":")[1] || "gpt-3.5-turbo";
65 let model = hint.split(":")[1] || "gpt-4o-mini";
66
67 // Tweak for language filtering...
68 if (hint.startsWith("code")) {
69 model = "gpt-4o";
70 max_tokens = 512;
71 }
72
73 // Request streaming response
74 const payload: OpenAIStreamPayload = {
75 model,
76 messages,
77 temperature,
78 top_p,
79 frequency_penalty: 0,
80 presence_penalty: 0,
81 max_tokens,
82 stream: true,
83 n: 1,
84 };
85
86 const stream = await OpenAIStream(payload);
87
88 if (!stream) {
89 return new Response("Error", {
90 status: 500,
91 headers: { "Content-Type": "text/plain", ...headers },
92 });
93 } else {
94 return new Response(stream, { headers });
95 }
96 } catch (error) {
97 console.error("Failed to process the request:", error);
98 return new Response("Error", {
99 status: 500,
100 headers: { "Content-Type": "text/plain", ...headers },
101 });
102 }
103 } else {
104 return new Response("Wrong method.");
105 }
106}
107
// Vercel deployment config: run this handler on the Edge runtime.
export const config = { runtime: "edge" };
109
110// Extracted from: https://github.com/Nutlope/twitterbio/blob/main/utils/OpenAIStream.ts
111
112import {
113 createParser,
114 ParsedEvent,
115 ReconnectInterval,
116} from "eventsource-parser";
117
118type ChatGPTAgent = "user" | "system";
119
120interface ChatGPTMessage {
121 role: ChatGPTAgent;
122 content: string;
123}
124
// Request body for OpenAI's /v1/chat/completions endpoint.
// Field names mirror the API's snake_case wire format exactly.
interface OpenAIStreamPayload {
  model: string; // e.g. "gpt-4o-mini" or "gpt-4o".
  messages: ChatGPTMessage[]; // Conversation turns sent to the model.
  temperature: number; // Sampling temperature.
  top_p: number; // Nucleus-sampling cutoff (maximum: 1).
  frequency_penalty: number;
  presence_penalty: number;
  max_tokens: number; // Cap on generated tokens.
  stream: boolean; // true → server-sent-event streaming response.
  n: number; // Number of completions to generate.
}
136
137async function OpenAIStream(payload: OpenAIStreamPayload) {
138 const encoder = new TextEncoder();
139 const decoder = new TextDecoder();
140
141 let counter = 0;
142 let res;
143
144 try {
145 res = await fetch("https://api.openai.com/v1/chat/completions", {
146 headers: {
147 "Content-Type": "application/json",
148 Authorization: `Bearer ${process.env.OPENAI_API_KEY ?? ""}`,
149 },
150 method: "POST",
151 body: JSON.stringify(payload),
152 });
153
154 if (!res.ok) {
155 const errorData = await res.json();
156 console.error("Request failed:", errorData);
157 }
158 } catch (err) {
159 console.error("Request failed:", err);
160 }
161
162 if (!res || !res.ok) return; // Return early if an error was caught.
163
164 const stream = new ReadableStream({
165 async start(controller) {
166 // callback
167 function parse(event: ParsedEvent | ReconnectInterval) {
168 if (event.type === "event") {
169 const data = event.data;
170 // https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream
171 if (data === "[DONE]") {
172 controller.close();
173 return;
174 }
175 try {
176 const json = JSON.parse(data);
177 const text = json.choices[0].delta?.content || "";
178 // prefix character (i.e., "\n\n"), do nothing
179 if (counter < 2 && (text.match(/\n/) || []).length) return;
180 const queue = encoder.encode(text);
181 controller.enqueue(queue);
182 counter += 1;
183 } catch (e) {
184 controller.error(e);
185 }
186 }
187 }
188
189 // The stream response (SSE) from OpenAI may be fragmented into multiple
190 // chunks this ensures we properly read chunks and invoke events for each.
191 const parser = createParser(parse);
192 // https://web.dev/streams/#asynchronous-iteration
193 for await (const chunk of res.body as any) {
194 parser.feed(decoder.decode(chunk));
195 }
196 },
197 });
198
199 return stream;
200}