// Script for easily configuring, using, switching and comparing local offline coding models
1import { OLLAMA_URL } from "../config.js";
2import { getActiveChatModel } from "../runtime-config.js";
3import { err } from "../log.js";
4import { ensureOllama } from "./server.js";
5
6export async function runPipe(prompt: string): Promise<void> {
7 await ensureOllama();
8 const model = getActiveChatModel();
9
10 // Read stdin
11 const chunks: Buffer[] = [];
12 for await (const chunk of process.stdin) {
13 chunks.push(chunk as Buffer);
14 }
15 const input = Buffer.concat(chunks).toString("utf-8");
16
17 const body = JSON.stringify({
18 model: model.ollamaTag,
19 messages: [
20 {
21 role: "system",
22 content:
23 "You are an expert programmer. Output only code, no explanations.",
24 },
25 { role: "user", content: `${prompt}\n\n\`\`\`\n${input}\n\`\`\`` },
26 ],
27 stream: false,
28 });
29
30 let res: Response;
31 try {
32 res = await fetch(
33 `${OLLAMA_URL}/v1/chat/completions`,
34 {
35 method: "POST",
36 headers: { "Content-Type": "application/json" },
37 body,
38 },
39 );
40 } catch {
41 err("Ollama not running. Start it with: localcode start");
42 }
43
44 if (!res!.ok) {
45 err(`Server returned ${res!.status}`);
46 }
47
48 const data = (await res!.json()) as {
49 choices?: { message?: { content?: string } }[];
50 };
51 const content = data.choices?.[0]?.message?.content ?? "";
52 process.stdout.write(content + "\n");
53}