// Script for easily configuring, using, switching and comparing local
// offline coding models.
1import { spawn, execSync } from "node:child_process"; 2import { OLLAMA_URL } from "../config.js"; 3import { 4 getActiveChatModel, 5 getActiveAutocompleteModel, 6} from "../runtime-config.js"; 7import { log, warn, err } from "../log.js"; 8import { commandExists, runPassthrough } from "../util.js"; 9 10async function ollamaHealthy(): Promise<boolean> { 11 try { 12 const res = await fetch(`${OLLAMA_URL}/api/tags`); 13 return res.ok; 14 } catch { 15 return false; 16 } 17} 18 19async function waitForOllama(timeoutSec: number): Promise<boolean> { 20 process.stdout.write(" Waiting for Ollama"); 21 for (let i = 0; i < timeoutSec; i++) { 22 if (await ollamaHealthy()) { 23 console.log(" ready!"); 24 return true; 25 } 26 process.stdout.write("."); 27 await new Promise((r) => setTimeout(r, 1000)); 28 } 29 console.log(" timed out."); 30 return false; 31} 32 33export async function ensureOllama(): Promise<void> { 34 if (await ollamaHealthy()) return; 35 36 if (!commandExists("ollama")) { 37 log("Ollama not found. Installing via Homebrew..."); 38 runPassthrough("brew install ollama"); 39 } 40 41 log("Starting Ollama..."); 42 const child = spawn("ollama", ["serve"], { 43 stdio: "ignore", 44 detached: true, 45 env: { ...process.env, OLLAMA_HOST: "127.0.0.1" }, 46 }); 47 child.unref(); 48 49 const ready = await waitForOllama(15); 50 if (!ready) { 51 err("Ollama failed to start. 
Run: ollama serve"); 52 } 53} 54 55export function isModelPulled(ollamaTag: string): boolean { 56 try { 57 const output = execSync("ollama list", { encoding: "utf-8", stdio: ["pipe", "pipe", "pipe"] }); 58 return output.includes(ollamaTag); 59 } catch { 60 return false; 61 } 62} 63 64export async function pullIfNeeded(ollamaTag: string, label: string): Promise<void> { 65 if (isModelPulled(ollamaTag)) { 66 log(`${label} already pulled: ${ollamaTag}`); 67 return; 68 } 69 log(`Pulling ${label}: ${ollamaTag} (this may take a while)...`); 70 runPassthrough(`ollama pull ${ollamaTag}`); 71} 72 73export async function startServers(): Promise<void> { 74 await ensureOllama(); 75 76 const chatModel = getActiveChatModel(); 77 const autoModel = getActiveAutocompleteModel(); 78 79 await pullIfNeeded(chatModel.ollamaTag, `chat model (${chatModel.name})`); 80 await pullIfNeeded(autoModel.ollamaTag, `autocomplete model (${autoModel.name})`); 81 82 log("Ollama is running. Models are pulled and ready."); 83} 84 85export function stopServers(): void { 86 try { 87 execSync("pkill -f ollama", { stdio: "ignore" }); 88 log("Ollama stopped."); 89 } catch { 90 warn("Ollama not running."); 91 } 92}