Script for easily configuring, using, switching, and comparing local offline coding models

Add Goose, Codex CLI, gptme, and Claude Code TUIs

Ollama supports many coding agents. Add all viable ones with per-TUI
env vars, config templates, and launch args. Also auto-install Ollama
via Homebrew when ensureOllama finds it missing.

+97 -13
+40 -12
src/commands/run.ts
··· 23 23 const tui = getActiveTui(); 24 24 const chatModel = getActiveChatModel(); 25 25 let tuiArgs: string[]; 26 + let tuiCmd = tui.checkCmd; 27 + const env = { ...process.env }; 26 28 27 - if (tui.id === "aider") { 28 - process.env.OPENAI_API_KEY = "sk-not-needed"; 29 - process.env.OPENAI_API_BASE = `http://127.0.0.1:${OLLAMA_PORT}/v1`; 30 - tuiArgs = [ 31 - "--model", `openai/${chatModel.id}`, 32 - "--no-show-model-warnings", 33 - "--no-check-update", 34 - ...args, 35 - ]; 36 - } else { 37 - tuiArgs = args; 29 + switch (tui.id) { 30 + case "aider": 31 + env.OPENAI_API_KEY = "sk-not-needed"; 32 + env.OPENAI_API_BASE = `http://127.0.0.1:${OLLAMA_PORT}/v1`; 33 + tuiArgs = [ 34 + "--model", `openai/${chatModel.id}`, 35 + "--no-show-model-warnings", 36 + "--no-check-update", 37 + ...args, 38 + ]; 39 + break; 40 + 41 + case "goose": 42 + env.GOOSE_PROVIDER = "ollama"; 43 + env.OLLAMA_HOST = `http://127.0.0.1:${OLLAMA_PORT}`; 44 + tuiArgs = [...args]; 45 + break; 46 + 47 + case "codex": 48 + // Codex reads ~/.codex/config.toml written by writeTuiConfig 49 + tuiArgs = ["--provider", "ollama", ...args]; 50 + break; 51 + 52 + case "gptme": 53 + env.OPENAI_BASE_URL = `http://127.0.0.1:${OLLAMA_PORT}/v1`; 54 + tuiArgs = ["--model", `local/${chatModel.ollamaTag}`, ...args]; 55 + break; 56 + 57 + case "claude": 58 + // ollama launch claude handles all config automatically 59 + tuiCmd = "ollama"; 60 + tuiArgs = ["launch", "claude", ...args]; 61 + break; 62 + 63 + default: 64 + tuiArgs = [...args]; 65 + break; 38 66 } 39 67 40 68 if (tui.resumeArgs) { ··· 42 70 } 43 71 44 72 log(`Launching ${tui.name}...`); 45 - const child = spawn(tui.checkCmd, tuiArgs, { stdio: "inherit" }); 73 + const child = spawn(tuiCmd, tuiArgs, { stdio: "inherit", env }); 46 74 child.on("exit", (code) => process.exit(code ?? 0)); 47 75 }
+6 -1
src/commands/server.ts
··· 5 5 getActiveAutocompleteModel, 6 6 } from "../runtime-config.js"; 7 7 import { log, warn, err } from "../log.js"; 8 - import { runPassthrough } from "../util.js"; 8 + import { commandExists, runPassthrough } from "../util.js"; 9 9 10 10 async function ollamaHealthy(): Promise<boolean> { 11 11 try { ··· 32 32 33 33 export async function ensureOllama(): Promise<void> { 34 34 if (await ollamaHealthy()) return; 35 + 36 + if (!commandExists("ollama")) { 37 + log("Ollama not found. Installing via Homebrew..."); 38 + runPassthrough("brew install ollama"); 39 + } 35 40 36 41 log("Starting Ollama..."); 37 42 const child = spawn("ollama", ["serve"], {
+2
src/config.ts
··· 13 13 export const PI_CONFIG_DIR = join(homedir(), ".pi", "agent"); 14 14 export const PI_MODELS_FILE = join(PI_CONFIG_DIR, "models.json"); 15 15 export const PI_SETTINGS_FILE = join(PI_CONFIG_DIR, "settings.json"); 16 + export const CODEX_CONFIG_DIR = join(homedir(), ".codex"); 17 + export const CODEX_CONFIG_FILE = join(CODEX_CONFIG_DIR, "config.toml");
+30
src/registry/tuis.ts
··· 31 31 launchArgs: '"$@"', 32 32 resumeArgs: ["--continue"], 33 33 }, 34 + { 35 + id: "goose", 36 + name: "Goose", 37 + installCmd: "brew install block-goose-cli", 38 + checkCmd: "goose", 39 + launchArgs: '"$@"', 40 + }, 41 + { 42 + id: "codex", 43 + name: "Codex CLI", 44 + installCmd: "npm install -g @openai/codex", 45 + checkCmd: "codex", 46 + launchArgs: '"$@"', 47 + }, 48 + { 49 + id: "gptme", 50 + name: "gptme", 51 + installCmd: "pipx install gptme", 52 + checkCmd: "gptme", 53 + launchArgs: '"$@"', 54 + resumeArgs: ["-r"], 55 + }, 56 + { 57 + id: "claude", 58 + name: "Claude Code", 59 + installCmd: "npm install -g @anthropic-ai/claude-code", 60 + checkCmd: "claude", 61 + launchArgs: '"$@"', 62 + resumeArgs: ["--continue"], 63 + }, 34 64 ]; 35 65 36 66 export function getTuiById(id: string): TuiDef | undefined {
+7
src/steps/aider-config.ts
··· 6 6 OPENCODE_CONFIG_FILE, 7 7 PI_MODELS_FILE, 8 8 PI_SETTINGS_FILE, 9 + CODEX_CONFIG_FILE, 9 10 } from "../config.js"; 10 11 import { getActiveChatModel, getActiveTui } from "../runtime-config.js"; 11 12 import { aiderConfig, aiderEnv } from "../templates/aider.js"; 12 13 import { opencodeConfig } from "../templates/opencode.js"; 13 14 import { piModelsConfig, piSettingsConfig } from "../templates/pi.js"; 15 + import { codexConfig } from "../templates/codex.js"; 14 16 15 17 export async function writeTuiConfig(): Promise<void> { 16 18 const chatModel = getActiveChatModel(); ··· 33 35 ); 34 36 await writeConfig(PI_SETTINGS_FILE, piSettingsConfig()); 35 37 log(`Pi config written to ${PI_MODELS_FILE}`); 38 + 39 + await writeConfig(CODEX_CONFIG_FILE, codexConfig(chatModel.id)); 40 + log(`Codex config written to ${CODEX_CONFIG_FILE}`); 41 + 42 + // Goose and gptme are configured via env vars at launch time (no config files needed) 36 43 37 44 log(`Active TUI: ${tui.name}`); 38 45 }
+12
src/templates/codex.ts
··· 1 + import { OLLAMA_PORT } from "../config.js"; 2 + 3 + export function codexConfig(modelId: string): string { 4 + return `# Codex CLI — Ollama configuration 5 + model = "${modelId}" 6 + provider = "ollama" 7 + 8 + [providers.ollama] 9 + name = "Ollama (local)" 10 + base_url = "http://127.0.0.1:${OLLAMA_PORT}/v1" 11 + `; 12 + }