Script for easily configuring, using, switching and comparing local offline coding models

Launch-first CLI with auto-detect TUI/model IDs

Rewrite CLI to launch immediately: localcode goose, localcode qwen3-coder,
localcode claude gpt-oss. Parser auto-detects TUI and model IDs from
registries. Add qwen3-coder, glm-flash, gpt-oss models. Add cline,
droid, openclaw TUIs via ollama launch. Use ollamaLaunch field to
delegate to ollama launch for supported TUIs. Auto-pull models before
launch. Set GOOSE_MODEL env var for goose. Default model now qwen3-coder.

+283 -124
+57
src/commands/list.ts
··· 1 + import { execSync } from "node:child_process"; 2 + import { getChatModels, getAutocompleteModels } from "../registry/models.js"; 3 + import { TUIS } from "../registry/tuis.js"; 4 + import { getActiveChatModel, getActiveAutocompleteModel, getActiveTui } from "../runtime-config.js"; 5 + import { commandExists } from "../util.js"; 6 + 7 + const BOLD = "\x1b[1m"; 8 + const GREEN = "\x1b[0;32m"; 9 + const DIM = "\x1b[2m"; 10 + const RESET = "\x1b[0m"; 11 + 12 + function isPulled(ollamaTag: string): boolean { 13 + try { 14 + const output = execSync("ollama list", { encoding: "utf-8", stdio: ["pipe", "pipe", "pipe"] }); 15 + return output.includes(ollamaTag); 16 + } catch { 17 + return false; 18 + } 19 + } 20 + 21 + export function listModels(): void { 22 + const activeChatId = getActiveChatModel().id; 23 + const activeAutoId = getActiveAutocompleteModel().id; 24 + 25 + console.log(`\n${BOLD}Chat models:${RESET}`); 26 + for (const m of getChatModels()) { 27 + const active = m.id === activeChatId ? ` ${GREEN}<- active${RESET}` : ""; 28 + const pulled = isPulled(m.ollamaTag) ? "" : ` ${DIM}(not pulled)${RESET}`; 29 + console.log( 30 + ` ${BOLD}${m.id}${RESET} ${m.name} ${DIM}${m.ollamaTag}${RESET}${active}${pulled}`, 31 + ); 32 + } 33 + 34 + console.log(`\n${BOLD}Autocomplete models:${RESET}`); 35 + for (const m of getAutocompleteModels()) { 36 + const active = m.id === activeAutoId ? ` ${GREEN}<- active${RESET}` : ""; 37 + const pulled = isPulled(m.ollamaTag) ? "" : ` ${DIM}(not pulled)${RESET}`; 38 + console.log( 39 + ` ${BOLD}${m.id}${RESET} ${m.name} ${DIM}${m.ollamaTag}${RESET}${active}${pulled}`, 40 + ); 41 + } 42 + console.log(""); 43 + } 44 + 45 + export function listTuis(): void { 46 + const activeId = getActiveTui().id; 47 + 48 + console.log(`\n${BOLD}Available TUIs:${RESET}`); 49 + for (const t of TUIS) { 50 + const active = t.id === activeId ? ` ${GREEN}<- active${RESET}` : ""; 51 + const installed = commandExists(t.checkCmd) 52 + ? 
"" 53 + : ` ${DIM}(not installed)${RESET}`; 54 + console.log(` ${BOLD}${t.id}${RESET} ${t.name}${active}${installed}`); 55 + } 56 + console.log(""); 57 + }
+105 -44
src/commands/run.ts
··· 1 1 import { spawn, execSync } from "node:child_process"; 2 2 import { existsSync } from "node:fs"; 3 3 import { OLLAMA_PORT } from "../config.js"; 4 - import { getActiveTui, getActiveChatModel } from "../runtime-config.js"; 5 - import { log } from "../log.js"; 6 - import { ensureOllama } from "./server.js"; 4 + import { 5 + loadConfig, 6 + saveConfig, 7 + getActiveTui, 8 + getActiveChatModel, 9 + } from "../runtime-config.js"; 10 + import { getModelById } from "../registry/models.js"; 11 + import { getTuiById } from "../registry/tuis.js"; 12 + import { log, err } from "../log.js"; 13 + import { ensureOllama, pullIfNeeded } from "./server.js"; 14 + import { commandExists, runPassthrough } from "../util.js"; 15 + import type { ModelDef } from "../registry/models.js"; 16 + import type { TuiDef } from "../registry/tuis.js"; 17 + 18 + export interface LaunchOverrides { 19 + model?: string; 20 + tui?: string; 21 + passthrough: string[]; 22 + } 7 23 8 24 function ensureGit(): void { 9 25 if (!existsSync(".git")) { ··· 16 32 } 17 33 } 18 34 19 - export async function runDefault(args: string[]): Promise<void> { 35 + function resolveOverrides(overrides: LaunchOverrides): { 36 + chatModel: ModelDef; 37 + tui: TuiDef; 38 + } { 39 + const config = loadConfig(); 40 + let changed = false; 41 + 42 + let chatModel: ModelDef; 43 + if (overrides.model) { 44 + const m = getModelById(overrides.model); 45 + if (!m) err(`Unknown model: ${overrides.model}`); 46 + if (m.role !== "chat") err(`${overrides.model} is not a chat model.`); 47 + chatModel = m; 48 + config.chatModel = m.id; 49 + changed = true; 50 + } else { 51 + chatModel = getActiveChatModel(); 52 + } 53 + 54 + let tui: TuiDef; 55 + if (overrides.tui) { 56 + const t = getTuiById(overrides.tui); 57 + if (!t) err(`Unknown TUI: ${overrides.tui}`); 58 + tui = t; 59 + config.tui = t.id; 60 + changed = true; 61 + } else { 62 + tui = getActiveTui(); 63 + } 64 + 65 + if (changed) saveConfig(config); 66 + 67 + return { chatModel, 
tui }; 68 + } 69 + 70 + function ensureTuiInstalled(tui: TuiDef): void { 71 + // ollama launch handles installation 72 + if (tui.ollamaLaunch) return; 73 + 74 + if (!commandExists(tui.checkCmd)) { 75 + log(`Installing ${tui.name}...`); 76 + runPassthrough(tui.installCmd); 77 + } 78 + } 79 + 80 + export async function runDefault(overrides: LaunchOverrides): Promise<void> { 20 81 await ensureOllama(); 82 + const { chatModel, tui } = resolveOverrides(overrides); 83 + await pullIfNeeded(chatModel.ollamaTag, chatModel.name); 84 + ensureTuiInstalled(tui); 21 85 ensureGit(); 22 86 23 - const tui = getActiveTui(); 24 - const chatModel = getActiveChatModel(); 87 + const args = overrides.passthrough; 25 88 let tuiArgs: string[]; 26 - let tuiCmd = tui.checkCmd; 89 + let tuiCmd: string; 27 90 const env = { ...process.env }; 28 91 29 - switch (tui.id) { 30 - case "aider": 31 - env.OPENAI_API_KEY = "sk-not-needed"; 32 - env.OPENAI_API_BASE = `http://127.0.0.1:${OLLAMA_PORT}/v1`; 33 - tuiArgs = [ 34 - "--model", `openai/${chatModel.id}`, 35 - "--no-show-model-warnings", 36 - "--no-check-update", 37 - ...args, 38 - ]; 39 - break; 92 + if (tui.ollamaLaunch) { 93 + // ollama launch handles all config automatically 94 + tuiCmd = "ollama"; 95 + tuiArgs = ["launch", tui.ollamaLaunch, "--model", chatModel.ollamaTag, ...args]; 96 + } else { 97 + tuiCmd = tui.checkCmd; 40 98 41 - case "goose": 42 - env.GOOSE_PROVIDER = "ollama"; 43 - env.OLLAMA_HOST = `http://127.0.0.1:${OLLAMA_PORT}`; 44 - tuiArgs = [...args]; 45 - break; 46 - 47 - case "codex": 48 - // Codex reads ~/.codex/config.toml written by writeTuiConfig 49 - tuiArgs = ["--provider", "ollama", ...args]; 50 - break; 99 + switch (tui.id) { 100 + case "aider": 101 + env.OPENAI_API_KEY = "sk-not-needed"; 102 + env.OPENAI_API_BASE = `http://127.0.0.1:${OLLAMA_PORT}/v1`; 103 + tuiArgs = [ 104 + "--model", `openai/${chatModel.id}`, 105 + "--no-show-model-warnings", 106 + "--no-check-update", 107 + ...args, 108 + ]; 109 + break; 51 110 52 
- case "gptme": 53 - env.OPENAI_BASE_URL = `http://127.0.0.1:${OLLAMA_PORT}/v1`; 54 - tuiArgs = ["--model", `local/${chatModel.ollamaTag}`, ...args]; 55 - break; 111 + case "goose": 112 + env.GOOSE_PROVIDER = "ollama"; 113 + env.GOOSE_MODEL = chatModel.ollamaTag; 114 + env.OLLAMA_HOST = `http://127.0.0.1:${OLLAMA_PORT}`; 115 + tuiArgs = [...args]; 116 + break; 56 117 57 - case "claude": 58 - // ollama launch claude handles all config automatically 59 - tuiCmd = "ollama"; 60 - tuiArgs = ["launch", "claude", ...args]; 61 - break; 118 + case "gptme": 119 + env.OPENAI_BASE_URL = `http://127.0.0.1:${OLLAMA_PORT}/v1`; 120 + tuiArgs = ["--model", `local/${chatModel.ollamaTag}`, ...args]; 121 + break; 62 122 63 - default: 64 - tuiArgs = [...args]; 65 - break; 66 - } 123 + default: 124 + tuiArgs = [...args]; 125 + break; 126 + } 67 127 68 - if (tui.resumeArgs) { 69 - tuiArgs.push(...tui.resumeArgs); 128 + if (tui.resumeArgs) { 129 + tuiArgs.push(...tui.resumeArgs); 130 + } 70 131 } 71 132 72 - log(`Launching ${tui.name}...`); 133 + log(`Launching ${tui.name} with ${chatModel.name}...`); 73 134 const child = spawn(tuiCmd, tuiArgs, { stdio: "inherit", env }); 74 135 child.on("exit", (code) => process.exit(code ?? 0)); 75 136 }
+2 -2
src/commands/server.ts
··· 52 52 } 53 53 } 54 54 55 - function isModelPulled(ollamaTag: string): boolean { 55 + export function isModelPulled(ollamaTag: string): boolean { 56 56 try { 57 57 const output = execSync("ollama list", { encoding: "utf-8", stdio: ["pipe", "pipe", "pipe"] }); 58 58 return output.includes(ollamaTag); ··· 61 61 } 62 62 } 63 63 64 - async function pullIfNeeded(ollamaTag: string, label: string): Promise<void> { 64 + export async function pullIfNeeded(ollamaTag: string, label: string): Promise<void> { 65 65 if (isModelPulled(ollamaTag)) { 66 66 log(`${label} already pulled: ${ollamaTag}`); 67 67 return;
+49 -57
src/main.ts
··· 1 1 import { runSetup } from "./commands/setup.js"; 2 - import { listModels, setChatModel, setAutocompleteModel } from "./commands/models.js"; 3 - import { listTuis, setTui } from "./commands/tuis.js"; 2 + import { listModels, listTuis } from "./commands/list.js"; 4 3 import { runBench } from "./commands/bench.js"; 5 4 import { runDefault } from "./commands/run.js"; 6 5 import { showStatus } from "./commands/status.js"; 7 6 import { startServers, stopServers } from "./commands/server.js"; 8 7 import { runPipe } from "./commands/pipe.js"; 8 + import { getTuiById } from "./registry/tuis.js"; 9 + import { getModelById } from "./registry/models.js"; 9 10 10 11 const BOLD = "\x1b[1m"; 11 12 const DIM = "\x1b[2m"; ··· 15 16 console.log(` 16 17 ${BOLD}localcode${RESET} — local AI coding environment (Ollama) 17 18 18 - ${BOLD}Usage:${RESET} 19 - localcode ${DIM}[flags...]${RESET} Launch active TUI in current directory 20 - localcode status Show current config and server health 19 + ${BOLD}Launch:${RESET} 20 + localcode Launch with defaults 21 + localcode <tui> Launch with specified TUI 22 + localcode <model> Launch with specified model 23 + localcode <tui> <model> Launch with both overrides 21 24 22 25 ${BOLD}Server:${RESET} 23 - localcode start Start Ollama + pull models 24 - localcode stop Stop Ollama 26 + localcode start Start Ollama + pull models 27 + localcode stop Stop Ollama 28 + localcode status Show config and server health 25 29 26 - ${BOLD}Config:${RESET} 27 - localcode model List available models 28 - localcode set model <id> Switch the chat model 29 - localcode set autocomplete <id> Switch the autocomplete model 30 - localcode tui List available TUIs 31 - localcode set tui <id> Switch the active TUI 30 + ${BOLD}List:${RESET} 31 + localcode models List available models 32 + localcode tuis List available TUIs 32 33 33 34 ${BOLD}Benchmark:${RESET} 34 - localcode bench Benchmark the running chat model 35 - localcode bench history Show past benchmark results 35 
+ localcode bench Benchmark the running chat model 36 + localcode bench history Show past benchmark results 36 37 37 38 ${BOLD}Other:${RESET} 38 - localcode pipe "prompt" Pipe stdin through the model 39 - localcode setup Full install (Ollama, models, tools) 39 + localcode pipe "prompt" Pipe stdin through the model 40 + localcode setup Full install (Ollama, models, tools) 40 41 `); 41 42 } 42 43 44 + interface LaunchOverrides { 45 + model?: string; 46 + tui?: string; 47 + passthrough: string[]; 48 + } 49 + 50 + function parseLaunchArgs(argv: string[]): LaunchOverrides { 51 + const overrides: LaunchOverrides = { passthrough: [] }; 52 + for (const arg of argv) { 53 + if (getTuiById(arg)) { 54 + overrides.tui = arg; 55 + } else if (getModelById(arg)) { 56 + overrides.model = arg; 57 + } else { 58 + overrides.passthrough.push(arg); 59 + } 60 + } 61 + return overrides; 62 + } 63 + 43 64 async function main(): Promise<void> { 44 65 const cmd = process.argv[2]; 45 66 46 67 switch (cmd) { 47 - case undefined: 48 - await runDefault(process.argv.slice(3)); 49 - break; 50 - 51 68 case "status": 52 69 await showStatus(); 53 70 break; ··· 60 77 stopServers(); 61 78 break; 62 79 63 - case "model": 64 80 case "models": 65 81 listModels(); 66 82 break; 67 83 68 - case "tui": 69 84 case "tuis": 70 85 listTuis(); 71 86 break; 72 87 73 - case "set": { 74 - const what = process.argv[3]; 75 - const id = process.argv[4]; 76 - 77 - if (what === "model" || what === "chat") { 78 - if (!id) { 79 - console.error("Usage: localcode set model <model-id>"); 80 - process.exit(1); 81 - } 82 - await setChatModel(id); 83 - } else if (what === "autocomplete" || what === "auto") { 84 - if (!id) { 85 - console.error("Usage: localcode set autocomplete <model-id>"); 86 - process.exit(1); 87 - } 88 - await setAutocompleteModel(id); 89 - } else if (what === "tui") { 90 - if (!id) { 91 - console.error("Usage: localcode set tui <tui-id>"); 92 - process.exit(1); 93 - } 94 - await setTui(id); 95 - } else { 96 - 
console.error(`Unknown: localcode set ${what ?? ""}`); 97 - console.error("Usage: localcode set model|autocomplete|tui <id>"); 98 - process.exit(1); 99 - } 100 - break; 101 - } 102 - 103 88 case "bench": { 104 89 const sub = process.argv[3]; 105 90 if (sub === "history") { ··· 126 111 printUsage(); 127 112 break; 128 113 129 - default: 130 - console.error(`Unknown command: ${cmd}`); 131 - printUsage(); 132 - process.exit(1); 114 + default: { 115 + // Everything else is a launch command with optional overrides: 116 + // localcode 117 + // localcode tui aider 118 + // localcode model qwen3-coder 119 + // localcode model qwen3-coder tui goose 120 + const args = process.argv.slice(2); 121 + const overrides = parseLaunchArgs(args); 122 + await runDefault(overrides); 123 + break; 124 + } 133 125 } 134 126 } 135 127
+21
src/registry/models.ts
··· 29 29 ctxSize: 16384, 30 30 }, 31 31 { 32 + id: "qwen3-coder", 33 + name: "Qwen3 Coder 30B-A3B", 34 + role: "chat", 35 + ollamaTag: "qwen3-coder", 36 + ctxSize: 65536, 37 + }, 38 + { 39 + id: "glm-flash", 40 + name: "GLM-4.7 Flash 30B-A3B", 41 + role: "chat", 42 + ollamaTag: "glm-4.7-flash", 43 + ctxSize: 65536, 44 + }, 45 + { 46 + id: "gpt-oss", 47 + name: "GPT-OSS 20B", 48 + role: "chat", 49 + ollamaTag: "gpt-oss:20b", 50 + ctxSize: 65536, 51 + }, 52 + { 32 53 id: "qwen-1.5b-autocomplete", 33 54 name: "Qwen 2.5 Coder 1.5B", 34 55 role: "autocomplete",
+48 -20
src/registry/tuis.ts
··· 6 6 launchArgs: string; 7 7 /** Extra args to pass when resuming a previous session. */ 8 8 resumeArgs?: string[]; 9 + /** If set, launch via `ollama launch <ollamaLaunch> --model <tag>` instead of direct exec. */ 10 + ollamaLaunch?: string; 9 11 } 10 12 11 13 export const TUIS: TuiDef[] = [ 12 14 { 13 - id: "aider", 14 - name: "Aider", 15 - installCmd: "pipx install aider-chat", 16 - checkCmd: "aider", 15 + id: "claude", 16 + name: "Claude Code", 17 + installCmd: "npm install -g @anthropic-ai/claude-code", 18 + checkCmd: "claude", 17 19 launchArgs: '"$@"', 20 + ollamaLaunch: "claude", 21 + }, 22 + { 23 + id: "codex", 24 + name: "Codex CLI", 25 + installCmd: "npm install -g @openai/codex", 26 + checkCmd: "codex", 27 + launchArgs: '"$@"', 28 + ollamaLaunch: "codex", 18 29 }, 19 30 { 20 31 id: "opencode", ··· 22 33 installCmd: "npm install -g opencode-ai@latest", 23 34 checkCmd: "opencode", 24 35 launchArgs: '"$@"', 36 + ollamaLaunch: "opencode", 25 37 }, 26 38 { 27 39 id: "pi", ··· 29 41 installCmd: "npm install -g @mariozechner/pi-coding-agent", 30 42 checkCmd: "pi", 31 43 launchArgs: '"$@"', 32 - resumeArgs: ["--continue"], 44 + ollamaLaunch: "pi", 45 + }, 46 + { 47 + id: "cline", 48 + name: "Cline", 49 + installCmd: "npm install -g @anthropic-ai/cline", 50 + checkCmd: "cline", 51 + launchArgs: '"$@"', 52 + ollamaLaunch: "cline", 53 + }, 54 + { 55 + id: "droid", 56 + name: "Droid", 57 + installCmd: "npm install -g droid", 58 + checkCmd: "droid", 59 + launchArgs: '"$@"', 60 + ollamaLaunch: "droid", 61 + }, 62 + { 63 + id: "openclaw", 64 + name: "OpenClaw", 65 + installCmd: "npm install -g openclaw", 66 + checkCmd: "openclaw", 67 + launchArgs: '"$@"', 68 + ollamaLaunch: "openclaw", 69 + }, 70 + { 71 + id: "aider", 72 + name: "Aider", 73 + installCmd: "pipx install aider-chat", 74 + checkCmd: "aider", 75 + launchArgs: '"$@"', 33 76 }, 34 77 { 35 78 id: "goose", 36 79 name: "Goose", 37 80 installCmd: "brew install block-goose-cli", 38 81 checkCmd: "goose", 39 - 
launchArgs: '"$@"', 40 - }, 41 - { 42 - id: "codex", 43 - name: "Codex CLI", 44 - installCmd: "npm install -g @openai/codex", 45 - checkCmd: "codex", 46 82 launchArgs: '"$@"', 47 83 }, 48 84 { ··· 52 88 checkCmd: "gptme", 53 89 launchArgs: '"$@"', 54 90 resumeArgs: ["-r"], 55 - }, 56 - { 57 - id: "claude", 58 - name: "Claude Code", 59 - installCmd: "npm install -g @anthropic-ai/claude-code", 60 - checkCmd: "claude", 61 - launchArgs: '"$@"', 62 - resumeArgs: ["--continue"], 63 91 }, 64 92 ]; 65 93
+1 -1
src/runtime-config.ts
// Persisted runtime configuration lives under the user's ~/.config directory.
const CONFIG_PATH = join(homedir(), ".config", "localcode", "config.json");

// Fallback configuration used when no config file exists yet.
// chatModel/autocompleteModel/tui must be valid registry IDs — presumably
// validated by the registry lookups at load time; TODO confirm.
const DEFAULTS: RuntimeConfig = {
  chatModel: "qwen3-coder",
  autocompleteModel: "qwen-1.5b-autocomplete",
  tui: "aider",
};