// Monorepo for Aesthetic.Computer — aesthetic.computer
// native-builder.mjs — FedAC Native kernel OTA builds for oven
//
// Triggered via POST /native-build after commits to fedac/native/ on main.
// Runs build-and-flash.sh (no --flash) then upload-release.sh.
// Models os-base-build.mjs.

import { spawn } from "child_process";
import { randomUUID } from "crypto";
import { promises as fs } from "fs";
import path from "path";

import { MongoClient } from "mongodb";

// Run `cmd` with `args` in `cwd` and resolve with its trimmed stdout.
// Best-effort by design: spawn errors and non-zero exits both resolve to
// "" (callers use `|| fallback`); stderr is discarded. Never rejects.
function runSync(cmd, args, cwd, env = null) {
  return new Promise((resolve) => {
    const child = spawn(cmd, args, {
      cwd,
      env: env ? { ...process.env, ...env } : process.env,
      stdio: ["ignore", "pipe", "ignore"],
    });
    const chunks = [];
    child.stdout.on("data", (chunk) => chunks.push(chunk));
    child.on("close", () => resolve(chunks.join("").trim()));
    child.on("error", () => resolve(""));
  });
}
26
// In-memory job bookkeeping limits.
const MAX_RECENT_JOBS = 10; // jobs retained in jobOrder / jobs map
const MAX_LOG_LINES = 2000; // per-job log ring-buffer cap (see addLogLine)
// MongoDB collection receiving one record per finished build.
const NATIVE_BUILD_COLLECTION =
  process.env.NATIVE_BUILD_COLLECTION || "oven-native-builds";

// fedac/native/ lives in the native-git repo on oven (polled by native-git-poller).
const NATIVE_DIR =
  process.env.NATIVE_DIR || "/opt/oven/native-git/fedac/native";
const NATIVE_BRANCH = process.env.NATIVE_GIT_BRANCH || "main";
// NOTE(review): not referenced anywhere in this file — presumably consumed
// by the flash/packaging helper scripts via env; confirm before removing.
const NIX_DATA_PARTITION_MIB = process.env.NIX_DATA_PARTITION_MIB || "512";
// Docker image used to append AC-MAC/ACDATA partitions to the NixOS image.
const MEDIA_HELPER_IMAGE =
  process.env.AC_MEDIA_HELPER_IMAGE || "ac-os-media-helper:img-v1";
// Fallback locations checked by resolveBinary when `nix` is not on PATH.
const NIX_BIN_CANDIDATES = [
  process.env.NIX_BIN || "",
  "/usr/local/bin/nix",
  "/nix/var/nix/profiles/default/bin/nix",
  "/home/oven/.nix-profile/bin/nix",
  "/root/.nix-profile/bin/nix",
];
// Fallback locations for nix-collect-garbage (pre-build store GC).
const NIX_GC_CANDIDATES = [
  process.env.NIX_GC_BIN || "",
  "/usr/local/bin/nix-collect-garbage",
  "/nix/var/nix/profiles/default/bin/nix-collect-garbage",
  "/home/oven/.nix-profile/bin/nix-collect-garbage",
  "/root/.nix-profile/bin/nix-collect-garbage",
];

// Kernel build cache: symlinked from fedac/native/build so kernel object
// files survive rsync --delete between commits (5-10x faster warm builds).
const CACHE_DIR =
  process.env.NATIVE_CACHE_DIR || "/opt/oven/native-cache";

// Job registry: id → job object, newest-first id order, and the id of the
// single currently-running job (only one build runs at a time).
const jobs = new Map();
const jobOrder = [];
let activeJobId = null;

// Progress fan-out + lazily-connected Mongo handles (see getNativeBuildMongo).
let progressCallback = null; // single subscriber set via onNativeBuildProgress
let lastProgressBroadcast = 0; // Date.now() of the last full-snapshot broadcast
let nativeBuildMongoClient = null;
let nativeBuildMongoDb = null;
67
// Register the single progress subscriber. It receives throttled full job
// snapshots and lightweight per-line updates from addLogLine, plus a final
// snapshot on completion. A later call replaces the previous callback.
export function onNativeBuildProgress(cb) {
  progressCallback = cb;
}
71
// Current wall-clock time as an ISO-8601 UTC string.
function nowISO() {
  const now = new Date();
  return now.toISOString();
}
75
// Drop falsy entries and duplicates, preserving first-seen order.
// Tolerates a null/undefined `items`.
function uniqueNonEmpty(items) {
  const seen = new Set();
  for (const item of items || []) {
    if (item) seen.add(item);
  }
  return [...seen];
}
79
// Coerce a timestamp-ish value to a Date, or null when absent/unparseable.
function toDateOrNull(v) {
  if (!v) return null;
  const parsed = new Date(v);
  return Number.isNaN(parsed.getTime()) ? null : parsed;
}
86
// Lazily connect to MongoDB and memoize the db handle in module state.
// Returns null when credentials are missing or the connection fails, so
// persistence is strictly best-effort. Ensures the query indexes exist.
async function getNativeBuildMongo() {
  if (nativeBuildMongoDb) return nativeBuildMongoDb;
  const uri = process.env.MONGODB_CONNECTION_STRING;
  const dbName = process.env.MONGODB_NAME;
  if (!uri || !dbName) return null;
  try {
    nativeBuildMongoClient = await MongoClient.connect(uri);
    nativeBuildMongoDb = nativeBuildMongoClient.db(dbName);
    const collection = nativeBuildMongoDb.collection(NATIVE_BUILD_COLLECTION);
    await collection.createIndex({ when: -1 });
    await collection.createIndex({ buildName: 1, when: -1 });
    return nativeBuildMongoDb;
  } catch (err) {
    console.error("[native-builder] MongoDB connect failed:", err.message);
    return null;
  }
}
107
// Flatten a finished job into a Mongo document and insert it. Best-effort:
// any failure (no db, insert error) is logged and swallowed so a Mongo
// outage never fails a build.
async function persistNativeBuildRecord(job) {
  try {
    const db = await getNativeBuildMongo();
    if (!db) return;
    const startedAt = toDateOrNull(job.startedAt);
    const finishedAt = toDateOrNull(job.finishedAt);
    let durationMs = null;
    if (startedAt && finishedAt) durationMs = Math.max(0, finishedAt - startedAt);
    const logs = Array.isArray(job.logs) ? job.logs : null;
    const hasRealRef = Boolean(job.ref) && job.ref !== "unknown";
    await db.collection(NATIVE_BUILD_COLLECTION).insertOne({
      jobId: job.id,
      buildName: job.buildName || null,
      ref: job.ref || null,
      gitHash: hasRealRef ? String(job.ref).slice(0, 40) : null,
      status: job.status || "unknown",
      stage: job.stage || null,
      percent: Number.isFinite(job.percent) ? job.percent : null,
      error: job.error || null,
      exitCode: Number.isFinite(job.exitCode) ? job.exitCode : null,
      commitMsg: job.commitMsg || null,
      flags: Array.isArray(job.flags) ? job.flags : [],
      changedPaths: job.changedPaths || "",
      variant: job.variant || "c",
      createdAt: toDateOrNull(job.createdAt),
      startedAt,
      updatedAt: toDateOrNull(job.updatedAt),
      finishedAt,
      durationMs,
      logCount: logs ? logs.length : 0,
      // Keep only the last 120 lines in the record; full logs stay in memory.
      logTail: logs ? logs.slice(-120).map((entry) => entry.line) : [],
      source: "oven-native-builder",
      when: new Date(),
    });
  } catch (err) {
    console.error("[native-builder] Failed to persist build record:", err.message);
  }
}
147
// Remove ANSI CSI escape sequences from a log line. The previous pattern
// only matched SGR color codes (`ESC[...m`); build tools also emit cursor
// and erase sequences (`ESC[2K`, `ESC[1A`, `ESC[0G`, ...) which leaked into
// the stored logs. Matching any final letter strips the whole CSI family
// (a strict superset of the old behavior). Null/undefined input yields "".
function stripAnsi(s) {
  return String(s || "").replace(/\u001b\[[0-9;?]*[A-Za-z]/g, "");
}
151
// Locate an executable: first via `command -v` in a login shell (so the
// oven user's PATH applies), then by probing each candidate path for
// existence. Returns "" when nothing is found.
async function resolveBinary(cmd, candidates = [], cwd = NATIVE_DIR) {
  const found = await runSync("bash", ["-lc", `command -v ${cmd} || true`], cwd);
  if (found) {
    // A login shell may print profile noise first; the path is the last line.
    const lines = found.split("\n");
    return lines[lines.length - 1].trim();
  }
  for (const candidate of uniqueNonEmpty(candidates)) {
    try {
      await fs.access(candidate);
      return candidate;
    } catch {
      // Not present — keep scanning.
    }
  }
  return "";
}
163
// Append one cleaned log line to the job's log ring buffer, fan it out to
// the progress subscriber (throttled), and update the job's stage/percent
// by pattern-matching known build-script output. Blank lines are dropped.
function addLogLine(job, stream, line) {
  const clean = stripAnsi(line).replace(/\r/g, "").trimEnd();
  if (!clean) return;
  job.logs.push({ ts: nowISO(), stream, line: clean });
  // Ring buffer: discard oldest entries beyond MAX_LOG_LINES.
  if (job.logs.length > MAX_LOG_LINES)
    job.logs.splice(0, job.logs.length - MAX_LOG_LINES);
  job.updatedAt = nowISO();

  // Broadcast every log line with the last few lines attached
  if (progressCallback) {
    const now = Date.now();
    // Full snapshot every 2s, lightweight log-only message in between
    if (now - lastProgressBroadcast > 2000) {
      lastProgressBroadcast = now;
      const snap = makeSnapshot(job);
      snap.recentLines = job.logs.slice(-8).map(l => l.line);
      progressCallback(snap);
    } else {
      progressCallback({ id: job.id, line: clean, stage: job.stage, percent: job.percent });
    }
  }

  // Parse progress hints from build-and-flash.sh + upload-release.sh output.
  // Percent only ever ratchets upward (Math.max), so out-of-order or
  // repeated lines can never roll progress backwards.
  if (clean.includes("[build]") || clean.includes("[ac-os]")) {
    if (clean.match(/Building kernel|bzImage|vmlinuz/i)) {
      job.stage = "kernel";
      job.percent = Math.max(job.percent, 55);
    } else if (clean.match(/initramfs|cpio|lz4|Repacking|Copying firmware/i)) {
      job.stage = "initramfs";
      job.percent = Math.max(job.percent, 30);
    } else if (clean.match(/Building binary|Built:|ac-native|gcc|musl/i)) {
      job.stage = "binary";
      job.percent = Math.max(job.percent, 10);
    }
  }
  // Nix build progress hints
  if (clean.match(/copying path|building.*\.drv|fetching.*narinfo/i)) {
    // Don't regress the stage once the nix upload phase has started
    // (nix also prints "copying path" lines while uploading).
    if (job.stage !== "nix-upload") {
      job.stage = "nix-build";
      job.percent = Math.max(job.percent, 65);
    }
  }
  if (clean.match(/SMOKE TEST|smoke_test|qemu/i)) {
    job.stage = "smoke-test";
    job.percent = Math.max(job.percent, 80);
  }
  if (clean.match(/Uploading|uploaded:/i)) {
    job.stage = "upload";
    job.percent = Math.max(job.percent, 90);
  }
  if (clean.includes("Release published")) {
    job.stage = "done";
    job.percent = 100;
  }
}
219
// Build a serializable view of a job for API responses and broadcasts.
// With `includeLogs`, attaches the last `tail` log entries. elapsedMs runs
// live (against Date.now()) until the job records a finishedAt.
function makeSnapshot(job, opts = {}) {
  const { includeLogs = false, tail = 200 } = opts;
  let elapsedMs = 0;
  if (job.startedAt) {
    const endMs = job.finishedAt ? Date.parse(job.finishedAt) : Date.now();
    elapsedMs = endMs - Date.parse(job.startedAt);
  }
  const snap = {
    id: job.id,
    ref: job.ref,
    status: job.status,
    stage: job.stage,
    percent: job.percent,
    flags: job.flags,
    createdAt: job.createdAt,
    startedAt: job.startedAt,
    updatedAt: job.updatedAt,
    finishedAt: job.finishedAt,
    exitCode: job.exitCode,
    error: job.error,
    buildName: job.buildName || null,
    variant: job.variant || "c",
    commitMsg: job.commitMsg || null,
    logCount: job.logs.length,
    elapsedMs,
  };
  if (includeLogs) {
    const keep = Math.max(0, tail);
    snap.logs = job.logs.slice(Math.max(0, job.logs.length - keep));
  }
  return snap;
}
250
// Determine build-and-flash.sh flags based on which paths changed.
// Never skip binary: AC_BUILD_NAME and AC_GIT_HASH are compiled into the
// binary via CFLAGS and change on every commit. The Makefile's CFLAGS
// signature check (`.cflags` md5) handles incremental rebuilds efficiently —
// only object files are recompiled when flags change, not the full kernel.
// Skipping the binary causes version string mismatch (device shows stale name).
function buildFlagsFor(changedPaths = "") {
  void changedPaths; // reserved for future path-based flag selection
  return [];
}
260
// Symlink <nativeDir>/build → cacheDir so kernel object files survive the
// rsync --delete that happens on every deploy/push sync.
//
// Idempotent. Handles every pre-existing state of the `build` entry:
//   - missing           → create the symlink
//   - already a symlink → leave it alone
//   - a real directory  → migrate it into the cache via rename, or discard
//                         it if the rename fails (non-empty cache, EXDEV)
//   - anything else     → remove it first (previously a stray regular file
//                         here made fs.symlink throw EEXIST and fail builds)
//
// @param {string} cacheDir  persistent cache directory (created if missing)
// @param {string} nativeDir directory whose `build` entry becomes the link
async function setupBuildCache(cacheDir = CACHE_DIR, nativeDir = NATIVE_DIR) {
  await fs.mkdir(cacheDir, { recursive: true });
  const buildLink = path.join(nativeDir, "build");
  let stat;
  try {
    stat = await fs.lstat(buildLink);
  } catch {
    // No `build` entry yet — just create the link.
    await fs.symlink(cacheDir, buildLink);
    return;
  }
  if (stat.isSymbolicLink()) return; // already wired up
  if (stat.isDirectory()) {
    try {
      await fs.rename(buildLink, cacheDir);
    } catch {
      await fs.rm(buildLink, { recursive: true, force: true });
    }
  } else {
    // Regular file (or other non-dir entry) would make symlink() throw EEXIST.
    await fs.rm(buildLink, { force: true });
  }
  await fs.symlink(cacheDir, buildLink);
}
283
// Attach a line-splitting reader to one of proc's output streams, feeding
// complete lines — and any trailing partial line at EOF — into the job log.
function wireStream(job, proc, streamName) {
  const source = streamName === "stdout" ? proc.stdout : proc.stderr;
  let buffer = "";
  source.on("data", (chunk) => {
    buffer += chunk.toString();
    let newline = buffer.indexOf("\n");
    while (newline >= 0) {
      addLogLine(job, streamName, buffer.slice(0, newline));
      buffer = buffer.slice(newline + 1);
      newline = buffer.indexOf("\n");
    }
  });
  source.on("end", () => {
    if (buffer) addLogLine(job, streamName, buffer);
  });
}
299
// Run one build phase as a child process: sets job.stage to `label`, wires
// stdout/stderr into the job log, and tracks pid/process for cancellation.
// Resolves on exit 0; rejects on spawn error or any non-zero exit code.
// Color output is suppressed (TERM=dumb etc.) so logs stay ANSI-free.
async function runPhase(job, label, cmd, args, cwd, extraEnv = {}) {
  job.stage = label;
  job.updatedAt = nowISO();
  return new Promise((resolve, reject) => {
    const env = {
      ...process.env,
      TERM: "dumb",
      CLICOLOR: "0",
      FORCE_COLOR: "0",
      ...extraEnv,
    };
    const proc = spawn(cmd, args, { cwd, env, stdio: ["ignore", "pipe", "pipe"] });
    job.process = proc;
    job.pid = proc.pid;
    wireStream(job, proc, "stdout");
    wireStream(job, proc, "stderr");
    proc.on("error", reject);
    proc.on("close", (code) => {
      job.process = null;
      job.exitCode = code;
      if (code === 0) {
        resolve();
      } else {
        reject(new Error(`${label} failed (exit ${code})`));
      }
    });
  });
}
328
// Execute one native build job end-to-end: git preflight + sanity checks,
// disk cleanup, then per-variant build/extract/upload phases ("c" and/or
// "cl" via Docker, "nix" directly on the host). Mutates `job` in place
// (status/stage/percent/logs/...), persists a summary record to Mongo in
// the finally block, and never throws — failures land in job.error.
async function runBuildJob(job) {
  try {
    job.status = "running";
    job.startedAt = nowISO();
    job.percent = 0;

    // NATIVE_DIR is <repo>/fedac/native, so the repo root is two levels up.
    const repoDir = path.resolve(NATIVE_DIR, "../..");

    // Determine variant: "c" (default), "cl", "nix", "both", or "all"
    const variant = job.variant || "c";
    const buildC = variant === "c" || variant === "both" || variant === "all";
    const buildCL = variant === "cl" || variant === "both" || variant === "all";
    const buildNix = variant === "nix" || variant === "all";
    const needsDockerBuild = buildC || buildCL;

    // Preflight: hard-sync build repo and refuse conflicted/dirty native/Nix trees.
    addLogLine(job, "stdout", "Preflight: syncing native git checkout...");
    await runPhase(job, "preflight-sync", "bash", ["-lc", [
      "set -euo pipefail",
      `git fetch origin ${NATIVE_BRANCH} --quiet || true`,
      `git checkout -f ${NATIVE_BRANCH} --quiet || true`,
      `if git rev-parse --verify origin/${NATIVE_BRANCH} >/dev/null 2>&1; then`,
      `  git reset --hard origin/${NATIVE_BRANCH} --quiet`,
      "fi",
      "git clean -fdq -- fedac/native fedac/nixos",
    ].join("\n")], repoDir);

    const syncedRef = await runSync("git", ["rev-parse", "HEAD"], repoDir);
    if (syncedRef) job.ref = syncedRef;

    // Tracked changes surviving the hard reset mean something external is
    // touching the checkout — refuse to ship an unknown tree.
    const trackedDirty = await runSync(
      "git",
      ["status", "--porcelain", "--untracked-files=no", "--", "fedac/native", "fedac/nixos"],
      repoDir,
    );
    if (trackedDirty) {
      throw new Error(
        `Refusing native build: fedac/native or fedac/nixos tree is dirty after sync:\n${trackedDirty}`,
      );
    }

    const unresolved = await runSync(
      "git",
      ["diff", "--name-only", "--diff-filter=U", "--", "fedac/native", "fedac/nixos"],
      repoDir,
    );
    if (unresolved) {
      throw new Error(
        `Refusing native build: unresolved merge conflict(s): ${unresolved}`,
      );
    }

    // Belt-and-braces: grep the init script for conflict/stash markers that
    // git itself may no longer report as unresolved.
    const conflictMarkers = await runSync(
      "bash",
      [
        "-lc",
        "grep -nE '^(<<<<<<<|=======|>>>>>>>)|Updated upstream|Stashed changes' fedac/native/initramfs/init 2>/dev/null || true",
      ],
      repoDir,
    );
    if (conflictMarkers) {
      throw new Error(
        `Refusing native build: conflict markers detected in initramfs/init:\n${conflictMarkers}`,
      );
    }

    // Parse-check init script explicitly so syntax issues fail before kernel compile/upload.
    await runPhase(
      job,
      "preflight-init",
      "bash",
      ["-lc", "set -euo pipefail\nsh -n fedac/native/initramfs/init"],
      repoDir,
    );

    // Auto-cleanup: prune Docker + old build artifacts to prevent disk-full failures.
    addLogLine(job, "stdout", "Preflight: freeing disk space...");
    await runPhase(job, "preflight-cleanup", "bash", ["-lc", [
      "set -euo pipefail",
      "docker system prune -f --volumes 2>/dev/null | tail -1 || true",
      "rm -rf /tmp/oven-vmlinuz-* /tmp/ac-build-* 2>/dev/null || true",
      "df -h / | tail -1",
    ].join("\n")], repoDir);

    // Resolve ref from git HEAD if manual trigger didn't provide one
    if (!job.ref || job.ref === "unknown") {
      const headRef = await runSync("git", ["rev-parse", "HEAD"], repoDir);
      if (headRef) job.ref = headRef;
    }

    // Build identity (name / short hash / timestamp / subject) baked into the
    // binary via AC_BUILD_NAME, AC_GIT_HASH, AC_BUILD_TS below.
    const buildName = await runSync("bash", ["scripts/build-name.sh"], NATIVE_DIR) || `oven-${job.ref.slice(0, 7)}`;
    const buildGitHash = await runSync("git", ["rev-parse", "--short", "HEAD"], repoDir) || job.ref.slice(0, 9);
    const buildTs = new Date().toISOString().slice(0, 16);
    const commitMsg = await runSync("git", ["log", "-1", "--format=%s", job.ref], repoDir) || "";
    job.buildName = buildName;
    job.buildGitHash = buildGitHash;
    job.buildTs = buildTs;
    job.commitMsg = commitMsg;
    const vmlinuzOut = `/tmp/oven-vmlinuz-${job.id}`;

    // Pre-build: prune Docker only when a Docker-backed variant is needed.
    if (needsDockerBuild) {
      addLogLine(job, "stdout", "Pre-build: Pruning Docker artifacts...");
      try {
        await runPhase(job, "prune", "bash", ["-c",
          "docker container prune -f && docker image prune -af --filter until=2h && docker builder prune -af --filter until=30m && docker volume prune -f",
        ], repoDir);
        const dfOut = await runSync("bash", ["-c", "df --output=avail / | tail -1"], repoDir);
        const availKB = parseInt(dfOut, 10) || 0;
        const availGB = availKB / 1048576;
        addLogLine(job, "stdout", ` Disk: ${availGB.toFixed(1)}GB free`);
        if (availGB < 10) {
          addLogLine(job, "stderr", ` WARNING: Only ${availGB.toFixed(1)}GB free — running full prune...`);
          await runPhase(job, "emergency-prune", "bash", ["-c",
            "docker system prune -af --volumes",
          ], repoDir);
        }
      } catch { addLogLine(job, "stdout", " Prune skipped (non-fatal)"); }

      // Phase 1: Docker image build (cached layers = fast)
      addLogLine(job, "stdout", "Phase 1: Building Docker image...");
      await runPhase(job, "docker-build", "docker", [
        "build", "-t", "ac-os-builder",
        "-f", path.join(repoDir, "fedac/native/Dockerfile.builder"),
        repoDir,
      ], repoDir);

      job.percent = 30;
    }

    const uploadScript = path.join(NATIVE_DIR, "scripts/upload-release.sh");
    // Credentials + build identity handed to upload-release.sh for both variants.
    const uploadEnv = {
      DO_SPACES_KEY: process.env.DO_SPACES_KEY || process.env.ART_SPACES_KEY || "",
      DO_SPACES_SECRET: process.env.DO_SPACES_SECRET || process.env.ART_SPACES_SECRET || "",
      AC_BUILD_NAME: buildName,
      AC_GIT_HASH: buildGitHash,
      AC_BUILD_TS: buildTs,
      AC_COMMIT_MSG: commitMsg,
    };

    // ── C variant ──
    if (buildC) {
      addLogLine(job, "stdout", "Phase 2: Compiling C kernel in Docker...");
      const cidFile = `/tmp/oven-cid-${job.id}`;
      // docker create + start -a (rather than docker run) keeps the container
      // id around for the docker cp extraction step below.
      await runPhase(job, "build", "bash", ["-c", [
        `CID=$(docker create -e AC_BUILD_NAME=${buildName} -e AC_GIT_HASH=${buildGitHash} -e AC_BUILD_TS=${buildTs} -v ac-os-ccache:/ccache ac-os-builder)`,
        `echo $CID > ${cidFile}`,
        `docker start -a $CID`,
      ].join(" && ")], repoDir);

      job.percent = 75;

      addLogLine(job, "stdout", "Phase 3: Extracting C kernel + ISO + slim kernel + initramfs...");
      const cid = (await fs.readFile(cidFile, "utf8")).trim();
      const isoOut = `/tmp/oven-iso-${job.id}`;
      const slimOut = `/tmp/oven-vmlinuz-slim-${job.id}`;
      const initramfsOut = `/tmp/oven-initramfs-${job.id}`;
      // Only vmlinuz is mandatory; the other artifacts are probed in both
      // container locations and allowed to be missing (|| true).
      await runPhase(job, "extract", "bash", ["-c", [
        `docker cp ${cid}:/tmp/ac-build/vmlinuz ${vmlinuzOut}`,
        `docker cp ${cid}:/tmp/ac-build/ac-os.iso ${isoOut} 2>/dev/null || docker cp ${cid}:/out/ac-os.iso ${isoOut} 2>/dev/null || true`,
        `docker cp ${cid}:/tmp/ac-build/vmlinuz-slim ${slimOut} 2>/dev/null || docker cp ${cid}:/out/vmlinuz-slim ${slimOut} 2>/dev/null || true`,
        `docker cp ${cid}:/tmp/ac-build/initramfs.cpio.gz ${initramfsOut} 2>/dev/null || docker cp ${cid}:/out/initramfs.cpio.gz ${initramfsOut} 2>/dev/null || true`,
        `ls -lh ${slimOut} ${initramfsOut} 2>/dev/null || echo "WARNING: slim/initramfs not extracted"`,
        `docker rm ${cid} >/dev/null`,
      ].join("; ")], repoDir);

      job.percent = 80;

      addLogLine(job, "stdout", "Phase 4: Uploading C variant to CDN...");
      const uploadDir = `/tmp/oven-upload-${job.id}`;
      const vmlinuzUpload = `${uploadDir}/vmlinuz`;
      const isoUpload = `${uploadDir}/ac-os.iso`;
      const slimUpload = `${uploadDir}/vmlinuz-slim`;
      const initramfsUpload = `${uploadDir}/initramfs.cpio.gz`;
      await fs.mkdir(uploadDir, { recursive: true });
      await fs.rename(vmlinuzOut, vmlinuzUpload);
      try { await fs.rename(isoOut, isoUpload); } catch {}
      try { await fs.rename(slimOut, slimUpload); } catch {}
      try { await fs.rename(initramfsOut, initramfsUpload); } catch {}
      // upload-release.sh auto-detects sibling files (vmlinuz-slim, initramfs.cpio.gz, ac-os.iso)
      await runPhase(job, "upload", "bash", [uploadScript, vmlinuzUpload], NATIVE_DIR, uploadEnv);

      try { await fs.rm(uploadDir, { recursive: true }); } catch {}
      try { await fs.unlink(cidFile); } catch {}
      addLogLine(job, "stdout", "C variant uploaded successfully");
    }

    job.percent = buildCL ? 50 : 90;

    // ── CL variant ──
    if (buildCL) {
      addLogLine(job, "stdout", `Phase ${buildC ? 5 : 2}: Compiling CL kernel in Docker...`);
      const clCidFile = `/tmp/oven-cl-cid-${job.id}`;
      const clVmlinuzOut = `/tmp/oven-cl-vmlinuz-${job.id}`;

      await runPhase(job, "cl-build", "bash", ["-c", [
        `CID=$(docker create -e AC_BUILD_NAME=${buildName} -e AC_GIT_HASH=${buildGitHash} -e AC_BUILD_TS=${buildTs} -e AC_BUILD_VARIANT=cl -e AC_BUILD_LISP=1 ac-os-builder)`,
        `echo $CID > ${clCidFile}`,
        `docker start -a $CID`,
      ].join(" && ")], repoDir);

      job.percent = buildC ? 85 : 75;

      addLogLine(job, "stdout", "Extracting CL kernel...");
      const clCid = (await fs.readFile(clCidFile, "utf8")).trim();
      const clIsoOut = `/tmp/oven-cl-iso-${job.id}`;
      await runPhase(job, "cl-extract", "bash", ["-c",
        `docker cp ${clCid}:/tmp/ac-build/vmlinuz ${clVmlinuzOut} && docker cp ${clCid}:/tmp/ac-build/ac-os.iso ${clIsoOut} 2>/dev/null; docker rm ${clCid} >/dev/null`
      ], repoDir);

      job.percent = buildC ? 90 : 80;

      addLogLine(job, "stdout", "Uploading CL variant to CDN...");
      const clUploadDir = `/tmp/oven-cl-upload-${job.id}`;
      const clVmlinuzUpload = `${clUploadDir}/vmlinuz`;
      const clIsoUpload = `${clUploadDir}/ac-os.iso`;
      await fs.mkdir(clUploadDir, { recursive: true });
      await fs.rename(clVmlinuzOut, clVmlinuzUpload);
      try { await fs.rename(clIsoOut, clIsoUpload); } catch {}
      // Same upload script, published on the "cl" OTA channel.
      await runPhase(job, "cl-upload", "bash", [uploadScript, clVmlinuzUpload], NATIVE_DIR, {
        ...uploadEnv,
        OTA_CHANNEL: "cl",
      });

      try { await fs.rm(clUploadDir, { recursive: true }); } catch {}
      try { await fs.unlink(clCidFile); } catch {}
      addLogLine(job, "stdout", "CL variant uploaded successfully");
    }

    // ── NixOS variant: build directly on host with nix (no Docker) ──
    if (buildNix) {
      const nixosDir = path.resolve(NATIVE_DIR, "../nixos");
      const nixHomeDir = `/tmp/oven-nix-home-${job.id}`;
      const nixUploadDir = `/tmp/oven-nix-upload-${job.id}`;
      const nixBin = await resolveBinary("nix", NIX_BIN_CANDIDATES, nixosDir);
      if (!nixBin) {
        throw new Error(
          "Nix binary not found on oven host. Checked PATH and: " +
          uniqueNonEmpty(NIX_BIN_CANDIDATES).join(", "),
        );
      }
      const nixGcBin = await resolveBinary(
        "nix-collect-garbage",
        [
          path.join(path.dirname(nixBin), "nix-collect-garbage"),
          ...NIX_GC_CANDIDATES,
        ],
        nixosDir,
      );
      // Isolated HOME so nix cache/config never touch the real user profile.
      await fs.mkdir(path.join(nixHomeDir, ".cache", "nix"), { recursive: true });
      const nixEnv = {
        HOME: nixHomeDir,
        XDG_CACHE_HOME: path.join(nixHomeDir, ".cache"),
        NIX_CONFIG: "experimental-features = nix-command flakes\nwarn-dirty = false",
        AC_NIX_NATIVE_SRC: NATIVE_DIR,
        PATH: uniqueNonEmpty([
          path.dirname(nixBin),
          nixGcBin ? path.dirname(nixGcBin) : "",
          process.env.PATH || "",
        ]).join(":"),
      };
      addLogLine(job, "stdout", `Phase N: using nix at ${nixBin}`);

      try {
        // Preflight: garbage collect old nix store entries
        addLogLine(job, "stdout", "Phase N: NixOS — cleaning Nix store...");
        if (nixGcBin) {
          try {
            await runPhase(
              job,
              "nix-gc",
              nixGcBin,
              ["--delete-older-than", "3d"],
              nixosDir,
              nixEnv,
            );
          } catch {}
        } else {
          addLogLine(job, "stdout", "Phase N: skipping Nix GC — nix-collect-garbage not found");
        }

        addLogLine(job, "stdout", "Phase N: NixOS — building image with Nix...");
        job.stage = "nix-build";
        job.percent = Math.max(job.percent, 60);
        if (progressCallback) progressCallback(makeSnapshot(job));

        // fedac/nixos reads AC_NIX_NATIVE_SRC from the host env to import fedac/native.
        // Build the raw NixOS disk image.
        await runPhase(job, "nix-build", nixBin, [
          "build", ".#usb-image",
          "--impure",
          "--no-link", "--print-out-paths",
        ], nixosDir, nixEnv);

        job.percent = Math.max(job.percent, 85);

        // Reuse the stdout path from the build phase instead of invoking nix twice.
        const nixOutResult = [...job.logs]
          .reverse()
          .find((entry) =>
            entry.stream === "stdout" &&
            /^\/nix\/store\/.+$/.test(entry.line || "")
          )?.line || "";
        if (!nixOutResult) {
          throw new Error("NixOS build finished without returning an output path");
        }

        // Find the raw disk image in the output directory.
        const imgPath = await runSync(
          "bash",
          ["-lc", "find \"$1\" -name '*.img' -type f | head -1", "_", nixOutResult],
          nixosDir,
        );

        if (!imgPath) {
          throw new Error("NixOS build produced no image file");
        }

        addLogLine(job, "stdout", `NixOS image: ${imgPath}`);

        // Copy to upload directory
        await fs.mkdir(nixUploadDir, { recursive: true });
        const nixImgUpload = path.join(nixUploadDir, "ac-os-nixos.img");
        const nixConfigUpload = path.join(nixUploadDir, "config.json");
        await fs.copyFile(imgPath, nixImgUpload);
        // Blank device config seeds the ACDATA partition appended below.
        await fs.writeFile(
          nixConfigUpload,
          `${JSON.stringify({ handle: "", piece: "notepat", sub: "", email: "" })}\n`,
        );

        addLogLine(job, "stdout", "Phase N: building media helper image...");
        await runPhase(job, "nix-helper-build", "docker", [
          "build", "-t", MEDIA_HELPER_IMAGE,
          "-f", path.join(repoDir, "fedac/native/Dockerfile.flash-helper"),
          repoDir,
        ], repoDir, {
          ...nixEnv,
          DOCKER_BUILDKIT: "0",
        });

        addLogLine(job, "stdout", "Phase N: appending AC-MAC + ACDATA partitions...");
        await runPhase(job, "nix-package", "docker", [
          "run", "--rm", "--privileged",
          "-v", `${nixUploadDir}:/work`,
          "--entrypoint", "/bin/bash",
          MEDIA_HELPER_IMAGE,
          "-lc",
          "exec /usr/local/bin/ac-os-nixos-image-helper /work/ac-os-nixos.img /work/config.json",
        ], nixUploadDir, nixEnv);

        job.stage = "nix-upload";
        job.percent = Math.max(job.percent, 90);

        // Upload with nix- channel prefix
        await runPhase(job, "nix-upload", "bash", [
          uploadScript, "--image", nixImgUpload,
        ], NATIVE_DIR, {
          ...uploadEnv,
          OTA_CHANNEL: "nix",
        });

        addLogLine(job, "stdout", "NixOS variant uploaded successfully");
      } finally {
        // Always clear scratch dirs, success or failure.
        try { await fs.rm(nixUploadDir, { recursive: true }); } catch {}
        try { await fs.rm(nixHomeDir, { recursive: true }); } catch {}
      }
    }

    job.status = "success";
    job.stage = "done";
    job.percent = 100;
    job.error = null;
    job.finishedAt = nowISO();
    if (progressCallback) progressCallback(makeSnapshot(job));
  } catch (err) {
    job.finishedAt = nowISO();
    // cancelNativeBuild sets status = "cancelled" before SIGTERM lands here.
    job.status = job.status === "cancelled" ? "cancelled" : "failed";
    job.stage = job.status;
    job.error = err.message || String(err);
    if (progressCallback) progressCallback(makeSnapshot(job));
  } finally {
    await persistNativeBuildRecord(job);
    if (activeJobId === job.id) activeJobId = null;
  }
}
714
// Create and launch a build job. Only one build may run at a time: throws
// (code NATIVE_BUILD_BUSY) while another job is active. Registers the job
// in the recent-history ring, fires runBuildJob in the background, and
// returns an immediate snapshot of the queued job.
export async function startNativeBuild(options = {}) {
  if (activeJobId) {
    const busy = new Error(`Native build already running: ${activeJobId}`);
    busy.code = "NATIVE_BUILD_BUSY";
    busy.activeJobId = activeJobId;
    throw busy;
  }

  const id = randomUUID().slice(0, 10);
  const changedPaths = options.changed_paths || "";
  const job = {
    id,
    ref: options.ref || "unknown",
    flags: buildFlagsFor(changedPaths),
    status: "queued",
    stage: "queued",
    percent: 0,
    createdAt: nowISO(),
    startedAt: null,
    updatedAt: nowISO(),
    finishedAt: null,
    pid: null,
    process: null,
    exitCode: null,
    error: null,
    changedPaths,
    variant: options.variant || "c",
    logs: [],
  };

  jobs.set(id, job);
  jobOrder.unshift(id);
  // Evict the oldest entries beyond the history cap (never the active job).
  while (jobOrder.length > MAX_RECENT_JOBS) {
    const evicted = jobOrder.pop();
    if (evicted !== activeJobId) jobs.delete(evicted);
  }
  activeJobId = id;
  // Fire-and-forget: runBuildJob handles its own errors and clears activeJobId.
  runBuildJob(job).catch(() => {});
  return makeSnapshot(job);
}
755
// Snapshot a single job by id (opts forwarded to makeSnapshot), or null
// when the id is unknown / already evicted from history.
export function getNativeBuild(jobId, opts = {}) {
  const job = jobs.get(jobId);
  if (!job) return null;
  return makeSnapshot(job, opts);
}
760
// Overview for dashboards: the active job's snapshot (if any) plus
// snapshots of all retained jobs, newest first.
export function getNativeBuildsSummary() {
  const recent = [];
  for (const id of jobOrder) {
    const job = jobs.get(id);
    if (job) recent.push(makeSnapshot(job));
  }
  return {
    activeJobId,
    active: activeJobId ? makeSnapshot(jobs.get(activeJobId)) : null,
    recent,
  };
}
771
// Cancel a running job: marks it "cancelled" and SIGTERMs its current
// phase process (runBuildJob's catch block then finalizes the job).
// Returns { ok } or { ok: false, error } — never throws.
export function cancelNativeBuild(jobId) {
  const job = jobs.get(jobId);
  if (!job) return { ok: false, error: "not found" };
  const running = job.status === "running" && job.process;
  if (!running) return { ok: false, error: "not running" };
  try {
    job.process.kill("SIGTERM");
    job.status = "cancelled";
    return { ok: true };
  } catch (err) {
    return { ok: false, error: err.message };
  }
}