Anonymize your writing style. A Zig WASM engine detects authorship markers; a fine-tuned LLM rewrites the text to remove them. Runs entirely in-browser. fantasma.qstorage.quilibrium.com/
wasm privacy qwen zig


feat: wire browser deployment with binary tokenizer format

WASM init was hanging because std.json.parseFromSlice on the 20MB
tokenizer.json (248K vocab + 248K merges) creates millions of heap
allocations via memory.grow. Replace with a compact binary format
(.tkn) that loads via flat array reads in O(n).

- Add scripts/convert_tokenizer.py (JSON -> .tkn binary converter)
- Add Tokenizer.loadFromBinary() with TOKN magic auto-detection
- Update dist.sh to validate model files, convert tokenizer, copy
model.q4 + tokenizer.tkn into dist/
- Update app to download tokenizer.tkn + model.q4, store both in
IndexedDB, pass to init_with_tokenizer() via WASM

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Entire-Checkpoint: a3bc79e811d3
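
To make the "flat array reads" claim concrete: everything in a .tkn file is addressable from its fixed 32-byte header, so a consumer can sanity-check a converted file with a single struct.unpack and no JSON parsing at all. A minimal, hypothetical inspection sketch (not part of this change; it assumes the header layout documented in scripts/convert_tokenizer.py below):

#!/usr/bin/env python3
# Hypothetical .tkn header check: one fixed-size read, no per-token allocations.
import struct
import sys

with open(sys.argv[1], "rb") as f:
    header = f.read(32)

magic, vocab_size, entry_count, merge_count, eos_id, im_start_id, im_end_id, pool_size = \
    struct.unpack("<4sIIIIIII", header)

assert magic == b"TOKN", "not a .tkn file"
print(f"vocab_size={vocab_size} entries={entry_count} merges={merge_count} pool={pool_size} bytes")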

chris 65711379 436bb31c

+308 -15
+30 -3
dist.sh
···
  ROOT="$(cd "$(dirname "$0")" && pwd)"
  DIST="$ROOT/web/dist"
+ MODEL_DIR="${MODEL_DIR:-$ROOT/QwenTheBard}"

  echo "=== Fantasma dist ==="

+ # Validate model files exist
+ if [[ ! -f "$MODEL_DIR/model.q4" ]]; then
+   echo "ERROR: Missing $MODEL_DIR/model.q4"
+   echo "  Set MODEL_DIR= or place files in QwenTheBard/"
+   exit 1
+ fi
+ if [[ ! -f "$MODEL_DIR/tokenizer.json" ]]; then
+   echo "ERROR: Missing $MODEL_DIR/tokenizer.json"
+   echo "  Set MODEL_DIR= or place files in QwenTheBard/"
+   exit 1
+ fi
+
+ # Convert tokenizer to binary if needed
+ if [[ ! -f "$MODEL_DIR/tokenizer.tkn" ]] || [[ "$MODEL_DIR/tokenizer.json" -nt "$MODEL_DIR/tokenizer.tkn" ]]; then
+   echo "[0/4] Converting tokenizer..."
+   python3 "$ROOT/scripts/convert_tokenizer.py" "$MODEL_DIR/tokenizer.json" "$MODEL_DIR/tokenizer.tkn"
+ fi
+
  # Build WASM
- echo "[1/3] Building WASM..."
+ echo "[1/4] Building WASM..."
  cd "$ROOT"
  zig build wasm
  echo "  fantasma.wasm: $(du -h zig-out/bin/fantasma.wasm | cut -f1)"
···
  cp zig-out/bin/fantasma.wasm "$ROOT/web/public/"

  # Build frontend with Vite
- echo "[2/3] Building frontend..."
+ echo "[2/4] Building frontend..."
  cd "$ROOT/web"
  bun run build

+ # Copy model assets into dist (after Vite build so they aren't hashed)
+ echo "[3/4] Copying model assets..."
+ cp "$MODEL_DIR/model.q4" "$DIST/"
+ cp "$MODEL_DIR/tokenizer.tkn" "$DIST/"
+ echo "  model.q4:      $(du -h "$DIST/model.q4" | cut -f1)"
+ echo "  tokenizer.tkn: $(du -h "$DIST/tokenizer.tkn" | cut -f1)"
+
+ echo ""
  echo "  $(find "$DIST" -type f | wc -l) files -> web/dist/"
  echo ""
  ls -lh "$DIST"
···

  # Serve
  PORT="${1:-8080}"
- echo "[3/3] Serving at http://localhost:$PORT"
+ echo "[4/4] Serving at http://localhost:$PORT"
  echo "  Ctrl+C to stop"
  echo ""
  cd "$DIST"
+141
scripts/convert_tokenizer.py
···
+ #!/usr/bin/env python3
+ """Convert HuggingFace tokenizer.json to compact binary format (.tkn).
+
+ Binary format (all little-endian):
+   Header (32 bytes):
+     magic:            4 bytes "TOKN"
+     vocab_size:       u32  max_id + 1 (total slots)
+     entry_count:      u32  populated vocab entries
+     merge_count:      u32  BPE merge rules
+     eos_id:           u32
+     im_start_id:      u32
+     im_end_id:        u32
+     string_pool_size: u32
+
+   string_pool: [string_pool_size] u8
+
+   vocab_table: [vocab_size] × (offset:u32, length:u32)
+     offset=0xFFFFFFFF means empty slot
+
+   token_to_id_table: [entry_count] × (offset:u32, length:u32, id:u32)
+     sorted by string for binary search
+
+   merges_table: [merge_count] × (first:u32, second:u32, result:u32, rank:u32)
+ """
+
+ import json
+ import struct
+ import sys
+ from pathlib import Path
+
+ EMPTY = 0xFFFFFFFF
+
+
+ def main():
+     if len(sys.argv) < 2:
+         print(f"Usage: {sys.argv[0]} <tokenizer.json> [output.tkn]")
+         sys.exit(1)
+
+     in_path = Path(sys.argv[1])
+     out_path = Path(sys.argv[2]) if len(sys.argv) > 2 else in_path.with_suffix(".tkn")
+
+     with open(in_path) as f:
+         data = json.load(f)
+
+     model = data["model"]
+     vocab = model["vocab"]    # str -> int
+     merges = model["merges"]  # list of [str, str] or "str str"
+
+     # Special tokens
+     added = {t["content"]: t["id"] for t in data.get("added_tokens", [])}
+     eos_id = added.get("<|endoftext|>", 248046)
+     im_start_id = added.get("<|im_start|>", 248045)
+     im_end_id = added.get("<|im_end|>", 248046)
+
+     # Merge added tokens into vocab
+     all_vocab = dict(vocab)
+     for content, tid in added.items():
+         all_vocab[content] = tid
+
+     max_id = max(all_vocab.values())
+     vocab_size = max_id + 1
+     entry_count = len(all_vocab)
+
+     # Build string pool
+     pool = bytearray()
+     pool_index = {}  # text -> (offset, length)
+     for text in sorted(all_vocab.keys()):
+         offset = len(pool)
+         encoded = text.encode("utf-8")
+         pool.extend(encoded)
+         pool_index[text] = (offset, len(encoded))
+
+     # Vocab table: id -> (offset, length)
+     vocab_table = bytearray(vocab_size * 8)
+     for i in range(vocab_size):
+         struct.pack_into("<II", vocab_table, i * 8, EMPTY, 0)
+     for text, tid in all_vocab.items():
+         off, ln = pool_index[text]
+         struct.pack_into("<II", vocab_table, tid * 8, off, ln)
+
+     # Token-to-id table: sorted by text for binary search
+     sorted_entries = sorted(all_vocab.items(), key=lambda x: x[0].encode("utf-8"))
+     token_to_id_table = bytearray(entry_count * 12)
+     for i, (text, tid) in enumerate(sorted_entries):
+         off, ln = pool_index[text]
+         struct.pack_into("<III", token_to_id_table, i * 12, off, ln, tid)
+
+     # Build text->id lookup for merge resolution
+     text_to_id = {text: tid for text, tid in all_vocab.items()}
+
+     # Merges table
+     merge_entries = []
+     for rank, m in enumerate(merges):
+         if isinstance(m, list):
+             first_s, second_s = m[0], m[1]
+         else:
+             parts = m.split(" ", 1)
+             if len(parts) != 2:
+                 continue
+             first_s, second_s = parts
+
+         first_id = text_to_id.get(first_s)
+         second_id = text_to_id.get(second_s)
+         result_id = text_to_id.get(first_s + second_s)
+         if first_id is None or second_id is None or result_id is None:
+             continue
+         merge_entries.append((first_id, second_id, result_id, rank))
+
+     merge_count = len(merge_entries)
+     merges_table = bytearray(merge_count * 16)
+     for i, (f, s, r, rk) in enumerate(merge_entries):
+         struct.pack_into("<IIII", merges_table, i * 16, f, s, r, rk)
+
+     # Write binary
+     header = struct.pack(
+         "<4sIIIIIII",
+         b"TOKN",
+         vocab_size,
+         entry_count,
+         merge_count,
+         eos_id,
+         im_start_id,
+         im_end_id,
+         len(pool),
+     )
+
+     with open(out_path, "wb") as f:
+         f.write(header)
+         f.write(pool)
+         f.write(vocab_table)
+         f.write(token_to_id_table)
+         f.write(merges_table)
+
+     total = len(header) + len(pool) + len(vocab_table) + len(token_to_id_table) + len(merges_table)
+     print(f"Wrote {out_path} ({total / 1048576:.1f} MB)")
+     print(f"  vocab_size={vocab_size} entries={entry_count} merges={merge_count}")
+     print(f"  pool={len(pool)} bytes  vocab_table={len(vocab_table)} bytes")
+
+
+ if __name__ == "__main__":
+     main()
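
To verify a converted file round-trips correctly, a minimal reader sketch (hypothetical, not part of this change; it assumes a .tkn produced by the script above) can resolve a token id straight through the fixed-stride vocab_table, which is the same random-access pattern the Zig loader relies on:

# Hypothetical .tkn lookup sketch: resolve a token id to its text via flat offsets.
import struct
import sys

EMPTY = 0xFFFFFFFF

def token_text(data, token_id):
    # Header fields: magic, vocab_size, entry_count, merge_count, eos, im_start, im_end, pool_size
    _, vocab_size, _, _, _, _, _, pool_size = struct.unpack_from("<4sIIIIIII", data, 0)
    if token_id >= vocab_size:
        return None
    pool_start = 32                       # string pool follows the header
    table_start = pool_start + pool_size  # vocab_table follows the string pool
    off, ln = struct.unpack_from("<II", data, table_start + token_id * 8)
    if off == EMPTY:
        return None  # empty slot
    return data[pool_start + off : pool_start + off + ln].decode("utf-8")

if __name__ == "__main__":
    blob = open(sys.argv[1], "rb").read()
    print(repr(token_text(blob, int(sys.argv[2]))))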
+92
src/inference/tokenizer.zig
···
      self.allocator.free(self.string_pool);
  }

+ /// Load tokenizer from compact binary format (.tkn).
+ /// Much faster than JSON parsing — just flat array reads.
+ pub fn loadFromBinary(allocator: std.mem.Allocator, data: []const u8) !Tokenizer {
+     if (data.len < 32) return error.InvalidFormat;
+
+     // Parse header
+     const magic = data[0..4];
+     if (!std.mem.eql(u8, magic, "TOKN")) return error.InvalidFormat;
+
+     const vocab_size = std.mem.readInt(u32, data[4..8], .little);
+     const entry_count = std.mem.readInt(u32, data[8..12], .little);
+     const merge_count = std.mem.readInt(u32, data[12..16], .little);
+     const eos_id = std.mem.readInt(u32, data[16..20], .little);
+     const im_start_id = std.mem.readInt(u32, data[20..24], .little);
+     const im_end_id = std.mem.readInt(u32, data[24..28], .little);
+     const pool_size = std.mem.readInt(u32, data[28..32], .little);
+
+     var offset: usize = 32;
+
+     // String pool — copy so it's owned by the tokenizer
+     if (offset + pool_size > data.len) return error.InvalidFormat;
+     const string_pool = try allocator.alloc(u8, pool_size);
+     @memcpy(string_pool, data[offset .. offset + pool_size]);
+     offset += pool_size;
+
+     // Vocab table: vocab_size × (offset:u32, length:u32)
+     const vtable_size = @as(usize, vocab_size) * 8;
+     if (offset + vtable_size > data.len) return error.InvalidFormat;
+     const vtable = data[offset .. offset + vtable_size];
+     offset += vtable_size;
+
+     const vocab = try allocator.alloc([]const u8, vocab_size);
+     for (0..vocab_size) |i| {
+         const base = i * 8;
+         const str_off = std.mem.readInt(u32, vtable[base..][0..4], .little);
+         const str_len = std.mem.readInt(u32, vtable[base + 4 ..][0..4], .little);
+         if (str_off == 0xFFFFFFFF) {
+             vocab[i] = "";
+         } else {
+             vocab[i] = string_pool[str_off .. str_off + str_len];
+         }
+     }
+
+     // Token-to-id table: entry_count × (offset:u32, length:u32, id:u32)
+     const ttable_size = @as(usize, entry_count) * 12;
+     if (offset + ttable_size > data.len) return error.InvalidFormat;
+     const ttable = data[offset .. offset + ttable_size];
+     offset += ttable_size;
+
+     const token_to_id = try allocator.alloc(TokenEntry, entry_count);
+     for (0..entry_count) |i| {
+         const base = i * 12;
+         const str_off = std.mem.readInt(u32, ttable[base..][0..4], .little);
+         const str_len = std.mem.readInt(u32, ttable[base + 4 ..][0..4], .little);
+         const id = std.mem.readInt(u32, ttable[base + 8 ..][0..4], .little);
+         token_to_id[i] = .{
+             .text = string_pool[str_off .. str_off + str_len],
+             .id = id,
+         };
+     }
+
+     // Merges table: merge_count × (first:u32, second:u32, result:u32, rank:u32)
+     const mtable_size = @as(usize, merge_count) * 16;
+     if (offset + mtable_size > data.len) return error.InvalidFormat;
+     const mtable = data[offset .. offset + mtable_size];
+
+     const merges = try allocator.alloc(Merge, merge_count);
+     for (0..merge_count) |i| {
+         const base = i * 16;
+         merges[i] = .{
+             .first = std.mem.readInt(u32, mtable[base..][0..4], .little),
+             .second = std.mem.readInt(u32, mtable[base + 4 ..][0..4], .little),
+             .result = std.mem.readInt(u32, mtable[base + 8 ..][0..4], .little),
+             .rank = std.mem.readInt(u32, mtable[base + 12 ..][0..4], .little),
+         };
+     }
+
+     return Tokenizer{
+         .vocab = vocab,
+         .vocab_size = vocab_size,
+         .merges = merges,
+         .merge_count = merge_count,
+         .token_to_id = token_to_id,
+         .token_count = entry_count,
+         .eos_id = eos_id,
+         .im_start_id = im_start_id,
+         .im_end_id = im_end_id,
+         .allocator = allocator,
+         .string_pool = string_pool,
+     };
+ }
+
  /// Load tokenizer from a tokenizer.json file using std.json.
  pub fn loadFromFile(allocator: std.mem.Allocator, path: []const u8) !Tokenizer {
      const file = try std.fs.cwd().openFile(path, .{});
+8 -2
src/main.zig
···
  fn initFromData(weights_data: []const u8, tok_ptr: ?[*]const u8, tok_len: ?u32) bool {
      const allocator = getAllocator();

-     // Load tokenizer
+     // Load tokenizer (binary .tkn format for WASM, JSON for native)
      if (tok_ptr) |tp| {
          if (tok_len) |tl| {
-             g_tokenizer = inference_tokenizer.Tokenizer.loadFromJson(allocator, tp[0..tl]) catch return false;
+             const tok_data = tp[0..tl];
+             // Try binary format first (starts with "TOKN"), fall back to JSON
+             if (tok_data.len >= 4 and std.mem.eql(u8, tok_data[0..4], "TOKN")) {
+                 g_tokenizer = inference_tokenizer.Tokenizer.loadFromBinary(allocator, tok_data) catch return false;
+             } else {
+                 g_tokenizer = inference_tokenizer.Tokenizer.loadFromJson(allocator, tok_data) catch return false;
+             }
          }
      }

+1
web/src/db.ts
···
  const DB_VERSION = 1;
  const STORE_NAME = "model";
  export const WEIGHTS_KEY = "weights";
+ export const TOKENIZER_KEY = "tokenizer";

  function openDB(): Promise<IDBDatabase> {
    return new Promise((resolve, reject) => {
+13 -5
web/src/main.ts
···

  import { store } from "./store";
  import { loadWasm, wasmCall, loadModelWeights } from "./wasm";
- import { dbGet, dbPut, dbClear, WEIGHTS_KEY } from "./db";
+ import { dbGet, dbPut, dbClear, WEIGHTS_KEY, TOKENIZER_KEY } from "./db";
  import { updateStatus, renderProfile } from "./render";

  const $ = (sel: string) => document.querySelector<HTMLElement>(sel)!;
···
  downloadProgress.hidden = false;

  try {
-   progressText.textContent = "Downloading model...";
+   // 1. Download tokenizer.tkn (compact binary, ~7MB)
+   progressText.textContent = "Downloading tokenizer...";
    progressFill.style.width = "0%";
+   const tokResponse = await fetch("/tokenizer.tkn");
+   if (!tokResponse.ok) throw new Error(`Tokenizer HTTP ${tokResponse.status}`);
+   const tokenizerBytes = await tokResponse.arrayBuffer();
+   await dbPut(TOKENIZER_KEY, tokenizerBytes);

-   const weightsBytes = await fetchWithProgress("/model.bin", (recv, total) => {
+   // 2. Download model weights (large, with progress)
+   progressText.textContent = "Downloading model...";
+   const weightsBytes = await fetchWithProgress("/model.q4", (recv, total) => {
      if (total > 0) {
        const pct = Math.round((recv / total) * 100);
        progressFill.style.width = pct + "%";
···
    progressFill.style.width = "100%";

    const { wasmInstance } = store.getState();
-   if (wasmInstance && loadModelWeights(wasmInstance, weightsBytes)) {
+   if (wasmInstance && loadModelWeights(wasmInstance, weightsBytes, tokenizerBytes)) {
      store.getState().setModelLoaded(true);
      downloadProgress.hidden = true;
      btnDownload.hidden = true;
···
  try {
    const storedWeights = await dbGet(WEIGHTS_KEY);
    if (storedWeights) {
-     if (loadModelWeights(instance, storedWeights)) {
+     const storedTokenizer = await dbGet(TOKENIZER_KEY);
+     if (loadModelWeights(instance, storedWeights, storedTokenizer)) {
        store.getState().setModelLoaded(true);
        store.getState().setStatus("model-ready");
      }
+23 -5
web/src/wasm.ts
···
    profile(ptr: number, len: number): number;
    neutralize(ptr: number, len: number): number;
    init(ptr: number, len: number): number;
+   init_with_tokenizer(
+     weights_ptr: number,
+     weights_len: number,
+     tok_ptr: number,
+     tok_len: number,
+   ): number;
    get_output_ptr(): number;
    get_output_len(): number;
  }
···
  export function loadModelWeights(
    instance: WebAssembly.Instance,
    weightsBytes: ArrayBuffer,
+   tokenizerBytes?: ArrayBuffer,
  ): boolean {
    const exports = instance.exports as unknown as FantasmaExports;
-   const arr = new Uint8Array(weightsBytes);
-   const ptr = exports.alloc(arr.length);
-   new Uint8Array(exports.memory.buffer, ptr, arr.length).set(arr);
-   const ok = exports.init(ptr, arr.length);
-   exports.dealloc(ptr, arr.length);
+   const wArr = new Uint8Array(weightsBytes);
+   const wPtr = exports.alloc(wArr.length);
+   new Uint8Array(exports.memory.buffer, wPtr, wArr.length).set(wArr);
+
+   let ok: number;
+   if (tokenizerBytes) {
+     const tArr = new Uint8Array(tokenizerBytes);
+     const tPtr = exports.alloc(tArr.length);
+     new Uint8Array(exports.memory.buffer, tPtr, tArr.length).set(tArr);
+     ok = exports.init_with_tokenizer(wPtr, wArr.length, tPtr, tArr.length);
+     exports.dealloc(tPtr, tArr.length);
+   } else {
+     ok = exports.init(wPtr, wArr.length);
+   }
+
+   exports.dealloc(wPtr, wArr.length);
    return !!ok;
  }