experiments in a post-browser web
10
fork

Configure Feed

Select the types of activity you want to include in your feed.

feat(server): add storage abstraction layer for image handling

- Create storage/types.js with StorageAdapter interface
- Create storage/filesystem-adapter.js with sync/async methods
- Create storage/index.js factory for storage adapter selection
- Refactor db.js saveImage/getImageData to use storage adapter
- Refactor index.js image serving to use getImageData
- Hash-based storage keys for content-addressable storage

Prepares for future R2/S3 backend switching.

resolve: regenerate schema files

+1297 -29
+63 -13
backend/server/db.js
··· 1 1 const { sqlFactory } = require("./sql"); 2 + const { createStorageAdapter } = require("./storage"); 2 3 const path = require("path"); 3 4 const crypto = require("crypto"); 4 5 const fs = require("fs"); ··· 16 17 // Connection pool - one connection per user:profile 17 18 // Now stores SqlAdapter instances instead of raw Database instances 18 19 const connections = new Map(); 20 + 21 + // Storage adapter pool - one per user:profile 22 + const storageAdapters = new Map(); 23 + 24 + /** 25 + * Get storage adapter for a user's profile. 26 + * @param {string} userId 27 + * @param {string} [profileId='default'] 28 + * @returns {import('./storage/types').StorageAdapter} 29 + */ 30 + function getStorageAdapter(userId, profileId = "default") { 31 + const key = `${userId}:${profileId}`; 32 + if (storageAdapters.has(key)) { 33 + return storageAdapters.get(key); 34 + } 35 + 36 + const profileDir = path.join(DATA_DIR, userId, "profiles", profileId); 37 + const imagesDir = path.join(profileDir, "images"); 38 + 39 + const adapter = createStorageAdapter({ 40 + type: "filesystem", 41 + basePath: imagesDir, 42 + }); 43 + 44 + storageAdapters.set(key, adapter); 45 + return adapter; 46 + } 19 47 20 48 function getConnection(userId, profileId = "default") { 21 49 if (!userId) { ··· 805 833 } 806 834 807 835 const conn = getConnection(userId, profileId); 836 + const storage = getStorageAdapter(userId, profileId); 808 837 const timestamp = now(); 809 838 810 - // Compute hash for file deduplication (not item dedup) 839 + // Compute hash for content-addressable storage 811 840 const hash = hashBuffer(buffer); 812 841 const ext = getExtensionFromMime(mimeType); 813 - const imageFilename = `${hash}.${ext}`; 814 - 815 - // Ensure images directory exists 816 - const imagesDir = getUserImagesDir(userId, profileId); 817 - if (!fs.existsSync(imagesDir)) { 818 - fs.mkdirSync(imagesDir, { recursive: true }); 819 - } 820 842 821 - const imagePath = path.join(imagesDir, imageFilename); 843 + 
// Storage key is hash-based for easy backend switching 844 + // Key format: {hash}.{ext} (e.g., "abc123...def.jpg") 845 + const storageKey = `${hash}.${ext}`; 822 846 823 - // Write file only if it doesn't exist (file-level dedup) 824 - if (!fs.existsSync(imagePath)) { 825 - fs.writeFileSync(imagePath, buffer); 826 - } 847 + // Store via adapter (uses sync method for filesystem adapter) 848 + storage.putSync(storageKey, buffer, { mime: mimeType }); 827 849 828 850 // Create item record 829 851 const itemId = generateUUID(); ··· 899 921 900 922 const imagesDir = getUserImagesDir(userId, profileId); 901 923 return path.join(imagesDir, `${image.metadata.hash}.${image.metadata.ext}`); 924 + } 925 + 926 + /** 927 + * Get image data using storage adapter. 928 + * Returns the raw image buffer for serving. 929 + * 930 + * @param {string} userId 931 + * @param {string} itemId 932 + * @param {string} [profileId='default'] 933 + * @returns {{buffer: Buffer, metadata: Object, filename: string} | null} 934 + */ 935 + function getImageData(userId, itemId, profileId = "default") { 936 + const image = getImageById(userId, itemId, profileId); 937 + if (!image || !image.metadata.hash) return null; 938 + 939 + const storage = getStorageAdapter(userId, profileId); 940 + const storageKey = `${image.metadata.hash}.${image.metadata.ext}`; 941 + 942 + const buffer = storage.getSync(storageKey); 943 + if (!buffer) return null; 944 + 945 + return { 946 + buffer, 947 + metadata: image.metadata, 948 + filename: image.filename, 949 + }; 902 950 } 903 951 904 952 function deleteImage(userId, itemId, profileId = "default") { ··· 1073 1121 getConnection, 1074 1122 closeAllConnections, 1075 1123 closeConnection, 1124 + getStorageAdapter, 1076 1125 // Unified functions 1077 1126 saveItem, 1078 1127 getItems, ··· 1090 1139 getImages, 1091 1140 getImageById, 1092 1141 getImagePath, 1142 + getImageData, 1093 1143 deleteImage, 1094 1144 MAX_IMAGE_SIZE, 1095 1145 // Backward-compatible (URLs)
+6 -12
backend/server/index.js
··· 335 335 const profileId = users.resolveProfileId(userId, c.req.query("profile") || "default"); 336 336 const id = c.req.param("id"); 337 337 338 - const image = db.getImageById(userId, id, profileId); 339 - if (!image) { 338 + const imageData = db.getImageData(userId, id, profileId); 339 + if (!imageData) { 340 340 return c.json({ error: "image not found" }, 404); 341 341 } 342 342 343 - const imagePath = db.getImagePath(userId, id, profileId); 344 - if (!imagePath || !fs.existsSync(imagePath)) { 345 - return c.json({ error: "image file not found" }, 404); 346 - } 347 - 348 - const fileBuffer = fs.readFileSync(imagePath); 349 - return new Response(fileBuffer, { 343 + return new Response(imageData.buffer, { 350 344 headers: { 351 - "Content-Type": image.metadata.mime, 352 - "Content-Length": fileBuffer.length.toString(), 353 - "Content-Disposition": `inline; filename="${image.filename}"`, 345 + "Content-Type": imageData.metadata.mime, 346 + "Content-Length": imageData.buffer.length.toString(), 347 + "Content-Disposition": `inline; filename="${imageData.filename}"`, 354 348 }, 355 349 }); 356 350 });
+178
backend/server/storage/filesystem-adapter.js
··· 1 + /** 2 + * Filesystem Storage Adapter 3 + * 4 + * Implements StorageAdapter interface for local filesystem storage. 5 + * Used for development and self-hosted deployments. 6 + * 7 + * Provides both sync and async methods - sync for filesystem (local), 8 + * async interface ready for cloud storage backends. 9 + */ 10 + 11 + const fs = require("fs"); 12 + const path = require("path"); 13 + 14 + /** 15 + * @typedef {import('./types').StorageAdapter} StorageAdapter 16 + */ 17 + 18 + /** 19 + * @implements {StorageAdapter} 20 + */ 21 + class FilesystemAdapter { 22 + /** 23 + * @param {string} basePath - Base directory for all storage operations 24 + */ 25 + constructor(basePath) { 26 + /** @private */ 27 + this.basePath = basePath; 28 + } 29 + 30 + /** 31 + * Store data at the specified key (sync version). 32 + * Creates parent directories if they don't exist. 33 + * Implements file-level deduplication by skipping writes if file exists. 34 + * 35 + * @param {string} key - Storage key (relative path) 36 + * @param {Buffer} data - Data to store 37 + * @param {Record<string, string>} [metadata] - Optional metadata (ignored for filesystem) 38 + */ 39 + putSync(key, data, metadata) { 40 + const fullPath = path.join(this.basePath, key); 41 + const dir = path.dirname(fullPath); 42 + 43 + if (!fs.existsSync(dir)) { 44 + fs.mkdirSync(dir, { recursive: true }); 45 + } 46 + 47 + // Skip write if file already exists (content-addressable dedup) 48 + if (!fs.existsSync(fullPath)) { 49 + fs.writeFileSync(fullPath, data); 50 + } 51 + } 52 + 53 + /** 54 + * Store data at the specified key (async version). 55 + * @param {string} key - Storage key (relative path) 56 + * @param {Buffer} data - Data to store 57 + * @param {Record<string, string>} [metadata] - Optional metadata (ignored for filesystem) 58 + */ 59 + async put(key, data, metadata) { 60 + this.putSync(key, data, metadata); 61 + } 62 + 63 + /** 64 + * Retrieve data by key (sync version). 
65 + * 66 + * @param {string} key - Storage key 67 + * @returns {Buffer|null} Data or null if not found 68 + */ 69 + getSync(key) { 70 + const fullPath = path.join(this.basePath, key); 71 + return fs.existsSync(fullPath) ? fs.readFileSync(fullPath) : null; 72 + } 73 + 74 + /** 75 + * Retrieve data by key (async version). 76 + * 77 + * @param {string} key - Storage key 78 + * @returns {Promise<Buffer|null>} Data or null if not found 79 + */ 80 + async get(key) { 81 + return this.getSync(key); 82 + } 83 + 84 + /** 85 + * Delete data by key (sync version). 86 + * 87 + * @param {string} key - Storage key 88 + */ 89 + deleteSync(key) { 90 + const fullPath = path.join(this.basePath, key); 91 + if (fs.existsSync(fullPath)) { 92 + fs.unlinkSync(fullPath); 93 + } 94 + } 95 + 96 + /** 97 + * Delete data by key (async version). 98 + * 99 + * @param {string} key - Storage key 100 + */ 101 + async delete(key) { 102 + this.deleteSync(key); 103 + } 104 + 105 + /** 106 + * Check if key exists (sync version). 107 + * 108 + * @param {string} key - Storage key 109 + * @returns {boolean} 110 + */ 111 + existsSync(key) { 112 + return fs.existsSync(path.join(this.basePath, key)); 113 + } 114 + 115 + /** 116 + * Check if key exists (async version). 117 + * 118 + * @param {string} key - Storage key 119 + * @returns {Promise<boolean>} 120 + */ 121 + async exists(key) { 122 + return this.existsSync(key); 123 + } 124 + 125 + /** 126 + * List all keys with optional prefix (sync version). 127 + * Recursively walks directory structure. 128 + * 129 + * @param {string} [prefix] - Optional prefix to filter keys 130 + * @returns {string[]} Array of keys 131 + */ 132 + listSync(prefix) { 133 + const searchPath = prefix 134 + ? 
path.join(this.basePath, prefix) 135 + : this.basePath; 136 + 137 + if (!fs.existsSync(searchPath)) { 138 + return []; 139 + } 140 + 141 + const results = []; 142 + const walk = (dir) => { 143 + const entries = fs.readdirSync(dir, { withFileTypes: true }); 144 + for (const entry of entries) { 145 + const fullPath = path.join(dir, entry.name); 146 + if (entry.isDirectory()) { 147 + walk(fullPath); 148 + } else { 149 + // Convert back to key format (relative to basePath) 150 + const key = path.relative(this.basePath, fullPath); 151 + results.push(key); 152 + } 153 + } 154 + }; 155 + 156 + const stat = fs.statSync(searchPath); 157 + if (stat.isDirectory()) { 158 + walk(searchPath); 159 + } else { 160 + // prefix points to a file 161 + results.push(path.relative(this.basePath, searchPath)); 162 + } 163 + 164 + return results; 165 + } 166 + 167 + /** 168 + * List all keys with optional prefix (async version). 169 + * 170 + * @param {string} [prefix] - Optional prefix to filter keys 171 + * @returns {Promise<string[]>} Array of keys 172 + */ 173 + async list(prefix) { 174 + return this.listSync(prefix); 175 + } 176 + } 177 + 178 + module.exports = { FilesystemAdapter };
+53
backend/server/storage/index.js
/**
 * Storage Adapter Factory
 *
 * Selects and configures the appropriate storage adapter based on environment.
 * Supports filesystem (default), with R2/S3 support planned for future.
 */

const { FilesystemAdapter } = require("./filesystem-adapter");

/**
 * @typedef {import('./types').StorageAdapter} StorageAdapter
 * @typedef {import('./types').StorageConfig} StorageConfig
 */

// Resolved once at module load; defaults to local filesystem storage.
const STORAGE_TYPE = process.env.STORAGE_TYPE || "filesystem";

/**
 * Create a storage adapter for the given configuration.
 *
 * @param {StorageConfig} config - Storage configuration
 * @returns {StorageAdapter}
 * @throws {Error} When the backend is unknown, not yet implemented,
 *   or required configuration (basePath) is missing.
 */
function createStorageAdapter(config) {
  const { type } = config;

  if (type === "filesystem") {
    if (!config.basePath) {
      throw new Error("basePath required for filesystem storage");
    }
    return new FilesystemAdapter(config.basePath);
  }

  if (type === "r2") {
    // TODO: Implement R2 adapter
    throw new Error("R2 storage adapter not yet implemented");
  }

  if (type === "s3") {
    // TODO: Implement S3 adapter
    throw new Error("S3 storage adapter not yet implemented");
  }

  throw new Error(`Unknown storage type: ${type}`);
}

/**
 * Get the configured storage type from the environment.
 *
 * @returns {'filesystem' | 'r2' | 's3'}
 */
function getStorageType() {
  return STORAGE_TYPE;
}

module.exports = { createStorageAdapter, getStorageType, FilesystemAdapter };
+27
backend/server/storage/types.js
/**
 * Storage Abstraction Layer Types
 *
 * JSDoc type definitions for the blob/file storage abstraction, which
 * supports multiple backends:
 * - Filesystem (Node.js, current)
 * - Cloudflare R2 (future)
 * - S3-compatible (future)
 *
 * @typedef {Object} StorageAdapter
 * @property {function(string, Buffer, Record<string, string>?): Promise<void>} put - Store data at key
 * @property {function(string): Promise<Buffer|null>} get - Retrieve data by key
 * @property {function(string): Promise<void>} delete - Delete data by key
 * @property {function(string): Promise<boolean>} exists - Check if key exists
 * @property {function(string?): Promise<string[]>} list - List keys with optional prefix
 *
 * @typedef {Object} StorageConfig
 * @property {'filesystem' | 'r2' | 's3'} type - Storage backend type
 * @property {string} [basePath] - Base path for filesystem storage
 * @property {string} [bucket] - Bucket name for R2/S3
 * @property {string} [region] - Region for S3
 * @property {string} [accessKeyId] - Access key ID for S3
 * @property {string} [secretAccessKey] - Secret access key for S3
 * @property {string} [endpoint] - Custom endpoint for S3-compatible storage
 */

// No runtime exports - this module exists only for the JSDoc typedefs
// above, which other files pull in via import('./types').
module.exports = {};
+432
notes/cloudflare-vs-railway-evaluation.md
··· 1 + # Cloudflare vs Railway: Server Architecture Evaluation 2 + 3 + **Date:** 2026-02-02 4 + **Status:** Research 5 + **Author:** Architecture Review 6 + 7 + --- 8 + 9 + ## Executive Summary 10 + 11 + This document evaluates two deployment strategies for the Peek sync server: 12 + 13 + 1. **Cloudflare Edge Stack**: Workers + Durable Objects + R2 14 + 2. **Railway (Current)**: Full Node.js container with persistent storage 15 + 16 + **Recommendation:** Stay on Railway for now, but architect for future portability. The current server is simple enough that Cloudflare migration would add complexity without proportional benefit at current scale. However, if user growth demands global low-latency sync or Railway costs become prohibitive, Cloudflare offers a compelling edge-native model. 17 + 18 + --- 19 + 20 + ## Part 1: Cirrus Architecture Summary 21 + 22 + ### What Cirrus Does 23 + 24 + [Cirrus](https://github.com/ascorbic/cirrus) is a single-user AT Protocol Personal Data Server (PDS) optimized for edge deployment. It demonstrates a clean architecture for running per-user SQLite databases on Cloudflare's edge infrastructure. 25 + 26 + ### Cirrus Architecture 27 + 28 + | Component | Role | 29 + |-----------|------| 30 + | **Worker** | Stateless edge handler for routing, authentication, DID document serving | 31 + | **Durable Object** | Single-instance SQLite storage for AT Protocol repository (per user) | 32 + | **R2** | Object storage for blobs (images, videos) | 33 + 34 + ### Key Design Decisions 35 + 36 + 1. **Single-User per Durable Object**: Each user's entire AT Protocol repository lives in one Durable Object. This eliminates distributed consensus complexity - the DO is the authoritative state holder. 37 + 38 + 2. **Separation of Concerns**: 39 + - Structured data (posts, follows, profile) in SQLite within DO 40 + - Binary data (images, videos) in R2 object storage 41 + - Routing logic in stateless Worker 42 + 43 + 3. 
**Edge-Native Authentication**: OAuth 2.1 (PKCE, DPoP, PAR) and passkey support built into the edge layer. 44 + 45 + 4. **Pay-Per-Use**: Cloudflare's pricing model means costs scale with actual usage rather than reserved capacity. 46 + 47 + ### What We Can Learn 48 + 49 + - **Per-user DO isolation is viable**: One DO per user with embedded SQLite works well for personal data sync. 50 + - **R2 for blobs**: Keeping binary content out of SQLite simplifies sync and reduces DO storage costs. 51 + - **Stateless routing layer**: Workers handle auth and routing without maintaining state, making horizontal scaling trivial. 52 + 53 + --- 54 + 55 + ## Part 2: Our Current Server Architecture 56 + 57 + ### Stack 58 + 59 + | Component | Technology | 60 + |-----------|------------| 61 + | Runtime | Node.js 22+ | 62 + | Framework | Hono (Express-like, but faster) | 63 + | Database | SQLite via better-sqlite3 | 64 + | Deployment | Railway (Nixpacks container) | 65 + | Storage | Persistent volume at `DATA_DIR` | 66 + 67 + ### Data Model 68 + 69 + ``` 70 + data/ 71 + system.db # User registry, profile metadata 72 + {userId}/ 73 + profiles/ 74 + {profileId}/ 75 + datastore.sqlite # Items, tags, item_tags, settings 76 + images/ 77 + {hash}.{ext} # Deduplicated image files 78 + ``` 79 + 80 + ### Key Components 81 + 82 + **`db.js`** - Data Layer (~1100 lines) 83 + - Connection pool (one SQLite connection per user:profile) 84 + - Schema migrations with column renames (snake_case to camelCase) 85 + - Unified `saveItem()` with syncId-based deduplication 86 + - Soft deletes with `deletedAt` timestamps 87 + - Image storage with content-hash deduplication 88 + - Incremental sync via `getItemsSince(timestamp)` 89 + 90 + **`users.js`** - Multi-User Auth (~420 lines) 91 + - SHA-256 hashed API keys in system.db 92 + - Profile management (UUID-based folder naming) 93 + - Legacy migration support 94 + 95 + **`index.js`** - HTTP API (~770 lines) 96 + - Hono routes for items, tags, images, profiles, 
backups 97 + - Version compatibility middleware (HTTP 409 on mismatch) 98 + - Auth middleware (Bearer token lookup) 99 + - Automatic migrations and dedup on startup 100 + 101 + ### Current Endpoints 102 + 103 + | Endpoint | Purpose | 104 + |----------|---------| 105 + | `POST /items` | Create/update items (sync push) | 106 + | `GET /items/since/:timestamp` | Incremental pull | 107 + | `POST /images` | Upload images (multipart or base64) | 108 + | `GET /images/:id` | Serve image binary | 109 + | `GET /profiles` | List user profiles | 110 + | `POST /backups` | Trigger backup | 111 + 112 + ### Strengths of Current Design 113 + 114 + 1. **Simple deployment**: Single container, one persistent volume 115 + 2. **Proven stack**: Node.js + SQLite is battle-tested 116 + 3. **Clean data isolation**: Each user/profile has isolated database 117 + 4. **Incremental sync**: `updatedAt`-based change detection works 118 + 5. **Hot reload**: `npm run dev` for local development 119 + 120 + ### Pain Points 121 + 122 + 1. **Single region**: Railway hosts in one region; latency for distant users 123 + 2. **Vertical scaling only**: One container handles all users 124 + 3. **Cold starts**: Container can be evicted, slow restart 125 + 4. **Image serving**: Binary files served through Node.js, not CDN 126 + 127 + --- 128 + 129 + ## Part 3: What Would Need to Change for Cloudflare 130 + 131 + ### Architecture Mapping 132 + 133 + | Current (Railway) | Cloudflare Equivalent | 134 + |-------------------|----------------------| 135 + | Node.js container | Worker (JavaScript runtime) | 136 + | SQLite file per user | Durable Object with SQLite API | 137 + | Filesystem images | R2 bucket | 138 + | Railway volume | Not needed (DO + R2 are persistent) | 139 + | Single instance | Edge instances globally | 140 + 141 + ### Required Changes 142 + 143 + #### 1. 
HTTP Framework Migration 144 + 145 + ```javascript 146 + // Current: Hono with Node.js adapter 147 + const { serve } = require("@hono/node-server"); 148 + serve({ fetch: app.fetch, port }); 149 + 150 + // Cloudflare: Hono runs natively in Workers 151 + export default app; 152 + ``` 153 + 154 + Hono already supports Workers natively - this is a minimal change. 155 + 156 + #### 2. Database Layer Rewrite 157 + 158 + **Current** (better-sqlite3): 159 + ```javascript 160 + const db = new Database(dbPath); 161 + db.prepare("SELECT * FROM items WHERE id = ?").get(id); 162 + ``` 163 + 164 + **Cloudflare** (DO SQLite): 165 + ```javascript 166 + export class UserStore extends DurableObject { 167 + async fetch(request) { 168 + const db = this.ctx.storage.sql; 169 + const result = db.exec("SELECT * FROM items WHERE id = ?", id); 170 + return Response.json(result.toArray()); 171 + } 172 + } 173 + ``` 174 + 175 + **Migration Complexity: HIGH** 176 + 177 + - DO's SQLite API is slightly different (D1-style) 178 + - Connection pool becomes DO instantiation 179 + - Each request routes to the correct DO by user ID 180 + - All db.js functions need rewriting 181 + 182 + #### 3. Image Storage Migration 183 + 184 + **Current**: Filesystem in `data/{userId}/profiles/{profileId}/images/` 185 + 186 + **Cloudflare R2**: 187 + ```javascript 188 + // Upload 189 + await env.BUCKET.put(`${userId}/${hash}.${ext}`, imageBuffer, { 190 + httpMetadata: { contentType: mimeType } 191 + }); 192 + 193 + // Serve 194 + const object = await env.BUCKET.get(`${userId}/${hash}.${ext}`); 195 + return new Response(object.body, { 196 + headers: { 'Content-Type': object.httpMetadata.contentType } 197 + }); 198 + ``` 199 + 200 + **Migration Complexity: MEDIUM** 201 + 202 + - R2 API is straightforward 203 + - Need to migrate existing images (one-time script) 204 + - Can serve directly from R2 or through Worker 205 + 206 + #### 4. 
User Registry 207 + 208 + **Current**: `system.db` SQLite file shared across all users 209 + 210 + **Cloudflare Options**: 211 + 1. **D1**: Cloudflare's managed SQLite (good for small tables) 212 + 2. **KV**: Key-value store (fast lookup by API key hash) 213 + 3. **Dedicated DO**: One DO for user registry 214 + 215 + **Recommended**: D1 for user table, or KV for API key -> userId lookup. 216 + 217 + #### 5. Routing Layer 218 + 219 + Worker needs to: 220 + 1. Authenticate via API key (lookup in D1/KV) 221 + 2. Parse userId from auth 222 + 3. Get/create DO stub for that user 223 + 4. Forward request to DO 224 + 225 + ```javascript 226 + export default { 227 + async fetch(request, env) { 228 + const apiKey = request.headers.get('Authorization')?.slice(7); 229 + const userId = await env.USERS.get(hashKey(apiKey)); 230 + if (!userId) return new Response('Unauthorized', { status: 401 }); 231 + 232 + const doId = env.USER_STORES.idFromName(userId); 233 + const stub = env.USER_STORES.get(doId); 234 + return stub.fetch(request); 235 + } 236 + }; 237 + ``` 238 + 239 + #### 6. Migrations and Schema 240 + 241 + DO's SQLite doesn't have automatic migrations. 
Options: 242 + - Check schema version on first request, run migrations 243 + - Use DO's `alarm()` for background migration 244 + - Store schema version in DO storage 245 + 246 + --- 247 + 248 + ## Part 4: Pros and Cons Comparison 249 + 250 + ### Railway (Current) 251 + 252 + | Pros | Cons | 253 + |------|------| 254 + | Simple single-container model | Single region (latency for distant users) | 255 + | Standard Node.js - familiar stack | Vertical scaling only | 256 + | Filesystem access for images | Cold start after inactivity | 257 + | Easy local development (`npm run dev`) | Images served through Node.js (not CDN) | 258 + | Persistent volume (data survives redeploys) | $5/mo minimum even idle | 259 + | Full npm ecosystem | Container size affects cold start | 260 + 261 + **Cost Model**: 262 + - $5/mo hobby tier (512MB RAM, shared CPU) 263 + - $20/mo pro for more resources 264 + - Persistent volumes included 265 + - Predictable monthly cost 266 + 267 + ### Cloudflare (Durable Objects + R2) 268 + 269 + | Pros | Cons | 270 + |------|------| 271 + | Global edge - low latency everywhere | More complex architecture | 272 + | Per-user isolation via DO | DO's SQLite API differs from better-sqlite3 | 273 + | R2 + CDN for images | Limited Node.js API compatibility | 274 + | Auto-scales to zero (pay only for usage) | Debugging is harder (distributed) | 275 + | No cold start for Workers | 30-second wall-clock limit per request | 276 + | Built-in DDoS protection | Vendor lock-in (DO is Cloudflare-only) | 277 + 278 + **Cost Model**: 279 + - Workers: 10M free requests/mo, then $0.50/M 280 + - Durable Objects: $0.15/GB-month storage, $0.50/M requests 281 + - R2: $0.015/GB-month storage, free egress 282 + - Could be cheaper at low scale, more expensive at high scale 283 + 284 + ### Head-to-Head 285 + 286 + | Factor | Railway | Cloudflare | 287 + |--------|---------|------------| 288 + | **Latency** | Single region (US) | Global edge | 289 + | **Complexity** | Low | Medium-High | 
290 + | **Migration Effort** | N/A (current) | 2-3 weeks solid work | 291 + | **Vendor Lock-in** | Low (standard container) | High (Durable Objects) | 292 + | **Cost at Low Scale** | $5-20/mo fixed | ~$0-5/mo (usage-based) | 293 + | **Cost at High Scale** | Predictable | Can grow unexpectedly | 294 + | **Local Dev** | Native Node.js | Wrangler + Miniflare | 295 + | **Debugging** | Standard logs | Distributed tracing needed | 296 + | **Image Serving** | Through app | R2 + CDN | 297 + 298 + --- 299 + 300 + ## Part 5: Recommendation 301 + 302 + ### Short-Term (Next 6 Months): Stay on Railway 303 + 304 + **Rationale:** 305 + 306 + 1. **Current scale doesn't justify complexity**: With a handful of users, single-region latency is acceptable. 307 + 308 + 2. **Development velocity matters more**: Spending 2-3 weeks rewriting for Cloudflare delays features. 309 + 310 + 3. **Railway is working**: No reported issues with current deployment. 311 + 312 + 4. **Migration risk**: Cloudflare DO's SQLite API quirks could cause subtle bugs. 313 + 314 + ### Medium-Term: Architect for Portability 315 + 316 + **Actions:** 317 + 318 + 1. **Abstract the storage layer**: Create a clean interface that could have Railway and Cloudflare implementations. 319 + 320 + ```javascript 321 + // storage/interface.js 322 + export interface StorageBackend { 323 + getItem(userId, profileId, itemId): Promise<Item>; 324 + saveItem(userId, profileId, item): Promise<string>; 325 + getItemsSince(userId, profileId, timestamp): Promise<Item[]>; 326 + // ... etc 327 + } 328 + ``` 329 + 330 + 2. **Move images to R2 now**: Even staying on Railway, images could be served from R2/CDN. This improves latency and reduces server load. 331 + 332 + 3. **Separate auth from data**: User registry could move to a shared service (D1, Turso, or even Cloudflare KV) while keeping data on Railway. 
333 + 334 + ### Long-Term Triggers for Cloudflare Migration 335 + 336 + Migrate when: 337 + - User count exceeds ~1000 active users 338 + - Latency complaints from non-US users become frequent 339 + - Railway costs exceed $50/mo 340 + - You want to eliminate cold starts for better UX 341 + 342 + --- 343 + 344 + ## Part 6: Migration Path (If/When Needed) 345 + 346 + ### Phase 1: Decouple Images (1 week) 347 + 348 + 1. Set up R2 bucket 349 + 2. Modify image upload to write to both filesystem and R2 350 + 3. Modify image serving to read from R2 351 + 4. Write migration script for existing images 352 + 5. Deploy, test, then remove filesystem path 353 + 354 + ### Phase 2: Abstract Storage (1 week) 355 + 356 + 1. Create `StorageBackend` interface 357 + 2. Implement `RailwayStorageBackend` wrapping current db.js 358 + 3. Refactor index.js to use interface 359 + 4. Test everything still works 360 + 361 + ### Phase 3: Build Cloudflare Backend (2 weeks) 362 + 363 + 1. Create `CloudflareStorageBackend` implementing same interface 364 + 2. Build Worker routing layer 365 + 3. Build Durable Object with SQLite schema 366 + 4. Set up local development with Wrangler/Miniflare 367 + 5. Test extensively with synthetic data 368 + 369 + ### Phase 4: Parallel Run (1 week) 370 + 371 + 1. Deploy Cloudflare version alongside Railway 372 + 2. Run both in parallel, comparing results 373 + 3. Use feature flag to route percentage of traffic 374 + 4. Monitor for discrepancies 375 + 376 + ### Phase 5: Cutover (1 day) 377 + 378 + 1. Put Railway in read-only mode 379 + 2. Migrate any remaining data 380 + 3. Update DNS/client config 381 + 4. Monitor closely for 24 hours 382 + 5. Decommission Railway 383 + 384 + ### Supporting Both Simultaneously 385 + 386 + If you want to support Railway AND Cloudflare (e.g., self-hosted vs managed): 387 + 388 + 1. Keep both backends in the codebase 389 + 2. Use environment detection to pick backend 390 + 3. Maintain feature parity tests 391 + 4. 
Document deployment options 392 + 393 + --- 394 + 395 + ## Appendix: Reference Links 396 + 397 + - [Cirrus GitHub](https://github.com/ascorbic/cirrus) 398 + - [Cloudflare Durable Objects SQLite](https://developers.cloudflare.com/durable-objects/sqlite/) 399 + - [Cloudflare R2 Documentation](https://developers.cloudflare.com/r2/) 400 + - [Railway Persistent Volumes](https://docs.railway.app/reference/volumes) 401 + - [Hono on Cloudflare Workers](https://hono.dev/docs/getting-started/cloudflare-workers) 402 + 403 + --- 404 + 405 + ## Appendix: Cost Modeling 406 + 407 + ### Scenario: 100 Active Users, 10 syncs/day each 408 + 409 + **Railway:** 410 + - $5/mo hobby tier (probably sufficient) 411 + - Total: **$5/mo** 412 + 413 + **Cloudflare:** 414 + - Workers: 100 * 10 * 30 = 30,000 requests/mo (free tier) 415 + - DO: 100 users * ~1MB each = 100MB storage ($0.015/mo) + 30,000 requests ($0.015) 416 + - R2: 100 users * 50MB images = 5GB ($0.075/mo) 417 + - Total: **~$0.10/mo** 418 + 419 + ### Scenario: 10,000 Active Users, 10 syncs/day each 420 + 421 + **Railway:** 422 + - $20/mo pro tier 423 + - May need vertical scaling 424 + - Total: **$20-50/mo** 425 + 426 + **Cloudflare:** 427 + - Workers: 3M requests/mo (free tier covers) 428 + - DO: 10GB storage ($1.50) + 3M requests ($1.50) 429 + - R2: 500GB images ($7.50/mo) 430 + - Total: **~$10-15/mo** 431 + 432 + **Conclusion**: Cloudflare is cheaper at scale, Railway is simpler at small scale.
+534
notes/server-portability-plan.md
··· 1 + # Server Portability Plan 2 + 3 + ## Executive Summary 4 + 5 + This plan covers three architectural changes to make the Peek server portable across different hosting environments: 6 + 7 + 1. **SQL Abstraction Layer** - Abstract `better-sqlite3` to support Cloudflare Durable Objects SQLite API 8 + 2. **Image Storage Abstraction** - Abstract filesystem storage to support R2/S3 9 + 3. **User Registry Decoupling** - Enable single-user mode for simplified self-hosting 10 + 11 + --- 12 + 13 + ## Current Architecture Analysis 14 + 15 + ### Database Layer (`backend/server/db.js`) 16 + 17 + **SQLite-specific APIs currently used:** 18 + 19 + | API | Usage | 20 + |-----|-------| 21 + | `new Database(path)` | Connection creation | 22 + | `db.pragma("journal_mode = WAL")` | Write-ahead logging | 23 + | `db.exec(sql)` | DDL and multi-statement execution | 24 + | `db.prepare(sql).run(...)` | Parameterized writes | 25 + | `db.prepare(sql).get(...)` | Single row reads | 26 + | `db.prepare(sql).all(...)` | Multi-row reads | 27 + | `db.transaction(fn)()` | Atomic table rebuilds | 28 + 29 + **Connection pool pattern:** 30 + ```javascript 31 + const connections = new Map(); // keyed by `${userId}:${profileId}` 32 + ``` 33 + 34 + **Path structure:** 35 + ``` 36 + DATA_DIR/{userId}/profiles/{profileId}/datastore.sqlite 37 + ``` 38 + 39 + ### User Registry (`backend/server/users.js`) 40 + 41 + - Separate `system.db` at `DATA_DIR/system.db` 42 + - Tables: `users` (id, api_key_hash, created_at) and `profiles` (id, user_id, slug, name, ...) 
43 + - API key auth: SHA-256 hash lookup 44 + - Profile resolution handles both UUIDs and legacy slugs 45 + 46 + ### Image Storage (`backend/server/db.js`) 47 + 48 + - Filesystem path: `DATA_DIR/{userId}/profiles/{profileId}/images/` 49 + - Content-hash deduplication: files named `{sha256}.{ext}` 50 + - Metadata in `items` table (type='image', metadata JSON with hash/mime/size) 51 + - Functions: `saveImage`, `getImagePath`, `getUserImagesDir` 52 + 53 + --- 54 + 55 + ## 1. SQL Abstraction Layer 56 + 57 + ### Proposed Interface 58 + 59 + ```typescript 60 + // backend/server/sql/types.ts 61 + 62 + interface RunResult { 63 + changes: number; 64 + lastInsertRowid?: number; 65 + } 66 + 67 + interface SqlAdapter { 68 + // Core query methods 69 + exec(sql: string): void; 70 + run(sql: string, params?: unknown[]): RunResult; 71 + get<T = unknown>(sql: string, params?: unknown[]): T | null; 72 + all<T = unknown>(sql: string, params?: unknown[]): T[]; 73 + 74 + // Transaction support 75 + transaction<T>(fn: () => T): T; 76 + 77 + // Lifecycle 78 + close(): void; 79 + } 80 + 81 + interface SqlAdapterFactory { 82 + open(path: string, options?: { readonly?: boolean }): SqlAdapter; 83 + init(adapter: SqlAdapter): void; // Platform-specific setup (WAL mode) 84 + } 85 + ``` 86 + 87 + ### better-sqlite3 Adapter Implementation 88 + 89 + ```typescript 90 + // backend/server/sql/better-sqlite3-adapter.ts 91 + 92 + import Database from 'better-sqlite3'; 93 + import type { SqlAdapter, SqlAdapterFactory, RunResult } from './types'; 94 + 95 + class BetterSqlite3Adapter implements SqlAdapter { 96 + private db: Database.Database; 97 + private stmtCache = new Map<string, Database.Statement>(); 98 + 99 + constructor(db: Database.Database) { 100 + this.db = db; 101 + } 102 + 103 + exec(sql: string): void { 104 + this.db.exec(sql); 105 + } 106 + 107 + run(sql: string, params: unknown[] = []): RunResult { 108 + const stmt = this.getOrPrepare(sql); 109 + const result = stmt.run(...params); 110 + 
return { changes: result.changes, lastInsertRowid: Number(result.lastInsertRowid) }; 111 + } 112 + 113 + get<T>(sql: string, params: unknown[] = []): T | null { 114 + return (this.getOrPrepare(sql).get(...params) as T) ?? null; 115 + } 116 + 117 + all<T>(sql: string, params: unknown[] = []): T[] { 118 + return this.getOrPrepare(sql).all(...params) as T[]; 119 + } 120 + 121 + transaction<T>(fn: () => T): T { 122 + return this.db.transaction(fn)(); 123 + } 124 + 125 + close(): void { 126 + this.db.close(); 127 + } 128 + 129 + private getOrPrepare(sql: string): Database.Statement { 130 + let stmt = this.stmtCache.get(sql); 131 + if (!stmt) { 132 + stmt = this.db.prepare(sql); 133 + this.stmtCache.set(sql, stmt); 134 + } 135 + return stmt; 136 + } 137 + } 138 + 139 + export const factory: SqlAdapterFactory = { 140 + open(path: string): SqlAdapter { 141 + return new BetterSqlite3Adapter(new Database(path)); 142 + }, 143 + init(adapter: SqlAdapter): void { 144 + adapter.exec("PRAGMA journal_mode = WAL"); 145 + } 146 + }; 147 + ``` 148 + 149 + ### Cloudflare DO SQLite Adapter 150 + 151 + ```typescript 152 + // backend/server/sql/do-sqlite-adapter.ts 153 + 154 + import type { SqlAdapter, RunResult } from './types'; 155 + 156 + class DoSqliteAdapter implements SqlAdapter { 157 + private sql: SqlStorage; 158 + 159 + constructor(storage: SqlStorage) { 160 + this.sql = storage; 161 + } 162 + 163 + exec(sql: string): void { 164 + this.sql.exec(sql); 165 + } 166 + 167 + run(sql: string, params: unknown[] = []): RunResult { 168 + const cursor = this.sql.exec(sql, ...params); 169 + return { changes: cursor.rowsWritten, lastInsertRowid: cursor.lastRowId }; 170 + } 171 + 172 + get<T>(sql: string, params: unknown[] = []): T | null { 173 + const rows = this.sql.exec(sql, ...params).toArray(); 174 + return rows.length > 0 ? 
(rows[0] as T) : null; 175 + } 176 + 177 + all<T>(sql: string, params: unknown[] = []): T[] { 178 + return this.sql.exec(sql, ...params).toArray() as T[]; 179 + } 180 + 181 + transaction<T>(fn: () => T): T { 182 + return fn(); // DO transactions are implicit 183 + } 184 + 185 + close(): void { 186 + // No-op for DO 187 + } 188 + } 189 + 190 + export function createFromStorage(storage: SqlStorage): SqlAdapter { 191 + return new DoSqliteAdapter(storage); 192 + } 193 + ``` 194 + 195 + ### Migration Strategy for db.js 196 + 197 + **Before (current):** 198 + ```javascript 199 + const db = new Database(dbPath); 200 + db.pragma("journal_mode = WAL"); 201 + db.exec(`CREATE TABLE IF NOT EXISTS items (...)`); 202 + const row = db.prepare("SELECT id FROM items WHERE id = ?").get(id); 203 + db.prepare("INSERT INTO items (...) VALUES (...)").run(v1, v2, v3); 204 + ``` 205 + 206 + **After (with adapter):** 207 + ```javascript 208 + const adapter = factory.open(dbPath); 209 + factory.init(adapter); 210 + adapter.exec(`CREATE TABLE IF NOT EXISTS items (...)`); 211 + const row = adapter.get("SELECT id FROM items WHERE id = ?", [id]); 212 + adapter.run("INSERT INTO items (...) VALUES (?, ?, ?)", [v1, v2, v3]); 213 + ``` 214 + 215 + ### Files to Create/Modify 216 + 217 + | File | Action | Notes | 218 + |------|--------|-------| 219 + | `backend/server/sql/types.ts` | Create | Interface definitions | 220 + | `backend/server/sql/better-sqlite3-adapter.ts` | Create | Current platform adapter | 221 + | `backend/server/sql/do-sqlite-adapter.ts` | Create | Cloudflare adapter (future) | 222 + | `backend/server/sql/index.ts` | Create | Factory selection based on config | 223 + | `backend/server/db.js` | Modify | Replace 50+ direct better-sqlite3 calls | 224 + | `backend/server/users.js` | Modify | Replace 15+ direct calls for system.db | 225 + | `backend/server/backup.js` | Modify | Handle `VACUUM INTO` (not available on DO) | 226 + 227 + --- 228 + 229 + ## 2. 
Image Storage Abstraction 230 + 231 + ### Proposed Interface 232 + 233 + ```typescript 234 + // backend/server/storage/types.ts 235 + 236 + interface StorageAdapter { 237 + put(key: string, data: Buffer, metadata?: Record<string, string>): Promise<void>; 238 + get(key: string): Promise<Buffer | null>; 239 + delete(key: string): Promise<void>; 240 + exists(key: string): Promise<boolean>; 241 + list(prefix?: string): Promise<string[]>; 242 + } 243 + 244 + interface StorageConfig { 245 + type: 'filesystem' | 'r2' | 's3'; 246 + // Filesystem 247 + basePath?: string; 248 + // R2/S3 249 + bucket?: string; 250 + region?: string; 251 + accessKeyId?: string; 252 + secretAccessKey?: string; 253 + endpoint?: string; 254 + } 255 + ``` 256 + 257 + ### Filesystem Adapter 258 + 259 + ```typescript 260 + // backend/server/storage/filesystem-adapter.ts 261 + 262 + import * as fs from 'fs'; 263 + import * as path from 'path'; 264 + import type { StorageAdapter } from './types'; 265 + 266 + export class FilesystemAdapter implements StorageAdapter { 267 + constructor(private basePath: string) {} 268 + 269 + async put(key: string, data: Buffer): Promise<void> { 270 + const fullPath = path.join(this.basePath, key); 271 + const dir = path.dirname(fullPath); 272 + if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true }); 273 + if (!fs.existsSync(fullPath)) fs.writeFileSync(fullPath, data); // Dedup 274 + } 275 + 276 + async get(key: string): Promise<Buffer | null> { 277 + const fullPath = path.join(this.basePath, key); 278 + return fs.existsSync(fullPath) ? 
fs.readFileSync(fullPath) : null; 279 + } 280 + 281 + async delete(key: string): Promise<void> { 282 + const fullPath = path.join(this.basePath, key); 283 + if (fs.existsSync(fullPath)) fs.unlinkSync(fullPath); 284 + } 285 + 286 + async exists(key: string): Promise<boolean> { 287 + return fs.existsSync(path.join(this.basePath, key)); 288 + } 289 + 290 + async list(prefix?: string): Promise<string[]> { 291 + // Recursive directory walk 292 + } 293 + } 294 + ``` 295 + 296 + ### R2 Adapter (for Workers) 297 + 298 + ```typescript 299 + // backend/server/storage/r2-adapter.ts 300 + 301 + import type { StorageAdapter } from './types'; 302 + 303 + export class R2Adapter implements StorageAdapter { 304 + constructor(private bucket: R2Bucket) {} 305 + 306 + async put(key: string, data: Buffer, metadata?: Record<string, string>): Promise<void> { 307 + await this.bucket.put(key, data, { customMetadata: metadata }); 308 + } 309 + 310 + async get(key: string): Promise<Buffer | null> { 311 + const obj = await this.bucket.get(key); 312 + return obj ? Buffer.from(await obj.arrayBuffer()) : null; 313 + } 314 + 315 + async delete(key: string): Promise<void> { 316 + await this.bucket.delete(key); 317 + } 318 + 319 + async exists(key: string): Promise<boolean> { 320 + return (await this.bucket.head(key)) !== null; 321 + } 322 + 323 + async list(prefix?: string): Promise<string[]> { 324 + const results: string[] = []; 325 + let cursor: string | undefined; 326 + do { 327 + const resp = await this.bucket.list({ prefix, cursor }); 328 + results.push(...resp.objects.map(o => o.key)); 329 + cursor = resp.truncated ? 
resp.cursor : undefined; 330 + } while (cursor); 331 + return results; 332 + } 333 + } 334 + ``` 335 + 336 + ### Key Format 337 + 338 + Storage key format: 339 + ``` 340 + {userId}/{profileId}/images/{hash}.{ext} 341 + ``` 342 + 343 + ### Files to Create/Modify 344 + 345 + | File | Action | 346 + |------|--------| 347 + | `backend/server/storage/types.ts` | Create | 348 + | `backend/server/storage/filesystem-adapter.ts` | Create | 349 + | `backend/server/storage/r2-adapter.ts` | Create | 350 + | `backend/server/storage/s3-adapter.ts` | Create (optional) | 351 + | `backend/server/storage/index.ts` | Create (factory) | 352 + | `backend/server/db.js` | Modify (saveImage, getImagePath) | 353 + | `backend/server/index.js` | Modify (image serving) | 354 + 355 + --- 356 + 357 + ## 3. User Registry Decoupling 358 + 359 + ### Single-User Mode Design 360 + 361 + ```typescript 362 + // backend/server/config.ts 363 + 364 + interface ServerConfig { 365 + mode: 'multi-user' | 'single-user'; 366 + singleUser?: { 367 + userId: string; // default: "default" 368 + token?: string; // optional bearer token 369 + }; 370 + } 371 + 372 + export function loadConfig(): ServerConfig { 373 + if (process.env.SINGLE_USER_MODE === 'true') { 374 + return { 375 + mode: 'single-user', 376 + singleUser: { 377 + userId: process.env.SINGLE_USER_ID || 'default', 378 + token: process.env.SINGLE_USER_TOKEN 379 + } 380 + }; 381 + } 382 + return { mode: 'multi-user' }; 383 + } 384 + ``` 385 + 386 + ### Auth Middleware Factory 387 + 388 + ```typescript 389 + // backend/server/auth.ts 390 + 391 + export function createAuthMiddleware(config: ServerConfig) { 392 + if (config.mode === 'single-user') { 393 + return singleUserMiddleware(config.singleUser!); 394 + } 395 + return multiUserMiddleware(); 396 + } 397 + 398 + function singleUserMiddleware({ userId, token }) { 399 + return async (c, next) => { 400 + if (c.req.path === '/') return next(); 401 + 402 + if (token) { 403 + const auth = 
c.req.header('Authorization'); 404 + if (!auth || auth !== `Bearer ${token}`) { 405 + return c.json({ error: 'Unauthorized' }, 401); 406 + } 407 + } 408 + 409 + c.set('userId', userId); 410 + return next(); 411 + }; 412 + } 413 + ``` 414 + 415 + ### Path Simplification in Single-User Mode 416 + 417 + **Multi-user (current):** 418 + ``` 419 + DATA_DIR/ 420 + ├── system.db 421 + ├── {userId}/ 422 + │ └── profiles/ 423 + │ └── {profileId}/ 424 + │ ├── datastore.sqlite 425 + │ └── images/ 426 + ``` 427 + 428 + **Single-user:** 429 + ``` 430 + DATA_DIR/ 431 + ├── profiles/ 432 + │ └── {profileId}/ 433 + │ ├── datastore.sqlite 434 + │ └── images/ 435 + ``` 436 + 437 + ### Config Flag Behavior 438 + 439 + | Config | system.db | User lookup | Path prefix | 440 + |--------|-----------|-------------|-------------| 441 + | `SINGLE_USER_MODE=false` | Required | API key hash | `{userId}/profiles/{profileId}` | 442 + | `SINGLE_USER_MODE=true` | Not created | Skip | `profiles/{profileId}` | 443 + 444 + ### Files to Create/Modify 445 + 446 + | File | Action | 447 + |------|--------| 448 + | `backend/server/config.ts` | Create | 449 + | `backend/server/auth.ts` | Create | 450 + | `backend/server/index.js` | Modify (use auth factory) | 451 + | `backend/server/db.js` | Modify (getConnection path logic) | 452 + | `backend/server/backup.js` | Modify (single-user backup path) | 453 + 454 + --- 455 + 456 + ## Implementation Order 457 + 458 + ### Phase 1: SQL Abstraction 459 + 1. Create `sql/` directory with types and adapters 460 + 2. Refactor `db.js` incrementally (keep tests passing) 461 + 3. Refactor `users.js` 462 + 4. Refactor `backup.js` 463 + 5. All existing tests must pass 464 + 465 + ### Phase 2: Storage Abstraction 466 + 1. Create `storage/` directory with adapters 467 + 2. Add `getImageKey()` helper to db.js 468 + 3. Refactor `saveImage()` to use storage adapter 469 + 4. Refactor image serving in `index.js` 470 + 5. 
Test with filesystem adapter (identical behavior) 471 + 472 + ### Phase 3: User Registry Decoupling 473 + 1. Create `config.ts` and `auth.ts` 474 + 2. Modify `index.js` to use auth middleware factory 475 + 3. Add path logic for single-user mode in `db.js` 476 + 4. Test single-user mode end-to-end 477 + 5. Update documentation 478 + 479 + --- 480 + 481 + ## Testing Approach 482 + 483 + ### Unit Tests (New) 484 + 485 + ```javascript 486 + // sql/better-sqlite3-adapter.test.js 487 + describe('BetterSqlite3Adapter', () => { 488 + it('exec() runs DDL', () => {}); 489 + it('run() returns changes count', () => {}); 490 + it('get() returns single row or null', () => {}); 491 + it('all() returns array', () => {}); 492 + it('transaction() is atomic', () => {}); 493 + }); 494 + ``` 495 + 496 + ### Integration Tests (Existing) 497 + 498 + All 92 existing tests in `backend/server/test.js` must pass after each phase. 499 + 500 + --- 501 + 502 + ## Configuration Summary 503 + 504 + ```bash 505 + # Current (multi-user, filesystem) 506 + # No changes needed 507 + 508 + # Single-user mode 509 + SINGLE_USER_MODE=true 510 + SINGLE_USER_TOKEN=secret-token # Optional 511 + 512 + # Future: R2 storage 513 + STORAGE_BACKEND=r2 514 + R2_BUCKET=peek-images 515 + 516 + # Future: DO SQLite (Cloudflare Workers only) 517 + SQL_ADAPTER=do-sqlite 518 + ``` 519 + 520 + --- 521 + 522 + ## Resolved Questions 523 + 524 + 1. **Backup with DO SQLite**: `VACUUM INTO` not available. Export to R2 as JSON? 525 + - **Answer**: Yes, but just add a TODO for now - not deploying to Cloudflare yet. 526 + 527 + 2. **Image URLs**: Direct R2 URLs or proxy through worker? 528 + - **Answer**: Put hash in URL so we can easily switch between backends later. 529 + 530 + 3. **Profiles in single-user**: Keep for workspaces or remove entirely? 531 + - **Answer**: Single-user will always have profiles - only removing multi-user from the base. 532 + 533 + 4. 
**Migration path**: How to migrate existing multi-user data to single-user? 534 + - **Answer**: Only one real user currently. At deployment time, find the one active account and migrate by moving its directory contents up one level (from `DATA_DIR/{userId}/profiles/` to `DATA_DIR/profiles/`), matching the single-user path layout above.
+1 -1
schema/generated/sqlite-full.sql
··· 1 1 -- Generated by schema/codegen.js 2 2 -- Schema version: 1 3 - -- Generated: 2026-02-01T23:03:47.198Z 3 + -- Generated: 2026-02-02T08:01:19.478Z 4 4 -- DO NOT EDIT - regenerate with: yarn schema:codegen 5 5 6 6 -- Unified content storage - URLs, text notes, tagsets, and images
+1 -1
schema/generated/sqlite-sync.sql
··· 1 1 -- Generated by schema/codegen.js 2 2 -- Schema version: 1 3 - -- Generated: 2026-02-01T23:03:47.198Z 3 + -- Generated: 2026-02-02T08:01:19.479Z 4 4 -- DO NOT EDIT - regenerate with: yarn schema:codegen 5 5 6 6 -- Unified content storage - URLs, text notes, tagsets, and images
+1 -1
schema/generated/types.rs
··· 1 1 // Generated by schema/codegen.js 2 2 // Schema version: 1 3 - // Generated: 2026-02-01T23:03:47.199Z 3 + // Generated: 2026-02-02T08:01:19.479Z 4 4 // DO NOT EDIT - regenerate with: yarn schema:codegen 5 5 6 6 use serde::{Deserialize, Serialize};
+1 -1
schema/generated/types.ts
··· 1 1 /** 2 2 * Generated by schema/codegen.js 3 3 * Schema version: 1 4 - * Generated: 2026-02-01T23:03:47.199Z 4 + * Generated: 2026-02-02T08:01:19.479Z 5 5 * DO NOT EDIT - regenerate with: yarn schema:codegen 6 6 */ 7 7