atproto pds
A zero-dependency AT Protocol Personal Data Server written in JavaScript

refactor!: normalize SQL schema to snake_case

BREAKING CHANGE: Existing Durable Objects require a storage reset (an equivalent in-place migration is sketched below).

- Rename tables: blob → blobs, record_blob → record_blobs
- Rename columns: mimeType → mime_type, createdAt → created_at, blobCid → blob_cid, recordUri → record_uri
- Update index: idx_record_blob_uri → idx_record_blobs_record_uri
- Add CHANGELOG.md
- Add .backup/ to .gitignore

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
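
The commit takes the storage-reset route rather than migrating data in place. For comparison only, an equivalent in-place migration would look roughly like the sketch below: a hypothetical `migrateSchemaToSnakeCase` helper, assuming the Durable Object's SQLite storage supports ALTER TABLE ... RENAME TO and RENAME COLUMN. It is not part of this change.

```js
// Hypothetical sketch only; not part of this commit. `sql` is the Durable
// Object's SqlStorage (this.sql in pds.js). Assumes its SQLite build supports
// ALTER TABLE ... RENAME TO and RENAME COLUMN (SQLite 3.25+).
function migrateSchemaToSnakeCase(sql) {
  sql.exec(`
    ALTER TABLE blob RENAME TO blobs;
    ALTER TABLE blobs RENAME COLUMN mimeType TO mime_type;
    ALTER TABLE blobs RENAME COLUMN createdAt TO created_at;

    ALTER TABLE record_blob RENAME TO record_blobs;
    ALTER TABLE record_blobs RENAME COLUMN blobCid TO blob_cid;
    ALTER TABLE record_blobs RENAME COLUMN recordUri TO record_uri;

    -- The old index survives the renames under its old name; recreate it.
    DROP INDEX IF EXISTS idx_record_blob_uri;
    CREATE INDEX IF NOT EXISTS idx_record_blobs_record_uri ON record_blobs(record_uri);
  `);
}
```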

Changed files
+73 -36
.gitignore  +1
···
 credentials-*.json
 .env
 .dev.vars
+.backup/
CHANGELOG.md  +36
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
+
+## [Unreleased]
+
+### Changed
+
+- **BREAKING:** Normalized SQL schema to snake_case convention
+  - Tables: `blob` → `blobs`, `record_blob` → `record_blobs`
+  - Columns: `mimeType` → `mime_type`, `createdAt` → `created_at`, `blobCid` → `blob_cid`, `recordUri` → `record_uri`
+  - Existing Durable Objects require storage reset
+
+## [0.1.0] - 2025-01-07
+
+Initial experimental release.
+
+### Added
+
+- **Repo operations:** createRecord, getRecord, putRecord, deleteRecord, applyWrites, listRecords
+- **Sync endpoints:** getRepo (CAR export), subscribeRepos (WebSocket firehose), getLatestCommit
+- **Authentication:** createSession, getSession, refreshSession with JWT tokens
+- **Blob storage:** uploadBlob, getBlob, listBlobs with R2 backend
+  - MIME type sniffing (JPEG, PNG, GIF, WebP, MP4, AVIF, HEIC)
+  - Automatic orphaned blob cleanup via DO alarms
+  - Blob-record association tracking
+- **Identity:** Handle resolution, PLC directory registration
+- **Federation:** Relay notification (requestCrawl), AppView proxy for app.bsky.* endpoints
+- **Infrastructure:**
+  - Merkle Search Tree (MST) for repo structure
+  - DAG-CBOR encoding with CID generation
+  - P-256 ECDSA signing via Web Crypto
+  - TypeScript checking via JSDoc annotations
+  - Setup script for key generation and PLC registration
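
One entry above, "P-256 ECDSA signing via Web Crypto", refers to the standard SubtleCrypto API available in Workers. Below is a minimal, self-contained sketch of generating a P-256 key pair and signing and verifying some bytes; it is illustrative only and is not the PDS's actual signing path (atproto commit signatures have additional requirements, such as low-S normalization, that are omitted here).

```js
// Minimal sketch of P-256 ECDSA signing with Web Crypto (SubtleCrypto).
// Illustrative only: key import, DID key encoding, and atproto's low-S
// signature requirement are not handled here. Run as an ES module.
const keyPair = await crypto.subtle.generateKey(
  { name: 'ECDSA', namedCurve: 'P-256' },
  true, // extractable, so the public key could be exported later
  ['sign', 'verify'],
);

const data = new TextEncoder().encode('hello atproto');

// Web Crypto returns the signature as a raw 64-byte r || s buffer for P-256.
const signature = await crypto.subtle.sign(
  { name: 'ECDSA', hash: 'SHA-256' },
  keyPair.privateKey,
  data,
);

const ok = await crypto.subtle.verify(
  { name: 'ECDSA', hash: 'SHA-256' },
  keyPair.publicKey,
  signature,
);
console.log(ok); // true
```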
src/pds.js  +36 -36
···
  */

 /**
- * Row from the `blob` table - tracks uploaded blob metadata
+ * Row from the `blobs` table - tracks uploaded blob metadata
  * @typedef {Object} BlobRow
  * @property {string} cid - Content ID of the blob (raw codec)
- * @property {string} mimeType - MIME type (sniffed or from Content-Type header)
+ * @property {string} mime_type - MIME type (sniffed or from Content-Type header)
  * @property {number} size - Size in bytes
- * @property {string} createdAt - ISO timestamp of upload
+ * @property {string} created_at - ISO timestamp of upload
  */

 /**
···
   evt BLOB NOT NULL
 );

-CREATE TABLE IF NOT EXISTS blob (
+CREATE TABLE IF NOT EXISTS blobs (
   cid TEXT PRIMARY KEY,
-  mimeType TEXT NOT NULL,
+  mime_type TEXT NOT NULL,
   size INTEGER NOT NULL,
-  createdAt TEXT NOT NULL
+  created_at TEXT NOT NULL
 );

-CREATE TABLE IF NOT EXISTS record_blob (
-  blobCid TEXT NOT NULL,
-  recordUri TEXT NOT NULL,
-  PRIMARY KEY (blobCid, recordUri)
+CREATE TABLE IF NOT EXISTS record_blobs (
+  blob_cid TEXT NOT NULL,
+  record_uri TEXT NOT NULL,
+  PRIMARY KEY (blob_cid, record_uri)
 );

-CREATE INDEX IF NOT EXISTS idx_record_blob_uri ON record_blob(recordUri);
+CREATE INDEX IF NOT EXISTS idx_record_blobs_record_uri ON record_blobs(record_uri);

 CREATE INDEX IF NOT EXISTS idx_records_collection ON records(collection, rkey);
 `);
···
 );

 // Associate blobs with this record (delete old associations first for updates)
-this.sql.exec('DELETE FROM record_blob WHERE recordUri = ?', uri);
+this.sql.exec('DELETE FROM record_blobs WHERE record_uri = ?', uri);

 const blobRefs = findBlobRefs(record);
 for (const blobCid of blobRefs) {
   // Verify blob exists
   const blobExists = this.sql
-    .exec('SELECT cid FROM blob WHERE cid = ?', blobCid)
+    .exec('SELECT cid FROM blobs WHERE cid = ?', blobCid)
     .toArray();

   if (blobExists.length === 0) {
···

   // Create association
   this.sql.exec(
-    'INSERT INTO record_blob (blobCid, recordUri) VALUES (?, ?)',
+    'INSERT INTO record_blobs (blob_cid, record_uri) VALUES (?, ?)',
     blobCid,
     uri,
   );
···

 // Get blobs associated with this record
 const associatedBlobs = this.sql
-  .exec('SELECT blobCid FROM record_blob WHERE recordUri = ?', uri)
+  .exec('SELECT blob_cid FROM record_blobs WHERE record_uri = ?', uri)
   .toArray();

 // Remove associations for this record
-this.sql.exec('DELETE FROM record_blob WHERE recordUri = ?', uri);
+this.sql.exec('DELETE FROM record_blobs WHERE record_uri = ?', uri);

 // Check each blob for orphan status and delete if unreferenced
-for (const { blobCid } of associatedBlobs) {
+for (const { blob_cid } of associatedBlobs) {
   const stillReferenced = this.sql
-    .exec('SELECT 1 FROM record_blob WHERE blobCid = ? LIMIT 1', blobCid)
+    .exec('SELECT 1 FROM record_blobs WHERE blob_cid = ? LIMIT 1', blob_cid)
     .toArray();

   if (stillReferenced.length === 0) {
     // Blob is orphaned, delete from R2 and database
-    await this.env?.BLOBS?.delete(`${did}/${blobCid}`);
-    this.sql.exec('DELETE FROM blob WHERE cid = ?', blobCid);
+    await this.env?.BLOBS?.delete(`${did}/${blob_cid}`);
+    this.sql.exec('DELETE FROM blobs WHERE cid = ?', blob_cid);
   }
 }
···
 });

 // Insert metadata (INSERT OR IGNORE handles concurrent uploads)
-const createdAt = new Date().toISOString();
+const created_at = new Date().toISOString();
 this.sql.exec(
-  'INSERT OR IGNORE INTO blob (cid, mimeType, size, createdAt) VALUES (?, ?, ?, ?)',
+  'INSERT OR IGNORE INTO blobs (cid, mime_type, size, created_at) VALUES (?, ?, ?, ?)',
   cidStr,
   mimeType,
   size,
-  createdAt,
+  created_at,
 );

 // Return BlobRef
···

 // Look up blob metadata
 const rows = this.sql
-  .exec('SELECT mimeType, size FROM blob WHERE cid = ?', cid)
+  .exec('SELECT mime_type, size FROM blobs WHERE cid = ?', cid)
   .toArray();

 if (rows.length === 0) {
   return errorResponse('BlobNotFound', 'blob not found', 404);
 }

-const { mimeType, size } = rows[0];
+const { mime_type, size } = rows[0];

 // Fetch from R2
 const r2Key = `${did}/${cid}`;
···
 // Return blob with security headers
 return new Response(object.body, {
   headers: {
-    'Content-Type': /** @type {string} */ (mimeType),
+    'Content-Type': /** @type {string} */ (mime_type),
     'Content-Length': String(size),
     'X-Content-Type-Options': 'nosniff',
     'Content-Security-Policy': "default-src 'none'; sandbox",
···
   );
 }

-// Query blobs with pagination (cursor is createdAt::cid for uniqueness)
-let query = 'SELECT cid, createdAt FROM blob';
+// Query blobs with pagination (cursor is created_at::cid for uniqueness)
+let query = 'SELECT cid, created_at FROM blobs';
 const params = [];

 if (cursor) {
   const [cursorTime, cursorCid] = cursor.split('::');
-  query += ' WHERE (createdAt > ? OR (createdAt = ? AND cid > ?))';
+  query += ' WHERE (created_at > ? OR (created_at = ? AND cid > ?))';
   params.push(cursorTime, cursorTime, cursorCid);
 }

-query += ' ORDER BY createdAt ASC, cid ASC LIMIT ?';
+query += ' ORDER BY created_at ASC, cid ASC LIMIT ?';
 params.push(limit + 1); // Fetch one extra to detect if there's more

 const rows = this.sql.exec(query, ...params).toArray();
···
 if (rows.length > limit) {
   rows.pop(); // Remove the extra row
   const last = rows[rows.length - 1];
-  nextCursor = `${last.createdAt}::${last.cid}`;
+  nextCursor = `${last.created_at}::${last.cid}`;
 }

 return Response.json({
···
 const did = await this.getDid();
 if (!did) return;

-// Find orphans: blobs not in record_blob, older than 24h
+// Find orphans: blobs not in record_blobs, older than 24h
 const cutoff = new Date(Date.now() - 24 * 60 * 60 * 1000).toISOString();

 const orphans = this.sql
   .exec(
-    `SELECT b.cid FROM blob b
-     LEFT JOIN record_blob rb ON b.cid = rb.blobCid
-     WHERE rb.blobCid IS NULL AND b.createdAt < ?`,
+    `SELECT b.cid FROM blobs b
+     LEFT JOIN record_blobs rb ON b.cid = rb.blob_cid
+     WHERE rb.blob_cid IS NULL AND b.created_at < ?`,
     cutoff,
   )
   .toArray();

 for (const { cid } of orphans) {
   await this.env?.BLOBS?.delete(`${did}/${cid}`);
-  this.sql.exec('DELETE FROM blob WHERE cid = ?', cid);
+  this.sql.exec('DELETE FROM blobs WHERE cid = ?', cid);
 }
 }
 }
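
The listBlobs hunk keeps the keyset pagination; only the cursor format changes to `created_at::cid`. Clients never parse the cursor, they just echo it back. A hypothetical paging loop against `com.atproto.sync.listBlobs` is sketched below; `PDS_URL` and `did` are placeholders, and the response is assumed to follow the standard `{ cids, cursor }` shape.

```js
// Hypothetical client-side sketch; not part of this commit.
// PDS_URL and did are placeholders, and the response is assumed to follow
// the standard com.atproto.sync.listBlobs shape ({ cids, cursor }).
const PDS_URL = 'https://pds.example.com';
const did = 'did:plc:example';

async function listAllBlobCids() {
  const cids = [];
  let cursor;
  do {
    const params = new URLSearchParams({ did, limit: '500' });
    if (cursor) params.set('cursor', cursor);
    const res = await fetch(`${PDS_URL}/xrpc/com.atproto.sync.listBlobs?${params}`);
    if (!res.ok) throw new Error(`listBlobs failed: ${res.status}`);
    const body = await res.json();
    cids.push(...body.cids);
    // The cursor is opaque to clients (`created_at::cid` on the server);
    // it is undefined on the last page, which ends the loop.
    cursor = body.cursor;
  } while (cursor);
  return cids;
}
```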