Monorepo for wisp.place, a static site hosting service built on top of the AT Protocol. wisp.place
96
fork

Configure Feed

Select the types of activity you want to include in your feed.

fixes to cli not handling subfs correctly; fix hosting service storing metadata as "[object Object]". I LOVE JAVASCRIPT

+217 -87
+83 -24
apps/main-app/src/routes/wisp.ts
··· 10 10 updateFileBlobs, 11 11 findLargeDirectories, 12 12 replaceDirectoryWithSubfs, 13 - estimateDirectorySize 13 + estimateDirectorySize, 14 + splitDirectoryIntoChunks, 15 + countFilesInDirectory 14 16 } from '@wispplace/fs-utils' 15 17 import { 16 18 shouldCompressFile, ··· 615 617 const MAX_MANIFEST_SIZE = 140 * 1024; // 140KB to be safe (PDS limit is 150KB) 616 618 const FILE_COUNT_THRESHOLD = 250; // Start splitting at this many files 617 619 const TARGET_FILE_COUNT = 200; // Try to keep main manifest under this many files 620 + const MAX_SUBFS_SIZE = 75 * 1024; // 75KB per subfs record 618 621 const subfsRecords: Array<{ uri: string; path: string }> = []; 619 622 let workingDirectory = updatedDirectory; 620 623 let currentFileCount = actualFileCount; ··· 645 648 const largestDir = directories[0]; 646 649 console.log(` Split #${attempts}: ${largestDir.path} (${largestDir.fileCount} files, ${(largestDir.size / 1024).toFixed(1)}KB)`); 647 650 648 - // Create a subfs record for this directory 649 - const subfsRkey = TID.nextStr(); 650 - const subfsManifest = { 651 - $type: 'place.wisp.subfs' as const, 652 - root: largestDir.directory, 653 - fileCount: largestDir.fileCount, 654 - createdAt: new Date().toISOString() 655 - }; 651 + let subfsUri: string; 652 + 653 + if (largestDir.size > MAX_SUBFS_SIZE) { 654 + // Directory too large for a single subfs — split into chunks 655 + console.log(` → Directory too large (${(largestDir.size / 1024).toFixed(1)}KB), splitting into chunks...`); 656 + const chunks = splitDirectoryIntoChunks(largestDir.directory, MAX_SUBFS_SIZE); 657 + console.log(` → Created ${chunks.length} chunks`); 658 + 659 + const chunkUris: string[] = []; 660 + for (let i = 0; i < chunks.length; i++) { 661 + const chunk = chunks[i]!; 662 + const chunkRkey = TID.nextStr(); 663 + const chunkFileCount = countFilesInDirectory(chunk); 664 + console.log(` → Uploading chunk ${i + 1}/${chunks.length} (${chunkFileCount} files)...`); 665 + 666 + const 
chunkRecord = await agent.com.atproto.repo.putRecord({ 667 + repo: did, 668 + collection: 'place.wisp.subfs', 669 + rkey: chunkRkey, 670 + record: { 671 + $type: 'place.wisp.subfs' as const, 672 + root: chunk, 673 + fileCount: chunkFileCount, 674 + createdAt: new Date().toISOString() 675 + } 676 + }); 677 + 678 + chunkUris.push(chunkRecord.data.uri); 679 + } 680 + 681 + // Create parent subfs referencing all chunks with flat: true 682 + console.log(` → Creating parent subfs with ${chunkUris.length} chunk references...`); 683 + const parentDirectory: Directory = { 684 + $type: 'place.wisp.fs#directory' as const, 685 + type: 'directory' as const, 686 + entries: chunkUris.map((uri, i) => ({ 687 + name: `chunk${i}`, 688 + node: { 689 + $type: 'place.wisp.fs#subfs' as const, 690 + type: 'subfs' as const, 691 + subject: uri, 692 + flat: true 693 + } 694 + })) 695 + }; 696 + 697 + const parentRkey = TID.nextStr(); 698 + const parentRecord = await agent.com.atproto.repo.putRecord({ 699 + repo: did, 700 + collection: 'place.wisp.subfs', 701 + rkey: parentRkey, 702 + record: { 703 + $type: 'place.wisp.subfs' as const, 704 + root: parentDirectory, 705 + fileCount: largestDir.fileCount, 706 + createdAt: new Date().toISOString() 707 + } 708 + }); 656 709 657 - // Validate subfs record 658 - // const subfsValidation = validateSubfsRecord(subfsManifest); 659 - // if (!subfsValidation.success) { 660 - // throw new Error(`Invalid subfs manifest: ${subfsValidation.error?.message || 'Validation failed'}`); 661 - // } 710 + subfsUri = parentRecord.data.uri; 711 + console.log(` ✓ Created parent subfs with ${chunks.length} chunks`); 712 + logger.info(`Created chunked subfs for ${largestDir.path}: ${subfsUri} (${chunks.length} chunks)`); 713 + } else { 714 + // Directory fits in a single subfs record 715 + const subfsRkey = TID.nextStr(); 716 + const subfsRecord = await agent.com.atproto.repo.putRecord({ 717 + repo: did, 718 + collection: 'place.wisp.subfs', 719 + rkey: subfsRkey, 720 + 
record: { 721 + $type: 'place.wisp.subfs' as const, 722 + root: largestDir.directory, 723 + fileCount: largestDir.fileCount, 724 + createdAt: new Date().toISOString() 725 + } 726 + }); 662 727 663 - // Upload subfs record to PDS 664 - const subfsRecord = await agent.com.atproto.repo.putRecord({ 665 - repo: did, 666 - collection: 'place.wisp.subfs', 667 - rkey: subfsRkey, 668 - record: subfsManifest 669 - }); 728 + subfsUri = subfsRecord.data.uri; 729 + console.log(` ✅ Created subfs: ${subfsUri}`); 730 + logger.info(`Created subfs record for ${largestDir.path}: ${subfsUri}`); 731 + } 670 732 671 - const subfsUri = subfsRecord.data.uri; 672 733 subfsRecords.push({ uri: subfsUri, path: largestDir.path }); 673 - console.log(` ✅ Created subfs: ${subfsUri}`); 674 - logger.info(`Created subfs record for ${largestDir.path}: ${subfsUri}`); 675 734 676 735 // Replace directory with subfs node in the main tree 677 736 workingDirectory = replaceDirectoryWithSubfs(workingDirectory, largestDir.path, subfsUri);
+128 -60
cli/commands/deploy.ts
··· 25 25 // Constants for manifest splitting 26 26 const MAX_MANIFEST_SIZE = 140 * 1024; // 140KB (PDS limit is 150KB) 27 27 const FILE_COUNT_THRESHOLD = 250; 28 + const TARGET_FILE_COUNT = 200; 28 29 const MAX_SUBFS_SIZE = 75 * 1024; // 75KB per subfs 29 - const MAX_CONCURRENT_UPLOADS = 5; 30 + const DEFAULT_CONCURRENT_UPLOADS = 3; 31 + const RATELIMITED_CONCURRENT_UPLOADS = 2; 30 32 31 33 export interface DeployOptions { 32 34 path: string; ··· 34 36 directory?: boolean; 35 37 spa?: boolean; 36 38 yes?: boolean; 39 + concurrency?: number; 37 40 } 38 41 39 42 interface FileInfo { ··· 141 144 agent: Agent, 142 145 content: Buffer, 143 146 mimeType: string, 144 - retries = 3 147 + retries = 3, 148 + onRateLimit?: () => void 145 149 ): Promise<BlobRef> { 146 150 for (let attempt = 0; attempt < retries; attempt++) { 147 151 try { ··· 154 158 155 159 // Handle rate limits 156 160 if (err?.status === 429) { 161 + onRateLimit?.(); 157 162 const delay = Math.pow(2, attempt) * 2000; 158 163 await new Promise(r => setTimeout(r, delay)); 159 164 } else { ··· 168 173 async function processAndUploadFiles( 169 174 agent: Agent, 170 175 files: FileInfo[], 171 - existingBlobMap: Map<string, { blobRef: BlobRef; cid: string }> 176 + existingBlobMap: Map<string, { blobRef: BlobRef; cid: string }>, 177 + concurrency: number 172 178 ): Promise<{ uploadedFiles: UploadedFile[]; uploadResults: FileUploadResult[]; filePaths: string[] }> { 173 179 const spinner = createSpinner(`Processing ${files.length} files...`).start(); 174 180 ··· 178 184 179 185 let uploaded = 0; 180 186 let reused = 0; 187 + let currentConcurrency = concurrency; 188 + 189 + const onRateLimit = () => { 190 + const reduced = Math.min(currentConcurrency, RATELIMITED_CONCURRENT_UPLOADS); 191 + if (reduced < currentConcurrency) { 192 + currentConcurrency = reduced; 193 + spinner.text = `Rate limited — reducing concurrency to ${currentConcurrency}`; 194 + } 195 + }; 181 196 182 197 // Process in batches for concurrency 
183 - for (let i = 0; i < files.length; i += MAX_CONCURRENT_UPLOADS) { 184 - const batch = files.slice(i, i + MAX_CONCURRENT_UPLOADS); 198 + for (let i = 0; i < files.length; i += currentConcurrency) { 199 + const batch = files.slice(i, i + currentConcurrency); 185 200 186 201 await Promise.all(batch.map(async (file) => { 187 202 const content = readFileSync(file.path); ··· 216 231 reused++; 217 232 } else { 218 233 // Upload new blob 219 - blobRef = await uploadBlob(agent, processedContent, 'application/octet-stream'); 234 + blobRef = await uploadBlob(agent, processedContent, 'application/octet-stream', 3, onRateLimit); 220 235 uploaded++; 221 236 } 222 237 ··· 281 296 const spinner = createSpinner('Splitting large site into subfs records...').start(); 282 297 const subfsRkeys: string[] = []; 283 298 let currentDir = directory; 299 + let currentFileCount = countFilesInDirectory(currentDir); 284 300 let iteration = 0; 285 301 let chunkCounter = 0; 286 302 287 - while ( 288 - (estimateDirectorySize(currentDir) > MAX_MANIFEST_SIZE || 289 - countFilesInDirectory(currentDir) > FILE_COUNT_THRESHOLD) && 290 - iteration < 50 291 - ) { 303 + let manifest = createManifest(siteRkey, currentDir, currentFileCount); 304 + let manifestSize = JSON.stringify(manifest).length; 305 + 306 + while ((manifestSize > MAX_MANIFEST_SIZE || currentFileCount > TARGET_FILE_COUNT) && iteration < 100) { 292 307 iteration++; 293 308 294 - // Find largest directories 295 - const largeDirs = findLargeDirectories(currentDir) 296 - .filter(d => d.size > 1000) // Only consider dirs with meaningful size 297 - .sort((a, b) => b.size - a.size); 309 + // Find all directories sorted by size (largest first) 310 + const directories = findLargeDirectories(currentDir); 311 + directories.sort((a, b) => b.size - a.size); 298 312 299 - if (largeDirs.length === 0) break; 313 + if (directories.length > 0) { 314 + const largest = directories[0]!; 315 + spinner.text = `Split #${iteration}: ${largest.path} 
(${largest.fileCount} files, ${formatBytes(largest.size)})`; 300 316 301 - const largest = largeDirs[0]!; 302 - spinner.text = `Creating subfs ${iteration} for ${largest.path} (${formatBytes(largest.size)})`; 317 + let subfsUri: string; 303 318 304 - let subfsUri: string; 319 + // Check if directory is too large for a single subfs record 320 + if (largest.size > MAX_SUBFS_SIZE) { 321 + // Split into chunks 322 + console.log(pc.dim(`\n → Directory too large (${formatBytes(largest.size)}), splitting into chunks...`)); 323 + const chunks = splitDirectoryIntoChunks(largest.directory, MAX_SUBFS_SIZE); 324 + console.log(pc.dim(` → Created ${chunks.length} chunks`)); 305 325 306 - // Check if directory is too large for a single subfs record 307 - if (largest.size > MAX_SUBFS_SIZE) { 308 - // Split into chunks 309 - console.log(pc.dim(`\n → Directory too large (${formatBytes(largest.size)}), splitting into chunks...`)); 310 - const chunks = splitDirectoryIntoChunks(largest.directory, MAX_SUBFS_SIZE); 311 - console.log(pc.dim(` → Created ${chunks.length} chunks`)); 326 + // Upload each chunk as a subfs record 327 + const chunkUris: string[] = []; 328 + for (let i = 0; i < chunks.length; i++) { 329 + const chunk = chunks[i]!; 330 + const chunkRkey = `${siteRkey}-chunk-${chunkCounter++}`; 331 + const chunkSize = estimateDirectorySize(chunk); 332 + const chunkFileCount = countFilesInDirectory(chunk); 333 + 334 + console.log(pc.dim(` → Uploading chunk ${i + 1}/${chunks.length} (${chunkFileCount} files, ${formatBytes(chunkSize)})...`)); 312 335 313 - // Upload each chunk as a subfs record 314 - const chunkUris: string[] = []; 315 - for (let i = 0; i < chunks.length; i++) { 316 - const chunk = chunks[i]!; 317 - const chunkRkey = `${siteRkey}-chunk-${chunkCounter++}`; 318 - const chunkSize = estimateDirectorySize(chunk); 319 - const chunkFileCount = countFilesInDirectory(chunk); 336 + const chunkUri = await createSubfsRecord(agent, did, chunk, chunkRkey); 337 + 
chunkUris.push(chunkUri); 338 + subfsRkeys.push(chunkRkey); 339 + } 320 340 321 - console.log(pc.dim(` → Uploading chunk ${i + 1}/${chunks.length} (${chunkFileCount} files, ${formatBytes(chunkSize)})...`)); 341 + // Create parent subfs that references all chunks with flat: true 342 + console.log(pc.dim(` → Creating parent subfs with ${chunkUris.length} chunk references...`)); 322 343 323 - const chunkUri = await createSubfsRecord(agent, did, chunk, chunkRkey); 324 - chunkUris.push(chunkUri); 325 - subfsRkeys.push(chunkRkey); 344 + const parentEntries = chunkUris.map((uri, i) => ({ 345 + name: `chunk${i}`, 346 + node: { 347 + $type: 'place.wisp.fs#subfs' as const, 348 + type: 'subfs' as const, 349 + subject: uri, 350 + flat: true 351 + } 352 + })); 353 + 354 + const parentDirectory: Directory = { 355 + $type: 'place.wisp.fs#directory' as const, 356 + type: 'directory' as const, 357 + entries: parentEntries 358 + }; 359 + 360 + const parentRkey = `${siteRkey}-subfs-${iteration}`; 361 + subfsUri = await createSubfsRecord(agent, did, parentDirectory, parentRkey); 362 + subfsRkeys.push(parentRkey); 363 + 364 + console.log(pc.green(` ✓ Created parent subfs with ${chunks.length} chunks`)); 365 + } else { 366 + // Directory fits in a single subfs record 367 + const subfsRkey = `${siteRkey}-subfs-${iteration}`; 368 + subfsUri = await createSubfsRecord(agent, did, largest.directory, subfsRkey); 369 + subfsRkeys.push(subfsRkey); 326 370 } 327 371 328 - // Create parent subfs that references all chunks with flat: true 329 - console.log(pc.dim(` → Creating parent subfs with ${chunkUris.length} chunk references...`)); 372 + // Replace directory with subfs reference 373 + currentDir = replaceDirectoryWithSubfs(currentDir, largest.path, subfsUri); 374 + currentFileCount -= largest.fileCount; 375 + } else { 376 + // No subdirectories — split flat files at root level 377 + const rootFiles = currentDir.entries.filter(e => 'type' in e.node && e.node.type === 'file'); 378 + 379 + if 
(rootFiles.length === 0) { 380 + spinner.fail(`Cannot split further — no files or directories available (${currentFileCount} files, ${(manifestSize / 1024).toFixed(1)}KB)`); 381 + break; 382 + } 383 + 384 + // Take a chunk of ~100 files 385 + const CHUNK_SIZE = 100; 386 + const chunkFiles = rootFiles.slice(0, Math.min(CHUNK_SIZE, rootFiles.length)); 387 + spinner.text = `Split #${iteration}: flat root (${chunkFiles.length} files)`; 330 388 331 - const parentEntries = chunkUris.map((uri, i) => ({ 332 - name: `chunk${i}`, 389 + // Create a directory with just these files 390 + const chunkDirectory: Directory = { 391 + $type: 'place.wisp.fs#directory' as const, 392 + type: 'directory' as const, 393 + entries: chunkFiles 394 + }; 395 + 396 + const subfsRkey = `${siteRkey}-subfs-${iteration}`; 397 + const subfsUri = await createSubfsRecord(agent, did, chunkDirectory, subfsRkey); 398 + subfsRkeys.push(subfsRkey); 399 + 400 + // Remove chunked files and add a flat subfs entry 401 + const remainingEntries = currentDir.entries.filter( 402 + e => !chunkFiles.some(cf => cf.name === e.name) 403 + ); 404 + 405 + remainingEntries.push({ 406 + name: `__subfs_${iteration}`, 333 407 node: { 334 408 $type: 'place.wisp.fs#subfs' as const, 335 409 type: 'subfs' as const, 336 - subject: uri, 337 - flat: true // Merge chunk contents into parent 410 + subject: subfsUri, 411 + flat: true 338 412 } 339 - })); 413 + }); 340 414 341 - const parentDirectory: Directory = { 415 + currentDir = { 342 416 $type: 'place.wisp.fs#directory' as const, 343 417 type: 'directory' as const, 344 - entries: parentEntries 418 + entries: remainingEntries 345 419 }; 346 420 347 - const parentRkey = `${siteRkey}-subfs-${iteration}`; 348 - subfsUri = await createSubfsRecord(agent, did, parentDirectory, parentRkey); 349 - subfsRkeys.push(parentRkey); 350 - 351 - console.log(pc.green(` ✓ Created parent subfs with ${chunks.length} chunks`)); 352 - } else { 353 - // Directory fits in a single subfs record 354 - 
const subfsRkey = `${siteRkey}-subfs-${iteration}`; 355 - subfsUri = await createSubfsRecord(agent, did, largest.directory, subfsRkey); 356 - subfsRkeys.push(subfsRkey); 421 + currentFileCount -= chunkFiles.length; 357 422 } 358 423 359 - // Replace directory with subfs reference 360 - currentDir = replaceDirectoryWithSubfs(currentDir, largest.path, subfsUri); 424 + // Recreate manifest and check new size 425 + manifest = createManifest(siteRkey, currentDir, currentFileCount); 426 + manifestSize = JSON.stringify(manifest).length; 361 427 } 362 428 363 429 spinner.succeed(`Created ${subfsRkeys.length} subfs records`); ··· 454 520 const { uploadedFiles, uploadResults, filePaths } = await processAndUploadFiles( 455 521 agent, 456 522 files, 457 - existingBlobMap 523 + existingBlobMap, 524 + options.concurrency ?? DEFAULT_CONCURRENT_UPLOADS 458 525 ); 459 526 460 527 // 5. Build directory structure ··· 467 534 let finalDirectory = directory; 468 535 let subfsRkeys: string[] = []; 469 536 537 + const initialManifest = createManifest(siteName, directory, fileCount); 470 538 if ( 471 - estimateDirectorySize(directory) > MAX_MANIFEST_SIZE || 472 - fileCount > FILE_COUNT_THRESHOLD 539 + fileCount >= FILE_COUNT_THRESHOLD || 540 + JSON.stringify(initialManifest).length > MAX_MANIFEST_SIZE 473 541 ) { 474 542 const result = await splitIntoSubfs(agent, did, directory, siteName); 475 543 finalDirectory = result.directory;
+3 -1
cli/index.ts
··· 22 22 .option('-s, --site <name>', 'Site name (defaults to directory name)') 23 23 .option('--directory', 'Enable directory listing') 24 24 .option('--spa', 'Enable SPA mode (serve index.html for all routes)') 25 + .option('-c, --concurrency <n>', 'Number of concurrent uploads (backs off to 2 on rate limit)', '3') 25 26 .option('--password <password>', 'App password for headless authentication') 26 27 .option('--store <path>', 'OAuth session store path') 27 28 .option('-y, --yes', 'Skip confirmation prompts') ··· 101 102 site: resolvedSite, 102 103 directory: options.directory, 103 104 spa: options.spa, 104 - yes: options.yes 105 + yes: options.yes, 106 + concurrency: parseInt(options.concurrency, 10) 105 107 }); 106 108 107 109 console.log();
+3 -2
packages/@wispplace/fs-utils/src/tree.ts
··· 256 256 collectFileCidsFromEntries(node.entries, currentPath, fileCids); 257 257 } else if ('type' in node && node.type === 'file' && 'blob' in node) { 258 258 const fileNode = node as File; 259 - // Extract CID from blob ref 260 259 if (fileNode.blob && fileNode.blob.ref) { 261 - const cid = fileNode.blob.ref.toString(); 260 + const ref = fileNode.blob.ref; 261 + // Handle both CID instances (CBOR/firehose) and IPLD link objects { $link } (JSON API) 262 + const cid = (typeof ref === 'object' && '$link' in ref) ? (ref as { $link: string }).$link : ref.toString(); 262 263 fileCids[currentPath] = cid; 263 264 } 264 265 }