hosting-service/src/lib/utils.ts (+39 -41)
···
 /**
  * Replace subfs nodes in a directory tree with their actual content
+ * Subfs entries are "merged" - their root entries are hoisted into the parent directory
  */
 async function expandSubfsNodes(directory: Directory, pdsEndpoint: string): Promise<Directory> {
   // Extract all subfs URIs
···
     })
   );

-  // Build a map of path -> directory content
-  const subfsMap = new Map<string, Directory>();
+  // Build a map of path -> root entries to merge
+  const subfsMap = new Map<string, Entry[]>();
   for (const { record, path } of subfsRecords) {
-    if (record && record.root) {
-      subfsMap.set(path, record.root);
+    if (record && record.root && record.root.entries) {
+      subfsMap.set(path, record.root.entries);
     }
   }

-  // Replace subfs nodes with their actual content
+  // Replace subfs nodes by merging their root entries into the parent directory
   function replaceSubfsInEntries(entries: Entry[], currentPath: string = ''): Entry[] {
-    return entries.map(entry => {
+    const result: Entry[] = [];
+
+    for (const entry of entries) {
       const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
       const node = entry.node;

       if ('type' in node && node.type === 'subfs') {
-        // Replace with actual directory content
-        const subfsDir = subfsMap.get(fullPath);
-        if (subfsDir) {
-          console.log(`Expanding subfs node at ${fullPath}`);
-          return {
-            ...entry,
-            node: subfsDir
-          };
+        // Merge subfs entries into parent directory
+        const subfsEntries = subfsMap.get(fullPath);
+        if (subfsEntries) {
+          console.log(`Merging subfs node at ${fullPath} (${subfsEntries.length} entries)`);
+          // Recursively process the merged entries in case they contain nested subfs
+          const processedEntries = replaceSubfsInEntries(subfsEntries, currentPath);
+          result.push(...processedEntries);
+        } else {
+          // If fetch failed, skip this entry
+          console.warn(`Failed to fetch subfs at ${fullPath}, skipping`);
         }
-        // If fetch failed, keep the subfs node (will be skipped later)
-        return entry;
       } else if ('type' in node && node.type === 'directory' && 'entries' in node) {
         // Recursively process subdirectories
-        return {
+        result.push({
           ...entry,
           node: {
             ...node,
             entries: replaceSubfsInEntries(node.entries, fullPath)
           }
-        };
+        });
+      } else {
+        // Regular file entry
+        result.push(entry);
       }
+    }

-      return entry;
-    });
+    return result;
   }

   return {
···
   console.log(`[Incremental Update] Files to copy: ${copyTasks.length}, Files to download: ${downloadTasks.length}`);

-  // Copy unchanged files in parallel (fast local operations)
-  const copyLimit = 10;
+  // Copy unchanged files in parallel (fast local operations) - increased limit for better performance
+  const copyLimit = 50;
   for (let i = 0; i < copyTasks.length; i += copyLimit) {
     const batch = copyTasks.slice(i, i + copyLimit);
     await Promise.all(batch.map(task => task()));
+    if (copyTasks.length > copyLimit) {
+      console.log(`[Cache Progress] Copied ${Math.min(i + copyLimit, copyTasks.length)}/${copyTasks.length} unchanged files`);
+    }
   }

-  // Download new/changed files concurrently with a limit of 3 at a time
-  const downloadLimit = 3;
+  // Download new/changed files concurrently - increased from 3 to 20 for much better performance
+  const downloadLimit = 20;
   for (let i = 0; i < downloadTasks.length; i += downloadLimit) {
     const batch = downloadTasks.slice(i, i + downloadLimit);
     await Promise.all(batch.map(task => task()));
+    if (downloadTasks.length > downloadLimit) {
+      console.log(`[Cache Progress] Downloaded ${Math.min(i + downloadLimit, downloadTasks.length)}/${downloadTasks.length} files`);
+    }
   }
 }
···
       if (existsSync(sourceMetaFile)) {
         await copyFile(sourceMetaFile, destMetaFile);
       }
-
-      console.log(`[Incremental] Copied unchanged file: ${filePath}`);
     } catch (err) {
-      console.error(`[Incremental] Failed to copy file ${filePath}, will attempt download:`, err);
+      console.error(`Failed to copy cached file ${filePath}, will attempt download:`, err);
       throw err;
     }
   }
···
   // Allow up to 500MB per file blob, with 5 minute timeout
   let content = await safeFetchBlob(blobUrl, { maxSize: 500 * 1024 * 1024, timeout: 300000 });

-  console.log(`[DEBUG] ${filePath}: fetched ${content.length} bytes, base64=${base64}, encoding=${encoding}, mimeType=${mimeType}`);
-
   // If content is base64-encoded, decode it back to raw binary (gzipped or not)
   if (base64) {
-    const originalSize = content.length;
     // Decode base64 directly from raw bytes - no string conversion
     // The blob contains base64-encoded text as raw bytes, decode it in-place
     const textDecoder = new TextDecoder();
     const base64String = textDecoder.decode(content);
     content = Buffer.from(base64String, 'base64');
-    console.log(`[DEBUG] ${filePath}: decoded base64 from ${originalSize} bytes to ${content.length} bytes`);
-
-    // Check if it's actually gzipped by looking at magic bytes
-    if (content.length >= 2) {
-      const hasGzipMagic = content[0] === 0x1f && content[1] === 0x8b;
-      console.log(`[DEBUG] ${filePath}: has gzip magic bytes: ${hasGzipMagic}`);
-    }
   }

   const cacheFile = `${CACHE_DIR}/${did}/${site}${dirSuffix}/${filePath}`;
···
   const shouldStayCompressed = shouldCompressMimeType(mimeType);

   // Decompress files that shouldn't be stored compressed
-  if (encoding === 'gzip' && !shouldStayCompressed && content.length >= 2 &&
+  if (encoding === 'gzip' && !shouldStayCompressed && content.length >= 2 &&
       content[0] === 0x1f && content[1] === 0x8b) {
-    console.log(`[DEBUG] ${filePath}: decompressing non-compressible type (${mimeType}) before caching`);
     try {
       const { gunzipSync } = await import('zlib');
       const decompressed = gunzipSync(content);
-      console.log(`[DEBUG] ${filePath}: decompressed from ${content.length} to ${decompressed.length} bytes`);
       content = decompressed;
       // Clear the encoding flag since we're storing decompressed
       encoding = undefined;
     } catch (error) {
-      console.log(`[DEBUG] ${filePath}: failed to decompress, storing original gzipped content. Error:`, error);
+      console.error(`Failed to decompress ${filePath}, storing original gzipped content:`, error);
     }
   }
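Note on the merge semantics above: a subfs placeholder entry is dropped and the entries of the record it points to are hoisted into the same list, recursively. A minimal standalone sketch of that behavior (simplified types assumed here, not the production Entry/Directory definitions):

type SketchEntry = {
  name: string;
  node: { type: 'file' | 'subfs' | 'directory'; subject?: string; entries?: SketchEntry[] };
};

// resolved maps a subfs subject AT-URI to the entries of that record's root
function mergeSubfs(entries: SketchEntry[], resolved: Map<string, SketchEntry[]>): SketchEntry[] {
  const out: SketchEntry[] = [];
  for (const entry of entries) {
    if (entry.node.type === 'subfs' && entry.node.subject && resolved.has(entry.node.subject)) {
      // Hoist the referenced root entries in place of the placeholder (handles nested subfs)
      out.push(...mergeSubfs(resolved.get(entry.node.subject)!, resolved));
    } else {
      out.push(entry);
    }
  }
  return out;
}

// Example: [index.html, <subfs -> at://.../abc>] with abc resolving to [a.css, b.js]
// merges to [index.html, a.css, b.js]; the placeholder entry disappears.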
lexicons/fs.json (+1 -1)
···
       "required": ["type", "subject"],
       "properties": {
         "type": { "type": "string", "const": "subfs" },
-        "subject": { "type": "string", "format": "at-uri", "description": "AT-URI pointing to a place.wisp.subfs record containing this subtree" }
+        "subject": { "type": "string", "format": "at-uri", "description": "AT-URI pointing to a place.wisp.subfs record containing this subtree. When expanded, the subfs record's root entries are merged (flattened) into the parent directory - the subfs entry itself is removed and replaced by all entries from the referenced record's root. This allows splitting large directories across multiple records while maintaining a flat structure." }
       }
     }
   }
lexicons/subfs.json (+2 -2)
···
   "defs": {
     "main": {
       "type": "record",
-      "description": "Virtual filesystem manifest within a place.wisp.fs record",
+      "description": "Virtual filesystem subtree referenced by place.wisp.fs records. When a subfs entry is expanded, its root entries are merged (flattened) into the parent directory, allowing large directories to be split across multiple records while maintaining a flat structure.",
       "record": {
         "type": "object",
         "required": ["root", "createdAt"],
···
       "required": ["type", "subject"],
       "properties": {
         "type": { "type": "string", "const": "subfs" },
-        "subject": { "type": "string", "format": "at-uri", "description": "AT-URI pointing to another place.wisp.subfs record for nested subtrees" }
+        "subject": { "type": "string", "format": "at-uri", "description": "AT-URI pointing to another place.wisp.subfs record for nested subtrees. When expanded, the referenced record's root entries are merged (flattened) into the parent directory, allowing recursive splitting of large directory structures." }
       }
     }
   }
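For reference, the record shape this lexicon describes, as constructed in src/routes/wisp.ts further down; field values here are illustrative only:

// Illustrative subfs record (mirrors the subfsManifest object built in src/routes/wisp.ts)
const exampleSubfsRecord = {
  $type: 'place.wisp.subfs' as const,
  root: {
    $type: 'place.wisp.fs#directory' as const,
    type: 'directory' as const,
    entries: [] // up to ~100 file entries carved out of the parent manifest, elided here
  },
  fileCount: 100,
  createdAt: new Date().toISOString()
};
// A parent place.wisp.fs manifest references it with a subfs node whose subject is this
// record's AT-URI (at://<did>/place.wisp.subfs/<rkey>); the hosting service merges
// root.entries back into the parent directory when serving.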
src/lib/wisp-utils.ts (+12 -5)
···
 /**
  * Update file blobs in directory structure after upload
  * Uses path-based matching to correctly match files in nested directories
+ * Filters out files that were not successfully uploaded
  */
 export function updateFileBlobs(
   directory: Directory,
   uploadResults: FileUploadResult[],
   filePaths: string[],
-  currentPath: string = ''
+  currentPath: string = '',
+  successfulPaths?: Set<string>
 ): Directory {
   const updatedEntries = directory.entries.map(entry => {
     if ('type' in entry.node && entry.node.type === 'file') {
       // Build the full path for this file
       const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
+
+      // If successfulPaths is provided, skip files that weren't successfully uploaded
+      if (successfulPaths && !successfulPaths.has(fullPath)) {
+        return null; // Filter out failed files
+      }

       // Find exact match in filePaths (need to handle normalized paths)
       const fileIndex = filePaths.findIndex((path) => {
···
         }
       };
     } else {
-      console.error(`❌ BLOB MATCHING ERROR: Could not find blob for file: ${fullPath}`);
-      console.error(`  Available paths:`, filePaths.slice(0, 10), filePaths.length > 10 ? `... and ${filePaths.length - 10} more` : '');
+      console.error(`Could not find blob for file: ${fullPath}`);
+      return null; // Filter out files without blobs
     }
   } else if ('type' in entry.node && entry.node.type === 'directory') {
     const dirPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
     return {
       ...entry,
-      node: updateFileBlobs(entry.node as Directory, uploadResults, filePaths, dirPath)
+      node: updateFileBlobs(entry.node as Directory, uploadResults, filePaths, dirPath, successfulPaths)
     };
   }
   return entry;
-}) as Entry[];
+}).filter(entry => entry !== null) as Entry[]; // Remove null entries (failed files)

 const result = {
   $type: 'place.wisp.fs#directory' as const,
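Callers opt into the new filtering by passing the set of successfully uploaded paths; condensed from the call site in src/routes/wisp.ts shown further down:

// Condensed from src/routes/wisp.ts (same variable names as in that diff)
const uploadResults: FileUploadResult[] = uploadedBlobs.map(blob => blob.result);
const filePaths: string[] = uploadedBlobs.map(blob => blob.filePath);

// Strip the leading root segment so entries line up with the fullPath built inside updateFileBlobs
const successfulPaths = new Set(filePaths.map(path => path.replace(/^[^\/]*\//, '')));

// File entries missing from successfulPaths (or without a matched blob) are dropped from the manifest
const updatedDirectory = updateFileBlobs(directory, uploadResults, filePaths, '', successfulPaths);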
src/routes/wisp.ts (+223 -58)
···
   size: number;
 }> = [];

+// Track completed files count for accurate progress
+let completedFilesCount = 0;
+
 // Process file with sliding window concurrency
 const processFile = async (file: UploadedFile, index: number) => {
   try {
···
     if (existingBlob && existingBlob.cid === fileCID) {
       logger.info(`[File Upload] ♻️ Reused: ${file.name} (unchanged, CID: ${fileCID})`);
+      const reusedCount = (getUploadJob(jobId)?.progress.filesReused || 0) + 1;
+      completedFilesCount++;
       updateJobProgress(jobId, {
-        filesReused: (getUploadJob(jobId)?.progress.filesReused || 0) + 1
+        filesReused: reusedCount,
+        currentFile: `${completedFilesCount}/${validUploadedFiles.length}: ${file.name} (reused)`
       });

       return {
···
     );

     const returnedBlobRef = uploadResult.data.blob;
+    const uploadedCount = (getUploadJob(jobId)?.progress.filesUploaded || 0) + 1;
+    completedFilesCount++;
     updateJobProgress(jobId, {
-      filesUploaded: (getUploadJob(jobId)?.progress.filesUploaded || 0) + 1
+      filesUploaded: uploadedCount,
+      currentFile: `${completedFilesCount}/${validUploadedFiles.length}: ${file.name} (uploaded)`
     });
     logger.info(`[File Upload] ✅ Uploaded: ${file.name} (CID: ${fileCID})`);
···
     };
     logger.error(`Upload failed for file: ${fileName} (${fileSize} bytes) at index ${index}`, errorDetails);
     console.error(`Upload failed for file: ${fileName} (${fileSize} bytes) at index ${index}`, errorDetails);
+
+    completedFilesCount++;
+    updateJobProgress(jobId, {
+      currentFile: `${completedFilesCount}/${validUploadedFiles.length}: ${fileName} (failed)`
+    });

     // Track failed file but don't throw - continue with other files
     failedFiles.push({
···
   // Wait for remaining uploads
   await Promise.all(executing.keys());
+  console.log(`\n✅ Upload complete: ${completedFilesCount}/${validUploadedFiles.length} files processed\n`);
   return results.filter(r => r !== undefined && r !== null); // Filter out null (failed) and undefined entries
 };
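The sliding-window pattern referenced above is not shown in full in this diff; a generic sketch of the idea (names illustrative), which keeps up to `limit` uploads in flight instead of waiting on whole batches:

async function runWithWindow<T>(items: T[], limit: number, worker: (item: T, i: number) => Promise<void>): Promise<void> {
  const executing = new Set<Promise<void>>();
  for (let i = 0; i < items.length; i++) {
    // worker should catch its own errors (as processFile does above)
    const p = worker(items[i], i).finally(() => executing.delete(p));
    executing.add(p);
    if (executing.size >= limit) {
      await Promise.race(executing); // wait for one slot to free up
    }
  }
  await Promise.all(executing); // wait for remaining uploads
}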
···
 const uploadResults: FileUploadResult[] = uploadedBlobs.map(blob => blob.result);
 const filePaths: string[] = uploadedBlobs.map(blob => blob.filePath);

-// Update directory with file blobs
+// Update directory with file blobs (only for successfully uploaded files)
 console.log('Updating directory with blob references...');
 updateJobProgress(jobId, { phase: 'creating_manifest' });
-const updatedDirectory = updateFileBlobs(directory, uploadResults, filePaths);
+
+// Create a set of successfully uploaded paths for quick lookup
+const successfulPaths = new Set(filePaths.map(path => path.replace(/^[^\/]*\//, '')));
+
+const updatedDirectory = updateFileBlobs(directory, uploadResults, filePaths, '', successfulPaths);
+
+// Calculate actual file count (only successfully uploaded files)
+const actualFileCount = uploadedBlobs.length;

 // Check if we need to split into subfs records
 // Split proactively if we have lots of files to avoid hitting manifest size limits
 const MAX_MANIFEST_SIZE = 140 * 1024; // 140KB to be safe (PDS limit is 150KB)
-const FILE_COUNT_THRESHOLD = 250; // Start splitting early
+const FILE_COUNT_THRESHOLD = 250; // Start splitting at this many files
+const TARGET_FILE_COUNT = 200; // Try to keep main manifest under this many files
 const subfsRecords: Array<{ uri: string; path: string }> = [];
 let workingDirectory = updatedDirectory;
-let currentFileCount = fileCount;
+let currentFileCount = actualFileCount;

 // Create initial manifest to check size
-let manifest = createManifest(siteName, workingDirectory, fileCount);
+let manifest = createManifest(siteName, workingDirectory, actualFileCount);
 let manifestSize = JSON.stringify(manifest).length;

 // Split if we have lots of files OR if manifest is already too large
-if (fileCount >= FILE_COUNT_THRESHOLD || manifestSize > MAX_MANIFEST_SIZE) {
-  console.log(`⚠️ Large site detected (${fileCount} files, ${(manifestSize / 1024).toFixed(1)}KB), splitting into subfs records...`);
-  logger.info(`Large site with ${fileCount} files, splitting into subfs records`);
+if (actualFileCount >= FILE_COUNT_THRESHOLD || manifestSize > MAX_MANIFEST_SIZE) {
+  console.log(`⚠️ Large site detected (${actualFileCount} files, ${(manifestSize / 1024).toFixed(1)}KB), splitting into subfs records...`);
+  logger.info(`Large site with ${actualFileCount} files, splitting into subfs records`);

-  // Keep splitting until manifest fits under limit
+  // Keep splitting until manifest fits under limits (both size and file count)
   let attempts = 0;
   const MAX_ATTEMPTS = 100; // Allow many splits for very large sites

-  while (manifestSize > MAX_MANIFEST_SIZE && attempts < MAX_ATTEMPTS) {
+  while ((manifestSize > MAX_MANIFEST_SIZE || currentFileCount > TARGET_FILE_COUNT) && attempts < MAX_ATTEMPTS) {
     attempts++;

     // Find all directories sorted by size (largest first)
     const directories = findLargeDirectories(workingDirectory);
     directories.sort((a, b) => b.size - a.size);

-    if (directories.length === 0) {
-      // No more directories to split - this should be very rare
-      throw new Error(
-        `Cannot split manifest further - no subdirectories available. ` +
-        `Current size: ${(manifestSize / 1024).toFixed(1)}KB. ` +
-        `Try organizing files into subdirectories.`
-      );
-    }
+    // Check if we can split subdirectories or need to split flat files
+    if (directories.length > 0) {
+      // Split the largest subdirectory
+      const largestDir = directories[0];
+      console.log(`  Split #${attempts}: ${largestDir.path} (${largestDir.fileCount} files, ${(largestDir.size / 1024).toFixed(1)}KB)`);
+
+      // Create a subfs record for this directory
+      const subfsRkey = TID.nextStr();
+      const subfsManifest = {
+        $type: 'place.wisp.subfs' as const,
+        root: largestDir.directory,
+        fileCount: largestDir.fileCount,
+        createdAt: new Date().toISOString()
+      };
+
+      // Validate subfs record
+      const subfsValidation = validateSubfsRecord(subfsManifest);
+      if (!subfsValidation.success) {
+        throw new Error(`Invalid subfs manifest: ${subfsValidation.error?.message || 'Validation failed'}`);
+      }
+
+      // Upload subfs record to PDS
+      const subfsRecord = await agent.com.atproto.repo.putRecord({
+        repo: did,
+        collection: 'place.wisp.subfs',
+        rkey: subfsRkey,
+        record: subfsManifest
+      });
+
+      const subfsUri = subfsRecord.data.uri;
+      subfsRecords.push({ uri: subfsUri, path: largestDir.path });
+      console.log(`  ✅ Created subfs: ${subfsUri}`);
+      logger.info(`Created subfs record for ${largestDir.path}: ${subfsUri}`);
+
+      // Replace directory with subfs node in the main tree
+      workingDirectory = replaceDirectoryWithSubfs(workingDirectory, largestDir.path, subfsUri);
+      currentFileCount -= largestDir.fileCount;
+    } else {
+      // No subdirectories - split flat files at root level
+      const rootFiles = workingDirectory.entries.filter(e => 'type' in e.node && e.node.type === 'file');

-    // Pick the largest directory
-    const largestDir = directories[0];
-    console.log(`  Split #${attempts}: ${largestDir.path} (${largestDir.fileCount} files, ${(largestDir.size / 1024).toFixed(1)}KB)`);
+      if (rootFiles.length === 0) {
+        throw new Error(
+          `Cannot split manifest further - no files or directories available. ` +
+          `Current: ${currentFileCount} files, ${(manifestSize / 1024).toFixed(1)}KB.`
+        );
+      }

-    // Create a subfs record for this directory
-    const subfsRkey = TID.nextStr();
-    const subfsManifest = {
-      $type: 'place.wisp.subfs' as const,
-      root: largestDir.directory,
-      fileCount: largestDir.fileCount,
-      createdAt: new Date().toISOString()
-    };
+      // Take a chunk of files (aim for ~100 files per chunk)
+      const CHUNK_SIZE = 100;
+      const chunkFiles = rootFiles.slice(0, Math.min(CHUNK_SIZE, rootFiles.length));
+      console.log(`  Split #${attempts}: flat root (${chunkFiles.length} files)`);
+
+      // Create a directory with just these files
+      const chunkDirectory: Directory = {
+        $type: 'place.wisp.fs#directory' as const,
+        type: 'directory' as const,
+        entries: chunkFiles
+      };
+
+      // Create subfs record for this chunk
+      const subfsRkey = TID.nextStr();
+      const subfsManifest = {
+        $type: 'place.wisp.subfs' as const,
+        root: chunkDirectory,
+        fileCount: chunkFiles.length,
+        createdAt: new Date().toISOString()
+      };
+
+      // Validate subfs record
+      const subfsValidation = validateSubfsRecord(subfsManifest);
+      if (!subfsValidation.success) {
+        throw new Error(`Invalid subfs manifest: ${subfsValidation.error?.message || 'Validation failed'}`);
+      }
+
+      // Upload subfs record to PDS
+      const subfsRecord = await agent.com.atproto.repo.putRecord({
+        repo: did,
+        collection: 'place.wisp.subfs',
+        rkey: subfsRkey,
+        record: subfsManifest
+      });
+
+      const subfsUri = subfsRecord.data.uri;
+      console.log(`  ✅ Created flat subfs: ${subfsUri}`);
+      logger.info(`Created flat subfs record with ${chunkFiles.length} files: ${subfsUri}`);

-    // Validate subfs record
-    const subfsValidation = validateSubfsRecord(subfsManifest);
-    if (!subfsValidation.success) {
-      throw new Error(`Invalid subfs manifest: ${subfsValidation.error?.message || 'Validation failed'}`);
-    }
+      // Remove these files from the working directory and add a subfs entry
+      const remainingEntries = workingDirectory.entries.filter(
+        e => !chunkFiles.some(cf => cf.name === e.name)
+      );

-    // Upload subfs record to PDS
-    const subfsRecord = await agent.com.atproto.repo.putRecord({
-      repo: did,
-      collection: 'place.wisp.subfs',
-      rkey: subfsRkey,
-      record: subfsManifest
-    });
+      // Add subfs entry (will be merged flat when expanded)
+      remainingEntries.push({
+        name: `__subfs_${attempts}`, // Placeholder name, will be merged away
+        node: {
+          $type: 'place.wisp.fs#subfs' as const,
+          type: 'subfs' as const,
+          subject: subfsUri
+        }
+      });

-    const subfsUri = subfsRecord.data.uri;
-    subfsRecords.push({ uri: subfsUri, path: largestDir.path });
-    console.log(`  ✅ Created subfs: ${subfsUri}`);
-    logger.info(`Created subfs record for ${largestDir.path}: ${subfsUri}`);
+      workingDirectory = {
+        $type: 'place.wisp.fs#directory' as const,
+        type: 'directory' as const,
+        entries: remainingEntries
+      };

-    // Replace directory with subfs node in the main tree
-    workingDirectory = replaceDirectoryWithSubfs(workingDirectory, largestDir.path, subfsUri);
+      subfsRecords.push({ uri: subfsUri, path: `__subfs_${attempts}` });
+      currentFileCount -= chunkFiles.length;
+    }

     // Recreate manifest and check new size
-    currentFileCount -= largestDir.fileCount;
-    manifest = createManifest(siteName, workingDirectory, fileCount);
+    manifest = createManifest(siteName, workingDirectory, currentFileCount);
     manifestSize = JSON.stringify(manifest).length;
     const newSizeKB = (manifestSize / 1024).toFixed(1);
     console.log(`  → Manifest now ${newSizeKB}KB with ${currentFileCount} files (${subfsRecords.length} subfs total)`);

-    // Check if we're under the limit now
-    if (manifestSize <= MAX_MANIFEST_SIZE) {
-      console.log(`  ✅ Manifest fits! (${newSizeKB}KB < 140KB)`);
+    // Check if we're under both limits now
+    if (manifestSize <= MAX_MANIFEST_SIZE && currentFileCount <= TARGET_FILE_COUNT) {
+      console.log(`  ✅ Manifest fits! (${currentFileCount} files, ${newSizeKB}KB)`);
       break;
     }
   }

-  if (manifestSize > MAX_MANIFEST_SIZE) {
+  if (manifestSize > MAX_MANIFEST_SIZE || currentFileCount > TARGET_FILE_COUNT) {
     throw new Error(
       `Failed to fit manifest after splitting ${attempts} directories. ` +
-      `Current size: ${(manifestSize / 1024).toFixed(1)}KB. ` +
+      `Current: ${currentFileCount} files, ${(manifestSize / 1024).toFixed(1)}KB. ` +
       `This should never happen - please report this issue.`
     );
   }
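After splitting, the main manifest keeps the retained entries plus placeholder subfs nodes; an illustrative shape of workingDirectory after two flat splits (names and URIs hypothetical):

const splitRoot = {
  $type: 'place.wisp.fs#directory' as const,
  type: 'directory' as const,
  entries: [
    { name: 'index.html', node: { /* file node with blob ref, elided */ } },
    // Placeholder entries added by the flat-split branch; the hosting service discards
    // the names and merges each referenced record's root entries back in when serving.
    { name: '__subfs_1', node: { $type: 'place.wisp.fs#subfs' as const, type: 'subfs' as const, subject: 'at://did:plc:example/place.wisp.subfs/aaa' } },
    { name: '__subfs_2', node: { $type: 'place.wisp.fs#subfs' as const, type: 'subfs' as const, subject: 'at://did:plc:example/place.wisp.subfs/bbb' } }
  ]
};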
···
 const fileArray = Array.isArray(files) ? files : [files];
 const jobId = createUploadJob(auth.did, siteName, fileArray.length);

-// Create agent with OAuth session
-const agent = new Agent((url, init) => auth.session.fetchHandler(url, init))
+// Track upload speeds to estimate progress
+const uploadStats = {
+  speeds: [] as number[], // MB/s from completed uploads
+  getAverageSpeed(): number {
+    if (this.speeds.length === 0) return 3; // Default 3 MB/s
+    const sum = this.speeds.reduce((a, b) => a + b, 0);
+    return sum / this.speeds.length;
+  }
+};
+
+// Create agent with OAuth session and upload progress monitoring
+const wrappedFetchHandler = async (url: string, init?: RequestInit) => {
+  // Check if this is an uploadBlob request with a body
+  if (url.includes('uploadBlob') && init?.body) {
+    const originalBody = init.body;
+    const bodySize = originalBody instanceof Uint8Array ? originalBody.length :
+                     originalBody instanceof ArrayBuffer ? originalBody.byteLength :
+                     typeof originalBody === 'string' ? new TextEncoder().encode(originalBody).length : 0;
+
+    const startTime = Date.now();
+
+    if (bodySize > 10 * 1024 * 1024) { // Files over 10MB
+      const sizeMB = (bodySize / 1024 / 1024).toFixed(1);
+      const avgSpeed = uploadStats.getAverageSpeed();
+      const estimatedDuration = (bodySize / 1024 / 1024) / avgSpeed;
+
+      console.log(`[Upload Progress] Starting upload of ${sizeMB}MB file`);
+      console.log(`[Upload Stats] Measured speeds from last ${uploadStats.speeds.length} files:`, uploadStats.speeds.map(s => s.toFixed(2) + ' MB/s').join(', '));
+      console.log(`[Upload Stats] Average speed: ${avgSpeed.toFixed(2)} MB/s, estimated duration: ${estimatedDuration.toFixed(0)}s`);
+
+      // Log estimated progress every 5 seconds
+      const progressInterval = setInterval(() => {
+        const elapsed = (Date.now() - startTime) / 1000;
+        const estimatedPercent = Math.min(95, Math.round((elapsed / estimatedDuration) * 100));
+        const estimatedMB = Math.min(bodySize / 1024 / 1024, elapsed * avgSpeed).toFixed(1);
+        console.log(`[Upload Progress] ~${estimatedPercent}% (~${estimatedMB}/${sizeMB}MB) - ${elapsed.toFixed(0)}s elapsed`);
+      }, 5000);
+
+      try {
+        const result = await auth.session.fetchHandler(url, init);
+        clearInterval(progressInterval);
+        const totalTime = (Date.now() - startTime) / 1000;
+        const actualSpeed = (bodySize / 1024 / 1024) / totalTime;
+        uploadStats.speeds.push(actualSpeed);
+        // Keep only last 10 uploads for rolling average
+        if (uploadStats.speeds.length > 10) uploadStats.speeds.shift();
+        console.log(`[Upload Progress] ✅ Completed ${sizeMB}MB in ${totalTime.toFixed(1)}s (${actualSpeed.toFixed(1)} MB/s)`);
+        return result;
+      } catch (err) {
+        clearInterval(progressInterval);
+        const elapsed = (Date.now() - startTime) / 1000;
+        console.error(`[Upload Progress] ❌ Upload failed after ${elapsed.toFixed(1)}s`);
+        throw err;
+      }
+    } else {
+      // Track small files too for speed calculation
+      try {
+        const result = await auth.session.fetchHandler(url, init);
+        const totalTime = (Date.now() - startTime) / 1000;
+        if (totalTime > 0.5) { // Only track if > 0.5s
+          const actualSpeed = (bodySize / 1024 / 1024) / totalTime;
+          uploadStats.speeds.push(actualSpeed);
+          if (uploadStats.speeds.length > 10) uploadStats.speeds.shift();
+          console.log(`[Upload Stats] Small file: ${(bodySize / 1024).toFixed(1)}KB in ${totalTime.toFixed(2)}s = ${actualSpeed.toFixed(2)} MB/s`);
+        }
+        return result;
+      } catch (err) {
+        throw err;
+      }
+    }
+  }
+
+  // Normal request
+  return auth.session.fetchHandler(url, init);
+};
+
+const agent = new Agent(wrappedFetchHandler)
 console.log('Agent created for DID:', auth.did);
 console.log('Created upload job:', jobId);
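The progress logged by the interval is a pure time-based estimate rather than actual bytes sent; a worked example of the arithmetic used above:

// With no measured history the default speed is 3 MB/s, so a 120 MB blob gets
// estimatedDuration = 120 / 3 = 40s; after 10s the interval reports ~25%, and
// the estimate is capped at 95% until the request actually returns.
const bodySize = 120 * 1024 * 1024;
const avgSpeed = 3; // MB/s
const estimatedDuration = (bodySize / 1024 / 1024) / avgSpeed; // 40 seconds
const elapsed = 10; // seconds since startTime
const estimatedPercent = Math.min(95, Math.round((elapsed / estimatedDuration) * 100)); // 25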