// One-click backups for AT Protocol
import { Agent } from "@atproto/api";
import { join, resolve } from "@tauri-apps/api/path";
import {
  mkdir,
  readDir,
  readFile,
  readTextFile,
  remove,
  writeFile,
} from "@tauri-apps/plugin-fs";
import { createBackupDir, getBackupDir } from "./paths";
import { CarStats, getCarStats } from "./stats";
12
/** Describes one on-disk backup; persisted alongside it as `metadata.json`. */
export interface Metadata {
  // DID of the account the backup belongs to.
  did: string;
  // ISO-8601 creation time of the backup.
  timestamp: string;
  // Kind of backup; this file writes "full_repo".
  backupType: string;
  // Absolute path to the backed-up repo CAR file.
  filePath: string;
  // Directory holding downloaded blobs, present only when blob download ran.
  blobsPath?: string;
  // Number of blobs successfully downloaded, present only when blob download ran.
  blobCount?: number;
  // Statistics computed from the CAR file contents (see ./stats).
  stats: CarStats;
}
22
/**
 * A reference to a single blob by CID.
 * NOTE(review): not referenced elsewhere in this file — presumably part of
 * the module's public API for consumers; verify before removing.
 */
export interface BlobReference {
  // Content identifier of the blob.
  cid: string;
  // MIME type, when known.
  mimeType?: string;
  // Size in bytes, when known.
  size?: number;
}
28
/** Phases a backup run moves through, in the order they occur. */
export type BackupStage =
  | "fetching"
  | "writing"
  | "blobs"
  | "cleanup"
  | "complete";

/** Snapshot of backup progress delivered to a ProgressCallback. */
export interface ProgressInfo {
  // Current phase of the backup.
  stage: BackupStage;
  // Human-readable status line suitable for display.
  message: string;
  progress?: number; // 0-100 percentage
  // 1-based index of the item being processed (used for blobs).
  current?: number;
  // Total number of items in the current phase (used for blobs).
  total?: number;
}

/** Receives progress updates during a backup run. */
export type ProgressCallback = (progress: ProgressInfo) => void;
44
45export class BackupAgent {
46 private agent: Agent;
47 private maxBackups = 3;
48 private downloadBlobs = true;
49 private progressCallback?: ProgressCallback;
50 private overwriteBackups = false;
51
52 constructor(
53 agent: Agent,
54 options?: {
55 downloadBlobs?: boolean;
56 onProgress?: ProgressCallback;
57 overwriteBackups?: boolean;
58 }
59 ) {
60 this.agent = agent;
61 this.downloadBlobs = options?.downloadBlobs ?? true;
62 this.progressCallback = options?.onProgress;
63 this.overwriteBackups = options?.overwriteBackups ?? false;
64 }
65
66 private reportProgress(progress: ProgressInfo) {
67 if (this.progressCallback) {
68 this.progressCallback(progress);
69 }
70 }
71
72 async startBackup(): Promise<Metadata> {
73 const did = this.agent.did;
74 if (did == null) throw Error("Unauthenticated");
75
76 try {
77 // Stage 1: Fetching repo data
78 this.reportProgress({
79 stage: "fetching",
80 message: "Fetching repository data...",
81 progress: 10,
82 });
83
84 const data = await this.agent.com.atproto.sync.getRepo({ did: did });
85
86 // Stage 2: Writing backup file
87 this.reportProgress({
88 stage: "writing",
89 message: "Writing backup to file...",
90 progress: 30,
91 });
92
93 const metadata = await this.writeBackupToFile(data.data, did);
94
95 // Stage 3: Download blobs if enabled
96 if (this.downloadBlobs) {
97 this.reportProgress({
98 stage: "blobs",
99 message: "Preparing to download blobs...",
100 progress: 40,
101 });
102
103 await this.downloadBlobsForBackup(metadata);
104 } else {
105 this.reportProgress({
106 stage: "blobs",
107 message: "Skipping blob download (disabled)",
108 progress: 80,
109 });
110 }
111
112 // Clean up old backups or overwrite existing one
113 if (this.overwriteBackups) {
114 this.reportProgress({
115 stage: "cleanup",
116 message: "Cleaning up previous backup...",
117 progress: 90,
118 });
119 await this.cleanupAllBackups();
120 } else {
121 this.reportProgress({
122 stage: "cleanup",
123 message: "Cleaning up old backups...",
124 progress: 90,
125 });
126 await this.cleanupOldBackups();
127 }
128
129 // Stage 5: Complete
130 this.reportProgress({
131 stage: "complete",
132 message: "Backup completed successfully!",
133 progress: 100,
134 });
135
136 return metadata;
137 } catch (error: any) {
138 this.reportProgress({
139 stage: "complete",
140 message: `Backup failed: ${error.message}`,
141 progress: 0,
142 });
143 throw error;
144 }
145 }
146
147 private async writeBackupToFile(
148 repoData: Uint8Array,
149 did: string
150 ): Promise<Metadata> {
151 try {
152 // Create backup directory structure
153 await createBackupDir();
154 const backupDir = await getBackupDir();
155
156 let backupPath: string;
157 if (this.overwriteBackups) {
158 // Use a consistent name for overwriting
159 backupPath = await join(backupDir, "current_backup");
160
161 // Remove existing backup if it exists
162 try {
163 await remove(backupPath, { recursive: true });
164 } catch (e) {
165 // Directory might not exist, which is fine
166 }
167 } else {
168 // Use timestamp-based naming, overwrite if exists for today
169 const timestamp = new Date().toISOString().split("T")[0]; // YYYY-MM-DD
170 backupPath = await join(backupDir, `${timestamp}_backup`);
171
172 // Remove existing backup for today if it exists
173 try {
174 await remove(backupPath, { recursive: true });
175 console.log(`Overwriting existing backup for ${timestamp}`);
176 } catch (e) {
177 // Directory might not exist, which is fine
178 }
179 }
180
181 await mkdir(backupPath);
182
183 // Write the repo data as binary file
184 const repoFilePath = await join(backupPath, "repo.car");
185 await writeFile(repoFilePath, repoData);
186
187 const stats = await getCarStats(repoData);
188
189 // Create a metadata file
190 const metadata = {
191 did: did,
192 timestamp: new Date().toISOString(),
193 backupType: "full_repo",
194 filePath: repoFilePath,
195 stats,
196 };
197
198 const metadataPath = await join(backupPath, "metadata.json");
199 const metadataJson = JSON.stringify(metadata, null, 2);
200 await writeFile(metadataPath, new TextEncoder().encode(metadataJson));
201
202 console.log(`Backup written to: ${backupPath}`);
203 return metadata;
204 } catch (error) {
205 console.error("Failed to write backup:", error);
206 throw error;
207 }
208 }
209
210 private async downloadBlobsForBackup(metadata: Metadata): Promise<void> {
211 try {
212 const backupDir = await resolve(metadata.filePath, "..");
213 const blobDir = await join(backupDir, "blobs");
214 await mkdir(blobDir);
215
216 // Extract blob references from the CAR file
217 this.reportProgress({
218 stage: "blobs",
219 message: "Extracting blob references...",
220 progress: 45,
221 });
222
223 const blobRefs = await this.extractBlobReferences();
224
225 if (blobRefs.length === 0) {
226 this.reportProgress({
227 stage: "blobs",
228 message: "No blobs found in backup",
229 progress: 80,
230 });
231 console.log("No blobs found in backup");
232 return;
233 }
234
235 console.log(`Downloading ${blobRefs.length} blobs...`);
236 let downloadedCount = 0;
237
238 for (let i = 0; i < blobRefs.length; i++) {
239 const blobRef = blobRefs[i];
240 const progress = 50 + Math.round((i / blobRefs.length) * 30); // 50-80% range
241
242 this.reportProgress({
243 stage: "blobs",
244 message: `Downloading blob ${i + 1} of ${blobRefs.length}...`,
245 progress,
246 current: i + 1,
247 total: blobRefs.length,
248 });
249
250 try {
251 const blobData = await this.agent.com.atproto.sync.getBlob({
252 did: metadata.did,
253 cid: blobRef,
254 });
255
256 const blobPath = await join(blobDir, `${blobRef}.blob`);
257 await writeFile(blobPath, blobData.data);
258 downloadedCount++;
259
260 // Optional: Save blob metadata
261 const blobMetadata = {
262 cid: blobRef,
263 size: blobData.data.length,
264 downloadedAt: new Date().toISOString(),
265 };
266
267 const blobMetadataPath = await join(blobDir, `${blobRef}.json`);
268 await writeFile(
269 blobMetadataPath,
270 new TextEncoder().encode(JSON.stringify(blobMetadata, null, 2))
271 );
272 } catch (error) {
273 console.error(`Failed to download blob ${blobRef}:`, error);
274 }
275 }
276
277 // Update main metadata with blob information
278 const updatedMetadata = {
279 ...metadata,
280 blobsPath: blobDir,
281 blobCount: downloadedCount,
282 };
283
284 const metadataPath = await join(backupDir, "metadata.json");
285 await writeFile(
286 metadataPath,
287 new TextEncoder().encode(JSON.stringify(updatedMetadata, null, 2))
288 );
289
290 this.reportProgress({
291 stage: "blobs",
292 message: `Downloaded ${downloadedCount}/${blobRefs.length} blobs`,
293 progress: 80,
294 });
295
296 console.log(`Downloaded ${downloadedCount}/${blobRefs.length} blobs`);
297 } catch (error) {
298 console.error("Failed to download blobs:", error);
299 // Don't throw - blob download failure shouldn't fail the entire backup
300 }
301 }
302
303 private async extractBlobReferences(): Promise<string[]> {
304 let allBlobs = [];
305 let cursor: string | undefined;
306 while (true) {
307 const blobs = await this.agent.com.atproto.sync.listBlobs({
308 did: this.agent.assertDid,
309 limit: 500,
310 cursor,
311 });
312 allBlobs.push(...blobs.data.cids);
313 if (blobs.data.cursor) cursor = blobs.data.cursor;
314 else break;
315 }
316
317 return allBlobs;
318 }
319
320 private async cleanupAllBackups(): Promise<void> {
321 try {
322 const backups = await this.getBackups();
323
324 for (const backup of backups) {
325 await this.deleteBackup(backup);
326 }
327
328 if (backups.length > 0) {
329 console.log(`Deleted ${backups.length} existing backup(s)`);
330 }
331 } catch (error) {
332 console.error("Failed to cleanup all backups:", error);
333 // Don't throw here - we don't want backup creation to fail because of cleanup issues
334 }
335 }
336
337 private async cleanupOldBackups(): Promise<void> {
338 try {
339 const backups = await this.getBackups();
340
341 // Sort backups by timestamp (newest first)
342 const sortedBackups = backups.sort(
343 (a, b) =>
344 new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime()
345 );
346
347 // If we have more than maxBackups, delete the oldest ones
348 if (sortedBackups.length > this.maxBackups) {
349 const backupsToDelete = sortedBackups.slice(this.maxBackups);
350
351 for (const backup of backupsToDelete) {
352 await this.deleteBackup(backup);
353 }
354
355 console.log(`Deleted ${backupsToDelete.length} old backup(s)`);
356 }
357 } catch (error) {
358 console.error("Failed to cleanup old backups:", error);
359 // Don't throw here - we don't want backup creation to fail because of cleanup issues
360 }
361 }
362
363 private async deleteBackup(backup: Metadata): Promise<void> {
364 try {
365 const rootBackupDir = await getBackupDir();
366 const dir = await readDir(rootBackupDir);
367
368 // Find the backup directory that contains this backup
369 for (const backupDir of dir) {
370 if (backupDir.isDirectory) {
371 const backupPath = await resolve(rootBackupDir, backupDir.name);
372 const metadataPath = await join(backupPath, "metadata.json");
373
374 try {
375 const metadata = await readTextFile(metadataPath);
376 const parsedMetadata = JSON.parse(metadata);
377
378 // Check if this is the backup we want to delete
379 if (
380 parsedMetadata.timestamp === backup.timestamp &&
381 parsedMetadata.did === backup.did
382 ) {
383 await remove(backupPath, { recursive: true });
384 console.log(`Deleted backup: ${backupPath}`);
385 break;
386 }
387 } catch (e) {
388 // Skip if we can't read metadata
389 continue;
390 }
391 }
392 }
393 } catch (error) {
394 console.error(`Failed to delete backup:`, error);
395 }
396 }
397
398 async getBackups(): Promise<Metadata[]> {
399 const data: Metadata[] = [];
400 await createBackupDir();
401 const rootBackupDir = await getBackupDir();
402 const dir = await readDir(rootBackupDir);
403
404 for (const backupDir of dir) {
405 if (backupDir.isDirectory) {
406 const backup = await readDir(
407 await resolve(rootBackupDir, backupDir.name)
408 );
409 const metadataFile = backup.find((e) => e.name == "metadata.json");
410 if (metadataFile) {
411 try {
412 const metadata = await readTextFile(
413 await resolve(rootBackupDir, backupDir.name, metadataFile.name)
414 );
415 data.push(JSON.parse(metadata));
416 } catch (error) {
417 console.error(
418 `Failed to read metadata for ${backupDir.name}:`,
419 error
420 );
421 // Continue processing other backups
422 }
423 }
424 }
425 }
426
427 return data;
428 }
429
430 // Method to restore a specific blob
431 async restoreBlob(
432 backupMetadata: Metadata,
433 blobCid: string
434 ): Promise<Uint8Array | null> {
435 if (!backupMetadata.blobsPath) {
436 throw new Error("No blobs available for this backup");
437 }
438
439 try {
440 const blobPath = await join(backupMetadata.blobsPath, `${blobCid}.blob`);
441 const blobData = await readTextFile(blobPath);
442 return new TextEncoder().encode(blobData);
443 } catch (error) {
444 console.error(`Failed to restore blob ${blobCid}:`, error);
445 return null;
446 }
447 }
448}