//! wisp.place CLI — part of the wisp.place monorepo, a static site hosting
//! service built on top of the AT Protocol.
1mod cid;
2mod blob_map;
3mod metadata;
4mod download;
5mod pull;
6mod serve;
7mod subfs_utils;
8mod redirects;
9mod ignore_patterns;
10
11use clap::{Parser, Subcommand};
12use jacquard::CowStr;
13use jacquard::client::{Agent, FileAuthStore, AgentSessionExt, MemoryCredentialSession, AgentSession};
14use jacquard::oauth::client::OAuthClient;
15use jacquard::oauth::loopback::LoopbackConfig;
16use jacquard::prelude::IdentityResolver;
17use jacquard_common::types::string::{Datetime, Rkey, RecordKey, AtUri};
18use jacquard_common::types::blob::MimeType;
19use miette::IntoDiagnostic;
20use std::path::{Path, PathBuf};
21use std::collections::HashMap;
22use flate2::Compression;
23use flate2::write::GzEncoder;
24use std::io::Write;
25use base64::Engine;
26use futures::stream::{self, StreamExt};
27use indicatif::{ProgressBar, ProgressStyle, MultiProgress};
28
29use wisp_lexicons::place_wisp::fs::*;
30use wisp_lexicons::place_wisp::settings::*;
31
/// Maximum number of concurrent file uploads to the PDS
// NOTE(review): presumably kept low to stay gentle on the PDS — confirm before raising.
const MAX_CONCURRENT_UPLOADS: usize = 2;

/// Limits for caching on wisp.place (from @wisp/constants)
// Sites exceeding either limit are still uploaded to the PDS, but wisp.place
// will not serve them (see the warning/confirmation flow in `deploy_site`).
const MAX_FILE_COUNT: usize = 1000;
const MAX_SITE_SIZE: usize = 300 * 1024 * 1024; // 300MB
38
// Top-level CLI arguments.
//
// The deploy-related fields below mirror the `Deploy` subcommand so that a
// bare `wisp <input>` keeps working as a legacy shorthand for
// `wisp deploy <input>` (see the `None` arm in `main`). `conflicts_with =
// "command"` rejects them when an explicit subcommand is given.
//
// NOTE: field `///` docs double as clap help text — do not edit casually.
#[derive(Parser, Debug)]
#[command(author, version, about = "wisp.place CLI tool")]
struct Args {
    #[command(subcommand)]
    command: Option<Commands>,

    // Deploy arguments (when no subcommand is specified)
    /// Handle (e.g., alice.bsky.social), DID, or PDS URL
    #[arg(global = true, conflicts_with = "command")]
    input: Option<CowStr<'static>>,

    /// Path to the directory containing your static site
    #[arg(short, long, global = true, conflicts_with = "command")]
    path: Option<PathBuf>,

    /// Site name (defaults to directory name)
    #[arg(short, long, global = true, conflicts_with = "command")]
    site: Option<String>,

    /// Path to auth store file
    // Optional here (unlike `Deploy`); `main` applies the same default.
    #[arg(long, global = true, conflicts_with = "command")]
    store: Option<String>,

    /// App Password for authentication
    // When present, app-password auth is used instead of OAuth.
    #[arg(long, global = true, conflicts_with = "command")]
    password: Option<CowStr<'static>>,

    /// Enable directory listing mode for paths without index files
    #[arg(long, global = true, conflicts_with = "command")]
    directory: bool,

    /// Enable SPA mode (serve index.html for all routes)
    #[arg(long, global = true, conflicts_with = "command")]
    spa: bool,

    /// Skip confirmation prompts (automatically accept warnings)
    #[arg(short = 'y', long, global = true, conflicts_with = "command")]
    yes: bool,
}
78
// CLI subcommands. `Deploy` is also reachable without naming it (legacy mode
// in `main`), which is why `Args` duplicates its fields.
//
// NOTE: field `///` docs double as clap help text — do not edit casually.
#[derive(Subcommand, Debug)]
enum Commands {
    /// Deploy a static site to wisp.place (default command)
    Deploy {
        /// Handle (e.g., alice.bsky.social), DID, or PDS URL
        input: CowStr<'static>,

        /// Path to the directory containing your static site
        #[arg(short, long, default_value = ".")]
        path: PathBuf,

        /// Site name (defaults to directory name)
        #[arg(short, long)]
        site: Option<String>,

        /// Path to auth store file (will be created if missing, only used with OAuth)
        // Keep this default in sync with the fallback in `main`'s legacy mode.
        #[arg(long, default_value = "/tmp/wisp-oauth-session.json")]
        store: String,

        /// App Password for authentication (alternative to OAuth)
        #[arg(long)]
        password: Option<CowStr<'static>>,

        /// Enable directory listing mode for paths without index files
        #[arg(long)]
        directory: bool,

        /// Enable SPA mode (serve index.html for all routes)
        #[arg(long)]
        spa: bool,

        /// Skip confirmation prompts (automatically accept warnings)
        #[arg(short = 'y', long)]
        yes: bool,
    },
    /// Pull a site from the PDS to a local directory
    Pull {
        /// Handle (e.g., alice.bsky.social) or DID
        input: CowStr<'static>,

        /// Site name (record key)
        #[arg(short, long)]
        site: String,

        /// Output directory for the downloaded site
        #[arg(short, long, default_value = ".")]
        output: PathBuf,
    },
    /// Serve a site locally with real-time firehose updates
    Serve {
        /// Handle (e.g., alice.bsky.social) or DID
        input: CowStr<'static>,

        /// Site name (record key)
        #[arg(short, long)]
        site: String,

        /// Output directory for the site files
        #[arg(short, long, default_value = ".")]
        output: PathBuf,

        /// Port to serve on
        #[arg(short, long, default_value = "8080")]
        port: u16,
    },
}
145
146#[tokio::main]
147async fn main() -> miette::Result<()> {
148 let args = Args::parse();
149
150 let result = match args.command {
151 Some(Commands::Deploy { input, path, site, store, password, directory, spa, yes }) => {
152 // Dispatch to appropriate authentication method
153 if let Some(password) = password {
154 run_with_app_password(input, password, path, site, directory, spa, yes).await
155 } else {
156 run_with_oauth(input, store, path, site, directory, spa, yes).await
157 }
158 }
159 Some(Commands::Pull { input, site, output }) => {
160 pull::pull_site(input, CowStr::from(site), output).await
161 }
162 Some(Commands::Serve { input, site, output, port }) => {
163 serve::serve_site(input, CowStr::from(site), output, port).await
164 }
165 None => {
166 // Legacy mode: if input is provided, assume deploy command
167 if let Some(input) = args.input {
168 let path = args.path.unwrap_or_else(|| PathBuf::from("."));
169 let store = args.store.unwrap_or_else(|| "/tmp/wisp-oauth-session.json".to_string());
170
171 // Dispatch to appropriate authentication method
172 if let Some(password) = args.password {
173 run_with_app_password(input, password, path, args.site, args.directory, args.spa, args.yes).await
174 } else {
175 run_with_oauth(input, store, path, args.site, args.directory, args.spa, args.yes).await
176 }
177 } else {
178 // No command and no input, show help
179 use clap::CommandFactory;
180 Args::command().print_help().into_diagnostic()?;
181 Ok(())
182 }
183 }
184 };
185
186 // Force exit to avoid hanging on background tasks/connections
187 match result {
188 Ok(_) => std::process::exit(0),
189 Err(e) => {
190 eprintln!("{:?}", e);
191 std::process::exit(1)
192 }
193 }
194}
195
/// Run deployment with app password authentication
///
/// Signs in against the user's PDS using `input` (handle, DID, or PDS URL)
/// and the given app password, then hands off to `deploy_site`.
///
/// `path`, `site`, `directory`, `spa`, and `yes` are forwarded to
/// `deploy_site` unchanged.
async fn run_with_app_password(
    input: CowStr<'static>,
    password: CowStr<'static>,
    path: PathBuf,
    site: Option<String>,
    directory: bool,
    spa: bool,
    yes: bool,
) -> miette::Result<()> {
    // Password-based session; `auth` carries the resolved identity.
    let (session, auth) =
        MemoryCredentialSession::authenticated(input, password, None, None).await?;
    println!("Signed in as {}", auth.handle);

    let agent: Agent<_> = Agent::from(session);
    deploy_site(&agent, path, site, directory, spa, yes).await
}
213
/// Run deployment with OAuth authentication
///
/// Performs a browser-based OAuth login via a local loopback server,
/// persisting the session in the auth store file at `store`, then hands off
/// to `deploy_site`. The remaining parameters are forwarded unchanged.
async fn run_with_oauth(
    input: CowStr<'static>,
    store: String,
    path: PathBuf,
    site: Option<String>,
    directory: bool,
    spa: bool,
    yes: bool,
) -> miette::Result<()> {
    use jacquard::oauth::scopes::Scope;
    use jacquard::oauth::atproto::AtprotoClientMetadata;
    use jacquard::oauth::session::ClientData;
    use url::Url;

    // Request the necessary scopes for wisp.place (including settings)
    let scopes = Scope::parse_multiple("atproto repo:place.wisp.fs repo:place.wisp.subfs repo:place.wisp.settings blob:*/*")
        .map_err(|e| miette::miette!("Failed to parse scopes: {:?}", e))?;

    // Create redirect URIs that match the loopback server (port 4000, path /oauth/callback)
    // Both IPv4 and IPv6 loopback forms are registered so whichever one the
    // local server binds to will match.
    let redirect_uris = vec![
        Url::parse("http://127.0.0.1:4000/oauth/callback").into_diagnostic()?,
        Url::parse("http://[::1]:4000/oauth/callback").into_diagnostic()?,
    ];

    // Create client metadata with matching redirect URIs and scopes
    let client_data = ClientData {
        keyset: None,
        config: AtprotoClientMetadata::new_localhost(
            Some(redirect_uris),
            Some(scopes),
        ),
    };

    // File-backed auth store so the OAuth session survives across runs.
    let oauth = OAuthClient::new(FileAuthStore::new(&store), client_data);

    // Blocks until the user completes the login flow in their browser.
    let session = oauth
        .login_with_local_server(input, Default::default(), LoopbackConfig::default())
        .await?;

    let agent: Agent<_> = Agent::from(session);
    deploy_site(&agent, path, site, directory, spa, yes).await
}
257
258/// Scan directory to count files and calculate total size
259/// Returns (file_count, total_size_bytes)
260fn scan_directory_stats(
261 dir_path: &Path,
262 ignore_matcher: &ignore_patterns::IgnoreMatcher,
263 current_path: String,
264) -> miette::Result<(usize, u64)> {
265 let mut file_count = 0;
266 let mut total_size = 0u64;
267
268 let dir_entries: Vec<_> = std::fs::read_dir(dir_path)
269 .into_diagnostic()?
270 .collect::<Result<Vec<_>, _>>()
271 .into_diagnostic()?;
272
273 for entry in dir_entries {
274 let path = entry.path();
275 let name = entry.file_name();
276 let name_str = name.to_str()
277 .ok_or_else(|| miette::miette!("Invalid filename: {:?}", name))?
278 .to_string();
279
280 let full_path = if current_path.is_empty() {
281 name_str.clone()
282 } else {
283 format!("{}/{}", current_path, name_str)
284 };
285
286 // Skip files/directories that match ignore patterns
287 if ignore_matcher.is_ignored(&full_path) || ignore_matcher.is_filename_ignored(&name_str) {
288 continue;
289 }
290
291 let metadata = entry.metadata().into_diagnostic()?;
292
293 if metadata.is_file() {
294 file_count += 1;
295 total_size += metadata.len();
296 } else if metadata.is_dir() {
297 let subdir_path = if current_path.is_empty() {
298 name_str
299 } else {
300 format!("{}/{}", current_path, name_str)
301 };
302 let (sub_count, sub_size) = scan_directory_stats(&path, ignore_matcher, subdir_path)?;
303 file_count += sub_count;
304 total_size += sub_size;
305 }
306 }
307
308 Ok((file_count, total_size))
309}
310
311/// Deploy the site using the provided agent
312async fn deploy_site(
313 agent: &Agent<impl jacquard::client::AgentSession + IdentityResolver>,
314 path: PathBuf,
315 site: Option<String>,
316 directory_listing: bool,
317 spa_mode: bool,
318 skip_prompts: bool,
319) -> miette::Result<()> {
320 // Verify the path exists
321 if !path.exists() {
322 return Err(miette::miette!("Path does not exist: {}", path.display()));
323 }
324
325 // Get site name
326 let site_name = site.unwrap_or_else(|| {
327 path
328 .file_name()
329 .and_then(|n| n.to_str())
330 .unwrap_or("site")
331 .to_string()
332 });
333
334 println!("Deploying site '{}'...", site_name);
335
336 // Scan directory to check file count and size
337 let ignore_matcher = ignore_patterns::IgnoreMatcher::new(&path)?;
338 let (file_count, total_size) = scan_directory_stats(&path, &ignore_matcher, String::new())?;
339
340 let size_mb = total_size as f64 / (1024.0 * 1024.0);
341 println!("Scanned: {} files, {:.1} MB total", file_count, size_mb);
342
343 // Check if limits are exceeded
344 let exceeds_file_count = file_count > MAX_FILE_COUNT;
345 let exceeds_size = total_size > MAX_SITE_SIZE as u64;
346
347 if exceeds_file_count || exceeds_size {
348 println!("\n⚠️ Warning: Your site exceeds wisp.place caching limits:");
349
350 if exceeds_file_count {
351 println!(" • File count: {} (limit: {})", file_count, MAX_FILE_COUNT);
352 }
353
354 if exceeds_size {
355 let size_mb = total_size as f64 / (1024.0 * 1024.0);
356 let limit_mb = MAX_SITE_SIZE as f64 / (1024.0 * 1024.0);
357 println!(" • Total size: {:.1} MB (limit: {:.0} MB)", size_mb, limit_mb);
358 }
359
360 println!("\nwisp.place will NOT serve your site if you proceed.");
361 println!("Your site will be uploaded to your PDS, but will only be accessible via:");
362 println!(" • wisp-cli serve (local hosting)");
363 println!(" • Other hosting services with more generous limits");
364
365 if !skip_prompts {
366 // Prompt for confirmation
367 use std::io::{self, Write};
368 print!("\nDo you want to upload anyway? (y/N): ");
369 io::stdout().flush().into_diagnostic()?;
370
371 let mut input = String::new();
372 io::stdin().read_line(&mut input).into_diagnostic()?;
373 let input = input.trim().to_lowercase();
374
375 if input != "y" && input != "yes" {
376 println!("Upload cancelled.");
377 return Ok(());
378 }
379 } else {
380 println!("\nSkipping confirmation (--yes flag set).");
381 }
382
383 println!("\nProceeding with upload...\n");
384 }
385
386 // Try to fetch existing manifest for incremental updates
387 let (existing_blob_map, old_subfs_uris): (HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>, Vec<(String, String)>) = {
388 use jacquard_common::types::string::AtUri;
389
390 // Get the DID for this session
391 let session_info = agent.session_info().await;
392 if let Some((did, _)) = session_info {
393 // Construct the AT URI for the record
394 let uri_string = format!("at://{}/place.wisp.fs/{}", did, site_name);
395 if let Ok(uri) = AtUri::new(&uri_string) {
396 match agent.get_record::<Fs>(&uri).await {
397 Ok(response) => {
398 match response.into_output() {
399 Ok(record_output) => {
400 let existing_manifest = record_output.value;
401 let mut blob_map = blob_map::extract_blob_map(&existing_manifest.root);
402 println!("Found existing manifest with {} files in main record", blob_map.len());
403
404 // Extract subfs URIs from main record
405 let subfs_uris = subfs_utils::extract_subfs_uris(&existing_manifest.root, String::new());
406
407 if !subfs_uris.is_empty() {
408 println!("Found {} subfs records, fetching for blob reuse...", subfs_uris.len());
409
410 // Merge blob maps from all subfs records
411 match subfs_utils::merge_subfs_blob_maps(agent, subfs_uris.clone(), &mut blob_map).await {
412 Ok(merged_count) => {
413 println!("Total blob map: {} files (main + {} from subfs)", blob_map.len(), merged_count);
414 }
415 Err(e) => {
416 eprintln!("⚠️ Failed to merge some subfs blob maps: {}", e);
417 }
418 }
419
420 (blob_map, subfs_uris)
421 } else {
422 (blob_map, Vec::new())
423 }
424 }
425 Err(_) => {
426 println!("No existing manifest found, uploading all files...");
427 (HashMap::new(), Vec::new())
428 }
429 }
430 }
431 Err(_) => {
432 // Record doesn't exist yet - this is a new site
433 println!("No existing manifest found, uploading all files...");
434 (HashMap::new(), Vec::new())
435 }
436 }
437 } else {
438 println!("No existing manifest found (invalid URI), uploading all files...");
439 (HashMap::new(), Vec::new())
440 }
441 } else {
442 println!("No existing manifest found (could not get DID), uploading all files...");
443 (HashMap::new(), Vec::new())
444 }
445 };
446
447 // Create progress tracking (spinner style since we don't know total count upfront)
448 let multi_progress = MultiProgress::new();
449 let progress = multi_progress.add(ProgressBar::new_spinner());
450 progress.set_style(
451 ProgressStyle::default_spinner()
452 .template("[{elapsed_precise}] {spinner:.cyan} {pos} files {msg}")
453 .into_diagnostic()?
454 .tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈ ")
455 );
456 progress.set_message("Scanning files...");
457 progress.enable_steady_tick(std::time::Duration::from_millis(100));
458
459 let (root_dir, total_files, reused_count) = build_directory(agent, &path, &existing_blob_map, String::new(), &ignore_matcher, &progress).await?;
460 let uploaded_count = total_files - reused_count;
461
462 progress.finish_with_message(format!("✓ {} files ({} uploaded, {} reused)", total_files, uploaded_count, reused_count));
463
464 // Check if we need to split into subfs records
465 const MAX_MANIFEST_SIZE: usize = 140 * 1024; // 140KB (PDS limit is 150KB)
466 const FILE_COUNT_THRESHOLD: usize = 250; // Start splitting at this many files
467 const TARGET_FILE_COUNT: usize = 200; // Keep main manifest under this
468
469 let mut working_directory = root_dir;
470 let mut current_file_count = total_files;
471 let mut new_subfs_uris: Vec<(String, String)> = Vec::new();
472
473 // Estimate initial manifest size
474 let mut manifest_size = subfs_utils::estimate_directory_size(&working_directory);
475
476 if total_files >= FILE_COUNT_THRESHOLD || manifest_size > MAX_MANIFEST_SIZE {
477 println!("\n⚠️ Large site detected ({} files, {:.1}KB manifest), splitting into subfs records...",
478 total_files, manifest_size as f64 / 1024.0);
479
480 let mut attempts = 0;
481 const MAX_SPLIT_ATTEMPTS: usize = 50;
482
483 while (manifest_size > MAX_MANIFEST_SIZE || current_file_count > TARGET_FILE_COUNT) && attempts < MAX_SPLIT_ATTEMPTS {
484 attempts += 1;
485
486 // Find large directories to split
487 let directories = subfs_utils::find_large_directories(&working_directory, String::new());
488
489 if let Some(largest_dir) = directories.first() {
490 println!(" Split #{}: {} ({} files, {:.1}KB)",
491 attempts, largest_dir.path, largest_dir.file_count, largest_dir.size as f64 / 1024.0);
492
493 // Check if this directory is itself too large for a single subfs record
494 const MAX_SUBFS_SIZE: usize = 75 * 1024; // 75KB soft limit for safety
495 let mut subfs_uri = String::new();
496
497 if largest_dir.size > MAX_SUBFS_SIZE {
498 // Need to split this directory into multiple chunks
499 println!(" → Directory too large, splitting into chunks...");
500 let chunks = subfs_utils::split_directory_into_chunks(&largest_dir.directory, MAX_SUBFS_SIZE);
501 println!(" → Created {} chunks", chunks.len());
502
503 // Upload each chunk as a subfs record
504 let mut chunk_uris = Vec::new();
505 for (i, chunk) in chunks.iter().enumerate() {
506 use jacquard_common::types::string::Tid;
507 let chunk_tid = Tid::now_0();
508 let chunk_rkey = chunk_tid.to_string();
509
510 let chunk_file_count = subfs_utils::count_files_in_directory(chunk);
511 let chunk_size = subfs_utils::estimate_directory_size(chunk);
512
513 let chunk_manifest = wisp_lexicons::place_wisp::subfs::SubfsRecord::new()
514 .root(convert_fs_dir_to_subfs_dir(chunk.clone()))
515 .file_count(Some(chunk_file_count as i64))
516 .created_at(Datetime::now())
517 .build();
518
519 println!(" → Uploading chunk {}/{} ({} files, {:.1}KB)...",
520 i + 1, chunks.len(), chunk_file_count, chunk_size as f64 / 1024.0);
521
522 let chunk_output = agent.put_record(
523 RecordKey::from(Rkey::new(&chunk_rkey).into_diagnostic()?),
524 chunk_manifest
525 ).await.into_diagnostic()?;
526
527 let chunk_uri = chunk_output.uri.to_string();
528 chunk_uris.push((chunk_uri.clone(), format!("{}#{}", largest_dir.path, i)));
529 new_subfs_uris.push((chunk_uri.clone(), format!("{}#{}", largest_dir.path, i)));
530 }
531
532 // Create a parent subfs record that references all chunks
533 // Each chunk reference MUST have flat: true to merge chunk contents
534 println!(" → Creating parent subfs with {} chunk references...", chunk_uris.len());
535 use jacquard_common::CowStr;
536 use wisp_lexicons::place_wisp::fs::{Subfs};
537
538 // Convert to fs::Subfs (which has the 'flat' field) instead of subfs::Subfs
539 let parent_entries_fs: Vec<Entry> = chunk_uris.iter().enumerate().map(|(i, (uri, _))| {
540 let uri_string = uri.clone();
541 let at_uri = AtUri::new_cow(CowStr::from(uri_string)).expect("valid URI");
542 Entry::new()
543 .name(CowStr::from(format!("chunk{}", i)))
544 .node(EntryNode::Subfs(Box::new(
545 Subfs::new()
546 .r#type(CowStr::from("subfs"))
547 .subject(at_uri)
548 .flat(Some(true)) // EXPLICITLY TRUE - merge chunk contents
549 .build()
550 )))
551 .build()
552 }).collect();
553
554 let parent_root_fs = Directory::new()
555 .r#type(CowStr::from("directory"))
556 .entries(parent_entries_fs)
557 .build();
558
559 // Convert to subfs::Directory for the parent subfs record
560 let parent_root_subfs = convert_fs_dir_to_subfs_dir(parent_root_fs);
561
562 use jacquard_common::types::string::Tid;
563 let parent_tid = Tid::now_0();
564 let parent_rkey = parent_tid.to_string();
565
566 let parent_manifest = wisp_lexicons::place_wisp::subfs::SubfsRecord::new()
567 .root(parent_root_subfs)
568 .file_count(Some(largest_dir.file_count as i64))
569 .created_at(Datetime::now())
570 .build();
571
572 let parent_output = agent.put_record(
573 RecordKey::from(Rkey::new(&parent_rkey).into_diagnostic()?),
574 parent_manifest
575 ).await.into_diagnostic()?;
576
577 subfs_uri = parent_output.uri.to_string();
578 println!(" ✅ Created parent subfs with chunks (flat=true on each chunk): {}", subfs_uri);
579 } else {
580 // Directory fits in a single subfs record
581 use jacquard_common::types::string::Tid;
582 let subfs_tid = Tid::now_0();
583 let subfs_rkey = subfs_tid.to_string();
584
585 let subfs_manifest = wisp_lexicons::place_wisp::subfs::SubfsRecord::new()
586 .root(convert_fs_dir_to_subfs_dir(largest_dir.directory.clone()))
587 .file_count(Some(largest_dir.file_count as i64))
588 .created_at(Datetime::now())
589 .build();
590
591 // Upload subfs record
592 let subfs_output = agent.put_record(
593 RecordKey::from(Rkey::new(&subfs_rkey).into_diagnostic()?),
594 subfs_manifest
595 ).await.into_diagnostic()?;
596
597 subfs_uri = subfs_output.uri.to_string();
598 println!(" ✅ Created subfs: {}", subfs_uri);
599 }
600
601 // Replace directory with subfs node (flat: false to preserve directory structure)
602 working_directory = subfs_utils::replace_directory_with_subfs(
603 working_directory,
604 &largest_dir.path,
605 &subfs_uri,
606 false // Preserve directory - the chunks inside have flat=true
607 )?;
608
609 new_subfs_uris.push((subfs_uri, largest_dir.path.clone()));
610 current_file_count -= largest_dir.file_count;
611
612 // Recalculate manifest size
613 manifest_size = subfs_utils::estimate_directory_size(&working_directory);
614 println!(" → Manifest now {:.1}KB with {} files ({} subfs total)",
615 manifest_size as f64 / 1024.0, current_file_count, new_subfs_uris.len());
616
617 if manifest_size <= MAX_MANIFEST_SIZE && current_file_count <= TARGET_FILE_COUNT {
618 println!("✅ Manifest now fits within limits");
619 break;
620 }
621 } else {
622 println!(" No more subdirectories to split - stopping");
623 break;
624 }
625 }
626
627 if attempts >= MAX_SPLIT_ATTEMPTS {
628 return Err(miette::miette!(
629 "Exceeded maximum split attempts ({}). Manifest still too large: {:.1}KB with {} files",
630 MAX_SPLIT_ATTEMPTS,
631 manifest_size as f64 / 1024.0,
632 current_file_count
633 ));
634 }
635
636 println!("✅ Split complete: {} subfs records, {} files in main manifest, {:.1}KB",
637 new_subfs_uris.len(), current_file_count, manifest_size as f64 / 1024.0);
638 } else {
639 println!("Manifest created ({} files, {:.1}KB) - no splitting needed",
640 total_files, manifest_size as f64 / 1024.0);
641 }
642
643 // Create the final Fs record
644 let fs_record = Fs::new()
645 .site(CowStr::from(site_name.clone()))
646 .root(working_directory)
647 .file_count(current_file_count as i64)
648 .created_at(Datetime::now())
649 .build();
650
651 // Use site name as the record key
652 let rkey = Rkey::new(&site_name).map_err(|e| miette::miette!("Invalid rkey: {}", e))?;
653 let output = agent.put_record(RecordKey::from(rkey), fs_record).await?;
654
655 // Extract DID from the AT URI (format: at://did:plc:xxx/collection/rkey)
656 let uri_str = output.uri.to_string();
657 let did = uri_str
658 .strip_prefix("at://")
659 .and_then(|s| s.split('/').next())
660 .ok_or_else(|| miette::miette!("Failed to parse DID from URI"))?;
661
662 println!("\n✓ Deployed site '{}': {}", site_name, output.uri);
663 println!(" Total files: {} ({} reused, {} uploaded)", total_files, reused_count, uploaded_count);
664 println!(" Available at: https://sites.wisp.place/{}/{}", did, site_name);
665
666 // Clean up old subfs records
667 if !old_subfs_uris.is_empty() {
668 println!("\nCleaning up {} old subfs records...", old_subfs_uris.len());
669
670 let mut deleted_count = 0;
671 let mut failed_count = 0;
672
673 for (uri, _path) in old_subfs_uris {
674 match subfs_utils::delete_subfs_record(agent, &uri).await {
675 Ok(_) => {
676 deleted_count += 1;
677 println!(" 🗑️ Deleted old subfs: {}", uri);
678 }
679 Err(e) => {
680 failed_count += 1;
681 eprintln!(" ⚠️ Failed to delete {}: {}", uri, e);
682 }
683 }
684 }
685
686 if failed_count > 0 {
687 eprintln!("⚠️ Cleanup completed with {} deleted, {} failed", deleted_count, failed_count);
688 } else {
689 println!("✅ Cleanup complete: {} old subfs records deleted", deleted_count);
690 }
691 }
692
693 // Upload settings if either flag is set
694 if directory_listing || spa_mode {
695 // Validate mutual exclusivity
696 if directory_listing && spa_mode {
697 return Err(miette::miette!("Cannot enable both --directory and --SPA modes"));
698 }
699
700 println!("\n⚙️ Uploading site settings...");
701
702 // Build settings record
703 let mut settings_builder = Settings::new();
704
705 if directory_listing {
706 settings_builder = settings_builder.directory_listing(Some(true));
707 println!(" • Directory listing: enabled");
708 }
709
710 if spa_mode {
711 settings_builder = settings_builder.spa_mode(Some(CowStr::from("index.html")));
712 println!(" • SPA mode: enabled (serving index.html for all routes)");
713 }
714
715 let settings_record = settings_builder.build();
716
717 // Upload settings record with same rkey as site
718 let rkey = Rkey::new(&site_name).map_err(|e| miette::miette!("Invalid rkey: {}", e))?;
719 match agent.put_record(RecordKey::from(rkey), settings_record).await {
720 Ok(settings_output) => {
721 println!("✅ Settings uploaded: {}", settings_output.uri);
722 }
723 Err(e) => {
724 eprintln!("⚠️ Failed to upload settings: {}", e);
725 eprintln!(" Site was deployed successfully, but settings may need to be configured manually.");
726 }
727 }
728 }
729
730 Ok(())
731}
732
733/// Recursively build a Directory from a filesystem path
734/// current_path is the path from the root of the site (e.g., "" for root, "config" for config dir)
735fn build_directory<'a>(
736 agent: &'a Agent<impl jacquard::client::AgentSession + IdentityResolver + 'a>,
737 dir_path: &'a Path,
738 existing_blobs: &'a HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>,
739 current_path: String,
740 ignore_matcher: &'a ignore_patterns::IgnoreMatcher,
741 progress: &'a ProgressBar,
742) -> std::pin::Pin<Box<dyn std::future::Future<Output = miette::Result<(Directory<'static>, usize, usize)>> + 'a>>
743{
744 Box::pin(async move {
745 // Collect all directory entries first
746 let dir_entries: Vec<_> = std::fs::read_dir(dir_path)
747 .into_diagnostic()?
748 .collect::<Result<Vec<_>, _>>()
749 .into_diagnostic()?;
750
751 // Separate files and directories
752 let mut file_tasks = Vec::new();
753 let mut dir_tasks = Vec::new();
754
755 for entry in dir_entries {
756 let path = entry.path();
757 let name = entry.file_name();
758 let name_str = name.to_str()
759 .ok_or_else(|| miette::miette!("Invalid filename: {:?}", name))?
760 .to_string();
761
762 // Construct full path for ignore checking
763 let full_path = if current_path.is_empty() {
764 name_str.clone()
765 } else {
766 format!("{}/{}", current_path, name_str)
767 };
768
769 // Skip files/directories that match ignore patterns
770 if ignore_matcher.is_ignored(&full_path) || ignore_matcher.is_filename_ignored(&name_str) {
771 continue;
772 }
773
774 let metadata = entry.metadata().into_diagnostic()?;
775
776 if metadata.is_file() {
777 // Construct full path for this file (for blob map lookup)
778 let full_path = if current_path.is_empty() {
779 name_str.clone()
780 } else {
781 format!("{}/{}", current_path, name_str)
782 };
783 file_tasks.push((name_str, path, full_path));
784 } else if metadata.is_dir() {
785 dir_tasks.push((name_str, path));
786 }
787 }
788
789 // Process files concurrently with a limit of 2
790 let file_results: Vec<(Entry<'static>, bool)> = stream::iter(file_tasks)
791 .map(|(name, path, full_path)| async move {
792 let (file_node, reused) = process_file(agent, &path, &full_path, existing_blobs, progress).await?;
793 progress.inc(1);
794 let entry = Entry::new()
795 .name(CowStr::from(name))
796 .node(EntryNode::File(Box::new(file_node)))
797 .build();
798 Ok::<_, miette::Report>((entry, reused))
799 })
800 .buffer_unordered(MAX_CONCURRENT_UPLOADS)
801 .collect::<Vec<_>>()
802 .await
803 .into_iter()
804 .collect::<miette::Result<Vec<_>>>()?;
805
806 let mut file_entries = Vec::new();
807 let mut reused_count = 0;
808 let mut total_files = 0;
809
810 for (entry, reused) in file_results {
811 file_entries.push(entry);
812 total_files += 1;
813 if reused {
814 reused_count += 1;
815 }
816 }
817
818 // Process directories recursively (sequentially to avoid too much nesting)
819 let mut dir_entries = Vec::new();
820 for (name, path) in dir_tasks {
821 // Construct full path for subdirectory
822 let subdir_path = if current_path.is_empty() {
823 name.clone()
824 } else {
825 format!("{}/{}", current_path, name)
826 };
827 let (subdir, sub_total, sub_reused) = build_directory(agent, &path, existing_blobs, subdir_path, ignore_matcher, progress).await?;
828 dir_entries.push(Entry::new()
829 .name(CowStr::from(name))
830 .node(EntryNode::Directory(Box::new(subdir)))
831 .build());
832 total_files += sub_total;
833 reused_count += sub_reused;
834 }
835
836 // Combine file and directory entries
837 let mut entries = file_entries;
838 entries.extend(dir_entries);
839
840 let directory = Directory::new()
841 .r#type(CowStr::from("directory"))
842 .entries(entries)
843 .build();
844
845 Ok((directory, total_files, reused_count))
846 })
847}
848
849/// Process a single file: gzip -> base64 -> upload blob (or reuse existing)
850/// Returns (File, reused: bool)
851/// file_path_key is the full path from the site root (e.g., "config/file.json") for blob map lookup
852///
853/// Special handling: _redirects files are NOT compressed (uploaded as-is)
854async fn process_file(
855 agent: &Agent<impl jacquard::client::AgentSession + IdentityResolver>,
856 file_path: &Path,
857 file_path_key: &str,
858 existing_blobs: &HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>,
859 progress: &ProgressBar,
860) -> miette::Result<(File<'static>, bool)>
861{
862 // Read file
863 let file_data = std::fs::read(file_path).into_diagnostic()?;
864
865 // Detect original MIME type
866 let original_mime = mime_guess::from_path(file_path)
867 .first_or_octet_stream()
868 .to_string();
869
870 // Check if this is a _redirects file (don't compress it)
871 let is_redirects_file = file_path.file_name()
872 .and_then(|n| n.to_str())
873 .map(|n| n == "_redirects")
874 .unwrap_or(false);
875
876 let (upload_bytes, encoding, is_base64) = if is_redirects_file {
877 // Don't compress _redirects - upload as-is
878 (file_data.clone(), None, false)
879 } else {
880 // Gzip compress
881 let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
882 encoder.write_all(&file_data).into_diagnostic()?;
883 let gzipped = encoder.finish().into_diagnostic()?;
884
885 // Base64 encode the gzipped data
886 let base64_bytes = base64::prelude::BASE64_STANDARD.encode(&gzipped).into_bytes();
887 (base64_bytes, Some("gzip"), true)
888 };
889
890 // Compute CID for this file
891 let file_cid = cid::compute_cid(&upload_bytes);
892
893 // Check if we have an existing blob with the same CID
894 let existing_blob = existing_blobs.get(file_path_key);
895
896 if let Some((existing_blob_ref, existing_cid)) = existing_blob {
897 if existing_cid == &file_cid {
898 // CIDs match - reuse existing blob
899 progress.set_message(format!("✓ Reused {}", file_path_key));
900 let mut file_builder = File::new()
901 .r#type(CowStr::from("file"))
902 .blob(existing_blob_ref.clone())
903 .mime_type(CowStr::from(original_mime));
904
905 if let Some(enc) = encoding {
906 file_builder = file_builder.encoding(CowStr::from(enc));
907 }
908 if is_base64 {
909 file_builder = file_builder.base64(true);
910 }
911
912 return Ok((file_builder.build(), true));
913 }
914 }
915
916 // File is new or changed - upload it
917 let mime_type = if is_redirects_file {
918 MimeType::new_static("text/plain")
919 } else {
920 MimeType::new_static("application/octet-stream")
921 };
922
923 // Format file size nicely
924 let size_str = if upload_bytes.len() < 1024 {
925 format!("{} B", upload_bytes.len())
926 } else if upload_bytes.len() < 1024 * 1024 {
927 format!("{:.1} KB", upload_bytes.len() as f64 / 1024.0)
928 } else {
929 format!("{:.1} MB", upload_bytes.len() as f64 / (1024.0 * 1024.0))
930 };
931
932 progress.set_message(format!("↑ Uploading {} ({})", file_path_key, size_str));
933 let blob = agent.upload_blob(upload_bytes, mime_type).await?;
934 progress.set_message(format!("✓ Uploaded {}", file_path_key));
935
936 let mut file_builder = File::new()
937 .r#type(CowStr::from("file"))
938 .blob(blob)
939 .mime_type(CowStr::from(original_mime));
940
941 if let Some(enc) = encoding {
942 file_builder = file_builder.encoding(CowStr::from(enc));
943 }
944 if is_base64 {
945 file_builder = file_builder.base64(true);
946 }
947
948 Ok((file_builder.build(), false))
949}
950
951/// Convert fs::Directory to subfs::Directory
952/// They have the same structure, but different types
953fn convert_fs_dir_to_subfs_dir(fs_dir: wisp_lexicons::place_wisp::fs::Directory<'static>) -> wisp_lexicons::place_wisp::subfs::Directory<'static> {
954 use wisp_lexicons::place_wisp::subfs::{Directory as SubfsDirectory, Entry as SubfsEntry, EntryNode as SubfsEntryNode, File as SubfsFile};
955
956 let subfs_entries: Vec<SubfsEntry> = fs_dir.entries.into_iter().map(|entry| {
957 let node = match entry.node {
958 wisp_lexicons::place_wisp::fs::EntryNode::File(file) => {
959 SubfsEntryNode::File(Box::new(SubfsFile::new()
960 .r#type(file.r#type)
961 .blob(file.blob)
962 .encoding(file.encoding)
963 .mime_type(file.mime_type)
964 .base64(file.base64)
965 .build()))
966 }
967 wisp_lexicons::place_wisp::fs::EntryNode::Directory(dir) => {
968 SubfsEntryNode::Directory(Box::new(convert_fs_dir_to_subfs_dir(*dir)))
969 }
970 wisp_lexicons::place_wisp::fs::EntryNode::Subfs(subfs) => {
971 // Nested subfs in the directory we're converting
972 // Note: subfs::Subfs doesn't have the 'flat' field - that's only in fs::Subfs
973 SubfsEntryNode::Subfs(Box::new(wisp_lexicons::place_wisp::subfs::Subfs::new()
974 .r#type(subfs.r#type)
975 .subject(subfs.subject)
976 .build()))
977 }
978 wisp_lexicons::place_wisp::fs::EntryNode::Unknown(unknown) => {
979 SubfsEntryNode::Unknown(unknown)
980 }
981 };
982
983 SubfsEntry::new()
984 .name(entry.name)
985 .node(node)
986 .build()
987 }).collect();
988
989 SubfsDirectory::new()
990 .r#type(fs_dir.r#type)
991 .entries(subfs_entries)
992 .build()
993}
994