Monorepo for wisp.place — a static site hosting service built on top of the AT Protocol.
wisp.place
1mod cid;
2mod blob_map;
3mod metadata;
4mod download;
5mod pull;
6mod serve;
7mod subfs_utils;
8mod redirects;
9mod ignore_patterns;
10
11use clap::{Parser, Subcommand};
12use jacquard::CowStr;
13use jacquard::client::{Agent, FileAuthStore, AgentSessionExt, MemoryCredentialSession, AgentSession};
14use jacquard::oauth::client::OAuthClient;
15use jacquard::oauth::loopback::LoopbackConfig;
16use jacquard::prelude::IdentityResolver;
17use jacquard_common::types::string::{Datetime, Rkey, RecordKey, AtUri};
18use jacquard_common::types::blob::MimeType;
19use miette::IntoDiagnostic;
20use std::path::{Path, PathBuf};
21use std::collections::HashMap;
22use flate2::Compression;
23use flate2::write::GzEncoder;
24use std::io::Write;
25use base64::Engine;
26use futures::stream::{self, StreamExt};
27use indicatif::{ProgressBar, ProgressStyle, MultiProgress};
28
29use wisp_lexicons::place_wisp::fs::*;
30use wisp_lexicons::place_wisp::settings::*;
31
/// Maximum number of concurrent file uploads to the PDS
const MAX_CONCURRENT_UPLOADS: usize = 2;

// Sites exceeding either limit are still uploaded to the PDS, but wisp.place
// itself will refuse to serve them (the user is warned before upload).
/// Limits for caching on wisp.place (from @wisp/constants)
const MAX_FILE_COUNT: usize = 1000;
const MAX_SITE_SIZE: usize = 300 * 1024 * 1024; // 300MB
38
// NOTE: on clap derive structs, `///` doc comments become user-visible help
// text, so review notes below use plain `//` comments to avoid changing the
// CLI's --help output.
#[derive(Parser, Debug)]
#[command(author, version, about = "wisp.place CLI tool")]
struct Args {
    // When no subcommand is given, the top-level fields below act as an
    // implicit `deploy` invocation (legacy mode; see dispatch in `main`).
    #[command(subcommand)]
    command: Option<Commands>,

    // Deploy arguments (when no subcommand is specified)
    /// Handle (e.g., alice.bsky.social), DID, or PDS URL
    input: Option<CowStr<'static>>,

    /// Path to the directory containing your static site
    #[arg(short, long)]
    path: Option<PathBuf>,

    /// Site name (defaults to directory name)
    #[arg(short, long)]
    site: Option<String>,

    /// Path to auth store file
    #[arg(long)]
    store: Option<String>,

    /// App Password for authentication
    #[arg(long)]
    password: Option<CowStr<'static>>,

    /// Enable directory listing mode for paths without index files
    #[arg(long)]
    directory: bool,

    /// Enable SPA mode (serve index.html for all routes)
    #[arg(long)]
    spa: bool,

    /// Skip confirmation prompts (automatically accept warnings)
    #[arg(short = 'y', long)]
    yes: bool,
}
77
// NOTE: `///` doc comments on variants/fields are rendered as clap help text,
// so review notes here use plain `//` comments only.
#[derive(Subcommand, Debug)]
enum Commands {
    /// Deploy a static site to wisp.place (default command)
    Deploy {
        /// Handle (e.g., alice.bsky.social), DID, or PDS URL
        input: CowStr<'static>,

        /// Path to the directory containing your static site
        #[arg(short, long, default_value = ".")]
        path: PathBuf,

        /// Site name (defaults to directory name)
        #[arg(short, long)]
        site: Option<String>,

        /// Path to auth store file (will be created if missing, only used with OAuth)
        #[arg(long, default_value = "/tmp/wisp-oauth-session.json")]
        store: String,

        /// App Password for authentication (alternative to OAuth)
        #[arg(long)]
        password: Option<CowStr<'static>>,

        /// Enable directory listing mode for paths without index files
        #[arg(long)]
        directory: bool,

        /// Enable SPA mode (serve index.html for all routes)
        #[arg(long)]
        spa: bool,

        /// Skip confirmation prompts (automatically accept warnings)
        #[arg(short = 'y', long)]
        yes: bool,
    },
    /// Pull a site from the PDS to a local directory
    Pull {
        /// Handle (e.g., alice.bsky.social) or DID
        input: CowStr<'static>,

        /// Site name (record key)
        #[arg(short, long)]
        site: String,

        /// Output directory for the downloaded site
        #[arg(short, long, default_value = ".")]
        path: PathBuf,
    },
    /// Serve a site locally with real-time firehose updates
    Serve {
        /// Handle (e.g., alice.bsky.social) or DID
        input: CowStr<'static>,

        /// Site name (record key)
        #[arg(short, long)]
        site: String,

        /// Output directory for the site files
        #[arg(short, long, default_value = ".")]
        path: PathBuf,

        // Capital -P avoids colliding with -p (path).
        /// Port to serve on
        #[arg(short = 'P', long, default_value = "8080")]
        port: u16,
    },
}
144
145#[tokio::main]
146async fn main() -> miette::Result<()> {
147 let args = Args::parse();
148
149 let result = match args.command {
150 Some(Commands::Deploy { input, path, site, store, password, directory, spa, yes }) => {
151 // Dispatch to appropriate authentication method
152 if let Some(password) = password {
153 run_with_app_password(input, password, path, site, directory, spa, yes).await
154 } else {
155 run_with_oauth(input, store, path, site, directory, spa, yes).await
156 }
157 }
158 Some(Commands::Pull { input, site, path }) => {
159 pull::pull_site(input, CowStr::from(site), path).await
160 }
161 Some(Commands::Serve { input, site, path, port }) => {
162 serve::serve_site(input, CowStr::from(site), path, port).await
163 }
164 None => {
165 // Legacy mode: if input is provided, assume deploy command
166 if let Some(input) = args.input {
167 let path = args.path.unwrap_or_else(|| PathBuf::from("."));
168 let store = args.store.unwrap_or_else(|| "/tmp/wisp-oauth-session.json".to_string());
169
170 // Dispatch to appropriate authentication method
171 if let Some(password) = args.password {
172 run_with_app_password(input, password, path, args.site, args.directory, args.spa, args.yes).await
173 } else {
174 run_with_oauth(input, store, path, args.site, args.directory, args.spa, args.yes).await
175 }
176 } else {
177 // No command and no input, show help
178 use clap::CommandFactory;
179 Args::command().print_help().into_diagnostic()?;
180 Ok(())
181 }
182 }
183 };
184
185 // Force exit to avoid hanging on background tasks/connections
186 match result {
187 Ok(_) => std::process::exit(0),
188 Err(e) => {
189 eprintln!("{:?}", e);
190 std::process::exit(1)
191 }
192 }
193}
194
195/// Run deployment with app password authentication
196async fn run_with_app_password(
197 input: CowStr<'static>,
198 password: CowStr<'static>,
199 path: PathBuf,
200 site: Option<String>,
201 directory: bool,
202 spa: bool,
203 yes: bool,
204) -> miette::Result<()> {
205 let (session, auth) =
206 MemoryCredentialSession::authenticated(input, password, None, None).await?;
207 println!("Signed in as {}", auth.handle);
208
209 let agent: Agent<_> = Agent::from(session);
210 deploy_site(&agent, path, site, directory, spa, yes).await
211}
212
213/// Run deployment with OAuth authentication
214async fn run_with_oauth(
215 input: CowStr<'static>,
216 store: String,
217 path: PathBuf,
218 site: Option<String>,
219 directory: bool,
220 spa: bool,
221 yes: bool,
222) -> miette::Result<()> {
223 use jacquard::oauth::scopes::Scope;
224 use jacquard::oauth::atproto::AtprotoClientMetadata;
225 use jacquard::oauth::session::ClientData;
226 use url::Url;
227
228 // Request the necessary scopes for wisp.place (including settings)
229 let scopes = Scope::parse_multiple("atproto repo:place.wisp.fs repo:place.wisp.subfs repo:place.wisp.settings blob:*/*")
230 .map_err(|e| miette::miette!("Failed to parse scopes: {:?}", e))?;
231
232 // Create redirect URIs that match the loopback server (port 4000, path /oauth/callback)
233 let redirect_uris = vec![
234 Url::parse("http://127.0.0.1:4000/oauth/callback").into_diagnostic()?,
235 Url::parse("http://[::1]:4000/oauth/callback").into_diagnostic()?,
236 ];
237
238 // Create client metadata with matching redirect URIs and scopes
239 let client_data = ClientData {
240 keyset: None,
241 config: AtprotoClientMetadata::new_localhost(
242 Some(redirect_uris),
243 Some(scopes),
244 ),
245 };
246
247 let oauth = OAuthClient::new(FileAuthStore::new(&store), client_data);
248
249 let session = oauth
250 .login_with_local_server(input, Default::default(), LoopbackConfig::default())
251 .await?;
252
253 let agent: Agent<_> = Agent::from(session);
254 deploy_site(&agent, path, site, directory, spa, yes).await
255}
256
257/// Scan directory to count files and calculate total size
258/// Returns (file_count, total_size_bytes)
259fn scan_directory_stats(
260 dir_path: &Path,
261 ignore_matcher: &ignore_patterns::IgnoreMatcher,
262 current_path: String,
263) -> miette::Result<(usize, u64)> {
264 let mut file_count = 0;
265 let mut total_size = 0u64;
266
267 let dir_entries: Vec<_> = std::fs::read_dir(dir_path)
268 .into_diagnostic()?
269 .collect::<Result<Vec<_>, _>>()
270 .into_diagnostic()?;
271
272 for entry in dir_entries {
273 let path = entry.path();
274 let name = entry.file_name();
275 let name_str = name.to_str()
276 .ok_or_else(|| miette::miette!("Invalid filename: {:?}", name))?
277 .to_string();
278
279 let full_path = if current_path.is_empty() {
280 name_str.clone()
281 } else {
282 format!("{}/{}", current_path, name_str)
283 };
284
285 // Skip files/directories that match ignore patterns
286 if ignore_matcher.is_ignored(&full_path) || ignore_matcher.is_filename_ignored(&name_str) {
287 continue;
288 }
289
290 let metadata = entry.metadata().into_diagnostic()?;
291
292 if metadata.is_file() {
293 file_count += 1;
294 total_size += metadata.len();
295 } else if metadata.is_dir() {
296 let subdir_path = if current_path.is_empty() {
297 name_str
298 } else {
299 format!("{}/{}", current_path, name_str)
300 };
301 let (sub_count, sub_size) = scan_directory_stats(&path, ignore_matcher, subdir_path)?;
302 file_count += sub_count;
303 total_size += sub_size;
304 }
305 }
306
307 Ok((file_count, total_size))
308}
309
/// Deploy the site using the provided agent
///
/// High-level flow:
///  1. Scan the local tree (honoring ignore patterns) and warn/prompt if the
///     site exceeds wisp.place caching limits (`MAX_FILE_COUNT` / `MAX_SITE_SIZE`).
///  2. Fetch the existing `place.wisp.fs` manifest (if any) so unchanged
///     files can reuse their already-uploaded blobs.
///  3. Upload files and build the new manifest; if it is too large, repeatedly
///     split the biggest subtree out into `place.wisp.subfs` records (chunking
///     oversized directories) until the main manifest fits.
///  4. Put the final record, delete stale subfs records from the previous
///     deploy, and optionally upload a `place.wisp.settings` record for the
///     --directory / --spa flags.
async fn deploy_site(
    agent: &Agent<impl jacquard::client::AgentSession + IdentityResolver>,
    path: PathBuf,
    site: Option<String>,
    directory_listing: bool,
    spa_mode: bool,
    skip_prompts: bool,
) -> miette::Result<()> {
    // Verify the path exists
    if !path.exists() {
        return Err(miette::miette!("Path does not exist: {}", path.display()));
    }

    // Get site name (defaults to the directory's own name, then "site")
    let site_name = site.unwrap_or_else(|| {
        path
            .file_name()
            .and_then(|n| n.to_str())
            .unwrap_or("site")
            .to_string()
    });

    println!("Deploying site '{}'...", site_name);

    // Scan directory to check file count and size
    let ignore_matcher = ignore_patterns::IgnoreMatcher::new(&path)?;
    let (file_count, total_size) = scan_directory_stats(&path, &ignore_matcher, String::new())?;

    let size_mb = total_size as f64 / (1024.0 * 1024.0);
    println!("Scanned: {} files, {:.1} MB total", file_count, size_mb);

    // Check if limits are exceeded
    let exceeds_file_count = file_count > MAX_FILE_COUNT;
    let exceeds_size = total_size > MAX_SITE_SIZE as u64;

    if exceeds_file_count || exceeds_size {
        println!("\n⚠️ Warning: Your site exceeds wisp.place caching limits:");

        if exceeds_file_count {
            println!(" • File count: {} (limit: {})", file_count, MAX_FILE_COUNT);
        }

        if exceeds_size {
            let size_mb = total_size as f64 / (1024.0 * 1024.0);
            let limit_mb = MAX_SITE_SIZE as f64 / (1024.0 * 1024.0);
            println!(" • Total size: {:.1} MB (limit: {:.0} MB)", size_mb, limit_mb);
        }

        println!("\nwisp.place will NOT serve your site if you proceed.");
        println!("Your site will be uploaded to your PDS, but will only be accessible via:");
        println!(" • wisp-cli serve (local hosting)");
        println!(" • Other hosting services with more generous limits");

        if !skip_prompts {
            // Prompt for confirmation
            use std::io::{self, Write};
            print!("\nDo you want to upload anyway? (y/N): ");
            io::stdout().flush().into_diagnostic()?;

            let mut input = String::new();
            io::stdin().read_line(&mut input).into_diagnostic()?;
            let input = input.trim().to_lowercase();

            if input != "y" && input != "yes" {
                println!("Upload cancelled.");
                return Ok(());
            }
        } else {
            println!("\nSkipping confirmation (--yes flag set).");
        }

        println!("\nProceeding with upload...\n");
    }

    // Try to fetch existing manifest for incremental updates.
    // Maps full file path -> (existing blob ref, CID string); any fetch
    // failure degrades gracefully to a full upload.
    let (existing_blob_map, old_subfs_uris): (HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>, Vec<(String, String)>) = {
        use jacquard_common::types::string::AtUri;

        // Get the DID for this session
        let session_info = agent.session_info().await;
        if let Some((did, _)) = session_info {
            // Construct the AT URI for the record
            let uri_string = format!("at://{}/place.wisp.fs/{}", did, site_name);
            if let Ok(uri) = AtUri::new(&uri_string) {
                match agent.get_record::<Fs>(&uri).await {
                    Ok(response) => {
                        match response.into_output() {
                            Ok(record_output) => {
                                let existing_manifest = record_output.value;
                                let mut blob_map = blob_map::extract_blob_map(&existing_manifest.root);
                                println!("Found existing manifest with {} files in main record", blob_map.len());

                                // Extract subfs URIs from main record
                                let subfs_uris = subfs_utils::extract_subfs_uris(&existing_manifest.root, String::new());

                                if !subfs_uris.is_empty() {
                                    println!("Found {} subfs records, fetching for blob reuse...", subfs_uris.len());

                                    // Merge blob maps from all subfs records
                                    match subfs_utils::merge_subfs_blob_maps(agent, subfs_uris.clone(), &mut blob_map).await {
                                        Ok(merged_count) => {
                                            println!("Total blob map: {} files (main + {} from subfs)", blob_map.len(), merged_count);
                                        }
                                        Err(e) => {
                                            // Best-effort: missing subfs blobs just get re-uploaded.
                                            eprintln!("⚠️ Failed to merge some subfs blob maps: {}", e);
                                        }
                                    }

                                    (blob_map, subfs_uris)
                                } else {
                                    (blob_map, Vec::new())
                                }
                            }
                            Err(_) => {
                                println!("No existing manifest found, uploading all files...");
                                (HashMap::new(), Vec::new())
                            }
                        }
                    }
                    Err(_) => {
                        // Record doesn't exist yet - this is a new site
                        println!("No existing manifest found, uploading all files...");
                        (HashMap::new(), Vec::new())
                    }
                }
            } else {
                println!("No existing manifest found (invalid URI), uploading all files...");
                (HashMap::new(), Vec::new())
            }
        } else {
            println!("No existing manifest found (could not get DID), uploading all files...");
            (HashMap::new(), Vec::new())
        }
    };

    // Create progress tracking (spinner style since we don't know total count upfront)
    let multi_progress = MultiProgress::new();
    let progress = multi_progress.add(ProgressBar::new_spinner());
    progress.set_style(
        ProgressStyle::default_spinner()
            .template("[{elapsed_precise}] {spinner:.cyan} {pos} files {msg}")
            .into_diagnostic()?
            .tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈ ")
    );
    progress.set_message("Scanning files...");
    progress.enable_steady_tick(std::time::Duration::from_millis(100));

    // Upload (or reuse) every file and build the in-memory manifest tree.
    let (root_dir, total_files, reused_count) = build_directory(agent, &path, &existing_blob_map, String::new(), &ignore_matcher, &progress).await?;
    let uploaded_count = total_files - reused_count;

    progress.finish_with_message(format!("✓ {} files ({} uploaded, {} reused)", total_files, uploaded_count, reused_count));

    // Check if we need to split into subfs records
    const MAX_MANIFEST_SIZE: usize = 140 * 1024; // 140KB (PDS limit is 150KB)
    const FILE_COUNT_THRESHOLD: usize = 250; // Start splitting at this many files
    const TARGET_FILE_COUNT: usize = 200; // Keep main manifest under this

    let mut working_directory = root_dir;
    let mut current_file_count = total_files;
    // (uri, site-relative path) of every subfs record created this deploy.
    let mut new_subfs_uris: Vec<(String, String)> = Vec::new();

    // Estimate initial manifest size
    let mut manifest_size = subfs_utils::estimate_directory_size(&working_directory);

    if total_files >= FILE_COUNT_THRESHOLD || manifest_size > MAX_MANIFEST_SIZE {
        println!("\n⚠️ Large site detected ({} files, {:.1}KB manifest), splitting into subfs records...",
            total_files, manifest_size as f64 / 1024.0);

        let mut attempts = 0;
        const MAX_SPLIT_ATTEMPTS: usize = 50;

        // Greedy loop: each iteration moves the largest remaining directory
        // out of the main manifest into its own subfs record.
        while (manifest_size > MAX_MANIFEST_SIZE || current_file_count > TARGET_FILE_COUNT) && attempts < MAX_SPLIT_ATTEMPTS {
            attempts += 1;

            // Find large directories to split
            let directories = subfs_utils::find_large_directories(&working_directory, String::new());

            if let Some(largest_dir) = directories.first() {
                println!(" Split #{}: {} ({} files, {:.1}KB)",
                    attempts, largest_dir.path, largest_dir.file_count, largest_dir.size as f64 / 1024.0);

                // Check if this directory is itself too large for a single subfs record
                const MAX_SUBFS_SIZE: usize = 75 * 1024; // 75KB soft limit for safety
                let mut subfs_uri = String::new();

                if largest_dir.size > MAX_SUBFS_SIZE {
                    // Need to split this directory into multiple chunks
                    println!(" → Directory too large, splitting into chunks...");
                    let chunks = subfs_utils::split_directory_into_chunks(&largest_dir.directory, MAX_SUBFS_SIZE);
                    println!(" → Created {} chunks", chunks.len());

                    // Upload each chunk as a subfs record
                    let mut chunk_uris = Vec::new();
                    for (i, chunk) in chunks.iter().enumerate() {
                        use jacquard_common::types::string::Tid;
                        // TID record keys keep chunk rkeys unique per deploy.
                        let chunk_tid = Tid::now_0();
                        let chunk_rkey = chunk_tid.to_string();

                        let chunk_file_count = subfs_utils::count_files_in_directory(chunk);
                        let chunk_size = subfs_utils::estimate_directory_size(chunk);

                        let chunk_manifest = wisp_lexicons::place_wisp::subfs::SubfsRecord::new()
                            .root(convert_fs_dir_to_subfs_dir(chunk.clone()))
                            .file_count(Some(chunk_file_count as i64))
                            .created_at(Datetime::now())
                            .build();

                        println!(" → Uploading chunk {}/{} ({} files, {:.1}KB)...",
                            i + 1, chunks.len(), chunk_file_count, chunk_size as f64 / 1024.0);

                        let chunk_output = agent.put_record(
                            RecordKey::from(Rkey::new(&chunk_rkey).into_diagnostic()?),
                            chunk_manifest
                        ).await.into_diagnostic()?;

                        let chunk_uri = chunk_output.uri.to_string();
                        // "path#index" marks chunk records for later cleanup.
                        chunk_uris.push((chunk_uri.clone(), format!("{}#{}", largest_dir.path, i)));
                        new_subfs_uris.push((chunk_uri.clone(), format!("{}#{}", largest_dir.path, i)));
                    }

                    // Create a parent subfs record that references all chunks
                    // Each chunk reference MUST have flat: true to merge chunk contents
                    println!(" → Creating parent subfs with {} chunk references...", chunk_uris.len());
                    use jacquard_common::CowStr;
                    use wisp_lexicons::place_wisp::fs::{Subfs};

                    // Convert to fs::Subfs (which has the 'flat' field) instead of subfs::Subfs
                    let parent_entries_fs: Vec<Entry> = chunk_uris.iter().enumerate().map(|(i, (uri, _))| {
                        let uri_string = uri.clone();
                        let at_uri = AtUri::new_cow(CowStr::from(uri_string)).expect("valid URI");
                        Entry::new()
                            .name(CowStr::from(format!("chunk{}", i)))
                            .node(EntryNode::Subfs(Box::new(
                                Subfs::new()
                                    .r#type(CowStr::from("subfs"))
                                    .subject(at_uri)
                                    .flat(Some(true)) // EXPLICITLY TRUE - merge chunk contents
                                    .build()
                            )))
                            .build()
                    }).collect();

                    let parent_root_fs = Directory::new()
                        .r#type(CowStr::from("directory"))
                        .entries(parent_entries_fs)
                        .build();

                    // Convert to subfs::Directory for the parent subfs record
                    let parent_root_subfs = convert_fs_dir_to_subfs_dir(parent_root_fs);

                    use jacquard_common::types::string::Tid;
                    let parent_tid = Tid::now_0();
                    let parent_rkey = parent_tid.to_string();

                    let parent_manifest = wisp_lexicons::place_wisp::subfs::SubfsRecord::new()
                        .root(parent_root_subfs)
                        .file_count(Some(largest_dir.file_count as i64))
                        .created_at(Datetime::now())
                        .build();

                    let parent_output = agent.put_record(
                        RecordKey::from(Rkey::new(&parent_rkey).into_diagnostic()?),
                        parent_manifest
                    ).await.into_diagnostic()?;

                    subfs_uri = parent_output.uri.to_string();
                    println!(" ✅ Created parent subfs with chunks (flat=true on each chunk): {}", subfs_uri);
                } else {
                    // Directory fits in a single subfs record
                    use jacquard_common::types::string::Tid;
                    let subfs_tid = Tid::now_0();
                    let subfs_rkey = subfs_tid.to_string();

                    let subfs_manifest = wisp_lexicons::place_wisp::subfs::SubfsRecord::new()
                        .root(convert_fs_dir_to_subfs_dir(largest_dir.directory.clone()))
                        .file_count(Some(largest_dir.file_count as i64))
                        .created_at(Datetime::now())
                        .build();

                    // Upload subfs record
                    let subfs_output = agent.put_record(
                        RecordKey::from(Rkey::new(&subfs_rkey).into_diagnostic()?),
                        subfs_manifest
                    ).await.into_diagnostic()?;

                    subfs_uri = subfs_output.uri.to_string();
                    println!(" ✅ Created subfs: {}", subfs_uri);
                }

                // Replace directory with subfs node (flat: false to preserve directory structure)
                working_directory = subfs_utils::replace_directory_with_subfs(
                    working_directory,
                    &largest_dir.path,
                    &subfs_uri,
                    false // Preserve directory - the chunks inside have flat=true
                )?;

                new_subfs_uris.push((subfs_uri, largest_dir.path.clone()));
                current_file_count -= largest_dir.file_count;

                // Recalculate manifest size
                manifest_size = subfs_utils::estimate_directory_size(&working_directory);
                println!(" → Manifest now {:.1}KB with {} files ({} subfs total)",
                    manifest_size as f64 / 1024.0, current_file_count, new_subfs_uris.len());

                if manifest_size <= MAX_MANIFEST_SIZE && current_file_count <= TARGET_FILE_COUNT {
                    println!("✅ Manifest now fits within limits");
                    break;
                }
            } else {
                println!(" No more subdirectories to split - stopping");
                break;
            }
        }

        if attempts >= MAX_SPLIT_ATTEMPTS {
            return Err(miette::miette!(
                "Exceeded maximum split attempts ({}). Manifest still too large: {:.1}KB with {} files",
                MAX_SPLIT_ATTEMPTS,
                manifest_size as f64 / 1024.0,
                current_file_count
            ));
        }

        println!("✅ Split complete: {} subfs records, {} files in main manifest, {:.1}KB",
            new_subfs_uris.len(), current_file_count, manifest_size as f64 / 1024.0);
    } else {
        println!("Manifest created ({} files, {:.1}KB) - no splitting needed",
            total_files, manifest_size as f64 / 1024.0);
    }

    // Create the final Fs record
    let fs_record = Fs::new()
        .site(CowStr::from(site_name.clone()))
        .root(working_directory)
        .file_count(current_file_count as i64)
        .created_at(Datetime::now())
        .build();

    // Use site name as the record key
    let rkey = Rkey::new(&site_name).map_err(|e| miette::miette!("Invalid rkey: {}", e))?;
    let output = agent.put_record(RecordKey::from(rkey), fs_record).await?;

    // Extract DID from the AT URI (format: at://did:plc:xxx/collection/rkey)
    let uri_str = output.uri.to_string();
    let did = uri_str
        .strip_prefix("at://")
        .and_then(|s| s.split('/').next())
        .ok_or_else(|| miette::miette!("Failed to parse DID from URI"))?;

    println!("\n✓ Deployed site '{}': {}", site_name, output.uri);
    println!(" Total files: {} ({} reused, {} uploaded)", total_files, reused_count, uploaded_count);
    println!(" Available at: https://sites.wisp.place/{}/{}", did, site_name);

    // Clean up old subfs records (only done AFTER the new manifest is live,
    // so a failed deploy never deletes records the old manifest still needs)
    if !old_subfs_uris.is_empty() {
        println!("\nCleaning up {} old subfs records...", old_subfs_uris.len());

        let mut deleted_count = 0;
        let mut failed_count = 0;

        for (uri, _path) in old_subfs_uris {
            match subfs_utils::delete_subfs_record(agent, &uri).await {
                Ok(_) => {
                    deleted_count += 1;
                    println!(" 🗑️ Deleted old subfs: {}", uri);
                }
                Err(e) => {
                    // Non-fatal: stale records only waste space on the PDS.
                    failed_count += 1;
                    eprintln!(" ⚠️ Failed to delete {}: {}", uri, e);
                }
            }
        }

        if failed_count > 0 {
            eprintln!("⚠️ Cleanup completed with {} deleted, {} failed", deleted_count, failed_count);
        } else {
            println!("✅ Cleanup complete: {} old subfs records deleted", deleted_count);
        }
    }

    // Upload settings if either flag is set
    if directory_listing || spa_mode {
        // Validate mutual exclusivity
        if directory_listing && spa_mode {
            return Err(miette::miette!("Cannot enable both --directory and --SPA modes"));
        }

        println!("\n⚙️ Uploading site settings...");

        // Build settings record
        let mut settings_builder = Settings::new();

        if directory_listing {
            settings_builder = settings_builder.directory_listing(Some(true));
            println!(" • Directory listing: enabled");
        }

        if spa_mode {
            settings_builder = settings_builder.spa_mode(Some(CowStr::from("index.html")));
            println!(" • SPA mode: enabled (serving index.html for all routes)");
        }

        let settings_record = settings_builder.build();

        // Upload settings record with same rkey as site
        // (same rkey, different collection: place.wisp.settings vs place.wisp.fs)
        let rkey = Rkey::new(&site_name).map_err(|e| miette::miette!("Invalid rkey: {}", e))?;
        match agent.put_record(RecordKey::from(rkey), settings_record).await {
            Ok(settings_output) => {
                println!("✅ Settings uploaded: {}", settings_output.uri);
            }
            Err(e) => {
                // Non-fatal: the site itself deployed; only settings failed.
                eprintln!("⚠️ Failed to upload settings: {}", e);
                eprintln!(" Site was deployed successfully, but settings may need to be configured manually.");
            }
        }
    }

    Ok(())
}
731
732/// Recursively build a Directory from a filesystem path
733/// current_path is the path from the root of the site (e.g., "" for root, "config" for config dir)
734fn build_directory<'a>(
735 agent: &'a Agent<impl jacquard::client::AgentSession + IdentityResolver + 'a>,
736 dir_path: &'a Path,
737 existing_blobs: &'a HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>,
738 current_path: String,
739 ignore_matcher: &'a ignore_patterns::IgnoreMatcher,
740 progress: &'a ProgressBar,
741) -> std::pin::Pin<Box<dyn std::future::Future<Output = miette::Result<(Directory<'static>, usize, usize)>> + 'a>>
742{
743 Box::pin(async move {
744 // Collect all directory entries first
745 let dir_entries: Vec<_> = std::fs::read_dir(dir_path)
746 .into_diagnostic()?
747 .collect::<Result<Vec<_>, _>>()
748 .into_diagnostic()?;
749
750 // Separate files and directories
751 let mut file_tasks = Vec::new();
752 let mut dir_tasks = Vec::new();
753
754 for entry in dir_entries {
755 let path = entry.path();
756 let name = entry.file_name();
757 let name_str = name.to_str()
758 .ok_or_else(|| miette::miette!("Invalid filename: {:?}", name))?
759 .to_string();
760
761 // Construct full path for ignore checking
762 let full_path = if current_path.is_empty() {
763 name_str.clone()
764 } else {
765 format!("{}/{}", current_path, name_str)
766 };
767
768 // Skip files/directories that match ignore patterns
769 if ignore_matcher.is_ignored(&full_path) || ignore_matcher.is_filename_ignored(&name_str) {
770 continue;
771 }
772
773 let metadata = entry.metadata().into_diagnostic()?;
774
775 if metadata.is_file() {
776 // Construct full path for this file (for blob map lookup)
777 let full_path = if current_path.is_empty() {
778 name_str.clone()
779 } else {
780 format!("{}/{}", current_path, name_str)
781 };
782 file_tasks.push((name_str, path, full_path));
783 } else if metadata.is_dir() {
784 dir_tasks.push((name_str, path));
785 }
786 }
787
788 // Process files concurrently with a limit of 2
789 let file_results: Vec<(Entry<'static>, bool)> = stream::iter(file_tasks)
790 .map(|(name, path, full_path)| async move {
791 let (file_node, reused) = process_file(agent, &path, &full_path, existing_blobs, progress).await?;
792 progress.inc(1);
793 let entry = Entry::new()
794 .name(CowStr::from(name))
795 .node(EntryNode::File(Box::new(file_node)))
796 .build();
797 Ok::<_, miette::Report>((entry, reused))
798 })
799 .buffer_unordered(MAX_CONCURRENT_UPLOADS)
800 .collect::<Vec<_>>()
801 .await
802 .into_iter()
803 .collect::<miette::Result<Vec<_>>>()?;
804
805 let mut file_entries = Vec::new();
806 let mut reused_count = 0;
807 let mut total_files = 0;
808
809 for (entry, reused) in file_results {
810 file_entries.push(entry);
811 total_files += 1;
812 if reused {
813 reused_count += 1;
814 }
815 }
816
817 // Process directories recursively (sequentially to avoid too much nesting)
818 let mut dir_entries = Vec::new();
819 for (name, path) in dir_tasks {
820 // Construct full path for subdirectory
821 let subdir_path = if current_path.is_empty() {
822 name.clone()
823 } else {
824 format!("{}/{}", current_path, name)
825 };
826 let (subdir, sub_total, sub_reused) = build_directory(agent, &path, existing_blobs, subdir_path, ignore_matcher, progress).await?;
827 dir_entries.push(Entry::new()
828 .name(CowStr::from(name))
829 .node(EntryNode::Directory(Box::new(subdir)))
830 .build());
831 total_files += sub_total;
832 reused_count += sub_reused;
833 }
834
835 // Combine file and directory entries
836 let mut entries = file_entries;
837 entries.extend(dir_entries);
838
839 let directory = Directory::new()
840 .r#type(CowStr::from("directory"))
841 .entries(entries)
842 .build();
843
844 Ok((directory, total_files, reused_count))
845 })
846}
847
848/// Process a single file: gzip -> base64 -> upload blob (or reuse existing)
849/// Returns (File, reused: bool)
850/// file_path_key is the full path from the site root (e.g., "config/file.json") for blob map lookup
851///
852/// Special handling: _redirects files are NOT compressed (uploaded as-is)
853async fn process_file(
854 agent: &Agent<impl jacquard::client::AgentSession + IdentityResolver>,
855 file_path: &Path,
856 file_path_key: &str,
857 existing_blobs: &HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>,
858 progress: &ProgressBar,
859) -> miette::Result<(File<'static>, bool)>
860{
861 // Read file
862 let file_data = std::fs::read(file_path).into_diagnostic()?;
863
864 // Detect original MIME type
865 let original_mime = mime_guess::from_path(file_path)
866 .first_or_octet_stream()
867 .to_string();
868
869 // Check if this is a _redirects file (don't compress it)
870 let is_redirects_file = file_path.file_name()
871 .and_then(|n| n.to_str())
872 .map(|n| n == "_redirects")
873 .unwrap_or(false);
874
875 let (upload_bytes, encoding, is_base64) = if is_redirects_file {
876 // Don't compress _redirects - upload as-is
877 (file_data.clone(), None, false)
878 } else {
879 // Gzip compress
880 let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
881 encoder.write_all(&file_data).into_diagnostic()?;
882 let gzipped = encoder.finish().into_diagnostic()?;
883
884 // Base64 encode the gzipped data
885 let base64_bytes = base64::prelude::BASE64_STANDARD.encode(&gzipped).into_bytes();
886 (base64_bytes, Some("gzip"), true)
887 };
888
889 // Compute CID for this file
890 let file_cid = cid::compute_cid(&upload_bytes);
891
892 // Check if we have an existing blob with the same CID
893 let existing_blob = existing_blobs.get(file_path_key);
894
895 if let Some((existing_blob_ref, existing_cid)) = existing_blob {
896 if existing_cid == &file_cid {
897 // CIDs match - reuse existing blob
898 progress.set_message(format!("✓ Reused {}", file_path_key));
899 let mut file_builder = File::new()
900 .r#type(CowStr::from("file"))
901 .blob(existing_blob_ref.clone())
902 .mime_type(CowStr::from(original_mime));
903
904 if let Some(enc) = encoding {
905 file_builder = file_builder.encoding(CowStr::from(enc));
906 }
907 if is_base64 {
908 file_builder = file_builder.base64(true);
909 }
910
911 return Ok((file_builder.build(), true));
912 }
913 }
914
915 // File is new or changed - upload it
916 let mime_type = if is_redirects_file {
917 MimeType::new_static("text/plain")
918 } else {
919 MimeType::new_static("application/octet-stream")
920 };
921
922 // Format file size nicely
923 let size_str = if upload_bytes.len() < 1024 {
924 format!("{} B", upload_bytes.len())
925 } else if upload_bytes.len() < 1024 * 1024 {
926 format!("{:.1} KB", upload_bytes.len() as f64 / 1024.0)
927 } else {
928 format!("{:.1} MB", upload_bytes.len() as f64 / (1024.0 * 1024.0))
929 };
930
931 progress.set_message(format!("↑ Uploading {} ({})", file_path_key, size_str));
932 let blob = agent.upload_blob(upload_bytes, mime_type).await?;
933 progress.set_message(format!("✓ Uploaded {}", file_path_key));
934
935 let mut file_builder = File::new()
936 .r#type(CowStr::from("file"))
937 .blob(blob)
938 .mime_type(CowStr::from(original_mime));
939
940 if let Some(enc) = encoding {
941 file_builder = file_builder.encoding(CowStr::from(enc));
942 }
943 if is_base64 {
944 file_builder = file_builder.base64(true);
945 }
946
947 Ok((file_builder.build(), false))
948}
949
950/// Convert fs::Directory to subfs::Directory
951/// They have the same structure, but different types
952fn convert_fs_dir_to_subfs_dir(fs_dir: wisp_lexicons::place_wisp::fs::Directory<'static>) -> wisp_lexicons::place_wisp::subfs::Directory<'static> {
953 use wisp_lexicons::place_wisp::subfs::{Directory as SubfsDirectory, Entry as SubfsEntry, EntryNode as SubfsEntryNode, File as SubfsFile};
954
955 let subfs_entries: Vec<SubfsEntry> = fs_dir.entries.into_iter().map(|entry| {
956 let node = match entry.node {
957 wisp_lexicons::place_wisp::fs::EntryNode::File(file) => {
958 SubfsEntryNode::File(Box::new(SubfsFile::new()
959 .r#type(file.r#type)
960 .blob(file.blob)
961 .encoding(file.encoding)
962 .mime_type(file.mime_type)
963 .base64(file.base64)
964 .build()))
965 }
966 wisp_lexicons::place_wisp::fs::EntryNode::Directory(dir) => {
967 SubfsEntryNode::Directory(Box::new(convert_fs_dir_to_subfs_dir(*dir)))
968 }
969 wisp_lexicons::place_wisp::fs::EntryNode::Subfs(subfs) => {
970 // Nested subfs in the directory we're converting
971 // Note: subfs::Subfs doesn't have the 'flat' field - that's only in fs::Subfs
972 SubfsEntryNode::Subfs(Box::new(wisp_lexicons::place_wisp::subfs::Subfs::new()
973 .r#type(subfs.r#type)
974 .subject(subfs.subject)
975 .build()))
976 }
977 wisp_lexicons::place_wisp::fs::EntryNode::Unknown(unknown) => {
978 SubfsEntryNode::Unknown(unknown)
979 }
980 };
981
982 SubfsEntry::new()
983 .name(entry.name)
984 .node(node)
985 .build()
986 }).collect();
987
988 SubfsDirectory::new()
989 .r#type(fs_dir.r#type)
990 .entries(subfs_entries)
991 .build()
992}
993