+42
-51
cli/Cargo.lock
+42
-51
cli/Cargo.lock
···
175
175
176
176
[[package]]
177
177
name = "axum"
178
-
version = "0.7.9"
178
+
version = "0.8.7"
179
179
source = "registry+https://github.com/rust-lang/crates.io-index"
180
-
checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f"
180
+
checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425"
181
181
dependencies = [
182
-
"async-trait",
183
182
"axum-core",
184
183
"bytes",
184
+
"form_urlencoded",
185
185
"futures-util",
186
186
"http",
187
187
"http-body",
···
194
194
"mime",
195
195
"percent-encoding",
196
196
"pin-project-lite",
197
-
"rustversion",
198
-
"serde",
197
+
"serde_core",
199
198
"serde_json",
200
199
"serde_path_to_error",
201
200
"serde_urlencoded",
202
201
"sync_wrapper",
203
202
"tokio",
204
-
"tower 0.5.2",
203
+
"tower",
205
204
"tower-layer",
206
205
"tower-service",
207
206
"tracing",
···
209
208
210
209
[[package]]
211
210
name = "axum-core"
212
-
version = "0.4.5"
211
+
version = "0.5.5"
213
212
source = "registry+https://github.com/rust-lang/crates.io-index"
214
-
checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199"
213
+
checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22"
215
214
dependencies = [
216
-
"async-trait",
217
215
"bytes",
218
-
"futures-util",
216
+
"futures-core",
219
217
"http",
220
218
"http-body",
221
219
"http-body-util",
222
220
"mime",
223
221
"pin-project-lite",
224
-
"rustversion",
225
222
"sync_wrapper",
226
223
"tower-layer",
227
224
"tower-service",
···
1792
1789
"miette",
1793
1790
"multibase",
1794
1791
"multihash",
1795
-
"n0-future",
1792
+
"n0-future 0.1.3",
1796
1793
"ouroboros",
1797
1794
"p256",
1798
1795
"rand 0.9.2",
···
2146
2143
2147
2144
[[package]]
2148
2145
name = "matchit"
2149
-
version = "0.7.3"
2146
+
version = "0.8.4"
2150
2147
source = "registry+https://github.com/rust-lang/crates.io-index"
2151
-
checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94"
2148
+
checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3"
2152
2149
2153
2150
[[package]]
2154
2151
name = "memchr"
···
2289
2286
version = "0.1.3"
2290
2287
source = "registry+https://github.com/rust-lang/crates.io-index"
2291
2288
checksum = "7bb0e5d99e681ab3c938842b96fcb41bf8a7bb4bfdb11ccbd653a7e83e06c794"
2289
+
dependencies = [
2290
+
"cfg_aliases",
2291
+
"derive_more",
2292
+
"futures-buffered",
2293
+
"futures-lite",
2294
+
"futures-util",
2295
+
"js-sys",
2296
+
"pin-project",
2297
+
"send_wrapper",
2298
+
"tokio",
2299
+
"tokio-util",
2300
+
"wasm-bindgen",
2301
+
"wasm-bindgen-futures",
2302
+
"web-time",
2303
+
]
2304
+
2305
+
[[package]]
2306
+
name = "n0-future"
2307
+
version = "0.3.1"
2308
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2309
+
checksum = "8c0709ac8235ce13b82bc4d180ee3c42364b90c1a8a628c3422d991d75a728b5"
2292
2310
dependencies = [
2293
2311
"cfg_aliases",
2294
2312
"derive_more",
···
2991
3009
"tokio",
2992
3010
"tokio-rustls",
2993
3011
"tokio-util",
2994
-
"tower 0.5.2",
2995
-
"tower-http 0.6.6",
3012
+
"tower",
3013
+
"tower-http",
2996
3014
"tower-service",
2997
3015
"url",
2998
3016
"wasm-bindgen",
···
3973
3991
3974
3992
[[package]]
3975
3993
name = "tower"
3976
-
version = "0.4.13"
3977
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3978
-
checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c"
3979
-
dependencies = [
3980
-
"tower-layer",
3981
-
"tower-service",
3982
-
"tracing",
3983
-
]
3984
-
3985
-
[[package]]
3986
-
name = "tower"
3987
3994
version = "0.5.2"
3988
3995
source = "registry+https://github.com/rust-lang/crates.io-index"
3989
3996
checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9"
···
4000
4007
4001
4008
[[package]]
4002
4009
name = "tower-http"
4003
-
version = "0.5.2"
4010
+
version = "0.6.6"
4004
4011
source = "registry+https://github.com/rust-lang/crates.io-index"
4005
-
checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5"
4012
+
checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2"
4006
4013
dependencies = [
4007
4014
"async-compression",
4008
4015
"bitflags",
···
4014
4021
"http-body-util",
4015
4022
"http-range-header",
4016
4023
"httpdate",
4024
+
"iri-string",
4017
4025
"mime",
4018
4026
"mime_guess",
4019
4027
"percent-encoding",
4020
4028
"pin-project-lite",
4021
4029
"tokio",
4022
4030
"tokio-util",
4031
+
"tower",
4023
4032
"tower-layer",
4024
4033
"tower-service",
4025
4034
"tracing",
4026
-
]
4027
-
4028
-
[[package]]
4029
-
name = "tower-http"
4030
-
version = "0.6.6"
4031
-
source = "registry+https://github.com/rust-lang/crates.io-index"
4032
-
checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2"
4033
-
dependencies = [
4034
-
"bitflags",
4035
-
"bytes",
4036
-
"futures-util",
4037
-
"http",
4038
-
"http-body",
4039
-
"iri-string",
4040
-
"pin-project-lite",
4041
-
"tower 0.5.2",
4042
-
"tower-layer",
4043
-
"tower-service",
4044
4035
]
4045
4036
4046
4037
[[package]]
···
4913
4904
4914
4905
[[package]]
4915
4906
name = "wisp-cli"
4916
-
version = "0.2.0"
4907
+
version = "0.3.0"
4917
4908
dependencies = [
4918
4909
"axum",
4919
4910
"base64 0.22.1",
···
4933
4924
"mime_guess",
4934
4925
"multibase",
4935
4926
"multihash",
4936
-
"n0-future",
4927
+
"n0-future 0.3.1",
4937
4928
"reqwest",
4938
4929
"rustversion",
4939
4930
"serde",
···
4941
4932
"sha2",
4942
4933
"shellexpand",
4943
4934
"tokio",
4944
-
"tower 0.4.13",
4945
-
"tower-http 0.5.2",
4935
+
"tower",
4936
+
"tower-http",
4946
4937
"url",
4947
4938
"walkdir",
4948
4939
]
+5
-5
cli/Cargo.toml
+5
-5
cli/Cargo.toml
···
1
1
[package]
2
2
name = "wisp-cli"
3
-
version = "0.2.0"
3
+
version = "0.3.0"
4
4
edition = "2024"
5
5
6
6
[features]
···
33
33
multihash = "0.19.3"
34
34
multibase = "0.9"
35
35
sha2 = "0.10"
36
-
axum = "0.7"
37
-
tower-http = { version = "0.5", features = ["fs", "compression-gzip"] }
38
-
tower = "0.4"
39
-
n0-future = "0.1"
36
+
axum = "0.8.7"
37
+
tower-http = { version = "0.6.6", features = ["fs", "compression-gzip"] }
38
+
tower = "0.5.2"
39
+
n0-future = "0.3.1"
40
40
chrono = "0.4"
41
41
url = "2.5"
-51
cli/lexicons/place/wisp/fs.json
-51
cli/lexicons/place/wisp/fs.json
···
1
-
{
2
-
"lexicon": 1,
3
-
"id": "place.wisp.fs",
4
-
"defs": {
5
-
"main": {
6
-
"type": "record",
7
-
"description": "Virtual filesystem manifest for a Wisp site",
8
-
"record": {
9
-
"type": "object",
10
-
"required": ["site", "root", "createdAt"],
11
-
"properties": {
12
-
"site": { "type": "string" },
13
-
"root": { "type": "ref", "ref": "#directory" },
14
-
"fileCount": { "type": "integer", "minimum": 0, "maximum": 1000 },
15
-
"createdAt": { "type": "string", "format": "datetime" }
16
-
}
17
-
}
18
-
},
19
-
"file": {
20
-
"type": "object",
21
-
"required": ["type", "blob"],
22
-
"properties": {
23
-
"type": { "type": "string", "const": "file" },
24
-
"blob": { "type": "blob", "accept": ["*/*"], "maxSize": 1000000, "description": "Content blob ref" },
25
-
"encoding": { "type": "string", "enum": ["gzip"], "description": "Content encoding (e.g., gzip for compressed files)" },
26
-
"mimeType": { "type": "string", "description": "Original MIME type before compression" },
27
-
"base64": { "type": "boolean", "description": "True if blob content is base64-encoded (used to bypass PDS content sniffing)" }
28
-
}
29
-
},
30
-
"directory": {
31
-
"type": "object",
32
-
"required": ["type", "entries"],
33
-
"properties": {
34
-
"type": { "type": "string", "const": "directory" },
35
-
"entries": {
36
-
"type": "array",
37
-
"maxLength": 500,
38
-
"items": { "type": "ref", "ref": "#entry" }
39
-
}
40
-
}
41
-
},
42
-
"entry": {
43
-
"type": "object",
44
-
"required": ["name", "node"],
45
-
"properties": {
46
-
"name": { "type": "string", "maxLength": 255 },
47
-
"node": { "type": "union", "refs": ["#file", "#directory"] }
48
-
}
49
-
}
50
-
}
51
-
}
+5
-1
cli/src/blob_map.rs
+5
-1
cli/src/blob_map.rs
···
33
33
// BlobRef is an enum with Blob variant, which has a ref field (CidLink)
34
34
let blob_ref = &file_node.blob;
35
35
let cid_string = blob_ref.blob().r#ref.to_string();
36
-
36
+
37
37
// Store with full path (mirrors TypeScript implementation)
38
38
blob_map.insert(
39
39
full_path,
···
43
43
EntryNode::Directory(subdir) => {
44
44
let sub_map = extract_blob_map_recursive(subdir, full_path);
45
45
blob_map.extend(sub_map);
46
+
}
47
+
EntryNode::Subfs(_) => {
48
+
// Subfs nodes don't contain blobs directly - they reference other records
49
+
// Skip them in blob map extraction
46
50
}
47
51
EntryNode::Unknown(_) => {
48
52
// Skip unknown node types
+9
cli/src/lib.rs
+9
cli/src/lib.rs
+195
-12
cli/src/main.rs
+195
-12
cli/src/main.rs
···
6
6
mod download;
7
7
mod pull;
8
8
mod serve;
9
+
mod subfs_utils;
9
10
10
11
use clap::{Parser, Subcommand};
11
12
use jacquard::CowStr;
···
204
205
println!("Deploying site '{}'...", site_name);
205
206
206
207
// Try to fetch existing manifest for incremental updates
207
-
let existing_blob_map: HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)> = {
208
+
let (existing_blob_map, old_subfs_uris): (HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>, Vec<(String, String)>) = {
208
209
use jacquard_common::types::string::AtUri;
209
-
210
+
210
211
// Get the DID for this session
211
212
let session_info = agent.session_info().await;
212
213
if let Some((did, _)) = session_info {
···
218
219
match response.into_output() {
219
220
Ok(record_output) => {
220
221
let existing_manifest = record_output.value;
221
-
let blob_map = blob_map::extract_blob_map(&existing_manifest.root);
222
-
println!("Found existing manifest with {} files, checking for changes...", blob_map.len());
223
-
blob_map
222
+
let mut blob_map = blob_map::extract_blob_map(&existing_manifest.root);
223
+
println!("Found existing manifest with {} files in main record", blob_map.len());
224
+
225
+
// Extract subfs URIs from main record
226
+
let subfs_uris = subfs_utils::extract_subfs_uris(&existing_manifest.root, String::new());
227
+
228
+
if !subfs_uris.is_empty() {
229
+
println!("Found {} subfs records, fetching for blob reuse...", subfs_uris.len());
230
+
231
+
// Merge blob maps from all subfs records
232
+
match subfs_utils::merge_subfs_blob_maps(agent, subfs_uris.clone(), &mut blob_map).await {
233
+
Ok(merged_count) => {
234
+
println!("Total blob map: {} files (main + {} from subfs)", blob_map.len(), merged_count);
235
+
}
236
+
Err(e) => {
237
+
eprintln!("⚠️ Failed to merge some subfs blob maps: {}", e);
238
+
}
239
+
}
240
+
241
+
(blob_map, subfs_uris)
242
+
} else {
243
+
(blob_map, Vec::new())
244
+
}
224
245
}
225
246
Err(_) => {
226
247
println!("No existing manifest found, uploading all files...");
227
-
HashMap::new()
248
+
(HashMap::new(), Vec::new())
228
249
}
229
250
}
230
251
}
231
252
Err(_) => {
232
253
// Record doesn't exist yet - this is a new site
233
254
println!("No existing manifest found, uploading all files...");
234
-
HashMap::new()
255
+
(HashMap::new(), Vec::new())
235
256
}
236
257
}
237
258
} else {
238
259
println!("No existing manifest found (invalid URI), uploading all files...");
239
-
HashMap::new()
260
+
(HashMap::new(), Vec::new())
240
261
}
241
262
} else {
242
263
println!("No existing manifest found (could not get DID), uploading all files...");
243
-
HashMap::new()
264
+
(HashMap::new(), Vec::new())
244
265
}
245
266
};
246
267
···
248
269
let (root_dir, total_files, reused_count) = build_directory(agent, &path, &existing_blob_map, String::new()).await?;
249
270
let uploaded_count = total_files - reused_count;
250
271
251
-
// Create the Fs record
272
+
// Check if we need to split into subfs records
273
+
const MAX_MANIFEST_SIZE: usize = 140 * 1024; // 140KB (PDS limit is 150KB)
274
+
const FILE_COUNT_THRESHOLD: usize = 250; // Start splitting at this many files
275
+
const TARGET_FILE_COUNT: usize = 200; // Keep main manifest under this
276
+
277
+
let mut working_directory = root_dir;
278
+
let mut current_file_count = total_files;
279
+
let mut new_subfs_uris: Vec<(String, String)> = Vec::new();
280
+
281
+
// Estimate initial manifest size
282
+
let mut manifest_size = subfs_utils::estimate_directory_size(&working_directory);
283
+
284
+
if total_files >= FILE_COUNT_THRESHOLD || manifest_size > MAX_MANIFEST_SIZE {
285
+
println!("\n⚠️ Large site detected ({} files, {:.1}KB manifest), splitting into subfs records...",
286
+
total_files, manifest_size as f64 / 1024.0);
287
+
288
+
let mut attempts = 0;
289
+
const MAX_SPLIT_ATTEMPTS: usize = 50;
290
+
291
+
while (manifest_size > MAX_MANIFEST_SIZE || current_file_count > TARGET_FILE_COUNT) && attempts < MAX_SPLIT_ATTEMPTS {
292
+
attempts += 1;
293
+
294
+
// Find large directories to split
295
+
let directories = subfs_utils::find_large_directories(&working_directory, String::new());
296
+
297
+
if let Some(largest_dir) = directories.first() {
298
+
println!(" Split #{}: {} ({} files, {:.1}KB)",
299
+
attempts, largest_dir.path, largest_dir.file_count, largest_dir.size as f64 / 1024.0);
300
+
301
+
// Create a subfs record for this directory
302
+
use jacquard_common::types::string::Tid;
303
+
let subfs_tid = Tid::now_0();
304
+
let subfs_rkey = subfs_tid.to_string();
305
+
306
+
let subfs_manifest = crate::place_wisp::subfs::SubfsRecord::new()
307
+
.root(convert_fs_dir_to_subfs_dir(largest_dir.directory.clone()))
308
+
.file_count(Some(largest_dir.file_count as i64))
309
+
.created_at(Datetime::now())
310
+
.build();
311
+
312
+
// Upload subfs record
313
+
let subfs_output = agent.put_record(
314
+
RecordKey::from(Rkey::new(&subfs_rkey).into_diagnostic()?),
315
+
subfs_manifest
316
+
).await.into_diagnostic()?;
317
+
318
+
let subfs_uri = subfs_output.uri.to_string();
319
+
println!(" ✅ Created subfs: {}", subfs_uri);
320
+
321
+
// Replace directory with subfs node (flat: false to preserve structure)
322
+
working_directory = subfs_utils::replace_directory_with_subfs(
323
+
working_directory,
324
+
&largest_dir.path,
325
+
&subfs_uri,
326
+
false // Preserve directory structure
327
+
)?;
328
+
329
+
new_subfs_uris.push((subfs_uri, largest_dir.path.clone()));
330
+
current_file_count -= largest_dir.file_count;
331
+
332
+
// Recalculate manifest size
333
+
manifest_size = subfs_utils::estimate_directory_size(&working_directory);
334
+
println!(" → Manifest now {:.1}KB with {} files ({} subfs total)",
335
+
manifest_size as f64 / 1024.0, current_file_count, new_subfs_uris.len());
336
+
337
+
if manifest_size <= MAX_MANIFEST_SIZE && current_file_count <= TARGET_FILE_COUNT {
338
+
println!("✅ Manifest now fits within limits");
339
+
break;
340
+
}
341
+
} else {
342
+
println!(" No more subdirectories to split - stopping");
343
+
break;
344
+
}
345
+
}
346
+
347
+
if attempts >= MAX_SPLIT_ATTEMPTS {
348
+
return Err(miette::miette!(
349
+
"Exceeded maximum split attempts ({}). Manifest still too large: {:.1}KB with {} files",
350
+
MAX_SPLIT_ATTEMPTS,
351
+
manifest_size as f64 / 1024.0,
352
+
current_file_count
353
+
));
354
+
}
355
+
356
+
println!("✅ Split complete: {} subfs records, {} files in main manifest, {:.1}KB",
357
+
new_subfs_uris.len(), current_file_count, manifest_size as f64 / 1024.0);
358
+
} else {
359
+
println!("Manifest created ({} files, {:.1}KB) - no splitting needed",
360
+
total_files, manifest_size as f64 / 1024.0);
361
+
}
362
+
363
+
// Create the final Fs record
252
364
let fs_record = Fs::new()
253
365
.site(CowStr::from(site_name.clone()))
254
-
.root(root_dir)
255
-
.file_count(total_files as i64)
366
+
.root(working_directory)
367
+
.file_count(current_file_count as i64)
256
368
.created_at(Datetime::now())
257
369
.build();
258
370
···
270
382
println!("\n✓ Deployed site '{}': {}", site_name, output.uri);
271
383
println!(" Total files: {} ({} reused, {} uploaded)", total_files, reused_count, uploaded_count);
272
384
println!(" Available at: https://sites.wisp.place/{}/{}", did, site_name);
385
+
386
+
// Clean up old subfs records
387
+
if !old_subfs_uris.is_empty() {
388
+
println!("\nCleaning up {} old subfs records...", old_subfs_uris.len());
389
+
390
+
let mut deleted_count = 0;
391
+
let mut failed_count = 0;
392
+
393
+
for (uri, _path) in old_subfs_uris {
394
+
match subfs_utils::delete_subfs_record(agent, &uri).await {
395
+
Ok(_) => {
396
+
deleted_count += 1;
397
+
println!(" 🗑️ Deleted old subfs: {}", uri);
398
+
}
399
+
Err(e) => {
400
+
failed_count += 1;
401
+
eprintln!(" ⚠️ Failed to delete {}: {}", uri, e);
402
+
}
403
+
}
404
+
}
405
+
406
+
if failed_count > 0 {
407
+
eprintln!("⚠️ Cleanup completed with {} deleted, {} failed", deleted_count, failed_count);
408
+
} else {
409
+
println!("✅ Cleanup complete: {} old subfs records deleted", deleted_count);
410
+
}
411
+
}
273
412
274
413
Ok(())
275
414
}
···
448
587
))
449
588
}
450
589
590
+
/// Convert fs::Directory to subfs::Directory
591
+
/// They have the same structure, but different types
592
+
fn convert_fs_dir_to_subfs_dir(fs_dir: place_wisp::fs::Directory<'static>) -> place_wisp::subfs::Directory<'static> {
593
+
use place_wisp::subfs::{Directory as SubfsDirectory, Entry as SubfsEntry, EntryNode as SubfsEntryNode, File as SubfsFile};
594
+
595
+
let subfs_entries: Vec<SubfsEntry> = fs_dir.entries.into_iter().map(|entry| {
596
+
let node = match entry.node {
597
+
place_wisp::fs::EntryNode::File(file) => {
598
+
SubfsEntryNode::File(Box::new(SubfsFile::new()
599
+
.r#type(file.r#type)
600
+
.blob(file.blob)
601
+
.encoding(file.encoding)
602
+
.mime_type(file.mime_type)
603
+
.base64(file.base64)
604
+
.build()))
605
+
}
606
+
place_wisp::fs::EntryNode::Directory(dir) => {
607
+
SubfsEntryNode::Directory(Box::new(convert_fs_dir_to_subfs_dir(*dir)))
608
+
}
609
+
place_wisp::fs::EntryNode::Subfs(subfs) => {
610
+
// Nested subfs in the directory we're converting
611
+
// Note: subfs::Subfs doesn't have the 'flat' field - that's only in fs::Subfs
612
+
SubfsEntryNode::Subfs(Box::new(place_wisp::subfs::Subfs::new()
613
+
.r#type(subfs.r#type)
614
+
.subject(subfs.subject)
615
+
.build()))
616
+
}
617
+
place_wisp::fs::EntryNode::Unknown(unknown) => {
618
+
SubfsEntryNode::Unknown(unknown)
619
+
}
620
+
};
621
+
622
+
SubfsEntry::new()
623
+
.name(entry.name)
624
+
.node(node)
625
+
.build()
626
+
}).collect();
627
+
628
+
SubfsDirectory::new()
629
+
.r#type(fs_dir.r#type)
630
+
.entries(subfs_entries)
631
+
.build()
632
+
}
633
+
+2
-1
cli/src/place_wisp.rs
+2
-1
cli/src/place_wisp.rs
+261
-1
cli/src/place_wisp/fs.rs
+261
-1
cli/src/place_wisp/fs.rs
···
251
251
description: None,
252
252
refs: vec![
253
253
::jacquard_common::CowStr::new_static("#file"),
254
-
::jacquard_common::CowStr::new_static("#directory")
254
+
::jacquard_common::CowStr::new_static("#directory"),
255
+
::jacquard_common::CowStr::new_static("#subfs")
255
256
],
256
257
closed: None,
257
258
}),
···
428
429
}),
429
430
}),
430
431
);
432
+
map.insert(
433
+
::jacquard_common::smol_str::SmolStr::new_static("subfs"),
434
+
::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
435
+
description: None,
436
+
required: Some(
437
+
vec![
438
+
::jacquard_common::smol_str::SmolStr::new_static("type"),
439
+
::jacquard_common::smol_str::SmolStr::new_static("subject")
440
+
],
441
+
),
442
+
nullable: None,
443
+
properties: {
444
+
#[allow(unused_mut)]
445
+
let mut map = ::std::collections::BTreeMap::new();
446
+
map.insert(
447
+
::jacquard_common::smol_str::SmolStr::new_static("flat"),
448
+
::jacquard_lexicon::lexicon::LexObjectProperty::Boolean(::jacquard_lexicon::lexicon::LexBoolean {
449
+
description: None,
450
+
default: None,
451
+
r#const: None,
452
+
}),
453
+
);
454
+
map.insert(
455
+
::jacquard_common::smol_str::SmolStr::new_static("subject"),
456
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
457
+
description: Some(
458
+
::jacquard_common::CowStr::new_static(
459
+
"AT-URI pointing to a place.wisp.subfs record containing this subtree.",
460
+
),
461
+
),
462
+
format: Some(
463
+
::jacquard_lexicon::lexicon::LexStringFormat::AtUri,
464
+
),
465
+
default: None,
466
+
min_length: None,
467
+
max_length: None,
468
+
min_graphemes: None,
469
+
max_graphemes: None,
470
+
r#enum: None,
471
+
r#const: None,
472
+
known_values: None,
473
+
}),
474
+
);
475
+
map.insert(
476
+
::jacquard_common::smol_str::SmolStr::new_static("type"),
477
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
478
+
description: None,
479
+
format: None,
480
+
default: None,
481
+
min_length: None,
482
+
max_length: None,
483
+
min_graphemes: None,
484
+
max_graphemes: None,
485
+
r#enum: None,
486
+
r#const: None,
487
+
known_values: None,
488
+
}),
489
+
);
490
+
map
491
+
},
492
+
}),
493
+
);
431
494
map
432
495
},
433
496
}
···
638
701
File(Box<crate::place_wisp::fs::File<'a>>),
639
702
#[serde(rename = "place.wisp.fs#directory")]
640
703
Directory(Box<crate::place_wisp::fs::Directory<'a>>),
704
+
#[serde(rename = "place.wisp.fs#subfs")]
705
+
Subfs(Box<crate::place_wisp::fs::Subfs<'a>>),
641
706
}
642
707
643
708
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Entry<'a> {
···
1225
1290
});
1226
1291
}
1227
1292
}
1293
+
Ok(())
1294
+
}
1295
+
}
1296
+
1297
+
#[jacquard_derive::lexicon]
1298
+
#[derive(
1299
+
serde::Serialize,
1300
+
serde::Deserialize,
1301
+
Debug,
1302
+
Clone,
1303
+
PartialEq,
1304
+
Eq,
1305
+
jacquard_derive::IntoStatic
1306
+
)]
1307
+
#[serde(rename_all = "camelCase")]
1308
+
pub struct Subfs<'a> {
1309
+
/// If true, the subfs record's root entries are merged (flattened) into the parent directory, replacing the subfs entry. If false (default), the subfs entries are placed in a subdirectory with the subfs entry's name. Flat merging is useful for splitting large directories across multiple records while maintaining a flat structure.
1310
+
#[serde(skip_serializing_if = "std::option::Option::is_none")]
1311
+
pub flat: Option<bool>,
1312
+
/// AT-URI pointing to a place.wisp.subfs record containing this subtree.
1313
+
#[serde(borrow)]
1314
+
pub subject: jacquard_common::types::string::AtUri<'a>,
1315
+
#[serde(borrow)]
1316
+
pub r#type: jacquard_common::CowStr<'a>,
1317
+
}
1318
+
1319
+
pub mod subfs_state {
1320
+
1321
+
pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
1322
+
#[allow(unused)]
1323
+
use ::core::marker::PhantomData;
1324
+
mod sealed {
1325
+
pub trait Sealed {}
1326
+
}
1327
+
/// State trait tracking which required fields have been set
1328
+
pub trait State: sealed::Sealed {
1329
+
type Type;
1330
+
type Subject;
1331
+
}
1332
+
/// Empty state - all required fields are unset
1333
+
pub struct Empty(());
1334
+
impl sealed::Sealed for Empty {}
1335
+
impl State for Empty {
1336
+
type Type = Unset;
1337
+
type Subject = Unset;
1338
+
}
1339
+
///State transition - sets the `type` field to Set
1340
+
pub struct SetType<S: State = Empty>(PhantomData<fn() -> S>);
1341
+
impl<S: State> sealed::Sealed for SetType<S> {}
1342
+
impl<S: State> State for SetType<S> {
1343
+
type Type = Set<members::r#type>;
1344
+
type Subject = S::Subject;
1345
+
}
1346
+
///State transition - sets the `subject` field to Set
1347
+
pub struct SetSubject<S: State = Empty>(PhantomData<fn() -> S>);
1348
+
impl<S: State> sealed::Sealed for SetSubject<S> {}
1349
+
impl<S: State> State for SetSubject<S> {
1350
+
type Type = S::Type;
1351
+
type Subject = Set<members::subject>;
1352
+
}
1353
+
/// Marker types for field names
1354
+
#[allow(non_camel_case_types)]
1355
+
pub mod members {
1356
+
///Marker type for the `type` field
1357
+
pub struct r#type(());
1358
+
///Marker type for the `subject` field
1359
+
pub struct subject(());
1360
+
}
1361
+
}
1362
+
1363
+
/// Builder for constructing an instance of this type
1364
+
pub struct SubfsBuilder<'a, S: subfs_state::State> {
1365
+
_phantom_state: ::core::marker::PhantomData<fn() -> S>,
1366
+
__unsafe_private_named: (
1367
+
::core::option::Option<bool>,
1368
+
::core::option::Option<jacquard_common::types::string::AtUri<'a>>,
1369
+
::core::option::Option<jacquard_common::CowStr<'a>>,
1370
+
),
1371
+
_phantom: ::core::marker::PhantomData<&'a ()>,
1372
+
}
1373
+
1374
+
impl<'a> Subfs<'a> {
1375
+
/// Create a new builder for this type
1376
+
pub fn new() -> SubfsBuilder<'a, subfs_state::Empty> {
1377
+
SubfsBuilder::new()
1378
+
}
1379
+
}
1380
+
1381
+
impl<'a> SubfsBuilder<'a, subfs_state::Empty> {
1382
+
/// Create a new builder with all fields unset
1383
+
pub fn new() -> Self {
1384
+
SubfsBuilder {
1385
+
_phantom_state: ::core::marker::PhantomData,
1386
+
__unsafe_private_named: (None, None, None),
1387
+
_phantom: ::core::marker::PhantomData,
1388
+
}
1389
+
}
1390
+
}
1391
+
1392
+
impl<'a, S: subfs_state::State> SubfsBuilder<'a, S> {
1393
+
/// Set the `flat` field (optional)
1394
+
pub fn flat(mut self, value: impl Into<Option<bool>>) -> Self {
1395
+
self.__unsafe_private_named.0 = value.into();
1396
+
self
1397
+
}
1398
+
/// Set the `flat` field to an Option value (optional)
1399
+
pub fn maybe_flat(mut self, value: Option<bool>) -> Self {
1400
+
self.__unsafe_private_named.0 = value;
1401
+
self
1402
+
}
1403
+
}
1404
+
1405
+
impl<'a, S> SubfsBuilder<'a, S>
1406
+
where
1407
+
S: subfs_state::State,
1408
+
S::Subject: subfs_state::IsUnset,
1409
+
{
1410
+
/// Set the `subject` field (required)
1411
+
pub fn subject(
1412
+
mut self,
1413
+
value: impl Into<jacquard_common::types::string::AtUri<'a>>,
1414
+
) -> SubfsBuilder<'a, subfs_state::SetSubject<S>> {
1415
+
self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
1416
+
SubfsBuilder {
1417
+
_phantom_state: ::core::marker::PhantomData,
1418
+
__unsafe_private_named: self.__unsafe_private_named,
1419
+
_phantom: ::core::marker::PhantomData,
1420
+
}
1421
+
}
1422
+
}
1423
+
1424
+
impl<'a, S> SubfsBuilder<'a, S>
1425
+
where
1426
+
S: subfs_state::State,
1427
+
S::Type: subfs_state::IsUnset,
1428
+
{
1429
+
/// Set the `type` field (required)
1430
+
pub fn r#type(
1431
+
mut self,
1432
+
value: impl Into<jacquard_common::CowStr<'a>>,
1433
+
) -> SubfsBuilder<'a, subfs_state::SetType<S>> {
1434
+
self.__unsafe_private_named.2 = ::core::option::Option::Some(value.into());
1435
+
SubfsBuilder {
1436
+
_phantom_state: ::core::marker::PhantomData,
1437
+
__unsafe_private_named: self.__unsafe_private_named,
1438
+
_phantom: ::core::marker::PhantomData,
1439
+
}
1440
+
}
1441
+
}
1442
+
1443
+
impl<'a, S> SubfsBuilder<'a, S>
1444
+
where
1445
+
S: subfs_state::State,
1446
+
S::Type: subfs_state::IsSet,
1447
+
S::Subject: subfs_state::IsSet,
1448
+
{
1449
+
/// Build the final struct
1450
+
pub fn build(self) -> Subfs<'a> {
1451
+
Subfs {
1452
+
flat: self.__unsafe_private_named.0,
1453
+
subject: self.__unsafe_private_named.1.unwrap(),
1454
+
r#type: self.__unsafe_private_named.2.unwrap(),
1455
+
extra_data: Default::default(),
1456
+
}
1457
+
}
1458
+
/// Build the final struct with custom extra_data
1459
+
pub fn build_with_data(
1460
+
self,
1461
+
extra_data: std::collections::BTreeMap<
1462
+
jacquard_common::smol_str::SmolStr,
1463
+
jacquard_common::types::value::Data<'a>,
1464
+
>,
1465
+
) -> Subfs<'a> {
1466
+
Subfs {
1467
+
flat: self.__unsafe_private_named.0,
1468
+
subject: self.__unsafe_private_named.1.unwrap(),
1469
+
r#type: self.__unsafe_private_named.2.unwrap(),
1470
+
extra_data: Some(extra_data),
1471
+
}
1472
+
}
1473
+
}
1474
+
1475
+
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Subfs<'a> {
1476
+
fn nsid() -> &'static str {
1477
+
"place.wisp.fs"
1478
+
}
1479
+
fn def_name() -> &'static str {
1480
+
"subfs"
1481
+
}
1482
+
fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
1483
+
lexicon_doc_place_wisp_fs()
1484
+
}
1485
+
fn validate(
1486
+
&self,
1487
+
) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
1228
1488
Ok(())
1229
1489
}
1230
1490
}
+1408
cli/src/place_wisp/subfs.rs
+1408
cli/src/place_wisp/subfs.rs
···
1
+
// @generated by jacquard-lexicon. DO NOT EDIT.
2
+
//
3
+
// Lexicon: place.wisp.subfs
4
+
//
5
+
// This file was automatically generated from Lexicon schemas.
6
+
// Any manual changes will be overwritten on the next regeneration.
7
+
8
+
#[jacquard_derive::lexicon]
9
+
#[derive(
10
+
serde::Serialize,
11
+
serde::Deserialize,
12
+
Debug,
13
+
Clone,
14
+
PartialEq,
15
+
Eq,
16
+
jacquard_derive::IntoStatic
17
+
)]
18
+
#[serde(rename_all = "camelCase")]
19
+
pub struct Directory<'a> {
20
+
#[serde(borrow)]
21
+
pub entries: Vec<crate::place_wisp::subfs::Entry<'a>>,
22
+
#[serde(borrow)]
23
+
pub r#type: jacquard_common::CowStr<'a>,
24
+
}
25
+
26
+
pub mod directory_state {
27
+
28
+
pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
29
+
#[allow(unused)]
30
+
use ::core::marker::PhantomData;
31
+
mod sealed {
32
+
pub trait Sealed {}
33
+
}
34
+
/// State trait tracking which required fields have been set
35
+
pub trait State: sealed::Sealed {
36
+
type Type;
37
+
type Entries;
38
+
}
39
+
/// Empty state - all required fields are unset
40
+
pub struct Empty(());
41
+
impl sealed::Sealed for Empty {}
42
+
impl State for Empty {
43
+
type Type = Unset;
44
+
type Entries = Unset;
45
+
}
46
+
///State transition - sets the `type` field to Set
47
+
pub struct SetType<S: State = Empty>(PhantomData<fn() -> S>);
48
+
impl<S: State> sealed::Sealed for SetType<S> {}
49
+
impl<S: State> State for SetType<S> {
50
+
type Type = Set<members::r#type>;
51
+
type Entries = S::Entries;
52
+
}
53
+
///State transition - sets the `entries` field to Set
54
+
pub struct SetEntries<S: State = Empty>(PhantomData<fn() -> S>);
55
+
impl<S: State> sealed::Sealed for SetEntries<S> {}
56
+
impl<S: State> State for SetEntries<S> {
57
+
type Type = S::Type;
58
+
type Entries = Set<members::entries>;
59
+
}
60
+
/// Marker types for field names
61
+
#[allow(non_camel_case_types)]
62
+
pub mod members {
63
+
///Marker type for the `type` field
64
+
pub struct r#type(());
65
+
///Marker type for the `entries` field
66
+
pub struct entries(());
67
+
}
68
+
}
69
+
70
+
/// Builder for constructing an instance of this type
71
+
pub struct DirectoryBuilder<'a, S: directory_state::State> {
72
+
_phantom_state: ::core::marker::PhantomData<fn() -> S>,
73
+
__unsafe_private_named: (
74
+
::core::option::Option<Vec<crate::place_wisp::subfs::Entry<'a>>>,
75
+
::core::option::Option<jacquard_common::CowStr<'a>>,
76
+
),
77
+
_phantom: ::core::marker::PhantomData<&'a ()>,
78
+
}
79
+
80
+
impl<'a> Directory<'a> {
81
+
/// Create a new builder for this type
82
+
pub fn new() -> DirectoryBuilder<'a, directory_state::Empty> {
83
+
DirectoryBuilder::new()
84
+
}
85
+
}
86
+
87
+
impl<'a> DirectoryBuilder<'a, directory_state::Empty> {
88
+
/// Create a new builder with all fields unset
89
+
pub fn new() -> Self {
90
+
DirectoryBuilder {
91
+
_phantom_state: ::core::marker::PhantomData,
92
+
__unsafe_private_named: (None, None),
93
+
_phantom: ::core::marker::PhantomData,
94
+
}
95
+
}
96
+
}
97
+
98
+
impl<'a, S> DirectoryBuilder<'a, S>
99
+
where
100
+
S: directory_state::State,
101
+
S::Entries: directory_state::IsUnset,
102
+
{
103
+
/// Set the `entries` field (required)
104
+
pub fn entries(
105
+
mut self,
106
+
value: impl Into<Vec<crate::place_wisp::subfs::Entry<'a>>>,
107
+
) -> DirectoryBuilder<'a, directory_state::SetEntries<S>> {
108
+
self.__unsafe_private_named.0 = ::core::option::Option::Some(value.into());
109
+
DirectoryBuilder {
110
+
_phantom_state: ::core::marker::PhantomData,
111
+
__unsafe_private_named: self.__unsafe_private_named,
112
+
_phantom: ::core::marker::PhantomData,
113
+
}
114
+
}
115
+
}
116
+
117
+
impl<'a, S> DirectoryBuilder<'a, S>
118
+
where
119
+
S: directory_state::State,
120
+
S::Type: directory_state::IsUnset,
121
+
{
122
+
/// Set the `type` field (required)
123
+
pub fn r#type(
124
+
mut self,
125
+
value: impl Into<jacquard_common::CowStr<'a>>,
126
+
) -> DirectoryBuilder<'a, directory_state::SetType<S>> {
127
+
self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
128
+
DirectoryBuilder {
129
+
_phantom_state: ::core::marker::PhantomData,
130
+
__unsafe_private_named: self.__unsafe_private_named,
131
+
_phantom: ::core::marker::PhantomData,
132
+
}
133
+
}
134
+
}
135
+
136
+
impl<'a, S> DirectoryBuilder<'a, S>
137
+
where
138
+
S: directory_state::State,
139
+
S::Type: directory_state::IsSet,
140
+
S::Entries: directory_state::IsSet,
141
+
{
142
+
/// Build the final struct
143
+
pub fn build(self) -> Directory<'a> {
144
+
Directory {
145
+
entries: self.__unsafe_private_named.0.unwrap(),
146
+
r#type: self.__unsafe_private_named.1.unwrap(),
147
+
extra_data: Default::default(),
148
+
}
149
+
}
150
+
/// Build the final struct with custom extra_data
151
+
pub fn build_with_data(
152
+
self,
153
+
extra_data: std::collections::BTreeMap<
154
+
jacquard_common::smol_str::SmolStr,
155
+
jacquard_common::types::value::Data<'a>,
156
+
>,
157
+
) -> Directory<'a> {
158
+
Directory {
159
+
entries: self.__unsafe_private_named.0.unwrap(),
160
+
r#type: self.__unsafe_private_named.1.unwrap(),
161
+
extra_data: Some(extra_data),
162
+
}
163
+
}
164
+
}
165
+
166
+
fn lexicon_doc_place_wisp_subfs() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
167
+
::jacquard_lexicon::lexicon::LexiconDoc {
168
+
lexicon: ::jacquard_lexicon::lexicon::Lexicon::Lexicon1,
169
+
id: ::jacquard_common::CowStr::new_static("place.wisp.subfs"),
170
+
revision: None,
171
+
description: None,
172
+
defs: {
173
+
let mut map = ::std::collections::BTreeMap::new();
174
+
map.insert(
175
+
::jacquard_common::smol_str::SmolStr::new_static("directory"),
176
+
::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
177
+
description: None,
178
+
required: Some(
179
+
vec![
180
+
::jacquard_common::smol_str::SmolStr::new_static("type"),
181
+
::jacquard_common::smol_str::SmolStr::new_static("entries")
182
+
],
183
+
),
184
+
nullable: None,
185
+
properties: {
186
+
#[allow(unused_mut)]
187
+
let mut map = ::std::collections::BTreeMap::new();
188
+
map.insert(
189
+
::jacquard_common::smol_str::SmolStr::new_static("entries"),
190
+
::jacquard_lexicon::lexicon::LexObjectProperty::Array(::jacquard_lexicon::lexicon::LexArray {
191
+
description: None,
192
+
items: ::jacquard_lexicon::lexicon::LexArrayItem::Ref(::jacquard_lexicon::lexicon::LexRef {
193
+
description: None,
194
+
r#ref: ::jacquard_common::CowStr::new_static("#entry"),
195
+
}),
196
+
min_length: None,
197
+
max_length: Some(500usize),
198
+
}),
199
+
);
200
+
map.insert(
201
+
::jacquard_common::smol_str::SmolStr::new_static("type"),
202
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
203
+
description: None,
204
+
format: None,
205
+
default: None,
206
+
min_length: None,
207
+
max_length: None,
208
+
min_graphemes: None,
209
+
max_graphemes: None,
210
+
r#enum: None,
211
+
r#const: None,
212
+
known_values: None,
213
+
}),
214
+
);
215
+
map
216
+
},
217
+
}),
218
+
);
219
+
map.insert(
220
+
::jacquard_common::smol_str::SmolStr::new_static("entry"),
221
+
::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
222
+
description: None,
223
+
required: Some(
224
+
vec![
225
+
::jacquard_common::smol_str::SmolStr::new_static("name"),
226
+
::jacquard_common::smol_str::SmolStr::new_static("node")
227
+
],
228
+
),
229
+
nullable: None,
230
+
properties: {
231
+
#[allow(unused_mut)]
232
+
let mut map = ::std::collections::BTreeMap::new();
233
+
map.insert(
234
+
::jacquard_common::smol_str::SmolStr::new_static("name"),
235
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
236
+
description: None,
237
+
format: None,
238
+
default: None,
239
+
min_length: None,
240
+
max_length: Some(255usize),
241
+
min_graphemes: None,
242
+
max_graphemes: None,
243
+
r#enum: None,
244
+
r#const: None,
245
+
known_values: None,
246
+
}),
247
+
);
248
+
map.insert(
249
+
::jacquard_common::smol_str::SmolStr::new_static("node"),
250
+
::jacquard_lexicon::lexicon::LexObjectProperty::Union(::jacquard_lexicon::lexicon::LexRefUnion {
251
+
description: None,
252
+
refs: vec![
253
+
::jacquard_common::CowStr::new_static("#file"),
254
+
::jacquard_common::CowStr::new_static("#directory"),
255
+
::jacquard_common::CowStr::new_static("#subfs")
256
+
],
257
+
closed: None,
258
+
}),
259
+
);
260
+
map
261
+
},
262
+
}),
263
+
);
264
+
map.insert(
265
+
::jacquard_common::smol_str::SmolStr::new_static("file"),
266
+
::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
267
+
description: None,
268
+
required: Some(
269
+
vec![
270
+
::jacquard_common::smol_str::SmolStr::new_static("type"),
271
+
::jacquard_common::smol_str::SmolStr::new_static("blob")
272
+
],
273
+
),
274
+
nullable: None,
275
+
properties: {
276
+
#[allow(unused_mut)]
277
+
let mut map = ::std::collections::BTreeMap::new();
278
+
map.insert(
279
+
::jacquard_common::smol_str::SmolStr::new_static("base64"),
280
+
::jacquard_lexicon::lexicon::LexObjectProperty::Boolean(::jacquard_lexicon::lexicon::LexBoolean {
281
+
description: None,
282
+
default: None,
283
+
r#const: None,
284
+
}),
285
+
);
286
+
map.insert(
287
+
::jacquard_common::smol_str::SmolStr::new_static("blob"),
288
+
::jacquard_lexicon::lexicon::LexObjectProperty::Blob(::jacquard_lexicon::lexicon::LexBlob {
289
+
description: None,
290
+
accept: None,
291
+
max_size: None,
292
+
}),
293
+
);
294
+
map.insert(
295
+
::jacquard_common::smol_str::SmolStr::new_static("encoding"),
296
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
297
+
description: Some(
298
+
::jacquard_common::CowStr::new_static(
299
+
"Content encoding (e.g., gzip for compressed files)",
300
+
),
301
+
),
302
+
format: None,
303
+
default: None,
304
+
min_length: None,
305
+
max_length: None,
306
+
min_graphemes: None,
307
+
max_graphemes: None,
308
+
r#enum: None,
309
+
r#const: None,
310
+
known_values: None,
311
+
}),
312
+
);
313
+
map.insert(
314
+
::jacquard_common::smol_str::SmolStr::new_static("mimeType"),
315
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
316
+
description: Some(
317
+
::jacquard_common::CowStr::new_static(
318
+
"Original MIME type before compression",
319
+
),
320
+
),
321
+
format: None,
322
+
default: None,
323
+
min_length: None,
324
+
max_length: None,
325
+
min_graphemes: None,
326
+
max_graphemes: None,
327
+
r#enum: None,
328
+
r#const: None,
329
+
known_values: None,
330
+
}),
331
+
);
332
+
map.insert(
333
+
::jacquard_common::smol_str::SmolStr::new_static("type"),
334
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
335
+
description: None,
336
+
format: None,
337
+
default: None,
338
+
min_length: None,
339
+
max_length: None,
340
+
min_graphemes: None,
341
+
max_graphemes: None,
342
+
r#enum: None,
343
+
r#const: None,
344
+
known_values: None,
345
+
}),
346
+
);
347
+
map
348
+
},
349
+
}),
350
+
);
351
+
map.insert(
352
+
::jacquard_common::smol_str::SmolStr::new_static("main"),
353
+
::jacquard_lexicon::lexicon::LexUserType::Record(::jacquard_lexicon::lexicon::LexRecord {
354
+
description: Some(
355
+
::jacquard_common::CowStr::new_static(
356
+
"Virtual filesystem subtree referenced by place.wisp.fs records. When a subfs entry is expanded, its root entries are merged (flattened) into the parent directory, allowing large directories to be split across multiple records while maintaining a flat structure.",
357
+
),
358
+
),
359
+
key: None,
360
+
record: ::jacquard_lexicon::lexicon::LexRecordRecord::Object(::jacquard_lexicon::lexicon::LexObject {
361
+
description: None,
362
+
required: Some(
363
+
vec![
364
+
::jacquard_common::smol_str::SmolStr::new_static("root"),
365
+
::jacquard_common::smol_str::SmolStr::new_static("createdAt")
366
+
],
367
+
),
368
+
nullable: None,
369
+
properties: {
370
+
#[allow(unused_mut)]
371
+
let mut map = ::std::collections::BTreeMap::new();
372
+
map.insert(
373
+
::jacquard_common::smol_str::SmolStr::new_static(
374
+
"createdAt",
375
+
),
376
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
377
+
description: None,
378
+
format: Some(
379
+
::jacquard_lexicon::lexicon::LexStringFormat::Datetime,
380
+
),
381
+
default: None,
382
+
min_length: None,
383
+
max_length: None,
384
+
min_graphemes: None,
385
+
max_graphemes: None,
386
+
r#enum: None,
387
+
r#const: None,
388
+
known_values: None,
389
+
}),
390
+
);
391
+
map.insert(
392
+
::jacquard_common::smol_str::SmolStr::new_static(
393
+
"fileCount",
394
+
),
395
+
::jacquard_lexicon::lexicon::LexObjectProperty::Integer(::jacquard_lexicon::lexicon::LexInteger {
396
+
description: None,
397
+
default: None,
398
+
minimum: Some(0i64),
399
+
maximum: Some(1000i64),
400
+
r#enum: None,
401
+
r#const: None,
402
+
}),
403
+
);
404
+
map.insert(
405
+
::jacquard_common::smol_str::SmolStr::new_static("root"),
406
+
::jacquard_lexicon::lexicon::LexObjectProperty::Ref(::jacquard_lexicon::lexicon::LexRef {
407
+
description: None,
408
+
r#ref: ::jacquard_common::CowStr::new_static("#directory"),
409
+
}),
410
+
);
411
+
map
412
+
},
413
+
}),
414
+
}),
415
+
);
416
+
map.insert(
417
+
::jacquard_common::smol_str::SmolStr::new_static("subfs"),
418
+
::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
419
+
description: None,
420
+
required: Some(
421
+
vec![
422
+
::jacquard_common::smol_str::SmolStr::new_static("type"),
423
+
::jacquard_common::smol_str::SmolStr::new_static("subject")
424
+
],
425
+
),
426
+
nullable: None,
427
+
properties: {
428
+
#[allow(unused_mut)]
429
+
let mut map = ::std::collections::BTreeMap::new();
430
+
map.insert(
431
+
::jacquard_common::smol_str::SmolStr::new_static("subject"),
432
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
433
+
description: Some(
434
+
::jacquard_common::CowStr::new_static(
435
+
"AT-URI pointing to another place.wisp.subfs record for nested subtrees. When expanded, the referenced record's root entries are merged (flattened) into the parent directory, allowing recursive splitting of large directory structures.",
436
+
),
437
+
),
438
+
format: Some(
439
+
::jacquard_lexicon::lexicon::LexStringFormat::AtUri,
440
+
),
441
+
default: None,
442
+
min_length: None,
443
+
max_length: None,
444
+
min_graphemes: None,
445
+
max_graphemes: None,
446
+
r#enum: None,
447
+
r#const: None,
448
+
known_values: None,
449
+
}),
450
+
);
451
+
map.insert(
452
+
::jacquard_common::smol_str::SmolStr::new_static("type"),
453
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
454
+
description: None,
455
+
format: None,
456
+
default: None,
457
+
min_length: None,
458
+
max_length: None,
459
+
min_graphemes: None,
460
+
max_graphemes: None,
461
+
r#enum: None,
462
+
r#const: None,
463
+
known_values: None,
464
+
}),
465
+
);
466
+
map
467
+
},
468
+
}),
469
+
);
470
+
map
471
+
},
472
+
}
473
+
}
474
+
475
+
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Directory<'a> {
476
+
fn nsid() -> &'static str {
477
+
"place.wisp.subfs"
478
+
}
479
+
fn def_name() -> &'static str {
480
+
"directory"
481
+
}
482
+
fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
483
+
lexicon_doc_place_wisp_subfs()
484
+
}
485
+
fn validate(
486
+
&self,
487
+
) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
488
+
{
489
+
let value = &self.entries;
490
+
#[allow(unused_comparisons)]
491
+
if value.len() > 500usize {
492
+
return Err(::jacquard_lexicon::validation::ConstraintError::MaxLength {
493
+
path: ::jacquard_lexicon::validation::ValidationPath::from_field(
494
+
"entries",
495
+
),
496
+
max: 500usize,
497
+
actual: value.len(),
498
+
});
499
+
}
500
+
}
501
+
Ok(())
502
+
}
503
+
}
504
+
505
+
#[jacquard_derive::lexicon]
506
+
#[derive(
507
+
serde::Serialize,
508
+
serde::Deserialize,
509
+
Debug,
510
+
Clone,
511
+
PartialEq,
512
+
Eq,
513
+
jacquard_derive::IntoStatic
514
+
)]
515
+
#[serde(rename_all = "camelCase")]
516
+
pub struct Entry<'a> {
517
+
#[serde(borrow)]
518
+
pub name: jacquard_common::CowStr<'a>,
519
+
#[serde(borrow)]
520
+
pub node: EntryNode<'a>,
521
+
}
522
+
523
+
pub mod entry_state {
524
+
525
+
pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
526
+
#[allow(unused)]
527
+
use ::core::marker::PhantomData;
528
+
mod sealed {
529
+
pub trait Sealed {}
530
+
}
531
+
/// State trait tracking which required fields have been set
532
+
pub trait State: sealed::Sealed {
533
+
type Name;
534
+
type Node;
535
+
}
536
+
/// Empty state - all required fields are unset
537
+
pub struct Empty(());
538
+
impl sealed::Sealed for Empty {}
539
+
impl State for Empty {
540
+
type Name = Unset;
541
+
type Node = Unset;
542
+
}
543
+
///State transition - sets the `name` field to Set
544
+
pub struct SetName<S: State = Empty>(PhantomData<fn() -> S>);
545
+
impl<S: State> sealed::Sealed for SetName<S> {}
546
+
impl<S: State> State for SetName<S> {
547
+
type Name = Set<members::name>;
548
+
type Node = S::Node;
549
+
}
550
+
///State transition - sets the `node` field to Set
551
+
pub struct SetNode<S: State = Empty>(PhantomData<fn() -> S>);
552
+
impl<S: State> sealed::Sealed for SetNode<S> {}
553
+
impl<S: State> State for SetNode<S> {
554
+
type Name = S::Name;
555
+
type Node = Set<members::node>;
556
+
}
557
+
/// Marker types for field names
558
+
#[allow(non_camel_case_types)]
559
+
pub mod members {
560
+
///Marker type for the `name` field
561
+
pub struct name(());
562
+
///Marker type for the `node` field
563
+
pub struct node(());
564
+
}
565
+
}
566
+
567
+
/// Builder for constructing an instance of this type
568
+
pub struct EntryBuilder<'a, S: entry_state::State> {
569
+
_phantom_state: ::core::marker::PhantomData<fn() -> S>,
570
+
__unsafe_private_named: (
571
+
::core::option::Option<jacquard_common::CowStr<'a>>,
572
+
::core::option::Option<EntryNode<'a>>,
573
+
),
574
+
_phantom: ::core::marker::PhantomData<&'a ()>,
575
+
}
576
+
577
+
impl<'a> Entry<'a> {
578
+
/// Create a new builder for this type
579
+
pub fn new() -> EntryBuilder<'a, entry_state::Empty> {
580
+
EntryBuilder::new()
581
+
}
582
+
}
583
+
584
+
impl<'a> EntryBuilder<'a, entry_state::Empty> {
585
+
/// Create a new builder with all fields unset
586
+
pub fn new() -> Self {
587
+
EntryBuilder {
588
+
_phantom_state: ::core::marker::PhantomData,
589
+
__unsafe_private_named: (None, None),
590
+
_phantom: ::core::marker::PhantomData,
591
+
}
592
+
}
593
+
}
594
+
595
+
impl<'a, S> EntryBuilder<'a, S>
596
+
where
597
+
S: entry_state::State,
598
+
S::Name: entry_state::IsUnset,
599
+
{
600
+
/// Set the `name` field (required)
601
+
pub fn name(
602
+
mut self,
603
+
value: impl Into<jacquard_common::CowStr<'a>>,
604
+
) -> EntryBuilder<'a, entry_state::SetName<S>> {
605
+
self.__unsafe_private_named.0 = ::core::option::Option::Some(value.into());
606
+
EntryBuilder {
607
+
_phantom_state: ::core::marker::PhantomData,
608
+
__unsafe_private_named: self.__unsafe_private_named,
609
+
_phantom: ::core::marker::PhantomData,
610
+
}
611
+
}
612
+
}
613
+
614
+
impl<'a, S> EntryBuilder<'a, S>
615
+
where
616
+
S: entry_state::State,
617
+
S::Node: entry_state::IsUnset,
618
+
{
619
+
/// Set the `node` field (required)
620
+
pub fn node(
621
+
mut self,
622
+
value: impl Into<EntryNode<'a>>,
623
+
) -> EntryBuilder<'a, entry_state::SetNode<S>> {
624
+
self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
625
+
EntryBuilder {
626
+
_phantom_state: ::core::marker::PhantomData,
627
+
__unsafe_private_named: self.__unsafe_private_named,
628
+
_phantom: ::core::marker::PhantomData,
629
+
}
630
+
}
631
+
}
632
+
633
+
impl<'a, S> EntryBuilder<'a, S>
634
+
where
635
+
S: entry_state::State,
636
+
S::Name: entry_state::IsSet,
637
+
S::Node: entry_state::IsSet,
638
+
{
639
+
/// Build the final struct
640
+
pub fn build(self) -> Entry<'a> {
641
+
Entry {
642
+
name: self.__unsafe_private_named.0.unwrap(),
643
+
node: self.__unsafe_private_named.1.unwrap(),
644
+
extra_data: Default::default(),
645
+
}
646
+
}
647
+
/// Build the final struct with custom extra_data
648
+
pub fn build_with_data(
649
+
self,
650
+
extra_data: std::collections::BTreeMap<
651
+
jacquard_common::smol_str::SmolStr,
652
+
jacquard_common::types::value::Data<'a>,
653
+
>,
654
+
) -> Entry<'a> {
655
+
Entry {
656
+
name: self.__unsafe_private_named.0.unwrap(),
657
+
node: self.__unsafe_private_named.1.unwrap(),
658
+
extra_data: Some(extra_data),
659
+
}
660
+
}
661
+
}
662
+
663
+
#[jacquard_derive::open_union]
664
+
#[derive(
665
+
serde::Serialize,
666
+
serde::Deserialize,
667
+
Debug,
668
+
Clone,
669
+
PartialEq,
670
+
Eq,
671
+
jacquard_derive::IntoStatic
672
+
)]
673
+
#[serde(tag = "$type")]
674
+
#[serde(bound(deserialize = "'de: 'a"))]
675
+
pub enum EntryNode<'a> {
676
+
#[serde(rename = "place.wisp.subfs#file")]
677
+
File(Box<crate::place_wisp::subfs::File<'a>>),
678
+
#[serde(rename = "place.wisp.subfs#directory")]
679
+
Directory(Box<crate::place_wisp::subfs::Directory<'a>>),
680
+
#[serde(rename = "place.wisp.subfs#subfs")]
681
+
Subfs(Box<crate::place_wisp::subfs::Subfs<'a>>),
682
+
}
683
+
684
+
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Entry<'a> {
685
+
fn nsid() -> &'static str {
686
+
"place.wisp.subfs"
687
+
}
688
+
fn def_name() -> &'static str {
689
+
"entry"
690
+
}
691
+
fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
692
+
lexicon_doc_place_wisp_subfs()
693
+
}
694
+
fn validate(
695
+
&self,
696
+
) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
697
+
{
698
+
let value = &self.name;
699
+
#[allow(unused_comparisons)]
700
+
if <str>::len(value.as_ref()) > 255usize {
701
+
return Err(::jacquard_lexicon::validation::ConstraintError::MaxLength {
702
+
path: ::jacquard_lexicon::validation::ValidationPath::from_field(
703
+
"name",
704
+
),
705
+
max: 255usize,
706
+
actual: <str>::len(value.as_ref()),
707
+
});
708
+
}
709
+
}
710
+
Ok(())
711
+
}
712
+
}
713
+
714
+
#[jacquard_derive::lexicon]
715
+
#[derive(
716
+
serde::Serialize,
717
+
serde::Deserialize,
718
+
Debug,
719
+
Clone,
720
+
PartialEq,
721
+
Eq,
722
+
jacquard_derive::IntoStatic
723
+
)]
724
+
#[serde(rename_all = "camelCase")]
725
+
pub struct File<'a> {
726
+
/// True if blob content is base64-encoded (used to bypass PDS content sniffing)
727
+
#[serde(skip_serializing_if = "std::option::Option::is_none")]
728
+
pub base64: Option<bool>,
729
+
/// Content blob ref
730
+
#[serde(borrow)]
731
+
pub blob: jacquard_common::types::blob::BlobRef<'a>,
732
+
/// Content encoding (e.g., gzip for compressed files)
733
+
#[serde(skip_serializing_if = "std::option::Option::is_none")]
734
+
#[serde(borrow)]
735
+
pub encoding: Option<jacquard_common::CowStr<'a>>,
736
+
/// Original MIME type before compression
737
+
#[serde(skip_serializing_if = "std::option::Option::is_none")]
738
+
#[serde(borrow)]
739
+
pub mime_type: Option<jacquard_common::CowStr<'a>>,
740
+
#[serde(borrow)]
741
+
pub r#type: jacquard_common::CowStr<'a>,
742
+
}
743
+
744
+
pub mod file_state {
745
+
746
+
pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
747
+
#[allow(unused)]
748
+
use ::core::marker::PhantomData;
749
+
mod sealed {
750
+
pub trait Sealed {}
751
+
}
752
+
/// State trait tracking which required fields have been set
753
+
pub trait State: sealed::Sealed {
754
+
type Type;
755
+
type Blob;
756
+
}
757
+
/// Empty state - all required fields are unset
758
+
pub struct Empty(());
759
+
impl sealed::Sealed for Empty {}
760
+
impl State for Empty {
761
+
type Type = Unset;
762
+
type Blob = Unset;
763
+
}
764
+
///State transition - sets the `type` field to Set
765
+
pub struct SetType<S: State = Empty>(PhantomData<fn() -> S>);
766
+
impl<S: State> sealed::Sealed for SetType<S> {}
767
+
impl<S: State> State for SetType<S> {
768
+
type Type = Set<members::r#type>;
769
+
type Blob = S::Blob;
770
+
}
771
+
///State transition - sets the `blob` field to Set
772
+
pub struct SetBlob<S: State = Empty>(PhantomData<fn() -> S>);
773
+
impl<S: State> sealed::Sealed for SetBlob<S> {}
774
+
impl<S: State> State for SetBlob<S> {
775
+
type Type = S::Type;
776
+
type Blob = Set<members::blob>;
777
+
}
778
+
/// Marker types for field names
779
+
#[allow(non_camel_case_types)]
780
+
pub mod members {
781
+
///Marker type for the `type` field
782
+
pub struct r#type(());
783
+
///Marker type for the `blob` field
784
+
pub struct blob(());
785
+
}
786
+
}
787
+
788
+
/// Builder for constructing an instance of this type
789
+
pub struct FileBuilder<'a, S: file_state::State> {
790
+
_phantom_state: ::core::marker::PhantomData<fn() -> S>,
791
+
__unsafe_private_named: (
792
+
::core::option::Option<bool>,
793
+
::core::option::Option<jacquard_common::types::blob::BlobRef<'a>>,
794
+
::core::option::Option<jacquard_common::CowStr<'a>>,
795
+
::core::option::Option<jacquard_common::CowStr<'a>>,
796
+
::core::option::Option<jacquard_common::CowStr<'a>>,
797
+
),
798
+
_phantom: ::core::marker::PhantomData<&'a ()>,
799
+
}
800
+
801
+
impl<'a> File<'a> {
802
+
/// Create a new builder for this type
803
+
pub fn new() -> FileBuilder<'a, file_state::Empty> {
804
+
FileBuilder::new()
805
+
}
806
+
}
807
+
808
+
impl<'a> FileBuilder<'a, file_state::Empty> {
809
+
/// Create a new builder with all fields unset
810
+
pub fn new() -> Self {
811
+
FileBuilder {
812
+
_phantom_state: ::core::marker::PhantomData,
813
+
__unsafe_private_named: (None, None, None, None, None),
814
+
_phantom: ::core::marker::PhantomData,
815
+
}
816
+
}
817
+
}
818
+
819
+
impl<'a, S: file_state::State> FileBuilder<'a, S> {
820
+
/// Set the `base64` field (optional)
821
+
pub fn base64(mut self, value: impl Into<Option<bool>>) -> Self {
822
+
self.__unsafe_private_named.0 = value.into();
823
+
self
824
+
}
825
+
/// Set the `base64` field to an Option value (optional)
826
+
pub fn maybe_base64(mut self, value: Option<bool>) -> Self {
827
+
self.__unsafe_private_named.0 = value;
828
+
self
829
+
}
830
+
}
831
+
832
+
impl<'a, S> FileBuilder<'a, S>
833
+
where
834
+
S: file_state::State,
835
+
S::Blob: file_state::IsUnset,
836
+
{
837
+
/// Set the `blob` field (required)
838
+
pub fn blob(
839
+
mut self,
840
+
value: impl Into<jacquard_common::types::blob::BlobRef<'a>>,
841
+
) -> FileBuilder<'a, file_state::SetBlob<S>> {
842
+
self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
843
+
FileBuilder {
844
+
_phantom_state: ::core::marker::PhantomData,
845
+
__unsafe_private_named: self.__unsafe_private_named,
846
+
_phantom: ::core::marker::PhantomData,
847
+
}
848
+
}
849
+
}
850
+
851
+
impl<'a, S: file_state::State> FileBuilder<'a, S> {
852
+
/// Set the `encoding` field (optional)
853
+
pub fn encoding(
854
+
mut self,
855
+
value: impl Into<Option<jacquard_common::CowStr<'a>>>,
856
+
) -> Self {
857
+
self.__unsafe_private_named.2 = value.into();
858
+
self
859
+
}
860
+
/// Set the `encoding` field to an Option value (optional)
861
+
pub fn maybe_encoding(mut self, value: Option<jacquard_common::CowStr<'a>>) -> Self {
862
+
self.__unsafe_private_named.2 = value;
863
+
self
864
+
}
865
+
}
866
+
867
+
impl<'a, S: file_state::State> FileBuilder<'a, S> {
868
+
/// Set the `mimeType` field (optional)
869
+
pub fn mime_type(
870
+
mut self,
871
+
value: impl Into<Option<jacquard_common::CowStr<'a>>>,
872
+
) -> Self {
873
+
self.__unsafe_private_named.3 = value.into();
874
+
self
875
+
}
876
+
/// Set the `mimeType` field to an Option value (optional)
877
+
pub fn maybe_mime_type(
878
+
mut self,
879
+
value: Option<jacquard_common::CowStr<'a>>,
880
+
) -> Self {
881
+
self.__unsafe_private_named.3 = value;
882
+
self
883
+
}
884
+
}
885
+
886
+
impl<'a, S> FileBuilder<'a, S>
887
+
where
888
+
S: file_state::State,
889
+
S::Type: file_state::IsUnset,
890
+
{
891
+
/// Set the `type` field (required)
892
+
pub fn r#type(
893
+
mut self,
894
+
value: impl Into<jacquard_common::CowStr<'a>>,
895
+
) -> FileBuilder<'a, file_state::SetType<S>> {
896
+
self.__unsafe_private_named.4 = ::core::option::Option::Some(value.into());
897
+
FileBuilder {
898
+
_phantom_state: ::core::marker::PhantomData,
899
+
__unsafe_private_named: self.__unsafe_private_named,
900
+
_phantom: ::core::marker::PhantomData,
901
+
}
902
+
}
903
+
}
904
+
905
+
impl<'a, S> FileBuilder<'a, S>
906
+
where
907
+
S: file_state::State,
908
+
S::Type: file_state::IsSet,
909
+
S::Blob: file_state::IsSet,
910
+
{
911
+
/// Build the final struct
912
+
pub fn build(self) -> File<'a> {
913
+
File {
914
+
base64: self.__unsafe_private_named.0,
915
+
blob: self.__unsafe_private_named.1.unwrap(),
916
+
encoding: self.__unsafe_private_named.2,
917
+
mime_type: self.__unsafe_private_named.3,
918
+
r#type: self.__unsafe_private_named.4.unwrap(),
919
+
extra_data: Default::default(),
920
+
}
921
+
}
922
+
/// Build the final struct with custom extra_data
923
+
pub fn build_with_data(
924
+
self,
925
+
extra_data: std::collections::BTreeMap<
926
+
jacquard_common::smol_str::SmolStr,
927
+
jacquard_common::types::value::Data<'a>,
928
+
>,
929
+
) -> File<'a> {
930
+
File {
931
+
base64: self.__unsafe_private_named.0,
932
+
blob: self.__unsafe_private_named.1.unwrap(),
933
+
encoding: self.__unsafe_private_named.2,
934
+
mime_type: self.__unsafe_private_named.3,
935
+
r#type: self.__unsafe_private_named.4.unwrap(),
936
+
extra_data: Some(extra_data),
937
+
}
938
+
}
939
+
}
940
+
941
+
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for File<'a> {
942
+
fn nsid() -> &'static str {
943
+
"place.wisp.subfs"
944
+
}
945
+
fn def_name() -> &'static str {
946
+
"file"
947
+
}
948
+
fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
949
+
lexicon_doc_place_wisp_subfs()
950
+
}
951
+
fn validate(
952
+
&self,
953
+
) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
954
+
Ok(())
955
+
}
956
+
}
957
+
958
+
/// Virtual filesystem subtree referenced by place.wisp.fs records. When a subfs entry is expanded, its root entries are merged (flattened) into the parent directory, allowing large directories to be split across multiple records while maintaining a flat structure.
959
+
#[jacquard_derive::lexicon]
960
+
#[derive(
961
+
serde::Serialize,
962
+
serde::Deserialize,
963
+
Debug,
964
+
Clone,
965
+
PartialEq,
966
+
Eq,
967
+
jacquard_derive::IntoStatic
968
+
)]
969
+
#[serde(rename_all = "camelCase")]
970
+
pub struct SubfsRecord<'a> {
971
+
pub created_at: jacquard_common::types::string::Datetime,
972
+
#[serde(skip_serializing_if = "std::option::Option::is_none")]
973
+
pub file_count: Option<i64>,
974
+
#[serde(borrow)]
975
+
pub root: crate::place_wisp::subfs::Directory<'a>,
976
+
}
977
+
978
+
pub mod subfs_record_state {
979
+
980
+
pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
981
+
#[allow(unused)]
982
+
use ::core::marker::PhantomData;
983
+
mod sealed {
984
+
pub trait Sealed {}
985
+
}
986
+
/// State trait tracking which required fields have been set
987
+
pub trait State: sealed::Sealed {
988
+
type Root;
989
+
type CreatedAt;
990
+
}
991
+
/// Empty state - all required fields are unset
992
+
pub struct Empty(());
993
+
impl sealed::Sealed for Empty {}
994
+
impl State for Empty {
995
+
type Root = Unset;
996
+
type CreatedAt = Unset;
997
+
}
998
+
///State transition - sets the `root` field to Set
999
+
pub struct SetRoot<S: State = Empty>(PhantomData<fn() -> S>);
1000
+
impl<S: State> sealed::Sealed for SetRoot<S> {}
1001
+
impl<S: State> State for SetRoot<S> {
1002
+
type Root = Set<members::root>;
1003
+
type CreatedAt = S::CreatedAt;
1004
+
}
1005
+
///State transition - sets the `created_at` field to Set
1006
+
pub struct SetCreatedAt<S: State = Empty>(PhantomData<fn() -> S>);
1007
+
impl<S: State> sealed::Sealed for SetCreatedAt<S> {}
1008
+
impl<S: State> State for SetCreatedAt<S> {
1009
+
type Root = S::Root;
1010
+
type CreatedAt = Set<members::created_at>;
1011
+
}
1012
+
/// Marker types for field names
1013
+
#[allow(non_camel_case_types)]
1014
+
pub mod members {
1015
+
///Marker type for the `root` field
1016
+
pub struct root(());
1017
+
///Marker type for the `created_at` field
1018
+
pub struct created_at(());
1019
+
}
1020
+
}
1021
+
1022
+
/// Builder for constructing an instance of this type
1023
+
pub struct SubfsRecordBuilder<'a, S: subfs_record_state::State> {
1024
+
_phantom_state: ::core::marker::PhantomData<fn() -> S>,
1025
+
__unsafe_private_named: (
1026
+
::core::option::Option<jacquard_common::types::string::Datetime>,
1027
+
::core::option::Option<i64>,
1028
+
::core::option::Option<crate::place_wisp::subfs::Directory<'a>>,
1029
+
),
1030
+
_phantom: ::core::marker::PhantomData<&'a ()>,
1031
+
}
1032
+
1033
+
impl<'a> SubfsRecord<'a> {
1034
+
/// Create a new builder for this type
1035
+
pub fn new() -> SubfsRecordBuilder<'a, subfs_record_state::Empty> {
1036
+
SubfsRecordBuilder::new()
1037
+
}
1038
+
}
1039
+
1040
+
impl<'a> SubfsRecordBuilder<'a, subfs_record_state::Empty> {
1041
+
/// Create a new builder with all fields unset
1042
+
pub fn new() -> Self {
1043
+
SubfsRecordBuilder {
1044
+
_phantom_state: ::core::marker::PhantomData,
1045
+
__unsafe_private_named: (None, None, None),
1046
+
_phantom: ::core::marker::PhantomData,
1047
+
}
1048
+
}
1049
+
}
1050
+
1051
+
impl<'a, S> SubfsRecordBuilder<'a, S>
1052
+
where
1053
+
S: subfs_record_state::State,
1054
+
S::CreatedAt: subfs_record_state::IsUnset,
1055
+
{
1056
+
/// Set the `createdAt` field (required)
1057
+
pub fn created_at(
1058
+
mut self,
1059
+
value: impl Into<jacquard_common::types::string::Datetime>,
1060
+
) -> SubfsRecordBuilder<'a, subfs_record_state::SetCreatedAt<S>> {
1061
+
self.__unsafe_private_named.0 = ::core::option::Option::Some(value.into());
1062
+
SubfsRecordBuilder {
1063
+
_phantom_state: ::core::marker::PhantomData,
1064
+
__unsafe_private_named: self.__unsafe_private_named,
1065
+
_phantom: ::core::marker::PhantomData,
1066
+
}
1067
+
}
1068
+
}
1069
+
1070
+
impl<'a, S: subfs_record_state::State> SubfsRecordBuilder<'a, S> {
1071
+
/// Set the `fileCount` field (optional)
1072
+
pub fn file_count(mut self, value: impl Into<Option<i64>>) -> Self {
1073
+
self.__unsafe_private_named.1 = value.into();
1074
+
self
1075
+
}
1076
+
/// Set the `fileCount` field to an Option value (optional)
1077
+
pub fn maybe_file_count(mut self, value: Option<i64>) -> Self {
1078
+
self.__unsafe_private_named.1 = value;
1079
+
self
1080
+
}
1081
+
}
1082
+
1083
+
impl<'a, S> SubfsRecordBuilder<'a, S>
1084
+
where
1085
+
S: subfs_record_state::State,
1086
+
S::Root: subfs_record_state::IsUnset,
1087
+
{
1088
+
/// Set the `root` field (required)
1089
+
pub fn root(
1090
+
mut self,
1091
+
value: impl Into<crate::place_wisp::subfs::Directory<'a>>,
1092
+
) -> SubfsRecordBuilder<'a, subfs_record_state::SetRoot<S>> {
1093
+
self.__unsafe_private_named.2 = ::core::option::Option::Some(value.into());
1094
+
SubfsRecordBuilder {
1095
+
_phantom_state: ::core::marker::PhantomData,
1096
+
__unsafe_private_named: self.__unsafe_private_named,
1097
+
_phantom: ::core::marker::PhantomData,
1098
+
}
1099
+
}
1100
+
}
1101
+
1102
+
// Terminal builder methods: only callable once both required fields (`root`
// and `createdAt`) are marked set, so the `unwrap()` calls below cannot panic.
impl<'a, S> SubfsRecordBuilder<'a, S>
where
    S: subfs_record_state::State,
    S::Root: subfs_record_state::IsSet,
    S::CreatedAt: subfs_record_state::IsSet,
{
    /// Build the final struct
    pub fn build(self) -> SubfsRecord<'a> {
        SubfsRecord {
            // Safe: `IsSet` bounds guarantee slots 0 and 2 were populated.
            created_at: self.__unsafe_private_named.0.unwrap(),
            file_count: self.__unsafe_private_named.1,
            root: self.__unsafe_private_named.2.unwrap(),
            extra_data: Default::default(),
        }
    }
    /// Build the final struct with custom extra_data (unrecognized lexicon
    /// fields carried alongside the typed ones).
    pub fn build_with_data(
        self,
        extra_data: std::collections::BTreeMap<
            jacquard_common::smol_str::SmolStr,
            jacquard_common::types::value::Data<'a>,
        >,
    ) -> SubfsRecord<'a> {
        SubfsRecord {
            created_at: self.__unsafe_private_named.0.unwrap(),
            file_count: self.__unsafe_private_named.1,
            root: self.__unsafe_private_named.2.unwrap(),
            extra_data: Some(extra_data),
        }
    }
}
1133
+
1134
+
impl<'a> SubfsRecord<'a> {
    /// Parse an AT-URI string into a typed `RecordUri` for this collection's
    /// marker type. Returns a `UriError` if the string is not a valid AT-URI
    /// (or fails `RecordUri`'s own checks — see `try_from_uri`).
    pub fn uri(
        uri: impl Into<jacquard_common::CowStr<'a>>,
    ) -> Result<
        jacquard_common::types::uri::RecordUri<'a, SubfsRecordRecord>,
        jacquard_common::types::uri::UriError,
    > {
        jacquard_common::types::uri::RecordUri::try_from_uri(
            jacquard_common::types::string::AtUri::new_cow(uri.into())?,
        )
    }
}
1146
+
1147
+
/// Typed wrapper for GetRecord response with this collection's record type.
#[derive(
    serde::Serialize,
    serde::Deserialize,
    Debug,
    Clone,
    PartialEq,
    Eq,
    jacquard_derive::IntoStatic
)]
#[serde(rename_all = "camelCase")]
pub struct SubfsRecordGetRecordOutput<'a> {
    // Optional CID of the fetched record version; omitted from JSON when None.
    #[serde(skip_serializing_if = "std::option::Option::is_none")]
    #[serde(borrow)]
    pub cid: std::option::Option<jacquard_common::types::string::Cid<'a>>,
    // AT-URI of the record that was fetched.
    #[serde(borrow)]
    pub uri: jacquard_common::types::string::AtUri<'a>,
    // The deserialized record payload itself.
    #[serde(borrow)]
    pub value: SubfsRecord<'a>,
}

// Convenience conversion: drop the envelope (uri/cid) and keep the record,
// promoted to the 'static lifetime via `into_static`.
impl From<SubfsRecordGetRecordOutput<'_>> for SubfsRecord<'_> {
    fn from(output: SubfsRecordGetRecordOutput<'_>) -> Self {
        use jacquard_common::IntoStatic;
        output.value.into_static()
    }
}

// Associates the record type with its collection NSID.
impl jacquard_common::types::collection::Collection for SubfsRecord<'_> {
    const NSID: &'static str = "place.wisp.subfs";
    type Record = SubfsRecordRecord;
}

/// Marker type for deserializing records from this collection.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct SubfsRecordRecord;
// XRPC response description: getRecord for this collection yields a
// `SubfsRecordGetRecordOutput` encoded as JSON.
impl jacquard_common::xrpc::XrpcResp for SubfsRecordRecord {
    const NSID: &'static str = "place.wisp.subfs";
    const ENCODING: &'static str = "application/json";
    type Output<'de> = SubfsRecordGetRecordOutput<'de>;
    type Err<'de> = jacquard_common::types::collection::RecordError<'de>;
}

// The marker type is its own `Collection::Record`.
impl jacquard_common::types::collection::Collection for SubfsRecordRecord {
    const NSID: &'static str = "place.wisp.subfs";
    type Record = SubfsRecordRecord;
}
1194
+
1195
+
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for SubfsRecord<'a> {
    fn nsid() -> &'static str {
        "place.wisp.subfs"
    }
    fn def_name() -> &'static str {
        "main"
    }
    fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
        lexicon_doc_place_wisp_subfs()
    }
    /// Enforce the lexicon constraints on this record: `fileCount`, when
    /// present, must lie within the inclusive range 0..=1000. Returns the
    /// first violated constraint.
    fn validate(
        &self,
    ) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
        // Upper bound: fileCount <= 1000.
        if let Some(ref value) = self.file_count {
            if *value > 1000i64 {
                return Err(::jacquard_lexicon::validation::ConstraintError::Maximum {
                    path: ::jacquard_lexicon::validation::ValidationPath::from_field(
                        "file_count",
                    ),
                    max: 1000i64,
                    actual: *value,
                });
            }
        }
        // Lower bound: fileCount >= 0.
        if let Some(ref value) = self.file_count {
            if *value < 0i64 {
                return Err(::jacquard_lexicon::validation::ConstraintError::Minimum {
                    path: ::jacquard_lexicon::validation::ValidationPath::from_field(
                        "file_count",
                    ),
                    min: 0i64,
                    actual: *value,
                });
            }
        }
        Ok(())
    }
}
1233
+
1234
+
// Sub-filesystem reference node for the `place.wisp.subfs` lexicon.
#[jacquard_derive::lexicon]
#[derive(
    serde::Serialize,
    serde::Deserialize,
    Debug,
    Clone,
    PartialEq,
    Eq,
    jacquard_derive::IntoStatic
)]
#[serde(rename_all = "camelCase")]
pub struct Subfs<'a> {
    /// AT-URI pointing to another place.wisp.subfs record for nested subtrees. When expanded, the referenced record's root entries are merged (flattened) into the parent directory, allowing recursive splitting of large directory structures.
    #[serde(borrow)]
    pub subject: jacquard_common::types::string::AtUri<'a>,
    // Node-type discriminator string (serialized as `type`).
    #[serde(borrow)]
    pub r#type: jacquard_common::CowStr<'a>,
}

// Compile-time typestate machinery for `SubfsBuilder`: tracks, in the type
// system, which required fields (`type`, `subject`) have been set.
pub mod subfs_state {

    pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
    #[allow(unused)]
    use ::core::marker::PhantomData;
    // Sealed-trait pattern: prevents downstream crates from implementing
    // `State` with arbitrary types.
    mod sealed {
        pub trait Sealed {}
    }
    /// State trait tracking which required fields have been set
    pub trait State: sealed::Sealed {
        type Type;
        type Subject;
    }
    /// Empty state - all required fields are unset
    pub struct Empty(());
    impl sealed::Sealed for Empty {}
    impl State for Empty {
        type Type = Unset;
        type Subject = Unset;
    }
    ///State transition - sets the `type` field to Set
    pub struct SetType<S: State = Empty>(PhantomData<fn() -> S>);
    impl<S: State> sealed::Sealed for SetType<S> {}
    impl<S: State> State for SetType<S> {
        type Type = Set<members::r#type>;
        type Subject = S::Subject;
    }
    ///State transition - sets the `subject` field to Set
    pub struct SetSubject<S: State = Empty>(PhantomData<fn() -> S>);
    impl<S: State> sealed::Sealed for SetSubject<S> {}
    impl<S: State> State for SetSubject<S> {
        type Type = S::Type;
        type Subject = Set<members::subject>;
    }
    /// Marker types for field names
    #[allow(non_camel_case_types)]
    pub mod members {
        ///Marker type for the `type` field
        pub struct r#type(());
        ///Marker type for the `subject` field
        pub struct subject(());
    }
}
1296
+
1297
+
/// Builder for constructing an instance of this type
pub struct SubfsBuilder<'a, S: subfs_state::State> {
    // Zero-sized typestate marker; `fn() -> S` keeps the builder covariant
    // and Send/Sync regardless of S.
    _phantom_state: ::core::marker::PhantomData<fn() -> S>,
    // Field storage as a positional tuple: (subject, type). Private so only
    // the generated setters mutate it.
    __unsafe_private_named: (
        ::core::option::Option<jacquard_common::types::string::AtUri<'a>>,
        ::core::option::Option<jacquard_common::CowStr<'a>>,
    ),
    // Ties the builder to lifetime 'a without storing a reference.
    _phantom: ::core::marker::PhantomData<&'a ()>,
}

impl<'a> Subfs<'a> {
    /// Create a new builder for this type
    pub fn new() -> SubfsBuilder<'a, subfs_state::Empty> {
        SubfsBuilder::new()
    }
}

impl<'a> SubfsBuilder<'a, subfs_state::Empty> {
    /// Create a new builder with all fields unset
    pub fn new() -> Self {
        SubfsBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: (None, None),
            _phantom: ::core::marker::PhantomData,
        }
    }
}
1324
+
1325
+
// Typestate setter: `subject` is only settable while unset; the return type
// advances the state to `SetSubject<S>`.
impl<'a, S> SubfsBuilder<'a, S>
where
    S: subfs_state::State,
    S::Subject: subfs_state::IsUnset,
{
    /// Set the `subject` field (required)
    pub fn subject(
        mut self,
        value: impl Into<jacquard_common::types::string::AtUri<'a>>,
    ) -> SubfsBuilder<'a, subfs_state::SetSubject<S>> {
        // Slot 0 of the private tuple stores `subject`.
        self.__unsafe_private_named.0 = ::core::option::Option::Some(value.into());
        SubfsBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: self.__unsafe_private_named,
            _phantom: ::core::marker::PhantomData,
        }
    }
}

// Typestate setter: `type` is only settable while unset; the return type
// advances the state to `SetType<S>`.
impl<'a, S> SubfsBuilder<'a, S>
where
    S: subfs_state::State,
    S::Type: subfs_state::IsUnset,
{
    /// Set the `type` field (required)
    pub fn r#type(
        mut self,
        value: impl Into<jacquard_common::CowStr<'a>>,
    ) -> SubfsBuilder<'a, subfs_state::SetType<S>> {
        // Slot 1 of the private tuple stores `type`.
        self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
        SubfsBuilder {
            _phantom_state: ::core::marker::PhantomData,
            __unsafe_private_named: self.__unsafe_private_named,
            _phantom: ::core::marker::PhantomData,
        }
    }
}
1362
+
1363
+
// Terminal builder methods: require both `type` and `subject` to be set, so
// the `unwrap()` calls below cannot panic.
impl<'a, S> SubfsBuilder<'a, S>
where
    S: subfs_state::State,
    S::Type: subfs_state::IsSet,
    S::Subject: subfs_state::IsSet,
{
    /// Build the final struct
    pub fn build(self) -> Subfs<'a> {
        Subfs {
            // Safe: `IsSet` bounds guarantee both slots were populated.
            subject: self.__unsafe_private_named.0.unwrap(),
            r#type: self.__unsafe_private_named.1.unwrap(),
            extra_data: Default::default(),
        }
    }
    /// Build the final struct with custom extra_data (unrecognized lexicon
    /// fields carried alongside the typed ones).
    pub fn build_with_data(
        self,
        extra_data: std::collections::BTreeMap<
            jacquard_common::smol_str::SmolStr,
            jacquard_common::types::value::Data<'a>,
        >,
    ) -> Subfs<'a> {
        Subfs {
            subject: self.__unsafe_private_named.0.unwrap(),
            r#type: self.__unsafe_private_named.1.unwrap(),
            extra_data: Some(extra_data),
        }
    }
}
1392
+
1393
+
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Subfs<'a> {
    fn nsid() -> &'static str {
        "place.wisp.subfs"
    }
    // This type corresponds to the `subfs` def (not `main`) in the doc.
    fn def_name() -> &'static str {
        "subfs"
    }
    fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
        lexicon_doc_place_wisp_subfs()
    }
    /// No field-level constraints are declared for this def; always valid.
    fn validate(
        &self,
    ) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
        Ok(())
    }
}
+433
-55
cli/src/pull.rs
+433
-55
cli/src/pull.rs
···
2
2
use crate::download;
3
3
use crate::metadata::SiteMetadata;
4
4
use crate::place_wisp::fs::*;
5
+
use crate::subfs_utils;
5
6
use jacquard::CowStr;
6
7
use jacquard::prelude::IdentityResolver;
7
8
use jacquard_common::types::string::Did;
···
66
67
let fs_record: Fs = from_data(&record_output.value).into_diagnostic()?;
67
68
68
69
let file_count = fs_record.file_count.map(|c| c.to_string()).unwrap_or_else(|| "?".to_string());
69
-
println!("Found site '{}' with {} files", fs_record.site, file_count);
70
+
println!("Found site '{}' with {} files (in main record)", fs_record.site, file_count);
71
+
72
+
// Check for and expand subfs nodes
73
+
let expanded_root = expand_subfs_in_pull(&fs_record.root, &pds_url, did.as_str()).await?;
74
+
let total_file_count = subfs_utils::count_files_in_directory(&expanded_root);
75
+
76
+
if total_file_count as i64 != fs_record.file_count.unwrap_or(0) {
77
+
println!("Total files after expanding subfs: {}", total_file_count);
78
+
}
70
79
71
80
// Load existing metadata for incremental updates
72
81
let existing_metadata = SiteMetadata::load(&output_dir)?;
···
75
84
.map(|m| m.file_cids.clone())
76
85
.unwrap_or_default();
77
86
78
-
// Extract blob map from the new manifest
79
-
let new_blob_map = blob_map::extract_blob_map(&fs_record.root);
87
+
// Extract blob map from the expanded manifest
88
+
let new_blob_map = blob_map::extract_blob_map(&expanded_root);
80
89
let new_file_cids: HashMap<String, String> = new_blob_map
81
90
.iter()
82
91
.map(|(path, (_blob_ref, cid))| (path.clone(), cid.clone()))
···
96
105
}
97
106
}
98
107
99
-
// Check if we need to update (but only if output directory actually exists with files)
108
+
// Check if we need to update (verify files actually exist, not just metadata)
100
109
if let Some(metadata) = &existing_metadata {
101
110
if metadata.record_cid == record_cid {
102
-
// Verify that the output directory actually exists and has content
103
-
let has_content = output_dir.exists() &&
104
-
output_dir.read_dir()
105
-
.map(|mut entries| entries.any(|e| {
106
-
if let Ok(entry) = e {
107
-
!entry.file_name().to_string_lossy().starts_with(".wisp-metadata")
108
-
} else {
109
-
false
111
+
// Verify that the output directory actually exists and has the expected files
112
+
let has_all_files = output_dir.exists() && {
113
+
// Count actual files on disk (excluding metadata)
114
+
let mut actual_file_count = 0;
115
+
if let Ok(entries) = std::fs::read_dir(&output_dir) {
116
+
for entry in entries.flatten() {
117
+
let name = entry.file_name();
118
+
if !name.to_string_lossy().starts_with(".wisp-metadata") {
119
+
if entry.path().is_file() {
120
+
actual_file_count += 1;
121
+
}
110
122
}
111
-
}))
112
-
.unwrap_or(false);
113
-
114
-
if has_content {
123
+
}
124
+
}
125
+
126
+
// Compare with expected file count from metadata
127
+
let expected_count = metadata.file_cids.len();
128
+
actual_file_count > 0 && actual_file_count >= expected_count
129
+
};
130
+
131
+
if has_all_files {
115
132
println!("Site is already up to date!");
116
133
return Ok(());
134
+
} else {
135
+
println!("Site metadata exists but files are missing, re-downloading...");
117
136
}
118
137
}
119
138
}
···
133
152
let mut downloaded = 0;
134
153
let mut reused = 0;
135
154
136
-
// Download files recursively
155
+
// Download files recursively (using expanded root)
137
156
let download_result = download_directory(
138
-
&fs_record.root,
157
+
&expanded_root,
139
158
&temp_dir,
140
159
&pds_url,
141
160
did.as_str(),
···
218
237
Ok(())
219
238
}
220
239
221
-
/// Recursively download a directory
240
+
/// Recursively download a directory with concurrent downloads
222
241
fn download_directory<'a>(
223
242
dir: &'a Directory<'_>,
224
243
output_dir: &'a Path,
···
232
251
reused: &'a mut usize,
233
252
) -> std::pin::Pin<Box<dyn std::future::Future<Output = miette::Result<()>> + Send + 'a>> {
234
253
Box::pin(async move {
254
+
use futures::stream::{self, StreamExt};
255
+
256
+
// Collect download tasks and directory tasks separately
257
+
struct DownloadTask {
258
+
path: String,
259
+
output_path: PathBuf,
260
+
blob: jacquard_common::types::blob::BlobRef<'static>,
261
+
base64: bool,
262
+
gzip: bool,
263
+
}
264
+
265
+
struct CopyTask {
266
+
path: String,
267
+
from: PathBuf,
268
+
to: PathBuf,
269
+
}
270
+
271
+
let mut download_tasks = Vec::new();
272
+
let mut copy_tasks = Vec::new();
273
+
let mut dir_tasks = Vec::new();
274
+
235
275
for entry in &dir.entries {
236
276
let entry_name = entry.name.as_str();
237
277
let current_path = if path_prefix.is_empty() {
···
245
285
let output_path = output_dir.join(entry_name);
246
286
247
287
// Check if file CID matches existing
248
-
if let Some((_blob_ref, new_cid)) = new_blob_map.get(¤t_path) {
288
+
let should_copy = if let Some((_blob_ref, new_cid)) = new_blob_map.get(¤t_path) {
249
289
if let Some(existing_cid) = existing_file_cids.get(¤t_path) {
250
290
if existing_cid == new_cid {
251
-
// File unchanged, copy from existing directory
252
291
let existing_path = existing_output_dir.join(¤t_path);
253
292
if existing_path.exists() {
254
-
std::fs::copy(&existing_path, &output_path).into_diagnostic()?;
255
-
*reused += 1;
256
-
println!(" ✓ Reused {}", current_path);
257
-
continue;
293
+
copy_tasks.push(CopyTask {
294
+
path: current_path.clone(),
295
+
from: existing_path,
296
+
to: output_path.clone(),
297
+
});
298
+
true
299
+
} else {
300
+
false
258
301
}
302
+
} else {
303
+
false
259
304
}
305
+
} else {
306
+
false
260
307
}
261
-
}
262
-
263
-
// File is new or changed, download it
264
-
println!(" ↓ Downloading {}", current_path);
265
-
let data = download::download_and_decompress_blob(
266
-
pds_url,
267
-
&file.blob,
268
-
did,
269
-
file.base64.unwrap_or(false),
270
-
file.encoding.as_ref().map(|e| e.as_str() == "gzip").unwrap_or(false),
271
-
)
272
-
.await?;
308
+
} else {
309
+
false
310
+
};
273
311
274
-
std::fs::write(&output_path, data).into_diagnostic()?;
275
-
*downloaded += 1;
312
+
if !should_copy {
313
+
use jacquard_common::IntoStatic;
314
+
// File needs to be downloaded
315
+
download_tasks.push(DownloadTask {
316
+
path: current_path,
317
+
output_path,
318
+
blob: file.blob.clone().into_static(),
319
+
base64: file.base64.unwrap_or(false),
320
+
gzip: file.encoding.as_ref().map(|e| e.as_str() == "gzip").unwrap_or(false),
321
+
});
322
+
}
276
323
}
277
324
EntryNode::Directory(subdir) => {
278
325
let subdir_path = output_dir.join(entry_name);
279
-
std::fs::create_dir_all(&subdir_path).into_diagnostic()?;
280
-
281
-
download_directory(
282
-
subdir,
283
-
&subdir_path,
284
-
pds_url,
285
-
did,
286
-
new_blob_map,
287
-
existing_file_cids,
288
-
existing_output_dir,
289
-
current_path,
290
-
downloaded,
291
-
reused,
292
-
)
293
-
.await?;
326
+
dir_tasks.push((subdir.as_ref().clone(), subdir_path, current_path));
327
+
}
328
+
EntryNode::Subfs(_) => {
329
+
println!(" ⚠ Skipping subfs node at {} (should have been expanded)", current_path);
294
330
}
295
331
EntryNode::Unknown(_) => {
296
-
// Skip unknown node types
297
332
println!(" ⚠ Skipping unknown node type for {}", current_path);
298
333
}
299
334
}
300
335
}
301
336
337
+
// Execute copy tasks (fast, do them all)
338
+
for task in copy_tasks {
339
+
std::fs::copy(&task.from, &task.to).into_diagnostic()?;
340
+
*reused += 1;
341
+
println!(" ✓ Reused {}", task.path);
342
+
}
343
+
344
+
// Execute download tasks with concurrency limit (20 concurrent downloads)
345
+
const DOWNLOAD_CONCURRENCY: usize = 20;
346
+
347
+
let pds_url_clone = pds_url.clone();
348
+
let did_str = did.to_string();
349
+
350
+
let download_results: Vec<miette::Result<(String, PathBuf, Vec<u8>)>> = stream::iter(download_tasks)
351
+
.map(|task| {
352
+
let pds = pds_url_clone.clone();
353
+
let did_copy = did_str.clone();
354
+
355
+
async move {
356
+
println!(" ↓ Downloading {}", task.path);
357
+
let data = download::download_and_decompress_blob(
358
+
&pds,
359
+
&task.blob,
360
+
&did_copy,
361
+
task.base64,
362
+
task.gzip,
363
+
)
364
+
.await?;
365
+
366
+
Ok::<_, miette::Report>((task.path, task.output_path, data))
367
+
}
368
+
})
369
+
.buffer_unordered(DOWNLOAD_CONCURRENCY)
370
+
.collect()
371
+
.await;
372
+
373
+
// Write downloaded files to disk
374
+
for result in download_results {
375
+
let (path, output_path, data) = result?;
376
+
std::fs::write(&output_path, data).into_diagnostic()?;
377
+
*downloaded += 1;
378
+
println!(" ✓ Downloaded {}", path);
379
+
}
380
+
381
+
// Recursively process directories
382
+
for (subdir, subdir_path, current_path) in dir_tasks {
383
+
std::fs::create_dir_all(&subdir_path).into_diagnostic()?;
384
+
385
+
download_directory(
386
+
&subdir,
387
+
&subdir_path,
388
+
pds_url,
389
+
did,
390
+
new_blob_map,
391
+
existing_file_cids,
392
+
existing_output_dir,
393
+
current_path,
394
+
downloaded,
395
+
reused,
396
+
)
397
+
.await?;
398
+
}
399
+
302
400
Ok(())
303
401
})
304
402
}
305
403
404
+
/// Expand subfs nodes in a directory tree by fetching and merging subfs records (RECURSIVELY)
405
+
async fn expand_subfs_in_pull<'a>(
406
+
directory: &Directory<'a>,
407
+
pds_url: &Url,
408
+
_did: &str,
409
+
) -> miette::Result<Directory<'static>> {
410
+
use crate::place_wisp::subfs::SubfsRecord;
411
+
use jacquard_common::types::value::from_data;
412
+
use jacquard_common::IntoStatic;
413
+
414
+
// Recursively fetch ALL subfs records (including nested ones)
415
+
let mut all_subfs_map: HashMap<String, crate::place_wisp::subfs::Directory> = HashMap::new();
416
+
let mut to_fetch = subfs_utils::extract_subfs_uris(directory, String::new());
417
+
418
+
if to_fetch.is_empty() {
419
+
return Ok((*directory).clone().into_static());
420
+
}
421
+
422
+
println!("Found {} subfs records, fetching recursively...", to_fetch.len());
423
+
let client = reqwest::Client::new();
424
+
425
+
// Keep fetching until we've resolved all subfs (including nested ones)
426
+
let mut iteration = 0;
427
+
const MAX_ITERATIONS: usize = 10; // Prevent infinite loops
428
+
429
+
while !to_fetch.is_empty() && iteration < MAX_ITERATIONS {
430
+
iteration += 1;
431
+
println!(" Iteration {}: fetching {} subfs records...", iteration, to_fetch.len());
432
+
433
+
let mut fetch_tasks = Vec::new();
434
+
435
+
for (uri, path) in to_fetch.clone() {
436
+
let client = client.clone();
437
+
let pds_url = pds_url.clone();
438
+
439
+
fetch_tasks.push(async move {
440
+
let parts: Vec<&str> = uri.trim_start_matches("at://").split('/').collect();
441
+
if parts.len() < 3 {
442
+
return Err(miette::miette!("Invalid subfs URI: {}", uri));
443
+
}
444
+
445
+
let _did = parts[0];
446
+
let collection = parts[1];
447
+
let rkey = parts[2];
448
+
449
+
if collection != "place.wisp.subfs" {
450
+
return Err(miette::miette!("Expected place.wisp.subfs collection, got: {}", collection));
451
+
}
452
+
453
+
use jacquard::api::com_atproto::repo::get_record::GetRecord;
454
+
use jacquard_common::types::string::Rkey as RkeyType;
455
+
use jacquard_common::types::ident::AtIdentifier;
456
+
use jacquard_common::types::string::{RecordKey, Did as DidType};
457
+
458
+
let rkey_parsed = RkeyType::new(rkey).into_diagnostic()?;
459
+
let did_parsed = DidType::new(_did).into_diagnostic()?;
460
+
461
+
let request = GetRecord::new()
462
+
.repo(AtIdentifier::Did(did_parsed))
463
+
.collection(CowStr::from("place.wisp.subfs"))
464
+
.rkey(RecordKey::from(rkey_parsed))
465
+
.build();
466
+
467
+
let response = client
468
+
.xrpc(pds_url)
469
+
.send(&request)
470
+
.await
471
+
.into_diagnostic()?;
472
+
473
+
let record_output = response.into_output().into_diagnostic()?;
474
+
let subfs_record: SubfsRecord = from_data(&record_output.value).into_diagnostic()?;
475
+
let subfs_record_static = subfs_record.into_static();
476
+
477
+
Ok::<_, miette::Report>((path, subfs_record_static))
478
+
});
479
+
}
480
+
481
+
let results: Vec<_> = futures::future::join_all(fetch_tasks).await;
482
+
483
+
// Process results and find nested subfs
484
+
let mut newly_fetched = Vec::new();
485
+
for result in results {
486
+
match result {
487
+
Ok((path, record)) => {
488
+
println!(" ✓ Fetched subfs at {}", path);
489
+
490
+
// Check for nested subfs in this record
491
+
let nested_subfs = extract_subfs_from_subfs_dir(&record.root, path.clone());
492
+
newly_fetched.extend(nested_subfs);
493
+
494
+
all_subfs_map.insert(path, record.root);
495
+
}
496
+
Err(e) => {
497
+
eprintln!(" ⚠️ Failed to fetch subfs: {}", e);
498
+
}
499
+
}
500
+
}
501
+
502
+
// Update to_fetch with only the NEW subfs we haven't fetched yet
503
+
to_fetch = newly_fetched
504
+
.into_iter()
505
+
.filter(|(uri, _)| !all_subfs_map.iter().any(|(k, _)| k == uri))
506
+
.collect();
507
+
}
508
+
509
+
if iteration >= MAX_ITERATIONS {
510
+
return Err(miette::miette!("Max iterations reached while fetching nested subfs"));
511
+
}
512
+
513
+
println!(" Total subfs records fetched: {}", all_subfs_map.len());
514
+
515
+
// Now replace all subfs nodes with their content
516
+
Ok(replace_subfs_with_content(directory.clone(), &all_subfs_map, String::new()))
517
+
}
518
+
519
+
/// Extract subfs URIs from a subfs::Directory
520
+
fn extract_subfs_from_subfs_dir(
521
+
directory: &crate::place_wisp::subfs::Directory,
522
+
current_path: String,
523
+
) -> Vec<(String, String)> {
524
+
let mut uris = Vec::new();
525
+
526
+
for entry in &directory.entries {
527
+
let full_path = if current_path.is_empty() {
528
+
entry.name.to_string()
529
+
} else {
530
+
format!("{}/{}", current_path, entry.name)
531
+
};
532
+
533
+
match &entry.node {
534
+
crate::place_wisp::subfs::EntryNode::Subfs(subfs_node) => {
535
+
uris.push((subfs_node.subject.to_string(), full_path.clone()));
536
+
}
537
+
crate::place_wisp::subfs::EntryNode::Directory(subdir) => {
538
+
let nested = extract_subfs_from_subfs_dir(subdir, full_path);
539
+
uris.extend(nested);
540
+
}
541
+
_ => {}
542
+
}
543
+
}
544
+
545
+
uris
546
+
}
547
+
548
+
/// Recursively replace subfs nodes with their actual content.
///
/// `subfs_map` is keyed by mount path (the slash-joined path of the subfs
/// node within the tree). Subfs nodes whose path is missing from the map are
/// dropped with a warning. Files and unknown nodes pass through unchanged.
fn replace_subfs_with_content(
    directory: Directory,
    subfs_map: &HashMap<String, crate::place_wisp::subfs::Directory>,
    current_path: String,
) -> Directory<'static> {
    use jacquard_common::IntoStatic;

    // flat_map lets one subfs entry expand into zero (missing), one
    // (nested/dir/file) or many (flat merge) replacement entries.
    let new_entries: Vec<Entry<'static>> = directory
        .entries
        .into_iter()
        .flat_map(|entry| {
            let full_path = if current_path.is_empty() {
                entry.name.to_string()
            } else {
                format!("{}/{}", current_path, entry.name)
            };

            match entry.node {
                EntryNode::Subfs(subfs_node) => {
                    // Check if we have this subfs record
                    if let Some(subfs_dir) = subfs_map.get(&full_path) {
                        let flat = subfs_node.flat.unwrap_or(true); // Default to flat merge

                        if flat {
                            // Flat merge: hoist subfs entries into parent
                            println!(" Merging subfs {} (flat)", full_path);
                            let converted_entries: Vec<Entry<'static>> = subfs_dir
                                .entries
                                .iter()
                                .map(|subfs_entry| convert_subfs_entry_to_fs(subfs_entry.clone().into_static()))
                                .collect();

                            converted_entries
                        } else {
                            // Nested: create a directory with the subfs name
                            println!(" Merging subfs {} (nested)", full_path);
                            let converted_entries: Vec<Entry<'static>> = subfs_dir
                                .entries
                                .iter()
                                .map(|subfs_entry| convert_subfs_entry_to_fs(subfs_entry.clone().into_static()))
                                .collect();

                            vec![Entry::new()
                                .name(entry.name.into_static())
                                .node(EntryNode::Directory(Box::new(
                                    Directory::new()
                                        .r#type(CowStr::from("directory"))
                                        .entries(converted_entries)
                                        .build()
                                )))
                                .build()]
                        }
                    } else {
                        // Subfs not found, skip with warning
                        eprintln!(" ⚠️ Subfs not found: {}", full_path);
                        vec![]
                    }
                }
                EntryNode::Directory(dir) => {
                    // Recursively process subdirectories
                    vec![Entry::new()
                        .name(entry.name.into_static())
                        .node(EntryNode::Directory(Box::new(
                            replace_subfs_with_content(*dir, subfs_map, full_path)
                        )))
                        .build()]
                }
                EntryNode::File(_) => {
                    // Pattern binds nothing, so `entry` is still whole here.
                    vec![entry.into_static()]
                }
                EntryNode::Unknown(_) => {
                    vec![entry.into_static()]
                }
            }
        })
        .collect();

    // Rebuild the directory around the (possibly expanded) entry list.
    Directory::new()
        .r#type(CowStr::from("directory"))
        .entries(new_entries)
        .build()
}
631
+
632
+
/// Convert a subfs entry to a fs entry (they have the same structure but different types)
///
/// Field-by-field rebuild: File and Directory nodes are mapped structurally,
/// Unknown nodes pass through, and any *unexpanded* nested Subfs node is
/// replaced by an empty directory (expansion should already have happened —
/// reaching that arm means it failed upstream).
fn convert_subfs_entry_to_fs(subfs_entry: crate::place_wisp::subfs::Entry<'static>) -> Entry<'static> {
    use jacquard_common::IntoStatic;

    let node = match subfs_entry.node {
        crate::place_wisp::subfs::EntryNode::File(file) => {
            // Copy every file attribute across to the fs File builder.
            EntryNode::File(Box::new(
                File::new()
                    .r#type(file.r#type.into_static())
                    .blob(file.blob.into_static())
                    .encoding(file.encoding.map(|e| e.into_static()))
                    .mime_type(file.mime_type.map(|m| m.into_static()))
                    .base64(file.base64)
                    .build()
            ))
        }
        crate::place_wisp::subfs::EntryNode::Directory(dir) => {
            // Convert children first, then wrap them in an fs Directory.
            let converted_entries: Vec<Entry<'static>> = dir
                .entries
                .into_iter()
                .map(|e| convert_subfs_entry_to_fs(e.into_static()))
                .collect();

            EntryNode::Directory(Box::new(
                Directory::new()
                    .r#type(dir.r#type.into_static())
                    .entries(converted_entries)
                    .build()
            ))
        }
        crate::place_wisp::subfs::EntryNode::Subfs(_nested_subfs) => {
            // Nested subfs should have been expanded already - if we get here, it means expansion failed
            // Treat it like a directory reference that should have been expanded
            eprintln!(" ⚠️ Warning: unexpanded nested subfs at path, treating as empty directory");
            EntryNode::Directory(Box::new(
                Directory::new()
                    .r#type(CowStr::from("directory"))
                    .entries(vec![])
                    .build()
            ))
        }
        crate::place_wisp::subfs::EntryNode::Unknown(unknown) => {
            EntryNode::Unknown(unknown)
        }
    };

    Entry::new()
        .name(subfs_entry.name.into_static())
        .node(node)
        .build()
}
683
+
+336
cli/src/subfs_utils.rs
+336
cli/src/subfs_utils.rs
···
1
+
use jacquard_common::types::string::AtUri;
2
+
use jacquard_common::types::blob::BlobRef;
3
+
use jacquard_common::IntoStatic;
4
+
use jacquard::client::{Agent, AgentSession, AgentSessionExt};
5
+
use jacquard::prelude::IdentityResolver;
6
+
use miette::IntoDiagnostic;
7
+
use std::collections::HashMap;
8
+
9
+
use crate::place_wisp::fs::{Directory as FsDirectory, EntryNode as FsEntryNode};
10
+
use crate::place_wisp::subfs::SubfsRecord;
11
+
12
+
/// Extract all subfs URIs from a directory tree with their mount paths
13
+
pub fn extract_subfs_uris(directory: &FsDirectory, current_path: String) -> Vec<(String, String)> {
14
+
let mut uris = Vec::new();
15
+
16
+
for entry in &directory.entries {
17
+
let full_path = if current_path.is_empty() {
18
+
entry.name.to_string()
19
+
} else {
20
+
format!("{}/{}", current_path, entry.name)
21
+
};
22
+
23
+
match &entry.node {
24
+
FsEntryNode::Subfs(subfs_node) => {
25
+
// Found a subfs node - store its URI and mount path
26
+
uris.push((subfs_node.subject.to_string(), full_path.clone()));
27
+
}
28
+
FsEntryNode::Directory(subdir) => {
29
+
// Recursively search subdirectories
30
+
let sub_uris = extract_subfs_uris(subdir, full_path);
31
+
uris.extend(sub_uris);
32
+
}
33
+
FsEntryNode::File(_) => {
34
+
// Files don't contain subfs
35
+
}
36
+
FsEntryNode::Unknown(_) => {
37
+
// Skip unknown nodes
38
+
}
39
+
}
40
+
}
41
+
42
+
uris
43
+
}
44
+
45
+
/// Fetch a subfs record from the PDS.
///
/// `uri` must be an AT-URI of the form `at://did/place.wisp.subfs/rkey`;
/// returns an error for malformed URIs, a wrong collection, or a failed
/// fetch/deserialization. The record is returned promoted to 'static.
pub async fn fetch_subfs_record(
    agent: &Agent<impl AgentSession + IdentityResolver>,
    uri: &str,
) -> miette::Result<SubfsRecord<'static>> {
    // Parse URI: at://did/collection/rkey
    let parts: Vec<&str> = uri.trim_start_matches("at://").split('/').collect();

    if parts.len() < 3 {
        return Err(miette::miette!("Invalid subfs URI: {}", uri));
    }

    // Split only to validate the collection segment; the full URI is what
    // actually gets fetched below.
    let _did = parts[0];
    let collection = parts[1];
    let _rkey = parts[2];

    if collection != "place.wisp.subfs" {
        return Err(miette::miette!("Expected place.wisp.subfs collection, got: {}", collection));
    }

    // Construct AT-URI for fetching
    let at_uri = AtUri::new(uri).into_diagnostic()?;

    // Fetch the record
    let response = agent.get_record::<SubfsRecord>(&at_uri).await.into_diagnostic()?;
    let record_output = response.into_output().into_diagnostic()?;

    Ok(record_output.value.into_static())
}
74
+
75
+
/// Merge blob maps from subfs records into the main blob map.
/// Returns the total number of blobs merged from all subfs records.
///
/// `subfs_uris` is a list of `(record URI, mount path)` pairs; blobs from
/// each fetched record are inserted into `main_blob_map` keyed by mount-path-
/// prefixed file path. Fetch failures are logged and skipped, not fatal.
pub async fn merge_subfs_blob_maps(
    agent: &Agent<impl AgentSession + IdentityResolver>,
    subfs_uris: Vec<(String, String)>,
    main_blob_map: &mut HashMap<String, (BlobRef<'static>, String)>,
) -> miette::Result<usize> {
    let mut total_merged = 0;

    println!("Fetching {} subfs records for blob reuse...", subfs_uris.len());

    // Fetch all subfs records in parallel (but with some concurrency limit)
    use futures::stream::{self, StreamExt};

    // buffer_unordered(5) caps in-flight fetches at 5; failures become None.
    let subfs_results: Vec<_> = stream::iter(subfs_uris)
        .map(|(uri, mount_path)| async move {
            match fetch_subfs_record(agent, &uri).await {
                Ok(record) => Some((record, mount_path)),
                Err(e) => {
                    eprintln!(" ⚠️ Failed to fetch subfs {}: {}", uri, e);
                    None
                }
            }
        })
        .buffer_unordered(5)
        .collect()
        .await;

    // Convert subfs Directory to fs Directory for blob extraction
    // Note: We need to extract blobs from the subfs record's root
    for result in subfs_results {
        if let Some((subfs_record, mount_path)) = result {
            // Extract blobs from this subfs record's root
            // The blob_map module works with fs::Directory, but subfs::Directory has the same structure
            // We need to convert or work directly with the entries

            let subfs_blob_map = extract_subfs_blobs(&subfs_record.root, mount_path.clone());
            let count = subfs_blob_map.len();

            // Later-inserted subfs paths overwrite any existing entries with
            // the same key in the main map.
            for (path, blob_info) in subfs_blob_map {
                main_blob_map.insert(path, blob_info);
            }

            total_merged += count;
            println!(" ✓ Merged {} blobs from subfs at {}", count, mount_path);
        }
    }

    Ok(total_merged)
}
125
+
126
+
/// Extract blobs from a subfs directory (works with subfs::Directory)
127
+
/// Returns a map of file paths to their blob refs and CIDs
128
+
fn extract_subfs_blobs(
129
+
directory: &crate::place_wisp::subfs::Directory,
130
+
current_path: String,
131
+
) -> HashMap<String, (BlobRef<'static>, String)> {
132
+
let mut blob_map = HashMap::new();
133
+
134
+
for entry in &directory.entries {
135
+
let full_path = if current_path.is_empty() {
136
+
entry.name.to_string()
137
+
} else {
138
+
format!("{}/{}", current_path, entry.name)
139
+
};
140
+
141
+
match &entry.node {
142
+
crate::place_wisp::subfs::EntryNode::File(file_node) => {
143
+
let blob_ref = &file_node.blob;
144
+
let cid_string = blob_ref.blob().r#ref.to_string();
145
+
blob_map.insert(
146
+
full_path,
147
+
(blob_ref.clone().into_static(), cid_string)
148
+
);
149
+
}
150
+
crate::place_wisp::subfs::EntryNode::Directory(subdir) => {
151
+
let sub_map = extract_subfs_blobs(subdir, full_path);
152
+
blob_map.extend(sub_map);
153
+
}
154
+
crate::place_wisp::subfs::EntryNode::Subfs(_nested_subfs) => {
155
+
// Nested subfs - these should be resolved recursively in the main flow
156
+
// For now, we skip them (they'll be fetched separately)
157
+
eprintln!(" ⚠️ Found nested subfs at {}, skipping (should be fetched separately)", full_path);
158
+
}
159
+
crate::place_wisp::subfs::EntryNode::Unknown(_) => {
160
+
// Skip unknown nodes
161
+
}
162
+
}
163
+
}
164
+
165
+
blob_map
166
+
}
167
+
168
+
/// Count total files in a directory tree
169
+
pub fn count_files_in_directory(directory: &FsDirectory) -> usize {
170
+
let mut count = 0;
171
+
172
+
for entry in &directory.entries {
173
+
match &entry.node {
174
+
FsEntryNode::File(_) => count += 1,
175
+
FsEntryNode::Directory(subdir) => {
176
+
count += count_files_in_directory(subdir);
177
+
}
178
+
FsEntryNode::Subfs(_) => {
179
+
// Subfs nodes don't count towards the main manifest file count
180
+
}
181
+
FsEntryNode::Unknown(_) => {}
182
+
}
183
+
}
184
+
185
+
count
186
+
}
187
+
188
+
/// Estimate JSON size of a directory tree
189
+
pub fn estimate_directory_size(directory: &FsDirectory) -> usize {
190
+
// Serialize to JSON and measure
191
+
match serde_json::to_string(directory) {
192
+
Ok(json) => json.len(),
193
+
Err(_) => 0,
194
+
}
195
+
}
196
+
197
+
/// Information about a directory that could be split into a subfs record
#[derive(Debug)]
pub struct SplittableDirectory {
    /// Slash-separated path of the directory relative to the manifest root.
    pub path: String,
    /// Owned copy of the directory subtree that would move into the subfs record.
    pub directory: FsDirectory<'static>,
    /// Estimated JSON-serialized size of the subtree in bytes
    /// (as produced by `estimate_directory_size`).
    pub size: usize,
    /// Total number of files anywhere in the subtree
    /// (as produced by `count_files_in_directory`).
    pub file_count: usize,
}
205
+
206
+
/// Find large directories that could be split into subfs records
207
+
/// Returns directories sorted by size (largest first)
208
+
pub fn find_large_directories(directory: &FsDirectory, current_path: String) -> Vec<SplittableDirectory> {
209
+
let mut result = Vec::new();
210
+
211
+
for entry in &directory.entries {
212
+
if let FsEntryNode::Directory(subdir) = &entry.node {
213
+
let dir_path = if current_path.is_empty() {
214
+
entry.name.to_string()
215
+
} else {
216
+
format!("{}/{}", current_path, entry.name)
217
+
};
218
+
219
+
let size = estimate_directory_size(subdir);
220
+
let file_count = count_files_in_directory(subdir);
221
+
222
+
result.push(SplittableDirectory {
223
+
path: dir_path.clone(),
224
+
directory: (*subdir.clone()).into_static(),
225
+
size,
226
+
file_count,
227
+
});
228
+
229
+
// Recursively find subdirectories
230
+
let subdirs = find_large_directories(subdir, dir_path);
231
+
result.extend(subdirs);
232
+
}
233
+
}
234
+
235
+
// Sort by size (largest first)
236
+
result.sort_by(|a, b| b.size.cmp(&a.size));
237
+
238
+
result
239
+
}
240
+
241
+
/// Replace a directory with a subfs node in the tree
242
+
pub fn replace_directory_with_subfs(
243
+
directory: FsDirectory<'static>,
244
+
target_path: &str,
245
+
subfs_uri: &str,
246
+
flat: bool,
247
+
) -> miette::Result<FsDirectory<'static>> {
248
+
use jacquard_common::CowStr;
249
+
use crate::place_wisp::fs::{Entry, Subfs};
250
+
251
+
let path_parts: Vec<&str> = target_path.split('/').collect();
252
+
253
+
if path_parts.is_empty() {
254
+
return Err(miette::miette!("Cannot replace root directory"));
255
+
}
256
+
257
+
// Parse the subfs URI and make it owned/'static
258
+
let at_uri = AtUri::new_cow(jacquard_common::CowStr::from(subfs_uri.to_string())).into_diagnostic()?;
259
+
260
+
// If this is a root-level directory
261
+
if path_parts.len() == 1 {
262
+
let target_name = path_parts[0];
263
+
let new_entries: Vec<Entry> = directory.entries.into_iter().map(|entry| {
264
+
if entry.name == target_name {
265
+
// Replace this directory with a subfs node
266
+
Entry::new()
267
+
.name(entry.name)
268
+
.node(FsEntryNode::Subfs(Box::new(
269
+
Subfs::new()
270
+
.r#type(CowStr::from("subfs"))
271
+
.subject(at_uri.clone())
272
+
.flat(Some(flat))
273
+
.build()
274
+
)))
275
+
.build()
276
+
} else {
277
+
entry
278
+
}
279
+
}).collect();
280
+
281
+
return Ok(FsDirectory::new()
282
+
.r#type(CowStr::from("directory"))
283
+
.entries(new_entries)
284
+
.build());
285
+
}
286
+
287
+
// Recursively navigate to parent directory
288
+
let first_part = path_parts[0];
289
+
let remaining_path = path_parts[1..].join("/");
290
+
291
+
let new_entries: Vec<Entry> = directory.entries.into_iter().filter_map(|entry| {
292
+
if entry.name == first_part {
293
+
if let FsEntryNode::Directory(subdir) = entry.node {
294
+
// Recursively process this subdirectory
295
+
match replace_directory_with_subfs((*subdir).into_static(), &remaining_path, subfs_uri, flat) {
296
+
Ok(updated_subdir) => {
297
+
Some(Entry::new()
298
+
.name(entry.name)
299
+
.node(FsEntryNode::Directory(Box::new(updated_subdir)))
300
+
.build())
301
+
}
302
+
Err(_) => None, // Skip entries that fail to update
303
+
}
304
+
} else {
305
+
Some(entry)
306
+
}
307
+
} else {
308
+
Some(entry)
309
+
}
310
+
}).collect();
311
+
312
+
Ok(FsDirectory::new()
313
+
.r#type(CowStr::from("directory"))
314
+
.entries(new_entries)
315
+
.build())
316
+
}
317
+
318
+
/// Delete a subfs record from the PDS
319
+
pub async fn delete_subfs_record(
320
+
agent: &Agent<impl AgentSession + IdentityResolver>,
321
+
uri: &str,
322
+
) -> miette::Result<()> {
323
+
use jacquard_common::types::uri::RecordUri;
324
+
325
+
// Construct AT-URI and convert to RecordUri
326
+
let at_uri = AtUri::new(uri).into_diagnostic()?;
327
+
let record_uri: RecordUri<'_, crate::place_wisp::subfs::SubfsRecordRecord> = RecordUri::try_from_uri(at_uri).into_diagnostic()?;
328
+
329
+
let rkey = record_uri.rkey()
330
+
.ok_or_else(|| miette::miette!("Invalid subfs URI: missing rkey"))?
331
+
.clone();
332
+
333
+
agent.delete_record::<SubfsRecord>(rkey).await.into_diagnostic()?;
334
+
335
+
Ok(())
336
+
}