+73 -1  Cargo.lock
···
 checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394"

 [[package]]
+name = "block-buffer"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
 name = "bumpalo"
 version = "3.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "cpufeatures"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280"
+dependencies = [
+ "libc",
+]
+
+[[package]]
 name = "criterion"
 version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5"

 [[package]]
+name = "crypto-common"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
+dependencies = [
+ "generic-array",
+ "typenum",
+]
+
+[[package]]
 name = "data-encoding"
 version = "2.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "digest"
+version = "0.10.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
+dependencies = [
+ "block-buffer",
+ "crypto-common",
+]
+
+[[package]]
 name = "either"
 version = "1.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "pin-project-lite",
  "pin-utils",
  "slab",
+]
+
+[[package]]
+name = "generic-array"
+version = "0.14.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2"
+dependencies = [
+ "typenum",
+ "version_check",
 ]

 [[package]]
···

 [[package]]
 name = "repo-stream"
-version = "0.1.1"
+version = "0.2.2"
 dependencies = [
  "bincode",
  "clap",
···
  "serde",
  "serde_bytes",
  "serde_ipld_dagcbor",
+ "sha2",
  "tempfile",
  "thiserror 2.0.17",
  "tokio",
···
 ]

 [[package]]
+name = "sha2"
+version = "0.10.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
+dependencies = [
+ "cfg-if",
+ "cpufeatures",
+ "digest",
+]
+
+[[package]]
 name = "signal-hook-registry"
 version = "1.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "typenum"
+version = "1.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb"
+
+[[package]]
 name = "unicode-ident"
 version = "1.0.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 version = "0.2.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
+
+[[package]]
+name = "version_check"
+version = "0.9.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"

 [[package]]
 name = "virtue"
+4 -3  Cargo.toml
···
 [package]
 name = "repo-stream"
-version = "0.1.1"
+version = "0.2.2"
 edition = "2024"
 license = "MIT OR Apache-2.0"
-description = "Fast and robust atproto CAR file processing in rust"
+description = "A robust CAR file -> MST walker for atproto"
 repository = "https://tangled.org/@microcosm.blue/repo-stream"

 [dependencies]
···
 serde = { version = "1.0.228", features = ["derive"] }
 serde_bytes = "0.11.19"
 serde_ipld_dagcbor = "0.6.4"
+sha2 = "0.10.9"
 thiserror = "2.0.17"
-tokio = { version = "1.47.1", features = ["rt"] }
+tokio = { version = "1.47.1", features = ["rt", "sync"] }

 [dev-dependencies]
 clap = { version = "4.5.48", features = ["derive"] }
+12 -21  benches/huge-car.rs
···
 extern crate repo_stream;
-use futures::TryStreamExt;
-use iroh_car::CarReader;
-use std::convert::Infallible;
+use repo_stream::Driver;
 use std::path::{Path, PathBuf};

 use criterion::{Criterion, criterion_group, criterion_main};
···
     });
 }

-async fn drive_car(filename: impl AsRef<Path>) {
+async fn drive_car(filename: impl AsRef<Path>) -> usize {
     let reader = tokio::fs::File::open(filename).await.unwrap();
     let reader = tokio::io::BufReader::new(reader);
-    let reader = CarReader::new(reader).await.unwrap();

-    let root = reader
-        .header()
-        .roots()
-        .first()
-        .ok_or("missing root")
+    let mut driver = match Driver::load_car(reader, |block| block.len(), 1024)
+        .await
         .unwrap()
-        .clone();
-
-    let stream = std::pin::pin!(reader.stream());
-
-    let (_commit, v) =
-        repo_stream::drive::Vehicle::init(root, stream, |block| Ok::<_, Infallible>(block.len()))
-            .await
-            .unwrap();
-    let mut record_stream = std::pin::pin!(v.stream());
+    {
+        Driver::Memory(_, mem_driver) => mem_driver,
+        Driver::Disk(_) => panic!("not doing disk for benchmark"),
+    };

-    while let Some(_) = record_stream.try_next().await.unwrap() {
-        // just here for the drive
+    let mut n = 0;
+    while let Some(pairs) = driver.next_chunk(256).await.unwrap() {
+        n += pairs.len();
     }
+    n
 }

 criterion_group!(benches, criterion_benchmark);
+16 -22  benches/non-huge-cars.rs
···
 extern crate repo_stream;
-use futures::TryStreamExt;
-use iroh_car::CarReader;
-use std::convert::Infallible;
+use repo_stream::Driver;

 use criterion::{Criterion, criterion_group, criterion_main};

+const EMPTY_CAR: &'static [u8] = include_bytes!("../car-samples/empty.car");
 const TINY_CAR: &'static [u8] = include_bytes!("../car-samples/tiny.car");
 const LITTLE_CAR: &'static [u8] = include_bytes!("../car-samples/little.car");
 const MIDSIZE_CAR: &'static [u8] = include_bytes!("../car-samples/midsize.car");
···
         .build()
         .expect("Creating runtime failed");

+    c.bench_function("empty-car", |b| {
+        b.to_async(&rt).iter(async || drive_car(EMPTY_CAR).await)
+    });
     c.bench_function("tiny-car", |b| {
         b.to_async(&rt).iter(async || drive_car(TINY_CAR).await)
     });
···
     });
 }

-async fn drive_car(bytes: &[u8]) {
-    let reader = CarReader::new(bytes).await.unwrap();
-
-    let root = reader
-        .header()
-        .roots()
-        .first()
-        .ok_or("missing root")
+async fn drive_car(bytes: &[u8]) -> usize {
+    let mut driver = match Driver::load_car(bytes, |block| block.len(), 32)
+        .await
         .unwrap()
-        .clone();
-
-    let stream = std::pin::pin!(reader.stream());
+    {
+        Driver::Memory(_, mem_driver) => mem_driver,
+        Driver::Disk(_) => panic!("not benching big cars here"),
+    };

-    let (_commit, v) =
-        repo_stream::drive::Vehicle::init(root, stream, |block| Ok::<_, Infallible>(block.len()))
-            .await
-            .unwrap();
-    let mut record_stream = std::pin::pin!(v.stream());
-
-    while let Some(_) = record_stream.try_next().await.unwrap() {
-        // just here for the drive
+    let mut n = 0;
+    while let Some(pairs) = driver.next_chunk(256).await.unwrap() {
+        n += pairs.len();
     }
+    n
 }

 criterion_group!(benches, criterion_benchmark);
car-samples/empty.car
This is a binary file and will not be displayed.
+63 -40  examples/disk-read-file/main.rs
···
+/*!
+Read a CAR file by spilling to disk
+*/
+
 extern crate repo_stream;
 use clap::Parser;
-use repo_stream::drive::Processable;
-use serde::{Deserialize, Serialize};
+use repo_stream::{DiskBuilder, Driver, DriverBuilder};
 use std::path::PathBuf;
-
-type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
+use std::time::Instant;

 #[derive(Debug, Parser)]
 struct Args {
···
     tmpfile: PathBuf,
 }

-#[derive(Clone, Serialize, Deserialize)]
-struct S(usize);
-
-impl Processable for S {
-    fn get_size(&self) -> usize {
-        0 // no additional space taken, just its stack size (newtype is free)
-    }
-}
-
 #[tokio::main]
-async fn main() -> Result<()> {
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
     env_logger::init();

     let Args { car, tmpfile } = Args::parse();
+
+    // repo-stream takes an AsyncRead as input. wrapping a filesystem read in
+    // BufReader can provide a really significant performance win.
     let reader = tokio::fs::File::open(car).await?;
     let reader = tokio::io::BufReader::new(reader);

-    // let kb = 2_usize.pow(10);
-    let mb = 2_usize.pow(20);
+    log::info!("hello! reading the car...");
+    let t0 = Instant::now();

-    let limit_mb = 32;
+    // in this example we only bother handling CARs that are too big for memory
+    // `noop` helper means: do no block processing, store the raw blocks
+    let driver = match DriverBuilder::new()
+        .with_mem_limit_mb(10) // how much memory can be used before disk spill
+        .load_car(reader)
+        .await?
+    {
+        Driver::Memory(_, _) => panic!("try this on a bigger car"),
+        Driver::Disk(big_stuff) => {
+            // we reach here if the repo was too big and needs to be spilled to
+            // disk to continue
+
+            // set up a disk store we can spill to
+            let disk_store = DiskBuilder::new().open(tmpfile).await?;
+
+            // do the spilling, get back a (similar) driver
+            let (commit, driver) = big_stuff.finish_loading(disk_store).await?;

-    let mut driver =
-        match repo_stream::drive::load_car(reader, |block| S(block.len()), 10 * mb).await? {
-            repo_stream::drive::Vehicle::Lil(_, _) => panic!("try this on a bigger car"),
-            repo_stream::drive::Vehicle::Big(big_stuff) => {
-                let disk_store = repo_stream::disk::SqliteStore::new(tmpfile.clone(), limit_mb);
-                let (commit, driver) = big_stuff.finish_loading(disk_store).await?;
-                log::warn!("big: {:?}", commit);
-                driver
-            }
-        };
+            // at this point you might want to fetch the account's signing key
+            // via the DID from the commit, and then verify the signature.
+            log::warn!("big's commit ({:?}): {:?}", t0.elapsed(), commit);

-    println!("hello!");
+            // pop the driver back out to get some code indentation relief
+            driver
+        }
+    };

+    // collect some random stats about the blocks
     let mut n = 0;
-    loop {
-        let (d, p) = driver.next_chunk(1024).await?;
-        driver = d;
-        let Some(pairs) = p else {
-            break;
-        };
+    let mut zeros = 0;
+
+    log::info!("walking...");
+
+    // this example uses the disk driver's channel mode: the tree walking is
+    // spawned onto a blocking thread, and we get chunks of rkey+blocks back
+    let (mut rx, join) = driver.to_channel(512);
+    while let Some(r) = rx.recv().await {
+        let pairs = r?;
+
+        // keep a count of the total number of blocks seen
         n += pairs.len();
-        // log::info!("got {rkey:?}");
+
+        for (_, block) in pairs {
+            // for each block, count how many bytes are equal to '0'
+            // (this is just an example, you probably want to do something more
+            // interesting)
+            zeros += block.into_iter().filter(|&b| b == b'0').count()
+        }
     }
-    // log::info!("now is the time to check mem...");
-    // tokio::time::sleep(std::time::Duration::from_secs(22)).await;
-    drop(driver);
-    log::info!("bye! {n}");
+
+    log::info!("arrived! ({:?}) joining rx...", t0.elapsed());

-    std::fs::remove_file(tmpfile).unwrap(); // need to also remove -shm -wal
+    // clean up the database. would be nice to do this in drop so it happens
+    // automatically, but some blocking work happens, so that's not allowed in
+    // async rust. 🤷‍♂️
+    join.await?.reset_store().await?;
+
+    log::info!("done. n={n} zeros={zeros}");

     Ok(())
 }
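Note: the channel mode used in this example is one of two ways to drain a `DiskDriver`; the other is the pull-style `next_chunk` loop shown in the doctest in `src/drive.rs`. A minimal sketch of the pull-style equivalent, assuming the default `noop` processor so blocks come back as `Vec<u8>`:

```rust
use repo_stream::{drive::DiskDriver, DriveError};

// pull-style draining; `driver` is what NeedDisk::finish_loading returns,
// as in the example above
async fn drain(mut driver: DiskDriver<Vec<u8>>) -> Result<usize, DriveError> {
    let mut n = 0;
    // each call hops to a blocking thread and walks until it has collected
    // up to 512 rkey + block pairs; None means the walk has finished
    while let Some(pairs) = driver.next_chunk(512).await? {
        n += pairs.len();
    }
    // drop + recreate the blocks table so the store can be reused
    driver.reset_store().await?;
    Ok(n)
}
```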
+14 -6  examples/read-file/main.rs
···
+/*!
+Read a CAR file with in-memory processing
+*/
+
 extern crate repo_stream;
 use clap::Parser;
+use repo_stream::{Driver, DriverBuilder};
 use std::path::PathBuf;

 type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
···
     let reader = tokio::fs::File::open(file).await?;
     let reader = tokio::io::BufReader::new(reader);

-    let (commit, mut driver) =
-        match repo_stream::drive::load_car(reader, |block| block.len(), 1024 * 1024).await? {
-            repo_stream::drive::Vehicle::Lil(commit, mem_driver) => (commit, mem_driver),
-            repo_stream::drive::Vehicle::Big(_) => panic!("can't handle big cars yet"),
-        };
+    let (commit, mut driver) = match DriverBuilder::new()
+        .with_block_processor(|block| block.len())
+        .load_car(reader)
+        .await?
+    {
+        Driver::Memory(commit, mem_driver) => (commit, mem_driver),
+        Driver::Disk(_) => panic!("this example doesn't handle big CARs"),
+    };

     log::info!("got commit: {commit:?}");
···
         n += pairs.len();
         // log::info!("got {rkey:?}");
     }
-    log::info!("bye! {n}");
+    log::info!("bye! total records={n}");

     Ok(())
 }
+70 -2  readme.md
···
 # repo-stream

-Fast and (aspirationally) robust atproto CAR file processing in rust
+A robust CAR file -> MST walker for atproto
+
+[![Crates.io][crates-badge]](https://crates.io/crates/repo-stream)
+[![Documentation][docs-badge]](https://docs.rs/repo-stream)
+[![Sponsor][sponsor-badge]](https://github.com/sponsors/uniphil)
+
+[crates-badge]: https://img.shields.io/crates/v/repo-stream.svg
+[docs-badge]: https://docs.rs/repo-stream/badge.svg
+[sponsor-badge]: https://img.shields.io/badge/at-microcosm-b820f9?labelColor=b820f9&logo=githubsponsors&logoColor=fff
+
+```rust
+use repo_stream::{Driver, DriverBuilder, DriveError, DiskBuilder};
+
+#[tokio::main]
+async fn main() -> Result<(), DriveError> {
+    // repo-stream takes any AsyncRead as input, like a tokio::fs::File
+    let reader = tokio::fs::File::open("repo.car").await?;
+    let reader = tokio::io::BufReader::new(reader);
+
+    // example repo workload is simply counting the total record bytes
+    let mut total_size = 0;
+
+    match DriverBuilder::new()
+        .with_mem_limit_mb(10)
+        .with_block_processor(|rec| rec.len()) // block processing: just extract the raw record size
+        .load_car(reader)
+        .await?
+    {
+
+        // if all blocks fit within memory
+        Driver::Memory(_commit, mut driver) => {
+            while let Some(chunk) = driver.next_chunk(256).await? {
+                for (_rkey, size) in chunk {
+                    total_size += size;
+                }
+            }
+        },
+
+        // if the CAR was too big for in-memory processing
+        Driver::Disk(paused) => {
+            // set up a disk store we can spill to
+            let store = DiskBuilder::new().open("some/path.db".into()).await?;
+            // do the spilling, get back a (similar) driver
+            let (_commit, mut driver) = paused.finish_loading(store).await?;
+
+            while let Some(chunk) = driver.next_chunk(256).await? {
+                for (_rkey, size) in chunk {
+                    total_size += size;
+                }
+            }
+
+            // clean up the disk store (drop tables etc)
+            driver.reset_store().await?;
+        }
+    };
+    println!("sum of size of all records: {total_size}");
+    Ok(())
+}
+```
+
+more recent todo
+
+- [ ] get an *empty* car for the test suite
+- [x] implement a max size on disk limit
+
+
+-----
+
+older stuff (to clean up):


 current car processing times (records processed into their length usize, phil's dev machine):
···
 -> yeah the commit is returned from init
 - [ ] spec compliance todos
     - [x] assert that keys are ordered and fail if not
-    - [ ] verify node mst depth from key (possibly pending [interop test fixes](https://github.com/bluesky-social/atproto-interop-tests/issues/5))
+    - [x] verify node mst depth from key (possibly pending [interop test fixes](https://github.com/bluesky-social/atproto-interop-tests/issues/5))
 - [ ] performance todos
     - [x] consume the serialized nodes into a mutable efficient format
     - [ ] maybe customize the deserialize impl to do that directly?
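The "max size on disk limit" checked off in the todo list above corresponds to `DiskBuilder::with_max_stored_mb` in `src/disk.rs`. A minimal sketch of opening a capped store (the path name here is illustrative):

```rust
use repo_stream::{DiskBuilder, DiskError, DiskStore};

// inserting more processed blocks than the cap makes the writer fail with
// DiskError::MaxSizeExceeded, which surfaces from finish_loading/next_chunk
async fn open_capped() -> Result<DiskStore, DiskError> {
    DiskBuilder::new()
        .with_max_stored_mb(1024) // allow ~1 GiB of stored blocks
        .open("capped.db".into())
        .await
}
```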
+174 -58  src/disk.rs
···
+/*!
+Disk storage for blocks on disk
+
+Currently this uses sqlite. In testing sqlite wasn't the fastest, but it seemed
+to be the best behaved in terms of both on-disk space usage and memory usage.
+
+```no_run
+# use repo_stream::{DiskBuilder, DiskError};
+# #[tokio::main]
+# async fn main() -> Result<(), DiskError> {
+let store = DiskBuilder::new()
+    .with_cache_size_mb(32)
+    .with_max_stored_mb(1024) // errors when >1GiB of processed blocks are inserted
+    .open("/some/path.db".into()).await?;
+# Ok(())
+# }
+```
+*/
+
+use crate::drive::DriveError;
 use rusqlite::OptionalExtension;
 use std::path::PathBuf;

-pub struct SqliteStore {
-    path: PathBuf,
-    limit_mb: usize,
+#[derive(Debug, thiserror::Error)]
+pub enum DiskError {
+    /// A wrapped database error
+    ///
+    /// (The wrapped err should probably be obscured to remove public-facing
+    /// sqlite bits)
+    #[error(transparent)]
+    DbError(#[from] rusqlite::Error),
+    /// A tokio blocking task failed to join
+    #[error("Failed to join a tokio blocking task: {0}")]
+    JoinError(#[from] tokio::task::JoinError),
+    /// The total size of stored blocks exceeded the allowed size
+    ///
+    /// If you need to process *really* big CARs, you can configure a higher
+    /// limit.
+    #[error("Maximum disk size reached")]
+    MaxSizeExceeded,
+    #[error("this error was replaced, seeing this is a bug.")]
+    #[doc(hidden)]
+    Stolen,
 }

-impl SqliteStore {
-    pub fn new(path: PathBuf, limit_mb: usize) -> Self {
-        Self { path, limit_mb }
+impl DiskError {
+    /// hack for ownership challenges with the disk driver
+    pub(crate) fn steal(&mut self) -> Self {
+        let mut swapped = DiskError::Stolen;
+        std::mem::swap(self, &mut swapped);
+        swapped
     }
 }

-impl SqliteStore {
-    pub async fn get_access(&mut self) -> Result<SqliteAccess, rusqlite::Error> {
-        let path = self.path.clone();
-        let limit_mb = self.limit_mb;
+/// Builder-style disk store setup
+#[derive(Debug, Clone)]
+pub struct DiskBuilder {
+    /// Database in-memory cache allowance
+    ///
+    /// Default: 32 MiB
+    pub cache_size_mb: usize,
+    /// Database stored block size limit
+    ///
+    /// Default: 10 GiB
+    ///
+    /// Note: actual size on disk may be more, but should approximately scale
+    /// with this limit
+    pub max_stored_mb: usize,
+}
+
+impl Default for DiskBuilder {
+    fn default() -> Self {
+        Self {
+            cache_size_mb: 32,
+            max_stored_mb: 10 * 1024, // 10 GiB
+        }
+    }
+}
+
+impl DiskBuilder {
+    /// Begin configuring the storage with defaults
+    pub fn new() -> Self {
+        Default::default()
+    }
+    /// Set the in-memory cache allowance for the database
+    ///
+    /// Default: 32 MiB
+    pub fn with_cache_size_mb(mut self, size: usize) -> Self {
+        self.cache_size_mb = size;
+        self
+    }
+    /// Set the approximate stored block size limit
+    ///
+    /// Default: 10 GiB
+    pub fn with_max_stored_mb(mut self, max: usize) -> Self {
+        self.max_stored_mb = max;
+        self
+    }
+    /// Open and initialize the actual disk storage
+    pub async fn open(&self, path: PathBuf) -> Result<DiskStore, DiskError> {
+        DiskStore::new(path, self.cache_size_mb, self.max_stored_mb).await
+    }
+}
+
+/// On-disk block storage
+pub struct DiskStore {
+    conn: rusqlite::Connection,
+    max_stored: usize,
+    stored: usize,
+}
+
+impl DiskStore {
+    /// Initialize a new disk store
+    pub async fn new(
+        path: PathBuf,
+        cache_mb: usize,
+        max_stored_mb: usize,
+    ) -> Result<Self, DiskError> {
+        let max_stored = max_stored_mb * 2_usize.pow(20);
         let conn = tokio::task::spawn_blocking(move || {
             let conn = rusqlite::Connection::open(path)?;

-            let sq_mb = -(2_i64.pow(10)); // negative is kibibytes for sqlite cache_size
+            let sqlite_one_mb = -(2_i64.pow(10)); // negative is kibibytes for sqlite cache_size

             // conn.pragma_update(None, "journal_mode", "OFF")?;
             // conn.pragma_update(None, "journal_mode", "MEMORY")?;
             conn.pragma_update(None, "journal_mode", "WAL")?;
             // conn.pragma_update(None, "wal_autocheckpoint", "0")?; // this lets things get a bit big on disk
             conn.pragma_update(None, "synchronous", "OFF")?;
-            conn.pragma_update(None, "cache_size", (limit_mb as i64 * sq_mb).to_string())?;
-            conn.execute(
-                "CREATE TABLE blocks (
-                    key BLOB PRIMARY KEY NOT NULL,
-                    val BLOB NOT NULL
-                ) WITHOUT ROWID",
-                (),
+            conn.pragma_update(
+                None,
+                "cache_size",
+                (cache_mb as i64 * sqlite_one_mb).to_string(),
             )?;
+            Self::reset_tables(&conn)?;

-            Ok::<_, rusqlite::Error>(conn)
+            Ok::<_, DiskError>(conn)
         })
-        .await
-        .expect("join error")?;
+        .await??;

-        Ok(SqliteAccess { conn })
+        Ok(Self {
+            conn,
+            max_stored,
+            stored: 0,
+        })
     }
-}
-
-pub struct SqliteAccess {
-    conn: rusqlite::Connection,
-}
-
-impl SqliteAccess {
-    pub fn get_writer(&'_ mut self) -> Result<SqliteWriter<'_>, rusqlite::Error> {
+    pub(crate) fn get_writer(&'_ mut self) -> Result<SqliteWriter<'_>, DiskError> {
         let tx = self.conn.transaction()?;
-        // let insert_stmt = tx.prepare("INSERT INTO blocks (key, val) VALUES (?1, ?2)")?;
-        Ok(SqliteWriter { tx: Some(tx) })
+        Ok(SqliteWriter {
+            tx,
+            stored: &mut self.stored,
+            max: self.max_stored,
+        })
     }
-    pub fn get_reader(&'_ self) -> Result<SqliteReader<'_>, rusqlite::Error> {
+    pub(crate) fn get_reader<'conn>(&'conn self) -> Result<SqliteReader<'conn>, DiskError> {
         let select_stmt = self.conn.prepare("SELECT val FROM blocks WHERE key = ?1")?;
         Ok(SqliteReader { select_stmt })
     }
-}
-
-pub struct SqliteWriter<'conn> {
-    tx: Option<rusqlite::Transaction<'conn>>,
+    /// Drop and recreate the kv table
+    pub async fn reset(self) -> Result<Self, DiskError> {
+        tokio::task::spawn_blocking(move || {
+            Self::reset_tables(&self.conn)?;
+            Ok(self)
+        })
+        .await?
+    }
+    fn reset_tables(conn: &rusqlite::Connection) -> Result<(), DiskError> {
+        conn.execute("DROP TABLE IF EXISTS blocks", ())?;
+        conn.execute(
+            "CREATE TABLE blocks (
+                key BLOB PRIMARY KEY NOT NULL,
+                val BLOB NOT NULL
+            ) WITHOUT ROWID",
+            (),
+        )?;
+        Ok(())
+    }
 }

-/// oops careful in async
-impl Drop for SqliteWriter<'_> {
-    fn drop(&mut self) {
-        let tx = self.tx.take();
-        tx.unwrap().commit().unwrap();
-    }
+pub(crate) struct SqliteWriter<'conn> {
+    tx: rusqlite::Transaction<'conn>,
+    stored: &'conn mut usize,
+    max: usize,
 }

 impl SqliteWriter<'_> {
-    pub fn put(&mut self, key: Vec<u8>, val: Vec<u8>) -> rusqlite::Result<()> {
-        let tx = self.tx.as_ref().unwrap();
-        let mut insert_stmt = tx.prepare_cached("INSERT INTO blocks (key, val) VALUES (?1, ?2)")?;
-        insert_stmt.execute((key, val))?;
-        Ok(())
-    }
-    pub fn put_many(
+    pub(crate) fn put_many(
         &mut self,
-        kv: impl Iterator<Item = (Vec<u8>, Vec<u8>)>,
-    ) -> rusqlite::Result<()> {
-        let tx = self.tx.as_ref().unwrap();
-        let mut insert_stmt = tx.prepare_cached("INSERT INTO blocks (key, val) VALUES (?1, ?2)")?;
-        for (k, v) in kv {
-            insert_stmt.execute((k, v))?;
+        kv: impl Iterator<Item = Result<(Vec<u8>, Vec<u8>), DriveError>>,
+    ) -> Result<(), DriveError> {
+        let mut insert_stmt = self
+            .tx
+            .prepare_cached("INSERT INTO blocks (key, val) VALUES (?1, ?2)")
+            .map_err(DiskError::DbError)?;
+        for pair in kv {
+            let (k, v) = pair?;
+            *self.stored += v.len();
+            if *self.stored > self.max {
+                return Err(DiskError::MaxSizeExceeded.into());
+            }
+            insert_stmt.execute((k, v)).map_err(DiskError::DbError)?;
         }
         Ok(())
     }
+    pub fn commit(self) -> Result<(), DiskError> {
+        self.tx.commit()?;
+        Ok(())
+    }
 }

-pub struct SqliteReader<'conn> {
+pub(crate) struct SqliteReader<'conn> {
     select_stmt: rusqlite::Statement<'conn>,
 }

 impl SqliteReader<'_> {
-    pub fn get(&mut self, key: Vec<u8>) -> rusqlite::Result<Option<Vec<u8>>> {
+    pub(crate) fn get(&mut self, key: Vec<u8>) -> rusqlite::Result<Option<Vec<u8>>> {
         self.select_stmt
             .query_one((&key,), |row| row.get(0))
             .optional()
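A side note on the `cache_size` pragma used in `DiskStore::new` above: sqlite interprets a negative `cache_size` as a KiB budget rather than a page count, which is why the code multiplies by `-(2_i64.pow(10))`. A standalone sketch of the same convention with rusqlite:

```rust
use rusqlite::Connection;

// PRAGMA cache_size = -N caps the page cache at N KiB, so a budget of
// cache_mb megabytes becomes -(cache_mb * 1024)
fn set_cache_mb(conn: &Connection, cache_mb: i64) -> rusqlite::Result<()> {
    conn.pragma_update(None, "cache_size", (-(cache_mb * 1024)).to_string())
}
```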
+458 -194  src/drive.rs
···
-//! Consume an MST block stream, producing an ordered stream of records
+//! Consume a CAR from an AsyncRead, producing an ordered stream of records

-use crate::disk::{SqliteAccess, SqliteStore};
+use crate::disk::{DiskError, DiskStore};
+use crate::process::Processable;
 use ipld_core::cid::Cid;
 use iroh_car::CarReader;
-use serde::de::DeserializeOwned;
 use serde::{Deserialize, Serialize};
 use std::collections::HashMap;
 use std::convert::Infallible;
-use tokio::io::AsyncRead;
+use tokio::{io::AsyncRead, sync::mpsc};

 use crate::mst::{Commit, Node};
-use crate::walk::{DiskTrip, Step, Trip, Walker};
+use crate::walk::{Step, WalkError, Walker};

 /// Errors that can happen while consuming and emitting blocks and records
 #[derive(Debug, thiserror::Error)]
···
     #[error("The MST block {0} could not be found")]
     MissingBlock(Cid),
     #[error("Failed to walk the mst tree: {0}")]
-    Tripped(#[from] Trip),
+    WalkError(#[from] WalkError),
     #[error("CAR file had no roots")]
     MissingRoot,
-}
-
-#[derive(Debug, thiserror::Error)]
-pub enum DiskDriveError {
-    #[error("Error from iroh_car: {0}")]
-    CarReader(#[from] iroh_car::Error),
-    #[error("Failed to decode commit block: {0}")]
-    BadBlock(#[from] serde_ipld_dagcbor::DecodeError<Infallible>),
     #[error("Storage error")]
-    StorageError(#[from] rusqlite::Error),
-    #[error("The Commit block reference by the root was not found")]
-    MissingCommit,
-    #[error("The MST block {0} could not be found")]
-    MissingBlock(Cid),
+    StorageError(#[from] DiskError),
     #[error("Encode error: {0}")]
     BincodeEncodeError(#[from] bincode::error::EncodeError),
-    #[error("Decode error: {0}")]
-    BincodeDecodeError(#[from] bincode::error::DecodeError),
-    #[error("disk tripped: {0}")]
-    DiskTripped(#[from] DiskTrip),
+    #[error("Tried to send on a closed channel")]
+    ChannelSendError, // SendError takes <T> which we don't need
+    #[error("Failed to join a task: {0}")]
+    JoinError(#[from] tokio::task::JoinError),
 }

-pub trait Processable: Clone + Serialize + DeserializeOwned {
-    /// the additional size taken up (not including its mem::size_of)
-    fn get_size(&self) -> usize;
+#[derive(Debug, thiserror::Error)]
+pub enum DecodeError {
+    #[error(transparent)]
+    BincodeDecodeError(#[from] bincode::error::DecodeError),
+    #[error("extra bytes remained after decoding")]
+    ExtraGarbage,
 }

+/// An in-order chunk of Rkey + (processed) Block pairs
+pub type BlockChunk<T> = Vec<(String, T)>;
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
-pub enum MaybeProcessedBlock<T> {
+pub(crate) enum MaybeProcessedBlock<T> {
     /// A block that's *probably* a Node (but we can't know yet)
     ///
     /// It *can be* a record that suspiciously looks a lot like a node, so we
···
     }
 }

-pub enum Vehicle<R: AsyncRead + Unpin, T: Processable> {
-    Lil(Commit, MemDriver<T>),
-    Big(BigCar<R, T>),
+impl<T> MaybeProcessedBlock<T> {
+    fn maybe(process: fn(Vec<u8>) -> T, data: Vec<u8>) -> Self {
+        if Node::could_be(&data) {
+            MaybeProcessedBlock::Raw(data)
+        } else {
+            MaybeProcessedBlock::Processed(process(data))
+        }
+    }
 }

-pub async fn load_car<R: AsyncRead + Unpin, T: Processable>(
-    reader: R,
-    process: fn(Vec<u8>) -> T,
-    max_size: usize,
-) -> Result<Vehicle<R, T>, DriveError> {
-    let mut mem_blocks = HashMap::new();
+/// Read a CAR file, buffering blocks in memory or to disk
+pub enum Driver<R: AsyncRead + Unpin, T: Processable> {
+    /// All blocks fit within the memory limit
+    ///
+    /// You probably want to check the commit's signature. You can go ahead and
+    /// walk the MST right away.
+    Memory(Commit, MemDriver<T>),
+    /// Blocks exceed the memory limit
+    ///
+    /// You'll need to provide a disk store to continue. The commit will be
+    /// returned and can be validated only once all blocks are loaded.
+    Disk(NeedDisk<R, T>),
+}
+
+/// Builder-style driver setup
+#[derive(Debug, Clone)]
+pub struct DriverBuilder {
+    pub mem_limit_mb: usize,
+}

-    let mut car = CarReader::new(reader).await?;
+impl Default for DriverBuilder {
+    fn default() -> Self {
+        Self { mem_limit_mb: 16 }
+    }
+}

-    let root = *car
-        .header()
-        .roots()
-        .first()
-        .ok_or(DriveError::MissingRoot)?;
-    log::debug!("root: {root:?}");
+impl DriverBuilder {
+    /// Begin configuring the driver with defaults
+    pub fn new() -> Self {
+        Default::default()
+    }
+    /// Set the in-memory size limit, in MiB
+    ///
+    /// Default: 16 MiB
+    pub fn with_mem_limit_mb(self, new_limit: usize) -> Self {
+        Self {
+            mem_limit_mb: new_limit,
+        }
+    }
+    /// Set the block processor
+    ///
+    /// Default: noop, raw blocks will be emitted
+    pub fn with_block_processor<T: Processable>(
+        self,
+        p: fn(Vec<u8>) -> T,
+    ) -> DriverBuilderWithProcessor<T> {
+        DriverBuilderWithProcessor {
+            mem_limit_mb: self.mem_limit_mb,
+            block_processor: p,
+        }
+    }
+    /// Begin processing an atproto MST from a CAR file
+    pub async fn load_car<R: AsyncRead + Unpin>(
+        &self,
+        reader: R,
+    ) -> Result<Driver<R, Vec<u8>>, DriveError> {
+        Driver::load_car(reader, crate::process::noop, self.mem_limit_mb).await
+    }
+}

-    let mut commit = None;
+/// Builder-style driver intermediate step
+///
+/// Start from `DriverBuilder`
+#[derive(Debug, Clone)]
+pub struct DriverBuilderWithProcessor<T: Processable> {
+    pub mem_limit_mb: usize,
+    pub block_processor: fn(Vec<u8>) -> T,
+}

-    // try to load all the blocks into memory
-    let mut mem_size = 0;
-    while let Some((cid, data)) = car.next_block().await? {
-        // the root commit is a Special Third Kind of block that we need to make
-        // sure not to optimistically send to the processing function
-        if cid == root {
-            let c: Commit = serde_ipld_dagcbor::from_slice(&data)?;
-            commit = Some(c);
-            continue;
+impl<T: Processable> DriverBuilderWithProcessor<T> {
+    /// Set the in-memory size limit, in MiB
+    ///
+    /// Default: 16 MiB
+    pub fn with_mem_limit_mb(mut self, new_limit: usize) -> Self {
+        self.mem_limit_mb = new_limit;
+        self
+    }
+    /// Begin processing an atproto MST from a CAR file
+    pub async fn load_car<R: AsyncRead + Unpin>(
+        &self,
+        reader: R,
+    ) -> Result<Driver<R, T>, DriveError> {
+        Driver::load_car(reader, self.block_processor, self.mem_limit_mb).await
+    }
+}
+
+impl<R: AsyncRead + Unpin, T: Processable> Driver<R, T> {
+    /// Begin processing an atproto MST from a CAR file
+    ///
+    /// Blocks will be loaded, processed, and buffered in memory. If the entire
+    /// processed size is under the `mem_limit_mb` limit, a `Driver::Memory`
+    /// will be returned along with a `Commit` ready for validation.
+    ///
+    /// If the `mem_limit_mb` limit is reached before loading all blocks, the
+    /// partial state will be returned as `Driver::Disk(needed)`, which can be
+    /// resumed by providing a `DiskStore` for on-disk block storage.
+    pub async fn load_car(
+        reader: R,
+        process: fn(Vec<u8>) -> T,
+        mem_limit_mb: usize,
+    ) -> Result<Driver<R, T>, DriveError> {
+        let max_size = mem_limit_mb * 2_usize.pow(20);
+        let mut mem_blocks = HashMap::new();
+
+        let mut car = CarReader::new(reader).await?;
+
+        let root = *car
+            .header()
+            .roots()
+            .first()
+            .ok_or(DriveError::MissingRoot)?;
+        log::debug!("root: {root:?}");
+
+        let mut commit = None;
+
+        // try to load all the blocks into memory
+        let mut mem_size = 0;
+        while let Some((cid, data)) = car.next_block().await? {
+            // the root commit is a Special Third Kind of block that we need to make
+            // sure not to optimistically send to the processing function
+            if cid == root {
+                let c: Commit = serde_ipld_dagcbor::from_slice(&data)?;
+                commit = Some(c);
+                continue;
+            }
+
+            // remaining possible types: node, record, other. optimistically process
+            let maybe_processed = MaybeProcessedBlock::maybe(process, data);
+
+            // stash (maybe processed) blocks in memory as long as we have room
+            mem_size += std::mem::size_of::<Cid>() + maybe_processed.get_size();
+            mem_blocks.insert(cid, maybe_processed);
+            if mem_size >= max_size {
+                return Ok(Driver::Disk(NeedDisk {
+                    car,
+                    root,
+                    process,
+                    max_size,
+                    mem_blocks,
+                    commit,
+                }));
+            }
         }
-    }

-        // remaining possible types: node, record, other. optimistically process
-        // TODO: get the actual in-memory size to compute disk spill
-        let maybe_processed = if Node::could_be(&data) {
-            MaybeProcessedBlock::Raw(data)
-        } else {
-            MaybeProcessedBlock::Processed(process(data))
-        };
+        // all blocks loaded and we fit in memory! hopefully we found the commit...
+        let commit = commit.ok_or(DriveError::MissingCommit)?;
+
+        let walker = Walker::new(commit.data);

-        // stash (maybe processed) blocks in memory as long as we have room
-        mem_size += std::mem::size_of::<Cid>() + maybe_processed.get_size();
-        mem_blocks.insert(cid, maybe_processed);
-        if mem_size >= max_size {
-            return Ok(Vehicle::Big(BigCar {
-                car,
-                root,
+        Ok(Driver::Memory(
+            commit,
+            MemDriver {
+                blocks: mem_blocks,
+                walker,
                 process,
-                max_size,
-                mem_blocks,
-                commit,
-            }));
-        }
+            },
+        ))
     }
+}

-    // all blocks loaded and we fit in memory! hopefully we found the commit...
-    let commit = commit.ok_or(DriveError::MissingCommit)?;
+/// The core driver between the block stream and MST walker
+///
+/// In the future, PDSs will export CARs in a stream-friendly order that will
+/// enable processing them with tiny memory overhead. But that future is not
+/// here yet.
+///
+/// CARs are almost always in a stream-unfriendly order, so I'm reverting the
+/// optimistic stream features: we load all blocks first, then walk the MST.
+///
+/// This makes things much simpler: we only need to worry about spilling to disk
+/// in one place, and we always have a reasonable expectation about how much
+/// work the init function will do. We can drop the CAR reader before walking,
+/// so the sync/async boundaries become a little easier to work around.
+#[derive(Debug)]
+pub struct MemDriver<T: Processable> {
+    blocks: HashMap<Cid, MaybeProcessedBlock<T>>,
+    walker: Walker,
+    process: fn(Vec<u8>) -> T,
+}

-    let walker = Walker::new(commit.data);
+impl<T: Processable> MemDriver<T> {
+    /// Step through the record outputs, in rkey order
+    pub async fn next_chunk(&mut self, n: usize) -> Result<Option<BlockChunk<T>>, DriveError> {
+        let mut out = Vec::with_capacity(n);
+        for _ in 0..n {
+            // walk as far as we can until we run out of blocks or find a record
+            match self.walker.step(&mut self.blocks, self.process)? {
+                Step::Missing(cid) => return Err(DriveError::MissingBlock(cid)),
+                Step::Finish => break,
+                Step::Found { rkey, data } => {
+                    out.push((rkey, data));
+                    continue;
+                }
+            };
+        }

-    Ok(Vehicle::Lil(
-        commit,
-        MemDriver {
-            blocks: mem_blocks,
-            walker,
-            process,
-        },
-    ))
+        if out.is_empty() {
+            Ok(None)
+        } else {
+            Ok(Some(out))
+        }
+    }
 }

-/// a paritally memory-loaded car file that needs disk spillover to continue
-pub struct BigCar<R: AsyncRead + Unpin, T: Processable> {
+/// A partially memory-loaded car file that needs disk spillover to continue
+pub struct NeedDisk<R: AsyncRead + Unpin, T: Processable> {
     car: CarReader<R>,
     root: Cid,
     process: fn(Vec<u8>) -> T,
···
     bincode::serde::encode_to_vec(v, bincode::config::standard())
 }

-pub fn decode<T: Processable>(bytes: &[u8]) -> Result<T, bincode::error::DecodeError> {
+pub(crate) fn decode<T: Processable>(bytes: &[u8]) -> Result<T, DecodeError> {
     let (t, n) = bincode::serde::decode_from_slice(bytes, bincode::config::standard())?;
-    assert_eq!(n, bytes.len(), "expected to decode all bytes"); // TODO
+    if n != bytes.len() {
+        return Err(DecodeError::ExtraGarbage);
+    }
     Ok(t)
 }

-impl<R: AsyncRead + Unpin, T: Processable + Send + 'static> BigCar<R, T> {
+impl<R: AsyncRead + Unpin, T: Processable + Send + 'static> NeedDisk<R, T> {
     pub async fn finish_loading(
         mut self,
-        mut store: SqliteStore,
-    ) -> Result<(Commit, BigCarReady<T>), DiskDriveError> {
-        // set up access for real
-        let mut access = store.get_access().await?;
-
-        // move access in and back out so we can manage lifetimes
+        mut store: DiskStore,
+    ) -> Result<(Commit, DiskDriver<T>), DriveError> {
+        // move store in and back out so we can manage lifetimes
         // dump mem blocks into the store
-        access = tokio::task::spawn(async move {
-            let mut writer = access.get_writer()?;
+        store = tokio::task::spawn(async move {
+            let mut writer = store.get_writer()?;

             let kvs = self
                 .mem_blocks
                 .into_iter()
-                .map(|(k, v)| (k.to_bytes(), encode(v).unwrap()));
+                .map(|(k, v)| Ok(encode(v).map(|v| (k.to_bytes(), v))?));

             writer.put_many(kvs)?;
+            writer.commit()?;
+            Ok::<_, DriveError>(store)
+        })
+        .await??;

-            drop(writer); // cannot outlive access
-            Ok::<_, DiskDriveError>(access)
-        })
-        .await
-        .unwrap()?;
+        let (tx, mut rx) = mpsc::channel::<Vec<(Cid, MaybeProcessedBlock<T>)>>(1);
+
+        let store_worker = tokio::task::spawn_blocking(move || {
+            let mut writer = store.get_writer()?;
+
+            while let Some(chunk) = rx.blocking_recv() {
+                let kvs = chunk
+                    .into_iter()
+                    .map(|(k, v)| Ok(encode(v).map(|v| (k.to_bytes(), v))?));
+                writer.put_many(kvs)?;
+            }
+
+            writer.commit()?;
+            Ok::<_, DriveError>(store)
+        }); // await later

         // dump the rest to disk (in chunks)
+        log::debug!("dumping the rest of the stream...");
         loop {
-            let mut chunk = vec![];
             let mut mem_size = 0;
+            let mut chunk = vec![];
             loop {
                 let Some((cid, data)) = self.car.next_block().await? else {
                     break;
···
                 }
                 // remaining possible types: node, record, other. optimistically process
                 // TODO: get the actual in-memory size to compute disk spill
-                let maybe_processed = if Node::could_be(&data) {
-                    MaybeProcessedBlock::Raw(data)
-                } else {
-                    MaybeProcessedBlock::Processed((self.process)(data))
-                };
+                let maybe_processed = MaybeProcessedBlock::maybe(self.process, data);
                 mem_size += std::mem::size_of::<Cid>() + maybe_processed.get_size();
                 chunk.push((cid, maybe_processed));
                 if mem_size >= self.max_size {
+                    // soooooo if we're setting the db cache to max_size and then letting
+                    // multiple chunks in the queue that are >= max_size, then at any time
+                    // we might be using some multiple of max_size?
                     break;
                 }
             }
             if chunk.is_empty() {
                 break;
             }
-
-            // move access in and back out so we can manage lifetimes
-            // dump mem blocks into the store
-            access = tokio::task::spawn_blocking(move || {
-                let mut writer = access.get_writer()?;
+            tx.send(chunk)
+                .await
+                .map_err(|_| DriveError::ChannelSendError)?;
+        }
+        drop(tx);
+        log::debug!("done. waiting for worker to finish...");

-                let kvs = chunk
-                    .into_iter()
-                    .map(|(k, v)| (k.to_bytes(), encode(v).unwrap()));
+        store = store_worker.await??;

-                writer.put_many(kvs)?;
+        log::debug!("worker finished.");

-                drop(writer); // cannot outlive access
-                Ok::<_, DiskDriveError>(access)
-            })
-            .await
-            .unwrap()?; // TODO
-        }
-
-        let commit = self.commit.ok_or(DiskDriveError::MissingCommit)?;
+        let commit = self.commit.ok_or(DriveError::MissingCommit)?;

         let walker = Walker::new(commit.data);

         Ok((
             commit,
-            BigCarReady {
+            DiskDriver {
                 process: self.process,
-                access,
-                walker,
+                state: Some(BigState { store, walker }),
             },
         ))
     }
 }

-pub struct BigCarReady<T: Clone> {
+struct BigState {
+    store: DiskStore,
+    walker: Walker,
+}
+
+/// MST walker that reads from disk instead of an in-memory hashmap
+pub struct DiskDriver<T: Clone> {
     process: fn(Vec<u8>) -> T,
-    access: SqliteAccess,
-    walker: Walker,
+    state: Option<BigState>,
+}
+
+// for doctests only
+#[doc(hidden)]
+pub fn _get_fake_disk_driver() -> DiskDriver<Vec<u8>> {
+    use crate::process::noop;
+    DiskDriver {
+        process: noop,
+        state: None,
+    }
 }

-impl<T: Processable + Send + 'static> BigCarReady<T> {
-    pub async fn next_chunk(
-        mut self,
+impl<T: Processable + Send + 'static> DiskDriver<T> {
+    /// Walk the MST returning up to `n` rkey + record pairs
+    ///
+    /// ```no_run
+    /// # use repo_stream::{drive::{DiskDriver, DriveError, _get_fake_disk_driver}, process::noop};
+    /// # #[tokio::main]
+    /// # async fn main() -> Result<(), DriveError> {
+    /// # let mut disk_driver = _get_fake_disk_driver();
+    /// while let Some(pairs) = disk_driver.next_chunk(256).await? {
+    ///     for (rkey, record) in pairs {
+    ///         println!("{rkey}: size={}", record.len());
+    ///     }
+    /// }
+    /// let store = disk_driver.reset_store().await?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub async fn next_chunk(&mut self, n: usize) -> Result<Option<BlockChunk<T>>, DriveError> {
+        let process = self.process;
+
+        // state should only *ever* be None transiently while inside here
+        let mut state = self.state.take().expect("DiskDriver must have Some(state)");
+
+        // the big pain here is that we don't want to leave self.state in an
+        // invalid state (None), so all the error paths have to make sure it
+        // comes out again.
+        let (state, res) = tokio::task::spawn_blocking(
+            move || -> (BigState, Result<BlockChunk<T>, DriveError>) {
+                let mut reader_res = state.store.get_reader();
+                let reader: &mut _ = match reader_res {
+                    Ok(ref mut r) => r,
+                    Err(ref mut e) => {
+                        // unfortunately we can't return the error directly because
+                        // (for some reason) it's attached to the lifetime of the
+                        // reader?
+                        // hack a mem::swap so we can get it out :/
+                        let e_swapped = e.steal();
+                        // the pain: `state` *has to* outlive the reader
+                        drop(reader_res);
+                        return (state, Err(e_swapped.into()));
+                    }
+                };
+
+                let mut out = Vec::with_capacity(n);
+
+                for _ in 0..n {
+                    // walk as far as we can until we run out of blocks or find a record
+                    let step = match state.walker.disk_step(reader, process) {
+                        Ok(s) => s,
+                        Err(e) => {
+                            // the pain: `state` *has to* outlive the reader
+                            drop(reader_res);
+                            return (state, Err(e.into()));
+                        }
+                    };
+                    match step {
+                        Step::Missing(cid) => {
+                            // the pain: `state` *has to* outlive the reader
+                            drop(reader_res);
+                            return (state, Err(DriveError::MissingBlock(cid)));
+                        }
+                        Step::Finish => break,
+                        Step::Found { rkey, data } => out.push((rkey, data)),
+                    };
+                }
+
+                // `state` *has to* outlive the reader
+                drop(reader_res);
+
+                (state, Ok::<_, DriveError>(out))
+            },
+        )
+        .await?; // on tokio JoinError, we'll be left with invalid state :(
+
+        // *must* restore state before dealing with the actual result
+        self.state = Some(state);
+
+        let out = res?;
+
+        if out.is_empty() {
+            Ok(None)
+        } else {
+            Ok(Some(out))
+        }
+    }
+
+    fn read_tx_blocking(
+        &mut self,
         n: usize,
-    ) -> Result<(Self, Option<Vec<(String, T)>>), DiskDriveError> {
-        let mut out = Vec::with_capacity(n);
-        (self, out) = tokio::task::spawn_blocking(move || {
-            let access = self.access;
-            let mut reader = access.get_reader()?;
+        tx: mpsc::Sender<Result<BlockChunk<T>, DriveError>>,
+    ) -> Result<(), mpsc::error::SendError<Result<BlockChunk<T>, DriveError>>> {
+        let BigState { store, walker } = self.state.as_mut().expect("valid state");
+        let mut reader = match store.get_reader() {
+            Ok(r) => r,
+            Err(e) => return tx.blocking_send(Err(e.into())),
+        };
+
+        loop {
+            let mut out: BlockChunk<T> = Vec::with_capacity(n);

             for _ in 0..n {
                 // walk as far as we can until we run out of blocks or find a record
-                match self.walker.disk_step(&mut reader, self.process)? {
-                    Step::Missing(cid) => return Err(DiskDriveError::MissingBlock(cid)),
-                    Step::Finish => break,
-                    Step::Step { rkey, data } => {
+
+                let step = match walker.disk_step(&mut reader, self.process) {
+                    Ok(s) => s,
+                    Err(e) => return tx.blocking_send(Err(e.into())),
+                };
+
+                match step {
+                    Step::Missing(cid) => {
+                        return tx.blocking_send(Err(DriveError::MissingBlock(cid)));
+                    }
+                    Step::Finish => return Ok(()),
+                    Step::Found { rkey, data } => {
                         out.push((rkey, data));
                         continue;
                     }
                 };
             }

-            drop(reader); // cannot outlive access
-            self.access = access;
-            Ok::<_, DiskDriveError>((self, out))
-        })
-        .await
-        .unwrap()?; // TODO
+            if out.is_empty() {
+                break;
+            }
+            tx.blocking_send(Ok(out))?;
+        }

-        if out.is_empty() {
-            Ok((self, None))
-        } else {
-            Ok((self, Some(out)))
-        }
+        Ok(())
     }
-}
+
+    /// Spawn the disk reading task into a tokio blocking thread
+    ///
+    /// The idea is to avoid so much sending back and forth to the blocking
+    /// thread, letting a blocking task do all the disk reading work and sending
+    /// records and rkeys back through an `mpsc` channel instead.
+    ///
+    /// This might also allow the disk work to continue while processing the
+    /// records. It's still not yet clear if this method actually has much
+    /// benefit over just using `.next_chunk(n)`.
+    ///
+    /// ```no_run
+    /// # use repo_stream::{drive::{DiskDriver, DriveError, _get_fake_disk_driver}, process::noop};
+    /// # #[tokio::main]
+    /// # async fn main() -> Result<(), DriveError> {
+    /// # let mut disk_driver = _get_fake_disk_driver();
+    /// let (mut rx, join) = disk_driver.to_channel(512);
+    /// while let Some(recvd) = rx.recv().await {
+    ///     let pairs = recvd?;
+    ///     for (rkey, record) in pairs {
+    ///         println!("{rkey}: size={}", record.len());
+    ///     }
+    /// }
+    /// let store = join.await?.reset_store().await?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn to_channel(
+        mut self,
+        n: usize,
+    ) -> (
+        mpsc::Receiver<Result<BlockChunk<T>, DriveError>>,
+        tokio::task::JoinHandle<Self>,
+    ) {
+        let (tx, rx) = mpsc::channel::<Result<BlockChunk<T>, DriveError>>(1);

-/// The core driver between the block stream and MST walker
-///
-/// In the future, PDSs will export CARs in a stream-friendly order that will
-/// enable processing them with tiny memory overhead. But that future is not
-/// here yet.
-///
-/// CARs are almost always in a stream-unfriendly order, so I'm reverting the
-/// optimistic stream features: we load all block first, then walk the MST.
-///
-/// This makes things much simpler: we only need to worry about spilling to disk
-/// in one place, and we always have a reasonable expecatation about how much
-/// work the init function will do. We can drop the CAR reader before walking,
-/// so the sync/async boundaries become a little easier to work around.
-#[derive(Debug)]
-pub struct MemDriver<T: Processable> {
-    blocks: HashMap<Cid, MaybeProcessedBlock<T>>,
-    walker: Walker,
-    process: fn(Vec<u8>) -> T,
-}
+        // sketch: this worker is going to be allowed to execute without a join handle
+        let chan_task = tokio::task::spawn_blocking(move || {
+            if let Err(mpsc::error::SendError(_)) = self.read_tx_blocking(n, tx) {
+                log::debug!("big car reader exited early due to dropped receiver channel");
+            }
+            self
+        });

-impl<T: Processable> MemDriver<T> {
-    /// Manually step through the record outputs
-    pub async fn next_chunk(&mut self, n: usize) -> Result<Option<Vec<(String, T)>>, DriveError> {
-        let mut out = Vec::with_capacity(n);
-        for _ in 0..n {
-            // walk as far as we can until we run out of blocks or find a record
-            match self.walker.step(&mut self.blocks, self.process)? {
-                Step::Missing(cid) => return Err(DriveError::MissingBlock(cid)),
-                Step::Finish => break,
-                Step::Step { rkey, data } => {
-                    out.push((rkey, data));
-                    continue;
-                }
-            };
-        }
+        (rx, chan_task)
+    }

-        if out.is_empty() {
-            Ok(None)
-        } else {
-            Ok(Some(out))
-        }
+    /// Reset the disk storage so it can be reused. You must call this.
+    ///
+    /// Ideally we'd put this in an `impl Drop`, but since it makes blocking
+    /// calls, that would be risky in an async context. For now you just have to
+    /// carefully make sure you call it.
+    ///
+    /// The sqlite store is returned, so it can be reused for another
+    /// `DiskDriver`.
+    pub async fn reset_store(mut self) -> Result<DiskStore, DriveError> {
+        let BigState { store, .. } = self.state.take().expect("valid state");
+        Ok(store.reset().await?)
     }
 }
+84 -5  src/lib.rs
···
-//! Fast and robust atproto CAR file processing in rust
-//!
-//! For now see the [examples](https://tangled.org/@microcosm.blue/repo-stream/tree/main/examples)
+/*!
+A robust CAR file -> MST walker for atproto
+
+Small CARs have their blocks buffered in memory. If a configurable memory limit
+is reached while reading blocks, CAR reading is suspended, and can be continued
+by providing disk storage to buffer the CAR blocks instead.
+
+A `process` function can be provided for tasks where records are transformed
+into a smaller representation, to save memory (and disk) during block reading.
+
+Once blocks are loaded, the MST is walked and emitted as chunks of
+`(rkey, processed_block)` pairs, in order (depth first, left-to-right).
+
+Some MST validations are applied:
+- Keys must appear in order
+- Keys must be at the correct MST tree depth
+
+`iroh_car` additionally applies a block size limit of `2MiB`.
+
+```
+use repo_stream::{Driver, DriverBuilder, DiskBuilder};
+
+# #[tokio::main]
+# async fn main() -> Result<(), Box<dyn std::error::Error>> {
+# let reader = include_bytes!("../car-samples/tiny.car").as_slice();
+let mut total_size = 0;
+
+match DriverBuilder::new()
+    .with_mem_limit_mb(10)
+    .with_block_processor(|rec| rec.len()) // block processing: just extract the raw record size
+    .load_car(reader)
+    .await?
+{
+
+    // if all blocks fit within memory
+    Driver::Memory(_commit, mut driver) => {
+        while let Some(chunk) = driver.next_chunk(256).await? {
+            for (_rkey, size) in chunk {
+                total_size += size;
+            }
+        }
+    },
+
+    // if the CAR was too big for in-memory processing
+    Driver::Disk(paused) => {
+        // set up a disk store we can spill to
+        let store = DiskBuilder::new().open("some/path.db".into()).await?;
+        // do the spilling, get back a (similar) driver
+        let (_commit, mut driver) = paused.finish_loading(store).await?;
+
+        while let Some(chunk) = driver.next_chunk(256).await? {
+            for (_rkey, size) in chunk {
+                total_size += size;
+            }
+        }
+
+        // clean up the disk store (drop tables etc)
+        driver.reset_store().await?;
+    }
+};
+println!("sum of size of all records: {total_size}");
+# Ok(())
+# }
+```
+
+Disk spilling suspends and returns a `Driver::Disk(paused)` instead of going
+ahead and eagerly using disk I/O. This means you have to write a bit more code
+to handle both cases, but it allows you to have finer control over resource
+usage. For example, you can drive a number of parallel memory CAR workers, and
+separately have a different number of disk workers picking up suspended disk
+tasks from a queue.
+
+Find more [examples in the repo](https://tangled.org/@microcosm.blue/repo-stream/tree/main/examples).
+
+*/
+
+pub mod mst;
+mod walk;

 pub mod disk;
 pub mod drive;
-pub mod mst;
-pub mod walk;
+pub mod process;
+
+pub use disk::{DiskBuilder, DiskError, DiskStore};
+pub use drive::{DriveError, Driver, DriverBuilder, NeedDisk};
+pub use mst::Commit;
+pub use process::Processable;
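The parallel-workers idea in the new crate docs ("drive a number of parallel memory CAR workers... disk workers picking up suspended disk tasks from a queue") can be wired with a channel of `NeedDisk` values. A hypothetical sketch, not code from the crate itself: the `spill.db` path and channel size are made up, and it assumes `Vec<u8>: Processable` (the default `noop` processor's output type) and that `NeedDisk` is `Send`:

```rust
use repo_stream::{DiskBuilder, Driver, DriverBuilder, NeedDisk};
use std::path::PathBuf;
use tokio::{fs::File, io::BufReader, sync::mpsc};

type AnyError = Box<dyn std::error::Error + Send + Sync>;

// one disk worker drains suspended loads so only one spill runs at a time,
// while the fast path handles in-memory CARs inline
async fn run(paths: Vec<PathBuf>) -> Result<(), AnyError> {
    let (queue_tx, mut queue_rx) = mpsc::channel::<NeedDisk<BufReader<File>, Vec<u8>>>(8);

    let disk_worker = tokio::spawn(async move {
        let mut store = DiskBuilder::new().open("spill.db".into()).await?;
        while let Some(paused) = queue_rx.recv().await {
            let (_commit, mut driver) = paused.finish_loading(store).await?;
            while let Some(_pairs) = driver.next_chunk(256).await? {}
            // reset_store hands the store back, ready for the next spill
            store = driver.reset_store().await?;
        }
        Ok::<_, AnyError>(())
    });

    for path in paths {
        let reader = BufReader::new(File::open(path).await?);
        match DriverBuilder::new().with_mem_limit_mb(16).load_car(reader).await? {
            Driver::Memory(_commit, mut driver) => {
                while let Some(_pairs) = driver.next_chunk(256).await? {}
            }
            // too big for memory: hand it off to the disk worker
            Driver::Disk(paused) => queue_tx
                .send(paused)
                .await
                .map_err(|_| "disk worker stopped")?,
        }
    }
    drop(queue_tx);
    disk_worker.await??;
    Ok(())
}
```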
+4 -8  src/mst.rs
···
 /// MST node data schema
 #[derive(Debug, Deserialize, PartialEq)]
 #[serde(deny_unknown_fields)]
-pub struct Node {
+pub(crate) struct Node {
     /// link to sub-tree Node on a lower level and with all keys sorting before
     /// keys at this node
     #[serde(rename = "l")]
···
     /// so if a block *could be* a node, any record converter must postpone
     /// processing. if it turns out it happens to be a very node-looking record,
     /// well, sorry, it just has to only be processed later when that's known.
-    pub fn could_be(bytes: impl AsRef<[u8]>) -> bool {
+    pub(crate) fn could_be(bytes: impl AsRef<[u8]>) -> bool {
         const NODE_FINGERPRINT: [u8; 3] = [
             0xA2, // map length 2 (for "l" and "e" keys)
             0x61, // text length 1
···
     /// with an empty array of entries. This is the only situation in which a
     /// tree may contain an empty leaf node which does not either contain keys
     /// ("entries") or point to a sub-tree containing entries.
-    ///
-    /// TODO: to me this is slightly unclear with respect to `l` (ask someone).
-    /// ...is that what "The top of the tree must not be a an empty node which
-    /// only points to a sub-tree." is referring to?
-    pub fn is_empty(&self) -> bool {
+    pub(crate) fn is_empty(&self) -> bool {
         self.left.is_none() && self.entries.is_empty()
     }
 }
···
 /// TreeEntry object
 #[derive(Debug, Deserialize, PartialEq)]
 #[serde(deny_unknown_fields)]
-pub struct Entry {
+pub(crate) struct Entry {
     /// count of bytes shared with previous TreeEntry in this Node (if any)
     #[serde(rename = "p")]
     pub prefix_len: usize,
+108
src/process.rs
···
1
+
/*!
2
+
Record processor function output trait
3
+
4
+
The return type must satisfy the `Processable` trait, which requires:
5
+
6
+
- `Clone` because two rkeys can refer to the same record by CID, which may
7
+
only appear once in the CAR file.
8
+
- `Serialize + DeserializeOwned` so it can be spilled to disk.
9
+
10
+
One required function must be implemented, `get_size()`: this should return the
11
+
approximate total off-stack size of the type. (the on-stack size will be added
12
+
automatically via `std::mem::size_of`).
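
For instance, the `String` impl below counts only the heap buffer:

```
# use repo_stream::Processable;
let s = String::from("eyy");
// 3 heap bytes; the String struct itself is the on-stack part
assert_eq!(s.get_size(), s.capacity());
```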
13
+
14
+
Note that it is **not guaranteed** that the `process` function will run on a
15
+
block before storing it in memory or on disk: it's not possible to know if a
16
+
block is a record without actually walking the MST, so the best we can do is
17
+
apply `process` to any block that we know *cannot* be an MST node, and otherwise
18
+
store the raw block bytes.
19
+
20
+
Here's a silly processing function that just collects 'eyy's found in the raw
21
+
record bytes:
22
+
23
+
```
24
+
# use repo_stream::Processable;
25
+
# use serde::{Serialize, Deserialize};
26
+
#[derive(Debug, Clone, Serialize, Deserialize)]
27
+
struct Eyy(usize, String);
28
+
29
+
impl Processable for Eyy {
30
+
fn get_size(&self) -> usize {
31
+
// don't need to compute the usize, it's on the stack
32
+
self.1.capacity() // in-mem size from the string's capacity, in bytes
33
+
}
34
+
}
35
+
36
+
fn process(raw: Vec<u8>) -> Vec<Eyy> {
37
+
let mut out = Vec::new();
38
+
let to_find = "eyy".as_bytes();
39
+
    // `windows(3)` visits every 3-byte run; unlike manual indexing it cannot
    // underflow on inputs shorter than 3 bytes
    for (i, win) in raw.windows(3).enumerate() {
40
+
        if win == to_find {
41
+
out.push(Eyy(i, "eyy".to_string()));
42
+
}
43
+
}
44
+
out
45
+
}
46
+
```
47
+
48
+
The memory sizing is admittedly a little sketchy, but it at least approximately
49
+
works.
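
As a quick sanity check of how the `Vec` impl composes slot sizes with each
item's heap size:

```
# use repo_stream::Processable;
let v = vec!["eyy".to_string()];
// one String slot per unit of capacity, plus the 3 heap bytes of "eyy"
assert_eq!(v.get_size(), std::mem::size_of::<String>() * v.capacity() + 3);
```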
50
+
*/
51
+
52
+
use serde::{Serialize, de::DeserializeOwned};
53
+
54
+
/// Output trait for record processing
55
+
pub trait Processable: Clone + Serialize + DeserializeOwned {
56
+
/// Any additional in-memory size taken by the processed type
57
+
///
58
+
/// Do not include stack size (`std::mem::size_of`)
59
+
fn get_size(&self) -> usize;
60
+
}
61
+
62
+
/// Processor that just returns the raw blocks
63
+
#[inline]
64
+
pub fn noop(block: Vec<u8>) -> Vec<u8> {
65
+
block
66
+
}
67
+
68
+
impl Processable for u8 {
69
+
fn get_size(&self) -> usize {
70
+
0
71
+
}
72
+
}
73
+
74
+
impl Processable for usize {
75
+
fn get_size(&self) -> usize {
76
+
0 // no additional space taken, just its stack size (newtype is free)
77
+
}
78
+
}
79
+
80
+
impl Processable for String {
81
+
fn get_size(&self) -> usize {
82
+
self.capacity()
83
+
}
84
+
}
85
+
86
+
impl<Item: Sized + Processable> Processable for Vec<Item> {
87
+
fn get_size(&self) -> usize {
88
+
let slot_size = std::mem::size_of::<Item>();
89
+
let direct_size = slot_size * self.capacity();
90
+
let items_referenced_size: usize = self.iter().map(|item| item.get_size()).sum();
91
+
direct_size + items_referenced_size
92
+
}
93
+
}
94
+
95
+
impl<Item: Processable> Processable for Option<Item> {
96
+
fn get_size(&self) -> usize {
97
+
self.as_ref().map(|item| item.get_size()).unwrap_or(0)
98
+
}
99
+
}
100
+
101
+
impl<Item: Processable, Error: Processable> Processable for Result<Item, Error> {
102
+
fn get_size(&self) -> usize {
103
+
match self {
104
+
Ok(item) => item.get_size(),
105
+
Err(err) => err.get_size(),
106
+
}
107
+
}
108
+
}
+193
-258
src/walk.rs
···
1
1
//! Depth-first MST traversal
2
2
3
3
use crate::disk::SqliteReader;
4
-
use crate::drive::{MaybeProcessedBlock, Processable};
4
+
use crate::drive::{DecodeError, MaybeProcessedBlock};
5
5
use crate::mst::Node;
6
+
use crate::process::Processable;
6
7
use ipld_core::cid::Cid;
8
+
use sha2::{Digest, Sha256};
7
9
use std::collections::HashMap;
8
10
use std::convert::Infallible;
9
11
10
12
/// Errors that can happen while walking
11
13
#[derive(Debug, thiserror::Error)]
12
-
pub enum Trip {
13
-
#[error("empty mst nodes are not allowed")]
14
-
NodeEmpty,
14
+
pub enum WalkError {
15
15
#[error("Failed to fingerprint commit block")]
16
16
BadCommitFingerprint,
17
17
#[error("Failed to decode commit block: {0}")]
18
18
BadCommit(#[from] serde_ipld_dagcbor::DecodeError<Infallible>),
19
19
#[error("Action node error: {0}")]
20
-
RkeyError(#[from] RkeyError),
21
-
#[error("Encountered an rkey out of order while walking the MST")]
22
-
RkeyOutOfOrder,
23
-
}
24
-
25
-
/// Errors that can happen while walking
26
-
#[derive(Debug, thiserror::Error)]
27
-
pub enum DiskTrip {
28
-
#[error("tripped: {0}")]
29
-
Trip(#[from] Trip),
20
+
MstError(#[from] MstError),
30
21
#[error("storage error: {0}")]
31
22
StorageError(#[from] rusqlite::Error),
32
23
#[error("Decode error: {0}")]
33
-
BincodeDecodeError(#[from] bincode::error::DecodeError),
24
+
DecodeError(#[from] DecodeError),
34
25
}
35
26
36
27
/// Errors from invalid Rkeys
37
-
#[derive(Debug, thiserror::Error)]
38
-
pub enum RkeyError {
28
+
#[derive(Debug, PartialEq, thiserror::Error)]
29
+
pub enum MstError {
39
30
#[error("Failed to compute an rkey due to invalid prefix_len")]
40
31
EntryPrefixOutOfbounds,
41
32
#[error("RKey was not utf-8")]
42
33
EntryRkeyNotUtf8(#[from] std::string::FromUtf8Error),
34
+
#[error("Nodes cannot be empty (except for an entirely empty MST)")]
35
+
EmptyNode,
36
+
#[error("Found an entry with rkey at the wrong depth")]
37
+
WrongDepth,
38
+
#[error("Lost track of our depth (possible bug?)")]
39
+
LostDepth,
40
+
#[error("MST depth underflow: depth-0 node with child trees")]
41
+
DepthUnderflow,
42
+
#[error("Encountered an rkey out of order while walking the MST")]
43
+
RkeyOutOfOrder,
43
44
}
44
45
45
46
/// Walker outputs
···
50
51
/// Reached the end of the MST! yay!
51
52
Finish,
52
53
/// A record was found!
53
-
Step { rkey: String, data: T },
54
+
Found { rkey: String, data: T },
54
55
}
55
56
56
57
#[derive(Debug, Clone, PartialEq)]
57
58
enum Need {
58
-
Node(Cid),
59
+
Node { depth: Depth, cid: Cid },
59
60
Record { rkey: String, cid: Cid },
60
61
}
61
62
62
-
fn push_from_node(stack: &mut Vec<Need>, node: &Node) -> Result<(), RkeyError> {
63
-
let mut entries = Vec::with_capacity(node.entries.len());
63
+
#[derive(Debug, Clone, Copy, PartialEq)]
64
+
enum Depth {
65
+
Root,
66
+
Depth(u32),
67
+
}
68
+
69
+
impl Depth {
70
+
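    /// atproto MST key depth: the number of leading zero bits in
    /// sha256(key), divided by two (rounding down).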
fn from_key(key: &[u8]) -> Self {
71
+
let mut zeros = 0;
72
+
for byte in Sha256::digest(key) {
73
+
let leading = byte.leading_zeros();
74
+
zeros += leading;
75
+
if leading < 8 {
76
+
break;
77
+
}
78
+
}
79
+
Self::Depth(zeros / 2) // truncating divide (rounds down)
80
+
}
81
+
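    /// The depth required of this node's children: one level down, `None`
    /// (no constraint yet) below the `Root` sentinel, an error at depth 0.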
fn next_expected(&self) -> Result<Option<u32>, MstError> {
82
+
match self {
83
+
Self::Root => Ok(None),
84
+
Self::Depth(d) => d.checked_sub(1).ok_or(MstError::DepthUnderflow).map(Some),
85
+
}
86
+
}
87
+
}
64
88
89
+
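/// Expand a decoded node onto the walk stack, checking non-emptiness and key
/// depth; pushes happen in reverse so the leftmost need is popped first.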
fn push_from_node(stack: &mut Vec<Need>, node: &Node, parent_depth: Depth) -> Result<(), MstError> {
90
+
// empty nodes are not allowed in the MST except in an empty MST
91
+
if node.is_empty() {
92
+
if parent_depth == Depth::Root {
93
+
return Ok(()); // empty mst, nothing to push
94
+
} else {
95
+
return Err(MstError::EmptyNode);
96
+
}
97
+
}
98
+
99
+
let mut entries = Vec::with_capacity(node.entries.len());
65
100
let mut prefix = vec![];
101
+
let mut this_depth = parent_depth.next_expected()?;
102
+
66
103
for entry in &node.entries {
67
104
let mut rkey = vec![];
68
105
let pre_checked = prefix
69
106
.get(..entry.prefix_len)
70
-
.ok_or(RkeyError::EntryPrefixOutOfbounds)?;
107
+
.ok_or(MstError::EntryPrefixOutOfbounds)?;
71
108
rkey.extend_from_slice(pre_checked);
72
109
rkey.extend_from_slice(&entry.keysuffix);
110
+
111
+
let Depth::Depth(key_depth) = Depth::from_key(&rkey) else {
112
+
return Err(MstError::WrongDepth);
113
+
};
114
+
115
+
// this_depth is `None` when the parent was the Root sentinel (i.e. this is the top node);
116
+
// in that case we accept whatever depth its first key claims
117
+
let expected_depth = match this_depth {
118
+
Some(d) => d,
119
+
None => {
120
+
this_depth = Some(key_depth);
121
+
key_depth
122
+
}
123
+
};
124
+
125
+
// all keys we find should be this depth
126
+
if key_depth != expected_depth {
127
+
            return Err(MstError::WrongDepth);
128
+
}
129
+
73
130
prefix = rkey.clone();
74
131
75
132
entries.push(Need::Record {
···
77
134
cid: entry.value,
78
135
});
79
136
if let Some(ref tree) = entry.tree {
80
-
entries.push(Need::Node(*tree));
137
+
entries.push(Need::Node {
138
+
depth: Depth::Depth(key_depth),
139
+
cid: *tree,
140
+
});
81
141
}
82
142
}
83
143
84
144
entries.reverse();
85
145
stack.append(&mut entries);
86
146
147
+
let d = this_depth.ok_or(MstError::LostDepth)?;
148
+
87
149
if let Some(tree) = node.left {
88
-
stack.push(Need::Node(tree));
150
+
stack.push(Need::Node {
151
+
depth: Depth::Depth(d),
152
+
cid: tree,
153
+
});
89
154
}
90
155
Ok(())
91
156
}
···
102
167
impl Walker {
103
168
pub fn new(tree_root_cid: Cid) -> Self {
104
169
Self {
105
-
stack: vec![Need::Node(tree_root_cid)],
170
+
stack: vec![Need::Node {
171
+
depth: Depth::Root,
172
+
cid: tree_root_cid,
173
+
}],
106
174
prev: "".to_string(),
107
175
}
108
176
}
···
112
180
&mut self,
113
181
blocks: &mut HashMap<Cid, MaybeProcessedBlock<T>>,
114
182
process: impl Fn(Vec<u8>) -> T,
115
-
) -> Result<Step<T>, Trip> {
183
+
) -> Result<Step<T>, WalkError> {
116
184
loop {
117
-
let Some(mut need) = self.stack.last() else {
185
+
let Some(need) = self.stack.last_mut() else {
118
186
log::trace!("tried to walk but we're actually done.");
119
187
return Ok(Step::Finish);
120
188
};
121
189
122
-
match &mut need {
123
-
Need::Node(cid) => {
190
+
match need {
191
+
&mut Need::Node { depth, cid } => {
124
192
log::trace!("need node {cid:?}");
125
-
let Some(block) = blocks.remove(cid) else {
193
+
let Some(block) = blocks.remove(&cid) else {
126
194
log::trace!("node not found, resting");
127
-
return Ok(Step::Missing(*cid));
195
+
return Ok(Step::Missing(cid));
128
196
};
129
197
130
198
let MaybeProcessedBlock::Raw(data) = block else {
131
-
return Err(Trip::BadCommitFingerprint);
199
+
return Err(WalkError::BadCommitFingerprint);
132
200
};
133
-
let node =
134
-
serde_ipld_dagcbor::from_slice::<Node>(&data).map_err(Trip::BadCommit)?;
201
+
let node = serde_ipld_dagcbor::from_slice::<Node>(&data)
202
+
.map_err(WalkError::BadCommit)?;
135
203
136
204
// found node, make sure we remember
137
205
self.stack.pop();
138
206
139
207
// queue up work on the found node next
140
-
push_from_node(&mut self.stack, &node)?;
208
+
push_from_node(&mut self.stack, &node, depth)?;
141
209
}
142
210
Need::Record { rkey, cid } => {
143
211
log::trace!("need record {cid:?}");
212
+
// note that we cannot *remove* a record block, sadly, since
213
+
// there can be multiple rkeys pointing to the same cid.
144
214
let Some(data) = blocks.get_mut(cid) else {
145
-
log::trace!("record block not found, resting");
146
215
return Ok(Step::Missing(*cid));
147
216
};
148
217
let rkey = rkey.clone();
···
154
223
// found node, make sure we remember
155
224
self.stack.pop();
156
225
157
-
log::trace!("emitting a block as a step. depth={}", self.stack.len());
158
-
159
226
// rkeys *must* be in order or else the tree is invalid (or
160
227
// we have a bug)
161
228
if rkey <= self.prev {
162
-
return Err(Trip::RkeyOutOfOrder);
229
+
return Err(MstError::RkeyOutOfOrder)?;
163
230
}
164
231
self.prev = rkey.clone();
165
232
166
-
return Ok(Step::Step { rkey, data });
233
+
return Ok(Step::Found { rkey, data });
167
234
}
168
235
}
169
236
}
···
174
241
&mut self,
175
242
reader: &mut SqliteReader,
176
243
process: impl Fn(Vec<u8>) -> T,
177
-
) -> Result<Step<T>, DiskTrip> {
244
+
) -> Result<Step<T>, WalkError> {
178
245
loop {
179
-
let Some(mut need) = self.stack.last() else {
246
+
let Some(need) = self.stack.last_mut() else {
180
247
log::trace!("tried to walk but we're actually done.");
181
248
return Ok(Step::Finish);
182
249
};
183
250
184
-
match &mut need {
185
-
Need::Node(cid) => {
251
+
match need {
252
+
&mut Need::Node { depth, cid } => {
186
253
let cid_bytes = cid.to_bytes();
187
254
log::trace!("need node {cid:?}");
188
255
let Some(block_bytes) = reader.get(cid_bytes)? else {
189
256
log::trace!("node not found, resting");
190
-
return Ok(Step::Missing(*cid));
257
+
return Ok(Step::Missing(cid));
191
258
};
192
259
193
260
let block: MaybeProcessedBlock<T> = crate::drive::decode(&block_bytes)?;
194
261
195
262
let MaybeProcessedBlock::Raw(data) = block else {
196
-
return Err(Trip::BadCommitFingerprint.into());
263
+
return Err(WalkError::BadCommitFingerprint);
197
264
};
198
-
let node =
199
-
serde_ipld_dagcbor::from_slice::<Node>(&data).map_err(Trip::BadCommit)?;
265
+
let node = serde_ipld_dagcbor::from_slice::<Node>(&data)
266
+
.map_err(WalkError::BadCommit)?;
200
267
201
268
// found node, make sure we remember
202
269
self.stack.pop();
203
270
204
271
// queue up work on the found node next
205
-
push_from_node(&mut self.stack, &node).map_err(Trip::RkeyError)?;
272
+
push_from_node(&mut self.stack, &node, depth).map_err(WalkError::MstError)?;
206
273
}
207
274
Need::Record { rkey, cid } => {
208
275
log::trace!("need record {cid:?}");
···
226
293
// rkeys *must* be in order or else the tree is invalid (or
227
294
// we have a bug)
228
295
if rkey <= self.prev {
229
-
return Err(DiskTrip::Trip(Trip::RkeyOutOfOrder));
296
+
return Err(MstError::RkeyOutOfOrder)?;
230
297
}
231
298
self.prev = rkey.clone();
232
299
233
-
return Ok(Step::Step { rkey, data });
300
+
return Ok(Step::Found { rkey, data });
234
301
}
235
302
}
236
303
}
···
240
307
#[cfg(test)]
241
308
mod test {
242
309
use super::*;
243
-
// use crate::mst::Entry;
244
310
245
311
fn cid1() -> Cid {
246
312
"bafyreihixenvk3ahqbytas4hk4a26w43bh6eo3w6usjqtxkpzsvi655a3m"
247
313
.parse()
248
314
.unwrap()
249
315
}
250
-
// fn cid2() -> Cid {
251
-
// "QmY7Yh4UquoXHLPFo2XbhXkhBvFoPwmQUSa92pxnxjQuPU"
252
-
// .parse()
253
-
// .unwrap()
254
-
// }
255
-
// fn cid3() -> Cid {
256
-
// "bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi"
257
-
// .parse()
258
-
// .unwrap()
259
-
// }
260
-
// fn cid4() -> Cid {
261
-
// "QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR"
262
-
// .parse()
263
-
// .unwrap()
264
-
// }
265
-
// fn cid5() -> Cid {
266
-
// "QmSnuWmxptJZdLJpKRarxBMS2Ju2oANVrgbr2xWbie9b2D"
267
-
// .parse()
268
-
// .unwrap()
269
-
// }
270
-
// fn cid6() -> Cid {
271
-
// "QmdmQXB2mzChmMeKY47C43LxUdg1NDJ5MWcKMKxDu7RgQm"
272
-
// .parse()
273
-
// .unwrap()
274
-
// }
275
-
// fn cid7() -> Cid {
276
-
// "bafybeiaysi4s6lnjev27ln5icwm6tueaw2vdykrtjkwiphwekaywqhcjze"
277
-
// .parse()
278
-
// .unwrap()
279
-
// }
280
-
// fn cid8() -> Cid {
281
-
// "bafyreif3tfdpr5n4jdrbielmcapwvbpcthepfkwq2vwonmlhirbjmotedi"
282
-
// .parse()
283
-
// .unwrap()
284
-
// }
285
-
// fn cid9() -> Cid {
286
-
// "bafyreicnokmhmrnlp2wjhyk2haep4tqxiptwfrp2rrs7rzq7uk766chqvq"
287
-
// .parse()
288
-
// .unwrap()
289
-
// }
316
+
317
+
#[test]
318
+
fn test_depth_spec_0() {
319
+
let d = Depth::from_key(b"2653ae71");
320
+
assert_eq!(d, Depth::Depth(0))
321
+
}
322
+
323
+
#[test]
324
+
fn test_depth_spec_1() {
325
+
let d = Depth::from_key(b"blue");
326
+
assert_eq!(d, Depth::Depth(1))
327
+
}
328
+
329
+
#[test]
330
+
fn test_depth_spec_4() {
331
+
let d = Depth::from_key(b"app.bsky.feed.post/454397e440ec");
332
+
assert_eq!(d, Depth::Depth(4))
333
+
}
334
+
335
+
#[test]
336
+
fn test_depth_spec_8() {
337
+
let d = Depth::from_key(b"app.bsky.feed.post/9adeb165882c");
338
+
assert_eq!(d, Depth::Depth(8))
339
+
}
340
+
341
+
#[test]
342
+
fn test_depth_ietf_draft_0() {
343
+
let d = Depth::from_key(b"key1");
344
+
assert_eq!(d, Depth::Depth(0))
345
+
}
346
+
347
+
#[test]
348
+
fn test_depth_ietf_draft_1() {
349
+
let d = Depth::from_key(b"key7");
350
+
assert_eq!(d, Depth::Depth(1))
351
+
}
352
+
353
+
#[test]
354
+
fn test_depth_ietf_draft_4() {
355
+
let d = Depth::from_key(b"key515");
356
+
assert_eq!(d, Depth::Depth(4))
357
+
}
358
+
359
+
#[test]
360
+
fn test_depth_interop() {
361
+
// examples from https://github.com/bluesky-social/atproto-interop-tests/blob/main/mst/key_heights.json
362
+
for (k, expected) in [
363
+
("", 0),
364
+
("asdf", 0),
365
+
("blue", 1),
366
+
("2653ae71", 0),
367
+
("88bfafc7", 2),
368
+
("2a92d355", 4),
369
+
("884976f5", 6),
370
+
("app.bsky.feed.post/454397e440ec", 4),
371
+
("app.bsky.feed.post/9adeb165882c", 8),
372
+
] {
373
+
let d = Depth::from_key(k.as_bytes());
374
+
assert_eq!(d, Depth::Depth(expected), "key: {}", k);
375
+
}
376
+
}
290
377
291
378
#[test]
292
-
fn test_next_from_node_empty() {
293
-
let node = Node {
379
+
fn test_push_empty_fails() {
380
+
let empty_node = Node {
294
381
left: None,
295
382
entries: vec![],
296
383
};
297
384
let mut stack = vec![];
298
-
push_from_node(&mut stack, &node).unwrap();
299
-
assert_eq!(stack.last(), None);
385
+
let err = push_from_node(&mut stack, &empty_node, Depth::Depth(4));
386
+
assert_eq!(err, Err(MstError::EmptyNode));
300
387
}
301
388
302
389
#[test]
303
-
fn test_needs_from_node_just_left() {
390
+
fn test_push_one_node() {
304
391
let node = Node {
305
392
left: Some(cid1()),
306
393
entries: vec![],
307
394
};
308
395
let mut stack = vec![];
309
-
push_from_node(&mut stack, &node).unwrap();
310
-
assert_eq!(stack.last(), Some(Need::Node(cid1())).as_ref());
396
+
push_from_node(&mut stack, &node, Depth::Depth(4)).unwrap();
397
+
assert_eq!(
398
+
stack.last(),
399
+
Some(Need::Node {
400
+
depth: Depth::Depth(3),
401
+
cid: cid1()
402
+
})
403
+
.as_ref()
404
+
);
311
405
}
312
-
313
-
// #[test]
314
-
// fn test_needs_from_node_just_one_record() {
315
-
// let node = Node {
316
-
// left: None,
317
-
// entries: vec![Entry {
318
-
// keysuffix: "asdf".into(),
319
-
// prefix_len: 0,
320
-
// value: cid1(),
321
-
// tree: None,
322
-
// }],
323
-
// };
324
-
// assert_eq!(
325
-
// needs_from_node(node).unwrap(),
326
-
// vec![Need::Record {
327
-
// rkey: "asdf".into(),
328
-
// cid: cid1(),
329
-
// },]
330
-
// );
331
-
// }
332
-
333
-
// #[test]
334
-
// fn test_needs_from_node_two_records() {
335
-
// let node = Node {
336
-
// left: None,
337
-
// entries: vec![
338
-
// Entry {
339
-
// keysuffix: "asdf".into(),
340
-
// prefix_len: 0,
341
-
// value: cid1(),
342
-
// tree: None,
343
-
// },
344
-
// Entry {
345
-
// keysuffix: "gh".into(),
346
-
// prefix_len: 2,
347
-
// value: cid2(),
348
-
// tree: None,
349
-
// },
350
-
// ],
351
-
// };
352
-
// assert_eq!(
353
-
// needs_from_node(node).unwrap(),
354
-
// vec![
355
-
// Need::Record {
356
-
// rkey: "asdf".into(),
357
-
// cid: cid1(),
358
-
// },
359
-
// Need::Record {
360
-
// rkey: "asgh".into(),
361
-
// cid: cid2(),
362
-
// },
363
-
// ]
364
-
// );
365
-
// }
366
-
367
-
// #[test]
368
-
// fn test_needs_from_node_with_both() {
369
-
// let node = Node {
370
-
// left: None,
371
-
// entries: vec![Entry {
372
-
// keysuffix: "asdf".into(),
373
-
// prefix_len: 0,
374
-
// value: cid1(),
375
-
// tree: Some(cid2()),
376
-
// }],
377
-
// };
378
-
// assert_eq!(
379
-
// needs_from_node(node).unwrap(),
380
-
// vec![
381
-
// Need::Record {
382
-
// rkey: "asdf".into(),
383
-
// cid: cid1(),
384
-
// },
385
-
// Need::Node(cid2()),
386
-
// ]
387
-
// );
388
-
// }
389
-
390
-
// #[test]
391
-
// fn test_needs_from_node_left_and_record() {
392
-
// let node = Node {
393
-
// left: Some(cid1()),
394
-
// entries: vec![Entry {
395
-
// keysuffix: "asdf".into(),
396
-
// prefix_len: 0,
397
-
// value: cid2(),
398
-
// tree: None,
399
-
// }],
400
-
// };
401
-
// assert_eq!(
402
-
// needs_from_node(node).unwrap(),
403
-
// vec![
404
-
// Need::Node(cid1()),
405
-
// Need::Record {
406
-
// rkey: "asdf".into(),
407
-
// cid: cid2(),
408
-
// },
409
-
// ]
410
-
// );
411
-
// }
412
-
413
-
// #[test]
414
-
// fn test_needs_from_full_node() {
415
-
// let node = Node {
416
-
// left: Some(cid1()),
417
-
// entries: vec![
418
-
// Entry {
419
-
// keysuffix: "asdf".into(),
420
-
// prefix_len: 0,
421
-
// value: cid2(),
422
-
// tree: Some(cid3()),
423
-
// },
424
-
// Entry {
425
-
// keysuffix: "ghi".into(),
426
-
// prefix_len: 1,
427
-
// value: cid4(),
428
-
// tree: Some(cid5()),
429
-
// },
430
-
// Entry {
431
-
// keysuffix: "jkl".into(),
432
-
// prefix_len: 2,
433
-
// value: cid6(),
434
-
// tree: Some(cid7()),
435
-
// },
436
-
// Entry {
437
-
// keysuffix: "mno".into(),
438
-
// prefix_len: 4,
439
-
// value: cid8(),
440
-
// tree: Some(cid9()),
441
-
// },
442
-
// ],
443
-
// };
444
-
// assert_eq!(
445
-
// needs_from_node(node).unwrap(),
446
-
// vec![
447
-
// Need::Node(cid1()),
448
-
// Need::Record {
449
-
// rkey: "asdf".into(),
450
-
// cid: cid2(),
451
-
// },
452
-
// Need::Node(cid3()),
453
-
// Need::Record {
454
-
// rkey: "aghi".into(),
455
-
// cid: cid4(),
456
-
// },
457
-
// Need::Node(cid5()),
458
-
// Need::Record {
459
-
// rkey: "agjkl".into(),
460
-
// cid: cid6(),
461
-
// },
462
-
// Need::Node(cid7()),
463
-
// Need::Record {
464
-
// rkey: "agjkmno".into(),
465
-
// cid: cid8(),
466
-
// },
467
-
// Need::Node(cid9()),
468
-
// ]
469
-
// );
470
-
// }
471
406
}
+34
-31
tests/non-huge-cars.rs
···
1
1
extern crate repo_stream;
2
-
use futures::TryStreamExt;
3
-
use iroh_car::CarReader;
4
-
use std::convert::Infallible;
2
+
use repo_stream::Driver;
5
3
4
+
const EMPTY_CAR: &'static [u8] = include_bytes!("../car-samples/empty.car");
6
5
const TINY_CAR: &'static [u8] = include_bytes!("../car-samples/tiny.car");
7
6
const LITTLE_CAR: &'static [u8] = include_bytes!("../car-samples/little.car");
8
7
const MIDSIZE_CAR: &'static [u8] = include_bytes!("../car-samples/midsize.car");
9
8
10
-
async fn test_car(bytes: &[u8], expected_records: usize, expected_sum: usize) {
11
-
let reader = CarReader::new(bytes).await.unwrap();
12
-
13
-
let root = reader
14
-
.header()
15
-
.roots()
16
-
.first()
17
-
.ok_or("missing root")
9
+
async fn test_car(
10
+
bytes: &[u8],
11
+
expected_records: usize,
12
+
expected_sum: usize,
13
+
expect_profile: bool,
14
+
) {
15
+
let mut driver = match Driver::load_car(bytes, |block| block.len(), 10 /* MiB */)
16
+
.await
18
17
.unwrap()
19
-
.clone();
20
-
21
-
let stream = std::pin::pin!(reader.stream());
22
-
23
-
let (_commit, v) =
24
-
repo_stream::drive::Vehicle::init(root, stream, |block| Ok::<_, Infallible>(block.len()))
25
-
.await
26
-
.unwrap();
27
-
let mut record_stream = std::pin::pin!(v.stream());
18
+
{
19
+
Driver::Memory(_commit, mem_driver) => mem_driver,
20
+
Driver::Disk(_) => panic!("too big"),
21
+
};
28
22
29
23
let mut records = 0;
30
24
let mut sum = 0;
31
25
let mut found_bsky_profile = false;
32
26
let mut prev_rkey = "".to_string();
33
-
while let Some((rkey, size)) = record_stream.try_next().await.unwrap() {
34
-
records += 1;
35
-
sum += size;
36
-
if rkey == "app.bsky.actor.profile/self" {
37
-
found_bsky_profile = true;
27
+
28
+
while let Some(pairs) = driver.next_chunk(256).await.unwrap() {
29
+
for (rkey, size) in pairs {
30
+
records += 1;
31
+
sum += size;
32
+
if rkey == "app.bsky.actor.profile/self" {
33
+
found_bsky_profile = true;
34
+
}
35
+
assert!(rkey > prev_rkey, "rkeys are streamed in order");
36
+
prev_rkey = rkey;
38
37
}
39
-
assert!(rkey > prev_rkey, "rkeys are streamed in order");
40
-
prev_rkey = rkey;
41
38
}
39
+
42
40
assert_eq!(records, expected_records);
43
41
assert_eq!(sum, expected_sum);
44
-
assert!(found_bsky_profile);
42
+
assert_eq!(found_bsky_profile, expect_profile);
43
+
}
44
+
45
+
#[tokio::test]
46
+
async fn test_empty_car() {
47
+
test_car(EMPTY_CAR, 0, 0, false).await
45
48
}
46
49
47
50
#[tokio::test]
48
51
async fn test_tiny_car() {
49
-
test_car(TINY_CAR, 8, 2071).await
52
+
test_car(TINY_CAR, 8, 2071, true).await
50
53
}
51
54
52
55
#[tokio::test]
53
56
async fn test_little_car() {
54
-
test_car(LITTLE_CAR, 278, 246960).await
57
+
test_car(LITTLE_CAR, 278, 246960, true).await
55
58
}
56
59
57
60
#[tokio::test]
58
61
async fn test_midsize_car() {
59
-
test_car(MIDSIZE_CAR, 11585, 3741393).await
62
+
test_car(MIDSIZE_CAR, 11585, 3741393, true).await
60
63
}