src/actor_store/blob.rs (+81 -59)
···
-//! Blob storage and retrieval for the actor store.
-//! Based on https://github.com/blacksky-algorithms/rsky/blob/main/rsky-pds/src/actor_store/blob/mod.rs
-//! blacksky-algorithms/rsky is licensed under the Apache License 2.0
-//!
-//! Modified for SQLite backend
-
 use std::sync::Arc;

-use anyhow::{Error, Result, bail};
 use cidv10::Cid;
 use diesel::dsl::{count_distinct, exists, not};
 use diesel::sql_types::{Integer, Nullable, Text};
 use diesel::*;
-use futures::stream::{self, StreamExt};
-use futures::try_join;
-use rsky_pds::actor_store::blob::sha256_stream;
 // use rocket::data::{Data, ToByteUnit};
-// use rocket::form::validate::Contains;
 use rsky_common::ipld::sha256_raw_to_cid;
 use rsky_common::now;
 use rsky_lexicon::blob_refs::BlobRef;
 use rsky_lexicon::com::atproto::admin::StatusAttr;
 use rsky_lexicon::com::atproto::repo::ListMissingBlobsRefRecordBlob;
 use rsky_pds::actor_store::blob::{
-    BlobMetadata, GetBlobMetadataOutput, GetBlobOutput, ListBlobsOpts, ListMissingBlobsOpts,
     verify_blob,
 };
 use rsky_pds::image;
 use rsky_pds::models::models;
 use rsky_repo::error::BlobError;
 use rsky_repo::types::{PreparedBlobRef, PreparedWrite};
-use sha2::Digest;

-use super::sql_blob::BlobStoreSql;
 use crate::db::DbConn;

 pub struct BlobReader {
     pub blobstore: BlobStoreSql,
     pub did: String,
     pub db: Arc<DbConn>,
 }

-// Basically handles getting blob records from db
 impl BlobReader {
     pub fn new(blobstore: BlobStoreSql, db: Arc<DbConn>) -> Self {
-        // BlobReader {
-        //     did: blobstore.bucket.clone(),
-        //     blobstore,
-        //     db,
-        // }
-        todo!();
     }

     pub async fn get_blob_metadata(&self, cid: Cid) -> Result<GetBlobMetadataOutput> {
         use rsky_pds::schema::pds::blob::dsl as BlobSchema;
···
         }
     }

     pub async fn get_blob(&self, cid: Cid) -> Result<GetBlobOutput> {
         let metadata = self.get_blob_metadata(cid).await?;
-        // let blob_stream = match self.blobstore.get_stream(cid).await {
-        //     Ok(res) => res,
-        //     Err(e) => {
-        //         return match e.downcast_ref() {
-        //             Some(GetObjectError::NoSuchKey(key)) => {
-        //                 Err(anyhow::Error::new(GetObjectError::NoSuchKey(key.clone())))
-        //             }
-        //             _ => bail!(e.to_string()),
-        //         };
-        //     }
-        // };
-        // Ok(GetBlobOutput {
-        //     size: metadata.size,
-        //     mime_type: metadata.mime_type,
-        //     stream: blob_stream,
-        // })
-        todo!();
     }

     pub async fn get_records_for_blob(&self, cid: Cid) -> Result<Vec<String>> {
         use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
···
                     .filter(RecordBlobSchema::did.eq(did))
                     .select(models::RecordBlob::as_select())
                     .get_results(conn)?;
-                Ok::<_, Error>(results.into_iter().map(|row| row.record_uri))
             })
             .await?
             .collect::<Vec<String>>();
···
         Ok(res)
     }

     pub async fn upload_blob_and_get_metadata(
         &self,
         user_suggested_mime: String,
-        blob: Data<'_>, // Type representing the body data of a request.
     ) -> Result<BlobMetadata> {
-        todo!();
-        let blob_stream = blob.open(100.mebibytes());
-        let bytes = blob_stream.into_bytes().await?;
-        let size = bytes.n.written;
-        let bytes = bytes.into_inner();
         let (temp_key, sha256, img_info, sniffed_mime) = try_join!(
             self.blobstore.put_temp(bytes.clone()),
             sha256_stream(bytes.clone()),
···

         Ok(BlobMetadata {
             temp_key,
-            size: size as i64,
             cid,
             mime_type,
             width: if let Some(ref info) = img_info {
···
         })
     }

     pub async fn track_untethered_blob(&self, metadata: BlobMetadata) -> Result<BlobRef> {
         use rsky_pds::schema::pds::blob::dsl as BlobSchema;
···
             ON CONFLICT (cid, did) DO UPDATE \
             SET \"tempKey\" = EXCLUDED.\"tempKey\" \
             WHERE pds.blob.\"tempKey\" is not null;");
         upsert
             .bind::<Text, _>(&cid.to_string())
             .bind::<Text, _>(&did)
···
         }).await
     }

     pub async fn process_write_blobs(&self, writes: Vec<PreparedWrite>) -> Result<()> {
         self.delete_dereferenced_blobs(writes.clone()).await?;
         let _ = stream::iter(writes)
             .then(|write| async move {
                 Ok::<(), anyhow::Error>(match write {
···
             .await
             .into_iter()
             .collect::<Result<Vec<_>, _>>()?;
         Ok(())
     }

     pub async fn delete_dereferenced_blobs(&self, writes: Vec<PreparedWrite>) -> Result<()> {
         use rsky_pds::schema::pds::blob::dsl as BlobSchema;
         use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
···
                 _ => None,
             })
             .collect();
         if uris.is_empty() {
             return Ok(());
         }
···
             .await?
             .into_iter()
             .collect::<Vec<models::RecordBlob>>();
         if deleted_repo_blobs.is_empty() {
             return Ok(());
         }
···
             .into_iter()
             .flat_map(|v: Vec<PreparedBlobRef>| v.into_iter().map(|b| b.cid.to_string()))
             .collect();
         let mut cids_to_keep = Vec::new();
         cids_to_keep.append(&mut new_blob_cids);
         cids_to_keep.append(&mut duplicated_cids);
···
         let cids_to_delete = deleted_repo_blob_cids
             .into_iter()
             .filter_map(|cid: String| match cids_to_keep.contains(&cid) {
-                true => Some(cid),
-                false => None,
             })
             .collect::<Vec<String>>();
         if cids_to_delete.is_empty() {
             return Ok(());
         }
···
             })
             .await?;

-        // Original code queues a background job to delete by CID from S3 compatible blobstore
         let _ = stream::iter(cids_to_delete)
             .then(|cid| async { self.blobstore.delete(cid).await })
             .collect::<Vec<_>>()
             .await
             .into_iter()
             .collect::<Result<Vec<_>, _>>()?;
         Ok(())
     }

     pub async fn verify_blob_and_make_permanent(&self, blob: PreparedBlobRef) -> Result<()> {
         use rsky_pds::schema::pds::blob::dsl as BlobSchema;
···
                     .optional()
             })
             .await?;
         if let Some(found) = found {
             verify_blob(&blob, &found).await?;
             if let Some(ref temp_key) = found.temp_key {
···
                 .await?;
             Ok(())
         } else {
-            bail!("Cound not find blob: {:?}", blob.cid.to_string())
         }
     }
367
368
-
pub async fn associate_blob(&self, blob: PreparedBlobRef, _record_uri: String) -> Result<()> {
369
use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
370
371
let cid = blob.cid.to_string();
372
-
let record_uri = _record_uri;
373
let did = self.did.clone();
374
self.db
375
.run(move |conn| {
376
insert_into(RecordBlobSchema::record_blob)
···
383
.execute(conn)
384
})
385
.await?;
386
Ok(())
387
}
388
389
pub async fn blob_count(&self) -> Result<i64> {
390
use rsky_pds::schema::pds::blob::dsl as BlobSchema;
391
···
             .await
     }

     pub async fn record_blob_count(&self) -> Result<i64> {
         use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
···
             .await
     }

     pub async fn list_missing_blobs(
         &self,
         opts: ListMissingBlobsOpts,
···
             .await
     }

     pub async fn list_blobs(&self, opts: ListBlobsOpts) -> Result<Vec<String>> {
         use rsky_pds::schema::pds::record::dsl as RecordSchema;
         use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
         let ListBlobsOpts {
             since,
             cursor,
···
             }
             self.db.run(move |conn| builder.load(conn)).await?
         };
         Ok(res)
     }

     pub async fn get_blob_takedown_status(&self, cid: Cid) -> Result<Option<StatusAttr>> {
         use rsky_pds::schema::pds::blob::dsl as BlobSchema;
···
                     .select(models::Blob::as_select())
                     .first(conn)
                     .optional()?;
                 match res {
                     None => Ok(None),
                     Some(res) => match res.takedown_ref {
···
             .await
     }

-    // Transactors
-    // -------------------
-
     pub async fn update_blob_takedown_status(&self, blob: Cid, takedown: StatusAttr) -> Result<()> {
         use rsky_pds::schema::pds::blob::dsl as BlobSchema;
···
             false => None,
         };

-        let blob = self
-            .db
             .run(move |conn| {
                 update(BlobSchema::blob)
-                    .filter(BlobSchema::cid.eq(blob.to_string()))
                     .set(BlobSchema::takedownRef.eq(takedown_ref))
                     .execute(conn)?;
-                Ok::<_, Error>(blob)
             })
             .await?;
···
             true => self.blobstore.quarantine(blob).await,
             false => self.blobstore.unquarantine(blob).await,
         };
         match res {
             Ok(_) => Ok(()),
             Err(e) => match e.downcast_ref() {
···
 use std::sync::Arc;

+use anyhow::{Result, bail};
 use cidv10::Cid;
 use diesel::dsl::{count_distinct, exists, not};
 use diesel::sql_types::{Integer, Nullable, Text};
 use diesel::*;
+use futures::{
+    stream::{self, StreamExt},
+    try_join,
+};
 // use rocket::data::{Data, ToByteUnit};
 use rsky_common::ipld::sha256_raw_to_cid;
 use rsky_common::now;
 use rsky_lexicon::blob_refs::BlobRef;
 use rsky_lexicon::com::atproto::admin::StatusAttr;
 use rsky_lexicon::com::atproto::repo::ListMissingBlobsRefRecordBlob;
 use rsky_pds::actor_store::blob::{
+    BlobMetadata, GetBlobMetadataOutput, ListBlobsOpts, ListMissingBlobsOpts, sha256_stream,
     verify_blob,
 };
 use rsky_pds::image;
 use rsky_pds::models::models;
 use rsky_repo::error::BlobError;
 use rsky_repo::types::{PreparedBlobRef, PreparedWrite};

+use super::sql_blob::{BlobStoreSql, ByteStream};
 use crate::db::DbConn;

+pub struct GetBlobOutput {
+    pub size: i32,
+    pub mime_type: Option<String>,
+    pub stream: ByteStream,
+}
+
+/// Handles blob operations for an actor store
 pub struct BlobReader {
+    /// SQL-based blob storage
     pub blobstore: BlobStoreSql,
+    /// DID of the actor
     pub did: String,
+    /// Database connection
     pub db: Arc<DbConn>,
 }

 impl BlobReader {
+    /// Create a new blob reader
     pub fn new(blobstore: BlobStoreSql, db: Arc<DbConn>) -> Self {
+        BlobReader {
+            did: blobstore.did.clone(),
+            blobstore,
+            db,
+        }
     }

+    /// Get metadata for a blob by CID
     pub async fn get_blob_metadata(&self, cid: Cid) -> Result<GetBlobMetadataOutput> {
         use rsky_pds::schema::pds::blob::dsl as BlobSchema;
···
         }
     }

+    /// Get a blob by CID with metadata and content
     pub async fn get_blob(&self, cid: Cid) -> Result<GetBlobOutput> {
         let metadata = self.get_blob_metadata(cid).await?;
+        let blob_stream = match self.blobstore.get_stream(cid).await {
+            Ok(stream) => stream,
+            Err(e) => bail!("Failed to get blob: {}", e),
+        };
+
+        Ok(GetBlobOutput {
+            size: metadata.size,
+            mime_type: metadata.mime_type,
+            stream: blob_stream,
+        })
     }

+    /// Get all records that reference a specific blob
     pub async fn get_records_for_blob(&self, cid: Cid) -> Result<Vec<String>> {
         use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
···
                     .filter(RecordBlobSchema::did.eq(did))
                     .select(models::RecordBlob::as_select())
                     .get_results(conn)?;
+                Ok::<_, result::Error>(results.into_iter().map(|row| row.record_uri))
             })
             .await?
             .collect::<Vec<String>>();
···
         Ok(res)
     }

+    /// Upload a blob and get its metadata
     pub async fn upload_blob_and_get_metadata(
         &self,
         user_suggested_mime: String,
+        blob: Vec<u8>,
     ) -> Result<BlobMetadata> {
+        let bytes = blob;
+        let size = bytes.len() as i64;
+
         let (temp_key, sha256, img_info, sniffed_mime) = try_join!(
             self.blobstore.put_temp(bytes.clone()),
             sha256_stream(bytes.clone()),
···

         Ok(BlobMetadata {
             temp_key,
+            size,
             cid,
             mime_type,
             width: if let Some(ref info) = img_info {
···
         })
     }

+    /// Track a blob that hasn't been associated with any records yet
     pub async fn track_untethered_blob(&self, metadata: BlobMetadata) -> Result<BlobRef> {
         use rsky_pds::schema::pds::blob::dsl as BlobSchema;
···
             ON CONFLICT (cid, did) DO UPDATE \
             SET \"tempKey\" = EXCLUDED.\"tempKey\" \
             WHERE pds.blob.\"tempKey\" is not null;");
+        #[expect(trivial_casts)]
         upsert
             .bind::<Text, _>(&cid.to_string())
             .bind::<Text, _>(&did)
···
         }).await
     }

+    /// Process blobs associated with writes
     pub async fn process_write_blobs(&self, writes: Vec<PreparedWrite>) -> Result<()> {
         self.delete_dereferenced_blobs(writes.clone()).await?;
+
         let _ = stream::iter(writes)
             .then(|write| async move {
                 Ok::<(), anyhow::Error>(match write {
···
             .await
             .into_iter()
             .collect::<Result<Vec<_>, _>>()?;
+
         Ok(())
     }

+    /// Delete blobs that are no longer referenced by any records
     pub async fn delete_dereferenced_blobs(&self, writes: Vec<PreparedWrite>) -> Result<()> {
         use rsky_pds::schema::pds::blob::dsl as BlobSchema;
         use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
···
                 _ => None,
             })
             .collect();
+
         if uris.is_empty() {
             return Ok(());
         }
···
             .await?
             .into_iter()
             .collect::<Vec<models::RecordBlob>>();
+
         if deleted_repo_blobs.is_empty() {
             return Ok(());
         }
···
             .into_iter()
             .flat_map(|v: Vec<PreparedBlobRef>| v.into_iter().map(|b| b.cid.to_string()))
             .collect();
+
         let mut cids_to_keep = Vec::new();
         cids_to_keep.append(&mut new_blob_cids);
         cids_to_keep.append(&mut duplicated_cids);
···
         let cids_to_delete = deleted_repo_blob_cids
             .into_iter()
             .filter_map(|cid: String| match cids_to_keep.contains(&cid) {
+                true => None,
+                false => Some(cid),
             })
             .collect::<Vec<String>>();
+
         if cids_to_delete.is_empty() {
             return Ok(());
         }
···
             })
             .await?;

+        // Delete from blob storage
         let _ = stream::iter(cids_to_delete)
             .then(|cid| async { self.blobstore.delete(cid).await })
             .collect::<Vec<_>>()
             .await
             .into_iter()
             .collect::<Result<Vec<_>, _>>()?;
+
         Ok(())
     }

+    /// Verify a blob and make it permanent
     pub async fn verify_blob_and_make_permanent(&self, blob: PreparedBlobRef) -> Result<()> {
         use rsky_pds::schema::pds::blob::dsl as BlobSchema;
···
                     .optional()
             })
             .await?;
+
         if let Some(found) = found {
             verify_blob(&blob, &found).await?;
             if let Some(ref temp_key) = found.temp_key {
···
                 .await?;
             Ok(())
         } else {
+            bail!("Could not find blob: {:?}", blob.cid.to_string())
         }
     }

+    /// Associate a blob with a record
+    pub async fn associate_blob(&self, blob: PreparedBlobRef, record_uri: String) -> Result<()> {
         use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;

         let cid = blob.cid.to_string();
         let did = self.did.clone();
+
         self.db
             .run(move |conn| {
                 insert_into(RecordBlobSchema::record_blob)
···
                     .execute(conn)
             })
             .await?;
+
         Ok(())
     }

+    /// Count all blobs for this actor
     pub async fn blob_count(&self) -> Result<i64> {
         use rsky_pds::schema::pds::blob::dsl as BlobSchema;
···
             .await
     }

+    /// Count blobs associated with records
     pub async fn record_blob_count(&self) -> Result<i64> {
         use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
···
             .await
     }

+    /// List blobs that are referenced but missing
     pub async fn list_missing_blobs(
         &self,
         opts: ListMissingBlobsOpts,
···
             .await
     }

+    /// List all blobs with optional filtering
     pub async fn list_blobs(&self, opts: ListBlobsOpts) -> Result<Vec<String>> {
         use rsky_pds::schema::pds::record::dsl as RecordSchema;
         use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
+
         let ListBlobsOpts {
             since,
             cursor,
···
             }
             self.db.run(move |conn| builder.load(conn)).await?
         };
+
         Ok(res)
     }

+    /// Get the takedown status of a blob
     pub async fn get_blob_takedown_status(&self, cid: Cid) -> Result<Option<StatusAttr>> {
         use rsky_pds::schema::pds::blob::dsl as BlobSchema;
···
                     .select(models::Blob::as_select())
                     .first(conn)
                     .optional()?;
+
                 match res {
                     None => Ok(None),
                     Some(res) => match res.takedown_ref {
···
             .await
     }

+    /// Update the takedown status of a blob
     pub async fn update_blob_takedown_status(&self, blob: Cid, takedown: StatusAttr) -> Result<()> {
         use rsky_pds::schema::pds::blob::dsl as BlobSchema;
···
             false => None,
         };

+        let blob_cid = blob.to_string();
+        let did_clone = self.did.clone();
+
+        self.db
             .run(move |conn| {
                 update(BlobSchema::blob)
+                    .filter(BlobSchema::cid.eq(blob_cid))
+                    .filter(BlobSchema::did.eq(did_clone))
                     .set(BlobSchema::takedownRef.eq(takedown_ref))
                     .execute(conn)?;
+                Ok::<_, result::Error>(blob)
             })
             .await?;
···
             true => self.blobstore.quarantine(blob).await,
             false => self.blobstore.unquarantine(blob).await,
         };
+
         match res {
             Ok(_) => Ok(()),
             Err(e) => match e.downcast_ref() {
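
A minimal usage sketch of the rewritten BlobReader follows (not part of the diff). The module paths and the pre-built DbConn handle are assumptions; everything else uses only the types and methods introduced above.

use std::{path::PathBuf, sync::Arc};

use anyhow::Result;

// Module paths below are assumptions about this crate's layout.
use crate::actor_store::blob::BlobReader;
use crate::actor_store::sql_blob::{BlobConfig, BlobStoreSql};
use crate::db::DbConn;

async fn upload_and_fetch(db: Arc<DbConn>) -> Result<()> {
    let store = BlobStoreSql::new(
        "did:plc:example".to_string(),
        &BlobConfig { path: PathBuf::from("./blobs") },
        db.clone(),
    );
    let reader = BlobReader::new(store, db);

    // Upload raw bytes; the returned metadata carries the computed CID.
    let metadata = reader
        .upload_blob_and_get_metadata("application/octet-stream".to_string(), vec![0u8; 16])
        .await?;
    let cid = metadata.cid;

    // Register the blob before any record references it.
    reader.track_untethered_blob(metadata).await?;

    // Stream it back out by CID.
    let blob = reader.get_blob(cid).await?;
    println!("{} bytes, mime {:?}", blob.size, blob.mime_type);
    Ok(())
}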
src/actor_store/sql_blob.rs (+472 -165)
···
 use std::{path::PathBuf, str::FromStr as _, sync::Arc};

-use anyhow::Result;
 use cidv10::Cid;
 use rsky_common::get_random_str;

 use crate::db::DbConn;

 /// Type for stream of blob data
 pub type BlobStream = Box<dyn std::io::Read + Send>;

-/// Placeholder implementation for blob store
 #[derive(Clone)]
-pub(crate) struct BlobStoreSql {
-    client: Arc<DbConn>,
-    path: PathBuf,
 }

 /// Configuration for the blob store
-/// TODO: Implement this placeholder
-pub(crate) struct BlobConfig {
-    pub(crate) path: PathBuf,
 }

-/// ByteStream
-/// TODO: Implement this placeholder
-pub(crate) struct ByteStream {
-    pub(crate) bytes: Vec<u8>,
 }
-impl ByteStream {
-    pub async fn collect(self) -> Result<Vec<u8>> {
-        Ok(self.bytes)
     }
 }

 impl BlobStoreSql {
-    pub fn new(did: String, cfg: &BlobConfig) -> Self {
-        // let client = aws_sdk_s3::Client::new(cfg);
-        // BlobStorePlaceholder {
-        //     client,
-        //     bucket: did,
-        // }
-        todo!();
     }

-    pub fn creator(cfg: &BlobConfig) -> Box<dyn Fn(String) -> BlobStoreSql + '_> {
-        Box::new(move |did: String| BlobStoreSql::new(did, cfg))
     }

     fn gen_key(&self) -> String {
         get_random_str()
     }

-    fn get_tmp_path(&self, key: &String) -> String {
-        // format!("tmp/{0}/{1}", self.bucket, key)
-        todo!();
     }

-    fn get_stored_path(&self, cid: Cid) -> String {
-        // format!("blocks/{0}/{1}", self.bucket, cid)
-        todo!();
     }

-    fn get_quarantined_path(&self, cid: Cid) -> String {
-        // format!("quarantine/{0}/{1}", self.bucket, cid)
-        todo!();
     }

     pub async fn put_temp(&self, bytes: Vec<u8>) -> Result<String> {
         let key = self.gen_key();
-        // let body = ByteStream::from(bytes);
-        // self.client
-        //     .put_object()
-        //     .body(body)
-        //     .bucket(&self.bucket)
-        //     .key(self.get_tmp_path(&key))
-        //     .acl(ObjectCannedAcl::PublicRead)
-        //     .send()
-        //     .await?;
-        // Ok(key)
-        todo!();
     }

     pub async fn make_permanent(&self, key: String, cid: Cid) -> Result<()> {
-        // let already_has = self.has_stored(cid).await?;
-        // if !already_has {
-        //     Ok(self
-        //         .move_object(MoveObject {
-        //             from: self.get_tmp_path(&key),
-        //             to: self.get_stored_path(cid),
-        //         })
-        //         .await?)
-        // } else {
-        //     // already saved, so we no-op & just delete the temp
-        //     Ok(self.delete_key(self.get_tmp_path(&key)).await?)
-        // }
-        todo!();
     }

     pub async fn put_permanent(&self, cid: Cid, bytes: Vec<u8>) -> Result<()> {
-        // let body = ByteStream::from(bytes);
-        // self.client
-        //     .put_object()
-        //     .body(body)
-        //     .bucket(&self.bucket)
-        //     .key(self.get_stored_path(cid))
-        //     .acl(ObjectCannedAcl::PublicRead)
-        //     .send()
-        //     .await?;
-        // Ok(())
-        todo!();
     }

     pub async fn quarantine(&self, cid: Cid) -> Result<()> {
-        // self.move_object(MoveObject {
-        //     from: self.get_stored_path(cid),
-        //     to: self.get_quarantined_path(cid),
-        // })
-        // .await
-        todo!();
     }

     pub async fn unquarantine(&self, cid: Cid) -> Result<()> {
-        // self.move_object(MoveObject {
-        //     from: self.get_quarantined_path(cid),
-        //     to: self.get_stored_path(cid),
-        // })
-        // .await
-        todo!();
     }

-    async fn get_object(&self, cid: Cid) -> Result<ByteStream> {
-        // let res = self
-        //     .client
-        //     .get_object()
-        //     .bucket(&self.bucket)
-        //     .key(self.get_stored_path(cid))
-        //     .send()
-        //     .await;
-        // match res {
-        //     Ok(res) => Ok(res.body),
-        //     Err(SdkError::ServiceError(s)) => Err(anyhow::Error::new(s.into_err())),
-        //     Err(e) => Err(anyhow::Error::new(e.into_service_error())),
-        // }
-        todo!();
     }

     pub async fn get_bytes(&self, cid: Cid) -> Result<Vec<u8>> {
-        let res = self.get_object(cid).await?;
-        // let bytes = res.collect().await.map(|data| data.into_bytes())?;
-        // Ok(bytes.to_vec())
-        todo!();
     }

     pub async fn get_stream(&self, cid: Cid) -> Result<ByteStream> {
         self.get_object(cid).await
     }

     pub async fn delete(&self, cid: String) -> Result<()> {
-        self.delete_key(self.get_stored_path(Cid::from_str(&cid)?))
             .await
     }

     pub async fn delete_many(&self, cids: Vec<Cid>) -> Result<()> {
-        let keys: Vec<String> = cids
-            .into_iter()
-            .map(|cid| self.get_stored_path(cid))
-            .collect();
-        self.delete_many_keys(keys).await
     }

-    pub async fn has_stored(&self, cid: Cid) -> Result<bool> {
-        Ok(self.has_key(self.get_stored_path(cid)).await)
     }

     pub async fn has_temp(&self, key: String) -> Result<bool> {
-        Ok(self.has_key(self.get_tmp_path(&key)).await)
     }

-    async fn has_key(&self, key: String) -> bool {
-        // let res = self
-        //     .client
-        //     .head_object()
-        //     .bucket(&self.bucket)
-        //     .key(key)
-        //     .send()
-        //     .await;
-        // res.is_ok()
-        todo!();
     }

-    async fn delete_key(&self, key: String) -> Result<()> {
-        // self.client
-        //     .delete_object()
-        //     .bucket(&self.bucket)
-        //     .key(key)
-        //     .send()
-        //     .await?;
-        // Ok(())
-        todo!();
     }

     async fn delete_many_keys(&self, keys: Vec<String>) -> Result<()> {
-        // let objects: Vec<ObjectIdentifier> = keys
-        //     .into_iter()
-        //     .map(|key| Ok(ObjectIdentifier::builder().key(key).build()?))
-        //     .collect::<Result<Vec<ObjectIdentifier>>>()?;
-        // let deletes = Delete::builder().set_objects(Some(objects)).build()?;
-        // self.client
-        //     .delete_objects()
-        //     .bucket(&self.bucket)
-        //     .delete(deletes)
-        //     .send()
-        //     .await?;
-        // Ok(())
-        todo!();
     }

     async fn move_object(&self, keys: MoveObject) -> Result<()> {
-        // self.client
-        //     .copy_object()
-        //     .bucket(&self.bucket)
-        //     .copy_source(format!(
-        //         "{0}/{1}/{2}",
-        //         env_str("AWS_ENDPOINT_BUCKET").unwrap(),
-        //         self.bucket,
-        //         keys.from
-        //     ))
-        //     .key(keys.to)
-        //     .acl(ObjectCannedAcl::PublicRead)
-        //     .send()
-        //     .await?;
-        // self.client
-        //     .delete_object()
-        //     .bucket(&self.bucket)
-        //     .key(keys.from)
-        //     .send()
-        //     .await?;
-        // Ok(())
-        todo!();
     }
 }
-
-struct MoveObject {
-    from: String,
-    to: String,
-}
···
+#![expect(
+    clippy::pub_use,
+    clippy::single_char_lifetime_names,
+    unused_qualifications
+)]
 use std::{path::PathBuf, str::FromStr as _, sync::Arc};

+use anyhow::{Context, Result};
 use cidv10::Cid;
+use diesel::*;
 use rsky_common::get_random_str;
+use tokio::fs;

 use crate::db::DbConn;

 /// Type for stream of blob data
 pub type BlobStream = Box<dyn std::io::Read + Send>;

+/// ByteStream implementation for blob data
+pub struct ByteStream {
+    pub bytes: Vec<u8>,
+}
+
+impl ByteStream {
+    pub fn new(bytes: Vec<u8>) -> Self {
+        Self { bytes }
+    }
+
+    pub async fn collect(self) -> Result<Vec<u8>> {
+        Ok(self.bytes)
+    }
+}
+
+/// SQL-based implementation of blob storage
 #[derive(Clone)]
+pub struct BlobStoreSql {
+    /// Database connection for metadata
+    pub db: Arc<DbConn>,
+    /// DID of the actor
+    pub did: String,
+    /// Path for blob storage
+    pub path: PathBuf,
 }

 /// Configuration for the blob store
+pub struct BlobConfig {
+    /// Base path for blob storage
+    pub path: PathBuf,
 }

+/// Represents a move operation for blobs
+struct MoveObject {
+    from: String,
+    to: String,
 }
+
+/// Blob table structure for SQL operations
+#[derive(Queryable, Insertable, Debug)]
+#[diesel(table_name = blobs)]
+struct BlobEntry {
+    cid: String,
+    did: String,
+    path: String,
+    size: i32,
+    mime_type: String,
+    quarantined: bool,
+    temp: bool,
+}
+
+// Table definition for blobs
+table! {
+    blobs (cid, did) {
+        cid -> Text,
+        did -> Text,
+        path -> Text,
+        size -> Integer,
+        mime_type -> Text,
+        quarantined -> Bool,
+        temp -> Bool,
     }
 }

 impl BlobStoreSql {
+    /// Create a new SQL-based blob store for the given DID
+    pub fn new(did: String, cfg: &BlobConfig, db: Arc<DbConn>) -> Self {
+        let actor_path = cfg.path.join(&did);
+
+        // Create actor directory if it doesn't exist
+        if !actor_path.exists() {
+            // Use blocking to avoid complicating this constructor
+            std::fs::create_dir_all(&actor_path).unwrap_or_else(|_| {
+                panic!("Failed to create blob directory: {}", actor_path.display())
+            });
+        }
+
+        BlobStoreSql {
+            db,
+            did,
+            path: actor_path,
+        }
     }

+    /// Create a factory function for blob stores
+    pub fn creator(cfg: &BlobConfig, db: Arc<DbConn>) -> Box<dyn Fn(String) -> BlobStoreSql + '_> {
+        let db_clone = db.clone();
+        Box::new(move |did: String| BlobStoreSql::new(did, cfg, db_clone.clone()))
     }

+    /// Generate a random key for temporary blobs
     fn gen_key(&self) -> String {
         get_random_str()
     }

+    /// Get the path for a temporary blob
+    fn get_tmp_path(&self, key: &str) -> PathBuf {
+        self.path.join("tmp").join(key)
     }

+    /// Get the filesystem path for a stored blob
+    fn get_stored_path(&self, cid: &Cid) -> PathBuf {
+        self.path.join("blocks").join(cid.to_string())
     }

+    /// Get the filesystem path for a quarantined blob
+    fn get_quarantined_path(&self, cid: &Cid) -> PathBuf {
+        self.path.join("quarantine").join(cid.to_string())
     }

+    /// Store a blob temporarily
     pub async fn put_temp(&self, bytes: Vec<u8>) -> Result<String> {
         let key = self.gen_key();
+        let tmp_path = self.get_tmp_path(&key);
+
+        // Ensure the directory exists
+        if let Some(parent) = tmp_path.parent() {
+            fs::create_dir_all(parent)
+                .await
+                .context("Failed to create temp directory")?;
+        }
+
+        // Write the blob data to the file
+        fs::write(&tmp_path, &bytes)
+            .await
+            .context("Failed to write temporary blob")?;
+
+        // Clone values to be used in the closure
+        let did_clone = self.did.clone();
+        let tmp_path_str = tmp_path.to_string_lossy().to_string();
+        let bytes_len = bytes.len() as i32;
+
+        // Store metadata in the database (will be updated when made permanent)
+        self.db
+            .run(move |conn| {
+                let entry = BlobEntry {
+                    cid: "temp".to_string(), // Will be updated when made permanent
+                    did: did_clone,
+                    path: tmp_path_str,
+                    size: bytes_len,
+                    mime_type: "application/octet-stream".to_string(), // Will be updated when made permanent
+                    quarantined: false,
+                    temp: true,
+                };
+
+                diesel::insert_into(blobs::table)
+                    .values(&entry)
+                    .execute(conn)
+                    .context("Failed to insert temporary blob metadata")
+            })
+            .await?;
+
+        Ok(key)
     }

+    /// Make a temporary blob permanent
     pub async fn make_permanent(&self, key: String, cid: Cid) -> Result<()> {
+        let already_has = self.has_stored(cid).await?;
+        if !already_has {
+            let tmp_path = self.get_tmp_path(&key);
+            let stored_path = self.get_stored_path(&cid);
+
+            // Ensure parent directory exists
+            if let Some(parent) = stored_path.parent() {
+                fs::create_dir_all(parent)
+                    .await
+                    .context("Failed to create blocks directory")?;
+            }
+
+            // Read the bytes
+            let bytes = fs::read(&tmp_path)
+                .await
+                .context("Failed to read temporary blob")?;
+
+            // Write to permanent location
+            fs::write(&stored_path, &bytes)
+                .await
+                .context("Failed to write permanent blob")?;
+
+            // Update database metadata
+            let tmp_path_clone = tmp_path.clone();
+            self.db
+                .run(move |conn| {
+                    // Update the entry with the correct CID and path
+                    diesel::update(blobs::table)
+                        .filter(blobs::path.eq(tmp_path_clone.to_string_lossy().to_string()))
+                        .set((
+                            blobs::cid.eq(cid.to_string()),
+                            blobs::path.eq(stored_path.to_string_lossy().to_string()),
+                            blobs::temp.eq(false),
+                        ))
+                        .execute(conn)
+                        .context("Failed to update blob metadata")
+                })
+                .await?;
+
+            // Remove the temporary file
+            fs::remove_file(tmp_path)
+                .await
+                .context("Failed to remove temporary blob")?;
+
+            Ok(())
+        } else {
+            // Already saved, so delete the temp file
+            let tmp_path = self.get_tmp_path(&key);
+            if tmp_path.exists() {
+                fs::remove_file(tmp_path)
+                    .await
+                    .context("Failed to remove existing temporary blob")?;
+            }
+            Ok(())
+        }
     }

+    /// Store a blob directly as permanent
     pub async fn put_permanent(&self, cid: Cid, bytes: Vec<u8>) -> Result<()> {
+        let stored_path = self.get_stored_path(&cid);
+
+        // Ensure parent directory exists
+        if let Some(parent) = stored_path.parent() {
+            fs::create_dir_all(parent)
+                .await
+                .context("Failed to create blocks directory")?;
+        }
+
+        // Write to permanent location
+        fs::write(&stored_path, &bytes)
+            .await
+            .context("Failed to write permanent blob")?;
+
+        let stored_path_str = stored_path.to_string_lossy().to_string();
+        let cid_str = cid.to_string();
+        let did_clone = self.did.clone();
+        let bytes_len = bytes.len() as i32;
+
+        // Update database metadata
+        self.db
+            .run(move |conn| {
+                let entry = BlobEntry {
+                    cid: cid_str,
+                    did: did_clone,
+                    path: stored_path_str.clone(),
+                    size: bytes_len,
+                    mime_type: "application/octet-stream".to_string(), // Could be improved with MIME detection
+                    quarantined: false,
+                    temp: false,
+                };
+
+                diesel::insert_into(blobs::table)
+                    .values(&entry)
+                    .on_conflict((blobs::cid, blobs::did))
+                    .do_update()
+                    .set(blobs::path.eq(stored_path_str))
+                    .execute(conn)
+                    .context("Failed to insert permanent blob metadata")
+            })
+            .await?;
+
+        Ok(())
     }

+    /// Quarantine a blob
     pub async fn quarantine(&self, cid: Cid) -> Result<()> {
+        let stored_path = self.get_stored_path(&cid);
+        let quarantined_path = self.get_quarantined_path(&cid);
+
+        // Ensure parent directory exists
+        if let Some(parent) = quarantined_path.parent() {
+            fs::create_dir_all(parent)
+                .await
+                .context("Failed to create quarantine directory")?;
+        }
+
+        // Move the blob if it exists
+        if stored_path.exists() {
+            // Read the bytes
+            let bytes = fs::read(&stored_path)
+                .await
+                .context("Failed to read stored blob")?;
+
+            // Write to quarantine location
+            fs::write(&quarantined_path, &bytes)
+                .await
+                .context("Failed to write quarantined blob")?;
+
+            // Update database metadata
+            let cid_str = cid.to_string();
+            let did_clone = self.did.clone();
+            let quarantined_path_str = quarantined_path.to_string_lossy().to_string();
+
+            self.db
+                .run(move |conn| {
+                    diesel::update(blobs::table)
+                        .filter(blobs::cid.eq(cid_str))
+                        .filter(blobs::did.eq(did_clone))
+                        .set((
+                            blobs::path.eq(quarantined_path_str),
+                            blobs::quarantined.eq(true),
+                        ))
+                        .execute(conn)
+                        .context("Failed to update blob metadata for quarantine")
+                })
+                .await?;
+
+            // Remove the original file
+            fs::remove_file(stored_path)
+                .await
+                .context("Failed to remove quarantined blob")?;
+        }
+
+        Ok(())
     }

+    /// Unquarantine a blob
     pub async fn unquarantine(&self, cid: Cid) -> Result<()> {
+        let quarantined_path = self.get_quarantined_path(&cid);
+        let stored_path = self.get_stored_path(&cid);
+
+        // Ensure parent directory exists
+        if let Some(parent) = stored_path.parent() {
+            fs::create_dir_all(parent)
+                .await
+                .context("Failed to create blocks directory")?;
+        }
+
+        // Move the blob if it exists
+        if quarantined_path.exists() {
+            // Read the bytes
+            let bytes = fs::read(&quarantined_path)
+                .await
+                .context("Failed to read quarantined blob")?;
+
+            // Write to normal location
+            fs::write(&stored_path, &bytes)
+                .await
+                .context("Failed to write unquarantined blob")?;
+
+            // Update database metadata
+            let stored_path_str = stored_path.to_string_lossy().to_string();
+            let cid_str = cid.to_string();
+            let did_clone = self.did.clone();
+
+            self.db
+                .run(move |conn| {
+                    diesel::update(blobs::table)
+                        .filter(blobs::cid.eq(cid_str))
+                        .filter(blobs::did.eq(did_clone))
+                        .set((
+                            blobs::path.eq(stored_path_str),
+                            blobs::quarantined.eq(false),
+                        ))
+                        .execute(conn)
+                        .context("Failed to update blob metadata for unquarantine")
+                })
+                .await?;
+
+            // Remove the quarantined file
+            fs::remove_file(quarantined_path)
+                .await
+                .context("Failed to remove from quarantine")?;
+        }
+
+        Ok(())
     }

+    /// Get a blob as a stream
+    pub async fn get_object(&self, cid_param: Cid) -> Result<ByteStream> {
+        use self::blobs::dsl::*;
+
+        // Get the blob path from the database
+        let cid_string = cid_param.to_string();
+        let did_clone = self.did.clone();
+
+        let blob_record = self
+            .db
+            .run(move |conn| {
+                blobs
+                    .filter(cid.eq(&cid_string))
+                    .filter(did.eq(&did_clone))
+                    .filter(quarantined.eq(false))
+                    .select(path)
+                    .first::<String>(conn)
+                    .optional()
+                    .context("Failed to query blob metadata")
+            })
+            .await?;
+
+        if let Some(blob_path) = blob_record {
+            // Read the blob data
+            let bytes = fs::read(blob_path)
+                .await
+                .context("Failed to read blob data")?;
+            Ok(ByteStream::new(bytes))
+        } else {
+            anyhow::bail!("Blob not found: {:?}", cid_param)
+        }
     }

+    /// Get blob bytes
     pub async fn get_bytes(&self, cid: Cid) -> Result<Vec<u8>> {
+        let stream = self.get_object(cid).await?;
+        stream.collect().await
     }

+    /// Get a blob as a stream
     pub async fn get_stream(&self, cid: Cid) -> Result<ByteStream> {
         self.get_object(cid).await
     }

+    /// Delete a blob by CID string
     pub async fn delete(&self, cid: String) -> Result<()> {
+        self.delete_key(self.get_stored_path(&Cid::from_str(&cid)?))
             .await
     }

+    /// Delete multiple blobs by CID
     pub async fn delete_many(&self, cids: Vec<Cid>) -> Result<()> {
+        for cid in cids {
+            self.delete_key(self.get_stored_path(&cid)).await?;
+        }
+        Ok(())
     }

+    /// Check if a blob is stored
+    pub async fn has_stored(&self, cid_param: Cid) -> Result<bool> {
+        use self::blobs::dsl::*;
+
+        let cid_string = cid_param.to_string();
+        let did_clone = self.did.clone();
+
+        let exists = self
+            .db
+            .run(move |conn| {
+                diesel::select(diesel::dsl::exists(
+                    blobs
+                        .filter(cid.eq(&cid_string))
+                        .filter(did.eq(&did_clone))
+                        .filter(temp.eq(false)),
+                ))
+                .get_result::<bool>(conn)
+                .context("Failed to check if blob exists")
+            })
+            .await?;
+
+        Ok(exists)
     }

+    /// Check if a temporary blob exists
     pub async fn has_temp(&self, key: String) -> Result<bool> {
+        let tmp_path = self.get_tmp_path(&key);
+        Ok(tmp_path.exists())
     }

+    /// Check if a blob exists by key
+    async fn has_key(&self, key_path: PathBuf) -> bool {
+        key_path.exists()
     }

+    /// Delete a blob by its key path
+    async fn delete_key(&self, key_path: PathBuf) -> Result<()> {
+        use self::blobs::dsl::*;
+
+        // Delete from database first
+        let key_path_clone = key_path.clone();
+        self.db
+            .run(move |conn| {
+                diesel::delete(blobs)
+                    .filter(path.eq(key_path_clone.to_string_lossy().to_string()))
+                    .execute(conn)
+                    .context("Failed to delete blob metadata")
+            })
+            .await?;
+
+        // Then delete the file if it exists
+        if key_path.exists() {
+            fs::remove_file(key_path)
+                .await
+                .context("Failed to delete blob file")?;
+        }
+
+        Ok(())
     }

+    /// Delete multiple blobs by key path
     async fn delete_many_keys(&self, keys: Vec<String>) -> Result<()> {
+        for key in keys {
+            self.delete_key(PathBuf::from(key)).await?;
+        }
+        Ok(())
     }

+    /// Move a blob from one location to another
     async fn move_object(&self, keys: MoveObject) -> Result<()> {
+        let from_path = PathBuf::from(&keys.from);
+        let to_path = PathBuf::from(&keys.to);
+
+        // Ensure parent directory exists
+        if let Some(parent) = to_path.parent() {
+            fs::create_dir_all(parent)
+                .await
+                .context("Failed to create directory")?;
+        }
+
+        // Only move if the source exists
+        if from_path.exists() {
+            // Read the data
+            let data = fs::read(&from_path)
+                .await
+                .context("Failed to read source blob")?;
+
+            // Write to the destination
+            fs::write(&to_path, data)
+                .await
+                .context("Failed to write destination blob")?;
+
+            // Update the database record
+            let from_path_clone = from_path.clone();
+            self.db
+                .run(move |conn| {
+                    diesel::update(blobs::table)
+                        .filter(blobs::path.eq(from_path_clone.to_string_lossy().to_string()))
+                        .set(blobs::path.eq(to_path.to_string_lossy().to_string()))
+                        .execute(conn)
+                        .context("Failed to update blob path")
+                })
+                .await?;
+
+            // Delete the source file
+            fs::remove_file(from_path)
+                .await
+                .context("Failed to remove source blob")?;
+        }
+
+        Ok(())
     }
 }
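
The store's lifecycle is key-first, CID-second: put_temp stages bytes under a random key with a placeholder metadata row, and make_permanent rewrites both the file location and the row once the CID is known. A minimal sketch of that flow (assuming a DbConn is already available and the module path shown is correct for this crate):

use std::{path::PathBuf, sync::Arc};

use anyhow::Result;
use cidv10::Cid;

use crate::actor_store::sql_blob::{BlobConfig, BlobStoreSql}; // assumed module path
use crate::db::DbConn;

async fn temp_to_permanent(db: Arc<DbConn>, cid: Cid) -> Result<()> {
    let store = BlobStoreSql::new(
        "did:plc:example".to_string(),
        &BlobConfig { path: PathBuf::from("./blobs") },
        db,
    );

    // Stage the bytes under a random temp key (tmp/<key> on disk).
    let key = store.put_temp(b"hello blob".to_vec()).await?;
    assert!(store.has_temp(key.clone()).await?);

    // Promote to blocks/<cid> once the CID has been verified elsewhere.
    store.make_permanent(key, cid).await?;
    assert!(store.has_stored(cid).await?);

    // Read back through the ByteStream wrapper.
    let bytes = store.get_bytes(cid).await?;
    assert_eq!(bytes, b"hello blob");
    Ok(())
}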
src/actor_store/sql_repo.rs (+25 -23)
···
 }

 impl ReadableBlockstore for SqlRepoReader {
-    fn get_bytes<'a>(
-        &'a self,
-        cid: &'a Cid,
-    ) -> Pin<Box<dyn Future<Output = Result<Option<Vec<u8>>>> + Send + Sync + 'a>> {
         let did: String = self.did.clone();
         let db: Arc<DbConn> = self.db.clone();
         let cid = cid.clone();
···
         })
     }

-    fn has<'a>(
-        &'a self,
         cid: Cid,
-    ) -> Pin<Box<dyn Future<Output = Result<bool>> + Send + Sync + 'a>> {
         Box::pin(async move {
             let got = <Self as ReadableBlockstore>::get_bytes(self, &cid).await?;
             Ok(got.is_some())
         })
     }

-    fn get_blocks<'a>(
-        &'a self,
         cids: Vec<Cid>,
-    ) -> Pin<Box<dyn Future<Output = Result<BlocksAndMissing>> + Send + Sync + 'a>> {
         let did: String = self.did.clone();
         let db: Arc<DbConn> = self.db.clone();
···
 }

 impl RepoStorage for SqlRepoReader {
-    fn get_root<'a>(&'a self) -> Pin<Box<dyn Future<Output = Option<Cid>> + Send + Sync + 'a>> {
         Box::pin(async move {
             match self.get_root_detailed().await {
                 Ok(root) => Some(root.cid),
···
         })
     }

-    fn put_block<'a>(
-        &'a self,
         cid: Cid,
         bytes: Vec<u8>,
         rev: String,
-    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'a>> {
         let did: String = self.did.clone();
         let db: Arc<DbConn> = self.db.clone();
         let bytes_cloned = bytes.clone();
···
         })
     }

-    fn put_many<'a>(
-        &'a self,
         to_put: BlockMap,
         rev: String,
-    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'a>> {
         let did: String = self.did.clone();
         let db: Arc<DbConn> = self.db.clone();
···
             Ok(())
         })
     }
-    fn update_root<'a>(
-        &'a self,
         cid: Cid,
         rev: String,
         is_create: Option<bool>,
-    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'a>> {
         let did: String = self.did.clone();
         let db: Arc<DbConn> = self.db.clone();
         let now: String = self.now.clone();
···
         })
     }

-    fn apply_commit<'a>(
-        &'a self,
         commit: CommitData,
         is_create: Option<bool>,
-    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'a>> {
         Box::pin(async move {
             self.update_root(commit.cid, commit.rev.clone(), is_create)
                 .await?;
···
 }

 impl ReadableBlockstore for SqlRepoReader {
+    fn get_bytes<'life>(
+        &'life self,
+        cid: &'life Cid,
+    ) -> Pin<Box<dyn Future<Output = Result<Option<Vec<u8>>>> + Send + Sync + 'life>> {
         let did: String = self.did.clone();
         let db: Arc<DbConn> = self.db.clone();
         let cid = cid.clone();
···
         })
     }

+    fn has<'life>(
+        &'life self,
         cid: Cid,
+    ) -> Pin<Box<dyn Future<Output = Result<bool>> + Send + Sync + 'life>> {
         Box::pin(async move {
             let got = <Self as ReadableBlockstore>::get_bytes(self, &cid).await?;
             Ok(got.is_some())
         })
     }

+    fn get_blocks<'life>(
+        &'life self,
         cids: Vec<Cid>,
+    ) -> Pin<Box<dyn Future<Output = Result<BlocksAndMissing>> + Send + Sync + 'life>> {
         let did: String = self.did.clone();
         let db: Arc<DbConn> = self.db.clone();
···
 }

 impl RepoStorage for SqlRepoReader {
+    fn get_root<'life>(
+        &'life self,
+    ) -> Pin<Box<dyn Future<Output = Option<Cid>> + Send + Sync + 'life>> {
         Box::pin(async move {
             match self.get_root_detailed().await {
                 Ok(root) => Some(root.cid),
···
         })
     }

+    fn put_block<'life>(
+        &'life self,
         cid: Cid,
         bytes: Vec<u8>,
         rev: String,
+    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'life>> {
         let did: String = self.did.clone();
         let db: Arc<DbConn> = self.db.clone();
         let bytes_cloned = bytes.clone();
···
         })
     }

+    fn put_many<'life>(
+        &'life self,
         to_put: BlockMap,
         rev: String,
+    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'life>> {
         let did: String = self.did.clone();
         let db: Arc<DbConn> = self.db.clone();
···
             Ok(())
         })
     }
+    fn update_root<'life>(
+        &'life self,
         cid: Cid,
         rev: String,
         is_create: Option<bool>,
+    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'life>> {
         let did: String = self.did.clone();
         let db: Arc<DbConn> = self.db.clone();
         let now: String = self.now.clone();
···
         })
     }

+    fn apply_commit<'life>(
+        &'life self,
         commit: CommitData,
         is_create: Option<bool>,
+    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'life>> {
         Box::pin(async move {
             self.update_root(commit.cid, commit.rev.clone(), is_create)
                 .await?;
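
The only change in this file is renaming the boxed-future lifetime from 'a to 'life, which lines up with the clippy::single_char_lifetime_names expectation added in sql_blob.rs. A self-contained toy example (not from this codebase) showing the same hand-written async-trait pattern:

use std::{future::Future, pin::Pin};

// Async trait methods written by hand: each returns a boxed future tied
// to &self via the named lifetime 'life instead of the usual 'a.
trait Store {
    fn get<'life>(
        &'life self,
        key: &'life str,
    ) -> Pin<Box<dyn Future<Output = Option<Vec<u8>>> + Send + Sync + 'life>>;
}

struct MemStore(Vec<(String, Vec<u8>)>);

impl Store for MemStore {
    fn get<'life>(
        &'life self,
        key: &'life str,
    ) -> Pin<Box<dyn Future<Output = Option<Vec<u8>>> + Send + Sync + 'life>> {
        Box::pin(async move {
            self.0.iter().find(|(k, _)| k == key).map(|(_, v)| v.clone())
        })
    }
}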
src/endpoints/mod.rs (+2 -2)
···
 //! Root module for all endpoints.
 mod identity;
-// mod repo;
 mod server;
 mod sync;
···
     Router::new()
         .route("/_health", get(health))
         .merge(identity::routes()) // com.atproto.identity
-        // .merge(repo::routes()) // com.atproto.repo
         .merge(server::routes()) // com.atproto.server
         .merge(sync::routes()) // com.atproto.sync
 }
···
 //! Root module for all endpoints.
 mod identity;
+mod repo;
 mod server;
 mod sync;
···
     Router::new()
         .route("/_health", get(health))
         .merge(identity::routes()) // com.atproto.identity
+        .merge(repo::routes()) // com.atproto.repo
         .merge(server::routes()) // com.atproto.server
         .merge(sync::routes()) // com.atproto.sync
 }
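
Re-enabling mod repo assumes src/endpoints/repo.rs exposes a routes() function like its siblings. A hedged sketch of that shape follows; the axum Router/get imports match the style used in this file, but the handler and route path are illustrative only.

use axum::{Router, routing::get};

// Placeholder handler; the real module would implement the
// com.atproto.repo.* XRPC endpoints.
async fn describe_repo() -> &'static str {
    "com.atproto.repo.describeRepo"
}

pub fn routes() -> Router {
    Router::new().route("/xrpc/com.atproto.repo.describeRepo", get(describe_repo))
}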