Alternative ATProto PDS implementation

prototype actor_store

+20
.sqlx/query-22c1e98ac038509ad16ce437e6670a59d3fc97a05ea8b0f1f80dba0157c53e13.json
+ {
+   "db_name": "SQLite",
+   "query": "SELECT name FROM actor_migration",
+   "describe": {
+     "columns": [
+       {
+         "name": "name",
+         "ordinal": 0,
+         "type_info": "Text"
+       }
+     ],
+     "parameters": {
+       "Right": 0
+     },
+     "nullable": [
+       false
+     ]
+   },
+   "hash": "22c1e98ac038509ad16ce437e6670a59d3fc97a05ea8b0f1f80dba0157c53e13"
+ }
+12
.sqlx/query-5ea8376fbbe3077b2fc62187cc29a2d03eda91fa468c7fe63306f04e160ecb5d.json
+ {
+   "db_name": "SQLite",
+   "query": "INSERT INTO actor_migration (name, appliedAt) VALUES (?, ?)",
+   "describe": {
+     "columns": [],
+     "parameters": {
+       "Right": 2
+     },
+     "nullable": []
+   },
+   "hash": "5ea8376fbbe3077b2fc62187cc29a2d03eda91fa468c7fe63306f04e160ecb5d"
+ }
+4 -2
src/actor_store/blob/reader.rs
···
use atrium_repo::Cid;
use sqlx::{Row, SqlitePool};

+ use crate::repo::types::{BlobStore, BlobStoreTrait};
+
/// Reader for blob data in the actor store.
pub(crate) struct BlobReader {
    /// Database connection.
···
    /// Get a blob's full data and metadata.
    pub(crate) async fn get_blob(&self, cid: &Cid) -> Result<Option<BlobData>> {
        let metadata = self.get_blob_metadata(cid).await?;
-       let blob_stream = self.blobstore.get_stream(cid).await?;
+       let blob_stream = self.blobstore.get_stream(*cid)?;
        if blob_stream.is_none() {
            return Err(anyhow::anyhow!("Blob not found")); // InvalidRequestError('Blob not found')
        }
···
    pub size: u64,
    /// The MIME type of the blob.
    pub mime_type: Option<String>,
-   pub stream: BlobStream,
+   pub stream: BlobStream, // stream.Readable,
}

/// Options for listing blobs.
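BlobReader::get_blob now reads the stream synchronously from the blob store. A rough usage sketch against the signature shown above; blob_exists and its call pattern are illustrative, not part of the change:

    use anyhow::Result;
    use atrium_repo::Cid;

    // Illustrative only: probe for a blob through the reader's surface shown above.
    async fn blob_exists(reader: &BlobReader, cid: &Cid) -> Result<bool> {
        Ok(reader.get_blob(cid).await?.is_some())
    }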
+6 -5
src/actor_store/blob/transactor.rs
···
use uuid::Uuid;

use super::BlobReader;
- use crate::repo::types::{PreparedBlobRef, PreparedWrite, WriteOpAction};
+ use crate::repo::types::{BlobStore, PreparedBlobRef, PreparedWrite, WriteOpAction};

/// Blob metadata for a newly uploaded blob.
#[derive(Debug, Clone)]
···

    /// Process blobs for a repository write operation.
    pub(crate) async fn process_write_blobs(&self, writes: Vec<PreparedWrite>) -> Result<()> {
-       self.delete_dereferenced_blobs(writes)
+       self.delete_dereferenced_blobs(writes, false)
            .await
            .context("failed to delete dereferenced blobs")?;
        for write in writes {
-           if write.action == WriteOpAction::Create || write.action == WriteOpAction::Update {
-               for blob in &write.blobs {
+           if write.action() == &WriteOpAction::Create || write.action() == &WriteOpAction::Update
+           {
+               for blob in &write.blobs() {
                    self.verify_blob_and_make_permanent(blob)
                        .await
                        .context("failed to verify and make blob permanent")?;
-                   self.associate_blob(&blob.r#ref.0.to_string(), &write.uri)
+                   self.associate_blob(&blob.r#ref.0.to_string(), &write.uri())
                        .await
                        .context("failed to associate blob with record")?;
                }
+6 -5
src/actor_store/db/migrations.rs
//! Database migrations for the actor store.
use anyhow::{Context as _, Result};
- use sqlx::SqlitePool;
+ use sqlx::{Executor, SqlitePool};

use super::ActorDb;

···
    /// Run all migrations
    pub(crate) async fn migrate_to_latest(&self) -> Result<()> {
        let past_migrations = sqlx::query!("SELECT name FROM actor_migration")
-           .fetch_all(&self.db.db)
+           .fetch_all(&self.db.pool)
            .await?;
        let mut past_migration_names = past_migrations
            .iter()
···
            let name = format!("{:p}", migration);
            if !past_migration_names.contains(&name) {
                migration(&self.db)?;
+               let now = chrono::Utc::now().to_rfc3339();
                sqlx::query!(
                    "INSERT INTO actor_migration (name, appliedAt) VALUES (?, ?)",
                    name,
-                   chrono::Utc::now().to_rfc3339()
+                   now,
                )
-               .execute(&self.db.db)
+               .execute(&self.db.pool)
                .await
                .context("failed to insert migration record")?;
            }
···
fn _001_init(db: &ActorDb) -> Result<()> {
    tokio::task::block_in_place(|| {
        tokio::runtime::Handle::current()
-           .block_on(create_tables(&db.db))
+           .block_on(create_tables(&db.pool))
            .context("failed to create initial tables")
    })
}
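The runner above records each applied migration in the actor_migration table. A sketch of that table's shape, inferred only from the queries in this hunk and the cached query metadata above; the real DDL lives in create_tables(), which is outside this hunk, so treat the column constraints as assumptions:

    use anyhow::Result;

    // Hypothetical helper: the shape implied by the SELECT/INSERT above — a text
    // `name` (non-null per the cached query metadata) plus an `appliedAt` timestamp.
    async fn ensure_migration_table(pool: &sqlx::SqlitePool) -> Result<()> {
        sqlx::query(
            "CREATE TABLE IF NOT EXISTS actor_migration (
                 name TEXT PRIMARY KEY NOT NULL,
                 appliedAt TEXT NOT NULL
             )",
        )
        .execute(pool)
        .await?;
        Ok(())
    }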
+2 -2
src/actor_store/mod.rs
···
mod actor_store_transactor;
mod actor_store_writer;
mod blob;
- // mod db;
+ mod db;
mod preference;
mod record;
mod repo;
···
// pub(crate) use actor_store_reader::ActorStoreReader;
pub(crate) use actor_store_transactor::ActorStoreTransactor;
pub(crate) use actor_store_writer::ActorStoreWriter;
- // pub(crate) use db::ActorDb;
+ pub(crate) use db::ActorDb;
pub(crate) use resources::ActorStoreResources;
+4 -4
src/actor_store/preference/mod.rs
//! Preference handling for actor store.

- // mod reader;
- // mod transactor;
+ mod reader;
+ mod transactor;

- // pub(crate) use reader::PreferenceReader;
- // pub(crate) use transactor::PreferenceTransactor;
+ pub(crate) use reader::PreferenceReader;
+ pub(crate) use transactor::PreferenceTransactor;
+1 -1
src/actor_store/preference/reader.rs
···
        scope: &str,
    ) -> Result<Vec<AccountPreference>> {
        let prefs_res = sqlx::query!("SELECT * FROM account_pref ORDER BY id")
-           .fetch_all(&self.db.db)
+           .fetch_all(&self.db.pool)
            .await?;

        let prefs = prefs_res
+1 -1
src/actor_store/preference/transactor.rs
···
        let mut tx = self
            .reader
            .db
-           .db
+           .pool
            .begin()
            .await
            .context("failed to begin transaction")?;
+4 -4
src/actor_store/record/mod.rs
//! Record storage and retrieval for the actor store.

- // mod reader;
- // mod transactor;
+ mod reader;
+ mod transactor;

- // pub(crate) use reader::RecordReader;
- // pub(crate) use transactor::RecordTransactor;
+ pub(crate) use reader::RecordReader;
+ pub(crate) use transactor::RecordTransactor;
+19 -24
src/actor_store/record/reader.rs
···
use anyhow::{Context as _, Result};
use atrium_repo::Cid;
use rsky_syntax::aturi::AtUri;
- use sqlx::{Row, SqlitePool};
+ use sqlx::Row;
use std::str::FromStr;

- use crate::actor_store::db::schema::Backlink;
+ use crate::actor_store::{ActorDb, db::schema::Backlink};

/// Reader for record data.
pub(crate) struct RecordReader {
    /// Database connection.
-   pub db: SqlitePool,
-   /// DID of the repository owner.
-   pub did: String,
+   pub db: ActorDb,
}

/// Record descriptor containing URI, path, and CID.
···
impl RecordReader {
    /// Create a new record reader.
-   pub(crate) fn new(db: SqlitePool, did: String) -> Self {
-       Self { db, did }
+   pub(crate) fn new(db: ActorDb) -> Self {
+       Self { db }
    }

    /// Count the total number of records.
    pub(crate) async fn record_count(&self) -> Result<i64> {
        let result = sqlx::query!(r#"SELECT COUNT(*) as count FROM record"#)
-           .fetch_one(&self.db)
+           .fetch_one(&self.db.pool)
            .await
            .context("failed to count records")?;
···
        "SELECT uri, cid FROM record WHERE uri > ? ORDER BY uri ASC LIMIT 1000",
        current_cursor
    )
-   .fetch_all(&self.db)
+   .fetch_all(&self.db.pool)
    .await
    .context("failed to fetch records")?;
···
    /// List all collections in the repository.
    pub(crate) async fn list_collections(&self) -> Result<Vec<String>> {
        let rows = sqlx::query!("SELECT collection FROM record GROUP BY collection")
-           .fetch_all(&self.db)
+           .fetch_all(&self.db.pool)
            .await
            .context("failed to list collections")?;
···
        let rows = query
            .build()
-           .fetch_all(&self.db)
+           .fetch_all(&self.db.pool)
            .await
            .context("failed to list records")?;
···
        let row = query
            .build()
-           .fetch_optional(&self.db)
+           .fetch_optional(&self.db.pool)
            .await
            .context("failed to fetch record")?;
···
        let result = query
            .build()
-           .fetch_optional(&self.db)
+           .fetch_optional(&self.db.pool)
            .await
            .context("failed to check record existence")?;
···
    /// Get the takedown status of a record.
    pub(crate) async fn get_record_takedown_status(&self, uri: &str) -> Result<Option<StatusAttr>> {
        let result = sqlx::query!("SELECT takedownRef FROM record WHERE uri = ?", uri)
-           .fetch_optional(&self.db)
+           .fetch_optional(&self.db.pool)
            .await
            .context("failed to fetch takedown status")?;
···
    /// Get the current CID for a record URI.
    pub(crate) async fn get_current_record_cid(&self, uri: &str) -> Result<Option<Cid>> {
        let result = sqlx::query!("SELECT cid FROM record WHERE uri = ?", uri)
-           .fetch_optional(&self.db)
+           .fetch_optional(&self.db.pool)
            .await
            .context("failed to fetch record CID")?;
···
        link_to,
        collection
    )
-   .fetch_all(&self.db)
+   .fetch_all(&self.db.pool)
    .await
    .context("failed to fetch record backlinks")?;
···
            repo_rev: Some(row.repoRev),
            indexed_at: row.indexedAt,
            takedown_ref: row.takedownRef,
-           did: self.did.clone(),
        });
    }
···
        backlink.link_to,
        uri_collection
    )
-   .fetch_all(&self.db)
+   .fetch_all(&self.db.pool)
    .await
    .context("failed to fetch backlink conflicts")?;
···
        "SELECT cid FROM repo_block WHERE cid > ? ORDER BY cid ASC LIMIT 1000",
        current_cursor
    )
-   .fetch_all(&self.db)
+   .fetch_all(&self.db.pool)
    .await
    .context("failed to fetch blocks")?;
···
        LIMIT 1
        "#
    )
-   .fetch_optional(&self.db)
+   .fetch_optional(&self.db.pool)
    .await
    .context("failed to fetch profile record")?;
···
        r#"SELECT repoRev FROM record WHERE repoRev <= ? LIMIT 1"#,
        rev
    )
-   .fetch_optional(&self.db)
+   .fetch_optional(&self.db.pool)
    .await
    .context("failed to check revision existence")?;
···
        "#,
        rev
    )
-   .fetch_all(&self.db)
+   .fetch_all(&self.db.pool)
    .await
    .context("failed to fetch records since revision")?;
···
    pub indexed_at: String,
    /// Reference for takedown, if any.
    pub takedown_ref: Option<String>,
-   /// DID of the repository owner.
-   pub did: String,
}

/// Status attribute for takedowns
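RecordReader is now built from the shared ActorDb handle instead of a bare SqlitePool plus DID. A minimal usage sketch under the new constructor; count_records is illustrative, not part of the change:

    use anyhow::Result;

    // Illustrative only: build a reader from an ActorDb and count its records.
    async fn count_records(db: ActorDb) -> Result<i64> {
        let reader = RecordReader::new(db);
        reader.record_count().await
    }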
+10 -9
src/actor_store/record/transactor.rs
···
use atrium_repo::Cid;
use rsky_repo::types::WriteOpAction;
use rsky_syntax::aturi::AtUri;
- use sqlx::SqlitePool;

+ use crate::actor_store::ActorDb;
use crate::actor_store::db::schema::Backlink;
use crate::actor_store::record::reader::{RecordReader, StatusAttr, get_backlinks};
+ use crate::repo::types::BlobStore as BlobStore;

/// Transaction handler for record operations.
pub(crate) struct RecordTransactor {
    /// The record reader.
    pub reader: RecordReader,
    /// The blob store.
-   pub blobstore: SqlitePool, // This will be replaced with proper BlobStore type
+   pub blobstore: BlobStore,
}

impl RecordTransactor {
    /// Create a new record transactor.
-   pub(crate) fn new(db: SqlitePool, blobstore: SqlitePool, did: String) -> Self {
+   pub(crate) fn new(db: ActorDb, blobstore: BlobStore) -> Self {
        Self {
-           reader: RecordReader::new(db, did),
+           reader: RecordReader::new(db),
            blobstore,
        }
    }
···
        repo_rev,
        now
    )
-   .execute(&self.reader.db)
+   .execute(&self.reader.db.pool)
    .await
    .context("failed to index record")?;
···
    tracing::debug!("Deleting indexed record {}", uri_str);

    // Delete the record and its backlinks in a transaction
-   let mut tx = self.reader.db.begin().await?;
+   let mut tx = self.reader.db.pool.begin().await?;

    // Delete from record table
    sqlx::query!("DELETE FROM record WHERE uri = ?", uri_str)
···
    /// Remove backlinks for a URI.
    pub(crate) async fn remove_backlinks_by_uri(&self, uri: &str) -> Result<()> {
        sqlx::query!("DELETE FROM backlink WHERE uri = ?", uri)
-           .execute(&self.reader.db)
+           .execute(&self.reader.db.pool)
            .await
            .context("failed to remove backlinks")?;
···
    query
        .build()
-       .execute(&self.reader.db)
+       .execute(&self.reader.db.pool)
        .await
        .context("failed to add backlinks")?;
···
        takedown_ref,
        uri_str
    )
-   .execute(&self.reader.db)
+   .execute(&self.reader.db.pool)
    .await
    .context("failed to update record takedown status")?;
+7 -7
src/db/db.rs
···
#[derive(Clone)]
pub(crate) struct Database {
    /// SQLite connection pool.
-   pub(crate) db: Arc<SqlitePool>,
+   pub(crate) pool: SqlitePool,
    /// Flag indicating if the database is destroyed.
-   destroyed: Arc<Mutex<bool>>,
+   pub(crate) destroyed: Arc<Mutex<bool>>,
    /// Queue of commit hooks.
-   commit_hooks: Arc<AsyncMutex<VecDeque<Box<dyn FnOnce() + Send>>>>,
+   pub(crate) commit_hooks: Arc<AsyncMutex<VecDeque<Box<dyn FnOnce() + Send>>>>,
}

impl Database {
···
        let pool = SqlitePool::connect_with(options).await?;
        Ok(Self {
-           db: Arc::new(pool),
+           pool,
            destroyed: Arc::new(Mutex::new(false)),
            commit_hooks: Arc::new(AsyncMutex::new(VecDeque::new())),
        })
···
    /// Ensures the database is using Write-Ahead Logging (WAL) mode.
    pub async fn ensure_wal(&self) -> sqlx::Result<()> {
-       let mut conn = self.db.acquire().await?;
+       let mut conn = self.pool.acquire().await?;
        sqlx::query("PRAGMA journal_mode = WAL")
            .execute(&mut *conn)
            .await?;
···
    where
        F: FnOnce(&mut Transaction<'_, Sqlite>) -> sqlx::Result<T>,
    {
-       let mut tx = self.db.begin().await?;
+       let mut tx = self.pool.begin().await?;
        let result = func(&mut tx)?;
        tx.commit().await?;
        self.run_commit_hooks().await;
···
            return Ok(());
        }
        *destroyed = true;
-       drop(self.db.clone()); // Drop the pool to close connections.
+       drop(self.pool.clone()); // Drop the pool to close connections.
        Ok(())
    }
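With the pool exposed as db.pool, call sites can drive explicit sqlx transactions directly. A minimal sketch assuming only the Database API visible in this hunk; the function name and the SELECT 1 statement are illustrative:

    use anyhow::Result;

    // Illustrative only: enable WAL, then run a trivial statement inside an
    // explicit sqlx transaction driven through the exposed pool.
    async fn ping_in_transaction(db: &Database) -> Result<()> {
        db.ensure_wal().await?;
        let mut tx = db.pool.begin().await?;
        sqlx::query("SELECT 1").fetch_one(&mut *tx).await?;
        tx.commit().await?;
        Ok(())
    }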
+69
src/repo/types.rs
···
    pub rkey: String,
    pub cid: Option<Cid>,
}
+
+ pub(crate) struct BlobStore;
+ pub(crate) trait BlobStoreTrait {
+     fn put_temp(&self, bytes: &[u8]) -> Result<String>; // bytes: Uint8Array | stream.Readable
+     fn make_permanent(&self, key: &str, cid: Cid) -> Result<()>;
+     fn put_permanent(&self, cid: Cid, bytes: &[u8]) -> Result<()>;
+     fn quarantine(&self, cid: Cid) -> Result<()>;
+     fn unquarantine(&self, cid: Cid) -> Result<()>;
+     fn get_bytes(&self, cid: Cid) -> Result<Vec<u8>>;
+     fn get_stream(&self, cid: Cid) -> Result<Vec<u8>>; // Promise<stream.Readable>
+     fn has_temp(&self, key: &str) -> Result<bool>;
+     fn has_stored(&self, cid: Cid) -> Result<bool>;
+     fn delete(&self, cid: Cid) -> Result<()>;
+     fn delete_many(&self, cids: Vec<Cid>) -> Result<()>;
+ }
+ impl BlobStoreTrait for BlobStore {
+     fn put_temp(&self, bytes: &[u8]) -> Result<String> {
+         // Implementation here
+         Ok("temp_key".to_string())
+     }
+
+     fn make_permanent(&self, key: &str, cid: Cid) -> Result<()> {
+         // Implementation here
+         Ok(())
+     }
+
+     fn put_permanent(&self, cid: Cid, bytes: &[u8]) -> Result<()> {
+         // Implementation here
+         Ok(())
+     }
+
+     fn quarantine(&self, cid: Cid) -> Result<()> {
+         // Implementation here
+         Ok(())
+     }
+
+     fn unquarantine(&self, cid: Cid) -> Result<()> {
+         // Implementation here
+         Ok(())
+     }
+
+     fn get_bytes(&self, cid: Cid) -> Result<Vec<u8>> {
+         // Implementation here
+         Ok(vec![])
+     }
+
+     fn get_stream(&self, cid: Cid) -> Result<Vec<u8>> {
+         // Implementation here
+         Ok(vec![])
+     }
+
+     fn has_temp(&self, key: &str) -> Result<bool> {
+         // Implementation here
+         Ok(true)
+     }
+
+     fn has_stored(&self, cid: Cid) -> Result<bool> {
+         // Implementation here
+         Ok(true)
+     }
+     fn delete(&self, cid: Cid) -> Result<()> {
+         // Implementation here
+         Ok(())
+     }
+     fn delete_many(&self, cids: Vec<Cid>) -> Result<()> {
+         // Implementation here
+         Ok(())
+     }
+ }
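The trait appears to track the reference PDS's TypeScript BlobStore interface (hence the stream.Readable comments), with a unit-struct placeholder implementation for now. A rough usage sketch against that surface; stage_and_promote and its call pattern are illustrative, not part of the change:

    use anyhow::Result;
    use atrium_repo::Cid;

    use crate::repo::types::{BlobStore, BlobStoreTrait};

    // Illustrative only: stage raw bytes in temporary storage, then promote them
    // under their CID — the two-step flow the trait's method names imply.
    fn stage_and_promote(store: &BlobStore, cid: Cid, bytes: &[u8]) -> Result<()> {
        let temp_key = store.put_temp(bytes)?;
        store.make_permanent(&temp_key, cid)?;
        Ok(())
    }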