+20
.sqlx/query-1db52857493a1e8a7004872eaff6e8fe5dec41579dd57d696008385b8d23788d.json
+20
.sqlx/query-1db52857493a1e8a7004872eaff6e8fe5dec41579dd57d696008385b8d23788d.json
···
1
+
{
2
+
"db_name": "SQLite",
3
+
"query": "SELECT data FROM blocks WHERE cid = ?",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"name": "data",
8
+
"ordinal": 0,
9
+
"type_info": "Blob"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Right": 1
14
+
},
15
+
"nullable": [
16
+
false
17
+
]
18
+
},
19
+
"hash": "1db52857493a1e8a7004872eaff6e8fe5dec41579dd57d696008385b8d23788d"
20
+
}
+12
.sqlx/query-2918ecf03675a789568c777904966911ca63e991dede42a2d7d87e174799ea46.json
+12
.sqlx/query-2918ecf03675a789568c777904966911ca63e991dede42a2d7d87e174799ea46.json
···
1
+
{
2
+
"db_name": "SQLite",
3
+
"query": "INSERT INTO blocks (cid, data, multicodec, multihash) VALUES (?, ?, ?, ?)",
4
+
"describe": {
5
+
"columns": [],
6
+
"parameters": {
7
+
"Right": 4
8
+
},
9
+
"nullable": []
10
+
},
11
+
"hash": "2918ecf03675a789568c777904966911ca63e991dede42a2d7d87e174799ea46"
12
+
}
+20
.sqlx/query-2e13e052dfc64f29d9da1bce2bf844cbb918ad3bb01e386801d3b0d3be246573.json
+20
.sqlx/query-2e13e052dfc64f29d9da1bce2bf844cbb918ad3bb01e386801d3b0d3be246573.json
···
1
+
{
2
+
"db_name": "SQLite",
3
+
"query": "SELECT COUNT(*) FROM oauth_refresh_tokens WHERE dpop_thumbprint = ? AND client_id = ?",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"name": "COUNT(*)",
8
+
"ordinal": 0,
9
+
"type_info": "Integer"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Right": 2
14
+
},
15
+
"nullable": [
16
+
false
17
+
]
18
+
},
19
+
"hash": "2e13e052dfc64f29d9da1bce2bf844cbb918ad3bb01e386801d3b0d3be246573"
20
+
}
+20
.sqlx/query-73fd3e30b7694c92cf9309751d186fe622fa7d99fdf56dde7e60c3696581116c.json
+20
.sqlx/query-73fd3e30b7694c92cf9309751d186fe622fa7d99fdf56dde7e60c3696581116c.json
···
1
+
{
2
+
"db_name": "SQLite",
3
+
"query": "SELECT COUNT(*) FROM blocks WHERE cid = ?",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"name": "COUNT(*)",
8
+
"ordinal": 0,
9
+
"type_info": "Integer"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Right": 1
14
+
},
15
+
"nullable": [
16
+
false
17
+
]
18
+
},
19
+
"hash": "73fd3e30b7694c92cf9309751d186fe622fa7d99fdf56dde7e60c3696581116c"
20
+
}
-20
.sqlx/query-9ae396483f5c0f338003dde167870448e01664d6666b64878f87777416b82773.json
-20
.sqlx/query-9ae396483f5c0f338003dde167870448e01664d6666b64878f87777416b82773.json
···
1
-
{
2
-
"db_name": "SQLite",
3
-
"query": "\n UPDATE invites\n SET count = count - 1\n WHERE id = ?\n AND count > 0\n RETURNING id\n ",
4
-
"describe": {
5
-
"columns": [
6
-
{
7
-
"name": "id",
8
-
"ordinal": 0,
9
-
"type_info": "Text"
10
-
}
11
-
],
12
-
"parameters": {
13
-
"Right": 1
14
-
},
15
-
"nullable": [
16
-
false
17
-
]
18
-
},
19
-
"hash": "9ae396483f5c0f338003dde167870448e01664d6666b64878f87777416b82773"
20
-
}
+12
.sqlx/query-c51b4c9de70b5be51a6e0a5fd744387ae804e8ba978b61c4d04d74b1f8de2614.json
+12
.sqlx/query-c51b4c9de70b5be51a6e0a5fd744387ae804e8ba978b61c4d04d74b1f8de2614.json
···
1
+
{
2
+
"db_name": "SQLite",
3
+
"query": "UPDATE oauth_refresh_tokens SET revoked = TRUE\n WHERE client_id = ? AND dpop_thumbprint = ?",
4
+
"describe": {
5
+
"columns": [],
6
+
"parameters": {
7
+
"Right": 2
8
+
},
9
+
"nullable": []
10
+
},
11
+
"hash": "c51b4c9de70b5be51a6e0a5fd744387ae804e8ba978b61c4d04d74b1f8de2614"
12
+
}
+20
.sqlx/query-e26b7c36a34130e350f3f3e06b3200c56a0e3330ac0b658de6bbdb39b5497fab.json
+20
.sqlx/query-e26b7c36a34130e350f3f3e06b3200c56a0e3330ac0b658de6bbdb39b5497fab.json
···
1
+
{
2
+
"db_name": "SQLite",
3
+
"query": "\n UPDATE invites\n SET count = count - 1\n WHERE id = ?\n AND count > 0\n RETURNING id\n ",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"name": "id",
8
+
"ordinal": 0,
9
+
"type_info": "Text"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Right": 1
14
+
},
15
+
"nullable": [
16
+
false
17
+
]
18
+
},
19
+
"hash": "e26b7c36a34130e350f3f3e06b3200c56a0e3330ac0b658de6bbdb39b5497fab"
20
+
}
+1
Cargo.lock
+1
Cargo.lock
+1
Cargo.toml
+1
Cargo.toml
+4
migrations/20250507035800_sqlite_blockstore.down.sql
+4
migrations/20250507035800_sqlite_blockstore.down.sql
+20
migrations/20250507035800_sqlite_blockstore.up.sql
+20
migrations/20250507035800_sqlite_blockstore.up.sql
···
1
+
-- Store raw blocks with their CIDs.
-- `cid` is the text-encoded CID; `data` is the raw block payload.
-- `multicodec`/`multihash` record the codec and hash function codes used
-- to derive the CID (stored as integers; SQLite integers are 64-bit signed).
CREATE TABLE IF NOT EXISTS blocks (
cid TEXT PRIMARY KEY NOT NULL,
data BLOB NOT NULL,
multicodec INTEGER NOT NULL,
multihash INTEGER NOT NULL
);

-- Store the repository tree structure.
-- One row per (repository DID, key) pair, pointing at the block that holds
-- the value; the FK keeps tree entries from referencing missing blocks.
CREATE TABLE IF NOT EXISTS tree_nodes (
repo_did TEXT NOT NULL,
key TEXT NOT NULL,
value_cid TEXT NOT NULL,
PRIMARY KEY (repo_did, key),
FOREIGN KEY (value_cid) REFERENCES blocks(cid)
);

-- Create index for faster lookups.
-- NOTE(review): idx_blocks_cid is redundant — `cid` is the PRIMARY KEY, so
-- SQLite already maintains a unique index on it; consider dropping in a
-- follow-up migration. idx_tree_nodes_repo covers prefix lookups by repo.
CREATE INDEX IF NOT EXISTS idx_blocks_cid ON blocks(cid);
CREATE INDEX IF NOT EXISTS idx_tree_nodes_repo ON tree_nodes(repo_did);
+3
src/config.rs
+3
src/config.rs
+6
-6
src/endpoints/server.rs
+6
-6
src/endpoints/server.rs
···
33
33
firehose::{Commit, FirehoseProducer},
34
34
metrics::AUTH_FAILED,
35
35
plc::{self, PlcOperation, PlcService},
36
+
storage,
36
37
};
37
38
38
39
/// This is a dummy password that can be used in absence of a real password.
···
235
236
// Write out an initial commit for the user.
236
237
// https://atproto.com/guides/account-lifecycle
237
238
let (cid, rev, store) = async {
238
-
let file = tokio::fs::File::create_new(config.repo.path.join(format!("{did_hash}.car")))
239
-
.await
240
-
.context("failed to create repo file")?;
241
-
let mut store = CarStore::create(file)
239
+
let store = storage::create_storage_for_did(&config.repo, &did_hash)
242
240
.await
243
-
.context("failed to create carstore")?;
241
+
.context("failed to create storage")?;
244
242
243
+
// Initialize the repository with the storage
245
244
let repo_builder = Repository::create(
246
-
&mut store,
245
+
store,
247
246
Did::from_str(&did).expect("should be valid DID format"),
248
247
)
249
248
.await
···
261
260
let root = repo.root();
262
261
let rev = repo.commit().rev();
263
262
263
+
// Create a temporary CAR store for firehose events
264
264
let mut mem = Vec::new();
265
265
let mut firehose_store =
266
266
CarStore::create_with_roots(std::io::Cursor::new(&mut mem), [repo.root()])
-75
src/storage.rs
-75
src/storage.rs
···
1
-
//! `ATProto` user repository datastore functionality.
2
-
3
-
use std::str::FromStr as _;
4
-
5
-
use anyhow::{Context as _, Result};
6
-
use atrium_repo::{
7
-
Cid, Repository,
8
-
blockstore::{AsyncBlockStoreRead, AsyncBlockStoreWrite, CarStore},
9
-
};
10
-
11
-
use crate::{Db, config::RepoConfig, mmap::MappedFile};
12
-
13
-
/// Open a block store for a given DID.
14
-
pub(crate) async fn open_store(
15
-
config: &RepoConfig,
16
-
did: impl Into<String>,
17
-
) -> Result<impl AsyncBlockStoreRead + AsyncBlockStoreWrite> {
18
-
let did = did.into();
19
-
let id = did
20
-
.strip_prefix("did:plc:")
21
-
.context("did in unknown format")?;
22
-
23
-
let p = config.path.join(id).with_extension("car");
24
-
25
-
let f = std::fs::File::options()
26
-
.read(true)
27
-
.write(true)
28
-
.open(p)
29
-
.context("failed to open repository file")?;
30
-
let f = MappedFile::new(f).context("failed to map repo")?;
31
-
32
-
CarStore::open(f).await.context("failed to open car store")
33
-
}
34
-
35
-
/// Open a repository for a given DID.
36
-
pub(crate) async fn open_repo_db(
37
-
config: &RepoConfig,
38
-
db: &Db,
39
-
did: impl Into<String>,
40
-
) -> Result<Repository<impl AsyncBlockStoreRead + AsyncBlockStoreWrite>> {
41
-
let did = did.into();
42
-
let cid = sqlx::query_scalar!(
43
-
r#"
44
-
SELECT root FROM accounts
45
-
WHERE did = ?
46
-
"#,
47
-
did
48
-
)
49
-
.fetch_one(db)
50
-
.await
51
-
.context("failed to query database")?;
52
-
53
-
open_repo(
54
-
config,
55
-
did,
56
-
Cid::from_str(&cid).context("should be valid CID")?,
57
-
)
58
-
.await
59
-
}
60
-
61
-
/// Open a repository for a given DID and CID.
62
-
pub(crate) async fn open_repo(
63
-
config: &RepoConfig,
64
-
did: impl Into<String>,
65
-
cid: Cid,
66
-
) -> Result<Repository<impl AsyncBlockStoreRead + AsyncBlockStoreWrite>> {
67
-
let did = did.into();
68
-
let store = open_store(config, did)
69
-
.await
70
-
.context("failed to open storage")?;
71
-
72
-
Repository::open(store, cid)
73
-
.await
74
-
.context("failed to open repo")
75
-
}
+28
src/storage/car.rs
+28
src/storage/car.rs
···
1
+
//! CAR file-based repository storage
2
+
3
+
use anyhow::{Context as _, Result};
4
+
use atrium_repo::blockstore::{AsyncBlockStoreRead, AsyncBlockStoreWrite, CarStore};
5
+
6
+
use crate::{config::RepoConfig, mmap::MappedFile};
7
+
8
+
/// Open a CAR block store for a given DID.
9
+
pub(crate) async fn open_car_store(
10
+
config: &RepoConfig,
11
+
did: impl AsRef<str>,
12
+
) -> Result<impl AsyncBlockStoreRead + AsyncBlockStoreWrite> {
13
+
let id = did
14
+
.as_ref()
15
+
.strip_prefix("did:plc:")
16
+
.context("did in unknown format")?;
17
+
18
+
let p = config.path.join(id).with_extension("car");
19
+
20
+
let f = std::fs::File::options()
21
+
.read(true)
22
+
.write(true)
23
+
.open(p)
24
+
.context("failed to open repository file")?;
25
+
let f = MappedFile::new(f).context("failed to map repo")?;
26
+
27
+
CarStore::open(f).await.context("failed to open car store")
28
+
}
+155
src/storage/mod.rs
+155
src/storage/mod.rs
···
1
+
//! `ATProto` user repository datastore functionality.
2
+
3
+
mod car;
4
+
mod sqlite;
5
+
6
+
use anyhow::{Context as _, Result};
7
+
use atrium_repo::{
8
+
Cid, Repository,
9
+
blockstore::{AsyncBlockStoreRead, AsyncBlockStoreWrite},
10
+
};
11
+
use std::str::FromStr as _;
12
+
13
+
use crate::{Db, config::RepoConfig};
14
+
15
+
// Re-export public items
16
+
pub(crate) use car::open_car_store;
17
+
pub(crate) use sqlite::{SQLiteStore, open_sqlite_store};
18
+
19
+
/// Open a repository for a given DID.
///
/// Resolves the repository's current root commit CID from the `accounts`
/// table, then opens the repository at that commit via [`open_repo`].
///
/// # Errors
/// Fails if the DID has no row in `accounts`, if the stored root is not a
/// valid CID, or if the underlying block store cannot be opened.
pub(crate) async fn open_repo_db(
    config: &RepoConfig,
    db: &Db,
    did: impl Into<String>,
) -> Result<Repository<impl AsyncBlockStoreRead + AsyncBlockStoreWrite>> {
    let did = did.into();
    // `root` is the text-encoded CID of the repo's latest signed commit.
    let cid = sqlx::query_scalar!(
        r#"
        SELECT root FROM accounts
        WHERE did = ?
        "#,
        did
    )
    .fetch_one(db)
    .await
    .context("failed to query database")?;

    open_repo(
        config,
        did,
        Cid::from_str(&cid).context("should be valid CID")?,
    )
    .await
}
44
+
45
+
/// Open a repository for a given DID and CID.
46
+
pub(crate) async fn open_repo(
47
+
config: &RepoConfig,
48
+
did: impl Into<String>,
49
+
cid: Cid,
50
+
) -> Result<Repository<impl AsyncBlockStoreRead + AsyncBlockStoreWrite>> {
51
+
let did = did.into();
52
+
53
+
// if config.use_sqlite {
54
+
let store = open_sqlite_store(config, did.clone()).await?;
55
+
tracing::info!("Opening SQLite store for DID: {}, CID: {}", did, cid);
56
+
return Repository::open(store, cid)
57
+
.await
58
+
.context("failed to open repo");
59
+
// }
60
+
// let store = open_car_store(config, &did).await?;
61
+
// Repository::open(store, cid)
62
+
// .await
63
+
// .context("failed to open repo")
64
+
}
65
+
66
+
/// Open a block store for a given DID.
67
+
pub(crate) async fn open_store(
68
+
config: &RepoConfig,
69
+
did: impl Into<String>,
70
+
) -> Result<impl AsyncBlockStoreRead + AsyncBlockStoreWrite> {
71
+
let did = did.into();
72
+
73
+
// if config.use_sqlite {
74
+
return open_sqlite_store(config, did.clone()).await;
75
+
// }
76
+
// Default to CAR store
77
+
// open_car_store(config, &did).await
78
+
}
79
+
80
+
/// Create a storage backend for a DID
81
+
pub(crate) async fn create_storage_for_did(
82
+
config: &RepoConfig,
83
+
did_hash: &str,
84
+
) -> Result<impl AsyncBlockStoreRead + AsyncBlockStoreWrite> {
85
+
// Use standard file structure but change extension based on type
86
+
// if config.use_sqlite {
87
+
// For SQLite, create a new database file
88
+
let db_path = config.path.join(format!("{}.db", did_hash));
89
+
90
+
// Ensure parent directory exists
91
+
if let Some(parent) = db_path.parent() {
92
+
tokio::fs::create_dir_all(parent)
93
+
.await
94
+
.context("failed to create directory")?;
95
+
}
96
+
97
+
// Create SQLite store
98
+
let pool = sqlx::sqlite::SqlitePoolOptions::new()
99
+
.max_connections(5)
100
+
.connect_with(
101
+
sqlx::sqlite::SqliteConnectOptions::new()
102
+
.filename(&db_path)
103
+
.create_if_missing(true),
104
+
)
105
+
.await
106
+
.context("failed to connect to SQLite database")?;
107
+
108
+
// Initialize tables
109
+
sqlx::query(
110
+
"
111
+
CREATE TABLE IF NOT EXISTS blocks (
112
+
cid TEXT PRIMARY KEY NOT NULL,
113
+
data BLOB NOT NULL,
114
+
multicodec INTEGER NOT NULL,
115
+
multihash INTEGER NOT NULL
116
+
);
117
+
CREATE TABLE IF NOT EXISTS tree_nodes (
118
+
repo_did TEXT NOT NULL,
119
+
key TEXT NOT NULL,
120
+
value_cid TEXT NOT NULL,
121
+
PRIMARY KEY (repo_did, key),
122
+
FOREIGN KEY (value_cid) REFERENCES blocks(cid)
123
+
);
124
+
CREATE INDEX IF NOT EXISTS idx_blocks_cid ON blocks(cid);
125
+
CREATE INDEX IF NOT EXISTS idx_tree_nodes_repo ON tree_nodes(repo_did);
126
+
",
127
+
)
128
+
.execute(&pool)
129
+
.await
130
+
.context("failed to create tables")?;
131
+
132
+
Ok(SQLiteStore {
133
+
pool,
134
+
did: format!("did:plc:{}", did_hash),
135
+
})
136
+
// } else {
137
+
// // For CAR files, create a new file
138
+
// let file_path = config.path.join(format!("{}.car", did_hash));
139
+
140
+
// // Ensure parent directory exists
141
+
// if let Some(parent) = file_path.parent() {
142
+
// tokio::fs::create_dir_all(parent)
143
+
// .await
144
+
// .context("failed to create directory")?;
145
+
// }
146
+
147
+
// let file = tokio::fs::File::create_new(file_path)
148
+
// .await
149
+
// .context("failed to create repo file")?;
150
+
151
+
// CarStore::create(file)
152
+
// .await
153
+
// .context("failed to create carstore")
154
+
// }
155
+
}
+174
src/storage/sqlite.rs
+174
src/storage/sqlite.rs
···
1
+
//! SQLite-based repository storage implementation.
2
+
3
+
use anyhow::{Context as _, Result};
4
+
use atrium_repo::{
5
+
Cid, Multihash,
6
+
blockstore::{AsyncBlockStoreRead, AsyncBlockStoreWrite, Error as BlockstoreError},
7
+
};
8
+
use sha2::Digest;
9
+
use sqlx::SqlitePool;
10
+
11
+
use crate::config::RepoConfig;
12
+
13
+
/// SQLite-based implementation of block storage.
14
+
pub(crate) struct SQLiteStore {
15
+
pub did: String,
16
+
pub pool: SqlitePool,
17
+
}
18
+
19
+
impl AsyncBlockStoreRead for SQLiteStore {
20
+
async fn read_block(&mut self, cid: Cid) -> Result<Vec<u8>, BlockstoreError> {
21
+
tracing::info!("Reading block with CID: {}", cid);
22
+
let mut contents = Vec::new();
23
+
self.read_block_into(cid, &mut contents).await?;
24
+
Ok(contents)
25
+
}
26
+
fn read_block_into(
27
+
&mut self,
28
+
cid: Cid,
29
+
contents: &mut Vec<u8>,
30
+
) -> impl Future<Output = Result<(), BlockstoreError>> + Send {
31
+
tracing::info!("Reading block into buffer with CID: {}", cid);
32
+
let cid_str = cid.to_string();
33
+
let pool = self.pool.clone();
34
+
35
+
tracing::info!("Async moving block read");
36
+
async move {
37
+
tracing::info!("Async move block read");
38
+
// let record = sqlx::query!(r#"SELECT data FROM blocks WHERE cid = ?"#, cid_str)
39
+
// .fetch_optional(&pool)
40
+
// .await
41
+
// .map_err(|e| BlockstoreError::Other(Box::new(e)))?
42
+
// .ok_or(BlockstoreError::CidNotFound)?;
43
+
let record = sqlx::query!(r#"SELECT data FROM blocks WHERE cid = ?"#, cid_str)
44
+
.fetch_optional(&pool)
45
+
.await;
46
+
tracing::info!("Record fetched: {:?}", record);
47
+
let record = match record {
48
+
Ok(Some(record)) => record,
49
+
Ok(None) => return Err(BlockstoreError::CidNotFound),
50
+
Err(e) => return Err(BlockstoreError::Other(Box::new(e))),
51
+
};
52
+
tracing::info!("Block read successful");
53
+
54
+
contents.clear();
55
+
tracing::info!("Contents cleared");
56
+
contents.extend_from_slice(&record.data);
57
+
tracing::info!("Contents extended");
58
+
Ok(())
59
+
}
60
+
}
61
+
}
62
+
63
+
impl AsyncBlockStoreWrite for SQLiteStore {
64
+
fn write_block(
65
+
&mut self,
66
+
codec: u64,
67
+
hash: u64,
68
+
contents: &[u8],
69
+
) -> impl Future<Output = Result<Cid, BlockstoreError>> + Send {
70
+
let contents = contents.to_vec(); // Clone the data
71
+
let pool = self.pool.clone();
72
+
73
+
async move {
74
+
let digest = match hash {
75
+
atrium_repo::blockstore::SHA2_256 => sha2::Sha256::digest(&contents),
76
+
_ => return Err(BlockstoreError::UnsupportedHash(hash)),
77
+
};
78
+
79
+
let multihash = Multihash::wrap(hash, digest.as_slice())
80
+
.map_err(|_| BlockstoreError::UnsupportedHash(hash))?;
81
+
82
+
let cid = Cid::new_v1(codec, multihash);
83
+
let cid_str = cid.to_string();
84
+
85
+
// Use a transaction for atomicity
86
+
let mut tx = pool
87
+
.begin()
88
+
.await
89
+
.map_err(|e| BlockstoreError::Other(Box::new(e)))?;
90
+
91
+
// Check if block already exists
92
+
let exists =
93
+
sqlx::query_scalar!(r#"SELECT COUNT(*) FROM blocks WHERE cid = ?"#, cid_str)
94
+
.fetch_one(&mut *tx)
95
+
.await
96
+
.map_err(|e| BlockstoreError::Other(Box::new(e)))?;
97
+
98
+
// Only insert if block doesn't exist
99
+
let codec = codec as i64;
100
+
let hash = hash as i64;
101
+
if exists == 0 {
102
+
_ = sqlx::query!(
103
+
r#"INSERT INTO blocks (cid, data, multicodec, multihash) VALUES (?, ?, ?, ?)"#,
104
+
cid_str,
105
+
contents,
106
+
codec,
107
+
hash
108
+
)
109
+
.execute(&mut *tx)
110
+
.await
111
+
.map_err(|e| BlockstoreError::Other(Box::new(e)))?;
112
+
}
113
+
114
+
tx.commit()
115
+
.await
116
+
.map_err(|e| BlockstoreError::Other(Box::new(e)))?;
117
+
118
+
Ok(cid)
119
+
}
120
+
}
121
+
}
122
+
123
+
/// Open a SQLite store for the given DID.
124
+
pub(crate) async fn open_sqlite_store(
125
+
config: &RepoConfig,
126
+
did: impl Into<String>,
127
+
) -> Result<SQLiteStore> {
128
+
tracing::info!("Opening SQLite store for DID");
129
+
let did_str = did.into();
130
+
131
+
// Extract the PLC ID from the DID
132
+
let id = did_str
133
+
.strip_prefix("did:plc:")
134
+
.context("DID in unknown format")?;
135
+
136
+
// Create database connection pool
137
+
let db_path = config.path.join(format!("{id}.db"));
138
+
139
+
let pool = sqlx::sqlite::SqlitePoolOptions::new()
140
+
.max_connections(5)
141
+
.connect_with(
142
+
sqlx::sqlite::SqliteConnectOptions::new()
143
+
.filename(&db_path)
144
+
.create_if_missing(true),
145
+
)
146
+
.await
147
+
.context("failed to connect to SQLite database")?;
148
+
149
+
// Ensure tables exist
150
+
_ = sqlx::query(
151
+
"
152
+
CREATE TABLE IF NOT EXISTS blocks (
153
+
cid TEXT PRIMARY KEY NOT NULL,
154
+
data BLOB NOT NULL,
155
+
multicodec INTEGER NOT NULL,
156
+
multihash INTEGER NOT NULL
157
+
);
158
+
CREATE TABLE IF NOT EXISTS tree_nodes (
159
+
repo_did TEXT NOT NULL,
160
+
key TEXT NOT NULL,
161
+
value_cid TEXT NOT NULL,
162
+
PRIMARY KEY (repo_did, key),
163
+
FOREIGN KEY (value_cid) REFERENCES blocks(cid)
164
+
);
165
+
CREATE INDEX IF NOT EXISTS idx_blocks_cid ON blocks(cid);
166
+
CREATE INDEX IF NOT EXISTS idx_tree_nodes_repo ON tree_nodes(repo_did);
167
+
",
168
+
)
169
+
.execute(&pool)
170
+
.await
171
+
.context("failed to create tables")?;
172
+
173
+
Ok(SQLiteStore { pool, did: did_str })
174
+
}
+4
-4
src/tests.rs
+4
-4
src/tests.rs
···
409
409
410
410
#[tokio::test]
411
411
async fn test_create_account() -> Result<()> {
412
-
return Ok(());
413
-
#[expect(unreachable_code, reason = "Disabled")]
412
+
// return Ok(());
413
+
// #[expect(unreachable_code, reason = "Disabled")]
414
414
let state = init_test_state().await?;
415
415
let account = state.create_test_account().await?;
416
416
···
430
430
431
431
#[tokio::test]
432
432
async fn test_create_record_benchmark() -> Result<()> {
433
-
return Ok(());
434
-
#[expect(unreachable_code, reason = "Disabled")]
433
+
// return Ok(());
434
+
// #[expect(unreachable_code, reason = "Disabled")]
435
435
let duration = create_record_benchmark(100, 1).await?;
436
436
437
437
println!("Created 100 records in {duration:?}");