+25 -21  src/account_manager/helpers/account.rs
···
     let AvailabilityFlags {
         include_taken_down,
         include_deactivated,
-    } = flags.unwrap_or_else(|| AvailabilityFlags {
+    } = flags.unwrap_or(AvailabilityFlags {
         include_taken_down: Some(false),
         include_deactivated: Some(false),
     });
···
     use rsky_pds::schema::pds::repo_root::dsl as RepoRootSchema;

     let did = did.to_owned();
-    db.get()
+    _ = db
+        .get()
         .await?
         .interact(move |conn| {
-            delete(RepoRootSchema::repo_root)
+            _ = delete(RepoRootSchema::repo_root)
                 .filter(RepoRootSchema::did.eq(&did))
                 .execute(conn)?;
-            delete(EmailTokenSchema::email_token)
+            _ = delete(EmailTokenSchema::email_token)
                 .filter(EmailTokenSchema::did.eq(&did))
                 .execute(conn)?;
-            delete(RefreshTokenSchema::refresh_token)
+            _ = delete(RefreshTokenSchema::refresh_token)
                 .filter(RefreshTokenSchema::did.eq(&did))
                 .execute(conn)?;
-            delete(AccountSchema::account)
+            _ = delete(AccountSchema::account)
                 .filter(AccountSchema::did.eq(&did))
                 .execute(conn)?;
             delete(ActorSchema::actor)
···
     >,
 ) -> Result<()> {
     let takedown_ref: Option<String> = match takedown.applied {
-        true => match takedown.r#ref {
-            Some(takedown_ref) => Some(takedown_ref),
-            None => Some(rsky_common::now()),
-        },
+        true => takedown
+            .r#ref
+            .map_or_else(|| Some(rsky_common::now()), Some),
         false => None,
     };
     let did = did.to_owned();
-    db.get()
+    _ = db
+        .get()
         .await?
         .interact(move |conn| {
             update(ActorSchema::actor)
···
     >,
 ) -> Result<()> {
     let did = did.to_owned();
-    db.get()
+    _ = db
+        .get()
         .await?
         .interact(move |conn| {
             update(ActorSchema::actor)
···
     >,
 ) -> Result<()> {
     let did = did.to_owned();
-    db.get()
+    _ = db
+        .get()
         .await?
         .interact(move |conn| {
             update(ActorSchema::actor)
···
     >,
 ) -> Result<()> {
     let did = did.to_owned();
-    db.get()
+    _ = db
+        .get()
         .await?
         .interact(move |conn| {
             update(AccountSchema::account)
···
     match res {
         None => Ok(None),
         Some(res) => {
-            let takedown = match res.0 {
-                Some(takedown_ref) => StatusAttr {
-                    applied: true,
-                    r#ref: Some(takedown_ref),
-                },
-                None => StatusAttr {
+            let takedown = res.0.map_or(
+                StatusAttr {
                     applied: false,
                     r#ref: None,
                 },
-            };
+                |takedown_ref| StatusAttr {
+                    applied: true,
+                    r#ref: Some(takedown_ref),
+                },
+            );
             let deactivated = match res.1 {
                 Some(_) => StatusAttr {
                     applied: true,
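
Reviewer note: the `res.0.map_or(…)` rewrite above is the recurring pattern in this file — collapse a `match` over an `Option` into a single combinator. A minimal, self-contained sketch of the same shape; the `StatusAttr` here is a simplified stand-in, not the crate's actual type:

```rust
// Simplified stand-in for the crate's StatusAttr; names are illustrative only.
#[derive(Debug, PartialEq)]
struct StatusAttr {
    applied: bool,
    r#ref: Option<String>,
}

fn takedown_status(takedown_ref: Option<String>) -> StatusAttr {
    // map_or takes the value for the None case first, then the closure for Some.
    takedown_ref.map_or(
        StatusAttr { applied: false, r#ref: None },
        |r| StatusAttr { applied: true, r#ref: Some(r) },
    )
}

fn main() {
    assert_eq!(
        takedown_status(None),
        StatusAttr { applied: false, r#ref: None }
    );
    assert_eq!(
        takedown_status(Some("mod-action-1".to_owned())),
        StatusAttr { applied: true, r#ref: Some("mod-action-1".to_owned()) }
    );
    println!("ok");
}
```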
+21 -18  src/account_manager/helpers/auth.rs
···
     let exp = from_micros_to_utc((payload.exp.as_millis() / 1000) as i64);

-    db.get()
+    _ = db
+        .get()
         .await?
         .interact(move |conn| {
             insert_into(RefreshTokenSchema::refresh_token)
···
     db.get()
         .await?
         .interact(move |conn| {
-            delete(RefreshTokenSchema::refresh_token)
+            _ = delete(RefreshTokenSchema::refresh_token)
                 .filter(RefreshTokenSchema::did.eq(did))
                 .filter(RefreshTokenSchema::expiresAt.le(now))
                 .execute(conn)?;
···
     } = opts;
     use rsky_pds::schema::pds::refresh_token::dsl as RefreshTokenSchema;

-    update(RefreshTokenSchema::refresh_token)
-        .filter(RefreshTokenSchema::id.eq(id))
-        .filter(
-            RefreshTokenSchema::nextId
-                .is_null()
-                .or(RefreshTokenSchema::nextId.eq(&next_id)),
-        )
-        .set((
-            RefreshTokenSchema::expiresAt.eq(expires_at),
-            RefreshTokenSchema::nextId.eq(&next_id),
-        ))
-        .returning(models::RefreshToken::as_select())
-        .get_results(conn)
-        .map_err(|error| {
-            anyhow::Error::new(AuthHelperError::ConcurrentRefresh).context(error)
-        })?;
+    drop(
+        update(RefreshTokenSchema::refresh_token)
+            .filter(RefreshTokenSchema::id.eq(id))
+            .filter(
+                RefreshTokenSchema::nextId
+                    .is_null()
+                    .or(RefreshTokenSchema::nextId.eq(&next_id)),
+            )
+            .set((
+                RefreshTokenSchema::expiresAt.eq(expires_at),
+                RefreshTokenSchema::nextId.eq(&next_id),
+            ))
+            .returning(models::RefreshToken::as_select())
+            .get_results(conn)
+            .map_err(|error| {
+                anyhow::Error::new(AuthHelperError::ConcurrentRefresh).context(error)
+            })?,
+    );
     Ok(())
 })
 .await
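
Reviewer note: the `_ = …` and `drop(…)` forms introduced above exist only to discard must-use results (row counts, returned rows) from Diesel without a `let _ =` binding. A standalone sketch of the two idioms; `execute_update` is a hypothetical stand-in, not a Diesel call:

```rust
// A sketch of the two idioms used to silence unused-result warnings.
#[must_use = "the affected rows are usually worth checking"]
fn execute_update() -> Vec<i64> {
    // Pretend this ran an UPDATE ... RETURNING id and got one row back.
    vec![42]
}

fn main() {
    // `_ = ...` is a destructuring assignment (stable since Rust 1.59):
    // the value is evaluated and immediately discarded.
    _ = execute_update();

    // `drop(...)` does the same thing explicitly and reads well when the
    // discarded expression spans many chained calls, as in the Diesel
    // update in this hunk.
    drop(execute_update());
}
```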
+23 -20  src/account_manager/helpers/email_token.rs
···
 //! blacksky-algorithms/rsky is licensed under the Apache License 2.0
 //!
 //! Modified for SQLite backend
+#![allow(unnameable_types, unused_qualifications)]
 use anyhow::{Result, bail};
 use diesel::*;
 use rsky_common::time::{MINUTE, from_str_to_utc, less_than_ago_s};
···
     db.get()
         .await?
         .interact(move |conn| {
-            insert_into(EmailTokenSchema::email_token)
+            _ = insert_into(EmailTokenSchema::email_token)
                 .values((
                     EmailTokenSchema::purpose.eq(purpose),
                     EmailTokenSchema::did.eq(did),
···
 }

 impl EmailTokenPurpose {
-    pub fn as_str(&self) -> &'static str {
+    pub const fn as_str(&self) -> &'static str {
         match self {
-            EmailTokenPurpose::ConfirmEmail => "confirm_email",
-            EmailTokenPurpose::UpdateEmail => "update_email",
-            EmailTokenPurpose::ResetPassword => "reset_password",
-            EmailTokenPurpose::DeleteAccount => "delete_account",
-            EmailTokenPurpose::PlcOperation => "plc_operation",
+            Self::ConfirmEmail => "confirm_email",
+            Self::UpdateEmail => "update_email",
+            Self::ResetPassword => "reset_password",
+            Self::DeleteAccount => "delete_account",
+            Self::PlcOperation => "plc_operation",
         }
     }

     pub fn from_str(s: &str) -> Result<Self> {
         match s {
-            "confirm_email" => Ok(EmailTokenPurpose::ConfirmEmail),
-            "update_email" => Ok(EmailTokenPurpose::UpdateEmail),
-            "reset_password" => Ok(EmailTokenPurpose::ResetPassword),
-            "delete_account" => Ok(EmailTokenPurpose::DeleteAccount),
-            "plc_operation" => Ok(EmailTokenPurpose::PlcOperation),
+            "confirm_email" => Ok(Self::ConfirmEmail),
+            "update_email" => Ok(Self::UpdateEmail),
+            "reset_password" => Ok(Self::ResetPassword),
+            "delete_account" => Ok(Self::DeleteAccount),
+            "plc_operation" => Ok(Self::PlcOperation),
             _ => bail!("Unable to parse as EmailTokenPurpose: `{s:?}`"),
         }
     }
···
     type Row = String;

     fn build(s: String) -> deserialize::Result<Self> {
-        Ok(EmailTokenPurpose::from_str(&s)?)
+        Ok(Self::from_str(&s)?)
     }
 }
···
 ) -> serialize::Result {
     serialize::ToSql::<sql_types::Text, sqlite::Sqlite>::to_sql(
         match self {
-            EmailTokenPurpose::ConfirmEmail => "confirm_email",
-            EmailTokenPurpose::UpdateEmail => "update_email",
-            EmailTokenPurpose::ResetPassword => "reset_password",
-            EmailTokenPurpose::DeleteAccount => "delete_account",
-            EmailTokenPurpose::PlcOperation => "plc_operation",
+            Self::ConfirmEmail => "confirm_email",
+            Self::UpdateEmail => "update_email",
+            Self::ResetPassword => "reset_password",
+            Self::DeleteAccount => "delete_account",
+            Self::PlcOperation => "plc_operation",
         },
         out,
     )
···
 ) -> Result<()> {
     use rsky_pds::schema::pds::email_token::dsl as EmailTokenSchema;
     let did = did.to_owned();
-    db.get()
+    _ = db
+        .get()
         .await?
         .interact(move |conn| {
             delete(EmailTokenSchema::email_token)
···
     use rsky_pds::schema::pds::email_token::dsl as EmailTokenSchema;

     let did = did.to_owned();
-    db.get()
+    _ = db
+        .get()
         .await?
         .interact(move |conn| {
             delete(EmailTokenSchema::email_token)
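
Reviewer note on the `Self` / `const fn` changes: `Self::Variant` avoids repeating the enum name in every arm, and a pure match with no allocation can be `const fn`. A reduced sketch with two variants only (not the crate's full enum):

```rust
enum EmailTokenPurpose {
    ConfirmEmail,
    UpdateEmail,
}

impl EmailTokenPurpose {
    // `const fn` because this is a pure match; `Self::...` keeps arms short.
    const fn as_str(&self) -> &'static str {
        match self {
            Self::ConfirmEmail => "confirm_email",
            Self::UpdateEmail => "update_email",
        }
    }
}

fn main() {
    // A const fn is also usable in const contexts, not just at runtime.
    const LABEL: &str = EmailTokenPurpose::ConfirmEmail.as_str();
    println!("{LABEL} / {}", EmailTokenPurpose::UpdateEmail.as_str());
}
```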
+27 -17  src/account_manager/helpers/invite.rs
···
     .first(conn)
     .optional()?;

-if invite.is_none() || invite.clone().unwrap().disabled > 0 {
-    bail!("InvalidInviteCode: None or disabled. Provided invite code not available `{invite_code:?}`")
-}
+if let Some(invite) = invite {
+    if invite.disabled > 0 {
+        bail!("InvalidInviteCode: Disabled. Provided invite code not available `{invite_code:?}`");
+    }

-let uses: i64 = InviteCodeUseSchema::invite_code_use
-    .count()
-    .filter(InviteCodeUseSchema::code.eq(&invite_code))
-    .first(conn)?;
+    let uses: i64 = InviteCodeUseSchema::invite_code_use
+        .count()
+        .filter(InviteCodeUseSchema::code.eq(&invite_code))
+        .first(conn)?;

-if invite.unwrap().available_uses as i64 <= uses {
-    bail!("InvalidInviteCode: Not enough uses. Provided invite code not available `{invite_code:?}`")
+    if invite.available_uses as i64 <= uses {
+        bail!("InvalidInviteCode: Not enough uses. Provided invite code not available `{invite_code:?}`");
+    }
+} else {
+    bail!("InvalidInviteCode: None. Provided invite code not available `{invite_code:?}`");
 }
+
 Ok(())
 }).await.expect("Failed to check invite code availability")?;
···
 if let Some(invite_code) = invite_code {
     use rsky_pds::schema::pds::invite_code_use::dsl as InviteCodeUseSchema;

-    db.get()
+    _ = db
+        .get()
         .await?
         .interact(move |conn| {
             insert_into(InviteCodeUseSchema::invite_code_use)
···
     use rsky_pds::schema::pds::invite_code::dsl as InviteCodeSchema;
     let created_at = rsky_common::now();

-    db.get()
+    _ = db
+        .get()
         .await?
         .interact(move |conn| {
             let rows: Vec<models::InviteCode> = to_create
···
             })
             .collect();

-        insert_into(InviteCodeSchema::invite_code)
+        _ = insert_into(InviteCodeSchema::invite_code)
             .values(&rows)
             .execute(conn)?;
···
 } = invite_code_use;
 match uses.get_mut(&code) {
     None => {
-        uses.insert(code, vec![CodeUse { used_by, used_at }]);
+        drop(uses.insert(code, vec![CodeUse { used_by, used_at }]));
     }
     Some(matched_uses) => matched_uses.push(CodeUse { used_by, used_at }),
 };
···
 BTreeMap::new(),
 |mut acc: BTreeMap<String, CodeDetail>, cur| {
     for code_use in &cur.uses {
-        acc.insert(code_use.used_by.clone(), cur.clone());
+        drop(acc.insert(code_use.used_by.clone(), cur.clone()));
     }
     acc
 },
···
 let disabled: i16 = if disabled { 1 } else { 0 };
 let did = did.to_owned();
-db.get()
+_ = db
+    .get()
     .await?
     .interact(move |conn| {
         update(AccountSchema::account)
···
 let DisableInviteCodesOpts { codes, accounts } = opts;
 if !codes.is_empty() {
-    db.get()
+    _ = db
+        .get()
         .await?
         .interact(move |conn| {
             update(InviteCodeSchema::invite_code)
···
         .expect("Failed to disable invite codes")?;
 }
 if !accounts.is_empty() {
-    db.get()
+    _ = db
+        .get()
         .await?
         .interact(move |conn| {
             update(InviteCodeSchema::invite_code)
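
Reviewer note: binding the row with `if let Some(invite) = invite` removes the `clone()` and both `unwrap()` calls from the availability check. A simplified sketch of the same control flow with a hypothetical `InviteCode` struct and an in-memory check instead of Diesel queries:

```rust
// Hypothetical, reduced version of the invite availability check.
struct InviteCode {
    disabled: i16,
    available_uses: i32,
}

fn check(invite: Option<&InviteCode>, uses: i64) -> Result<(), String> {
    if let Some(invite) = invite {
        if invite.disabled > 0 {
            return Err("disabled".to_owned());
        }
        if i64::from(invite.available_uses) <= uses {
            return Err("not enough uses".to_owned());
        }
        Ok(())
    } else {
        Err("unknown code".to_owned())
    }
}

fn main() {
    let code = InviteCode { disabled: 0, available_uses: 5 };
    assert!(check(Some(&code), 3).is_ok());
    assert!(check(Some(&code), 5).is_err()); // all uses consumed
    assert!(check(None, 0).is_err()); // no such code
    println!("invite checks pass");
}
```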
+2 -2  src/account_manager/helpers/password.rs
···
     db.get()
         .await?
         .interact(move |conn| {
-            update(AccountSchema::account)
+            _ = update(AccountSchema::account)
                 .filter(AccountSchema::did.eq(opts.did))
                 .set(AccountSchema::password.eq(opts.password_encrypted))
                 .execute(conn)?;
···
     db.get()
         .await?
         .interact(move |conn| {
-            delete(AppPasswordSchema::app_password)
+            _ = delete(AppPasswordSchema::app_password)
                 .filter(AppPasswordSchema::did.eq(did))
                 .filter(AppPasswordSchema::name.eq(name))
                 .execute(conn)?;
+2 -1  src/account_manager/helpers/repo.rs
+8 -7  src/account_manager/mod.rs
···
 >;

 impl AccountManager {
-    pub fn new(
+    pub const fn new(
         db: deadpool_diesel::Pool<
             deadpool_diesel::Manager<SqliteConnection>,
             deadpool_diesel::sqlite::Object,
···
             deadpool_diesel::Manager<SqliteConnection>,
             deadpool_diesel::sqlite::Object,
         >|
-        -> AccountManager { AccountManager::new(db) },
+        -> Self { Self::new(db) },
     )
 }
···
 let (access_jwt, refresh_jwt) = auth::create_tokens(CreateTokensOpts {
     did: did.clone(),
     jwt_key,
-    service_did: env::var("PDS_SERVICE_DID").unwrap(),
+    service_did: env::var("PDS_SERVICE_DID").expect("PDS_SERVICE_DID not set"),
     scope: Some(AuthScope::Access),
     jti: None,
     expires_in: None,
···
 let (access_jwt, refresh_jwt) = auth::create_tokens(CreateTokensOpts {
     did,
     jwt_key,
-    service_did: env::var("PDS_SERVICE_DID").unwrap(),
+    service_did: env::var("PDS_SERVICE_DID").expect("PDS_SERVICE_DID not set"),
     scope: Some(scope),
     jti: None,
     expires_in: None,
···
 let next_id = token.next_id.unwrap_or_else(auth::get_refresh_token_id);

 let secp = Secp256k1::new();
-let private_key = env::var("PDS_JWT_KEY_K256_PRIVATE_KEY_HEX").unwrap();
+let private_key = env::var("PDS_JWT_KEY_K256_PRIVATE_KEY_HEX")
+    .expect("PDS_JWT_KEY_K256_PRIVATE_KEY_HEX not set");
 let secret_key =
-    SecretKey::from_slice(&hex::decode(private_key.as_bytes()).unwrap()).unwrap();
+    SecretKey::from_slice(&hex::decode(private_key.as_bytes()).expect("Invalid key"))?;
 let jwt_key = Keypair::from_secret_key(&secp, &secret_key);

 let (access_jwt, refresh_jwt) = auth::create_tokens(CreateTokensOpts {
     did: token.did,
     jwt_key,
-    service_did: env::var("PDS_SERVICE_DID").unwrap(),
+    service_did: env::var("PDS_SERVICE_DID").expect("PDS_SERVICE_DID not set"),
     scope: Some(if token.app_password_name.is_none() {
         AuthScope::Access
     } else {
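
Reviewer note: `.expect("…")` panics exactly like `.unwrap()` when the variable is absent, but the message names the missing variable instead of printing a bare `VarError`. A tiny sketch; `PATH` stands in for `PDS_SERVICE_DID` so it runs anywhere:

```rust
use std::env;

fn main() {
    // Same failure mode as `.unwrap()` (a panic), but the message tells the
    // operator which configuration value is missing.
    let path = env::var("PATH").expect("PATH not set");
    println!("PATH has {} bytes", path.len());
}
```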
+61 -56  src/actor_store/blob.rs
···
         deadpool_diesel::sqlite::Object,
     >,
 ) -> Self {
-    BlobReader {
+    Self {
         did: blobstore.did.clone(),
         blobstore,
         db,
···
     size,
     cid,
     mime_type,
-    width: if let Some(ref info) = img_info {
-        Some(info.width as i32)
-    } else {
-        None
-    },
+    width: img_info.as_ref().map(|info| info.width as i32),
     height: if let Some(info) = img_info {
         Some(info.height as i32)
     } else {
···
         SET \"tempKey\" = EXCLUDED.\"tempKey\" \
         WHERE pds.blob.\"tempKey\" is not null;");
 #[expect(trivial_casts)]
-upsert
+let _ = upsert
     .bind::<Text, _>(&cid.to_string())
     .bind::<Text, _>(&did)
     .bind::<Text, _>(&mime_type)
     .bind::<Integer, _>(size as i32)
-    .bind::<Nullable<Text>, _>(Some(temp_key.clone()))
+    .bind::<Nullable<Text>, _>(Some(temp_key))
     .bind::<Nullable<Integer>, _>(width)
     .bind::<Nullable<Integer>, _>(height)
     .bind::<Text, _>(created_at)
···
 pub async fn process_write_blobs(&self, writes: Vec<PreparedWrite>) -> Result<()> {
     self.delete_dereferenced_blobs(writes.clone()).await?;

-    let _ = stream::iter(writes)
-        .then(|write| async move {
-            Ok::<(), anyhow::Error>(match write {
-                PreparedWrite::Create(w) => {
-                    for blob in w.blobs {
-                        self.verify_blob_and_make_permanent(blob.clone()).await?;
-                        self.associate_blob(blob, w.uri.clone()).await?;
-                    }
-                }
-                PreparedWrite::Update(w) => {
-                    for blob in w.blobs {
-                        self.verify_blob_and_make_permanent(blob.clone()).await?;
-                        self.associate_blob(blob, w.uri.clone()).await?;
-                    }
-                }
-                _ => (),
-            })
-        })
-        .collect::<Vec<_>>()
-        .await
-        .into_iter()
-        .collect::<Result<Vec<_>, _>>()?;
+    drop(
+        stream::iter(writes)
+            .then(async move |write| {
+                match write {
+                    PreparedWrite::Create(w) => {
+                        for blob in w.blobs {
+                            self.verify_blob_and_make_permanent(blob.clone()).await?;
+                            self.associate_blob(blob, w.uri.clone()).await?;
+                        }
+                    }
+                    PreparedWrite::Update(w) => {
+                        for blob in w.blobs {
+                            self.verify_blob_and_make_permanent(blob.clone()).await?;
+                            self.associate_blob(blob, w.uri.clone()).await?;
+                        }
+                    }
+                    _ => (),
+                };
+                Ok::<(), anyhow::Error>(())
+            })
+            .collect::<Vec<_>>()
+            .await
+            .into_iter()
+            .collect::<Result<Vec<_>, _>>()?,
+    );

     Ok(())
 }
···
 // Now perform the delete
 let uris_clone = uris.clone();
-self.db
+_ = self
+    .db
     .get()
     .await?
     .interact(move |conn| {
···
 // Delete from the blob table
 let cids = cids_to_delete.clone();
 let did_clone = self.did.clone();
-self.db
+_ = self
+    .db
     .get()
     .await?
     .interact(move |conn| {
···
 // Delete from blob storage
 // Ideally we'd use a background queue here, but for now:
-let _ = stream::iter(cids_to_delete)
-    .then(|cid| async move {
-        match Cid::from_str(&cid) {
-            Ok(cid) => self.blobstore.delete(cid.to_string()).await,
-            Err(e) => Err(anyhow::Error::new(e)),
-        }
-    })
-    .collect::<Vec<_>>()
-    .await
-    .into_iter()
-    .collect::<Result<Vec<_>, _>>()?;
+drop(
+    stream::iter(cids_to_delete)
+        .then(async move |cid| match Cid::from_str(&cid) {
+            Ok(cid) => self.blobstore.delete(cid.to_string()).await,
+            Err(e) => Err(anyhow::Error::new(e)),
+        })
+        .collect::<Vec<_>>()
+        .await
+        .into_iter()
+        .collect::<Result<Vec<_>, _>>()?,
+);

 Ok(())
 }
···
     .make_permanent(temp_key.clone(), blob.cid)
     .await?;
 }
-self.db
+_ = self
+    .db
     .get()
     .await?
     .interact(move |conn| {
···
 let cid = blob.cid.to_string();
 let did = self.did.clone();

-self.db
+_ = self
+    .db
     .get()
     .await?
     .interact(move |conn| {
···
 match res {
     None => Ok(None),
-    Some(res) => match res.takedown_ref {
-        None => Ok(Some(StatusAttr {
-            applied: false,
-            r#ref: None,
-        })),
-        Some(takedown_ref) => Ok(Some(StatusAttr {
-            applied: true,
-            r#ref: Some(takedown_ref),
-        })),
-    },
+    Some(res) => res.takedown_ref.map_or_else(
+        || {
+            Ok(Some(StatusAttr {
+                applied: false,
+                r#ref: None,
+            }))
+        },
+        |takedown_ref| {
+            Ok(Some(StatusAttr {
+                applied: true,
+                r#ref: Some(takedown_ref),
+            }))
+        },
+    ),
 }
 })
 .await
···
 use rsky_pds::schema::pds::blob::dsl as BlobSchema;

 let takedown_ref: Option<String> = match takedown.applied {
-    true => match takedown.r#ref {
-        Some(takedown_ref) => Some(takedown_ref),
-        None => Some(now()),
-    },
+    true => takedown.r#ref.map_or_else(|| Some(now()), Some),
     false => None,
 };

 let blob_cid = blob.to_string();
 let did_clone = self.did.clone();

-self.db
+_ = self
+    .db
     .get()
     .await?
     .interact(move |conn| {
-        update(BlobSchema::blob)
+        _ = update(BlobSchema::blob)
             .filter(BlobSchema::cid.eq(blob_cid))
             .filter(BlobSchema::did.eq(did_clone))
             .set(BlobSchema::takedownRef.eq(takedown_ref))
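
Reviewer note: `img_info.as_ref().map(…)` is the borrow-then-map pattern that replaces the `if let Some(ref info)` block for `width`, leaving `img_info` available for the `height` branch that consumes it. A minimal sketch with a hypothetical `ImageInfo`:

```rust
// Hypothetical, reduced stand-in for the image metadata handled in blob.rs.
struct ImageInfo {
    width: u32,
    height: u32,
}

fn main() {
    let img_info: Option<ImageInfo> = Some(ImageInfo { width: 640, height: 480 });

    // `.as_ref().map(..)` only borrows the Option's contents, so `img_info`
    // can still be consumed afterwards.
    let width: Option<i32> = img_info.as_ref().map(|info| info.width as i32);
    let height: Option<i32> = img_info.map(|info| info.height as i32);

    assert_eq!(width, Some(640));
    assert_eq!(height, Some(480));
    println!("{width:?} x {height:?}");
}
```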
+97 -68  src/actor_store/mod.rs
···
     >,
     conn: deadpool_diesel::sqlite::Object,
 ) -> Self {
-    ActorStore {
+    Self {
         storage: Arc::new(RwLock::new(SqlRepoReader::new(did.clone(), None, conn))),
         record: RecordReader::new(did.clone(), db.clone()),
         pref: PreferenceReader::new(did.clone(), db.clone()),
         did,
-        blob: BlobReader::new(blobstore, db.clone()),
+        blob: BlobReader::new(blobstore, db),
     }
 }
···
         Some(write_ops),
     )
     .await?;
-    let storage_guard = self.storage.read().await;
-    storage_guard.apply_commit(commit.clone(), None).await?;
+    self.storage
+        .read()
+        .await
+        .apply_commit(commit.clone(), None)
+        .await?;
     let writes = writes
         .into_iter()
         .map(PreparedWrite::Create)
···
         Some(write_ops),
     )
     .await?;
-    let storage_guard = self.storage.read().await;
-    storage_guard.apply_commit(commit.clone(), None).await?;
+    self.storage
+        .read()
+        .await
+        .apply_commit(commit.clone(), None)
+        .await?;
     let write_commit_ops = writes.iter().try_fold(
         Vec::with_capacity(writes.len()),
         |mut acc, w| -> Result<Vec<CommitOp>> {
···
             acc.push(CommitOp {
                 action: CommitAction::Create,
                 path: format_data_key(aturi.get_collection(), aturi.get_rkey()),
-                cid: Some(w.cid.clone()),
+                cid: Some(w.cid),
                 prev: None,
             });
             Ok(acc)
···
         .await?;
 }
 // persist the commit to repo storage
-let storage_guard = self.storage.read().await;
-storage_guard.apply_commit(commit.clone(), None).await?;
+self.storage
+    .read()
+    .await
+    .apply_commit(commit.clone(), None)
+    .await?;
 // process blobs
 self.blob.process_write_blobs(writes).await?;
 Ok(())
···
         .await?;
 }
 // persist the commit to repo storage
-let storage_guard = self.storage.read().await;
-storage_guard
+self.storage
+    .read()
+    .await
     .apply_commit(commit.commit_data.clone(), None)
     .await?;
 // process blobs
···
 }

 pub async fn get_sync_event_data(&mut self) -> Result<SyncEvtData> {
-    let storage_guard = self.storage.read().await;
-    let current_root = storage_guard.get_root_detailed().await?;
-    let blocks_and_missing = storage_guard.get_blocks(vec![current_root.cid]).await?;
+    let current_root = self.storage.read().await.get_root_detailed().await?;
+    let blocks_and_missing = self
+        .storage
+        .read()
+        .await
+        .get_blocks(vec![current_root.cid])
+        .await?;
     Ok(SyncEvtData {
         cid: current_root.cid,
         rev: current_root.rev,
···
     }
 }
 {
-    let mut storage_guard = self.storage.write().await;
-    storage_guard.cache_rev(current_root.rev).await?;
+    self.storage
+        .write()
+        .await
+        .cache_rev(current_root.rev)
+        .await?;
 }
 let mut new_record_cids: Vec<Cid> = vec![];
 let mut delete_and_update_uris = vec![];
···
     cid,
     prev: None,
 };
-if let Some(_) = current_record {
+if current_record.is_some() {
     op.prev = current_record;
 };
 commit_ops.push(op);
···
     .collect::<Result<Vec<RecordWriteOp>>>()?;
 // @TODO: Use repo signing key global config
 let secp = Secp256k1::new();
-let repo_private_key = env::var("PDS_REPO_SIGNING_KEY_K256_PRIVATE_KEY_HEX").unwrap();
-let repo_secret_key =
-    SecretKey::from_slice(&hex::decode(repo_private_key.as_bytes()).unwrap()).unwrap();
+let repo_private_key = env::var("PDS_REPO_SIGNING_KEY_K256_PRIVATE_KEY_HEX")
+    .expect("PDS_REPO_SIGNING_KEY_K256_PRIVATE_KEY_HEX not set");
+let repo_secret_key = SecretKey::from_slice(
+    &hex::decode(repo_private_key.as_bytes()).expect("Failed to decode hex"),
+)
+.expect("Failed to create secret key from hex");
 let repo_signing_key = Keypair::from_secret_key(&secp, &repo_secret_key);

 let mut commit = repo
···
 pub async fn index_writes(&self, writes: Vec<PreparedWrite>, rev: &str) -> Result<()> {
     let now: &str = &rsky_common::now();

-    let _ = stream::iter(writes)
-        .then(|write| async move {
-            Ok::<(), anyhow::Error>(match write {
-                PreparedWrite::Create(write) => {
-                    let write_at_uri: AtUri = write.uri.try_into()?;
-                    self.record
-                        .index_record(
-                            write_at_uri.clone(),
-                            write.cid,
-                            Some(write.record),
-                            Some(write.action),
-                            rev.to_owned(),
-                            Some(now.to_string()),
-                        )
-                        .await?
-                }
-                PreparedWrite::Update(write) => {
-                    let write_at_uri: AtUri = write.uri.try_into()?;
-                    self.record
-                        .index_record(
-                            write_at_uri.clone(),
-                            write.cid,
-                            Some(write.record),
-                            Some(write.action),
-                            rev.to_owned(),
-                            Some(now.to_string()),
-                        )
-                        .await?
-                }
-                PreparedWrite::Delete(write) => {
-                    let write_at_uri: AtUri = write.uri.try_into()?;
-                    self.record.delete_record(&write_at_uri).await?
-                }
-            })
-        })
-        .collect::<Vec<_>>()
-        .await
-        .into_iter()
-        .collect::<Result<Vec<_>, _>>()?;
+    drop(
+        stream::iter(writes)
+            .then(async move |write| {
+                match write {
+                    PreparedWrite::Create(write) => {
+                        let write_at_uri: AtUri = write.uri.try_into()?;
+                        self.record
+                            .index_record(
+                                write_at_uri.clone(),
+                                write.cid,
+                                Some(write.record),
+                                Some(write.action),
+                                rev.to_owned(),
+                                Some(now.to_owned()),
+                            )
+                            .await?;
+                    }
+                    PreparedWrite::Update(write) => {
+                        let write_at_uri: AtUri = write.uri.try_into()?;
+                        self.record
+                            .index_record(
+                                write_at_uri.clone(),
+                                write.cid,
+                                Some(write.record),
+                                Some(write.action),
+                                rev.to_owned(),
+                                Some(now.to_owned()),
+                            )
+                            .await?;
+                    }
+                    PreparedWrite::Delete(write) => {
+                        let write_at_uri: AtUri = write.uri.try_into()?;
+                        self.record.delete_record(&write_at_uri).await?;
+                    }
+                }
+                Ok::<(), anyhow::Error>(())
+            })
+            .collect::<Vec<_>>()
+            .await
+            .into_iter()
+            .collect::<Result<Vec<_>, _>>()?,
+    );
     Ok(())
 }

 pub async fn destroy(&mut self) -> Result<()> {
     let did: String = self.did.clone();
-    let storage_guard = self.storage.read().await;
     use rsky_pds::schema::pds::blob::dsl as BlobSchema;

-    let blob_rows: Vec<String> = storage_guard
+    let blob_rows: Vec<String> = self
+        .storage
+        .read()
+        .await
         .db
         .interact(move |conn| {
             BlobSchema::blob
···
     .into_iter()
     .map(|row| Ok(Cid::from_str(&row)?))
     .collect::<Result<Vec<Cid>>>()?;
-let _ = stream::iter(cids.chunks(500))
-    .then(|chunk| async { self.blob.blobstore.delete_many(chunk.to_vec()).await })
-    .collect::<Vec<_>>()
-    .await
-    .into_iter()
-    .collect::<Result<Vec<_>, _>>()?;
+drop(
+    stream::iter(cids.chunks(500))
+        .then(|chunk| async { self.blob.blobstore.delete_many(chunk.to_vec()).await })
+        .collect::<Vec<_>>()
+        .await
+        .into_iter()
+        .collect::<Result<Vec<_>, _>>()?,
+);
 Ok(())
 }
···
     return Ok(vec![]);
 }
 let did: String = self.did.clone();
-let storage_guard = self.storage.read().await;
 use rsky_pds::schema::pds::record::dsl as RecordSchema;

 let cid_strs: Vec<String> = cids.into_iter().map(|c| c.to_string()).collect();
 let touched_uri_strs: Vec<String> = touched_uris.iter().map(|t| t.to_string()).collect();
-let res: Vec<String> = storage_guard
+let res: Vec<String> = self
+    .storage
+    .read()
+    .await
     .db
     .interact(move |conn| {
         RecordSchema::record
···
     .await
     .expect("Failed to get duplicate record cids")?;
 res.into_iter()
-    .map(|row| Cid::from_str(&row).map_err(|error| anyhow::Error::new(error)))
+    .map(|row| Cid::from_str(&row).map_err(anyhow::Error::new))
     .collect::<Result<Vec<Cid>>>()
 }
 }
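
Reviewer note: chaining `self.storage.read().await.…` keeps the `RwLock` read guard as a statement-scoped temporary instead of a named `storage_guard` that lives to the end of the block. A minimal sketch of that scoping difference, assuming the `tokio` crate with the `rt`, `macros`, and `sync` features:

```rust
use std::sync::Arc;
use tokio::sync::RwLock;

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let storage = Arc::new(RwLock::new(vec![1_u32, 2, 3]));

    // Named guard: held until the end of the enclosing block.
    let len_via_guard = {
        let guard = storage.read().await;
        guard.len()
    };

    // Chained form: the temporary read guard is dropped at the end of the
    // statement, so a pending writer is not blocked any longer than needed.
    let len_chained = storage.read().await.len();
    assert_eq!(len_via_guard, len_chained);

    storage.write().await.push(4);
    println!("{} items", storage.read().await.len());
}
```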
+10 -12  src/actor_store/preference.rs
···
 }

 impl PreferenceReader {
-    pub fn new(
+    pub const fn new(
         did: String,
         db: deadpool_diesel::Pool<
             deadpool_diesel::Manager<SqliteConnection>,
             deadpool_diesel::sqlite::Object,
         >,
     ) -> Self {
-        PreferenceReader { did, db }
+        Self { did, db }
     }

     pub async fn get_preferences(
···
             .load(conn)?;
         let account_prefs = prefs_res
             .into_iter()
-            .filter(|pref| match &namespace {
-                None => true,
-                Some(namespace) => pref_match_namespace(namespace, &pref.name),
+            .filter(|pref| {
+                namespace
+                    .as_ref()
+                    .is_none_or(|namespace| pref_match_namespace(namespace, &pref.name))
             })
             .filter(|pref| pref_in_scope(scope.clone(), pref.name.clone()))
             .map(|pref| {
···
 {
     false => bail!("Some preferences are not in the {namespace} namespace"),
     true => {
-        let not_in_scope = values
-            .iter()
-            .filter(|value| !pref_in_scope(scope.clone(), value.get_type()))
-            .collect::<Vec<&RefPreferences>>();
-        if !not_in_scope.is_empty() {
+        if values
+            .iter().any(|value| !pref_in_scope(scope.clone(), value.get_type())) {
             tracing::info!(
                 "@LOG: PreferenceReader::put_preferences() debug scope: {:?}, values: {:?}",
                 scope,
···
     .collect::<Vec<i32>>();
 // replace all prefs in given namespace
 if !all_pref_ids_in_namespace.is_empty() {
-    delete(AccountPrefSchema::account_pref)
+    _ = delete(AccountPrefSchema::account_pref)
         .filter(AccountPrefSchema::id.eq_any(all_pref_ids_in_namespace))
         .execute(conn)?;
 }
 if !put_prefs.is_empty() {
-    insert_into(AccountPrefSchema::account_pref)
+    _ = insert_into(AccountPrefSchema::account_pref)
         .values(
             put_prefs
                 .into_iter()
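
Reviewer note: `Option::is_none_or` (stable since Rust 1.82) replaces the `match` over the optional namespace, and `Iterator::any` replaces collecting the offenders just to test `is_empty()`. A sketch with a hypothetical `pref_match_namespace` helper standing in for the crate's function:

```rust
// Hypothetical namespace matcher, for illustration only.
fn pref_match_namespace(namespace: &str, name: &str) -> bool {
    name == namespace || name.starts_with(&format!("{namespace}."))
}

fn main() {
    let namespace: Option<String> = Some("app.bsky".to_owned());
    let names = ["app.bsky.feed", "com.example.other"];

    // A missing namespace matches everything; otherwise defer to the matcher.
    let kept: Vec<&str> = names
        .into_iter()
        .filter(|name| {
            namespace
                .as_ref()
                .is_none_or(|ns| pref_match_namespace(ns, name))
        })
        .collect();
    assert_eq!(kept, vec!["app.bsky.feed"]);

    // `any` short-circuits instead of building a throwaway Vec.
    let has_out_of_namespace = names
        .iter()
        .any(|name| !pref_match_namespace("app.bsky", name));
    assert!(has_out_of_namespace);
    println!("namespace filtering ok");
}
```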
+36 -45  src/actor_store/record.rs
···
 impl RecordReader {
     /// Create a new record handler.
-    pub(crate) fn new(
+    pub(crate) const fn new(
         did: String,
         db: deadpool_diesel::Pool<
             deadpool_diesel::Manager<SqliteConnection>,
···
 use rsky_pds::schema::pds::record::dsl as RecordSchema;
 use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;

-let include_soft_deleted: bool = if let Some(include_soft_deleted) = include_soft_deleted {
-    include_soft_deleted
-} else {
-    false
-};
+let include_soft_deleted: bool = include_soft_deleted.unwrap_or(false);
 let mut builder = RecordSchema::record
     .inner_join(RepoBlockSchema::repo_block.on(RepoBlockSchema::cid.eq(RecordSchema::cid)))
     .limit(limit)
···
 use rsky_pds::schema::pds::record::dsl as RecordSchema;
 use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;

-let include_soft_deleted: bool = if let Some(include_soft_deleted) = include_soft_deleted {
-    include_soft_deleted
-} else {
-    false
-};
+let include_soft_deleted: bool = include_soft_deleted.unwrap_or(false);
 let mut builder = RecordSchema::record
     .inner_join(RepoBlockSchema::repo_block.on(RepoBlockSchema::cid.eq(RecordSchema::cid)))
     .select((Record::as_select(), RepoBlock::as_select()))
···
 ) -> Result<bool> {
     use rsky_pds::schema::pds::record::dsl as RecordSchema;

-    let include_soft_deleted: bool = if let Some(include_soft_deleted) = include_soft_deleted {
-        include_soft_deleted
-    } else {
-        false
-    };
+    let include_soft_deleted: bool = include_soft_deleted.unwrap_or(false);
     let mut builder = RecordSchema::record
         .select(RecordSchema::uri)
         .filter(RecordSchema::uri.eq(uri))
···
     .interact(move |conn| builder.first::<String>(conn).optional())
     .await
     .expect("Failed to check record")?;
-    Ok(!!record_uri.is_some())
+    Ok(record_uri.is_some())
 }

 /// Get the takedown status of a record.
···
             })
             .await
             .expect("Failed to get takedown status")?;
-        if let Some(res) = res {
-            if let Some(takedown_ref) = res {
-                Ok(Some(StatusAttr {
-                    applied: true,
-                    r#ref: Some(takedown_ref),
-                }))
-            } else {
-                Ok(Some(StatusAttr {
-                    applied: false,
-                    r#ref: None,
-                }))
-            }
-        } else {
-            Ok(None)
-        }
+        res.map_or_else(
+            || Ok(None),
+            |res| {
+                res.map_or_else(
+                    || {
+                        Ok(Some(StatusAttr {
+                            applied: false,
+                            r#ref: None,
+                        }))
+                    },
+                    |takedown_ref| {
+                        Ok(Some(StatusAttr {
+                            applied: true,
+                            r#ref: Some(takedown_ref),
+                        }))
+                    },
+                )
+            },
+        )
     }

     /// Get the current CID for a record URI.
···
 let rkey = uri.get_rkey();
 let hostname = uri.get_hostname().to_string();
 let action = action.unwrap_or(WriteOpAction::Create);
-let indexed_at = timestamp.unwrap_or_else(|| rsky_common::now());
+let indexed_at = timestamp.unwrap_or_else(rsky_common::now);
 let row = Record {
     did: self.did.clone(),
     uri: uri.to_string(),
···
     .get()
     .await?
     .interact(move |conn| {
-        insert_into(RecordSchema::record)
+        _ = insert_into(RecordSchema::record)
             .values(row)
             .on_conflict(RecordSchema::uri)
             .do_update()
···
 if let Some(record) = record {
     // Maintain backlinks
     let backlinks = get_backlinks(&uri, &record)?;
-    if let WriteOpAction::Update = action {
+    if action == WriteOpAction::Update {
         // On update just recreate backlinks from scratch for the record, so we can clear out
         // the old ones. E.g. for weird cases like updating a follow to be for a different did.
         self.remove_backlinks_by_uri(&uri).await?;
···
     .get()
     .await?
     .interact(move |conn| {
-        delete(RecordSchema::record)
+        _ = delete(RecordSchema::record)
             .filter(RecordSchema::uri.eq(&uri))
             .execute(conn)?;
-        delete(BacklinkSchema::backlink)
+        _ = delete(BacklinkSchema::backlink)
             .filter(BacklinkSchema::uri.eq(&uri))
             .execute(conn)?;
         tracing::debug!(
···
     .get()
     .await?
     .interact(move |conn| {
-        delete(BacklinkSchema::backlink)
+        _ = delete(BacklinkSchema::backlink)
             .filter(BacklinkSchema::uri.eq(uri))
             .execute(conn)?;
         Ok(())
···
 /// Add backlinks to the database.
 pub(crate) async fn add_backlinks(&self, backlinks: Vec<Backlink>) -> Result<()> {
-    if backlinks.len() == 0 {
+    if backlinks.is_empty() {
         Ok(())
     } else {
         use rsky_pds::schema::pds::backlink::dsl as BacklinkSchema;
···
     .get()
     .await?
     .interact(move |conn| {
-        insert_or_ignore_into(BacklinkSchema::backlink)
+        _ = insert_or_ignore_into(BacklinkSchema::backlink)
             .values(&backlinks)
             .execute(conn)?;
         Ok(())
···
 use rsky_pds::schema::pds::record::dsl as RecordSchema;

 let takedown_ref: Option<String> = match takedown.applied {
-    true => match takedown.r#ref {
-        Some(takedown_ref) => Some(takedown_ref),
-        None => Some(rsky_common::now()),
-    },
+    true => takedown
+        .r#ref
+        .map_or_else(|| Some(rsky_common::now()), Some),
     false => None,
 };
 let uri_string = uri.to_string();
···
     .get()
     .await?
     .interact(move |conn| {
-        update(RecordSchema::record)
+        _ = update(RecordSchema::record)
             .filter(RecordSchema::uri.eq(uri_string))
             .set(RecordSchema::takedownRef.eq(takedown_ref))
             .execute(conn)?;
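
Reviewer note: the `unwrap_or(false)` and `unwrap_or_else(rsky_common::now)` changes are pure simplifications — a default for `Option<bool>`, and a function reference instead of a redundant `|| now()` closure. A reduced sketch in which `now` is a stand-in, not the real `rsky_common::now`:

```rust
// Stand-in for rsky_common::now(); a real implementation would format a timestamp.
fn now() -> String {
    "2024-01-01T00:00:00Z".to_owned()
}

fn main() {
    let include_soft_deleted: Option<bool> = None;
    // Replaces the four-line `if let ... else` from the old code.
    let include_soft_deleted = include_soft_deleted.unwrap_or(false);
    assert!(!include_soft_deleted);

    let timestamp: Option<String> = None;
    // Passing the function itself avoids wrapping it in a closure.
    let indexed_at = timestamp.unwrap_or_else(now);
    assert_eq!(indexed_at, "2024-01-01T00:00:00Z");
    println!("{indexed_at}");
}
```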
+17 -11  src/actor_store/sql_blob.rs
···
 #![expect(
     clippy::pub_use,
     clippy::single_char_lifetime_names,
-    unused_qualifications
+    unused_qualifications,
+    unnameable_types
 )]
 use anyhow::{Context, Result};
 use cidv10::Cid;
···
 }

 impl ByteStream {
-    pub fn new(bytes: Vec<u8>) -> Self {
+    pub const fn new(bytes: Vec<u8>) -> Self {
         Self { bytes }
     }
···
 impl BlobStoreSql {
     /// Create a new SQL-based blob store for the given DID
-    pub fn new(
+    pub const fn new(
         did: String,
         db: deadpool_diesel::Pool<
             deadpool_diesel::Manager<SqliteConnection>,
             deadpool_diesel::sqlite::Object,
         >,
     ) -> Self {
-        BlobStoreSql { db, did }
+        Self { db, did }
     }

     // /// Create a factory function for blob stores
···
 self.put_permanent_with_mime(
     Cid::try_from(format!("bafy{}", key)).unwrap_or_else(|_| Cid::default()),
     bytes,
-    "application/octet-stream".to_string(),
+    "application/octet-stream".to_owned(),
 )
 .await?;
···
 let bytes_len = bytes.len() as i32;

 // Store directly in the database
-self.db
+_ = self
+    .db
     .get()
     .await?
     .interact(move |conn| {
···
 /// Store a blob directly as permanent
 pub async fn put_permanent(&self, cid: Cid, bytes: Vec<u8>) -> Result<()> {
-    self.put_permanent_with_mime(cid, bytes, "application/octet-stream".to_string())
+    self.put_permanent_with_mime(cid, bytes, "application/octet-stream".to_owned())
         .await
 }
···
 let did_clone = self.did.clone();

 // Update the quarantine flag in the database
-self.db
+_ = self
+    .db
     .get()
     .await?
     .interact(move |conn| {
···
 let did_clone = self.did.clone();

 // Update the quarantine flag in the database
-self.db
+_ = self
+    .db
     .get()
     .await?
     .interact(move |conn| {
···
 let did_clone = self.did.clone();

 // Delete from database
-self.db
+_ = self
+    .db
     .get()
     .await?
     .interact(move |conn| {
···
 let did_clone = self.did.clone();

 // Delete all blobs in one operation
-self.db
+_ = self
+    .db
     .get()
     .await?
     .interact(move |conn| {
+16 -10  src/actor_store/sql_repo.rs
···
     cid: &'life Cid,
 ) -> Pin<Box<dyn Future<Output = Result<Option<Vec<u8>>>> + Send + Sync + 'life>> {
     let did: String = self.did.clone();
-    let cid = cid.clone();
+    let cid = *cid;

     Box::pin(async move {
         use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
         let cached = {
             let cache_guard = self.cache.read().await;
-            cache_guard.get(cid).map(|v| v.clone())
+            cache_guard.get(cid).cloned()
         };
         if let Some(cached_result) = cached {
-            return Ok(Some(cached_result.clone()));
+            return Ok(Some(cached_result));
         }

         let found: Option<Vec<u8>> = self
···
 let blocks = Arc::new(tokio::sync::Mutex::new(BlockMap::new()));
 let missing_set = Arc::new(tokio::sync::Mutex::new(missing));

-let _: Vec<_> = stream::iter(missing_strings.chunks(500))
+let stream: Vec<_> = stream::iter(missing_strings.chunks(500))
     .then(|batch| {
         let this_did = did.clone();
         let blocks = Arc::clone(&blocks);
···
     })
     .try_collect()
     .await?;
+drop(stream);

 // Extract values from synchronization primitives
 let mut blocks = Arc::try_unwrap(blocks)
···
 Box::pin(async move {
     use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;

-    self.db
+    _ = self
+        .db
         .interact(move |conn| {
             insert_into(RepoBlockSchema::repo_block)
                 .values((
···
     blocks.chunks(50).map(|chunk| chunk.to_vec()).collect();

 for batch in chunks {
-    self.db
+    _ = self
+        .db
         .interact(move |conn| {
             insert_or_ignore_into(RepoBlockSchema::repo_block)
                 .values(&batch)
···
 let is_create = is_create.unwrap_or(false);
 if is_create {
-    self.db
+    _ = self
+        .db
         .interact(move |conn| {
             insert_into(RepoRootSchema::repo_root)
                 .values((
···
         .await
         .expect("Failed to create root")?;
 } else {
-    self.db
+    _ = self
+        .db
         .interact(move |conn| {
             update(RepoRootSchema::repo_root)
                 .filter(RepoRootSchema::did.eq(did))
···
 impl SqlRepoReader {
     pub fn new(did: String, now: Option<String>, db: deadpool_diesel::sqlite::Object) -> Self {
         let now = now.unwrap_or_else(rsky_common::now);
-        SqlRepoReader {
+        Self {
             cache: Arc::new(RwLock::new(BlockMap::new())),
             root: None,
             rev: None,
···
 use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;

 let cid_strings: Vec<String> = cids.into_iter().map(|c| c.to_string()).collect();
-self.db
+_ = self
+    .db
     .interact(move |conn| {
         delete(RepoBlockSchema::repo_block)
             .filter(RepoBlockSchema::did.eq(did))
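
Reviewer note: `.cloned()` on the cache lookup and `let cid = *cid;` both remove redundant clones — the cache hit is cloned exactly once, and `Cid` is `Copy`, so dereferencing is enough. A sketch using a plain `HashMap` in place of the repo's `BlockMap` cache:

```rust
use std::collections::HashMap;

fn main() {
    // Stand-in cache keyed by a string CID; the real code uses a BlockMap.
    let mut cache: HashMap<String, Vec<u8>> = HashMap::new();
    cache.insert("bafy-example".to_owned(), vec![1, 2, 3]);

    // `.cloned()` on Option<&Vec<u8>> says the same thing as `.map(|v| v.clone())`
    // with less noise, and the result needs no second clone before returning.
    let hit: Option<Vec<u8>> = cache.get("bafy-example").cloned();
    assert_eq!(hit, Some(vec![1, 2, 3]));

    // Copy types are dereferenced rather than cloned, analogous to `let cid = *cid;`.
    let n = 42_u64;
    let n_ref = &n;
    let copied = *n_ref;
    assert_eq!(copied, 42);
    println!("cache lookup ok");
}
```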
+3 -3  src/auth.rs
···
 use crate::schema::pds::oauth_used_jtis::dsl as JtiSchema;

 // Check if JTI has been used before
-let jti_string = jti.to_string();
+let jti_string = jti.to_owned();
 let jti_used = state
     .db
     .get()
···
     .unwrap_or_else(|| timestamp.checked_add(60).unwrap_or(timestamp));

 // Convert SQLx INSERT to Diesel
-let jti_str = jti.to_string();
+let jti_str = jti.to_owned();
 let thumbprint_str = calculated_thumbprint.to_string();
-state
+let _ = state
     .db
     .get()
     .await
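
Reviewer note: on a `&str`, `to_owned()` and `to_string()` produce the same `String`; the switch mainly satisfies clippy's `str_to_string` lint and makes the intent (taking ownership, not formatting) explicit. A trivial sketch:

```rust
fn main() {
    let jti: &str = "jti-1234";

    // Both yield an owned String; `to_owned` goes through `ToOwned`, while
    // `to_string` goes through the `ToString`/`Display` path (specialized for
    // str, so there is no behavioural difference).
    let a: String = jti.to_owned();
    let b: String = jti.to_string();
    assert_eq!(a, b);
    println!("{a}");
}
```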
+8 -10  src/main.rs
···
 let path_blob = path_repo.replace("repo", "blob");
 let actor_blob_pool =
     establish_pool(&path_blob).context("failed to create database connection pool")?;
-actor_pools.insert(
+drop(actor_pools.insert(
     did.to_string(),
     ActorPools {
         repo: actor_repo_pool,
         blob: actor_blob_pool,
     },
-);
+));
 }
 }
 // Apply pending migrations
···
     total_count: i32,
 }

-// let result = diesel::sql_query(
-//     "SELECT (SELECT COUNT(*) FROM accounts) + (SELECT COUNT(*) FROM invites) AS total_count",
-// )
-// .get_result::<TotalCount>(conn)
-// .context("failed to query database")?;
 let result = conn.interact(move |conn| {
     diesel::sql_query(
         "SELECT (SELECT COUNT(*) FROM accounts) + (SELECT COUNT(*) FROM invites) AS total_count",
···
 let uuid = Uuid::new_v4().to_string();

 let uuid_clone = uuid.clone();
-conn.interact(move |conn| {
-    diesel::sql_query(
+_ = conn
+    .interact(move |conn| {
+        diesel::sql_query(
         "INSERT INTO invites (id, did, count, created_at) VALUES (?, NULL, 1, datetime('now'))",
         )
         .bind::<diesel::sql_types::Text, _>(uuid_clone)
         .execute(conn)
         .context("failed to create new invite code")
         .expect("should be able to create invite code")
-});
+    })
+    .await
+    .expect("should be able to create invite code");

 // N.B: This is a sensitive message, so we're bypassing `tracing` here and
 // logging it directly to console.
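
Reviewer note: the invite-code change above is the classic "future built but never awaited" bug — the old code constructed the `conn.interact(…)` future and dropped it, so the INSERT never ran; the new code awaits it. A reduced sketch of the bug class, assuming `tokio`; `do_insert` is a stand-in, not deadpool's real `interact` API:

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};

// Stand-in for an async database call.
async fn do_insert(counter: Arc<AtomicUsize>) {
    counter.fetch_add(1, Ordering::SeqCst);
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let counter = Arc::new(AtomicUsize::new(0));

    // The future is only constructed here; it is never polled, so the
    // "insert" does not happen.
    let _unused = do_insert(Arc::clone(&counter));
    assert_eq!(counter.load(Ordering::SeqCst), 0);

    // Awaiting actually performs the work, as the corrected main.rs now does.
    do_insert(Arc::clone(&counter)).await;
    assert_eq!(counter.load(Ordering::SeqCst), 1);
    println!("inserted {} time(s)", counter.load(Ordering::SeqCst));
}
```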
+1 -1  src/oauth.rs