+2
.cargo/config.toml
+2
.cargo/config.toml
+22
.sqlx/query-05fd99170e31e68fa5028c862417cdf535cd70e09fde0a8a28249df0070eb2fc.json
+22
.sqlx/query-05fd99170e31e68fa5028c862417cdf535cd70e09fde0a8a28249df0070eb2fc.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT t.token FROM plc_operation_tokens t JOIN users u ON t.user_id = u.id WHERE u.did = $1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "token",
9
+
"type_info": "Text"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Text"
15
+
]
16
+
},
17
+
"nullable": [
18
+
false
19
+
]
20
+
},
21
+
"hash": "05fd99170e31e68fa5028c862417cdf535cd70e09fde0a8a28249df0070eb2fc"
22
+
}
+15
.sqlx/query-0710b57fb9aa933525f617b15e6e2e5feaa9c59c38ec9175568abdacda167107.json
+15
.sqlx/query-0710b57fb9aa933525f617b15e6e2e5feaa9c59c38ec9175568abdacda167107.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "UPDATE users SET deactivated_at = $1 WHERE did = $2",
4
+
"describe": {
5
+
"columns": [],
6
+
"parameters": {
7
+
"Left": [
8
+
"Timestamptz",
9
+
"Text"
10
+
]
11
+
},
12
+
"nullable": []
13
+
},
14
+
"hash": "0710b57fb9aa933525f617b15e6e2e5feaa9c59c38ec9175568abdacda167107"
15
+
}
+22
.sqlx/query-0ec60bb854a4991d0d7249a68f7445b65c8cc8c723baca221d85f5e4f2478b99.json
+22
.sqlx/query-0ec60bb854a4991d0d7249a68f7445b65c8cc8c723baca221d85f5e4f2478b99.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT body FROM comms_queue WHERE user_id = (SELECT id FROM users WHERE did = $1) AND comms_type = 'email_update' ORDER BY created_at DESC LIMIT 1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "body",
9
+
"type_info": "Text"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Text"
15
+
]
16
+
},
17
+
"nullable": [
18
+
false
19
+
]
20
+
},
21
+
"hash": "0ec60bb854a4991d0d7249a68f7445b65c8cc8c723baca221d85f5e4f2478b99"
22
+
}
+22
.sqlx/query-24a7686c535e4f0332f45daa20cfce2209635090252ac3692823450431d03dc6.json
+22
.sqlx/query-24a7686c535e4f0332f45daa20cfce2209635090252ac3692823450431d03dc6.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT COUNT(*) FROM comms_queue WHERE status = 'pending' AND user_id = $1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "count",
9
+
"type_info": "Int8"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Uuid"
15
+
]
16
+
},
17
+
"nullable": [
18
+
null
19
+
]
20
+
},
21
+
"hash": "24a7686c535e4f0332f45daa20cfce2209635090252ac3692823450431d03dc6"
22
+
}
+14
.sqlx/query-29ef76852bb89af1ab9e679ceaa4abcf8bc8268a348d3be0da9840d1708d20b5.json
+14
.sqlx/query-29ef76852bb89af1ab9e679ceaa4abcf8bc8268a348d3be0da9840d1708d20b5.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "UPDATE users SET password_reset_code_expires_at = NOW() - INTERVAL '1 hour' WHERE email = $1",
4
+
"describe": {
5
+
"columns": [],
6
+
"parameters": {
7
+
"Left": [
8
+
"Text"
9
+
]
10
+
},
11
+
"nullable": []
12
+
},
13
+
"hash": "29ef76852bb89af1ab9e679ceaa4abcf8bc8268a348d3be0da9840d1708d20b5"
14
+
}
+54
.sqlx/query-4445cc86cdf04894b340e67661b79a3c411917144a011f50849b737130b24dbe.json
+54
.sqlx/query-4445cc86cdf04894b340e67661b79a3c411917144a011f50849b737130b24dbe.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT subject, body, comms_type as \"comms_type: String\" FROM comms_queue WHERE user_id = $1 AND comms_type = 'admin_email' ORDER BY created_at DESC LIMIT 1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "subject",
9
+
"type_info": "Text"
10
+
},
11
+
{
12
+
"ordinal": 1,
13
+
"name": "body",
14
+
"type_info": "Text"
15
+
},
16
+
{
17
+
"ordinal": 2,
18
+
"name": "comms_type: String",
19
+
"type_info": {
20
+
"Custom": {
21
+
"name": "comms_type",
22
+
"kind": {
23
+
"Enum": [
24
+
"welcome",
25
+
"email_verification",
26
+
"password_reset",
27
+
"email_update",
28
+
"account_deletion",
29
+
"admin_email",
30
+
"plc_operation",
31
+
"two_factor_code",
32
+
"channel_verification",
33
+
"passkey_recovery",
34
+
"legacy_login_alert",
35
+
"migration_verification"
36
+
]
37
+
}
38
+
}
39
+
}
40
+
}
41
+
],
42
+
"parameters": {
43
+
"Left": [
44
+
"Uuid"
45
+
]
46
+
},
47
+
"nullable": [
48
+
true,
49
+
false,
50
+
false
51
+
]
52
+
},
53
+
"hash": "4445cc86cdf04894b340e67661b79a3c411917144a011f50849b737130b24dbe"
54
+
}
+22
.sqlx/query-4560c237741ce9d4166aecd669770b3360a3ac71e649b293efb88d92c3254068.json
+22
.sqlx/query-4560c237741ce9d4166aecd669770b3360a3ac71e649b293efb88d92c3254068.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT id FROM users WHERE email = $1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "id",
9
+
"type_info": "Uuid"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Text"
15
+
]
16
+
},
17
+
"nullable": [
18
+
false
19
+
]
20
+
},
21
+
"hash": "4560c237741ce9d4166aecd669770b3360a3ac71e649b293efb88d92c3254068"
22
+
}
+28
.sqlx/query-4649e8daefaf4cfefc5cb2de8b3813f13f5892f653128469be727b686e6a0f0a.json
+28
.sqlx/query-4649e8daefaf4cfefc5cb2de8b3813f13f5892f653128469be727b686e6a0f0a.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT body, metadata FROM comms_queue WHERE user_id = $1 AND comms_type = 'channel_verification' ORDER BY created_at DESC LIMIT 1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "body",
9
+
"type_info": "Text"
10
+
},
11
+
{
12
+
"ordinal": 1,
13
+
"name": "metadata",
14
+
"type_info": "Jsonb"
15
+
}
16
+
],
17
+
"parameters": {
18
+
"Left": [
19
+
"Uuid"
20
+
]
21
+
},
22
+
"nullable": [
23
+
false,
24
+
true
25
+
]
26
+
},
27
+
"hash": "4649e8daefaf4cfefc5cb2de8b3813f13f5892f653128469be727b686e6a0f0a"
28
+
}
+28
.sqlx/query-47fe4a54857344d8f789f37092a294cd58f64b4fb431b54b5deda13d64525e88.json
+28
.sqlx/query-47fe4a54857344d8f789f37092a294cd58f64b4fb431b54b5deda13d64525e88.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT token, expires_at FROM account_deletion_requests WHERE did = $1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "token",
9
+
"type_info": "Text"
10
+
},
11
+
{
12
+
"ordinal": 1,
13
+
"name": "expires_at",
14
+
"type_info": "Timestamptz"
15
+
}
16
+
],
17
+
"parameters": {
18
+
"Left": [
19
+
"Text"
20
+
]
21
+
},
22
+
"nullable": [
23
+
false,
24
+
false
25
+
]
26
+
},
27
+
"hash": "47fe4a54857344d8f789f37092a294cd58f64b4fb431b54b5deda13d64525e88"
28
+
}
+22
.sqlx/query-49cbc923cc4a0dcf7dea4ead5ab9580ff03b717586c4ca2d5343709e2dac86b6.json
+22
.sqlx/query-49cbc923cc4a0dcf7dea4ead5ab9580ff03b717586c4ca2d5343709e2dac86b6.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT email_verified FROM users WHERE did = $1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "email_verified",
9
+
"type_info": "Bool"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Text"
15
+
]
16
+
},
17
+
"nullable": [
18
+
false
19
+
]
20
+
},
21
+
"hash": "49cbc923cc4a0dcf7dea4ead5ab9580ff03b717586c4ca2d5343709e2dac86b6"
22
+
}
+28
.sqlx/query-5a016f289caf75177731711e56e92881ba343c73a9a6e513e205c801c5943ec0.json
+28
.sqlx/query-5a016f289caf75177731711e56e92881ba343c73a9a6e513e205c801c5943ec0.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "\n SELECT k.key_bytes, k.encryption_version\n FROM user_keys k\n JOIN users u ON k.user_id = u.id\n WHERE u.did = $1\n ",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "key_bytes",
9
+
"type_info": "Bytea"
10
+
},
11
+
{
12
+
"ordinal": 1,
13
+
"name": "encryption_version",
14
+
"type_info": "Int4"
15
+
}
16
+
],
17
+
"parameters": {
18
+
"Left": [
19
+
"Text"
20
+
]
21
+
},
22
+
"nullable": [
23
+
false,
24
+
true
25
+
]
26
+
},
27
+
"hash": "5a016f289caf75177731711e56e92881ba343c73a9a6e513e205c801c5943ec0"
28
+
}
+22
.sqlx/query-5a036d95feedcbe6fb6396b10a7b4bd6a2eedeefda46a23e6a904cdbc3a65d45.json
+22
.sqlx/query-5a036d95feedcbe6fb6396b10a7b4bd6a2eedeefda46a23e6a904cdbc3a65d45.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT body FROM comms_queue WHERE user_id = $1 AND comms_type = 'email_update' ORDER BY created_at DESC LIMIT 1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "body",
9
+
"type_info": "Text"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Uuid"
15
+
]
16
+
},
17
+
"nullable": [
18
+
false
19
+
]
20
+
},
21
+
"hash": "5a036d95feedcbe6fb6396b10a7b4bd6a2eedeefda46a23e6a904cdbc3a65d45"
22
+
}
+28
.sqlx/query-65eece4401fa4cf08d7f09c3f0f0fdc32722423772cb6d11123aae9f2e0784d0.json
+28
.sqlx/query-65eece4401fa4cf08d7f09c3f0f0fdc32722423772cb6d11123aae9f2e0784d0.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "\n SELECT icu.used_by_user, icu.code\n FROM invite_code_uses icu\n WHERE icu.used_by_user = ANY($1)\n ",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "used_by_user",
9
+
"type_info": "Uuid"
10
+
},
11
+
{
12
+
"ordinal": 1,
13
+
"name": "code",
14
+
"type_info": "Text"
15
+
}
16
+
],
17
+
"parameters": {
18
+
"Left": [
19
+
"UuidArray"
20
+
]
21
+
},
22
+
"nullable": [
23
+
false,
24
+
false
25
+
]
26
+
},
27
+
"hash": "65eece4401fa4cf08d7f09c3f0f0fdc32722423772cb6d11123aae9f2e0784d0"
28
+
}
+3
-3
.sqlx/query-6df413951ea7648c77d8db2fe6e704370869816a3f47c86671dfe000b5961eee.json
.sqlx/query-2cbfa3c83222c1f67befdbfda687149e7a9af715957182954fbc28bf000b329f.json
+3
-3
.sqlx/query-6df413951ea7648c77d8db2fe6e704370869816a3f47c86671dfe000b5961eee.json
.sqlx/query-2cbfa3c83222c1f67befdbfda687149e7a9af715957182954fbc28bf000b329f.json
···
1
1
{
2
2
"db_name": "PostgreSQL",
3
-
"query": "\n SELECT id, did, handle, email, created_at, invites_disabled, email_verified, deactivated_at\n FROM users\n WHERE did = $1\n ",
3
+
"query": "\n SELECT id, did, handle, email, created_at, invites_disabled, email_verified, deactivated_at\n FROM users\n WHERE did = ANY($1)\n ",
4
4
"describe": {
5
5
"columns": [
6
6
{
···
46
46
],
47
47
"parameters": {
48
48
"Left": [
49
-
"Text"
49
+
"TextArray"
50
50
]
51
51
},
52
52
"nullable": [
···
60
60
true
61
61
]
62
62
},
63
-
"hash": "6df413951ea7648c77d8db2fe6e704370869816a3f47c86671dfe000b5961eee"
63
+
"hash": "2cbfa3c83222c1f67befdbfda687149e7a9af715957182954fbc28bf000b329f"
64
64
}
+22
.sqlx/query-785a864944c5939331704c71b0cd3ed26ffdd64f3fd0f26ecc28b6a0557bbe8f.json
+22
.sqlx/query-785a864944c5939331704c71b0cd3ed26ffdd64f3fd0f26ecc28b6a0557bbe8f.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT subject FROM comms_queue WHERE user_id = $1 AND comms_type = 'admin_email' AND body = 'Email without subject' LIMIT 1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "subject",
9
+
"type_info": "Text"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Uuid"
15
+
]
16
+
},
17
+
"nullable": [
18
+
true
19
+
]
20
+
},
21
+
"hash": "785a864944c5939331704c71b0cd3ed26ffdd64f3fd0f26ecc28b6a0557bbe8f"
22
+
}
+22
.sqlx/query-7caa8f9083b15ec1209dda35c4c6f6fba9fe338e4a6a10636b5389d426df1631.json
+22
.sqlx/query-7caa8f9083b15ec1209dda35c4c6f6fba9fe338e4a6a10636b5389d426df1631.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "\n SELECT t.token\n FROM plc_operation_tokens t\n JOIN users u ON t.user_id = u.id\n WHERE u.did = $1\n ",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "token",
9
+
"type_info": "Text"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Text"
15
+
]
16
+
},
17
+
"nullable": [
18
+
false
19
+
]
20
+
},
21
+
"hash": "7caa8f9083b15ec1209dda35c4c6f6fba9fe338e4a6a10636b5389d426df1631"
22
+
}
+28
.sqlx/query-82717b6f61cd79347e1ca7e92c4413743ba168d1e0d8b85566711e54d4048f81.json
+28
.sqlx/query-82717b6f61cd79347e1ca7e92c4413743ba168d1e0d8b85566711e54d4048f81.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT t.token, t.expires_at FROM plc_operation_tokens t JOIN users u ON t.user_id = u.id WHERE u.did = $1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "token",
9
+
"type_info": "Text"
10
+
},
11
+
{
12
+
"ordinal": 1,
13
+
"name": "expires_at",
14
+
"type_info": "Timestamptz"
15
+
}
16
+
],
17
+
"parameters": {
18
+
"Left": [
19
+
"Text"
20
+
]
21
+
},
22
+
"nullable": [
23
+
false,
24
+
false
25
+
]
26
+
},
27
+
"hash": "82717b6f61cd79347e1ca7e92c4413743ba168d1e0d8b85566711e54d4048f81"
28
+
}
+22
.sqlx/query-9ad422bf3c43e3cfd86fc88c73594246ead214ca794760d3fe77bb5cf4f27be5.json
+22
.sqlx/query-9ad422bf3c43e3cfd86fc88c73594246ead214ca794760d3fe77bb5cf4f27be5.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT body FROM comms_queue WHERE user_id = (SELECT id FROM users WHERE did = $1) AND comms_type = 'email_verification' ORDER BY created_at DESC LIMIT 1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "body",
9
+
"type_info": "Text"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Text"
15
+
]
16
+
},
17
+
"nullable": [
18
+
false
19
+
]
20
+
},
21
+
"hash": "9ad422bf3c43e3cfd86fc88c73594246ead214ca794760d3fe77bb5cf4f27be5"
22
+
}
+28
.sqlx/query-9b035b051769e6b9d45910a8bb42ac0f84c73de8c244ba4560f004ee3f4b7002.json
+28
.sqlx/query-9b035b051769e6b9d45910a8bb42ac0f84c73de8c244ba4560f004ee3f4b7002.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT did, public_key_did_key FROM reserved_signing_keys WHERE public_key_did_key = $1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "did",
9
+
"type_info": "Text"
10
+
},
11
+
{
12
+
"ordinal": 1,
13
+
"name": "public_key_did_key",
14
+
"type_info": "Text"
15
+
}
16
+
],
17
+
"parameters": {
18
+
"Left": [
19
+
"Text"
20
+
]
21
+
},
22
+
"nullable": [
23
+
true,
24
+
false
25
+
]
26
+
},
27
+
"hash": "9b035b051769e6b9d45910a8bb42ac0f84c73de8c244ba4560f004ee3f4b7002"
28
+
}
+108
.sqlx/query-9e772a967607553a0ab800970eaeadcaab7e06bdb79e0c89eb919b1bc1d6fabe.json
+108
.sqlx/query-9e772a967607553a0ab800970eaeadcaab7e06bdb79e0c89eb919b1bc1d6fabe.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "\n SELECT\n id, user_id, recipient, subject, body,\n channel as \"channel: CommsChannel\",\n comms_type as \"comms_type: CommsType\",\n status as \"status: CommsStatus\"\n FROM comms_queue\n WHERE id = $1\n ",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "id",
9
+
"type_info": "Uuid"
10
+
},
11
+
{
12
+
"ordinal": 1,
13
+
"name": "user_id",
14
+
"type_info": "Uuid"
15
+
},
16
+
{
17
+
"ordinal": 2,
18
+
"name": "recipient",
19
+
"type_info": "Text"
20
+
},
21
+
{
22
+
"ordinal": 3,
23
+
"name": "subject",
24
+
"type_info": "Text"
25
+
},
26
+
{
27
+
"ordinal": 4,
28
+
"name": "body",
29
+
"type_info": "Text"
30
+
},
31
+
{
32
+
"ordinal": 5,
33
+
"name": "channel: CommsChannel",
34
+
"type_info": {
35
+
"Custom": {
36
+
"name": "comms_channel",
37
+
"kind": {
38
+
"Enum": [
39
+
"email",
40
+
"discord",
41
+
"telegram",
42
+
"signal"
43
+
]
44
+
}
45
+
}
46
+
}
47
+
},
48
+
{
49
+
"ordinal": 6,
50
+
"name": "comms_type: CommsType",
51
+
"type_info": {
52
+
"Custom": {
53
+
"name": "comms_type",
54
+
"kind": {
55
+
"Enum": [
56
+
"welcome",
57
+
"email_verification",
58
+
"password_reset",
59
+
"email_update",
60
+
"account_deletion",
61
+
"admin_email",
62
+
"plc_operation",
63
+
"two_factor_code",
64
+
"channel_verification",
65
+
"passkey_recovery",
66
+
"legacy_login_alert",
67
+
"migration_verification"
68
+
]
69
+
}
70
+
}
71
+
}
72
+
},
73
+
{
74
+
"ordinal": 7,
75
+
"name": "status: CommsStatus",
76
+
"type_info": {
77
+
"Custom": {
78
+
"name": "comms_status",
79
+
"kind": {
80
+
"Enum": [
81
+
"pending",
82
+
"processing",
83
+
"sent",
84
+
"failed"
85
+
]
86
+
}
87
+
}
88
+
}
89
+
}
90
+
],
91
+
"parameters": {
92
+
"Left": [
93
+
"Uuid"
94
+
]
95
+
},
96
+
"nullable": [
97
+
false,
98
+
false,
99
+
false,
100
+
true,
101
+
false,
102
+
false,
103
+
false,
104
+
false
105
+
]
106
+
},
107
+
"hash": "9e772a967607553a0ab800970eaeadcaab7e06bdb79e0c89eb919b1bc1d6fabe"
108
+
}
+34
.sqlx/query-a23a390659616779d7dbceaa3b5d5171e70fa25e3b8393e142cebcbff752f0f5.json
+34
.sqlx/query-a23a390659616779d7dbceaa3b5d5171e70fa25e3b8393e142cebcbff752f0f5.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT private_key_bytes, expires_at, used_at FROM reserved_signing_keys WHERE public_key_did_key = $1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "private_key_bytes",
9
+
"type_info": "Bytea"
10
+
},
11
+
{
12
+
"ordinal": 1,
13
+
"name": "expires_at",
14
+
"type_info": "Timestamptz"
15
+
},
16
+
{
17
+
"ordinal": 2,
18
+
"name": "used_at",
19
+
"type_info": "Timestamptz"
20
+
}
21
+
],
22
+
"parameters": {
23
+
"Left": [
24
+
"Text"
25
+
]
26
+
},
27
+
"nullable": [
28
+
false,
29
+
false,
30
+
true
31
+
]
32
+
},
33
+
"hash": "a23a390659616779d7dbceaa3b5d5171e70fa25e3b8393e142cebcbff752f0f5"
34
+
}
+22
.sqlx/query-a802d7d860f263eace39ce82bb27b633cec7287c1cc177f0e1d47ec6571564d5.json
+22
.sqlx/query-a802d7d860f263eace39ce82bb27b633cec7287c1cc177f0e1d47ec6571564d5.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT token FROM account_deletion_requests WHERE did = $1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "token",
9
+
"type_info": "Text"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Text"
15
+
]
16
+
},
17
+
"nullable": [
18
+
false
19
+
]
20
+
},
21
+
"hash": "a802d7d860f263eace39ce82bb27b633cec7287c1cc177f0e1d47ec6571564d5"
22
+
}
+60
.sqlx/query-b0fca342e85dea89a06b4fee144cae4825dec587b1387f0fee401458aea2a2e5.json
+60
.sqlx/query-b0fca342e85dea89a06b4fee144cae4825dec587b1387f0fee401458aea2a2e5.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "\n SELECT\n recipient, subject, body,\n comms_type as \"comms_type: CommsType\"\n FROM comms_queue\n WHERE id = $1\n ",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "recipient",
9
+
"type_info": "Text"
10
+
},
11
+
{
12
+
"ordinal": 1,
13
+
"name": "subject",
14
+
"type_info": "Text"
15
+
},
16
+
{
17
+
"ordinal": 2,
18
+
"name": "body",
19
+
"type_info": "Text"
20
+
},
21
+
{
22
+
"ordinal": 3,
23
+
"name": "comms_type: CommsType",
24
+
"type_info": {
25
+
"Custom": {
26
+
"name": "comms_type",
27
+
"kind": {
28
+
"Enum": [
29
+
"welcome",
30
+
"email_verification",
31
+
"password_reset",
32
+
"email_update",
33
+
"account_deletion",
34
+
"admin_email",
35
+
"plc_operation",
36
+
"two_factor_code",
37
+
"channel_verification",
38
+
"passkey_recovery",
39
+
"legacy_login_alert",
40
+
"migration_verification"
41
+
]
42
+
}
43
+
}
44
+
}
45
+
}
46
+
],
47
+
"parameters": {
48
+
"Left": [
49
+
"Uuid"
50
+
]
51
+
},
52
+
"nullable": [
53
+
false,
54
+
true,
55
+
false,
56
+
false
57
+
]
58
+
},
59
+
"hash": "b0fca342e85dea89a06b4fee144cae4825dec587b1387f0fee401458aea2a2e5"
60
+
}
+58
.sqlx/query-c18d02b918c122ed2a4681971906f3e26e0ff0bd8733e036b2c3bc59401bc3a9.json
+58
.sqlx/query-c18d02b918c122ed2a4681971906f3e26e0ff0bd8733e036b2c3bc59401bc3a9.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "\n SELECT ic.code, ic.available_uses, ic.disabled, ic.for_account, ic.created_at,\n ic.created_by_user, u.did as created_by\n FROM invite_codes ic\n JOIN users u ON ic.created_by_user = u.id\n WHERE ic.created_by_user = ANY($1)\n ",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "code",
9
+
"type_info": "Text"
10
+
},
11
+
{
12
+
"ordinal": 1,
13
+
"name": "available_uses",
14
+
"type_info": "Int4"
15
+
},
16
+
{
17
+
"ordinal": 2,
18
+
"name": "disabled",
19
+
"type_info": "Bool"
20
+
},
21
+
{
22
+
"ordinal": 3,
23
+
"name": "for_account",
24
+
"type_info": "Text"
25
+
},
26
+
{
27
+
"ordinal": 4,
28
+
"name": "created_at",
29
+
"type_info": "Timestamptz"
30
+
},
31
+
{
32
+
"ordinal": 5,
33
+
"name": "created_by_user",
34
+
"type_info": "Uuid"
35
+
},
36
+
{
37
+
"ordinal": 6,
38
+
"name": "created_by",
39
+
"type_info": "Text"
40
+
}
41
+
],
42
+
"parameters": {
43
+
"Left": [
44
+
"UuidArray"
45
+
]
46
+
},
47
+
"nullable": [
48
+
false,
49
+
false,
50
+
true,
51
+
false,
52
+
false,
53
+
false,
54
+
false
55
+
]
56
+
},
57
+
"hash": "c18d02b918c122ed2a4681971906f3e26e0ff0bd8733e036b2c3bc59401bc3a9"
58
+
}
+22
.sqlx/query-cd3b8098ad4c1056c1d23acd8a6b29f7abfe18ee6f559bd94ab16274b1cfdfee.json
+22
.sqlx/query-cd3b8098ad4c1056c1d23acd8a6b29f7abfe18ee6f559bd94ab16274b1cfdfee.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT password_reset_code FROM users WHERE email = $1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "password_reset_code",
9
+
"type_info": "Text"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Text"
15
+
]
16
+
},
17
+
"nullable": [
18
+
true
19
+
]
20
+
},
21
+
"hash": "cd3b8098ad4c1056c1d23acd8a6b29f7abfe18ee6f559bd94ab16274b1cfdfee"
22
+
}
+22
.sqlx/query-cda68f9b6c60295a196fc853b70ec5fd51a8ffaa2bac5942c115c99d1cbcafa3.json
+22
.sqlx/query-cda68f9b6c60295a196fc853b70ec5fd51a8ffaa2bac5942c115c99d1cbcafa3.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT COUNT(*) as \"count!\" FROM plc_operation_tokens t JOIN users u ON t.user_id = u.id WHERE u.did = $1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "count!",
9
+
"type_info": "Int8"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Text"
15
+
]
16
+
},
17
+
"nullable": [
18
+
null
19
+
]
20
+
},
21
+
"hash": "cda68f9b6c60295a196fc853b70ec5fd51a8ffaa2bac5942c115c99d1cbcafa3"
22
+
}
+14
.sqlx/query-d529d6dc9858c1da360f0417e94a3b40041b043bae57e95002d4bf5df46a4ab4.json
+14
.sqlx/query-d529d6dc9858c1da360f0417e94a3b40041b043bae57e95002d4bf5df46a4ab4.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "UPDATE account_deletion_requests SET expires_at = NOW() - INTERVAL '1 hour' WHERE token = $1",
4
+
"describe": {
5
+
"columns": [],
6
+
"parameters": {
7
+
"Left": [
8
+
"Text"
9
+
]
10
+
},
11
+
"nullable": []
12
+
},
13
+
"hash": "d529d6dc9858c1da360f0417e94a3b40041b043bae57e95002d4bf5df46a4ab4"
14
+
}
+34
.sqlx/query-d9affa3cc6ee8d007d58bc1a390aaf01b84e35656b15db2a74aeee9fedd58c9a.json
+34
.sqlx/query-d9affa3cc6ee8d007d58bc1a390aaf01b84e35656b15db2a74aeee9fedd58c9a.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "\n SELECT icu.code, u.did as used_by, icu.used_at\n FROM invite_code_uses icu\n JOIN users u ON icu.used_by_user = u.id\n WHERE icu.code = ANY($1)\n ",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "code",
9
+
"type_info": "Text"
10
+
},
11
+
{
12
+
"ordinal": 1,
13
+
"name": "used_by",
14
+
"type_info": "Text"
15
+
},
16
+
{
17
+
"ordinal": 2,
18
+
"name": "used_at",
19
+
"type_info": "Timestamptz"
20
+
}
21
+
],
22
+
"parameters": {
23
+
"Left": [
24
+
"TextArray"
25
+
]
26
+
},
27
+
"nullable": [
28
+
false,
29
+
false,
30
+
false
31
+
]
32
+
},
33
+
"hash": "d9affa3cc6ee8d007d58bc1a390aaf01b84e35656b15db2a74aeee9fedd58c9a"
34
+
}
-16
.sqlx/query-e155d44cb2bd48ff141a27c51f34dfebeb628992a03f4bd6b10ade365ef8dc5e.json
-16
.sqlx/query-e155d44cb2bd48ff141a27c51f34dfebeb628992a03f4bd6b10ade365ef8dc5e.json
···
1
-
{
2
-
"db_name": "PostgreSQL",
3
-
"query": "\n INSERT INTO record_blobs (repo_id, record_uri, blob_cid)\n VALUES ($1, $2, $3)\n ON CONFLICT (repo_id, record_uri, blob_cid) DO NOTHING\n ",
4
-
"describe": {
5
-
"columns": [],
6
-
"parameters": {
7
-
"Left": [
8
-
"Uuid",
9
-
"Text",
10
-
"Text"
11
-
]
12
-
},
13
-
"nullable": []
14
-
},
15
-
"hash": "e155d44cb2bd48ff141a27c51f34dfebeb628992a03f4bd6b10ade365ef8dc5e"
16
-
}
+22
.sqlx/query-e20cbe2a939d790aaea718b084a80d8ede655ba1cc0fd4346d7e91d6de7d6cf3.json
+22
.sqlx/query-e20cbe2a939d790aaea718b084a80d8ede655ba1cc0fd4346d7e91d6de7d6cf3.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT COUNT(*) FROM comms_queue WHERE user_id = $1 AND comms_type = 'password_reset'",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "count",
9
+
"type_info": "Int8"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Uuid"
15
+
]
16
+
},
17
+
"nullable": [
18
+
null
19
+
]
20
+
},
21
+
"hash": "e20cbe2a939d790aaea718b084a80d8ede655ba1cc0fd4346d7e91d6de7d6cf3"
22
+
}
+22
.sqlx/query-e64cd36284d10ab7f3d9f6959975a1a627809f444b0faff7e611d985f31b90e9.json
+22
.sqlx/query-e64cd36284d10ab7f3d9f6959975a1a627809f444b0faff7e611d985f31b90e9.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT used_at FROM reserved_signing_keys WHERE public_key_did_key = $1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "used_at",
9
+
"type_info": "Timestamptz"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Text"
15
+
]
16
+
},
17
+
"nullable": [
18
+
true
19
+
]
20
+
},
21
+
"hash": "e64cd36284d10ab7f3d9f6959975a1a627809f444b0faff7e611d985f31b90e9"
22
+
}
+16
.sqlx/query-e80ef36b0354bb73568425b5e8787d45909ee764b80c0e9e8cd29871bd1e46d3.json
+16
.sqlx/query-e80ef36b0354bb73568425b5e8787d45909ee764b80c0e9e8cd29871bd1e46d3.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "\n INSERT INTO record_blobs (repo_id, record_uri, blob_cid)\n SELECT $1, record_uri, blob_cid\n FROM UNNEST($2::text[], $3::text[]) AS t(record_uri, blob_cid)\n ON CONFLICT (repo_id, record_uri, blob_cid) DO NOTHING\n ",
4
+
"describe": {
5
+
"columns": [],
6
+
"parameters": {
7
+
"Left": [
8
+
"Uuid",
9
+
"TextArray",
10
+
"TextArray"
11
+
]
12
+
},
13
+
"nullable": []
14
+
},
15
+
"hash": "e80ef36b0354bb73568425b5e8787d45909ee764b80c0e9e8cd29871bd1e46d3"
16
+
}
+22
.sqlx/query-f26c13023b47b908ec96da2e6b8bf8b34ca6a2246c20fc96f76f0e95530762a7.json
+22
.sqlx/query-f26c13023b47b908ec96da2e6b8bf8b34ca6a2246c20fc96f76f0e95530762a7.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT email FROM users WHERE did = $1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "email",
9
+
"type_info": "Text"
10
+
}
11
+
],
12
+
"parameters": {
13
+
"Left": [
14
+
"Text"
15
+
]
16
+
},
17
+
"nullable": [
18
+
true
19
+
]
20
+
},
21
+
"hash": "f26c13023b47b908ec96da2e6b8bf8b34ca6a2246c20fc96f76f0e95530762a7"
22
+
}
+14
.sqlx/query-f29da3bdfbbc547b339b4cdb059fac26435b0feec65cf1c56f851d1c4d6b1814.json
+14
.sqlx/query-f29da3bdfbbc547b339b4cdb059fac26435b0feec65cf1c56f851d1c4d6b1814.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "UPDATE users SET is_admin = TRUE WHERE did = $1",
4
+
"describe": {
5
+
"columns": [],
6
+
"parameters": {
7
+
"Left": [
8
+
"Text"
9
+
]
10
+
},
11
+
"nullable": []
12
+
},
13
+
"hash": "f29da3bdfbbc547b339b4cdb059fac26435b0feec65cf1c56f851d1c4d6b1814"
14
+
}
+28
.sqlx/query-f7af28963099aec12cf1d4f8a9a03699bb3a90f39bc9c4c0f738a37827e8f382.json
+28
.sqlx/query-f7af28963099aec12cf1d4f8a9a03699bb3a90f39bc9c4c0f738a37827e8f382.json
···
1
+
{
2
+
"db_name": "PostgreSQL",
3
+
"query": "SELECT password_reset_code, password_reset_code_expires_at FROM users WHERE email = $1",
4
+
"describe": {
5
+
"columns": [
6
+
{
7
+
"ordinal": 0,
8
+
"name": "password_reset_code",
9
+
"type_info": "Text"
10
+
},
11
+
{
12
+
"ordinal": 1,
13
+
"name": "password_reset_code_expires_at",
14
+
"type_info": "Timestamptz"
15
+
}
16
+
],
17
+
"parameters": {
18
+
"Left": [
19
+
"Text"
20
+
]
21
+
},
22
+
"nullable": [
23
+
true,
24
+
true
25
+
]
26
+
},
27
+
"hash": "f7af28963099aec12cf1d4f8a9a03699bb3a90f39bc9c4c0f738a37827e8f382"
28
+
}
+3
-2
justfile
+3
-2
justfile
···
19
19
cargo fmt -- --check
20
20
lint: fmt-check clippy
21
21
22
-
test-all *args:
23
-
./scripts/run-tests.sh {{args}}
22
+
test-unit:
23
+
SQLX_OFFLINE=true cargo test --test dpop_unit --test validation_edge_cases --test scope_edge_cases
24
24
25
25
test-auth:
26
26
./scripts/run-tests.sh --test oauth --test oauth_lifecycle --test oauth_scopes --test oauth_security --test oauth_client_metadata --test jwt_security --test session_management --test change_password --test password_reset
···
50
50
./scripts/run-tests.sh --test actor --test commit_signing --test image_processing --test lifecycle_social --test notifications --test server --test signing_key --test verify_live_commit
51
51
52
52
test *args:
53
+
@just test-unit
53
54
./scripts/run-tests.sh {{args}}
54
55
55
56
test-one name:
+23
migrations/20260102_composite_indexes.sql
+23
migrations/20260102_composite_indexes.sql
···
1
+
CREATE INDEX IF NOT EXISTS idx_session_tokens_did_created_at
2
+
ON session_tokens(did, created_at DESC);
3
+
4
+
CREATE INDEX IF NOT EXISTS idx_oauth_token_did_expires_at
5
+
ON oauth_token(did, expires_at DESC);
6
+
7
+
CREATE INDEX IF NOT EXISTS idx_oauth_token_did_created_at
8
+
ON oauth_token(did, created_at DESC);
9
+
10
+
CREATE INDEX IF NOT EXISTS idx_session_tokens_did_refresh_expires
11
+
ON session_tokens(did, refresh_expires_at DESC);
12
+
13
+
CREATE INDEX IF NOT EXISTS idx_app_passwords_user_created
14
+
ON app_passwords(user_id, created_at DESC);
15
+
16
+
CREATE INDEX IF NOT EXISTS idx_records_repo_collection_rkey
17
+
ON records(repo_id, collection, rkey);
18
+
19
+
CREATE INDEX IF NOT EXISTS idx_passkeys_did_created
20
+
ON passkeys(did, created_at DESC);
21
+
22
+
CREATE INDEX IF NOT EXISTS idx_backup_codes_did_unused
23
+
ON backup_codes(did) WHERE used_at IS NULL;
+125
-31
src/api/admin/account/info.rs
+125
-31
src/api/admin/account/info.rs
···
217
217
_auth: BearerAuthAdmin,
218
218
RawQuery(raw_query): RawQuery,
219
219
) -> Response {
220
-
let dids = crate::util::parse_repeated_query_param(raw_query.as_deref(), "dids");
220
+
let dids: Vec<String> = crate::util::parse_repeated_query_param(raw_query.as_deref(), "dids")
221
+
.into_iter()
222
+
.filter(|d| !d.is_empty())
223
+
.collect();
221
224
if dids.is_empty() {
222
225
return (
223
226
StatusCode::BAD_REQUEST,
···
225
228
)
226
229
.into_response();
227
230
}
228
-
let mut infos = Vec::new();
229
-
for did in &dids {
230
-
if did.is_empty() {
231
-
continue;
231
+
let users = match sqlx::query!(
232
+
r#"
233
+
SELECT id, did, handle, email, created_at, invites_disabled, email_verified, deactivated_at
234
+
FROM users
235
+
WHERE did = ANY($1)
236
+
"#,
237
+
&dids
238
+
)
239
+
.fetch_all(&state.db)
240
+
.await
241
+
{
242
+
Ok(rows) => rows,
243
+
Err(e) => {
244
+
error!("Failed to fetch account infos: {:?}", e);
245
+
return (
246
+
StatusCode::INTERNAL_SERVER_ERROR,
247
+
Json(json!({"error": "InternalError"})),
248
+
)
249
+
.into_response();
232
250
}
233
-
let result = sqlx::query!(
251
+
};
252
+
253
+
let user_ids: Vec<uuid::Uuid> = users.iter().map(|u| u.id).collect();
254
+
255
+
let all_invite_codes = sqlx::query!(
256
+
r#"
257
+
SELECT ic.code, ic.available_uses, ic.disabled, ic.for_account, ic.created_at,
258
+
ic.created_by_user, u.did as created_by
259
+
FROM invite_codes ic
260
+
JOIN users u ON ic.created_by_user = u.id
261
+
WHERE ic.created_by_user = ANY($1)
262
+
"#,
263
+
&user_ids
264
+
)
265
+
.fetch_all(&state.db)
266
+
.await
267
+
.unwrap_or_default();
268
+
269
+
let all_codes: Vec<String> = all_invite_codes.iter().map(|c| c.code.clone()).collect();
270
+
let all_invite_uses = if !all_codes.is_empty() {
271
+
sqlx::query!(
234
272
r#"
235
-
SELECT id, did, handle, email, created_at, invites_disabled, email_verified, deactivated_at
236
-
FROM users
237
-
WHERE did = $1
273
+
SELECT icu.code, u.did as used_by, icu.used_at
274
+
FROM invite_code_uses icu
275
+
JOIN users u ON icu.used_by_user = u.id
276
+
WHERE icu.code = ANY($1)
238
277
"#,
239
-
did
278
+
&all_codes
240
279
)
241
-
.fetch_optional(&state.db)
242
-
.await;
243
-
if let Ok(Some(row)) = result {
244
-
let invited_by = get_invited_by(&state.db, row.id).await;
245
-
let invites = get_invites_for_user(&state.db, row.id).await;
246
-
infos.push(AccountInfo {
247
-
did: row.did,
248
-
handle: row.handle,
249
-
email: row.email,
250
-
indexed_at: row.created_at.to_rfc3339(),
251
-
invite_note: None,
252
-
invites_disabled: row.invites_disabled.unwrap_or(false),
253
-
email_confirmed_at: if row.email_verified {
254
-
Some(row.created_at.to_rfc3339())
255
-
} else {
256
-
None
257
-
},
258
-
deactivated_at: row.deactivated_at.map(|dt| dt.to_rfc3339()),
259
-
invited_by,
260
-
invites,
280
+
.fetch_all(&state.db)
281
+
.await
282
+
.unwrap_or_default()
283
+
} else {
284
+
Vec::new()
285
+
};
286
+
287
+
let invited_by_map: std::collections::HashMap<uuid::Uuid, String> = sqlx::query!(
288
+
r#"
289
+
SELECT icu.used_by_user, icu.code
290
+
FROM invite_code_uses icu
291
+
WHERE icu.used_by_user = ANY($1)
292
+
"#,
293
+
&user_ids
294
+
)
295
+
.fetch_all(&state.db)
296
+
.await
297
+
.unwrap_or_default()
298
+
.into_iter()
299
+
.map(|r| (r.used_by_user, r.code))
300
+
.collect();
301
+
302
+
let mut uses_by_code: std::collections::HashMap<String, Vec<InviteCodeUseInfo>> =
303
+
std::collections::HashMap::new();
304
+
for u in all_invite_uses {
305
+
uses_by_code
306
+
.entry(u.code.clone())
307
+
.or_default()
308
+
.push(InviteCodeUseInfo {
309
+
used_by: u.used_by,
310
+
used_at: u.used_at.to_rfc3339(),
261
311
});
262
-
}
312
+
}
313
+
314
+
let mut codes_by_user: std::collections::HashMap<uuid::Uuid, Vec<InviteCodeInfo>> =
315
+
std::collections::HashMap::new();
316
+
let mut code_info_map: std::collections::HashMap<String, InviteCodeInfo> =
317
+
std::collections::HashMap::new();
318
+
for ic in all_invite_codes {
319
+
let info = InviteCodeInfo {
320
+
code: ic.code.clone(),
321
+
available: ic.available_uses,
322
+
disabled: ic.disabled.unwrap_or(false),
323
+
for_account: ic.for_account,
324
+
created_by: ic.created_by,
325
+
created_at: ic.created_at.to_rfc3339(),
326
+
uses: uses_by_code.get(&ic.code).cloned().unwrap_or_default(),
327
+
};
328
+
code_info_map.insert(ic.code.clone(), info.clone());
329
+
codes_by_user
330
+
.entry(ic.created_by_user)
331
+
.or_default()
332
+
.push(info);
333
+
}
334
+
335
+
let mut infos = Vec::with_capacity(users.len());
336
+
for row in users {
337
+
let invited_by = invited_by_map
338
+
.get(&row.id)
339
+
.and_then(|code| code_info_map.get(code).cloned());
340
+
let invites = codes_by_user.get(&row.id).cloned();
341
+
infos.push(AccountInfo {
342
+
did: row.did,
343
+
handle: row.handle,
344
+
email: row.email,
345
+
indexed_at: row.created_at.to_rfc3339(),
346
+
invite_note: None,
347
+
invites_disabled: row.invites_disabled.unwrap_or(false),
348
+
email_confirmed_at: if row.email_verified {
349
+
Some(row.created_at.to_rfc3339())
350
+
} else {
351
+
None
352
+
},
353
+
deactivated_at: row.deactivated_at.map(|dt| dt.to_rfc3339()),
354
+
invited_by,
355
+
invites,
356
+
});
263
357
}
264
358
(StatusCode::OK, Json(GetAccountInfosOutput { infos })).into_response()
265
359
}
+1
-1
src/api/delegation.rs
+1
-1
src/api/delegation.rs
···
726
726
}
727
727
};
728
728
729
-
let plc_client = crate::plc::PlcClient::new(None);
729
+
let plc_client = crate::plc::PlcClient::with_cache(None, Some(state.cache.clone()));
730
730
if let Err(e) = plc_client
731
731
.send_operation(&genesis_result.did, &genesis_result.signed_operation)
732
732
.await
+23
-13
src/api/identity/account.rs
+23
-13
src/api/identity/account.rs
···
451
451
.into_response();
452
452
}
453
453
};
454
-
let plc_client = PlcClient::new(None);
454
+
let plc_client = PlcClient::with_cache(None, Some(state.cache.clone()));
455
455
if let Err(e) = plc_client
456
456
.send_operation(&genesis_result.did, &genesis_result.signed_operation)
457
457
.await
···
488
488
.into_response();
489
489
}
490
490
};
491
-
let plc_client = PlcClient::new(None);
491
+
let plc_client = PlcClient::with_cache(None, Some(state.cache.clone()));
492
492
if let Err(e) = plc_client
493
493
.send_operation(&genesis_result.did, &genesis_result.signed_operation)
494
494
.await
···
745
745
.into_response();
746
746
}
747
747
748
-
let password_hash = match hash(&input.password, DEFAULT_COST) {
749
-
Ok(h) => h,
750
-
Err(e) => {
751
-
error!("Error hashing password: {:?}", e);
752
-
return (
753
-
StatusCode::INTERNAL_SERVER_ERROR,
754
-
Json(json!({"error": "InternalError"})),
755
-
)
756
-
.into_response();
757
-
}
758
-
};
748
+
let password_clone = input.password.clone();
749
+
let password_hash =
750
+
match tokio::task::spawn_blocking(move || hash(&password_clone, DEFAULT_COST)).await {
751
+
Ok(Ok(h)) => h,
752
+
Ok(Err(e)) => {
753
+
error!("Error hashing password: {:?}", e);
754
+
return (
755
+
StatusCode::INTERNAL_SERVER_ERROR,
756
+
Json(json!({"error": "InternalError"})),
757
+
)
758
+
.into_response();
759
+
}
760
+
Err(e) => {
761
+
error!("Failed to spawn blocking task: {:?}", e);
762
+
return (
763
+
StatusCode::INTERNAL_SERVER_ERROR,
764
+
Json(json!({"error": "InternalError"})),
765
+
)
766
+
.into_response();
767
+
}
768
+
};
759
769
let is_first_user = sqlx::query_scalar!("SELECT COUNT(*) as count FROM users")
760
770
.fetch_one(&mut *tx)
761
771
.await
+2
-6
src/api/identity/did.rs
+2
-6
src/api/identity/did.rs
···
10
10
use base64::Engine;
11
11
use k256::SecretKey;
12
12
use k256::elliptic_curve::sec1::ToEncodedPoint;
13
-
use reqwest;
14
13
use serde::{Deserialize, Serialize};
15
14
use serde_json::json;
16
15
use tracing::{error, warn};
···
504
503
let path = parts[3..].join("/");
505
504
format!("{}://{}/{}/did.json", scheme, domain, path)
506
505
};
507
-
let client = reqwest::Client::builder()
508
-
.timeout(std::time::Duration::from_secs(5))
509
-
.build()
510
-
.map_err(|e| format!("Failed to create client: {}", e))?;
506
+
let client = crate::api::proxy_client::did_resolution_client();
511
507
let resp = client
512
508
.get(&url)
513
509
.send()
···
926
922
};
927
923
let key_bytes = crate::config::decrypt_key(&user_row.key_bytes, user_row.encryption_version)?;
928
924
let signing_key = k256::ecdsa::SigningKey::from_slice(&key_bytes)?;
929
-
let plc_client = crate::plc::PlcClient::new(None);
925
+
let plc_client = crate::plc::PlcClient::with_cache(None, Some(state.cache.clone()));
930
926
let last_op = plc_client.get_last_op(did).await?;
931
927
let new_also_known_as = vec![format!("at://{}", new_handle)];
932
928
let update_op =
+1
-1
src/api/identity/plc/sign.rs
+1
-1
src/api/identity/plc/sign.rs
···
174
174
.into_response();
175
175
}
176
176
};
177
-
let plc_client = PlcClient::new(None);
177
+
let plc_client = PlcClient::with_cache(None, Some(state.cache.clone()));
178
178
let did_clone = did.clone();
179
179
let result: Result<PlcOpOrTombstone, CircuitBreakerError<PlcError>> =
180
180
with_circuit_breaker(&state.circuit_breakers.plc_directory, || async {
+1
-1
src/api/identity/plc/submit.rs
+1
-1
src/api/identity/plc/submit.rs
···
184
184
.into_response();
185
185
}
186
186
}
187
-
let plc_client = PlcClient::new(None);
187
+
let plc_client = PlcClient::with_cache(None, Some(state.cache.clone()));
188
188
let operation_clone = input.operation.clone();
189
189
let did_clone = did.clone();
190
190
let result: Result<(), CircuitBreakerError<PlcError>> =
+31
src/api/proxy_client.rs
+31
src/api/proxy_client.rs
···
10
10
pub const MAX_RESPONSE_SIZE: u64 = 10 * 1024 * 1024;
11
11
12
12
static PROXY_CLIENT: OnceLock<Client> = OnceLock::new();
13
+
static DID_RESOLUTION_CLIENT: OnceLock<Client> = OnceLock::new();
14
+
static HANDLE_RESOLUTION_CLIENT: OnceLock<Client> = OnceLock::new();
13
15
14
16
pub fn proxy_client() -> &'static Client {
15
17
PROXY_CLIENT.get_or_init(|| {
···
22
24
.build()
23
25
.expect(
24
26
"Failed to build HTTP client - this indicates a TLS or system configuration issue",
27
+
)
28
+
})
29
+
}
30
+
31
+
pub fn did_resolution_client() -> &'static Client {
32
+
DID_RESOLUTION_CLIENT.get_or_init(|| {
33
+
ClientBuilder::new()
34
+
.timeout(Duration::from_secs(5))
35
+
.connect_timeout(DEFAULT_CONNECT_TIMEOUT)
36
+
.pool_max_idle_per_host(10)
37
+
.pool_idle_timeout(Duration::from_secs(90))
38
+
.build()
39
+
.expect(
40
+
"Failed to build DID resolution client - this indicates a TLS or system configuration issue",
41
+
)
42
+
})
43
+
}
44
+
45
+
pub fn handle_resolution_client() -> &'static Client {
46
+
HANDLE_RESOLUTION_CLIENT.get_or_init(|| {
47
+
ClientBuilder::new()
48
+
.timeout(Duration::from_secs(10))
49
+
.connect_timeout(DEFAULT_CONNECT_TIMEOUT)
50
+
.pool_max_idle_per_host(10)
51
+
.pool_idle_timeout(Duration::from_secs(90))
52
+
.redirect(reqwest::redirect::Policy::limited(5))
53
+
.build()
54
+
.expect(
55
+
"Failed to build handle resolution client - this indicates a TLS or system configuration issue",
25
56
)
26
57
})
27
58
}
+73
-39
src/api/repo/blob.rs
+73
-39
src/api/repo/blob.rs
···
2
2
use crate::delegation::{self, DelegationActionType};
3
3
use crate::state::AppState;
4
4
use crate::util::get_max_blob_size;
5
-
use axum::body::Bytes;
5
+
use axum::body::Body;
6
6
use axum::{
7
7
Json,
8
8
extract::{Query, State},
9
9
http::StatusCode,
10
10
response::{IntoResponse, Response},
11
11
};
12
+
use bytes::Bytes;
12
13
use cid::Cid;
14
+
use futures::StreamExt;
13
15
use multihash::Multihash;
14
16
use serde::{Deserialize, Serialize};
15
17
use serde_json::json;
16
-
use sha2::{Digest, Sha256};
17
-
use tracing::{debug, error};
18
+
use std::pin::Pin;
19
+
use tracing::{debug, error, info};
18
20
19
21
pub async fn upload_blob(
20
22
State(state): State<AppState>,
21
23
headers: axum::http::HeaderMap,
22
-
body: Bytes,
24
+
body: Body,
23
25
) -> Response {
24
26
let token = match crate::auth::extract_bearer_token_from_header(
25
27
headers.get("Authorization").and_then(|h| h.to_str().ok()),
···
106
108
.into_response();
107
109
}
108
110
109
-
let max_size = get_max_blob_size();
111
+
let mime_type = headers
112
+
.get("content-type")
113
+
.and_then(|h| h.to_str().ok())
114
+
.unwrap_or("application/octet-stream")
115
+
.to_string();
116
+
117
+
let user_query = sqlx::query!("SELECT id FROM users WHERE did = $1", did)
118
+
.fetch_optional(&state.db)
119
+
.await;
120
+
let user_id = match user_query {
121
+
Ok(Some(row)) => row.id,
122
+
_ => {
123
+
return (
124
+
StatusCode::INTERNAL_SERVER_ERROR,
125
+
Json(json!({"error": "InternalError"})),
126
+
)
127
+
.into_response();
128
+
}
129
+
};
110
130
111
-
if body.len() > max_size {
131
+
let temp_key = format!("temp/{}", uuid::Uuid::new_v4());
132
+
let max_size = get_max_blob_size() as u64;
133
+
134
+
let body_stream = body.into_data_stream();
135
+
let mapped_stream =
136
+
body_stream.map(|result| result.map_err(|e| std::io::Error::other(e.to_string())));
137
+
let pinned_stream: Pin<Box<dyn futures::Stream<Item = Result<Bytes, std::io::Error>> + Send>> =
138
+
Box::pin(mapped_stream);
139
+
140
+
info!("Starting streaming blob upload to temp key: {}", temp_key);
141
+
142
+
let upload_result = match state.blob_store.put_stream(&temp_key, pinned_stream).await {
143
+
Ok(result) => result,
144
+
Err(e) => {
145
+
error!("Failed to stream blob to storage: {:?}", e);
146
+
return (
147
+
StatusCode::INTERNAL_SERVER_ERROR,
148
+
Json(json!({"error": "InternalError", "message": "Failed to store blob"})),
149
+
)
150
+
.into_response();
151
+
}
152
+
};
153
+
154
+
let size = upload_result.size;
155
+
if size > max_size {
156
+
let _ = state.blob_store.delete(&temp_key).await;
112
157
return (
113
158
StatusCode::PAYLOAD_TOO_LARGE,
114
-
Json(json!({"error": "BlobTooLarge", "message": format!("Blob size {} exceeds maximum of {} bytes", body.len(), max_size)})),
159
+
Json(json!({"error": "BlobTooLarge", "message": format!("Blob size {} exceeds maximum of {} bytes", size, max_size)})),
115
160
)
116
161
.into_response();
117
162
}
118
-
let mime_type = headers
119
-
.get("content-type")
120
-
.and_then(|h| h.to_str().ok())
121
-
.unwrap_or("application/octet-stream")
122
-
.to_string();
123
-
let size = body.len() as i64;
124
-
let data = body.to_vec();
125
-
let mut hasher = Sha256::new();
126
-
hasher.update(&data);
127
-
let hash = hasher.finalize();
128
-
let multihash = match Multihash::wrap(0x12, &hash) {
163
+
164
+
let multihash = match Multihash::wrap(0x12, &upload_result.sha256_hash) {
129
165
Ok(mh) => mh,
130
166
Err(e) => {
167
+
let _ = state.blob_store.delete(&temp_key).await;
131
168
error!("Failed to create multihash for blob: {:?}", e);
132
169
return (
133
170
StatusCode::INTERNAL_SERVER_ERROR,
···
139
176
let cid = Cid::new_v1(0x55, multihash);
140
177
let cid_str = cid.to_string();
141
178
let storage_key = format!("blobs/{}", cid_str);
142
-
let user_query = sqlx::query!("SELECT id FROM users WHERE did = $1", did)
143
-
.fetch_optional(&state.db)
144
-
.await;
145
-
let user_id = match user_query {
146
-
Ok(Some(row)) => row.id,
147
-
_ => {
148
-
return (
149
-
StatusCode::INTERNAL_SERVER_ERROR,
150
-
Json(json!({"error": "InternalError"})),
151
-
)
152
-
.into_response();
153
-
}
154
-
};
179
+
180
+
info!(
181
+
"Blob upload complete: size={}, cid={}, copying to final location",
182
+
size, cid_str
183
+
);
184
+
155
185
let mut tx = match state.db.begin().await {
156
186
Ok(tx) => tx,
157
187
Err(e) => {
188
+
let _ = state.blob_store.delete(&temp_key).await;
158
189
error!("Failed to begin transaction: {:?}", e);
159
190
return (
160
191
StatusCode::INTERNAL_SERVER_ERROR,
···
163
194
.into_response();
164
195
}
165
196
};
197
+
166
198
let insert = sqlx::query!(
167
199
"INSERT INTO blobs (cid, mime_type, size_bytes, created_by_user, storage_key) VALUES ($1, $2, $3, $4, $5) ON CONFLICT (cid) DO NOTHING RETURNING cid",
168
200
cid_str,
169
201
mime_type,
170
-
size,
202
+
size as i64,
171
203
user_id,
172
204
storage_key
173
205
)
174
206
.fetch_optional(&mut *tx)
175
207
.await;
208
+
176
209
let was_inserted = match insert {
177
210
Ok(Some(_)) => true,
178
211
Ok(None) => false,
179
212
Err(e) => {
213
+
let _ = state.blob_store.delete(&temp_key).await;
180
214
error!("Failed to insert blob record: {:?}", e);
181
215
return (
182
216
StatusCode::INTERNAL_SERVER_ERROR,
···
185
219
.into_response();
186
220
}
187
221
};
188
-
if was_inserted
189
-
&& let Err(e) = state
190
-
.blob_store
191
-
.put_bytes(&storage_key, bytes::Bytes::from(data))
192
-
.await
193
-
{
194
-
error!("Failed to upload blob to storage: {:?}", e);
222
+
223
+
if was_inserted && let Err(e) = state.blob_store.copy(&temp_key, &storage_key).await {
224
+
let _ = state.blob_store.delete(&temp_key).await;
225
+
error!("Failed to copy blob to final location: {:?}", e);
195
226
return (
196
227
StatusCode::INTERNAL_SERVER_ERROR,
197
228
Json(json!({"error": "InternalError", "message": "Failed to store blob"})),
198
229
)
199
230
.into_response();
200
231
}
232
+
233
+
let _ = state.blob_store.delete(&temp_key).await;
234
+
201
235
if let Err(e) = tx.commit().await {
202
236
error!("Failed to commit blob transaction: {:?}", e);
203
237
if was_inserted && let Err(cleanup_err) = state.blob_store.delete(&storage_key).await {
+2
-2
src/api/repo/import.rs
+2
-2
src/api/repo/import.rs
···
16
16
use serde_json::json;
17
17
use tracing::{debug, error, info, warn};
18
18
19
-
const DEFAULT_MAX_IMPORT_SIZE: usize = 100 * 1024 * 1024;
20
-
const DEFAULT_MAX_BLOCKS: usize = 50000;
19
+
const DEFAULT_MAX_IMPORT_SIZE: usize = 1024 * 1024 * 1024;
20
+
const DEFAULT_MAX_BLOCKS: usize = 500000;
21
21
22
22
pub async fn import_repo(
23
23
State(state): State<AppState>,
+10
-6
src/api/server/account_status.rs
+10
-6
src/api/server/account_status.rs
···
1
1
use crate::api::ApiError;
2
+
use crate::cache::Cache;
2
3
use crate::plc::PlcClient;
3
4
use crate::state::AppState;
4
5
use axum::{
···
16
17
use serde::{Deserialize, Serialize};
17
18
use serde_json::json;
18
19
use std::str::FromStr;
20
+
use std::sync::Arc;
19
21
use tracing::{error, info, warn};
20
22
use uuid::Uuid;
21
23
···
140
142
.await
141
143
.unwrap_or(Some(0))
142
144
.unwrap_or(0);
143
-
let valid_did = is_valid_did_for_service(&state.db, &did).await;
145
+
let valid_did = is_valid_did_for_service(&state.db, &state.cache, &did).await;
144
146
(
145
147
StatusCode::OK,
146
148
Json(CheckAccountStatusOutput {
···
158
160
.into_response()
159
161
}
160
162
161
-
async fn is_valid_did_for_service(db: &sqlx::PgPool, did: &str) -> bool {
162
-
assert_valid_did_document_for_service(db, did, false)
163
+
async fn is_valid_did_for_service(db: &sqlx::PgPool, cache: &Arc<dyn Cache>, did: &str) -> bool {
164
+
assert_valid_did_document_for_service(db, cache, did, false)
163
165
.await
164
166
.is_ok()
165
167
}
166
168
167
169
async fn assert_valid_did_document_for_service(
168
170
db: &sqlx::PgPool,
171
+
cache: &Arc<dyn Cache>,
169
172
did: &str,
170
173
with_retry: bool,
171
174
) -> Result<(), (StatusCode, Json<serde_json::Value>)> {
···
173
176
let expected_endpoint = format!("https://{}", hostname);
174
177
175
178
if did.starts_with("did:plc:") {
176
-
let plc_client = PlcClient::new(None);
179
+
let plc_client = PlcClient::with_cache(None, Some(cache.clone()));
177
180
178
181
let max_attempts = if with_retry { 5 } else { 1 };
179
182
let mut last_error = None;
···
308
311
}
309
312
}
310
313
} else if let Some(host_and_path) = did.strip_prefix("did:web:") {
311
-
let client = reqwest::Client::new();
314
+
let client = crate::api::proxy_client::did_resolution_client();
312
315
let decoded = host_and_path.replace("%3A", ":");
313
316
let parts: Vec<&str> = decoded.split(':').collect();
314
317
let (host, path_parts) = if parts.len() > 1 && parts[1].chars().all(|c| c.is_ascii_digit())
···
438
441
did
439
442
);
440
443
let did_validation_start = std::time::Instant::now();
441
-
if let Err((status, json)) = assert_valid_did_document_for_service(&state.db, &did, true).await
444
+
if let Err((status, json)) =
445
+
assert_valid_did_document_for_service(&state.db, &state.cache, &did, true).await
442
446
{
443
447
info!(
444
448
"[MIGRATION] activateAccount: DID document validation FAILED for {} (took {:?})",
+12
-3
src/api/server/app_password.rs
+12
-3
src/api/server/app_password.rs
···
158
158
})
159
159
.collect::<Vec<String>>()
160
160
.join("-");
161
-
let password_hash = match bcrypt::hash(&password, bcrypt::DEFAULT_COST) {
162
-
Ok(h) => h,
161
+
let password_clone = password.clone();
162
+
let password_hash = match tokio::task::spawn_blocking(move || {
163
+
bcrypt::hash(&password_clone, bcrypt::DEFAULT_COST)
164
+
})
165
+
.await
166
+
{
167
+
Ok(Ok(h)) => h,
168
+
Ok(Err(e)) => {
169
+
error!("Failed to hash password: {:?}", e);
170
+
return ApiError::InternalError.into_response();
171
+
}
163
172
Err(e) => {
164
-
error!("Failed to hash password: {:?}", e);
173
+
error!("Failed to spawn blocking task: {:?}", e);
165
174
return ApiError::InternalError.into_response();
166
175
}
167
176
};
+1
-1
src/api/server/passkey_account.rs
+1
-1
src/api/server/passkey_account.rs
···
436
436
}
437
437
};
438
438
439
-
let plc_client = crate::plc::PlcClient::new(None);
439
+
let plc_client = crate::plc::PlcClient::with_cache(None, Some(state.cache.clone()));
440
440
if let Err(e) = plc_client
441
441
.send_operation(&genesis_result.did, &genesis_result.signed_operation)
442
442
.await
+42
-22
src/api/server/password.rs
+42
-22
src/api/server/password.rs
···
226
226
)
227
227
.into_response();
228
228
}
229
-
let password_hash = match hash(password, DEFAULT_COST) {
230
-
Ok(h) => h,
231
-
Err(e) => {
232
-
error!("Failed to hash password: {:?}", e);
233
-
return (
234
-
StatusCode::INTERNAL_SERVER_ERROR,
235
-
Json(json!({"error": "InternalError"})),
236
-
)
237
-
.into_response();
238
-
}
239
-
};
229
+
let password_clone = password.to_string();
230
+
let password_hash =
231
+
match tokio::task::spawn_blocking(move || hash(password_clone, DEFAULT_COST)).await {
232
+
Ok(Ok(h)) => h,
233
+
Ok(Err(e)) => {
234
+
error!("Failed to hash password: {:?}", e);
235
+
return (
236
+
StatusCode::INTERNAL_SERVER_ERROR,
237
+
Json(json!({"error": "InternalError"})),
238
+
)
239
+
.into_response();
240
+
}
241
+
Err(e) => {
242
+
error!("Failed to spawn blocking task: {:?}", e);
243
+
return (
244
+
StatusCode::INTERNAL_SERVER_ERROR,
245
+
Json(json!({"error": "InternalError"})),
246
+
)
247
+
.into_response();
248
+
}
249
+
};
240
250
let mut tx = match state.db.begin().await {
241
251
Ok(tx) => tx,
242
252
Err(e) => {
···
409
419
)
410
420
.into_response();
411
421
}
412
-
let new_hash = match hash(new_password, DEFAULT_COST) {
413
-
Ok(h) => h,
414
-
Err(e) => {
415
-
error!("Failed to hash password: {:?}", e);
416
-
return (
417
-
StatusCode::INTERNAL_SERVER_ERROR,
418
-
Json(json!({"error": "InternalError"})),
419
-
)
420
-
.into_response();
421
-
}
422
-
};
422
+
let new_password_clone = new_password.to_string();
423
+
let new_hash =
424
+
match tokio::task::spawn_blocking(move || hash(new_password_clone, DEFAULT_COST)).await {
425
+
Ok(Ok(h)) => h,
426
+
Ok(Err(e)) => {
427
+
error!("Failed to hash password: {:?}", e);
428
+
return (
429
+
StatusCode::INTERNAL_SERVER_ERROR,
430
+
Json(json!({"error": "InternalError"})),
431
+
)
432
+
.into_response();
433
+
}
434
+
Err(e) => {
435
+
error!("Failed to spawn blocking task: {:?}", e);
436
+
return (
437
+
StatusCode::INTERNAL_SERVER_ERROR,
438
+
Json(json!({"error": "InternalError"})),
439
+
)
440
+
.into_response();
441
+
}
442
+
};
423
443
if let Err(e) = sqlx::query("UPDATE users SET password_hash = $1 WHERE id = $2")
424
444
.bind(&new_hash)
425
445
.bind(user_id)
+84
-14
src/auth/mod.rs
+84
-14
src/auth/mod.rs
···
35
35
36
36
const KEY_CACHE_TTL_SECS: u64 = 300;
37
37
const SESSION_CACHE_TTL_SECS: u64 = 60;
38
+
const USER_STATUS_CACHE_TTL_SECS: u64 = 60;
39
+
40
+
#[derive(Serialize, Deserialize)]
41
+
struct CachedUserStatus {
42
+
deactivated: bool,
43
+
takendown: bool,
44
+
is_admin: bool,
45
+
}
38
46
39
47
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
40
48
pub enum TokenValidationError {
···
149
157
150
158
let (decrypted_key, deactivated_at, takedown_ref, is_admin) = if let Some(key) = cached_key
151
159
{
152
-
let user_status = sqlx::query!(
153
-
"SELECT deactivated_at, takedown_ref, is_admin FROM users WHERE did = $1",
154
-
did
155
-
)
156
-
.fetch_optional(db)
157
-
.await
158
-
.ok()
159
-
.flatten();
160
+
let status_cache_key = format!("auth:status:{}", did);
161
+
let cached_status: Option<CachedUserStatus> = if let Some(c) = cache {
162
+
c.get(&status_cache_key)
163
+
.await
164
+
.and_then(|s| serde_json::from_str(&s).ok())
165
+
} else {
166
+
None
167
+
};
160
168
161
-
match user_status {
162
-
Some(status) => (
169
+
if let Some(status) = cached_status {
170
+
(
163
171
Some(key),
164
-
status.deactivated_at,
165
-
status.takedown_ref,
172
+
if status.deactivated {
173
+
Some(chrono::Utc::now())
174
+
} else {
175
+
None
176
+
},
177
+
if status.takendown {
178
+
Some("takendown".to_string())
179
+
} else {
180
+
None
181
+
},
166
182
status.is_admin,
167
-
),
168
-
None => (None, None, None, false),
183
+
)
184
+
} else {
185
+
let user_status = sqlx::query!(
186
+
"SELECT deactivated_at, takedown_ref, is_admin FROM users WHERE did = $1",
187
+
did
188
+
)
189
+
.fetch_optional(db)
190
+
.await
191
+
.ok()
192
+
.flatten();
193
+
194
+
match user_status {
195
+
Some(status) => {
196
+
if let Some(c) = cache {
197
+
let cached = CachedUserStatus {
198
+
deactivated: status.deactivated_at.is_some(),
199
+
takendown: status.takedown_ref.is_some(),
200
+
is_admin: status.is_admin,
201
+
};
202
+
if let Ok(json) = serde_json::to_string(&cached) {
203
+
let _ = c
204
+
.set(
205
+
&status_cache_key,
206
+
&json,
207
+
Duration::from_secs(USER_STATUS_CACHE_TTL_SECS),
208
+
)
209
+
.await;
210
+
}
211
+
}
212
+
(
213
+
Some(key),
214
+
status.deactivated_at,
215
+
status.takedown_ref,
216
+
status.is_admin,
217
+
)
218
+
}
219
+
None => (None, None, None, false),
220
+
}
169
221
}
170
222
} else if let Some(user) = sqlx::query!(
171
223
"SELECT k.key_bytes, k.encryption_version, u.deactivated_at, u.takedown_ref, u.is_admin
···
190
242
Duration::from_secs(KEY_CACHE_TTL_SECS),
191
243
)
192
244
.await;
245
+
246
+
let status_cache_key = format!("auth:status:{}", did);
247
+
let cached = CachedUserStatus {
248
+
deactivated: user.deactivated_at.is_some(),
249
+
takendown: user.takedown_ref.is_some(),
250
+
is_admin: user.is_admin,
251
+
};
252
+
if let Ok(json) = serde_json::to_string(&cached) {
253
+
let _ = c
254
+
.set(
255
+
&status_cache_key,
256
+
&json,
257
+
Duration::from_secs(USER_STATUS_CACHE_TTL_SECS),
258
+
)
259
+
.await;
260
+
}
193
261
}
194
262
195
263
(
···
328
396
329
397
pub async fn invalidate_auth_cache(cache: &Arc<dyn Cache>, did: &str) {
330
398
let key_cache_key = format!("auth:key:{}", did);
399
+
let status_cache_key = format!("auth:status:{}", did);
331
400
let _ = cache.delete(&key_cache_key).await;
401
+
let _ = cache.delete(&status_cache_key).await;
332
402
}
333
403
334
404
pub async fn validate_token_with_dpop(
+2
src/auth/service.rs
+2
src/auth/service.rs
+3
src/crawlers.rs
+3
src/crawlers.rs
···
24
24
crawler_urls,
25
25
http_client: Client::builder()
26
26
.timeout(Duration::from_secs(30))
27
+
.connect_timeout(Duration::from_secs(5))
28
+
.pool_max_idle_per_host(5)
29
+
.pool_idle_timeout(Duration::from_secs(90))
27
30
.build()
28
31
.unwrap_or_default(),
29
32
last_notified: AtomicU64::new(0),
+1
-7
src/handle/mod.rs
+1
-7
src/handle/mod.rs
···
2
2
3
3
use hickory_resolver::TokioAsyncResolver;
4
4
use hickory_resolver::config::{ResolverConfig, ResolverOpts};
5
-
use reqwest::Client;
6
-
use std::time::Duration;
7
5
use thiserror::Error;
8
6
9
7
#[derive(Error, Debug)]
···
43
41
44
42
pub async fn resolve_handle_http(handle: &str) -> Result<String, HandleResolutionError> {
45
43
let url = format!("https://{}/.well-known/atproto-did", handle);
46
-
let client = Client::builder()
47
-
.timeout(Duration::from_secs(10))
48
-
.redirect(reqwest::redirect::Policy::limited(5))
49
-
.build()
50
-
.map_err(|e| HandleResolutionError::HttpError(e.to_string()))?;
44
+
let client = crate::api::proxy_client::handle_resolution_client();
51
45
let response = client
52
46
.get(&url)
53
47
.header("Accept", "text/plain")
+2
src/oauth/client.rs
+2
src/oauth/client.rs
···
80
80
http_client: Client::builder()
81
81
.timeout(std::time::Duration::from_secs(30))
82
82
.connect_timeout(std::time::Duration::from_secs(10))
83
+
.pool_max_idle_per_host(10)
84
+
.pool_idle_timeout(std::time::Duration::from_secs(90))
83
85
.build()
84
86
.unwrap_or_else(|_| Client::new()),
85
87
cache_ttl_secs,
+57
-5
src/plc/mod.rs
+57
-5
src/plc/mod.rs
···
1
+
use crate::cache::Cache;
1
2
use base32::Alphabet;
2
3
use base64::{Engine as _, engine::general_purpose::URL_SAFE_NO_PAD};
3
4
use k256::ecdsa::{Signature, SigningKey, signature::Signer};
···
6
7
use serde_json::{Value, json};
7
8
use sha2::{Digest, Sha256};
8
9
use std::collections::HashMap;
10
+
use std::sync::Arc;
9
11
use std::time::Duration;
10
12
use thiserror::Error;
11
13
···
77
79
}
78
80
}
79
81
82
+
const PLC_CACHE_TTL_SECS: u64 = 300;
83
+
80
84
pub struct PlcClient {
81
85
base_url: String,
82
86
client: Client,
87
+
cache: Option<Arc<dyn Cache>>,
83
88
}
84
89
85
90
impl PlcClient {
86
91
pub fn new(base_url: Option<String>) -> Self {
92
+
Self::with_cache(base_url, None)
93
+
}
94
+
95
+
pub fn with_cache(base_url: Option<String>, cache: Option<Arc<dyn Cache>>) -> Self {
87
96
let base_url = base_url.unwrap_or_else(|| {
88
97
std::env::var("PLC_DIRECTORY_URL")
89
98
.unwrap_or_else(|_| "https://plc.directory".to_string())
···
100
109
.timeout(Duration::from_secs(timeout_secs))
101
110
.connect_timeout(Duration::from_secs(connect_timeout_secs))
102
111
.pool_max_idle_per_host(5)
112
+
.pool_idle_timeout(Duration::from_secs(90))
103
113
.build()
104
114
.unwrap_or_else(|_| Client::new());
105
-
Self { base_url, client }
115
+
Self {
116
+
base_url,
117
+
client,
118
+
cache,
119
+
}
106
120
}
107
121
108
122
fn encode_did(did: &str) -> String {
···
110
124
}
111
125
112
126
pub async fn get_document(&self, did: &str) -> Result<Value, PlcError> {
127
+
let cache_key = format!("plc:doc:{}", did);
128
+
if let Some(ref cache) = self.cache
129
+
&& let Some(cached) = cache.get(&cache_key).await
130
+
&& let Ok(value) = serde_json::from_str(&cached)
131
+
{
132
+
return Ok(value);
133
+
}
113
134
let url = format!("{}/{}", self.base_url, Self::encode_did(did));
114
135
let response = self.client.get(&url).send().await?;
115
136
if response.status() == reqwest::StatusCode::NOT_FOUND {
···
123
144
status, body
124
145
)));
125
146
}
126
-
response
147
+
let value: Value = response
127
148
.json()
128
149
.await
129
-
.map_err(|e| PlcError::InvalidResponse(e.to_string()))
150
+
.map_err(|e| PlcError::InvalidResponse(e.to_string()))?;
151
+
if let Some(ref cache) = self.cache
152
+
&& let Ok(json_str) = serde_json::to_string(&value)
153
+
{
154
+
let _ = cache
155
+
.set(
156
+
&cache_key,
157
+
&json_str,
158
+
Duration::from_secs(PLC_CACHE_TTL_SECS),
159
+
)
160
+
.await;
161
+
}
162
+
Ok(value)
130
163
}
131
164
132
165
pub async fn get_document_data(&self, did: &str) -> Result<Value, PlcError> {
166
+
let cache_key = format!("plc:data:{}", did);
167
+
if let Some(ref cache) = self.cache
168
+
&& let Some(cached) = cache.get(&cache_key).await
169
+
&& let Ok(value) = serde_json::from_str(&cached)
170
+
{
171
+
return Ok(value);
172
+
}
133
173
let url = format!("{}/{}/data", self.base_url, Self::encode_did(did));
134
174
let response = self.client.get(&url).send().await?;
135
175
if response.status() == reqwest::StatusCode::NOT_FOUND {
···
143
183
status, body
144
184
)));
145
185
}
146
-
response
186
+
let value: Value = response
147
187
.json()
148
188
.await
149
-
.map_err(|e| PlcError::InvalidResponse(e.to_string()))
189
+
.map_err(|e| PlcError::InvalidResponse(e.to_string()))?;
190
+
if let Some(ref cache) = self.cache
191
+
&& let Ok(json_str) = serde_json::to_string(&value)
192
+
{
193
+
let _ = cache
194
+
.set(
195
+
&cache_key,
196
+
&json_str,
197
+
Duration::from_secs(PLC_CACHE_TTL_SECS),
198
+
)
199
+
.await;
200
+
}
201
+
Ok(value)
150
202
}
151
203
152
204
pub async fn get_last_op(&self, did: &str) -> Result<PlcOpOrTombstone, PlcError> {
+30
-25
src/scheduled.rs
+30
-25
src/scheduled.rs
···
343
343
}
344
344
};
345
345
346
-
let mut blob_refs_found = 0;
346
+
let mut batch_record_uris: Vec<String> = Vec::new();
347
+
let mut batch_blob_cids: Vec<String> = Vec::new();
348
+
347
349
for record in records {
348
350
let record_cid = match Cid::from_str(&record.record_cid) {
349
351
Ok(c) => c,
···
363
365
let blob_refs = crate::sync::import::find_blob_refs_ipld(&record_ipld, 0);
364
366
for blob_ref in blob_refs {
365
367
let record_uri = format!("at://{}/{}/{}", user.did, record.collection, record.rkey);
366
-
if let Err(e) = sqlx::query!(
367
-
r#"
368
-
INSERT INTO record_blobs (repo_id, record_uri, blob_cid)
369
-
VALUES ($1, $2, $3)
370
-
ON CONFLICT (repo_id, record_uri, blob_cid) DO NOTHING
371
-
"#,
372
-
user.user_id,
373
-
record_uri,
374
-
blob_ref.cid
375
-
)
376
-
.execute(db)
377
-
.await
378
-
{
379
-
warn!(error = %e, "Failed to insert record_blob during backfill");
380
-
} else {
381
-
blob_refs_found += 1;
382
-
}
368
+
batch_record_uris.push(record_uri);
369
+
batch_blob_cids.push(blob_ref.cid);
383
370
}
384
371
}
385
372
386
-
if blob_refs_found > 0 {
387
-
info!(
388
-
user_id = %user.user_id,
389
-
did = %user.did,
390
-
blob_refs = blob_refs_found,
391
-
"Backfilled record_blobs"
392
-
);
373
+
let blob_refs_found = batch_record_uris.len();
374
+
if !batch_record_uris.is_empty() {
375
+
if let Err(e) = sqlx::query!(
376
+
r#"
377
+
INSERT INTO record_blobs (repo_id, record_uri, blob_cid)
378
+
SELECT $1, record_uri, blob_cid
379
+
FROM UNNEST($2::text[], $3::text[]) AS t(record_uri, blob_cid)
380
+
ON CONFLICT (repo_id, record_uri, blob_cid) DO NOTHING
381
+
"#,
382
+
user.user_id,
383
+
&batch_record_uris,
384
+
&batch_blob_cids
385
+
)
386
+
.execute(db)
387
+
.await
388
+
{
389
+
warn!(error = %e, "Failed to batch insert record_blobs during backfill");
390
+
} else {
391
+
info!(
392
+
user_id = %user.user_id,
393
+
did = %user.did,
394
+
blob_refs = blob_refs_found,
395
+
"Backfilled record_blobs"
396
+
);
397
+
}
393
398
}
394
399
success += 1;
395
400
}
+184
src/storage/mod.rs
+184
src/storage/mod.rs
···
3
3
use aws_config::meta::region::RegionProviderChain;
4
4
use aws_sdk_s3::Client;
5
5
use aws_sdk_s3::primitives::ByteStream;
6
+
use aws_sdk_s3::types::CompletedMultipartUpload;
7
+
use aws_sdk_s3::types::CompletedPart;
6
8
use bytes::Bytes;
9
+
use futures::Stream;
10
+
use sha2::{Digest, Sha256};
11
+
use std::pin::Pin;
7
12
use thiserror::Error;
13
+
14
+
const MIN_PART_SIZE: usize = 5 * 1024 * 1024;
8
15
9
16
#[derive(Error, Debug)]
10
17
pub enum StorageError {
···
16
23
Other(String),
17
24
}
18
25
26
+
pub struct StreamUploadResult {
27
+
pub sha256_hash: [u8; 32],
28
+
pub size: u64,
29
+
}
30
+
19
31
#[async_trait]
20
32
pub trait BlobStorage: Send + Sync {
21
33
async fn put(&self, key: &str, data: &[u8]) -> Result<(), StorageError>;
···
23
35
async fn get(&self, key: &str) -> Result<Vec<u8>, StorageError>;
24
36
async fn get_bytes(&self, key: &str) -> Result<Bytes, StorageError>;
25
37
async fn delete(&self, key: &str) -> Result<(), StorageError>;
38
+
async fn put_stream(
39
+
&self,
40
+
key: &str,
41
+
stream: Pin<Box<dyn Stream<Item = Result<Bytes, std::io::Error>> + Send>>,
42
+
) -> Result<StreamUploadResult, StorageError>;
43
+
async fn copy(&self, src_key: &str, dst_key: &str) -> Result<(), StorageError>;
26
44
}
27
45
28
46
pub struct S3BlobStorage {
···
231
249
}
232
250
233
251
result?;
252
+
Ok(())
253
+
}
254
+
255
+
async fn put_stream(
256
+
&self,
257
+
key: &str,
258
+
mut stream: Pin<Box<dyn Stream<Item = Result<Bytes, std::io::Error>> + Send>>,
259
+
) -> Result<StreamUploadResult, StorageError> {
260
+
use futures::StreamExt;
261
+
262
+
let create_resp = self
263
+
.client
264
+
.create_multipart_upload()
265
+
.bucket(&self.bucket)
266
+
.key(key)
267
+
.send()
268
+
.await
269
+
.map_err(|e| StorageError::S3(format!("Failed to create multipart upload: {}", e)))?;
270
+
271
+
let upload_id = create_resp
272
+
.upload_id()
273
+
.ok_or_else(|| StorageError::S3("No upload ID returned".to_string()))?
274
+
.to_string();
275
+
276
+
let mut hasher = Sha256::new();
277
+
let mut total_size: u64 = 0;
278
+
let mut part_number = 1;
279
+
let mut completed_parts: Vec<CompletedPart> = Vec::new();
280
+
let mut buffer = Vec::with_capacity(MIN_PART_SIZE);
281
+
282
+
let upload_part = |client: &Client,
283
+
bucket: &str,
284
+
key: &str,
285
+
upload_id: &str,
286
+
part_num: i32,
287
+
data: Vec<u8>|
288
+
-> std::pin::Pin<
289
+
Box<dyn std::future::Future<Output = Result<CompletedPart, StorageError>> + Send>,
290
+
> {
291
+
let client = client.clone();
292
+
let bucket = bucket.to_string();
293
+
let key = key.to_string();
294
+
let upload_id = upload_id.to_string();
295
+
Box::pin(async move {
296
+
let resp = client
297
+
.upload_part()
298
+
.bucket(&bucket)
299
+
.key(&key)
300
+
.upload_id(&upload_id)
301
+
.part_number(part_num)
302
+
.body(ByteStream::from(data))
303
+
.send()
304
+
.await
305
+
.map_err(|e| StorageError::S3(format!("Failed to upload part: {}", e)))?;
306
+
307
+
let etag = resp
308
+
.e_tag()
309
+
.ok_or_else(|| StorageError::S3("No ETag returned for part".to_string()))?
310
+
.to_string();
311
+
312
+
Ok(CompletedPart::builder()
313
+
.part_number(part_num)
314
+
.e_tag(etag)
315
+
.build())
316
+
})
317
+
};
318
+
319
+
loop {
320
+
match stream.next().await {
321
+
Some(Ok(chunk)) => {
322
+
hasher.update(&chunk);
323
+
total_size += chunk.len() as u64;
324
+
buffer.extend_from_slice(&chunk);
325
+
326
+
if buffer.len() >= MIN_PART_SIZE {
327
+
let part_data =
328
+
std::mem::replace(&mut buffer, Vec::with_capacity(MIN_PART_SIZE));
329
+
let part = upload_part(
330
+
&self.client,
331
+
&self.bucket,
332
+
key,
333
+
&upload_id,
334
+
part_number,
335
+
part_data,
336
+
)
337
+
.await?;
338
+
completed_parts.push(part);
339
+
part_number += 1;
340
+
}
341
+
}
342
+
Some(Err(e)) => {
343
+
let _ = self
344
+
.client
345
+
.abort_multipart_upload()
346
+
.bucket(&self.bucket)
347
+
.key(key)
348
+
.upload_id(&upload_id)
349
+
.send()
350
+
.await;
351
+
return Err(StorageError::Io(e));
352
+
}
353
+
None => break,
354
+
}
355
+
}
356
+
357
+
if !buffer.is_empty() {
358
+
let part = upload_part(
359
+
&self.client,
360
+
&self.bucket,
361
+
key,
362
+
&upload_id,
363
+
part_number,
364
+
buffer,
365
+
)
366
+
.await?;
367
+
completed_parts.push(part);
368
+
}
369
+
370
+
if completed_parts.is_empty() {
371
+
let _ = self
372
+
.client
373
+
.abort_multipart_upload()
374
+
.bucket(&self.bucket)
375
+
.key(key)
376
+
.upload_id(&upload_id)
377
+
.send()
378
+
.await;
379
+
return Err(StorageError::Other("Empty upload".to_string()));
380
+
}
381
+
382
+
let completed_upload = CompletedMultipartUpload::builder()
383
+
.set_parts(Some(completed_parts))
384
+
.build();
385
+
386
+
self.client
387
+
.complete_multipart_upload()
388
+
.bucket(&self.bucket)
389
+
.key(key)
390
+
.upload_id(&upload_id)
391
+
.multipart_upload(completed_upload)
392
+
.send()
393
+
.await
394
+
.map_err(|e| StorageError::S3(format!("Failed to complete multipart upload: {}", e)))?;
395
+
396
+
crate::metrics::record_s3_operation("put_stream", "success");
397
+
398
+
let hash: [u8; 32] = hasher.finalize().into();
399
+
Ok(StreamUploadResult {
400
+
sha256_hash: hash,
401
+
size: total_size,
402
+
})
403
+
}
404
+
405
+
async fn copy(&self, src_key: &str, dst_key: &str) -> Result<(), StorageError> {
406
+
let copy_source = format!("{}/{}", self.bucket, src_key);
407
+
408
+
self.client
409
+
.copy_object()
410
+
.bucket(&self.bucket)
411
+
.copy_source(©_source)
412
+
.key(dst_key)
413
+
.send()
414
+
.await
415
+
.map_err(|e| StorageError::S3(format!("Failed to copy object: {}", e)))?;
416
+
417
+
crate::metrics::record_s3_operation("copy", "success");
234
418
Ok(())
235
419
}
236
420
}
+8
-8
src/sync/util.rs
+8
-8
src/sync/util.rs
···
216
216
op: 1,
217
217
t: "#identity".to_string(),
218
218
};
219
-
let mut bytes = Vec::new();
219
+
let mut bytes = Vec::with_capacity(256);
220
220
serde_ipld_dagcbor::to_writer(&mut bytes, &header)?;
221
221
serde_ipld_dagcbor::to_writer(&mut bytes, &frame)?;
222
222
Ok(bytes)
···
234
234
op: 1,
235
235
t: "#account".to_string(),
236
236
};
237
-
let mut bytes = Vec::new();
237
+
let mut bytes = Vec::with_capacity(256);
238
238
serde_ipld_dagcbor::to_writer(&mut bytes, &header)?;
239
239
serde_ipld_dagcbor::to_writer(&mut bytes, &frame)?;
240
240
let hex_str: String = bytes.iter().map(|b| format!("{:02x}", b)).collect();
···
281
281
op: 1,
282
282
t: "#sync".to_string(),
283
283
};
284
-
let mut bytes = Vec::new();
284
+
let mut bytes = Vec::with_capacity(512);
285
285
serde_ipld_dagcbor::to_writer(&mut bytes, &header)?;
286
286
serde_ipld_dagcbor::to_writer(&mut bytes, &frame)?;
287
287
Ok(bytes)
···
349
349
op: 1,
350
350
t: "#commit".to_string(),
351
351
};
352
-
let mut bytes = Vec::new();
352
+
let mut bytes = Vec::with_capacity(frame.blocks.len() + 512);
353
353
serde_ipld_dagcbor::to_writer(&mut bytes, &header)?;
354
354
serde_ipld_dagcbor::to_writer(&mut bytes, &frame)?;
355
355
Ok(bytes)
···
385
385
return Ok(HashMap::new());
386
386
}
387
387
let fetched = state.block_store.get_many(&all_cids).await?;
388
-
let mut blocks_map = HashMap::new();
388
+
let mut blocks_map = HashMap::with_capacity(all_cids.len());
389
389
for (cid, data_opt) in all_cids.into_iter().zip(fetched.into_iter()) {
390
390
if let Some(data) = data_opt {
391
391
blocks_map.insert(cid, data);
···
497
497
op: 1,
498
498
t: "#commit".to_string(),
499
499
};
500
-
let mut bytes = Vec::new();
500
+
let mut bytes = Vec::with_capacity(frame.blocks.len() + 512);
501
501
serde_ipld_dagcbor::to_writer(&mut bytes, &header)?;
502
502
serde_ipld_dagcbor::to_writer(&mut bytes, &frame)?;
503
503
Ok(bytes)
···
512
512
name: name.to_string(),
513
513
message: message.map(String::from),
514
514
};
515
-
let mut bytes = Vec::new();
515
+
let mut bytes = Vec::with_capacity(128);
516
516
serde_ipld_dagcbor::to_writer(&mut bytes, &header)?;
517
517
serde_ipld_dagcbor::to_writer(&mut bytes, &frame)?;
518
518
Ok(bytes)
···
524
524
error: error.to_string(),
525
525
message: message.map(String::from),
526
526
};
527
-
let mut bytes = Vec::new();
527
+
let mut bytes = Vec::with_capacity(128);
528
528
serde_ipld_dagcbor::to_writer(&mut bytes, &header)?;
529
529
serde_ipld_dagcbor::to_writer(&mut bytes, &frame)?;
530
530
Ok(bytes)
+3
src/sync/verify.rs
+3
src/sync/verify.rs
···
47
47
Self {
48
48
http_client: Client::builder()
49
49
.timeout(std::time::Duration::from_secs(10))
50
+
.connect_timeout(std::time::Duration::from_secs(5))
51
+
.pool_max_idle_per_host(10)
52
+
.pool_idle_timeout(std::time::Duration::from_secs(90))
50
53
.build()
51
54
.unwrap_or_default(),
52
55
}
+285
-288
src/sync/verify_tests.rs
+285
-288
src/sync/verify_tests.rs
···
1
-
#[cfg(test)]
2
-
mod tests {
3
-
use crate::sync::verify::{CarVerifier, VerifyError};
4
-
use bytes::Bytes;
5
-
use cid::Cid;
6
-
use sha2::{Digest, Sha256};
7
-
use std::collections::HashMap;
1
+
use crate::sync::verify::{CarVerifier, VerifyError};
2
+
use bytes::Bytes;
3
+
use cid::Cid;
4
+
use sha2::{Digest, Sha256};
5
+
use std::collections::HashMap;
8
6
9
-
fn make_cid(data: &[u8]) -> Cid {
10
-
let mut hasher = Sha256::new();
11
-
hasher.update(data);
12
-
let hash = hasher.finalize();
13
-
let multihash = multihash::Multihash::wrap(0x12, &hash).unwrap();
14
-
Cid::new_v1(0x71, multihash)
15
-
}
7
+
fn make_cid(data: &[u8]) -> Cid {
8
+
let mut hasher = Sha256::new();
9
+
hasher.update(data);
10
+
let hash = hasher.finalize();
11
+
let multihash = multihash::Multihash::wrap(0x12, &hash).unwrap();
12
+
Cid::new_v1(0x71, multihash)
13
+
}
16
14
17
-
#[test]
18
-
fn test_verifier_creation() {
19
-
let _verifier = CarVerifier::new();
20
-
}
15
+
#[test]
16
+
fn test_verifier_creation() {
17
+
let _verifier = CarVerifier::new();
18
+
}
21
19
22
-
#[test]
23
-
fn test_verify_error_display() {
24
-
let err = VerifyError::DidMismatch {
25
-
commit_did: "did:plc:abc".to_string(),
26
-
expected_did: "did:plc:xyz".to_string(),
27
-
};
28
-
assert!(err.to_string().contains("did:plc:abc"));
29
-
assert!(err.to_string().contains("did:plc:xyz"));
30
-
let err = VerifyError::InvalidSignature;
31
-
assert!(err.to_string().contains("signature"));
32
-
let err = VerifyError::NoSigningKey;
33
-
assert!(err.to_string().contains("signing key"));
34
-
let err = VerifyError::MstValidationFailed("test error".to_string());
35
-
assert!(err.to_string().contains("test error"));
36
-
}
20
+
#[test]
21
+
fn test_verify_error_display() {
22
+
let err = VerifyError::DidMismatch {
23
+
commit_did: "did:plc:abc".to_string(),
24
+
expected_did: "did:plc:xyz".to_string(),
25
+
};
26
+
assert!(err.to_string().contains("did:plc:abc"));
27
+
assert!(err.to_string().contains("did:plc:xyz"));
28
+
let err = VerifyError::InvalidSignature;
29
+
assert!(err.to_string().contains("signature"));
30
+
let err = VerifyError::NoSigningKey;
31
+
assert!(err.to_string().contains("signing key"));
32
+
let err = VerifyError::MstValidationFailed("test error".to_string());
33
+
assert!(err.to_string().contains("test error"));
34
+
}
37
35
38
-
#[test]
39
-
fn test_mst_validation_missing_root_block() {
40
-
let verifier = CarVerifier::new();
41
-
let blocks: HashMap<Cid, Bytes> = HashMap::new();
42
-
let fake_cid = make_cid(b"fake data");
43
-
let result = verifier.verify_mst_structure(&fake_cid, &blocks);
44
-
assert!(result.is_err());
45
-
let err = result.unwrap_err();
46
-
assert!(matches!(err, VerifyError::BlockNotFound(_)));
47
-
}
36
+
#[test]
37
+
fn test_mst_validation_missing_root_block() {
38
+
let verifier = CarVerifier::new();
39
+
let blocks: HashMap<Cid, Bytes> = HashMap::new();
40
+
let fake_cid = make_cid(b"fake data");
41
+
let result = verifier.verify_mst_structure(&fake_cid, &blocks);
42
+
assert!(result.is_err());
43
+
let err = result.unwrap_err();
44
+
assert!(matches!(err, VerifyError::BlockNotFound(_)));
45
+
}
48
46
49
-
#[test]
50
-
fn test_mst_validation_invalid_cbor() {
51
-
let verifier = CarVerifier::new();
52
-
let bad_cbor = Bytes::from(vec![0xFF, 0xFF, 0xFF]);
53
-
let cid = make_cid(&bad_cbor);
54
-
let mut blocks = HashMap::new();
55
-
blocks.insert(cid, bad_cbor);
56
-
let result = verifier.verify_mst_structure(&cid, &blocks);
57
-
assert!(result.is_err());
58
-
let err = result.unwrap_err();
59
-
assert!(matches!(err, VerifyError::InvalidCbor(_)));
60
-
}
47
+
#[test]
48
+
fn test_mst_validation_invalid_cbor() {
49
+
let verifier = CarVerifier::new();
50
+
let bad_cbor = Bytes::from(vec![0xFF, 0xFF, 0xFF]);
51
+
let cid = make_cid(&bad_cbor);
52
+
let mut blocks = HashMap::new();
53
+
blocks.insert(cid, bad_cbor);
54
+
let result = verifier.verify_mst_structure(&cid, &blocks);
55
+
assert!(result.is_err());
56
+
let err = result.unwrap_err();
57
+
assert!(matches!(err, VerifyError::InvalidCbor(_)));
58
+
}
61
59
62
-
#[test]
63
-
fn test_mst_validation_empty_node() {
64
-
let verifier = CarVerifier::new();
65
-
let empty_node = serde_ipld_dagcbor::to_vec(&serde_json::json!({
66
-
"e": []
67
-
}))
68
-
.unwrap();
69
-
let cid = make_cid(&empty_node);
70
-
let mut blocks = HashMap::new();
71
-
blocks.insert(cid, Bytes::from(empty_node));
72
-
let result = verifier.verify_mst_structure(&cid, &blocks);
73
-
assert!(result.is_ok());
74
-
}
60
+
#[test]
61
+
fn test_mst_validation_empty_node() {
62
+
let verifier = CarVerifier::new();
63
+
let empty_node = serde_ipld_dagcbor::to_vec(&serde_json::json!({
64
+
"e": []
65
+
}))
66
+
.unwrap();
67
+
let cid = make_cid(&empty_node);
68
+
let mut blocks = HashMap::new();
69
+
blocks.insert(cid, Bytes::from(empty_node));
70
+
let result = verifier.verify_mst_structure(&cid, &blocks);
71
+
assert!(result.is_ok());
72
+
}
75
73
76
-
#[test]
77
-
fn test_mst_validation_missing_left_pointer() {
78
-
use ipld_core::ipld::Ipld;
74
+
#[test]
75
+
fn test_mst_validation_missing_left_pointer() {
76
+
use ipld_core::ipld::Ipld;
79
77
80
-
let verifier = CarVerifier::new();
81
-
let missing_left_cid = make_cid(b"missing left");
82
-
let node = Ipld::Map(std::collections::BTreeMap::from([
83
-
("l".to_string(), Ipld::Link(missing_left_cid)),
84
-
("e".to_string(), Ipld::List(vec![])),
85
-
]));
86
-
let node_bytes = serde_ipld_dagcbor::to_vec(&node).unwrap();
87
-
let cid = make_cid(&node_bytes);
88
-
let mut blocks = HashMap::new();
89
-
blocks.insert(cid, Bytes::from(node_bytes));
90
-
let result = verifier.verify_mst_structure(&cid, &blocks);
91
-
assert!(result.is_err());
92
-
let err = result.unwrap_err();
93
-
assert!(matches!(err, VerifyError::BlockNotFound(_)));
94
-
assert!(err.to_string().contains("left pointer"));
95
-
}
78
+
let verifier = CarVerifier::new();
79
+
let missing_left_cid = make_cid(b"missing left");
80
+
let node = Ipld::Map(std::collections::BTreeMap::from([
81
+
("l".to_string(), Ipld::Link(missing_left_cid)),
82
+
("e".to_string(), Ipld::List(vec![])),
83
+
]));
84
+
let node_bytes = serde_ipld_dagcbor::to_vec(&node).unwrap();
85
+
let cid = make_cid(&node_bytes);
86
+
let mut blocks = HashMap::new();
87
+
blocks.insert(cid, Bytes::from(node_bytes));
88
+
let result = verifier.verify_mst_structure(&cid, &blocks);
89
+
assert!(result.is_err());
90
+
let err = result.unwrap_err();
91
+
assert!(matches!(err, VerifyError::BlockNotFound(_)));
92
+
assert!(err.to_string().contains("left pointer"));
93
+
}
96
94
97
-
#[test]
98
-
fn test_mst_validation_missing_subtree() {
99
-
use ipld_core::ipld::Ipld;
95
+
#[test]
96
+
fn test_mst_validation_missing_subtree() {
97
+
use ipld_core::ipld::Ipld;
100
98
101
-
let verifier = CarVerifier::new();
102
-
let missing_subtree_cid = make_cid(b"missing subtree");
103
-
let record_cid = make_cid(b"record");
104
-
let entry = Ipld::Map(std::collections::BTreeMap::from([
105
-
("k".to_string(), Ipld::Bytes(b"key1".to_vec())),
106
-
("v".to_string(), Ipld::Link(record_cid)),
107
-
("p".to_string(), Ipld::Integer(0)),
108
-
("t".to_string(), Ipld::Link(missing_subtree_cid)),
109
-
]));
110
-
let node = Ipld::Map(std::collections::BTreeMap::from([(
111
-
"e".to_string(),
112
-
Ipld::List(vec![entry]),
113
-
)]));
114
-
let node_bytes = serde_ipld_dagcbor::to_vec(&node).unwrap();
115
-
let cid = make_cid(&node_bytes);
116
-
let mut blocks = HashMap::new();
117
-
blocks.insert(cid, Bytes::from(node_bytes));
118
-
let result = verifier.verify_mst_structure(&cid, &blocks);
119
-
assert!(result.is_err());
120
-
let err = result.unwrap_err();
121
-
assert!(matches!(err, VerifyError::BlockNotFound(_)));
122
-
assert!(err.to_string().contains("subtree"));
123
-
}
99
+
let verifier = CarVerifier::new();
100
+
let missing_subtree_cid = make_cid(b"missing subtree");
101
+
let record_cid = make_cid(b"record");
102
+
let entry = Ipld::Map(std::collections::BTreeMap::from([
103
+
("k".to_string(), Ipld::Bytes(b"key1".to_vec())),
104
+
("v".to_string(), Ipld::Link(record_cid)),
105
+
("p".to_string(), Ipld::Integer(0)),
106
+
("t".to_string(), Ipld::Link(missing_subtree_cid)),
107
+
]));
108
+
let node = Ipld::Map(std::collections::BTreeMap::from([(
109
+
"e".to_string(),
110
+
Ipld::List(vec![entry]),
111
+
)]));
112
+
let node_bytes = serde_ipld_dagcbor::to_vec(&node).unwrap();
113
+
let cid = make_cid(&node_bytes);
114
+
let mut blocks = HashMap::new();
115
+
blocks.insert(cid, Bytes::from(node_bytes));
116
+
let result = verifier.verify_mst_structure(&cid, &blocks);
117
+
assert!(result.is_err());
118
+
let err = result.unwrap_err();
119
+
assert!(matches!(err, VerifyError::BlockNotFound(_)));
120
+
assert!(err.to_string().contains("subtree"));
121
+
}
124
122
125
-
#[test]
126
-
fn test_mst_validation_unsorted_keys() {
127
-
use ipld_core::ipld::Ipld;
123
+
#[test]
124
+
fn test_mst_validation_unsorted_keys() {
125
+
use ipld_core::ipld::Ipld;
128
126
129
-
let verifier = CarVerifier::new();
130
-
let record_cid = make_cid(b"record");
131
-
let entry1 = Ipld::Map(std::collections::BTreeMap::from([
132
-
("k".to_string(), Ipld::Bytes(b"zzz".to_vec())),
133
-
("v".to_string(), Ipld::Link(record_cid)),
134
-
("p".to_string(), Ipld::Integer(0)),
135
-
]));
136
-
let entry2 = Ipld::Map(std::collections::BTreeMap::from([
137
-
("k".to_string(), Ipld::Bytes(b"aaa".to_vec())),
138
-
("v".to_string(), Ipld::Link(record_cid)),
139
-
("p".to_string(), Ipld::Integer(0)),
140
-
]));
141
-
let node = Ipld::Map(std::collections::BTreeMap::from([(
142
-
"e".to_string(),
143
-
Ipld::List(vec![entry1, entry2]),
144
-
)]));
145
-
let node_bytes = serde_ipld_dagcbor::to_vec(&node).unwrap();
146
-
let cid = make_cid(&node_bytes);
147
-
let mut blocks = HashMap::new();
148
-
blocks.insert(cid, Bytes::from(node_bytes));
149
-
let result = verifier.verify_mst_structure(&cid, &blocks);
150
-
assert!(result.is_err());
151
-
let err = result.unwrap_err();
152
-
assert!(matches!(err, VerifyError::MstValidationFailed(_)));
153
-
assert!(err.to_string().contains("sorted"));
154
-
}
127
+
let verifier = CarVerifier::new();
128
+
let record_cid = make_cid(b"record");
129
+
let entry1 = Ipld::Map(std::collections::BTreeMap::from([
130
+
("k".to_string(), Ipld::Bytes(b"zzz".to_vec())),
131
+
("v".to_string(), Ipld::Link(record_cid)),
132
+
("p".to_string(), Ipld::Integer(0)),
133
+
]));
134
+
let entry2 = Ipld::Map(std::collections::BTreeMap::from([
135
+
("k".to_string(), Ipld::Bytes(b"aaa".to_vec())),
136
+
("v".to_string(), Ipld::Link(record_cid)),
137
+
("p".to_string(), Ipld::Integer(0)),
138
+
]));
139
+
let node = Ipld::Map(std::collections::BTreeMap::from([(
140
+
"e".to_string(),
141
+
Ipld::List(vec![entry1, entry2]),
142
+
)]));
143
+
let node_bytes = serde_ipld_dagcbor::to_vec(&node).unwrap();
144
+
let cid = make_cid(&node_bytes);
145
+
let mut blocks = HashMap::new();
146
+
blocks.insert(cid, Bytes::from(node_bytes));
147
+
let result = verifier.verify_mst_structure(&cid, &blocks);
148
+
assert!(result.is_err());
149
+
let err = result.unwrap_err();
150
+
assert!(matches!(err, VerifyError::MstValidationFailed(_)));
151
+
assert!(err.to_string().contains("sorted"));
152
+
}
155
153
156
-
#[test]
157
-
fn test_mst_validation_sorted_keys_ok() {
158
-
use ipld_core::ipld::Ipld;
154
+
#[test]
155
+
fn test_mst_validation_sorted_keys_ok() {
156
+
use ipld_core::ipld::Ipld;
159
157
160
-
let verifier = CarVerifier::new();
161
-
let record_cid = make_cid(b"record");
162
-
let entry1 = Ipld::Map(std::collections::BTreeMap::from([
163
-
("k".to_string(), Ipld::Bytes(b"aaa".to_vec())),
164
-
("v".to_string(), Ipld::Link(record_cid)),
165
-
("p".to_string(), Ipld::Integer(0)),
166
-
]));
167
-
let entry2 = Ipld::Map(std::collections::BTreeMap::from([
168
-
("k".to_string(), Ipld::Bytes(b"bbb".to_vec())),
169
-
("v".to_string(), Ipld::Link(record_cid)),
170
-
("p".to_string(), Ipld::Integer(0)),
171
-
]));
172
-
let entry3 = Ipld::Map(std::collections::BTreeMap::from([
173
-
("k".to_string(), Ipld::Bytes(b"zzz".to_vec())),
174
-
("v".to_string(), Ipld::Link(record_cid)),
175
-
("p".to_string(), Ipld::Integer(0)),
176
-
]));
177
-
let node = Ipld::Map(std::collections::BTreeMap::from([(
178
-
"e".to_string(),
179
-
Ipld::List(vec![entry1, entry2, entry3]),
180
-
)]));
181
-
let node_bytes = serde_ipld_dagcbor::to_vec(&node).unwrap();
182
-
let cid = make_cid(&node_bytes);
183
-
let mut blocks = HashMap::new();
184
-
blocks.insert(cid, Bytes::from(node_bytes));
185
-
let result = verifier.verify_mst_structure(&cid, &blocks);
186
-
assert!(result.is_ok());
187
-
}
158
+
let verifier = CarVerifier::new();
159
+
let record_cid = make_cid(b"record");
160
+
let entry1 = Ipld::Map(std::collections::BTreeMap::from([
161
+
("k".to_string(), Ipld::Bytes(b"aaa".to_vec())),
162
+
("v".to_string(), Ipld::Link(record_cid)),
163
+
("p".to_string(), Ipld::Integer(0)),
164
+
]));
165
+
let entry2 = Ipld::Map(std::collections::BTreeMap::from([
166
+
("k".to_string(), Ipld::Bytes(b"bbb".to_vec())),
167
+
("v".to_string(), Ipld::Link(record_cid)),
168
+
("p".to_string(), Ipld::Integer(0)),
169
+
]));
170
+
let entry3 = Ipld::Map(std::collections::BTreeMap::from([
171
+
("k".to_string(), Ipld::Bytes(b"zzz".to_vec())),
172
+
("v".to_string(), Ipld::Link(record_cid)),
173
+
("p".to_string(), Ipld::Integer(0)),
174
+
]));
175
+
let node = Ipld::Map(std::collections::BTreeMap::from([(
176
+
"e".to_string(),
177
+
Ipld::List(vec![entry1, entry2, entry3]),
178
+
)]));
179
+
let node_bytes = serde_ipld_dagcbor::to_vec(&node).unwrap();
180
+
let cid = make_cid(&node_bytes);
181
+
let mut blocks = HashMap::new();
182
+
blocks.insert(cid, Bytes::from(node_bytes));
183
+
let result = verifier.verify_mst_structure(&cid, &blocks);
184
+
assert!(result.is_ok());
185
+
}
188
186
189
-
#[test]
190
-
fn test_mst_validation_with_valid_left_pointer() {
191
-
use ipld_core::ipld::Ipld;
187
+
#[test]
188
+
fn test_mst_validation_with_valid_left_pointer() {
189
+
use ipld_core::ipld::Ipld;
192
190
193
-
let verifier = CarVerifier::new();
194
-
let left_node = Ipld::Map(std::collections::BTreeMap::from([(
195
-
"e".to_string(),
196
-
Ipld::List(vec![]),
197
-
)]));
198
-
let left_node_bytes = serde_ipld_dagcbor::to_vec(&left_node).unwrap();
199
-
let left_cid = make_cid(&left_node_bytes);
200
-
let root_node = Ipld::Map(std::collections::BTreeMap::from([
201
-
("l".to_string(), Ipld::Link(left_cid)),
202
-
("e".to_string(), Ipld::List(vec![])),
203
-
]));
204
-
let root_node_bytes = serde_ipld_dagcbor::to_vec(&root_node).unwrap();
205
-
let root_cid = make_cid(&root_node_bytes);
206
-
let mut blocks = HashMap::new();
207
-
blocks.insert(root_cid, Bytes::from(root_node_bytes));
208
-
blocks.insert(left_cid, Bytes::from(left_node_bytes));
209
-
let result = verifier.verify_mst_structure(&root_cid, &blocks);
210
-
assert!(result.is_ok());
211
-
}
191
+
let verifier = CarVerifier::new();
192
+
let left_node = Ipld::Map(std::collections::BTreeMap::from([(
193
+
"e".to_string(),
194
+
Ipld::List(vec![]),
195
+
)]));
196
+
let left_node_bytes = serde_ipld_dagcbor::to_vec(&left_node).unwrap();
197
+
let left_cid = make_cid(&left_node_bytes);
198
+
let root_node = Ipld::Map(std::collections::BTreeMap::from([
199
+
("l".to_string(), Ipld::Link(left_cid)),
200
+
("e".to_string(), Ipld::List(vec![])),
201
+
]));
202
+
let root_node_bytes = serde_ipld_dagcbor::to_vec(&root_node).unwrap();
203
+
let root_cid = make_cid(&root_node_bytes);
204
+
let mut blocks = HashMap::new();
205
+
blocks.insert(root_cid, Bytes::from(root_node_bytes));
206
+
blocks.insert(left_cid, Bytes::from(left_node_bytes));
207
+
let result = verifier.verify_mst_structure(&root_cid, &blocks);
208
+
assert!(result.is_ok());
209
+
}
212
210
213
-
#[test]
214
-
fn test_mst_validation_cycle_detection() {
215
-
let verifier = CarVerifier::new();
216
-
let node = serde_ipld_dagcbor::to_vec(&serde_json::json!({
217
-
"e": []
218
-
}))
219
-
.unwrap();
220
-
let cid = make_cid(&node);
221
-
let mut blocks = HashMap::new();
222
-
blocks.insert(cid, Bytes::from(node));
223
-
let result = verifier.verify_mst_structure(&cid, &blocks);
224
-
assert!(result.is_ok());
225
-
}
211
+
#[test]
212
+
fn test_mst_validation_cycle_detection() {
213
+
let verifier = CarVerifier::new();
214
+
let node = serde_ipld_dagcbor::to_vec(&serde_json::json!({
215
+
"e": []
216
+
}))
217
+
.unwrap();
218
+
let cid = make_cid(&node);
219
+
let mut blocks = HashMap::new();
220
+
blocks.insert(cid, Bytes::from(node));
221
+
let result = verifier.verify_mst_structure(&cid, &blocks);
222
+
assert!(result.is_ok());
223
+
}
226
224
227
-
#[tokio::test]
228
-
async fn test_unsupported_did_method() {
229
-
let verifier = CarVerifier::new();
230
-
let result = verifier.resolve_did_document("did:unknown:test").await;
231
-
assert!(result.is_err());
232
-
let err = result.unwrap_err();
233
-
assert!(matches!(err, VerifyError::DidResolutionFailed(_)));
234
-
assert!(err.to_string().contains("Unsupported"));
235
-
}
225
+
#[tokio::test]
226
+
async fn test_unsupported_did_method() {
227
+
let verifier = CarVerifier::new();
228
+
let result = verifier.resolve_did_document("did:unknown:test").await;
229
+
assert!(result.is_err());
230
+
let err = result.unwrap_err();
231
+
assert!(matches!(err, VerifyError::DidResolutionFailed(_)));
232
+
assert!(err.to_string().contains("Unsupported"));
233
+
}
236
234
237
-
#[test]
238
-
fn test_mst_validation_with_prefix_compression() {
239
-
use ipld_core::ipld::Ipld;
235
+
#[test]
236
+
fn test_mst_validation_with_prefix_compression() {
237
+
use ipld_core::ipld::Ipld;
240
238
241
-
let verifier = CarVerifier::new();
242
-
let record_cid = make_cid(b"record");
243
-
let entry1 = Ipld::Map(std::collections::BTreeMap::from([
244
-
(
245
-
"k".to_string(),
246
-
Ipld::Bytes(b"app.bsky.feed.post/abc".to_vec()),
247
-
),
248
-
("v".to_string(), Ipld::Link(record_cid)),
249
-
("p".to_string(), Ipld::Integer(0)),
250
-
]));
251
-
let entry2 = Ipld::Map(std::collections::BTreeMap::from([
252
-
("k".to_string(), Ipld::Bytes(b"def".to_vec())),
253
-
("v".to_string(), Ipld::Link(record_cid)),
254
-
("p".to_string(), Ipld::Integer(19)),
255
-
]));
256
-
let entry3 = Ipld::Map(std::collections::BTreeMap::from([
257
-
("k".to_string(), Ipld::Bytes(b"xyz".to_vec())),
258
-
("v".to_string(), Ipld::Link(record_cid)),
259
-
("p".to_string(), Ipld::Integer(19)),
260
-
]));
261
-
let node = Ipld::Map(std::collections::BTreeMap::from([(
262
-
"e".to_string(),
263
-
Ipld::List(vec![entry1, entry2, entry3]),
264
-
)]));
265
-
let node_bytes = serde_ipld_dagcbor::to_vec(&node).unwrap();
266
-
let cid = make_cid(&node_bytes);
267
-
let mut blocks = HashMap::new();
268
-
blocks.insert(cid, Bytes::from(node_bytes));
269
-
let result = verifier.verify_mst_structure(&cid, &blocks);
270
-
assert!(
271
-
result.is_ok(),
272
-
"Prefix-compressed keys should be validated correctly"
273
-
);
274
-
}
239
+
let verifier = CarVerifier::new();
240
+
let record_cid = make_cid(b"record");
241
+
let entry1 = Ipld::Map(std::collections::BTreeMap::from([
242
+
(
243
+
"k".to_string(),
244
+
Ipld::Bytes(b"app.bsky.feed.post/abc".to_vec()),
245
+
),
246
+
("v".to_string(), Ipld::Link(record_cid)),
247
+
("p".to_string(), Ipld::Integer(0)),
248
+
]));
249
+
let entry2 = Ipld::Map(std::collections::BTreeMap::from([
250
+
("k".to_string(), Ipld::Bytes(b"def".to_vec())),
251
+
("v".to_string(), Ipld::Link(record_cid)),
252
+
("p".to_string(), Ipld::Integer(19)),
253
+
]));
254
+
let entry3 = Ipld::Map(std::collections::BTreeMap::from([
255
+
("k".to_string(), Ipld::Bytes(b"xyz".to_vec())),
256
+
("v".to_string(), Ipld::Link(record_cid)),
257
+
("p".to_string(), Ipld::Integer(19)),
258
+
]));
259
+
let node = Ipld::Map(std::collections::BTreeMap::from([(
260
+
"e".to_string(),
261
+
Ipld::List(vec![entry1, entry2, entry3]),
262
+
)]));
263
+
let node_bytes = serde_ipld_dagcbor::to_vec(&node).unwrap();
264
+
let cid = make_cid(&node_bytes);
265
+
let mut blocks = HashMap::new();
266
+
blocks.insert(cid, Bytes::from(node_bytes));
267
+
let result = verifier.verify_mst_structure(&cid, &blocks);
268
+
assert!(
269
+
result.is_ok(),
270
+
"Prefix-compressed keys should be validated correctly"
271
+
);
272
+
}
275
273
276
-
#[test]
277
-
fn test_mst_validation_prefix_compression_unsorted() {
278
-
use ipld_core::ipld::Ipld;
274
+
#[test]
275
+
fn test_mst_validation_prefix_compression_unsorted() {
276
+
use ipld_core::ipld::Ipld;
279
277
280
-
let verifier = CarVerifier::new();
281
-
let record_cid = make_cid(b"record");
282
-
let entry1 = Ipld::Map(std::collections::BTreeMap::from([
283
-
(
284
-
"k".to_string(),
285
-
Ipld::Bytes(b"app.bsky.feed.post/xyz".to_vec()),
286
-
),
287
-
("v".to_string(), Ipld::Link(record_cid)),
288
-
("p".to_string(), Ipld::Integer(0)),
289
-
]));
290
-
let entry2 = Ipld::Map(std::collections::BTreeMap::from([
291
-
("k".to_string(), Ipld::Bytes(b"abc".to_vec())),
292
-
("v".to_string(), Ipld::Link(record_cid)),
293
-
("p".to_string(), Ipld::Integer(19)),
294
-
]));
295
-
let node = Ipld::Map(std::collections::BTreeMap::from([(
296
-
"e".to_string(),
297
-
Ipld::List(vec![entry1, entry2]),
298
-
)]));
299
-
let node_bytes = serde_ipld_dagcbor::to_vec(&node).unwrap();
300
-
let cid = make_cid(&node_bytes);
301
-
let mut blocks = HashMap::new();
302
-
blocks.insert(cid, Bytes::from(node_bytes));
303
-
let result = verifier.verify_mst_structure(&cid, &blocks);
304
-
assert!(
305
-
result.is_err(),
306
-
"Unsorted prefix-compressed keys should fail validation"
307
-
);
308
-
let err = result.unwrap_err();
309
-
assert!(matches!(err, VerifyError::MstValidationFailed(_)));
310
-
}
278
+
let verifier = CarVerifier::new();
279
+
let record_cid = make_cid(b"record");
280
+
let entry1 = Ipld::Map(std::collections::BTreeMap::from([
281
+
(
282
+
"k".to_string(),
283
+
Ipld::Bytes(b"app.bsky.feed.post/xyz".to_vec()),
284
+
),
285
+
("v".to_string(), Ipld::Link(record_cid)),
286
+
("p".to_string(), Ipld::Integer(0)),
287
+
]));
288
+
let entry2 = Ipld::Map(std::collections::BTreeMap::from([
289
+
("k".to_string(), Ipld::Bytes(b"abc".to_vec())),
290
+
("v".to_string(), Ipld::Link(record_cid)),
291
+
("p".to_string(), Ipld::Integer(19)),
292
+
]));
293
+
let node = Ipld::Map(std::collections::BTreeMap::from([(
294
+
"e".to_string(),
295
+
Ipld::List(vec![entry1, entry2]),
296
+
)]));
297
+
let node_bytes = serde_ipld_dagcbor::to_vec(&node).unwrap();
298
+
let cid = make_cid(&node_bytes);
299
+
let mut blocks = HashMap::new();
300
+
blocks.insert(cid, Bytes::from(node_bytes));
301
+
let result = verifier.verify_mst_structure(&cid, &blocks);
302
+
assert!(
303
+
result.is_err(),
304
+
"Unsorted prefix-compressed keys should fail validation"
305
+
);
306
+
let err = result.unwrap_err();
307
+
assert!(matches!(err, VerifyError::MstValidationFailed(_)));
311
308
}
+1
-1
tests/account_lifecycle.rs
+1
-1
tests/account_lifecycle.rs
+1
-1
tests/backup.rs
+1
-1
tests/backup.rs
+23
-21
tests/common/mod.rs
+23
-21
tests/common/mod.rs
···
52
52
}
53
53
if std::env::var("XDG_RUNTIME_DIR").is_ok() {
54
54
let _ = std::process::Command::new("podman")
55
-
.args(&["rm", "-f", "--filter", "label=tranquil_pds_test=true"])
55
+
.args(["rm", "-f", "--filter", "label=tranquil_pds_test=true"])
56
56
.output();
57
57
}
58
58
let _ = std::process::Command::new("docker")
59
-
.args(&[
59
+
.args([
60
60
"container",
61
61
"prune",
62
62
"-f",
···
83
83
unsafe {
84
84
std::env::set_var("TRANQUIL_PDS_ALLOW_INSECURE_SECRETS", "1");
85
85
}
86
-
if std::env::var("DOCKER_HOST").is_err() {
87
-
if let Ok(runtime_dir) = std::env::var("XDG_RUNTIME_DIR") {
88
-
let podman_sock = std::path::Path::new(&runtime_dir).join("podman/podman.sock");
89
-
if podman_sock.exists() {
90
-
unsafe {
91
-
std::env::set_var(
92
-
"DOCKER_HOST",
93
-
format!("unix://{}", podman_sock.display()),
94
-
);
95
-
}
86
+
if std::env::var("DOCKER_HOST").is_err()
87
+
&& let Ok(runtime_dir) = std::env::var("XDG_RUNTIME_DIR")
88
+
{
89
+
let podman_sock = std::path::Path::new(&runtime_dir).join("podman/podman.sock");
90
+
if podman_sock.exists() {
91
+
unsafe {
92
+
std::env::set_var(
93
+
"DOCKER_HOST",
94
+
format!("unix://{}", podman_sock.display()),
95
+
);
96
96
}
97
97
}
98
98
}
···
135
135
std::env::var("AWS_REGION").unwrap_or_else(|_| "us-east-1".to_string()),
136
136
);
137
137
std::env::set_var("S3_ENDPOINT", &s3_endpoint);
138
+
std::env::set_var("MAX_IMPORT_SIZE", "100000000");
138
139
}
139
140
let mock_server = MockServer::start().await;
140
141
setup_mock_appview(&mock_server).await;
···
168
169
std::env::set_var("AWS_SECRET_ACCESS_KEY", "minioadmin");
169
170
std::env::set_var("AWS_REGION", "us-east-1");
170
171
std::env::set_var("S3_ENDPOINT", &s3_endpoint);
172
+
std::env::set_var("MAX_IMPORT_SIZE", "100000000");
171
173
}
172
174
let sdk_config = aws_config::defaults(BehaviorVersion::latest())
173
175
.region("us-east-1")
···
418
420
.to_string();
419
421
let rkey = uri
420
422
.split('/')
421
-
.last()
423
+
.next_back()
422
424
.expect("URI was malformed")
423
425
.to_string();
424
426
(uri, cid, rkey)
···
472
474
.expect("Failed to mark user as admin");
473
475
}
474
476
let verification_required = body["verificationRequired"].as_bool().unwrap_or(true);
475
-
if let Some(access_jwt) = body["accessJwt"].as_str() {
476
-
if !verification_required {
477
-
return (access_jwt.to_string(), did);
478
-
}
477
+
if let Some(access_jwt) = body["accessJwt"].as_str()
478
+
&& !verification_required
479
+
{
480
+
return (access_jwt.to_string(), did);
479
481
}
480
482
let body_text: String = sqlx::query_scalar!(
481
483
"SELECT body FROM comms_queue WHERE user_id = (SELECT id FROM users WHERE did = $1) AND comms_type = 'email_verification' ORDER BY created_at DESC LIMIT 1",
···
488
490
let verification_code = lines
489
491
.iter()
490
492
.enumerate()
491
-
.find(|(_, line)| {
493
+
.find(|(_, line): &(usize, &&str)| {
492
494
line.contains("verification code is:") || line.contains("code is:")
493
495
})
494
-
.and_then(|(i, _)| lines.get(i + 1).map(|s| s.trim().to_string()))
496
+
.and_then(|(i, _)| lines.get(i + 1).map(|s: &&str| s.trim().to_string()))
495
497
.or_else(|| {
496
498
body_text
497
499
.split_whitespace()
498
-
.find(|word| {
500
+
.find(|word: &&str| {
499
501
word.contains('-') && word.chars().filter(|c| *c == '-').count() >= 3
500
502
})
501
-
.map(|s| s.to_string())
503
+
.map(|s: &str| s.to_string())
502
504
})
503
505
.unwrap_or_else(|| body_text.clone());
504
506
+6
-6
tests/delete_account.rs
+6
-6
tests/delete_account.rs
···
40
40
let handle = format!("delete-test-{}.test", ts);
41
41
let email = format!("delete-test-{}@test.com", ts);
42
42
let password = "Delete123pass!";
43
-
let (did, jwt) = create_verified_account(&client, &base_url, &handle, &email, password).await;
43
+
let (did, jwt) = create_verified_account(&client, base_url, &handle, &email, password).await;
44
44
let request_delete_res = client
45
45
.post(format!(
46
46
"{}/xrpc/com.atproto.server.requestAccountDelete",
···
97
97
let handle = format!("delete-wrongpw-{}.test", ts);
98
98
let email = format!("delete-wrongpw-{}@test.com", ts);
99
99
let password = "Correct123!";
100
-
let (did, jwt) = create_verified_account(&client, &base_url, &handle, &email, password).await;
100
+
let (did, jwt) = create_verified_account(&client, base_url, &handle, &email, password).await;
101
101
let request_delete_res = client
102
102
.post(format!(
103
103
"{}/xrpc/com.atproto.server.requestAccountDelete",
···
187
187
let handle = format!("delete-expired-{}.test", ts);
188
188
let email = format!("delete-expired-{}@test.com", ts);
189
189
let password = "Delete123!";
190
-
let (did, jwt) = create_verified_account(&client, &base_url, &handle, &email, password).await;
190
+
let (did, jwt) = create_verified_account(&client, base_url, &handle, &email, password).await;
191
191
let request_delete_res = client
192
192
.post(format!(
193
193
"{}/xrpc/com.atproto.server.requestAccountDelete",
···
242
242
let email1 = format!("delete-user1-{}@test.com", ts);
243
243
let password1 = "User1pass123!";
244
244
let (did1, jwt1) =
245
-
create_verified_account(&client, &base_url, &handle1, &email1, password1).await;
245
+
create_verified_account(&client, base_url, &handle1, &email1, password1).await;
246
246
let handle2 = format!("delete-user2-{}.test", ts);
247
247
let email2 = format!("delete-user2-{}@test.com", ts);
248
248
let password2 = "User2pass123!";
249
-
let (did2, _) = create_verified_account(&client, &base_url, &handle2, &email2, password2).await;
249
+
let (did2, _) = create_verified_account(&client, base_url, &handle2, &email2, password2).await;
250
250
let request_delete_res = client
251
251
.post(format!(
252
252
"{}/xrpc/com.atproto.server.requestAccountDelete",
···
294
294
let email = format!("delete-apppw-{}@test.com", ts);
295
295
let main_password = "Mainpass123!";
296
296
let (did, jwt) =
297
-
create_verified_account(&client, &base_url, &handle, &email, main_password).await;
297
+
create_verified_account(&client, base_url, &handle, &email, main_password).await;
298
298
let app_password_res = client
299
299
.post(format!(
300
300
"{}/xrpc/com.atproto.server.createAppPassword",
+537
tests/dpop_unit.rs
+537
tests/dpop_unit.rs
···
1
+
use base64::Engine as _;
2
+
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
3
+
use chrono::Utc;
4
+
use p256::ecdsa::{SigningKey, signature::Signer};
5
+
use serde_json::json;
6
+
7
+
use tranquil_pds::oauth::dpop::{
8
+
DPoPJwk, DPoPVerifier, compute_access_token_hash, compute_jwk_thumbprint,
9
+
};
10
+
11
+
fn create_dpop_proof(
12
+
method: &str,
13
+
htu: &str,
14
+
iat_offset_secs: i64,
15
+
alg: &str,
16
+
nonce: Option<&str>,
17
+
ath: Option<&str>,
18
+
) -> (String, p256::ecdsa::VerifyingKey) {
19
+
let signing_key = SigningKey::random(&mut rand::thread_rng());
20
+
let verifying_key = *signing_key.verifying_key();
21
+
let point = verifying_key.to_encoded_point(false);
22
+
let x = URL_SAFE_NO_PAD.encode(point.x().unwrap());
23
+
let y = URL_SAFE_NO_PAD.encode(point.y().unwrap());
24
+
25
+
let header = json!({
26
+
"typ": "dpop+jwt",
27
+
"alg": alg,
28
+
"jwk": {
29
+
"kty": "EC",
30
+
"crv": "P-256",
31
+
"x": x,
32
+
"y": y
33
+
}
34
+
});
35
+
36
+
let iat = Utc::now().timestamp() + iat_offset_secs;
37
+
let jti = uuid::Uuid::new_v4().to_string();
38
+
39
+
let mut payload = json!({
40
+
"jti": jti,
41
+
"htm": method,
42
+
"htu": htu,
43
+
"iat": iat
44
+
});
45
+
46
+
if let Some(n) = nonce {
47
+
payload["nonce"] = json!(n);
48
+
}
49
+
if let Some(a) = ath {
50
+
payload["ath"] = json!(a);
51
+
}
52
+
53
+
let header_b64 = URL_SAFE_NO_PAD.encode(header.to_string().as_bytes());
54
+
let payload_b64 = URL_SAFE_NO_PAD.encode(payload.to_string().as_bytes());
55
+
let signing_input = format!("{}.{}", header_b64, payload_b64);
56
+
57
+
let signature: p256::ecdsa::Signature = signing_key.sign(signing_input.as_bytes());
58
+
let sig_b64 = URL_SAFE_NO_PAD.encode(signature.to_bytes());
59
+
60
+
let proof = format!("{}.{}.{}", header_b64, payload_b64, sig_b64);
61
+
(proof, verifying_key)
62
+
}
63
+
64
+
fn create_dpop_proof_with_invalid_sig(method: &str, htu: &str, alg: &str) -> String {
65
+
let signing_key = SigningKey::random(&mut rand::thread_rng());
66
+
let verifying_key = *signing_key.verifying_key();
67
+
let point = verifying_key.to_encoded_point(false);
68
+
let x = URL_SAFE_NO_PAD.encode(point.x().unwrap());
69
+
let y = URL_SAFE_NO_PAD.encode(point.y().unwrap());
70
+
71
+
let header = json!({
72
+
"typ": "dpop+jwt",
73
+
"alg": alg,
74
+
"jwk": {
75
+
"kty": "EC",
76
+
"crv": "P-256",
77
+
"x": x,
78
+
"y": y
79
+
}
80
+
});
81
+
82
+
let iat = Utc::now().timestamp();
83
+
let jti = uuid::Uuid::new_v4().to_string();
84
+
85
+
let payload = json!({
86
+
"jti": jti,
87
+
"htm": method,
88
+
"htu": htu,
89
+
"iat": iat
90
+
});
91
+
92
+
let header_b64 = URL_SAFE_NO_PAD.encode(header.to_string().as_bytes());
93
+
let payload_b64 = URL_SAFE_NO_PAD.encode(payload.to_string().as_bytes());
94
+
95
+
let fake_sig = URL_SAFE_NO_PAD.encode(vec![0u8; 64]);
96
+
97
+
format!("{}.{}.{}", header_b64, payload_b64, fake_sig)
98
+
}
99
+
100
+
#[test]
101
+
fn test_dpop_htu_query_params_stripped() {
102
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
103
+
let url_with_query = "https://pds.example/xrpc/com.atproto.server.getSession?foo=bar";
104
+
let url_without_query = "https://pds.example/xrpc/com.atproto.server.getSession";
105
+
106
+
let (proof, _) = create_dpop_proof("GET", url_with_query, 0, "ES256", None, None);
107
+
let result = verifier.verify_proof(&proof, "GET", url_without_query, None);
108
+
assert!(
109
+
result.is_ok(),
110
+
"Query params in htu should be stripped for comparison"
111
+
);
112
+
}
113
+
114
+
#[test]
115
+
fn test_dpop_htu_fragment_behavior() {
116
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
117
+
let url_with_fragment = "https://pds.example/xrpc/foo#fragment";
118
+
let url_without_fragment = "https://pds.example/xrpc/foo";
119
+
120
+
let (proof, _) = create_dpop_proof("GET", url_with_fragment, 0, "ES256", None, None);
121
+
let result = verifier.verify_proof(&proof, "GET", url_without_fragment, None);
122
+
123
+
assert!(
124
+
result.is_err(),
125
+
"Fragment in htu should cause mismatch (currently NOT stripped)"
126
+
);
127
+
}
128
+
129
+
#[test]
130
+
fn test_dpop_es512_algorithm_rejected() {
131
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
132
+
let url = "https://pds.example/xrpc/foo";
133
+
134
+
let signing_key = SigningKey::random(&mut rand::thread_rng());
135
+
let verifying_key = *signing_key.verifying_key();
136
+
let point = verifying_key.to_encoded_point(false);
137
+
let x = URL_SAFE_NO_PAD.encode(point.x().unwrap());
138
+
let y = URL_SAFE_NO_PAD.encode(point.y().unwrap());
139
+
140
+
let header = json!({
141
+
"typ": "dpop+jwt",
142
+
"alg": "ES512",
143
+
"jwk": {
144
+
"kty": "EC",
145
+
"crv": "P-256",
146
+
"x": x,
147
+
"y": y
148
+
}
149
+
});
150
+
151
+
let payload = json!({
152
+
"jti": uuid::Uuid::new_v4().to_string(),
153
+
"htm": "GET",
154
+
"htu": url,
155
+
"iat": Utc::now().timestamp()
156
+
});
157
+
158
+
let header_b64 = URL_SAFE_NO_PAD.encode(header.to_string().as_bytes());
159
+
let payload_b64 = URL_SAFE_NO_PAD.encode(payload.to_string().as_bytes());
160
+
let signing_input = format!("{}.{}", header_b64, payload_b64);
161
+
let signature: p256::ecdsa::Signature = signing_key.sign(signing_input.as_bytes());
162
+
let sig_b64 = URL_SAFE_NO_PAD.encode(signature.to_bytes());
163
+
let proof = format!("{}.{}.{}", header_b64, payload_b64, sig_b64);
164
+
165
+
let result = verifier.verify_proof(&proof, "GET", url, None);
166
+
assert!(result.is_err(), "ES512 should be rejected as unsupported");
167
+
}
168
+
169
+
#[test]
170
+
fn test_dpop_iat_clock_skew_within_bounds() {
171
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
172
+
let url = "https://pds.example/xrpc/foo";
173
+
174
+
let (proof_299s_future, _) = create_dpop_proof("GET", url, 299, "ES256", None, None);
175
+
let result = verifier.verify_proof(&proof_299s_future, "GET", url, None);
176
+
assert!(
177
+
result.is_ok(),
178
+
"299s in future should be within clock skew tolerance"
179
+
);
180
+
181
+
let (proof_299s_past, _) = create_dpop_proof("GET", url, -299, "ES256", None, None);
182
+
let result = verifier.verify_proof(&proof_299s_past, "GET", url, None);
183
+
assert!(
184
+
result.is_ok(),
185
+
"299s in past should be within clock skew tolerance"
186
+
);
187
+
}
188
+
189
+
#[test]
190
+
fn test_dpop_iat_clock_skew_beyond_bounds() {
191
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
192
+
let url = "https://pds.example/xrpc/foo";
193
+
194
+
let (proof_301s_future, _) = create_dpop_proof("GET", url, 301, "ES256", None, None);
195
+
let result = verifier.verify_proof(&proof_301s_future, "GET", url, None);
196
+
assert!(
197
+
result.is_err(),
198
+
"301s in future should exceed clock skew tolerance"
199
+
);
200
+
201
+
let (proof_301s_past, _) = create_dpop_proof("GET", url, -301, "ES256", None, None);
202
+
let result = verifier.verify_proof(&proof_301s_past, "GET", url, None);
203
+
assert!(
204
+
result.is_err(),
205
+
"301s in past should exceed clock skew tolerance"
206
+
);
207
+
}
208
+
209
+
#[test]
210
+
fn test_dpop_http_method_case_insensitive() {
211
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
212
+
let url = "https://pds.example/xrpc/foo";
213
+
214
+
let (proof_lowercase, _) = create_dpop_proof("get", url, 0, "ES256", None, None);
215
+
let result = verifier.verify_proof(&proof_lowercase, "GET", url, None);
216
+
assert!(
217
+
result.is_ok(),
218
+
"HTTP method comparison should be case-insensitive"
219
+
);
220
+
221
+
let (proof_mixed, _) = create_dpop_proof("GeT", url, 0, "ES256", None, None);
222
+
let result = verifier.verify_proof(&proof_mixed, "GET", url, None);
223
+
assert!(
224
+
result.is_ok(),
225
+
"HTTP method comparison should be case-insensitive"
226
+
);
227
+
}
228
+
229
+
#[test]
230
+
fn test_dpop_http_method_mismatch() {
231
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
232
+
let url = "https://pds.example/xrpc/foo";
233
+
234
+
let (proof_post, _) = create_dpop_proof("POST", url, 0, "ES256", None, None);
235
+
let result = verifier.verify_proof(&proof_post, "GET", url, None);
236
+
assert!(result.is_err(), "HTTP method mismatch should fail");
237
+
}
238
+
239
+
#[test]
240
+
fn test_dpop_invalid_signature() {
241
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
242
+
let url = "https://pds.example/xrpc/foo";
243
+
244
+
let proof = create_dpop_proof_with_invalid_sig("GET", url, "ES256");
245
+
let result = verifier.verify_proof(&proof, "GET", url, None);
246
+
assert!(result.is_err(), "Invalid signature should be rejected");
247
+
}
248
+
249
+
#[test]
250
+
fn test_dpop_malformed_base64() {
251
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
252
+
let result = verifier.verify_proof("not.valid.base64!!!", "GET", "https://example.com", None);
253
+
assert!(result.is_err(), "Malformed base64 should be rejected");
254
+
}
255
+
256
+
#[test]
257
+
fn test_dpop_missing_parts() {
258
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
259
+
260
+
let result = verifier.verify_proof("onlyonepart", "GET", "https://example.com", None);
261
+
assert!(
262
+
result.is_err(),
263
+
"DPoP with missing parts should be rejected"
264
+
);
265
+
266
+
let result = verifier.verify_proof("two.parts", "GET", "https://example.com", None);
267
+
assert!(
268
+
result.is_err(),
269
+
"DPoP with only two parts should be rejected"
270
+
);
271
+
}
272
+
273
+
#[test]
274
+
fn test_dpop_invalid_typ() {
275
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
276
+
let url = "https://pds.example/xrpc/foo";
277
+
278
+
let signing_key = SigningKey::random(&mut rand::thread_rng());
279
+
let verifying_key = *signing_key.verifying_key();
280
+
let point = verifying_key.to_encoded_point(false);
281
+
let x = URL_SAFE_NO_PAD.encode(point.x().unwrap());
282
+
let y = URL_SAFE_NO_PAD.encode(point.y().unwrap());
283
+
284
+
let header = json!({
285
+
"typ": "jwt",
286
+
"alg": "ES256",
287
+
"jwk": {
288
+
"kty": "EC",
289
+
"crv": "P-256",
290
+
"x": x,
291
+
"y": y
292
+
}
293
+
});
294
+
295
+
let payload = json!({
296
+
"jti": uuid::Uuid::new_v4().to_string(),
297
+
"htm": "GET",
298
+
"htu": url,
299
+
"iat": Utc::now().timestamp()
300
+
});
301
+
302
+
let header_b64 = URL_SAFE_NO_PAD.encode(header.to_string().as_bytes());
303
+
let payload_b64 = URL_SAFE_NO_PAD.encode(payload.to_string().as_bytes());
304
+
let signing_input = format!("{}.{}", header_b64, payload_b64);
305
+
let signature: p256::ecdsa::Signature = signing_key.sign(signing_input.as_bytes());
306
+
let sig_b64 = URL_SAFE_NO_PAD.encode(signature.to_bytes());
307
+
let proof = format!("{}.{}.{}", header_b64, payload_b64, sig_b64);
308
+
309
+
let result = verifier.verify_proof(&proof, "GET", url, None);
310
+
assert!(result.is_err(), "Invalid typ claim should be rejected");
311
+
}
312
+
313
+
#[test]
314
+
fn test_dpop_unsupported_algorithm() {
315
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
316
+
let url = "https://pds.example/xrpc/foo";
317
+
318
+
let signing_key = SigningKey::random(&mut rand::thread_rng());
319
+
let verifying_key = *signing_key.verifying_key();
320
+
let point = verifying_key.to_encoded_point(false);
321
+
let x = URL_SAFE_NO_PAD.encode(point.x().unwrap());
322
+
let y = URL_SAFE_NO_PAD.encode(point.y().unwrap());
323
+
324
+
let header = json!({
325
+
"typ": "dpop+jwt",
326
+
"alg": "RS256",
327
+
"jwk": {
328
+
"kty": "EC",
329
+
"crv": "P-256",
330
+
"x": x,
331
+
"y": y
332
+
}
333
+
});
334
+
335
+
let payload = json!({
336
+
"jti": uuid::Uuid::new_v4().to_string(),
337
+
"htm": "GET",
338
+
"htu": url,
339
+
"iat": Utc::now().timestamp()
340
+
});
341
+
342
+
let header_b64 = URL_SAFE_NO_PAD.encode(header.to_string().as_bytes());
343
+
let payload_b64 = URL_SAFE_NO_PAD.encode(payload.to_string().as_bytes());
344
+
let signing_input = format!("{}.{}", header_b64, payload_b64);
345
+
let signature: p256::ecdsa::Signature = signing_key.sign(signing_input.as_bytes());
346
+
let sig_b64 = URL_SAFE_NO_PAD.encode(signature.to_bytes());
347
+
let proof = format!("{}.{}.{}", header_b64, payload_b64, sig_b64);
348
+
349
+
let result = verifier.verify_proof(&proof, "GET", url, None);
350
+
assert!(result.is_err(), "Unsupported algorithm should be rejected");
351
+
}
352
+
353
+
#[test]
354
+
fn test_dpop_access_token_hash() {
355
+
let token = "test-access-token";
356
+
let hash = compute_access_token_hash(token);
357
+
assert!(!hash.is_empty());
358
+
359
+
let hash2 = compute_access_token_hash(token);
360
+
assert_eq!(hash, hash2, "Same token should produce same hash");
361
+
362
+
let hash3 = compute_access_token_hash("different-token");
363
+
assert_ne!(hash, hash3, "Different token should produce different hash");
364
+
}
365
+
366
+
#[test]
367
+
fn test_dpop_nonce_generation_and_validation() {
368
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
369
+
let nonce = verifier.generate_nonce();
370
+
assert!(!nonce.is_empty());
371
+
372
+
let result = verifier.validate_nonce(&nonce);
373
+
assert!(result.is_ok(), "Freshly generated nonce should be valid");
374
+
}
375
+
376
+
#[test]
377
+
fn test_dpop_nonce_invalid_encoding() {
378
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
379
+
let result = verifier.validate_nonce("not-valid-base64!!!");
380
+
assert!(result.is_err(), "Invalid base64 nonce should be rejected");
381
+
}
382
+
383
+
#[test]
384
+
fn test_dpop_nonce_too_short() {
385
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
386
+
let short_nonce = URL_SAFE_NO_PAD.encode(vec![0u8; 10]);
387
+
let result = verifier.validate_nonce(&short_nonce);
388
+
assert!(result.is_err(), "Too short nonce should be rejected");
389
+
}
390
+
391
+
#[test]
392
+
fn test_dpop_nonce_tampered_signature() {
393
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
394
+
let nonce = verifier.generate_nonce();
395
+
396
+
let nonce_bytes = URL_SAFE_NO_PAD.decode(&nonce).unwrap();
397
+
let mut tampered = nonce_bytes.clone();
398
+
tampered[10] ^= 0xFF;
399
+
let tampered_nonce = URL_SAFE_NO_PAD.encode(&tampered);
400
+
401
+
let result = verifier.validate_nonce(&tampered_nonce);
402
+
assert!(result.is_err(), "Tampered nonce should be rejected");
403
+
}
404
+
405
+
#[test]
406
+
fn test_jwk_thumbprint_ec() {
407
+
let jwk = DPoPJwk {
408
+
kty: "EC".to_string(),
409
+
crv: Some("P-256".to_string()),
410
+
x: Some("test_x".to_string()),
411
+
y: Some("test_y".to_string()),
412
+
};
413
+
let thumbprint = compute_jwk_thumbprint(&jwk).unwrap();
414
+
assert!(!thumbprint.is_empty());
415
+
416
+
let thumbprint2 = compute_jwk_thumbprint(&jwk).unwrap();
417
+
assert_eq!(
418
+
thumbprint, thumbprint2,
419
+
"Same JWK should produce same thumbprint"
420
+
);
421
+
}
422
+
423
+
#[test]
424
+
fn test_jwk_thumbprint_okp() {
425
+
let jwk = DPoPJwk {
426
+
kty: "OKP".to_string(),
427
+
crv: Some("Ed25519".to_string()),
428
+
x: Some("test_x".to_string()),
429
+
y: None,
430
+
};
431
+
let thumbprint = compute_jwk_thumbprint(&jwk).unwrap();
432
+
assert!(!thumbprint.is_empty());
433
+
}
434
+
435
+
#[test]
436
+
fn test_jwk_thumbprint_unsupported_kty() {
437
+
let jwk = DPoPJwk {
438
+
kty: "RSA".to_string(),
439
+
crv: None,
440
+
x: None,
441
+
y: None,
442
+
};
443
+
let result = compute_jwk_thumbprint(&jwk);
444
+
assert!(result.is_err(), "Unsupported key type should error");
445
+
}
446
+
447
+
#[test]
448
+
fn test_jwk_thumbprint_missing_fields() {
449
+
let jwk = DPoPJwk {
450
+
kty: "EC".to_string(),
451
+
crv: None,
452
+
x: None,
453
+
y: None,
454
+
};
455
+
let result = compute_jwk_thumbprint(&jwk);
456
+
assert!(result.is_err(), "Missing crv should error");
457
+
}
458
+
459
+
#[test]
460
+
fn test_dpop_uri_normalization_preserves_port() {
461
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
462
+
let url_with_port = "https://pds.example:8080/xrpc/foo";
463
+
464
+
let (proof, _) = create_dpop_proof("GET", url_with_port, 0, "ES256", None, None);
465
+
let result = verifier.verify_proof(&proof, "GET", url_with_port, None);
466
+
assert!(result.is_ok(), "URL with port should work");
467
+
468
+
let url_without_port = "https://pds.example/xrpc/foo";
469
+
let result = verifier.verify_proof(&proof, "GET", url_without_port, None);
470
+
assert!(result.is_err(), "Different port should fail");
471
+
}
472
+
473
+
#[test]
474
+
fn test_dpop_uri_normalization_preserves_path() {
475
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
476
+
let url = "https://pds.example/xrpc/com.atproto.server.getSession";
477
+
478
+
let (proof, _) = create_dpop_proof("GET", url, 0, "ES256", None, None);
479
+
480
+
let different_path = "https://pds.example/xrpc/com.atproto.server.refreshSession";
481
+
let result = verifier.verify_proof(&proof, "GET", different_path, None);
482
+
assert!(result.is_err(), "Different path should fail");
483
+
}
484
+
485
+
#[test]
486
+
fn test_dpop_htu_must_be_full_url_not_path() {
487
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
488
+
let full_url = "https://pds.example/xrpc/com.atproto.server.getSession";
489
+
let path_only = "/xrpc/com.atproto.server.getSession";
490
+
491
+
let (proof_with_path, _) = create_dpop_proof("GET", path_only, 0, "ES256", None, None);
492
+
let result = verifier.verify_proof(&proof_with_path, "GET", full_url, None);
493
+
assert!(
494
+
result.is_err(),
495
+
"htu with path-only should not match full URL"
496
+
);
497
+
498
+
let (proof_with_full, _) = create_dpop_proof("GET", full_url, 0, "ES256", None, None);
499
+
let result = verifier.verify_proof(&proof_with_full, "GET", full_url, None);
500
+
assert!(result.is_ok(), "htu with full URL should match");
501
+
}
502
+
503
+
#[test]
504
+
fn test_dpop_htu_scheme_must_match() {
505
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
506
+
let https_url = "https://pds.example/xrpc/foo";
507
+
let http_url = "http://pds.example/xrpc/foo";
508
+
509
+
let (proof, _) = create_dpop_proof("GET", http_url, 0, "ES256", None, None);
510
+
let result = verifier.verify_proof(&proof, "GET", https_url, None);
511
+
assert!(result.is_err(), "HTTP vs HTTPS scheme mismatch should fail");
512
+
}
513
+
514
+
#[test]
515
+
fn test_dpop_htu_host_must_match() {
516
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
517
+
let url1 = "https://pds1.example/xrpc/foo";
518
+
let url2 = "https://pds2.example/xrpc/foo";
519
+
520
+
let (proof, _) = create_dpop_proof("GET", url1, 0, "ES256", None, None);
521
+
let result = verifier.verify_proof(&proof, "GET", url2, None);
522
+
assert!(result.is_err(), "Different host should fail");
523
+
}
524
+
525
+
#[test]
526
+
fn test_dpop_server_must_check_full_url_not_path() {
527
+
let verifier = DPoPVerifier::new(b"test-secret-32-bytes-long!!!!!!!");
528
+
let full_url = "https://pds.example/xrpc/com.atproto.server.getSession";
529
+
let path_only = "/xrpc/com.atproto.server.getSession";
530
+
531
+
let (proof, _) = create_dpop_proof("GET", full_url, 0, "ES256", None, None);
532
+
let result = verifier.verify_proof(&proof, "GET", path_only, None);
533
+
assert!(
534
+
result.is_err(),
535
+
"Server checking path-only against full URL htu should fail"
536
+
);
537
+
}
+8
-8
tests/email_update.rs
+8
-8
tests/email_update.rs
···
59
59
let base_url = common::base_url().await;
60
60
let handle = format!("er{}", &uuid::Uuid::new_v4().simple().to_string()[..12]);
61
61
let email = format!("{}@example.com", handle);
62
-
let (access_jwt, _) = create_verified_account(&client, &base_url, &handle, &email).await;
62
+
let (access_jwt, _) = create_verified_account(&client, base_url, &handle, &email).await;
63
63
64
64
let res = client
65
65
.post(format!(
···
82
82
let pool = common::get_test_db_pool().await;
83
83
let handle = format!("eu{}", &uuid::Uuid::new_v4().simple().to_string()[..12]);
84
84
let email = format!("{}@example.com", handle);
85
-
let (access_jwt, did) = create_verified_account(&client, &base_url, &handle, &email).await;
85
+
let (access_jwt, did) = create_verified_account(&client, base_url, &handle, &email).await;
86
86
let new_email = format!("new_{}@example.com", handle);
87
87
88
88
let res = client
···
126
126
let base_url = common::base_url().await;
127
127
let handle = format!("ed{}", &uuid::Uuid::new_v4().simple().to_string()[..12]);
128
128
let email = format!("{}@example.com", handle);
129
-
let (access_jwt, _) = create_verified_account(&client, &base_url, &handle, &email).await;
129
+
let (access_jwt, _) = create_verified_account(&client, base_url, &handle, &email).await;
130
130
let new_email = format!("direct_{}@example.com", handle);
131
131
132
132
let res = client
···
147
147
let base_url = common::base_url().await;
148
148
let handle = format!("es{}", &uuid::Uuid::new_v4().simple().to_string()[..12]);
149
149
let email = format!("{}@example.com", handle);
150
-
let (access_jwt, _) = create_verified_account(&client, &base_url, &handle, &email).await;
150
+
let (access_jwt, _) = create_verified_account(&client, base_url, &handle, &email).await;
151
151
152
152
let res = client
153
153
.post(format!("{}/xrpc/com.atproto.server.updateEmail", base_url))
···
169
169
let base_url = common::base_url().await;
170
170
let handle = format!("eb{}", &uuid::Uuid::new_v4().simple().to_string()[..12]);
171
171
let email = format!("{}@example.com", handle);
172
-
let (access_jwt, _) = create_verified_account(&client, &base_url, &handle, &email).await;
172
+
let (access_jwt, _) = create_verified_account(&client, base_url, &handle, &email).await;
173
173
let new_email = format!("badtok_{}@example.com", handle);
174
174
175
175
let res = client
···
220
220
let base_url = common::base_url().await;
221
221
let handle = format!("ef{}", &uuid::Uuid::new_v4().simple().to_string()[..12]);
222
222
let email = format!("{}@example.com", handle);
223
-
let (access_jwt, _) = create_verified_account(&client, &base_url, &handle, &email).await;
223
+
let (access_jwt, _) = create_verified_account(&client, base_url, &handle, &email).await;
224
224
225
225
let res = client
226
226
.post(format!("{}/xrpc/com.atproto.server.updateEmail", base_url))
···
470
470
471
471
let handle1 = format!("d1{}", &uuid::Uuid::new_v4().simple().to_string()[..12]);
472
472
let email1 = format!("{}@example.com", handle1);
473
-
let (_, _) = create_verified_account(&client, &base_url, &handle1, &email1).await;
473
+
let (_, _) = create_verified_account(&client, base_url, &handle1, &email1).await;
474
474
475
475
let handle2 = format!("d2{}", &uuid::Uuid::new_v4().simple().to_string()[..12]);
476
476
let email2 = format!("{}@example.com", handle2);
477
-
let (access_jwt2, did2) = create_verified_account(&client, &base_url, &handle2, &email2).await;
477
+
let (access_jwt2, did2) = create_verified_account(&client, base_url, &handle2, &email2).await;
478
478
479
479
let res = client
480
480
.post(format!(
+30
-30
tests/firehose_validation.rs
+30
-30
tests/firehose_validation.rs
···
232
232
tungstenite::Message::Binary(bin) => bin,
233
233
_ => continue,
234
234
};
235
-
if let Ok((h, f)) = parse_frame(&raw_bytes) {
236
-
if f.repo == did {
237
-
frame_opt = Some((h, f));
238
-
break;
239
-
}
235
+
if let Ok((h, f)) = parse_frame(&raw_bytes)
236
+
&& f.repo == did
237
+
{
238
+
frame_opt = Some((h, f));
239
+
break;
240
240
}
241
241
}
242
242
})
···
427
427
tungstenite::Message::Binary(bin) => bin,
428
428
_ => continue,
429
429
};
430
-
if let Ok((_, f)) = parse_frame(&raw_bytes) {
431
-
if f.repo == did {
432
-
frame_opt = Some(f);
433
-
break;
434
-
}
430
+
if let Ok((_, f)) = parse_frame(&raw_bytes)
431
+
&& f.repo == did
432
+
{
433
+
frame_opt = Some(f);
434
+
break;
435
435
}
436
436
}
437
437
})
···
504
504
tungstenite::Message::Binary(bin) => bin,
505
505
_ => continue,
506
506
};
507
-
if let Ok((_, f)) = parse_frame(&raw_bytes) {
508
-
if f.repo == did {
509
-
first_frame_opt = Some(f);
510
-
break;
511
-
}
507
+
if let Ok((_, f)) = parse_frame(&raw_bytes)
508
+
&& f.repo == did
509
+
{
510
+
first_frame_opt = Some(f);
511
+
break;
512
512
}
513
513
}
514
514
})
···
554
554
tungstenite::Message::Binary(bin) => bin,
555
555
_ => continue,
556
556
};
557
-
if let Ok((_, f)) = parse_frame(&raw_bytes) {
558
-
if f.repo == did {
559
-
second_frame_opt = Some(f);
560
-
break;
561
-
}
557
+
if let Ok((_, f)) = parse_frame(&raw_bytes)
558
+
&& f.repo == did
559
+
{
560
+
second_frame_opt = Some(f);
561
+
break;
562
562
}
563
563
}
564
564
})
···
626
626
tungstenite::Message::Binary(bin) => bin,
627
627
_ => continue,
628
628
};
629
-
if let Ok((_, f)) = parse_frame(&raw) {
630
-
if f.repo == did {
631
-
raw_bytes_opt = Some(raw.to_vec());
632
-
break;
633
-
}
629
+
if let Ok((_, f)) = parse_frame(&raw)
630
+
&& f.repo == did
631
+
{
632
+
raw_bytes_opt = Some(raw.to_vec());
633
+
break;
634
634
}
635
635
}
636
636
})
···
826
826
found_info = true;
827
827
println!("Found OutdatedCursor info frame!");
828
828
}
829
-
} else if let Ok((_, frame)) = parse_frame(&bin) {
830
-
if frame.repo == did {
831
-
found_commit = true;
832
-
println!("Found commit for our DID");
833
-
}
829
+
} else if let Ok((_, frame)) = parse_frame(&bin)
830
+
&& frame.repo == did
831
+
{
832
+
found_commit = true;
833
+
println!("Found commit for our DID");
834
834
}
835
835
if found_commit {
836
836
break;
+1
-1
tests/import_verification.rs
+1
-1
tests/import_verification.rs
···
307
307
assert_eq!(res.status(), StatusCode::OK);
308
308
let body: serde_json::Value = res.json().await.unwrap();
309
309
let uri = body["uri"].as_str().unwrap();
310
-
let rkey = uri.split('/').last().unwrap().to_string();
310
+
let rkey = uri.split('/').next_back().unwrap().to_string();
311
311
rkeys.push(rkey);
312
312
}
313
313
for rkey in &rkeys {
+4
-4
tests/import_with_verification.rs
+4
-4
tests/import_with_verification.rs
···
192
192
let signing_key = SigningKey::from_slice(&key_bytes).expect("Failed to create signing key");
193
193
let hostname = std::env::var("PDS_HOSTNAME").unwrap_or_else(|_| "localhost".to_string());
194
194
let pds_endpoint = format!("https://{}", hostname);
195
-
let handle = did.split(':').last().unwrap_or("user");
195
+
let handle = did.split(':').next_back().unwrap_or("user");
196
196
let did_doc = create_did_document(&did, handle, &signing_key, &pds_endpoint);
197
197
let mock_plc = setup_mock_plc_directory(&did, did_doc).await;
198
198
unsafe {
···
236
236
SigningKey::from_slice(&key_bytes).expect("Failed to create signing key");
237
237
let hostname = std::env::var("PDS_HOSTNAME").unwrap_or_else(|_| "localhost".to_string());
238
238
let pds_endpoint = format!("https://{}", hostname);
239
-
let handle = did.split(':').last().unwrap_or("user");
239
+
let handle = did.split(':').next_back().unwrap_or("user");
240
240
let did_doc = create_did_document(&did, handle, &correct_signing_key, &pds_endpoint);
241
241
let mock_plc = setup_mock_plc_directory(&did, did_doc).await;
242
242
unsafe {
···
285
285
let wrong_did = "did:plc:wrongdidthatdoesnotmatch";
286
286
let hostname = std::env::var("PDS_HOSTNAME").unwrap_or_else(|_| "localhost".to_string());
287
287
let pds_endpoint = format!("https://{}", hostname);
288
-
let handle = did.split(':').last().unwrap_or("user");
288
+
let handle = did.split(':').next_back().unwrap_or("user");
289
289
let did_doc = create_did_document(&did, handle, &signing_key, &pds_endpoint);
290
290
let mock_plc = setup_mock_plc_directory(&did, did_doc).await;
291
291
unsafe {
···
370
370
.await
371
371
.expect("Failed to get user signing key");
372
372
let signing_key = SigningKey::from_slice(&key_bytes).expect("Failed to create signing key");
373
-
let handle = did.split(':').last().unwrap_or("user");
373
+
let handle = did.split(':').next_back().unwrap_or("user");
374
374
let did_doc_without_key = json!({
375
375
"@context": ["https://www.w3.org/ns/did/v1"],
376
376
"id": did,
+6
-6
tests/jwt_security.rs
+6
-6
tests/jwt_security.rs
···
44
44
let token = create_access_token(did, &key_bytes).expect("create token");
45
45
let parts: Vec<&str> = token.split('.').collect();
46
46
47
-
let forged_signature = URL_SAFE_NO_PAD.encode(&[0u8; 64]);
47
+
let forged_signature = URL_SAFE_NO_PAD.encode([0u8; 64]);
48
48
let forged_token = format!("{}.{}.{}", parts[0], parts[1], forged_signature);
49
49
let result = verify_access_token(&forged_token, &key_bytes);
50
50
assert!(result.is_err(), "Forged signature must be rejected");
···
121
121
let mut mac = HmacSha256::new_from_slice(&key_bytes).unwrap();
122
122
mac.update(message.as_bytes());
123
123
let hmac_sig = mac.finalize().into_bytes();
124
-
let hs256_token = format!("{}.{}", message, URL_SAFE_NO_PAD.encode(&hmac_sig));
124
+
let hs256_token = format!("{}.{}", message, URL_SAFE_NO_PAD.encode(hmac_sig));
125
125
assert!(
126
126
verify_access_token(&hs256_token, &key_bytes).is_err(),
127
127
"HS256 substitution must be rejected"
···
130
130
for (alg, sig_len) in [("RS256", 256), ("ES256", 64)] {
131
131
let header = json!({ "alg": alg, "typ": TOKEN_TYPE_ACCESS });
132
132
let header_b64 = URL_SAFE_NO_PAD.encode(serde_json::to_string(&header).unwrap());
133
-
let fake_sig = URL_SAFE_NO_PAD.encode(&vec![1u8; sig_len]);
133
+
let fake_sig = URL_SAFE_NO_PAD.encode(vec![1u8; sig_len]);
134
134
let token = format!("{}.{}.{}", header_b64, claims_b64, fake_sig);
135
135
assert!(
136
136
verify_access_token(&token, &key_bytes).is_err(),
···
335
335
336
336
let invalid_header = URL_SAFE_NO_PAD.encode("{not valid json}");
337
337
let claims_b64 = URL_SAFE_NO_PAD.encode(r#"{"sub":"test"}"#);
338
-
let fake_sig = URL_SAFE_NO_PAD.encode(&[1u8; 64]);
338
+
let fake_sig = URL_SAFE_NO_PAD.encode([1u8; 64]);
339
339
assert!(
340
340
verify_access_token(
341
341
&format!("{}.{}.{}", invalid_header, claims_b64, fake_sig),
···
439
439
440
440
let header_b64 = URL_SAFE_NO_PAD.encode(r#"{"alg":"ES256K"}"#);
441
441
let claims_b64 = URL_SAFE_NO_PAD.encode(r#"{"iss":"did:plc:iss","sub":"did:plc:sub"}"#);
442
-
let fake_sig = URL_SAFE_NO_PAD.encode(&[0u8; 64]);
442
+
let fake_sig = URL_SAFE_NO_PAD.encode([0u8; 64]);
443
443
let unverified = format!("{}.{}.{}", header_b64, claims_b64, fake_sig);
444
444
assert_eq!(get_did_from_token(&unverified).unwrap(), "did:plc:sub");
445
445
···
479
479
"{}.{}.{}",
480
480
parts[0],
481
481
parts[1],
482
-
URL_SAFE_NO_PAD.encode(&[0xFFu8; 64])
482
+
URL_SAFE_NO_PAD.encode([0xFFu8; 64])
483
483
);
484
484
let _ = verify_access_token(&almost_valid_token, &key_bytes);
485
485
let _ = verify_access_token(&completely_invalid_token, &key_bytes);
+4
-4
tests/lifecycle_record.rs
+4
-4
tests/lifecycle_record.rs
···
385
385
let (alice_did, alice_jwt) = setup_new_user("alice-auth").await;
386
386
let (_bob_did, bob_jwt) = setup_new_user("bob-auth").await;
387
387
let (post_uri, _) = create_post(&client, &alice_did, &alice_jwt, "Alice's post").await;
388
-
let post_rkey = post_uri.split('/').last().unwrap();
388
+
let post_rkey = post_uri.split('/').next_back().unwrap();
389
389
let post_payload = json!({
390
390
"repo": alice_did,
391
391
"collection": "app.bsky.feed.post",
···
630
630
assert_eq!(records.len(), 5);
631
631
let rkeys: Vec<&str> = records
632
632
.iter()
633
-
.map(|r| r["uri"].as_str().unwrap().split('/').last().unwrap())
633
+
.map(|r| r["uri"].as_str().unwrap().split('/').next_back().unwrap())
634
634
.collect();
635
635
assert_eq!(
636
636
rkeys,
···
661
661
.as_array()
662
662
.unwrap()
663
663
.iter()
664
-
.map(|r| r["uri"].as_str().unwrap().split('/').last().unwrap())
664
+
.map(|r| r["uri"].as_str().unwrap().split('/').next_back().unwrap())
665
665
.collect();
666
666
assert_eq!(
667
667
rev_rkeys,
···
733
733
.as_array()
734
734
.unwrap()
735
735
.iter()
736
-
.map(|r| r["uri"].as_str().unwrap().split('/').last().unwrap())
736
+
.map(|r| r["uri"].as_str().unwrap().split('/').next_back().unwrap())
737
737
.collect();
738
738
for rkey in &range_rkeys {
739
739
assert!(
+1
-1
tests/lifecycle_session.rs
+1
-1
tests/lifecycle_session.rs
···
461
461
let did = account["did"].as_str().unwrap().to_string();
462
462
let jwt = verify_new_account(&client, &did).await;
463
463
let (post_uri, _) = create_post(&client, &did, &jwt, "Post before deactivation").await;
464
-
let post_rkey = post_uri.split('/').last().unwrap();
464
+
let post_rkey = post_uri.split('/').next_back().unwrap();
465
465
let status_before = client
466
466
.get(format!(
467
467
"{}/xrpc/com.atproto.server.checkAccountStatus",
+3
-3
tests/oauth.rs
+3
-3
tests/oauth.rs
···
21
21
let code_verifier = URL_SAFE_NO_PAD.encode(verifier_bytes);
22
22
let mut hasher = Sha256::new();
23
23
hasher.update(code_verifier.as_bytes());
24
-
let code_challenge = URL_SAFE_NO_PAD.encode(&hasher.finalize());
24
+
let code_challenge = URL_SAFE_NO_PAD.encode(hasher.finalize());
25
25
(code_verifier, code_challenge)
26
26
}
27
27
···
1036
1036
);
1037
1037
let body: Value = create_res.json().await.unwrap();
1038
1038
let uri = body["uri"].as_str().expect("Should have uri");
1039
-
let rkey = uri.split('/').last().unwrap();
1039
+
let rkey = uri.split('/').next_back().unwrap();
1040
1040
let delete_res = http_client
1041
1041
.post(format!("{}/xrpc/com.atproto.repo.deleteRecord", url))
1042
1042
.bearer_auth(&token)
···
1092
1092
);
1093
1093
let body: Value = post_res.json().await.unwrap();
1094
1094
let uri = body["uri"].as_str().unwrap();
1095
-
let rkey = uri.split('/').last().unwrap();
1095
+
let rkey = uri.split('/').next_back().unwrap();
1096
1096
let delete_res = http_client
1097
1097
.post(format!("{}/xrpc/com.atproto.repo.deleteRecord", url))
1098
1098
.bearer_auth(&token)
+3
-3
tests/oauth_lifecycle.rs
+3
-3
tests/oauth_lifecycle.rs
···
17
17
let mut hasher = Sha256::new();
18
18
hasher.update(code_verifier.as_bytes());
19
19
let hash = hasher.finalize();
20
-
let code_challenge = URL_SAFE_NO_PAD.encode(&hash);
20
+
let code_challenge = URL_SAFE_NO_PAD.encode(hash);
21
21
(code_verifier, code_challenge)
22
22
}
23
23
···
195
195
);
196
196
let create_body: Value = create_res.json().await.unwrap();
197
197
let uri = create_body["uri"].as_str().unwrap();
198
-
let rkey = uri.split('/').last().unwrap();
198
+
let rkey = uri.split('/').next_back().unwrap();
199
199
let get_res = http_client
200
200
.get(format!("{}/xrpc/com.atproto.repo.getRecord", url))
201
201
.bearer_auth(&session.access_token)
···
290
290
assert_eq!(create_res.status(), StatusCode::OK);
291
291
let create_body: Value = create_res.json().await.unwrap();
292
292
let uri = create_body["uri"].as_str().unwrap();
293
-
let rkey = uri.split('/').last().unwrap();
293
+
let rkey = uri.split('/').next_back().unwrap();
294
294
let updated_text = "Updated post content via OAuth putRecord";
295
295
let put_res = http_client
296
296
.post(format!("{}/xrpc/com.atproto.repo.putRecord", url))
+2
-2
tests/oauth_scopes.rs
+2
-2
tests/oauth_scopes.rs
···
17
17
let mut hasher = Sha256::new();
18
18
hasher.update(code_verifier.as_bytes());
19
19
let hash = hasher.finalize();
20
-
let code_challenge = URL_SAFE_NO_PAD.encode(&hash);
20
+
let code_challenge = URL_SAFE_NO_PAD.encode(hash);
21
21
(code_verifier, code_challenge)
22
22
}
23
23
···
215
215
.as_str()
216
216
.unwrap()
217
217
.split('/')
218
-
.last()
218
+
.next_back()
219
219
.unwrap();
220
220
221
221
let put_res = http_client
+5
-5
tests/oauth_security.rs
+5
-5
tests/oauth_security.rs
···
17
17
let code_verifier = URL_SAFE_NO_PAD.encode(verifier_bytes);
18
18
let mut hasher = Sha256::new();
19
19
hasher.update(code_verifier.as_bytes());
20
-
let code_challenge = URL_SAFE_NO_PAD.encode(&hasher.finalize());
20
+
let code_challenge = URL_SAFE_NO_PAD.encode(hasher.finalize());
21
21
(code_verifier, code_challenge)
22
22
}
23
23
···
120
120
let (access_token, _, _) = get_oauth_tokens(&http_client, url).await;
121
121
let parts: Vec<&str> = access_token.split('.').collect();
122
122
assert_eq!(parts.len(), 3);
123
-
let forged_sig = URL_SAFE_NO_PAD.encode(&[0u8; 32]);
123
+
let forged_sig = URL_SAFE_NO_PAD.encode([0u8; 32]);
124
124
let forged_token = format!("{}.{}.{}", parts[0], parts[1], forged_sig);
125
125
assert_eq!(
126
126
http_client
···
173
173
"{}.{}.{}",
174
174
URL_SAFE_NO_PAD.encode(serde_json::to_string(&rs256_header).unwrap()),
175
175
URL_SAFE_NO_PAD.encode(serde_json::to_string(&none_payload).unwrap()),
176
-
URL_SAFE_NO_PAD.encode(&[1u8; 64])
176
+
URL_SAFE_NO_PAD.encode([1u8; 64])
177
177
);
178
178
assert_eq!(
179
179
http_client
···
193
193
URL_SAFE_NO_PAD
194
194
.encode(serde_json::to_string(&json!({"alg":"HS256","typ":"at+jwt"})).unwrap()),
195
195
URL_SAFE_NO_PAD.encode(serde_json::to_string(&expired_payload).unwrap()),
196
-
URL_SAFE_NO_PAD.encode(&[1u8; 32])
196
+
URL_SAFE_NO_PAD.encode([1u8; 32])
197
197
);
198
198
assert_eq!(
199
199
http_client
···
678
678
"{}.{}.{}",
679
679
URL_SAFE_NO_PAD.encode(serde_json::to_string(&header).unwrap()),
680
680
URL_SAFE_NO_PAD.encode(serde_json::to_string(&payload).unwrap()),
681
-
URL_SAFE_NO_PAD.encode(&[1u8; 32])
681
+
URL_SAFE_NO_PAD.encode([1u8; 32])
682
682
);
683
683
assert_eq!(
684
684
http_client
+2
-2
tests/plc_migration.rs
+2
-2
tests/plc_migration.rs
···
727
727
"{}/xrpc/com.atproto.repo.getRecord?repo={}&collection=app.bsky.feed.post&rkey={}",
728
728
base_url().await,
729
729
did,
730
-
original_uri.split('/').last().unwrap()
730
+
original_uri.split('/').next_back().unwrap()
731
731
))
732
732
.send()
733
733
.await
···
970
970
.as_array()
971
971
.expect("Should have records array");
972
972
assert!(
973
-
records.len() >= 1,
973
+
!records.is_empty(),
974
974
"Should have at least 1 record after migration, found {}",
975
975
records.len()
976
976
);
+1
-1
tests/plc_operations.rs
+1
-1
tests/plc_operations.rs
···
114
114
.await
115
115
.unwrap();
116
116
assert_eq!(res.status(), StatusCode::BAD_REQUEST);
117
-
let handle = did.split(':').last().unwrap_or("user");
117
+
let handle = did.split(':').next_back().unwrap_or("user");
118
118
let res = client.post(format!("{}/xrpc/com.atproto.identity.submitPlcOperation", base_url().await))
119
119
.bearer_auth(&token).json(&json!({
120
120
"operation": { "type": "plc_operation", "rotationKeys": ["did:key:z123"],
+1
-1
tests/plc_validation.rs
+1
-1
tests/plc_validation.rs
···
172
172
"verificationMethods": {}, "alsoKnownAs": [], "services": {}, "prev": null
173
173
});
174
174
let signed = sign_operation(&op, &key).unwrap();
175
-
let result = verify_operation_signature(&signed, &[did_key.clone()]);
175
+
let result = verify_operation_signature(&signed, std::slice::from_ref(&did_key));
176
176
assert!(result.is_ok() && result.unwrap());
177
177
178
178
let other_key = SigningKey::random(&mut rand::thread_rng());
+316
tests/scope_edge_cases.rs
+316
tests/scope_edge_cases.rs
···
1
+
use tranquil_pds::delegation::{intersect_scopes, scopes::validate_delegation_scopes};
2
+
use tranquil_pds::oauth::scopes::{
3
+
AccountAction, IdentityAttr, ParsedScope, RepoAction, ScopePermissions, parse_scope,
4
+
parse_scope_string,
5
+
};
6
+
7
+
#[test]
8
+
fn test_repo_star_defaults_to_all_actions() {
9
+
let scope = parse_scope("repo:*");
10
+
if let ParsedScope::Repo(repo) = scope {
11
+
assert!(repo.actions.contains(&RepoAction::Create));
12
+
assert!(repo.actions.contains(&RepoAction::Update));
13
+
assert!(repo.actions.contains(&RepoAction::Delete));
14
+
assert_eq!(repo.actions.len(), 3);
15
+
} else {
16
+
panic!("Expected Repo scope");
17
+
}
18
+
}
19
+
20
+
#[test]
21
+
fn test_repo_collection_without_actions_defaults_to_all() {
22
+
let scope = parse_scope("repo:app.bsky.feed.post");
23
+
if let ParsedScope::Repo(repo) = scope {
24
+
assert!(repo.actions.contains(&RepoAction::Create));
25
+
assert!(repo.actions.contains(&RepoAction::Update));
26
+
assert!(repo.actions.contains(&RepoAction::Delete));
27
+
} else {
28
+
panic!("Expected Repo scope");
29
+
}
30
+
}
31
+
32
+
#[test]
33
+
fn test_repo_empty_string_after_colon() {
34
+
let scope = parse_scope("repo:");
35
+
if let ParsedScope::Repo(repo) = scope {
36
+
assert!(repo.collection.is_none());
37
+
} else {
38
+
panic!("Expected Repo scope");
39
+
}
40
+
}
41
+
42
+
#[test]
43
+
fn test_rpc_wildcard_aud_wildcard_forbidden() {
44
+
let scope = parse_scope("rpc:*?aud=*");
45
+
assert!(matches!(scope, ParsedScope::Unknown(_)));
46
+
}
47
+
48
+
#[test]
49
+
fn test_rpc_no_lxm_aud_wildcard_forbidden() {
50
+
let scope = parse_scope("rpc?aud=*");
51
+
assert!(matches!(scope, ParsedScope::Unknown(_)));
52
+
}
53
+
54
+
#[test]
55
+
fn test_rpc_specific_lxm_wildcard_aud_allowed() {
56
+
let scope = parse_scope("rpc:app.bsky.feed.getTimeline?aud=*");
57
+
assert!(matches!(scope, ParsedScope::Rpc(_)));
58
+
}
59
+
60
+
#[test]
61
+
fn test_rpc_wildcard_lxm_specific_aud_allowed() {
62
+
let scope = parse_scope("rpc:*?aud=did:web:api.bsky.app");
63
+
assert!(matches!(scope, ParsedScope::Rpc(_)));
64
+
}
65
+
66
+
#[test]
67
+
fn test_unknown_scope_preserved() {
68
+
let scope = parse_scope("completely:made:up:scope");
69
+
if let ParsedScope::Unknown(s) = scope {
70
+
assert_eq!(s, "completely:made:up:scope");
71
+
} else {
72
+
panic!("Expected Unknown scope");
73
+
}
74
+
}
75
+
76
+
#[test]
77
+
fn test_unknown_scope_with_params_preserved() {
78
+
let scope = parse_scope("unknown:thing?param=value");
79
+
if let ParsedScope::Unknown(s) = scope {
80
+
assert_eq!(s, "unknown:thing?param=value");
81
+
} else {
82
+
panic!("Expected Unknown scope");
83
+
}
84
+
}
85
+
86
+
#[test]
87
+
fn test_blob_empty_accept() {
88
+
let scope = parse_scope("blob");
89
+
if let ParsedScope::Blob(blob) = scope {
90
+
assert!(blob.accept.is_empty());
91
+
assert!(blob.matches_mime("anything/goes"));
92
+
} else {
93
+
panic!("Expected Blob scope");
94
+
}
95
+
}
96
+
97
+
#[test]
98
+
fn test_blob_matches_wildcard() {
99
+
let scope = parse_scope("blob:*/*");
100
+
if let ParsedScope::Blob(blob) = scope {
101
+
assert!(blob.matches_mime("image/png"));
102
+
assert!(blob.matches_mime("video/mp4"));
103
+
assert!(blob.matches_mime("application/json"));
104
+
} else {
105
+
panic!("Expected Blob scope");
106
+
}
107
+
}
108
+
109
+
#[test]
110
+
fn test_blob_type_prefix_matching() {
111
+
let scope = parse_scope("blob:image/*");
112
+
if let ParsedScope::Blob(blob) = scope {
113
+
assert!(blob.matches_mime("image/png"));
114
+
assert!(blob.matches_mime("image/jpeg"));
115
+
assert!(blob.matches_mime("image/gif"));
116
+
assert!(!blob.matches_mime("video/mp4"));
117
+
assert!(!blob.matches_mime("images/png"));
118
+
} else {
119
+
panic!("Expected Blob scope");
120
+
}
121
+
}
122
+
123
+
#[test]
124
+
fn test_account_default_action_is_read() {
125
+
let scope = parse_scope("account:email");
126
+
if let ParsedScope::Account(a) = scope {
127
+
assert_eq!(a.action, AccountAction::Read);
128
+
} else {
129
+
panic!("Expected Account scope");
130
+
}
131
+
}
132
+
133
+
#[test]
134
+
fn test_multiple_scopes_parsing() {
135
+
let scopes = parse_scope_string("atproto repo:* blob:*/* transition:generic");
136
+
assert_eq!(scopes.len(), 4);
137
+
assert!(matches!(scopes[0], ParsedScope::Atproto));
138
+
}
139
+
140
+
#[test]
141
+
fn test_permissions_null_scope_defaults_atproto() {
142
+
let perms = ScopePermissions::from_scope_string(None);
143
+
assert!(perms.has_full_access());
144
+
assert!(perms.allows_repo(RepoAction::Create, "any.collection"));
145
+
assert!(perms.allows_repo(RepoAction::Update, "any.collection"));
146
+
assert!(perms.allows_repo(RepoAction::Delete, "any.collection"));
147
+
}
148
+
149
+
#[test]
150
+
fn test_permissions_empty_string_defaults_atproto() {
151
+
let perms = ScopePermissions::from_scope_string(Some(""));
152
+
assert!(!perms.has_full_access());
153
+
}
154
+
155
+
#[test]
156
+
fn test_permissions_whitespace_only() {
157
+
let perms = ScopePermissions::from_scope_string(Some(" "));
158
+
assert!(!perms.has_full_access());
159
+
}
160
+
161
+
#[test]
162
+
fn test_permissions_repo_collection_wildcard_prefix() {
163
+
let perms = ScopePermissions::from_scope_string(Some("repo:app.bsky.*?action=create"));
164
+
assert!(perms.allows_repo(RepoAction::Create, "app.bsky.feed.post"));
165
+
assert!(perms.allows_repo(RepoAction::Create, "app.bsky.actor.profile"));
166
+
assert!(!perms.allows_repo(RepoAction::Create, "com.atproto.repo.blob"));
167
+
assert!(!perms.allows_repo(RepoAction::Update, "app.bsky.feed.post"));
168
+
}
169
+
170
+
#[test]
171
+
fn test_permissions_rpc_lxm_wildcard_prefix() {
172
+
let perms =
173
+
ScopePermissions::from_scope_string(Some("rpc:app.bsky.feed.*?aud=did:web:api.bsky.app"));
174
+
assert!(perms.allows_rpc("did:web:api.bsky.app", "app.bsky.feed.getTimeline"));
175
+
assert!(perms.allows_rpc("did:web:api.bsky.app", "app.bsky.feed.getAuthorFeed"));
176
+
assert!(!perms.allows_rpc("did:web:api.bsky.app", "app.bsky.actor.getProfile"));
177
+
}
178
+
179
+
#[test]
180
+
fn test_delegation_intersect_params_behavior() {
181
+
let result = intersect_scopes("repo:*?action=create", "repo:*?action=delete");
182
+
183
+
assert!(
184
+
result.is_empty() || result.contains("repo:*"),
185
+
"Delegation intersection with different action params: '{}'",
186
+
result
187
+
);
188
+
}
189
+
190
+
#[test]
191
+
fn test_delegation_intersect_wildcard_vs_specific() {
192
+
let result = intersect_scopes("repo:app.bsky.feed.post?action=create", "repo:*");
193
+
assert!(result.contains("repo:"));
194
+
}
195
+
196
+
#[test]
197
+
fn test_delegation_validate_known_prefixes() {
198
+
assert!(validate_delegation_scopes("atproto").is_ok());
199
+
assert!(validate_delegation_scopes("repo:*").is_ok());
200
+
assert!(validate_delegation_scopes("blob:*/*").is_ok());
201
+
assert!(validate_delegation_scopes("rpc:*").is_ok());
202
+
assert!(validate_delegation_scopes("account:email").is_ok());
203
+
assert!(validate_delegation_scopes("identity:handle").is_ok());
204
+
assert!(validate_delegation_scopes("transition:generic").is_ok());
205
+
}
206
+
207
+
#[test]
208
+
fn test_delegation_validate_unknown_prefixes() {
209
+
assert!(validate_delegation_scopes("invalid:scope").is_err());
210
+
assert!(validate_delegation_scopes("custom:something").is_err());
211
+
assert!(validate_delegation_scopes("made:up").is_err());
212
+
}
213
+
214
+
#[test]
215
+
fn test_delegation_validate_empty() {
216
+
assert!(validate_delegation_scopes("").is_ok());
217
+
}
218
+
219
+
#[test]
220
+
fn test_delegation_validate_multiple() {
221
+
assert!(validate_delegation_scopes("atproto repo:* blob:*/*").is_ok());
222
+
assert!(validate_delegation_scopes("atproto invalid:scope").is_err());
223
+
}
224
+
225
+
#[test]
226
+
fn test_delegation_intersect_empty_granted_returns_empty() {
227
+
assert_eq!(intersect_scopes("atproto", ""), "");
228
+
assert_eq!(intersect_scopes("repo:*", ""), "");
229
+
}
230
+
231
+
#[test]
232
+
fn test_delegation_intersect_no_overlap() {
233
+
let result = intersect_scopes("repo:app.bsky.feed.post", "repo:com.atproto.something");
234
+
assert!(result.is_empty());
235
+
}
236
+
237
+
#[test]
238
+
fn test_scope_with_multiple_params() {
239
+
let scope = parse_scope("repo:*?action=create&action=delete");
240
+
if let ParsedScope::Repo(repo) = scope {
241
+
assert!(repo.actions.contains(&RepoAction::Create));
242
+
assert!(repo.actions.contains(&RepoAction::Delete));
243
+
assert!(!repo.actions.contains(&RepoAction::Update));
244
+
} else {
245
+
panic!("Expected Repo scope");
246
+
}
247
+
}
248
+
249
+
#[test]
250
+
fn test_scope_invalid_action_ignored() {
251
+
let scope = parse_scope("repo:*?action=invalid");
252
+
if let ParsedScope::Repo(repo) = scope {
253
+
assert!(repo.actions.contains(&RepoAction::Create));
254
+
assert!(repo.actions.contains(&RepoAction::Update));
255
+
assert!(repo.actions.contains(&RepoAction::Delete));
256
+
} else {
257
+
panic!("Expected Repo scope");
258
+
}
259
+
}
260
+
261
+
#[test]
262
+
fn test_include_scope_parsing() {
263
+
let scope = parse_scope("include:app.bsky.authFullApp?aud=did:web:api.bsky.app");
264
+
if let ParsedScope::Include(inc) = scope {
265
+
assert_eq!(inc.nsid, "app.bsky.authFullApp");
266
+
assert_eq!(inc.aud, Some("did:web:api.bsky.app".to_string()));
267
+
} else {
268
+
panic!("Expected Include scope");
269
+
}
270
+
}
271
+
272
+
#[test]
273
+
fn test_include_scope_no_aud() {
274
+
let scope = parse_scope("include:com.example.authBasic");
275
+
if let ParsedScope::Include(inc) = scope {
276
+
assert_eq!(inc.nsid, "com.example.authBasic");
277
+
assert!(inc.aud.is_none());
278
+
} else {
279
+
panic!("Expected Include scope");
280
+
}
281
+
}
282
+
283
+
#[test]
284
+
fn test_identity_wildcard_vs_specific() {
285
+
let wildcard = parse_scope("identity:*");
286
+
let specific = parse_scope("identity:handle");
287
+
288
+
assert!(matches!(wildcard, ParsedScope::Identity(i) if i.attr == IdentityAttr::Wildcard));
289
+
assert!(matches!(specific, ParsedScope::Identity(i) if i.attr == IdentityAttr::Handle));
290
+
}
291
+
292
+
#[test]
293
+
fn test_identity_unknown_attr() {
294
+
let scope = parse_scope("identity:unknown");
295
+
assert!(matches!(scope, ParsedScope::Unknown(_)));
296
+
}
297
+
298
+
#[test]
299
+
fn test_transition_scopes_exact_match() {
300
+
assert!(matches!(
301
+
parse_scope("transition:generic"),
302
+
ParsedScope::TransitionGeneric
303
+
));
304
+
assert!(matches!(
305
+
parse_scope("transition:chat.bsky"),
306
+
ParsedScope::TransitionChat
307
+
));
308
+
assert!(matches!(
309
+
parse_scope("transition:email"),
310
+
ParsedScope::TransitionEmail
311
+
));
312
+
assert!(matches!(
313
+
parse_scope("transition:unknown"),
314
+
ParsedScope::Unknown(_)
315
+
));
316
+
}
+4
-4
tests/sync_repo.rs
+4
-4
tests/sync_repo.rs
···
115
115
let mut page_count = 0;
116
116
let max_pages = 100;
117
117
loop {
118
-
let mut params: Vec<(&str, String)> = vec![("limit".into(), "10".into())];
118
+
let mut params: Vec<(&str, String)> = vec![("limit", "10".into())];
119
119
if let Some(ref c) = cursor {
120
120
params.push(("cursor", c.clone()));
121
121
}
···
313
313
.expect("Failed to create record");
314
314
let create_body: Value = create_res.json().await.expect("Invalid JSON");
315
315
let uri = create_body["uri"].as_str().expect("No URI");
316
-
let rkey = uri.split('/').last().expect("Invalid URI");
316
+
let rkey = uri.split('/').next_back().expect("Invalid URI");
317
317
let params = [
318
318
("did", did.as_str()),
319
319
("collection", "app.bsky.feed.post"),
···
418
418
let client = client();
419
419
let (did, jwt) = setup_new_user("sync-record-lifecycle").await;
420
420
let (post_uri, _post_cid) = create_post(&client, &did, &jwt, "Post for sync record test").await;
421
-
let post_rkey = post_uri.split('/').last().unwrap();
421
+
let post_rkey = post_uri.split('/').next_back().unwrap();
422
422
let sync_record_res = client
423
423
.get(format!(
424
424
"{}/xrpc/com.atproto.sync.getRecord",
···
503
503
StatusCode::NOT_FOUND,
504
504
"Deleted record should return 404 via sync.getRecord"
505
505
);
506
-
let post2_rkey = post2_uri.split('/').last().unwrap();
506
+
let post2_rkey = post2_uri.split('/').next_back().unwrap();
507
507
let sync_post2_res = client
508
508
.get(format!(
509
509
"{}/xrpc/com.atproto.sync.getRecord",
+343
tests/validation_edge_cases.rs
+343
tests/validation_edge_cases.rs
···
1
+
use tranquil_pds::api::validation::{
2
+
HandleValidationError, MAX_DOMAIN_LABEL_LENGTH, MAX_EMAIL_LENGTH, MAX_LOCAL_PART_LENGTH,
3
+
MAX_SERVICE_HANDLE_LOCAL_PART, is_valid_email, validate_short_handle,
4
+
};
5
+
use tranquil_pds::validation::{
6
+
is_valid_did, validate_collection_nsid, validate_password, validate_record_key,
7
+
};
8
+
9
+
#[test]
10
+
fn test_record_key_boundary_min() {
11
+
assert!(validate_record_key("a").is_ok());
12
+
assert!(validate_record_key("1").is_ok());
13
+
assert!(validate_record_key("-").is_ok());
14
+
assert!(validate_record_key("_").is_ok());
15
+
assert!(validate_record_key("~").is_ok());
16
+
}
17
+
18
+
#[test]
19
+
fn test_record_key_boundary_max() {
20
+
assert!(validate_record_key(&"a".repeat(512)).is_ok());
21
+
assert!(validate_record_key(&"a".repeat(513)).is_err());
22
+
assert!(validate_record_key(&"a".repeat(1000)).is_err());
23
+
}
24
+
25
+
#[test]
26
+
fn test_record_key_special_dot_cases() {
27
+
assert!(validate_record_key(".").is_err());
28
+
assert!(validate_record_key("..").is_err());
29
+
assert!(validate_record_key("...").is_ok());
30
+
assert!(validate_record_key("a.b").is_ok());
31
+
assert!(validate_record_key(".a").is_ok());
32
+
assert!(validate_record_key("a.").is_ok());
33
+
assert!(validate_record_key("a..b").is_ok());
34
+
}
35
+
36
+
#[test]
37
+
fn test_record_key_all_valid_chars() {
38
+
assert!(validate_record_key("abc").is_ok());
39
+
assert!(validate_record_key("ABC").is_ok());
40
+
assert!(validate_record_key("123").is_ok());
41
+
assert!(validate_record_key("a-b").is_ok());
42
+
assert!(validate_record_key("a_b").is_ok());
43
+
assert!(validate_record_key("a~b").is_ok());
44
+
assert!(validate_record_key("a.b").is_ok());
45
+
assert!(validate_record_key("aA1-_.~").is_ok());
46
+
}
47
+
48
+
#[test]
49
+
fn test_record_key_invalid_chars() {
50
+
assert!(validate_record_key("a/b").is_err());
51
+
assert!(validate_record_key("a\\b").is_err());
52
+
assert!(validate_record_key("a b").is_err());
53
+
assert!(validate_record_key("a@b").is_err());
54
+
assert!(validate_record_key("a#b").is_err());
55
+
assert!(validate_record_key("a$b").is_err());
56
+
assert!(validate_record_key("a%b").is_err());
57
+
assert!(validate_record_key("a&b").is_err());
58
+
assert!(validate_record_key("a*b").is_err());
59
+
assert!(validate_record_key("a+b").is_err());
60
+
assert!(validate_record_key("a=b").is_err());
61
+
assert!(validate_record_key("a?b").is_err());
62
+
assert!(validate_record_key("a:b").is_err());
63
+
assert!(validate_record_key("a;b").is_err());
64
+
assert!(validate_record_key("a<b").is_err());
65
+
assert!(validate_record_key("a>b").is_err());
66
+
assert!(validate_record_key("a[b").is_err());
67
+
assert!(validate_record_key("a]b").is_err());
68
+
assert!(validate_record_key("a{b").is_err());
69
+
assert!(validate_record_key("a}b").is_err());
70
+
assert!(validate_record_key("a|b").is_err());
71
+
assert!(validate_record_key("a`b").is_err());
72
+
assert!(validate_record_key("a'b").is_err());
73
+
assert!(validate_record_key("a\"b").is_err());
74
+
assert!(validate_record_key("a\nb").is_err());
75
+
assert!(validate_record_key("a\tb").is_err());
76
+
assert!(validate_record_key("a\rb").is_err());
77
+
assert!(validate_record_key("a\0b").is_err());
78
+
}
79
+
80
+
#[test]
81
+
fn test_record_key_unicode() {
82
+
assert!(validate_record_key("café").is_err());
83
+
assert!(validate_record_key("日本語").is_err());
84
+
assert!(validate_record_key("emoji😀").is_err());
85
+
}
86
+
87
+
#[test]
88
+
fn test_password_length_boundaries() {
89
+
let base_valid = "Aa1";
90
+
91
+
let pass_7 = format!("{}{}", base_valid, "x".repeat(4));
92
+
assert!(validate_password(&pass_7).is_err());
93
+
94
+
let pass_8 = format!("{}{}", base_valid, "x".repeat(5));
95
+
assert!(validate_password(&pass_8).is_ok());
96
+
97
+
let pass_256 = format!("{}{}", base_valid, "x".repeat(253));
98
+
assert!(validate_password(&pass_256).is_ok());
99
+
100
+
let pass_257 = format!("{}{}", base_valid, "x".repeat(254));
101
+
assert!(validate_password(&pass_257).is_err());
102
+
}
103
+
104
+
#[test]
105
+
fn test_password_missing_requirements() {
106
+
assert!(validate_password("abcdefgh").is_err());
107
+
assert!(validate_password("ABCDEFGH").is_err());
108
+
assert!(validate_password("12345678").is_err());
109
+
110
+
assert!(validate_password("abcd1234").is_err());
111
+
assert!(validate_password("ABCD1234").is_err());
112
+
assert!(validate_password("abcdABCD").is_err());
113
+
114
+
assert!(validate_password("aB1xxxxx").is_ok());
115
+
}
116
+
117
+
#[test]
118
+
fn test_password_common_passwords() {
119
+
assert!(validate_password("Password1").is_err());
120
+
assert!(validate_password("PASSWORD1").is_err());
121
+
assert!(validate_password("password1").is_err());
122
+
assert!(validate_password("Qwerty123").is_err());
123
+
assert!(validate_password("Bluesky123").is_err());
124
+
}
125
+
126
+
#[test]
127
+
fn test_password_special_chars_allowed() {
128
+
assert!(validate_password("Aa1!@#$%").is_ok());
129
+
assert!(validate_password("Aa1^&*()").is_ok());
130
+
assert!(validate_password("Aa1 space").is_ok());
131
+
}
132
+
133
+
#[test]
134
+
fn test_did_validation_basic() {
135
+
assert!(is_valid_did("did:plc:abc123"));
136
+
assert!(is_valid_did("did:web:example.com"));
137
+
assert!(is_valid_did(
138
+
"did:key:z6MkhaXgBZDvotDkL5257faiztiGiC2QtKLGpbnnEGta2doK"
139
+
));
140
+
}
141
+
142
+
#[test]
143
+
fn test_did_validation_invalid() {
144
+
assert!(!is_valid_did(""));
145
+
assert!(!is_valid_did("did"));
146
+
assert!(!is_valid_did("did:"));
147
+
assert!(!is_valid_did("did::"));
148
+
assert!(!is_valid_did("did:plc"));
149
+
assert!(!is_valid_did("did:plc:"));
150
+
assert!(!is_valid_did(":plc:abc"));
151
+
assert!(!is_valid_did("plc:abc"));
152
+
}
153
+
154
+
#[test]
155
+
fn test_did_validation_method_case() {
156
+
assert!(!is_valid_did("did:PLC:abc123"));
157
+
assert!(!is_valid_did("did:Plc:abc123"));
158
+
assert!(!is_valid_did("DID:plc:abc123"));
159
+
}
160
+
161
+
#[test]
162
+
fn test_did_validation_method_chars() {
163
+
assert!(!is_valid_did("did:plc1:abc"));
164
+
assert!(!is_valid_did("did:plc-x:abc"));
165
+
assert!(!is_valid_did("did:plc_x:abc"));
166
+
}
167
+
168
+
#[test]
169
+
fn test_collection_nsid_minimum_segments() {
170
+
assert!(validate_collection_nsid("a.b.c").is_ok());
171
+
assert!(validate_collection_nsid("a.b").is_err());
172
+
assert!(validate_collection_nsid("a").is_err());
173
+
assert!(validate_collection_nsid("").is_err());
174
+
}
175
+
176
+
#[test]
177
+
fn test_collection_nsid_many_segments() {
178
+
assert!(validate_collection_nsid("a.b.c.d.e.f.g.h.i.j").is_ok());
179
+
}
180
+
181
+
#[test]
fn test_collection_nsid_empty_segments() {
    // Doubled, leading, or trailing dots create an empty segment — all rejected.
    for nsid in ["a..b.c", ".a.b.c", "a.b.c.", "a.b..c"] {
        assert!(
            validate_collection_nsid(nsid).is_err(),
            "{:?} should be rejected",
            nsid
        );
    }
}

#[test]
fn test_collection_nsid_valid_chars() {
    // Letters, digits, and hyphens are acceptable; case is not restricted.
    let acceptable = [
        "app.bsky.feed.post",
        "com.example.my-record",
        "app.example.record123",
        "APP.BSKY.FEED.POST",
    ];
    for nsid in acceptable {
        assert!(validate_collection_nsid(nsid).is_ok(), "{} should pass", nsid);
    }
}

#[test]
fn test_collection_nsid_invalid_chars() {
    // Underscore and other punctuation inside a segment are disallowed.
    for nsid in [
        "app.bsky.feed_post",
        "app.bsky.feed/post",
        "app.bsky.feed:post",
        "app.bsky.feed@post",
    ] {
        assert!(
            validate_collection_nsid(nsid).is_err(),
            "{} should be rejected",
            nsid
        );
    }
}

#[test]
fn test_handle_boundary_lengths() {
    // Exactly at the three-character minimum: accepted.
    assert!(validate_short_handle("abc").is_ok());

    // One below the minimum yields the specific TooShort variant.
    assert!(matches!(
        validate_short_handle("ab"),
        Err(HandleValidationError::TooShort)
    ));

    // Exactly at the maximum local-part length: accepted.
    let at_max = "a".repeat(MAX_SERVICE_HANDLE_LOCAL_PART);
    assert!(validate_short_handle(&at_max).is_ok());

    // One past the maximum yields the specific TooLong variant.
    let over_max = "a".repeat(MAX_SERVICE_HANDLE_LOCAL_PART + 1);
    assert!(matches!(
        validate_short_handle(&over_max),
        Err(HandleValidationError::TooLong)
    ));
}

#[test]
fn test_handle_hyphen_positions() {
    // Interior hyphens — even consecutive ones — are fine.
    for ok in ["a-b-c", "a--b"] {
        assert!(validate_short_handle(ok).is_ok(), "{} should pass", ok);
    }

    // A handle consisting solely of hyphens is invalid.
    assert!(validate_short_handle("---").is_err());

    // Leading and trailing hyphens map to distinct error variants.
    assert!(matches!(
        validate_short_handle("-abc"),
        Err(HandleValidationError::StartsWithInvalidChar)
    ));
    assert!(matches!(
        validate_short_handle("abc-"),
        Err(HandleValidationError::EndsWithInvalidChar)
    ));
}

#[test]
fn test_handle_case_normalization() {
    // Successful validation returns the handle lowercased.
    let cases = [
        ("ABC", "abc"),
        ("AbC123", "abc123"),
        ("MixedCase", "mixedcase"),
    ];
    for (input, expected) in cases {
        assert_eq!(validate_short_handle(input).unwrap(), expected);
    }
}

#[test]
fn test_handle_whitespace_handling() {
    // Surrounding whitespace is trimmed before validation.
    assert_eq!(validate_short_handle(" abc ").unwrap(), "abc");

    // Interior whitespace of any kind (space, tab, newline) is ContainsSpaces.
    for input in ["a b c", "a\tb", "a\nb"] {
        assert!(matches!(
            validate_short_handle(input),
            Err(HandleValidationError::ContainsSpaces)
        ));
    }
}

#[test]
fn test_email_length_boundaries() {
    // Local part exactly at the limit is accepted.
    let long_local = format!("{}@example.com", "a".repeat(MAX_LOCAL_PART_LENGTH));
    assert!(is_valid_email(&long_local));

    // One character over the local-part limit is rejected.
    let too_long_local = format!("{}@example.com", "a".repeat(MAX_LOCAL_PART_LENGTH + 1));
    assert!(!is_valid_email(&too_long_local));

    // An address whose TOTAL length exceeds MAX_EMAIL_LENGTH must be rejected
    // even when every individual label is well-formed. The previous version
    // asserted `is_valid_email(x) || !is_valid_email(x)` — a tautology that
    // can never fail and therefore tested nothing.
    // Build "a@a.a.a. ... .com" out of single-character labels so length is
    // the only possible reason for rejection.
    let filler = "a.".repeat(MAX_EMAIL_LENGTH / 2 + 1);
    let too_long_email = format!("a@{}com", filler);
    assert!(too_long_email.len() > MAX_EMAIL_LENGTH);
    assert!(!is_valid_email(&too_long_email));
}

#[test]
fn test_email_local_part_special_chars() {
    // Every special character exercised by the original cases is permitted
    // in the local part.
    let locals = [
        "user.name", "user+tag", "user!def", "user#abc", "user$def",
        "user%abc", "user&def", "user'abc", "user*def", "user=abc",
        "user?def", "user^abc", "user_def", "user`abc", "user{def",
        "user|abc", "user}def", "user~abc", "user-def",
    ];
    for local in locals {
        let email = format!("{}@example.com", local);
        assert!(is_valid_email(&email), "{} should be valid", email);
    }
}

#[test]
fn test_email_local_part_dots() {
    // Dots may not lead, trail, or repeat in the local part.
    for bad in [
        ".user@example.com",
        "user.@example.com",
        "user..name@example.com",
    ] {
        assert!(!is_valid_email(bad), "{} should be invalid", bad);
    }
    // Single interior dots are fine, even several of them.
    for good in ["user.name@example.com", "u.s.e.r@example.com"] {
        assert!(is_valid_email(good), "{} should be valid", good);
    }
}

#[test]
fn test_email_domain_labels() {
    // A domain label exactly at the limit passes; one character over fails.
    let at_limit = format!("user@{}.com", "a".repeat(MAX_DOMAIN_LABEL_LENGTH));
    assert!(is_valid_email(&at_limit));

    let over_limit = format!("user@{}.com", "a".repeat(MAX_DOMAIN_LABEL_LENGTH + 1));
    assert!(!is_valid_email(&over_limit));
}

#[test]
fn test_email_domain_hyphens() {
    // Labels may contain hyphens (even doubled) but cannot start or end with one.
    let cases = [
        ("user@-example.com", false),
        ("user@example-.com", false),
        ("user@ex-ample.com", true),
        ("user@ex--ample.com", true),
    ];
    for (email, expected) in cases {
        assert_eq!(is_valid_email(email), expected, "{}", email);
    }
}

#[test]
fn test_email_domain_must_have_dot() {
    // Bare hostnames are rejected; any dotted domain, however short, passes.
    let cases = [
        ("user@localhost", false),
        ("user@example", false),
        ("user@a.b", true),
    ];
    for (email, expected) in cases {
        assert_eq!(is_valid_email(email), expected, "{}", email);
    }
}

#[test]
fn test_email_invalid_chars() {
    // Whitespace anywhere in the address — local part or domain — is rejected.
    for email in [
        "user name@example.com",
        "user\t@example.com",
        "user\n@example.com",
        "user@exam ple.com",
    ] {
        assert!(!is_valid_email(email), "{:?} should be invalid", email);
    }
}
+3
-5
tests/verify_live_commit.rs
+3
-5
tests/verify_live_commit.rs
···
84
84
serde_ipld_dagcbor::to_vec(&unsigned).unwrap()
85
85
}
86
86
87
+
#[allow(clippy::type_complexity)]
87
88
fn parse_car(
88
89
cursor: &mut std::io::Cursor<&[u8]>,
89
90
) -> Result<(Vec<Cid>, HashMap<Cid, Bytes>), Box<dyn std::error::Error>> {
···
113
114
}
114
115
let header: CarHeader = serde_ipld_dagcbor::from_slice(&header_bytes)?;
115
116
let mut blocks = HashMap::new();
116
-
loop {
117
-
let block_len = match read_varint(cursor) {
118
-
Ok(len) => len as usize,
119
-
Err(_) => break,
120
-
};
117
+
while let Ok(len) = read_varint(cursor) {
118
+
let block_len = len as usize;
121
119
if block_len == 0 {
122
120
break;
123
121
}