+3 -2  .nix/flake.nix
···
 git
 nixd
 direnv
+libpq
 ];
 overlays = [ (import rust-overlay) ];
 pkgs = import nixpkgs {
···
 nativeBuildInputs = with pkgs; [ rust pkg-config ];
 in
 with pkgs;
-{
+{
 devShells.default = mkShell {
 inherit buildInputs nativeBuildInputs;
 LD_LIBRARY_PATH = nixpkgs.legacyPackages.x86_64-linux.lib.makeLibraryPath buildInputs;
···
 DATABASE_URL = "sqlite://data/sqlite.db";
 };
 });
-}
+}
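
The dev shell pins `DATABASE_URL` to a local SQLite file. A minimal sketch of how the Rust side might pick this up, assuming `dotenvy` (added in Cargo.toml below) is used to merge an optional `.env` file with shell-provided variables; the function name is illustrative:

```rust
use std::env;

fn database_url() -> String {
    // Load .env if present; variables already exported by the shell
    // (e.g. the Nix dev shell) are NOT overridden by .env entries.
    dotenvy::dotenv().ok();
    env::var("DATABASE_URL").expect("DATABASE_URL must be set")
}
```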
-20  .sqlx/query-02a5737bb92665ef0a3dac013eb03366ab6b31a5c4ab856e6458a52704b86e23.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT COUNT(*) FROM oauth_used_jtis WHERE jti = ?",
-  "describe": {
-    "columns": [
-      {
-        "name": "COUNT(*)",
-        "ordinal": 0,
-        "type_info": "Integer"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "02a5737bb92665ef0a3dac013eb03366ab6b31a5c4ab856e6458a52704b86e23"
-}
-12  .sqlx/query-19dc08b9f2f609e0610b6bd1e4908fc5d7922cc95b13de3214a055bf36b80284.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n INSERT INTO invites (id, did, count, created_at)\n VALUES (?, NULL, 1, datetime('now'))\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": []
-  },
-  "hash": "19dc08b9f2f609e0610b6bd1e4908fc5d7922cc95b13de3214a055bf36b80284"
-}
-20  .sqlx/query-1db52857493a1e8a7004872eaff6e8fe5dec41579dd57d696008385b8d23788d.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT data FROM blocks WHERE cid = ?",
-  "describe": {
-    "columns": [
-      {
-        "name": "data",
-        "ordinal": 0,
-        "type_info": "Blob"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "1db52857493a1e8a7004872eaff6e8fe5dec41579dd57d696008385b8d23788d"
-}
-20  .sqlx/query-22c1e98ac038509ad16ce437e6670a59d3fc97a05ea8b0f1f80dba0157c53e13.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT name FROM actor_migration",
-  "describe": {
-    "columns": [
-      {
-        "name": "name",
-        "ordinal": 0,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 0
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "22c1e98ac038509ad16ce437e6670a59d3fc97a05ea8b0f1f80dba0157c53e13"
-}
-62  .sqlx/query-243e2127a5181657d5e08c981a7a6d395fb2112ebf7a1a676d57c33866310add.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n SELECT * FROM oauth_refresh_tokens\n WHERE token = ? AND client_id = ? AND expires_at > ? AND revoked = FALSE AND dpop_thumbprint = ?\n ",
-  "describe": {
-    "columns": [
-      {
-        "name": "token",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "client_id",
-        "ordinal": 1,
-        "type_info": "Text"
-      },
-      {
-        "name": "subject",
-        "ordinal": 2,
-        "type_info": "Text"
-      },
-      {
-        "name": "dpop_thumbprint",
-        "ordinal": 3,
-        "type_info": "Text"
-      },
-      {
-        "name": "scope",
-        "ordinal": 4,
-        "type_info": "Text"
-      },
-      {
-        "name": "created_at",
-        "ordinal": 5,
-        "type_info": "Integer"
-      },
-      {
-        "name": "expires_at",
-        "ordinal": 6,
-        "type_info": "Integer"
-      },
-      {
-        "name": "revoked",
-        "ordinal": 7,
-        "type_info": "Bool"
-      }
-    ],
-    "parameters": {
-      "Right": 4
-    },
-    "nullable": [
-      false,
-      false,
-      false,
-      false,
-      true,
-      false,
-      false,
-      false
-    ]
-  },
-  "hash": "243e2127a5181657d5e08c981a7a6d395fb2112ebf7a1a676d57c33866310add"
-}
-12  .sqlx/query-2918ecf03675a789568c777904966911ca63e991dede42a2d7d87e174799ea46.json
···
-{
-  "db_name": "SQLite",
-  "query": "INSERT INTO blocks (cid, data, multicodec, multihash) VALUES (?, ?, ?, ?)",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 4
-    },
-    "nullable": []
-  },
-  "hash": "2918ecf03675a789568c777904966911ca63e991dede42a2d7d87e174799ea46"
-}
-20  .sqlx/query-2e13e052dfc64f29d9da1bce2bf844cbb918ad3bb01e386801d3b0d3be246573.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT COUNT(*) FROM oauth_refresh_tokens WHERE dpop_thumbprint = ? AND client_id = ?",
-  "describe": {
-    "columns": [
-      {
-        "name": "COUNT(*)",
-        "ordinal": 0,
-        "type_info": "Integer"
-      }
-    ],
-    "parameters": {
-      "Right": 2
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "2e13e052dfc64f29d9da1bce2bf844cbb918ad3bb01e386801d3b0d3be246573"
-}
-32  .sqlx/query-3516a6de0f3aa40b301d60479f5c34d0fd21a800328a05458ecc3ac688d016e6.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n SELECT a.email, a.status, (\n SELECT h.handle\n FROM handles h\n WHERE h.did = a.did\n ORDER BY h.created_at ASC\n LIMIT 1\n ) AS handle\n FROM accounts a\n WHERE a.did = ?\n ",
-  "describe": {
-    "columns": [
-      {
-        "name": "email",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "status",
-        "ordinal": 1,
-        "type_info": "Text"
-      },
-      {
-        "name": "handle",
-        "ordinal": 2,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false,
-      false,
-      false
-    ]
-  },
-  "hash": "3516a6de0f3aa40b301d60479f5c34d0fd21a800328a05458ecc3ac688d016e6"
-}
-20  .sqlx/query-3b4745208f268678a84401e522c3836e0632ca34a0f23bbae5297d076610f0ab.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT content FROM repo_block WHERE cid = ?",
-  "describe": {
-    "columns": [
-      {
-        "name": "content",
-        "ordinal": 0,
-        "type_info": "Blob"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "3b4745208f268678a84401e522c3836e0632ca34a0f23bbae5297d076610f0ab"
-}
-20  .sqlx/query-3d1a877177899665c37393beae31a399054b7c02d3871c6c5d317923fec8442e.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT did FROM handles WHERE handle = ?",
-  "describe": {
-    "columns": [
-      {
-        "name": "did",
-        "ordinal": 0,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "3d1a877177899665c37393beae31a399054b7c02d3871c6c5d317923fec8442e"
-}
-20  .sqlx/query-4198b96804f3a0a805e441857b452e84a083d80dca12ce95c545dc9eadbac0c3.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT plc_root FROM accounts WHERE did = ?",
-  "describe": {
-    "columns": [
-      {
-        "name": "plc_root",
-        "ordinal": 0,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "4198b96804f3a0a805e441857b452e84a083d80dca12ce95c545dc9eadbac0c3"
-}
-12  .sqlx/query-459be26080e3497b3807d22e86377eee9e19366709864e3369c867cef01c83bb.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n INSERT INTO repo_block (cid, repoRev, size, content)\n VALUES (?, ?, ?, ?)\n ON CONFLICT DO NOTHING\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 4
-    },
-    "nullable": []
-  },
-  "hash": "459be26080e3497b3807d22e86377eee9e19366709864e3369c867cef01c83bb"
-}
-26  .sqlx/query-50a7b5f57df41d06a8c11c8268d8dbef4c76bcf92c6b47b6316bf5e39fb889a7.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n SELECT a.status, h.handle\n FROM accounts a\n JOIN handles h ON a.did = h.did\n WHERE a.did = ?\n ORDER BY h.created_at ASC\n LIMIT 1\n ",
-  "describe": {
-    "columns": [
-      {
-        "name": "status",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "handle",
-        "ordinal": 1,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false,
-      false
-    ]
-  },
-  "hash": "50a7b5f57df41d06a8c11c8268d8dbef4c76bcf92c6b47b6316bf5e39fb889a7"
-}
-12  .sqlx/query-51f7f9d5bf4cbfe372a8fa130f4cabcb57766638792d61297df2fb91c2fe2937.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n INSERT INTO repo_root (did, cid, rev, indexedAt)\n VALUES (?, ?, ?, ?)\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 4
-    },
-    "nullable": []
-  },
-  "hash": "51f7f9d5bf4cbfe372a8fa130f4cabcb57766638792d61297df2fb91c2fe2937"
-}
-12  .sqlx/query-5bbf8300ca519576e4f60074cf16756bc1dca79f43e1e89c5a08b8c9d95d241f.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n INSERT INTO repo_block (cid, repoRev, size, content)\n VALUES (?, ?, ?, ?)\n ON CONFLICT DO NOTHING\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 4
-    },
-    "nullable": []
-  },
-  "hash": "5bbf8300ca519576e4f60074cf16756bc1dca79f43e1e89c5a08b8c9d95d241f"
-}
-12  .sqlx/query-5d4586821dff3ed0fd1e352946751c3bb66610a472d8c42a7bfa3a565fccc30a.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n INSERT INTO oauth_authorization_codes (\n code, client_id, subject, code_challenge, code_challenge_method,\n redirect_uri, scope, created_at, expires_at, used\n ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 10
-    },
-    "nullable": []
-  },
-  "hash": "5d4586821dff3ed0fd1e352946751c3bb66610a472d8c42a7bfa3a565fccc30a"
-}
-12  .sqlx/query-5ea8376fbbe3077b2fc62187cc29a2d03eda91fa468c7fe63306f04e160ecb5d.json
···
-{
-  "db_name": "SQLite",
-  "query": "INSERT INTO actor_migration (name, appliedAt) VALUES (?, ?)",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 2
-    },
-    "nullable": []
-  },
-  "hash": "5ea8376fbbe3077b2fc62187cc29a2d03eda91fa468c7fe63306f04e160ecb5d"
-}
-26  .sqlx/query-5f17a390750b52886f8c3ba80cb16776f3430bc91c4158aafb3012a7812a97cc.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT rev, status FROM accounts WHERE did = ?",
-  "describe": {
-    "columns": [
-      {
-        "name": "rev",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "status",
-        "ordinal": 1,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false,
-      false
-    ]
-  },
-  "hash": "5f17a390750b52886f8c3ba80cb16776f3430bc91c4158aafb3012a7812a97cc"
-}
-32  .sqlx/query-6b0a871527c5c37663ee17ec6f5ec4f97521900f45e549b0b065004a4e2e6207.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n WITH LatestHandles AS (\n SELECT did, handle\n FROM handles\n WHERE (did, created_at) IN (\n SELECT did, MAX(created_at) AS max_created_at\n FROM handles\n GROUP BY did\n )\n )\n SELECT a.did, a.password, h.handle\n FROM accounts a\n LEFT JOIN LatestHandles h ON a.did = h.did\n WHERE h.handle = ?\n ",
-  "describe": {
-    "columns": [
-      {
-        "name": "did",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "password",
-        "ordinal": 1,
-        "type_info": "Text"
-      },
-      {
-        "name": "handle",
-        "ordinal": 2,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false,
-      false,
-      false
-    ]
-  },
-  "hash": "6b0a871527c5c37663ee17ec6f5ec4f97521900f45e549b0b065004a4e2e6207"
-}
-20  .sqlx/query-73fd3e30b7694c92cf9309751d186fe622fa7d99fdf56dde7e60c3696581116c.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT COUNT(*) FROM blocks WHERE cid = ?",
-  "describe": {
-    "columns": [
-      {
-        "name": "COUNT(*)",
-        "ordinal": 0,
-        "type_info": "Integer"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "73fd3e30b7694c92cf9309751d186fe622fa7d99fdf56dde7e60c3696581116c"
-}
-32  .sqlx/query-7eb22fdfc107b33361c599fcd4ae3a4a4fafef8438c41e1fdc6d4f7fd44f1094.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT did, root, rev FROM accounts LIMIT ?",
-  "describe": {
-    "columns": [
-      {
-        "name": "did",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "root",
-        "ordinal": 1,
-        "type_info": "Text"
-      },
-      {
-        "name": "rev",
-        "ordinal": 2,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false,
-      false,
-      false
-    ]
-  },
-  "hash": "7eb22fdfc107b33361c599fcd4ae3a4a4fafef8438c41e1fdc6d4f7fd44f1094"
-}
-20  .sqlx/query-813409fb7218c548ee3e8b1226559686cd40aa81ac1b68659b087276cbb0137d.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT cid FROM blob_ref WHERE did = ?",
-  "describe": {
-    "columns": [
-      {
-        "name": "cid",
-        "ordinal": 0,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "813409fb7218c548ee3e8b1226559686cd40aa81ac1b68659b087276cbb0137d"
-}
-20  .sqlx/query-865f757ca7c8b15357622bf0d1a25745288f87ad6ace019c1f4316a4ba1efb34.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT revoked FROM oauth_refresh_tokens WHERE token = ?",
-  "describe": {
-    "columns": [
-      {
-        "name": "revoked",
-        "ordinal": 0,
-        "type_info": "Bool"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "865f757ca7c8b15357622bf0d1a25745288f87ad6ace019c1f4316a4ba1efb34"
-}
-12  .sqlx/query-87cbc4f5bb615163ff62234e0de0c69b543179cffcdaf79fcae5fd6fdc7e14c7.json
···
-{
-  "db_name": "SQLite",
-  "query": "UPDATE oauth_refresh_tokens SET revoked = TRUE WHERE token = ?",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": []
-  },
-  "hash": "87cbc4f5bb615163ff62234e0de0c69b543179cffcdaf79fcae5fd6fdc7e14c7"
-}
-74  .sqlx/query-92858ad9b0a35c3b8d4be795f88325aa4a1995f53fc90ef455ef9a499335f088.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n SELECT * FROM oauth_authorization_codes\n WHERE code = ? AND client_id = ? AND redirect_uri = ? AND expires_at > ? AND used = FALSE\n ",
-  "describe": {
-    "columns": [
-      {
-        "name": "code",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "client_id",
-        "ordinal": 1,
-        "type_info": "Text"
-      },
-      {
-        "name": "subject",
-        "ordinal": 2,
-        "type_info": "Text"
-      },
-      {
-        "name": "code_challenge",
-        "ordinal": 3,
-        "type_info": "Text"
-      },
-      {
-        "name": "code_challenge_method",
-        "ordinal": 4,
-        "type_info": "Text"
-      },
-      {
-        "name": "redirect_uri",
-        "ordinal": 5,
-        "type_info": "Text"
-      },
-      {
-        "name": "scope",
-        "ordinal": 6,
-        "type_info": "Text"
-      },
-      {
-        "name": "created_at",
-        "ordinal": 7,
-        "type_info": "Integer"
-      },
-      {
-        "name": "expires_at",
-        "ordinal": 8,
-        "type_info": "Integer"
-      },
-      {
-        "name": "used",
-        "ordinal": 9,
-        "type_info": "Bool"
-      }
-    ],
-    "parameters": {
-      "Right": 4
-    },
-    "nullable": [
-      false,
-      false,
-      false,
-      false,
-      false,
-      false,
-      true,
-      false,
-      false,
-      false
-    ]
-  },
-  "hash": "92858ad9b0a35c3b8d4be795f88325aa4a1995f53fc90ef455ef9a499335f088"
-}
-26  .sqlx/query-9890e97761e6ed1256ed32775ad4f394e199b5a3588a711ea8ad672cf666eee4.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT cid, rev FROM repo_root WHERE did = ?",
-  "describe": {
-    "columns": [
-      {
-        "name": "cid",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "rev",
-        "ordinal": 1,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false,
-      false
-    ]
-  },
-  "hash": "9890e97761e6ed1256ed32775ad4f394e199b5a3588a711ea8ad672cf666eee4"
-}
-12  .sqlx/query-9a04bdf627ee146ddaac6cdd1bacf2106b22bc215ef22ab400cd62b4353f414b.json
-26  .sqlx/query-9b6ac33211a2231754650bb0daca5ffb980c9e530ea47dd892aa06fab1450a05.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n SELECT cid, content\n FROM repo_block\n WHERE repoRev = ?\n LIMIT 15\n ",
-  "describe": {
-    "columns": [
-      {
-        "name": "cid",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "content",
-        "ordinal": 1,
-        "type_info": "Blob"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false,
-      false
-    ]
-  },
-  "hash": "9b6ac33211a2231754650bb0daca5ffb980c9e530ea47dd892aa06fab1450a05"
-}
-38  .sqlx/query-a16bb62753f6568238cab50d3a597d279db5564d3bcc1f8606850d5442aaf20a.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n WITH LatestHandles AS (\n SELECT did, handle\n FROM handles\n WHERE (did, created_at) IN (\n SELECT did, MAX(created_at) AS max_created_at\n FROM handles\n GROUP BY did\n )\n )\n SELECT a.did, a.email, a.password, h.handle\n FROM accounts a\n LEFT JOIN LatestHandles h ON a.did = h.did\n WHERE h.handle = ?\n ",
-  "describe": {
-    "columns": [
-      {
-        "name": "did",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "email",
-        "ordinal": 1,
-        "type_info": "Text"
-      },
-      {
-        "name": "password",
-        "ordinal": 2,
-        "type_info": "Text"
-      },
-      {
-        "name": "handle",
-        "ordinal": 3,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false,
-      false,
-      false,
-      false
-    ]
-  },
-  "hash": "a16bb62753f6568238cab50d3a597d279db5564d3bcc1f8606850d5442aaf20a"
-}
-12  .sqlx/query-a527a1863a9a2f5ba129c1f5ee9d0cdc78e0c69de43c7da1f9a936222c17c4bf.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n INSERT INTO accounts (did, email, password, root, plc_root, rev, created_at)\n VALUES (?, ?, ?, ?, ?, ?, datetime('now'));\n\n INSERT INTO handles (did, handle, created_at)\n VALUES (?, ?, datetime('now'));\n\n -- Cleanup stale invite codes\n DELETE FROM invites\n WHERE count <= 0;\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 8
-    },
-    "nullable": []
-  },
-  "hash": "a527a1863a9a2f5ba129c1f5ee9d0cdc78e0c69de43c7da1f9a936222c17c4bf"
-}
-12  .sqlx/query-a9fbd43dbd50907f550a2221dab552ff5a00d7f00d7223b4cee745354f77c532.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n UPDATE repo_root\n SET cid = ?, rev = ?, indexedAt = ?\n WHERE did = ?\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 4
-    },
-    "nullable": []
-  },
-  "hash": "a9fbd43dbd50907f550a2221dab552ff5a00d7f00d7223b4cee745354f77c532"
-}
-92  .sqlx/query-b4e6da72ee82515d2ff739c805e1c0ccb837d06c62d338dd782a3ea375f7eee3.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n SELECT * FROM oauth_par_requests\n WHERE request_uri = ? AND client_id = ? AND expires_at > ?\n ",
-  "describe": {
-    "columns": [
-      {
-        "name": "request_uri",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "client_id",
-        "ordinal": 1,
-        "type_info": "Text"
-      },
-      {
-        "name": "response_type",
-        "ordinal": 2,
-        "type_info": "Text"
-      },
-      {
-        "name": "code_challenge",
-        "ordinal": 3,
-        "type_info": "Text"
-      },
-      {
-        "name": "code_challenge_method",
-        "ordinal": 4,
-        "type_info": "Text"
-      },
-      {
-        "name": "state",
-        "ordinal": 5,
-        "type_info": "Text"
-      },
-      {
-        "name": "login_hint",
-        "ordinal": 6,
-        "type_info": "Text"
-      },
-      {
-        "name": "scope",
-        "ordinal": 7,
-        "type_info": "Text"
-      },
-      {
-        "name": "redirect_uri",
-        "ordinal": 8,
-        "type_info": "Text"
-      },
-      {
-        "name": "response_mode",
-        "ordinal": 9,
-        "type_info": "Text"
-      },
-      {
-        "name": "display",
-        "ordinal": 10,
-        "type_info": "Text"
-      },
-      {
-        "name": "created_at",
-        "ordinal": 11,
-        "type_info": "Integer"
-      },
-      {
-        "name": "expires_at",
-        "ordinal": 12,
-        "type_info": "Integer"
-      }
-    ],
-    "parameters": {
-      "Right": 3
-    },
-    "nullable": [
-      false,
-      false,
-      false,
-      false,
-      false,
-      true,
-      true,
-      true,
-      true,
-      true,
-      true,
-      false,
-      false
-    ]
-  },
-  "hash": "b4e6da72ee82515d2ff739c805e1c0ccb837d06c62d338dd782a3ea375f7eee3"
-}
-12  .sqlx/query-bcef1b9aeaf0db7ac4b2e8f4b3ec40b425e48af26cf91496208c04e31239f7c6.json
-12  .sqlx/query-c51b4c9de70b5be51a6e0a5fd744387ae804e8ba978b61c4d04d74b1f8de2614.json
···
-{
-  "db_name": "SQLite",
-  "query": "UPDATE oauth_refresh_tokens SET revoked = TRUE\n WHERE client_id = ? AND dpop_thumbprint = ?",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 2
-    },
-    "nullable": []
-  },
-  "hash": "c51b4c9de70b5be51a6e0a5fd744387ae804e8ba978b61c4d04d74b1f8de2614"
-}
-20  .sqlx/query-cc1c5a90cfd95024cb03fe579941f296b1ac1230cce5819ae9f6eb03c8b19398.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n SELECT\n (SELECT COUNT(*) FROM accounts) + (SELECT COUNT(*) FROM invites)\n AS total_count\n ",
-  "describe": {
-    "columns": [
-      {
-        "name": "total_count",
-        "ordinal": 0,
-        "type_info": "Integer"
-      }
-    ],
-    "parameters": {
-      "Right": 0
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "cc1c5a90cfd95024cb03fe579941f296b1ac1230cce5819ae9f6eb03c8b19398"
-}
-12  .sqlx/query-cd91f7a134089bb77cac221a9bcc489b6d6860123f755c1ee2068e32dc687301.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n INSERT INTO oauth_refresh_tokens (\n token, client_id, subject, dpop_thumbprint, scope, created_at, expires_at, revoked\n ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 8
-    },
-    "nullable": []
-  },
-  "hash": "cd91f7a134089bb77cac221a9bcc489b6d6860123f755c1ee2068e32dc687301"
-}
-12  .sqlx/query-d1408c77d790337a265891b5502a59a62a5d1d01e787dea74b753b1fab794b3a.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n INSERT INTO oauth_par_requests (\n request_uri, client_id, response_type, code_challenge, code_challenge_method,\n state, login_hint, scope, redirect_uri, response_mode, display,\n created_at, expires_at\n ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 13
-    },
-    "nullable": []
-  },
-  "hash": "d1408c77d790337a265891b5502a59a62a5d1d01e787dea74b753b1fab794b3a"
-}
-26  .sqlx/query-d1c3ea6ebc19b0362851ebd0b8c8a0b9c87d5cddf4f03670636d29ba5ceb9435.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n SELECT cid, rev\n FROM repo_root\n WHERE did = ?\n LIMIT 1\n ",
-  "describe": {
-    "columns": [
-      {
-        "name": "cid",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "rev",
-        "ordinal": 1,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false,
-      false
-    ]
-  },
-  "hash": "d1c3ea6ebc19b0362851ebd0b8c8a0b9c87d5cddf4f03670636d29ba5ceb9435"
-}
-12  .sqlx/query-d39b83ec2f091556e6fb5e4d729b8e6fa1cc966855f934e2b1611d8a26614849.json
-12  .sqlx/query-d6ddbce18d6a78a78e8713a0f0b1499517aae7ab9f49744a4cf8a722e03f82fa.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n INSERT INTO oauth_used_jtis (jti, issuer, created_at, expires_at)\n VALUES (?, ?, ?, ?)\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 4
-    },
-    "nullable": []
-  },
-  "hash": "d6ddbce18d6a78a78e8713a0f0b1499517aae7ab9f49744a4cf8a722e03f82fa"
-}
-20  .sqlx/query-dbedb512e10704bc9f0e571314ff68724edf10b76a62071bd1ef04a68c708890.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n INSERT INTO invites (id, did, count, created_at)\n VALUES (?, ?, ?, datetime('now'))\n RETURNING id\n ",
-  "describe": {
-    "columns": [
-      {
-        "name": "id",
-        "ordinal": 0,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 3
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "dbedb512e10704bc9f0e571314ff68724edf10b76a62071bd1ef04a68c708890"
-}
-20  .sqlx/query-dc444d99848fff3578add45fb464004c0797ef7d455652cb92f2c7de8a7f8cc4.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT status FROM accounts WHERE did = ?",
-  "describe": {
-    "columns": [
-      {
-        "name": "status",
-        "ordinal": 0,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "dc444d99848fff3578add45fb464004c0797ef7d455652cb92f2c7de8a7f8cc4"
-}
-20  .sqlx/query-e26b7c36a34130e350f3f3e06b3200c56a0e3330ac0b658de6bbdb39b5497fab.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n UPDATE invites\n SET count = count - 1\n WHERE id = ?\n AND count > 0\n RETURNING id\n ",
-  "describe": {
-    "columns": [
-      {
-        "name": "id",
-        "ordinal": 0,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "e26b7c36a34130e350f3f3e06b3200c56a0e3330ac0b658de6bbdb39b5497fab"
-}
-12  .sqlx/query-e4bd80a305f929229b234b79b1e9e90a36af0e630c8c7530b6d935c6e32d381f.json
···
-{
-  "db_name": "SQLite",
-  "query": "UPDATE oauth_authorization_codes SET used = TRUE WHERE code = ?",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": []
-  },
-  "hash": "e4bd80a305f929229b234b79b1e9e90a36af0e630c8c7530b6d935c6e32d381f"
-}
-20  .sqlx/query-e6007f29d6b7681d7a1f5029d1bf635250ac4449494b925e67735513edfcbdb3.json
···
-{
-  "db_name": "SQLite",
-  "query": "\n SELECT root FROM accounts\n WHERE did = ?\n ",
-  "describe": {
-    "columns": [
-      {
-        "name": "root",
-        "ordinal": 0,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 1
-    },
-    "nullable": [
-      false
-    ]
-  },
-  "hash": "e6007f29d6b7681d7a1f5029d1bf635250ac4449494b925e67735513edfcbdb3"
-}
-32  .sqlx/query-fdd74b27ee260f2cc6fa9102f5c216b86436bb6ccf9bf707118c12b0bd393922.json
···
-{
-  "db_name": "SQLite",
-  "query": "SELECT did, root, rev FROM accounts WHERE did > ? LIMIT ?",
-  "describe": {
-    "columns": [
-      {
-        "name": "did",
-        "ordinal": 0,
-        "type_info": "Text"
-      },
-      {
-        "name": "root",
-        "ordinal": 1,
-        "type_info": "Text"
-      },
-      {
-        "name": "rev",
-        "ordinal": 2,
-        "type_info": "Text"
-      }
-    ],
-    "parameters": {
-      "Right": 2
-    },
-    "nullable": [
-      false,
-      false,
-      false
-    ]
-  },
-  "hash": "fdd74b27ee260f2cc6fa9102f5c216b86436bb6ccf9bf707118c12b0bd393922"
-}
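
The deleted `.sqlx/query-*.json` files above are sqlx's offline query cache (generated by `cargo sqlx prepare`); removing the whole directory is consistent with the move from sqlx to diesel visible in Cargo.toml below. As a rough illustration only (not code from this PR), the deleted invite-decrement query could map onto diesel's DSL, using the `returning_clauses_for_sqlite_3_35` feature this diff enables; the `invites` table definition here is a hypothetical sketch inferred from the old SQL:

```rust
use diesel::prelude::*;

diesel::table! {
    invites (id) {
        id -> Text,
        did -> Nullable<Text>,
        count -> Integer,
        created_at -> Text,
    }
}

/// Decrement an invite's remaining uses, returning its id if it was still valid.
/// Mirrors the deleted sqlx query `UPDATE invites SET count = count - 1 ... RETURNING id`.
fn use_invite(conn: &mut SqliteConnection, code: &str) -> QueryResult<Option<String>> {
    use invites::dsl::*;

    diesel::update(invites.filter(id.eq(code)).filter(count.gt(0)))
        .set(count.eq(count - 1))
        .returning(id)
        .get_result::<String>(conn)
        .optional()
}
```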
+55 -76  Cargo.lock
···
 checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb"

 [[package]]
-name = "bitcoin-io"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf"
-
-[[package]]
 name = "bitcoin_hashes"
 version = "0.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b"
 dependencies = [
  "bitcoin-internals",
- "hex-conservative 0.1.2",
-]
-
-[[package]]
-name = "bitcoin_hashes"
-version = "0.14.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16"
-dependencies = [
- "bitcoin-io",
- "hex-conservative 0.2.1",
+ "hex-conservative",
 ]

 [[package]]
···
 dependencies = [
  "anyhow",
  "argon2",
- "async-trait",
  "atrium-api 0.25.3",
  "atrium-crypto",
  "atrium-repo",
- "atrium-xrpc",
- "atrium-xrpc-client",
  "axum",
  "azure_core",
  "azure_identity",
···
  "clap",
  "clap-verbosity-flag",
  "constcat",
+ "deadpool-diesel",
  "diesel",
  "diesel_migrations",
+ "dotenvy",
  "figment",
  "futures",
  "hex",
  "http-cache-reqwest",
- "ipld-core",
- "k256",
- "lazy_static",
  "memmap2",
  "metrics",
  "metrics-exporter-prometheus",
- "multihash 0.19.3",
- "r2d2",
  "rand 0.8.5",
- "regex",
  "reqwest 0.12.15",
  "reqwest-middleware",
  "rsky-common",
+ "rsky-identity",
  "rsky-lexicon",
  "rsky-pds",
  "rsky-repo",
  "rsky-syntax",
- "secp256k1 0.31.0",
+ "secp256k1",
  "serde",
- "serde_bytes",
  "serde_ipld_dagcbor",
- "serde_ipld_dagjson",
  "serde_json",
  "sha2",
  "thiserror 2.0.12",
···
  "tower-http",
  "tracing",
  "tracing-subscriber",
+ "ubyte",
  "url",
  "urlencoding",
  "uuid 1.16.0",
···
 dependencies = [
  "data-encoding",
  "syn 2.0.101",
+]
+
+[[package]]
+name = "deadpool"
+version = "0.12.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ed5957ff93768adf7a65ab167a17835c3d2c3c50d084fe305174c112f468e2f"
+dependencies = [
+ "deadpool-runtime",
+ "num_cpus",
+ "serde",
+ "tokio",
+]
+
+[[package]]
+name = "deadpool-diesel"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "590573e9e29c5190a5ff782136f871e6e652e35d598a349888e028693601adf1"
+dependencies = [
+ "deadpool",
+ "deadpool-sync",
+ "diesel",
+]
+
+[[package]]
+name = "deadpool-runtime"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b"
+dependencies = [
+ "tokio",
+]
+
+[[package]]
+name = "deadpool-sync"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "524bc3df0d57e98ecd022e21ba31166c2625e7d3e5bcc4510efaeeab4abcab04"
+dependencies = [
+ "deadpool-runtime",
+ "tracing",
 ]

 [[package]]
···
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20"
-
-[[package]]
-name = "hex-conservative"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd"
-dependencies = [
- "arrayvec",
-]

 [[package]]
 name = "hickory-proto"
···
  "rand_core 0.6.4",
  "regex",
  "rsky-identity",
- "secp256k1 0.28.2",
+ "secp256k1",
  "serde",
  "serde_ipld_dagcbor",
  "serde_json",
···
  "anyhow",
  "multibase",
  "p256 0.13.2",
- "secp256k1 0.28.2",
+ "secp256k1",
  "unsigned-varint 0.8.0",
 ]

···
  "libipld",
  "miette",
  "parking_lot",
- "secp256k1 0.28.2",
+ "secp256k1",
  "serde",
  "serde_bytes",
  "serde_cbor",
···
  "rsky-lexicon",
  "rsky-repo",
  "rsky-syntax",
- "secp256k1 0.28.2",
+ "secp256k1",
  "serde",
  "serde_bytes",
  "serde_cbor",
···
  "rsky-crypto",
  "rsky-lexicon",
  "rsky-syntax",
- "secp256k1 0.28.2",
+ "secp256k1",
  "serde",
  "serde_bytes",
  "serde_cbor",
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10"
 dependencies = [
- "bitcoin_hashes 0.13.0",
+ "bitcoin_hashes",
  "rand 0.8.5",
- "secp256k1-sys 0.9.2",
+ "secp256k1-sys",
  "serde",
 ]

 [[package]]
-name = "secp256k1"
-version = "0.31.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a3dff2d01c9aa65c3186a45ff846bfea52cbe6de3b6320ed2a358d90dad0d76"
-dependencies = [
- "bitcoin_hashes 0.14.0",
- "rand 0.9.1",
- "secp256k1-sys 0.11.0",
-]
-
-[[package]]
 name = "secp256k1-sys"
 version = "0.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb"
-dependencies = [
- "cc",
-]
-
-[[package]]
-name = "secp256k1-sys"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcb913707158fadaf0d8702c2db0e857de66eb003ccfdda5924b5f5ac98efb38"
 dependencies = [
  "cc",
 ]
···
  "ipld-core",
  "scopeguard",
  "serde",
-]
-
-[[package]]
-name = "serde_ipld_dagjson"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3359b47ba7f4a306ef5984665e10539e212e97217afa489437d533208eecda36"
-dependencies = [
- "ipld-core",
- "serde",
- "serde_json",
 ]

 [[package]]
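
Cargo.lock keeps `diesel` and `diesel_migrations` while dropping the sqlx-era dependencies, so schema setup presumably stays embedded in the binary. A sketch of the usual embed pattern, assuming the `migrations/` directory the README's old code map listed (the function name is illustrative):

```rust
use diesel::prelude::*;
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};

// Compile the SQL files under ./migrations into the binary at build time.
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");

fn run_migrations(conn: &mut SqliteConnection) -> anyhow::Result<()> {
    // Apply any migrations that have not run against this database yet.
    conn.run_pending_migrations(MIGRATIONS)
        .map_err(|e| anyhow::anyhow!(e))?;
    Ok(())
}
```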
+39 -20  Cargo.toml
···
+# cargo-features = ["codegen-backend"]
+
 [package]
 name = "bluepds"
 version = "0.0.0"
···

 [profile.dev.package."*"]
 opt-level = 3
+# codegen-backend = "cranelift"

 [profile.dev]
 opt-level = 1
+# codegen-backend = "cranelift"

 [profile.release]
 opt-level = "s" # Slightly slows compile times, great improvements to file size and runtime performance.
···
 rust-2021-compatibility = { level = "warn", priority = -1 } # Lints used to transition code from the 2018 edition to 2021
 rust-2018-idioms = { level = "warn", priority = -1 } # Lints to nudge you toward idiomatic features of Rust 2018
 rust-2024-compatibility = { level = "warn", priority = -1 } # Lints used to transition code from the 2021 edition to 2024
-unused = { level = "warn", priority = -1 } # Lints that detect things being declared but not used, or excess syntax
+# unused = { level = "warn", priority = -1 } # Lints that detect things being declared but not used, or excess syntax
 ## Individual
 ambiguous_negative_literals = "warn" # checks for cases that are confusing between a negative literal and a negation that's not part of the literal.
 closure_returning_async_block = "warn" # detects cases where users write a closure that returns an async block. # nightly
···
 unit_bindings = "warn"
 unnameable_types = "warn"
 # unqualified_local_imports = "warn" # unstable
-unreachable_pub = "warn"
+# unreachable_pub = "warn"
 unsafe_code = "warn"
 unstable_features = "warn"
 # unused_crate_dependencies = "warn"
···
 variant_size_differences = "warn"
 elided_lifetimes_in_paths = "allow"
 # unstable-features = "allow"
+# # Temporary Allows
+dead_code = "allow"
+# unused_imports = "allow"

 [lints.clippy]
 # Groups
 nursery = { level = "warn", priority = -1 }
 correctness = { level = "warn", priority = -1 }
 suspicious = { level = "warn", priority = -1 }
-complexity = { level = "warn", priority = -1 }
-perf = { level = "warn", priority = -1 }
-style = { level = "warn", priority = -1 }
-pedantic = { level = "warn", priority = -1 }
-restriction = { level = "warn", priority = -1 }
+# complexity = { level = "warn", priority = -1 }
+# perf = { level = "warn", priority = -1 }
+# style = { level = "warn", priority = -1 }
+# pedantic = { level = "warn", priority = -1 }
+# restriction = { level = "warn", priority = -1 }
 cargo = { level = "warn", priority = -1 }
 # Temporary Allows
 multiple_crate_versions = "allow" # triggered by lib
···
 # expect_used = "deny"

 [dependencies]
-multihash = "0.19.3"
-diesel = { version = "2.1.5", features = ["chrono", "sqlite", "r2d2"] }
+# multihash = "0.19.3"
+diesel = { version = "2.1.5", features = [
+    "chrono",
+    "sqlite",
+    "r2d2",
+    "returning_clauses_for_sqlite_3_35",
+] }
 diesel_migrations = { version = "2.1.0" }
-r2d2 = "0.8.10"
+# r2d2 = "0.8.10"

 atrium-repo = "0.1"
 atrium-api = "0.25"
 # atrium-common = { version = "0.1.2", path = "atrium-common" }
 atrium-crypto = "0.1"
 # atrium-identity = { version = "0.1.4", path = "atrium-identity" }
-atrium-xrpc = "0.12"
-atrium-xrpc-client = "0.5"
+# atrium-xrpc = "0.12"
+# atrium-xrpc-client = "0.5"
 # bsky-sdk = { version = "0.1.19", path = "bsky-sdk" }
 rsky-syntax = { git = "https://github.com/blacksky-algorithms/rsky.git" }
 rsky-repo = { git = "https://github.com/blacksky-algorithms/rsky.git" }
 rsky-pds = { git = "https://github.com/blacksky-algorithms/rsky.git" }
 rsky-common = { git = "https://github.com/blacksky-algorithms/rsky.git" }
 rsky-lexicon = { git = "https://github.com/blacksky-algorithms/rsky.git" }
+rsky-identity = { git = "https://github.com/blacksky-algorithms/rsky.git" }

 # async in streams
 # async-stream = "0.3"

 # DAG-CBOR codec
-ipld-core = "0.4.2"
+# ipld-core = "0.4.2"
 serde_ipld_dagcbor = { version = "0.6.2", default-features = false, features = [
     "std",
 ] }
-serde_ipld_dagjson = "0.2.0"
+# serde_ipld_dagjson = "0.2.0"
 cidv10 = { version = "0.10.1", package = "cid" }

 # Parsing and validation
···
 hex = "0.4.3"
 # langtag = "0.3"
 # multibase = "0.9.1"
-regex = "1.11.1"
+# regex = "1.11.1"
 serde = { version = "1.0.218", features = ["derive"] }
-serde_bytes = "0.11.17"
+# serde_bytes = "0.11.17"
 # serde_html_form = "0.2.6"
 serde_json = "1.0.139"
 # unsigned-varint = "0.8"
···
 # elliptic-curve = "0.13.6"
 # jose-jwa = "0.1.2"
 # jose-jwk = { version = "0.1.2", default-features = false }
-k256 = "0.13.4"
+# k256 = "0.13.4"
 # p256 = { version = "0.13.2", default-features = false }
 rand = "0.8.5"
 sha2 = "0.10.8"
···
 url = "2.5.4"
 uuid = { version = "1.14.0", features = ["v4"] }
 urlencoding = "2.1.3"
-async-trait = "0.1.88"
-lazy_static = "1.5.0"
-secp256k1 = "0.31.0"
+# lazy_static = "1.5.0"
+secp256k1 = "0.28.2"
+dotenvy = "0.15.7"
+deadpool-diesel = { version = "0.6.1", features = [
+    "serde",
+    "sqlite",
+    "tracing",
+] }
+ubyte = "0.10.4"
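
With `dotenvy` and `deadpool-diesel` added above (and `r2d2` commented out), connection handling presumably moves to a deadpool-managed SQLite pool. A minimal sketch of what that wiring could look like; the function name, pool size, and PRAGMA are illustrative, not taken from this PR:

```rust
use deadpool_diesel::sqlite::{Manager, Pool};
use deadpool_diesel::Runtime;
use diesel::prelude::*;

async fn make_pool() -> anyhow::Result<Pool> {
    dotenvy::dotenv().ok();
    let url = std::env::var("DATABASE_URL")?;

    // deadpool drives diesel's blocking connections from async code.
    let manager = Manager::new(url, Runtime::Tokio1);
    let pool = Pool::builder(manager).max_size(8).build()?;

    // Connections are used via `interact`, which runs the closure on a
    // blocking thread so diesel's sync API doesn't stall the executor.
    let conn = pool.get().await?;
    conn.interact(|conn| diesel::sql_query("PRAGMA foreign_keys = ON;").execute(conn))
        .await
        .map_err(|e| anyhow::anyhow!("{e}"))??;

    Ok(pool)
}
```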
+31 -118  README.md
···
 \/_/
 ```

-This is an implementation of an ATProto PDS, built with [Axum](https://github.com/tokio-rs/axum) and [Atrium](https://github.com/sugyan/atrium).
-This PDS implementation uses a SQLite database to store private account information and file storage to store canonical user data.
+This is an implementation of an ATProto PDS, built with [Axum](https://github.com/tokio-rs/axum), [rsky](https://github.com/blacksky-algorithms/rsky/), and [Atrium](https://github.com/sugyan/atrium).
+This PDS implementation uses a SQLite database with the [diesel.rs](https://diesel.rs/) ORM to store canonical user data, and file system storage to store user blobs.

 Heavily inspired by David Buchanan's [millipds](https://github.com/DavidBuchanan314/millipds).
-This implementation forked from the [azure-rust-app](https://github.com/DrChat/azure-rust-app) starter template and the upstream [DrChat/bluepds](https://github.com/DrChat/bluepds).
-See TODO below for this fork's changes from upstream.
+This implementation forked from [DrChat/bluepds](https://github.com/DrChat/bluepds) and now makes heavy use of the [rsky-repo](https://github.com/blacksky-algorithms/rsky/tree/main/rsky-repo) repository implementation.
+The `actor_store` and `account_manager` modules have been reimplemented from [rsky-pds](https://github.com/blacksky-algorithms/rsky/tree/main/rsky-pds) to use a SQLite backend and file storage; those modules are in turn adapted from the [original Bluesky implementation](https://github.com/bluesky-social/atproto), which uses SQLite in TypeScript.
+

 If you want to see this fork in action, there is a live account hosted by this PDS at [@teq.shatteredsky.net](https://bsky.app/profile/teq.shatteredsky.net)!

 > [!WARNING]
-> This PDS is undergoing heavy development. Do _NOT_ use this to host your primary account or any important data!
+> This PDS is undergoing heavy development, and this branch is not at an operable release. Do _NOT_ use this to host your primary account or any important data!

 ## Quick Start
 ```
···
 - Size: 47 GB
 - VPUs/GB: 10

-This is about half of the 3,000 OCPU hours and 18,000 GB hours available per month for free on the VM.Standard.A1.Flex shape. This is _without_ optimizing for costs. The PDS can likely be made much cheaper.
-
-## Code map
-```
-* migrations/ - SQLite database migrations
-* src/
-  * endpoints/ - ATProto API endpoints
-  * auth.rs - Authentication primitives
-  * config.rs - Application configuration
-  * did.rs - Decentralized Identifier helpers
-  * error.rs - Axum error helpers
-  * firehose.rs - ATProto firehose producer
-  * main.rs - Main entrypoint
-  * metrics.rs - Definitions for telemetry instruments
-  * oauth.rs - OAuth routes
-  * plc.rs - Functionality to access the Public Ledger of Credentials
-  * storage.rs - Helpers to access user repository storage
-```
+This is about half of the 3,000 OCPU hours and 18,000 GB hours available per month for free on the VM.Standard.A1.Flex shape. This is _without_ optimizing for costs. The PDS can likely be made to run on far fewer resources.

 ## To-do
-### Teq's fork
-- [ ] OAuth
-  - [X] `/.well-known/oauth-protected-resource` - Authorization Server Metadata
-  - [X] `/.well-known/oauth-authorization-server`
-  - [X] `/par` - Pushed Authorization Request
-  - [X] `/client-metadata.json` - Client metadata discovery
-  - [X] `/oauth/authorize`
-  - [X] `/oauth/authorize/sign-in`
-  - [X] `/oauth/token`
-  - [ ] Authorization flow - Backend client
-  - [X] Authorization flow - Serverless browser app
-  - [ ] DPoP-Nonce
-  - [ ] Verify JWT signature with JWK
-  - [ ] Email verification
-  - [ ] 2FA
-- [ ] Admin endpoints
-- [ ] App passwords
-- [X] `listRecords` fixes
-  - [X] Fix collection prefixing (terminate with `/`)
-  - [X] Fix cursor handling (return `cid` instead of `key`)
-- [X] Session management (JWT)
-  - [X] Match token fields to reference implementation
-  - [X] RefreshSession from Bluesky Client
-  - [X] Respond with JSON error message `ExpiredToken`
-- [X] Cursor handling
-  - [X] Implement time-based unix microsecond sequences
-  - [X] Startup with present cursor
-- [X] Respond `RecordNotFound`, required for:
-  - [X] app.bsky.feed.postgate
-  - [X] app.bsky.feed.threadgate
-  - [ ] app.bsky... (profile creation?)
-- [X] Linting
-  - [X] Rustfmt
-  - [X] warnings
-    - [X] deprecated-safe
-    - [X] future-incompatible
-    - [X] keyword-idents
-    - [X] let-underscore
-    - [X] nonstandard-style
-    - [X] refining-impl-trait
-    - [X] rust-2018-idioms
-    - [X] rust-2018/2021/2024-compatibility
-    - [X] ungrouped
-  - [X] Clippy
-    - [X] nursery
-    - [X] correctness
-    - [X] suspicious
-    - [X] complexity
-    - [X] perf
-    - [X] style
-    - [X] pedantic
-    - [X] cargo
-    - [X] ungrouped
-
-### High-level features
-- [ ] Storage backend abstractions
-  - [ ] Azure blob storage backend
-  - [ ] Backblaze b2(?)
-- [ ] Telemetry
-  - [X] [Metrics](https://github.com/metrics-rs/metrics) (counters/gauges/etc)
-  - [X] Exporters for common backends (Prometheus/etc)
-
 ### APIs
-- [X] [Service proxying](https://atproto.com/specs/xrpc#service-proxying)
-- [X] UG /xrpc/_health (undocumented, but impl by reference PDS)
+- [ ] [Service proxying](https://atproto.com/specs/xrpc#service-proxying)
+- [ ] UG /xrpc/_health (undocumented, but impl by reference PDS)
 <!-- - [ ] xx /xrpc/app.bsky.notification.registerPush
 - app.bsky.actor
-  - [X] AG /xrpc/app.bsky.actor.getPreferences
+  - [ ] AG /xrpc/app.bsky.actor.getPreferences
   - [ ] xx /xrpc/app.bsky.actor.getProfile
   - [ ] xx /xrpc/app.bsky.actor.getProfiles
-  - [X] AP /xrpc/app.bsky.actor.putPreferences
+  - [ ] AP /xrpc/app.bsky.actor.putPreferences
 - app.bsky.feed
   - [ ] xx /xrpc/app.bsky.feed.getActorLikes
   - [ ] xx /xrpc/app.bsky.feed.getAuthorFeed
···
 - com.atproto.identity
   - [ ] xx /xrpc/com.atproto.identity.getRecommendedDidCredentials
   - [ ] AP /xrpc/com.atproto.identity.requestPlcOperationSignature
-  - [X] UG /xrpc/com.atproto.identity.resolveHandle
+  - [ ] UG /xrpc/com.atproto.identity.resolveHandle
   - [ ] AP /xrpc/com.atproto.identity.signPlcOperation
   - [ ] xx /xrpc/com.atproto.identity.submitPlcOperation
-  - [X] AP /xrpc/com.atproto.identity.updateHandle
+  - [ ] AP /xrpc/com.atproto.identity.updateHandle
 <!-- - com.atproto.moderation
   - [ ] xx /xrpc/com.atproto.moderation.createReport -->
 - com.atproto.repo
···
   - [X] AP /xrpc/com.atproto.repo.deleteRecord
   - [X] UG /xrpc/com.atproto.repo.describeRepo
   - [X] UG /xrpc/com.atproto.repo.getRecord
-  - [ ] xx /xrpc/com.atproto.repo.importRepo
-  - [ ] xx /xrpc/com.atproto.repo.listMissingBlobs
+  - [X] xx /xrpc/com.atproto.repo.importRepo
+  - [X] xx /xrpc/com.atproto.repo.listMissingBlobs
   - [X] UG /xrpc/com.atproto.repo.listRecords
   - [X] AP /xrpc/com.atproto.repo.putRecord
   - [X] AP /xrpc/com.atproto.repo.uploadBlob
···
   - [ ] xx /xrpc/com.atproto.server.activateAccount
   - [ ] xx /xrpc/com.atproto.server.checkAccountStatus
   - [ ] xx /xrpc/com.atproto.server.confirmEmail
-  - [X] UP /xrpc/com.atproto.server.createAccount
+  - [ ] UP /xrpc/com.atproto.server.createAccount
   - [ ] xx /xrpc/com.atproto.server.createAppPassword
-  - [X] AP /xrpc/com.atproto.server.createInviteCode
+  - [ ] AP /xrpc/com.atproto.server.createInviteCode
   - [ ] xx /xrpc/com.atproto.server.createInviteCodes
-  - [X] UP /xrpc/com.atproto.server.createSession
+  - [ ] UP /xrpc/com.atproto.server.createSession
   - [ ] xx /xrpc/com.atproto.server.deactivateAccount
   - [ ] xx /xrpc/com.atproto.server.deleteAccount
   - [ ] xx /xrpc/com.atproto.server.deleteSession
-  - [X] UG /xrpc/com.atproto.server.describeServer
+  - [ ] UG /xrpc/com.atproto.server.describeServer
   - [ ] xx /xrpc/com.atproto.server.getAccountInviteCodes
-  - [X] AG /xrpc/com.atproto.server.getServiceAuth
-  - [X] AG /xrpc/com.atproto.server.getSession
+  - [ ] AG /xrpc/com.atproto.server.getServiceAuth
+  - [ ] AG /xrpc/com.atproto.server.getSession
   - [ ] xx /xrpc/com.atproto.server.listAppPasswords
   - [ ] xx /xrpc/com.atproto.server.refreshSession
   - [ ] xx /xrpc/com.atproto.server.requestAccountDelete
···
   - [ ] xx /xrpc/com.atproto.server.revokeAppPassword
   - [ ] xx /xrpc/com.atproto.server.updateEmail
 - com.atproto.sync
-  - [X] UG /xrpc/com.atproto.sync.getBlob
-  - [X] UG /xrpc/com.atproto.sync.getBlocks
-  - [X] UG /xrpc/com.atproto.sync.getLatestCommit
-  - [X] UG /xrpc/com.atproto.sync.getRecord
-  - [X] UG /xrpc/com.atproto.sync.getRepo
-  - [X] UG /xrpc/com.atproto.sync.getRepoStatus
-  - [X] UG /xrpc/com.atproto.sync.listBlobs
-  - [X] UG /xrpc/com.atproto.sync.listRepos
-  - [X] UG /xrpc/com.atproto.sync.subscribeRepos
+  - [ ] UG /xrpc/com.atproto.sync.getBlob
+  - [ ] UG /xrpc/com.atproto.sync.getBlocks
+  - [ ] UG /xrpc/com.atproto.sync.getLatestCommit
+  - [ ] UG /xrpc/com.atproto.sync.getRecord
+  - [ ] UG /xrpc/com.atproto.sync.getRepo
+  - [ ] UG /xrpc/com.atproto.sync.getRepoStatus
+  - [ ] UG /xrpc/com.atproto.sync.listBlobs
+  - [ ] UG /xrpc/com.atproto.sync.listRepos
+  - [ ] UG /xrpc/com.atproto.sync.subscribeRepos

-## Quick Deployment (Azure CLI)
-```
-az group create --name "webapp" --location southcentralus
-az deployment group create --resource-group "webapp" --template-file .\deployment.bicep --parameters webAppName=testapp
-
-az acr login --name <insert name of ACR resource here>
-docker build -t <ACR>.azurecr.io/testapp:latest .
-docker push <ACR>.azurecr.io/testapp:latest
-```
-## Quick Deployment (NixOS)
+## Deployment (NixOS)
 ```nix
 {
   inputs = {
-182 deployment.bicep

```bicep
param webAppName string
param location string = resourceGroup().location // Location for all resources

param sku string = 'B1' // The SKU of App Service Plan
param dockerContainerName string = '${webAppName}:latest'
param repositoryUrl string = 'https://github.com/DrChat/bluepds'
param branch string = 'main'
param customDomain string

@description('Redeploy hostnames without SSL binding. Just specify `true` if this is the first time you\'re deploying the app.')
param redeployHostnamesHack bool = false

var acrName = toLower('${webAppName}${uniqueString(resourceGroup().id)}')
var aspName = toLower('${webAppName}-asp')
var webName = toLower('${webAppName}${uniqueString(resourceGroup().id)}')
var sanName = toLower('${webAppName}${uniqueString(resourceGroup().id)}')

// resource appInsights 'Microsoft.OperationalInsights/workspaces@2023-09-01' = {
//   name: '${webAppName}-ai'
//   location: location
//   properties: {
//     publicNetworkAccessForIngestion: 'Enabled'
//     workspaceCapping: {
//       dailyQuotaGb: 1
//     }
//     sku: {
//       name: 'Standalone'
//     }
//   }
// }

// resource appServicePlanDiagnostics 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = {
//   name: appServicePlan.name
//   scope: appServicePlan
//   properties: {
//     workspaceId: appInsights.id
//     metrics: [
//       {
//         category: 'AllMetrics'
//         enabled: true
//       }
//     ]
//   }
// }

resource appServicePlan 'Microsoft.Web/serverfarms@2020-06-01' = {
  name: aspName
  location: location
  properties: {
    reserved: true
  }
  sku: {
    name: sku
  }
  kind: 'linux'
}

resource acrResource 'Microsoft.ContainerRegistry/registries@2023-01-01-preview' = {
  name: acrName
  location: location
  sku: {
    name: 'Basic'
  }
  properties: {
    adminUserEnabled: false
  }
}

resource appStorage 'Microsoft.Storage/storageAccounts@2023-05-01' = {
  name: sanName
  location: location
  kind: 'StorageV2'
  sku: {
    name: 'Standard_LRS'
  }
}

resource fileShare 'Microsoft.Storage/storageAccounts/fileServices/shares@2023-05-01' = {
  name: '${appStorage.name}/default/data'
  properties: {}
}

resource appService 'Microsoft.Web/sites@2020-06-01' = {
  name: webName
  location: location
  identity: {
    type: 'SystemAssigned'
  }
  properties: {
    httpsOnly: true
    serverFarmId: appServicePlan.id
    siteConfig: {
      // Sigh. This took _far_ too long to figure out.
      // We must authenticate to ACR, as no credentials are set up by default
      // (the Az CLI will implicitly set them up in the background)
      acrUseManagedIdentityCreds: true
      appSettings: [
        {
          name: 'BLUEPDS_HOST_NAME'
          value: empty(customDomain) ? '${webName}.azurewebsites.net' : customDomain
        }
        {
          name: 'BLUEPDS_TEST'
          value: 'false'
        }
        {
          name: 'WEBSITES_PORT'
          value: '8000'
        }
      ]
      linuxFxVersion: 'DOCKER|${acrName}.azurecr.io/${dockerContainerName}'
    }
  }
}

resource hostNameBinding 'Microsoft.Web/sites/hostNameBindings@2024-04-01' = if (redeployHostnamesHack) {
  name: customDomain
  parent: appService
  properties: {
    siteName: appService.name
    hostNameType: 'Verified'
    sslState: 'Disabled'
  }
}

// This stupidity is required because Azure requires a circular dependency in order to define a custom hostname with SSL.
// https://stackoverflow.com/questions/73077972/how-to-deploy-app-service-with-managed-ssl-certificate-using-arm
module certificateBindings './deploymentBindingHack.bicep' = {
  name: '${deployment().name}-ssl'
  params: {
    appServicePlanResourceId: appServicePlan.id
    customHostnames: [customDomain]
    location: location
    webAppName: appService.name
  }
  dependsOn: [hostNameBinding]
}

resource appServiceStorageConfig 'Microsoft.Web/sites/config@2024-04-01' = {
  name: 'azurestorageaccounts'
  parent: appService
  properties: {
    data: {
      type: 'AzureFiles'
      shareName: 'data'
      mountPath: '/app/data'
      accountName: appStorage.name
      // WTF? Where's the ability to mount storage via managed identity?
      accessKey: appStorage.listKeys().keys[0].value
    }
  }
}

@description('This is the built-in AcrPull role. See https://docs.microsoft.com/azure/role-based-access-control/built-in-roles#acrpull')
resource acrPullRoleDefinition 'Microsoft.Authorization/roleDefinitions@2018-01-01-preview' existing = {
  scope: subscription()
  name: '7f951dda-4ed3-4680-a7ca-43fe172d538d'
}

resource appServiceAcrPull 'Microsoft.Authorization/roleAssignments@2020-04-01-preview' = {
  name: guid(resourceGroup().id, acrResource.id, appService.id, 'AssignAcrPullToAS')
  scope: acrResource
  properties: {
    description: 'Assign AcrPull role to AS'
    principalId: appService.identity.principalId
    principalType: 'ServicePrincipal'
    roleDefinitionId: acrPullRoleDefinition.id
  }
}

resource srcControls 'Microsoft.Web/sites/sourcecontrols@2021-01-01' = {
  name: 'web'
  parent: appService
  properties: {
    repoUrl: repositoryUrl
    branch: branch
    isManualIntegration: true
  }
}

output acr string = acrResource.name
output domain string = appService.properties.hostNames[0]
```
-30 deploymentBindingHack.bicep

```bicep
// https://stackoverflow.com/questions/73077972/how-to-deploy-app-service-with-managed-ssl-certificate-using-arm
//
// TLDR: Azure requires a circular dependency in order to define an app service with a custom domain with SSL enabled.
// Terrific user experience. Really makes me love using Azure in my free time.
param webAppName string
param location string
param appServicePlanResourceId string
param customHostnames array

// Managed certificates can only be created once the hostname is added to the web app.
resource certificates 'Microsoft.Web/certificates@2022-03-01' = [for (fqdn, i) in customHostnames: {
  name: '${fqdn}-${webAppName}'
  location: location
  properties: {
    serverFarmId: appServicePlanResourceId
    canonicalName: fqdn
  }
}]

// sslState and thumbprint can only be set once the managed certificate is created
@batchSize(1)
resource customHostname 'Microsoft.web/sites/hostnameBindings@2019-08-01' = [for (fqdn, i) in customHostnames: {
  name: '${webAppName}/${fqdn}'
  properties: {
    siteName: webAppName
    hostNameType: 'Verified'
    sslState: 'SniEnabled'
    thumbprint: certificates[i].properties.thumbprint
  }
}]
```
+3 -2 flake.nix

```diff
@@ -22,7 +22,7 @@
   "rust-analyzer"
 ];
 }));
-
+
 inherit (pkgs) lib;
 unfilteredRoot = ./.; # The original, unfiltered source
 src = lib.fileset.toSource {
@@ -109,6 +109,7 @@
   git
   nixd
   direnv
+  libpq
 ];
 };
 })
@@ -165,4 +166,4 @@
   };
 };
});
-}
+}
```
+14 migrations/2025-05-15-182818_init_diff/down.sql

```sql
DROP TABLE IF EXISTS `repo_seq`;
DROP TABLE IF EXISTS `app_password`;
DROP TABLE IF EXISTS `device_account`;
DROP TABLE IF EXISTS `actor`;
DROP TABLE IF EXISTS `device`;
DROP TABLE IF EXISTS `did_doc`;
DROP TABLE IF EXISTS `email_token`;
DROP TABLE IF EXISTS `invite_code`;
DROP TABLE IF EXISTS `used_refresh_token`;
DROP TABLE IF EXISTS `invite_code_use`;
DROP TABLE IF EXISTS `authorization_request`;
DROP TABLE IF EXISTS `token`;
DROP TABLE IF EXISTS `refresh_token`;
DROP TABLE IF EXISTS `account`;
```
+122 migrations/2025-05-15-182818_init_diff/up.sql

```sql
CREATE TABLE `repo_seq`(
    `seq` INT8 NOT NULL PRIMARY KEY,
    `did` VARCHAR NOT NULL,
    `eventtype` VARCHAR NOT NULL,
    `event` BYTEA NOT NULL,
    `invalidated` INT2 NOT NULL,
    `sequencedat` VARCHAR NOT NULL
);

CREATE TABLE `app_password`(
    `did` VARCHAR NOT NULL,
    `name` VARCHAR NOT NULL,
    `password` VARCHAR NOT NULL,
    `createdat` VARCHAR NOT NULL,
    PRIMARY KEY(`did`, `name`)
);

CREATE TABLE `device_account`(
    `did` VARCHAR NOT NULL,
    `deviceid` VARCHAR NOT NULL,
    `authenticatedat` TIMESTAMPTZ NOT NULL,
    `remember` BOOL NOT NULL,
    `authorizedclients` VARCHAR NOT NULL,
    PRIMARY KEY(`deviceId`, `did`)
);

CREATE TABLE `actor`(
    `did` VARCHAR NOT NULL PRIMARY KEY,
    `handle` VARCHAR,
    `createdat` VARCHAR NOT NULL,
    `takedownref` VARCHAR,
    `deactivatedat` VARCHAR,
    `deleteafter` VARCHAR
);

CREATE TABLE `device`(
    `id` VARCHAR NOT NULL PRIMARY KEY,
    `sessionid` VARCHAR,
    `useragent` VARCHAR,
    `ipaddress` VARCHAR NOT NULL,
    `lastseenat` TIMESTAMPTZ NOT NULL
);

CREATE TABLE `did_doc`(
    `did` VARCHAR NOT NULL PRIMARY KEY,
    `doc` TEXT NOT NULL,
    `updatedat` INT8 NOT NULL
);

CREATE TABLE `email_token`(
    `purpose` VARCHAR NOT NULL,
    `did` VARCHAR NOT NULL,
    `token` VARCHAR NOT NULL,
    `requestedat` VARCHAR NOT NULL,
    PRIMARY KEY(`purpose`, `did`)
);

CREATE TABLE `invite_code`(
    `code` VARCHAR NOT NULL PRIMARY KEY,
    `availableuses` INT4 NOT NULL,
    `disabled` INT2 NOT NULL,
    `foraccount` VARCHAR NOT NULL,
    `createdby` VARCHAR NOT NULL,
    `createdat` VARCHAR NOT NULL
);

CREATE TABLE `used_refresh_token`(
    `refreshtoken` VARCHAR NOT NULL PRIMARY KEY,
    `tokenid` VARCHAR NOT NULL
);

CREATE TABLE `invite_code_use`(
    `code` VARCHAR NOT NULL,
    `usedby` VARCHAR NOT NULL,
    `usedat` VARCHAR NOT NULL,
    PRIMARY KEY(`code`, `usedBy`)
);

CREATE TABLE `authorization_request`(
    `id` VARCHAR NOT NULL PRIMARY KEY,
    `did` VARCHAR,
    `deviceid` VARCHAR,
    `clientid` VARCHAR NOT NULL,
    `clientauth` VARCHAR NOT NULL,
    `parameters` VARCHAR NOT NULL,
    `expiresat` TIMESTAMPTZ NOT NULL,
    `code` VARCHAR
);

CREATE TABLE `token`(
    `id` VARCHAR NOT NULL PRIMARY KEY,
    `did` VARCHAR NOT NULL,
    `tokenid` VARCHAR NOT NULL,
    `createdat` TIMESTAMPTZ NOT NULL,
    `updatedat` TIMESTAMPTZ NOT NULL,
    `expiresat` TIMESTAMPTZ NOT NULL,
    `clientid` VARCHAR NOT NULL,
    `clientauth` VARCHAR NOT NULL,
    `deviceid` VARCHAR,
    `parameters` VARCHAR NOT NULL,
    `details` VARCHAR,
    `code` VARCHAR,
    `currentrefreshtoken` VARCHAR
);

CREATE TABLE `refresh_token`(
    `id` VARCHAR NOT NULL PRIMARY KEY,
    `did` VARCHAR NOT NULL,
    `expiresat` VARCHAR NOT NULL,
    `nextid` VARCHAR,
    `apppasswordname` VARCHAR
);

CREATE TABLE `account`(
    `did` VARCHAR NOT NULL PRIMARY KEY,
    `email` VARCHAR NOT NULL,
    `recoverykey` VARCHAR,
    `password` VARCHAR NOT NULL,
    `createdat` VARCHAR NOT NULL,
    `invitesdisabled` INT2 NOT NULL,
    `emailconfirmedat` VARCHAR
);
```
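The Rust helpers later in this diff address these tables through `crate::schema::pds` using camelCase column names (for example `AccountSchema::createdAt`), while the migration declares lowercase identifiers. A hedged sketch of how the generated Diesel schema entry for `account` might reconcile the two, assuming `#[sql_name]` mappings; the actual `schema.rs` is not part of this diff and may differ:

```rust
// Illustrative only: a plausible Diesel schema entry for the `account`
// table above. The camelCase aliases and sql_name mappings are assumptions
// made to match the helper code later in this diff.
diesel::table! {
    account (did) {
        did -> Text,
        email -> Text,
        recoverykey -> Nullable<Text>,
        password -> Text,
        #[sql_name = "createdat"]
        createdAt -> Text,
        #[sql_name = "invitesdisabled"]
        invitesDisabled -> SmallInt,       // INT2
        #[sql_name = "emailconfirmedat"]
        emailConfirmedAt -> Nullable<Text>,
    }
}
```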
+4 migrations/2025-05-17-094600_oauth_temp/down.sql

+46 migrations/2025-05-17-094600_oauth_temp/up.sql

```sql
CREATE TABLE `oauth_refresh_tokens`(
    `token` VARCHAR NOT NULL PRIMARY KEY,
    `client_id` VARCHAR NOT NULL,
    `subject` VARCHAR NOT NULL,
    `dpop_thumbprint` VARCHAR NOT NULL,
    `scope` VARCHAR,
    `created_at` INT8 NOT NULL,
    `expires_at` INT8 NOT NULL,
    `revoked` BOOL NOT NULL
);

CREATE TABLE `oauth_used_jtis`(
    `jti` VARCHAR NOT NULL PRIMARY KEY,
    `issuer` VARCHAR NOT NULL,
    `created_at` INT8 NOT NULL,
    `expires_at` INT8 NOT NULL
);

CREATE TABLE `oauth_par_requests`(
    `request_uri` VARCHAR NOT NULL PRIMARY KEY,
    `client_id` VARCHAR NOT NULL,
    `response_type` VARCHAR NOT NULL,
    `code_challenge` VARCHAR NOT NULL,
    `code_challenge_method` VARCHAR NOT NULL,
    `state` VARCHAR,
    `login_hint` VARCHAR,
    `scope` VARCHAR,
    `redirect_uri` VARCHAR,
    `response_mode` VARCHAR,
    `display` VARCHAR,
    `created_at` INT8 NOT NULL,
    `expires_at` INT8 NOT NULL
);

CREATE TABLE `oauth_authorization_codes`(
    `code` VARCHAR NOT NULL PRIMARY KEY,
    `client_id` VARCHAR NOT NULL,
    `subject` VARCHAR NOT NULL,
    `code_challenge` VARCHAR NOT NULL,
    `code_challenge_method` VARCHAR NOT NULL,
    `redirect_uri` VARCHAR NOT NULL,
    `scope` VARCHAR,
    `created_at` INT8 NOT NULL,
    `expires_at` INT8 NOT NULL,
    `used` BOOL NOT NULL
);
```
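These tables replace the sqlx queries deleted from `.sqlx/`, so the OAuth state is now read through Diesel models. A hedged sketch of the kind of model the `oauth_refresh_tokens` columns map onto; the struct name, module path, and field layout are illustrative, not taken from the diff:

```rust
use diesel::prelude::*;

// Illustrative model for the `oauth_refresh_tokens` table above; the
// actual struct in crate::models may be named or shaped differently.
#[derive(Queryable, Selectable, Debug)]
#[diesel(table_name = crate::schema::pds::oauth_refresh_tokens)]
pub struct OauthRefreshToken {
    pub token: String,
    pub client_id: String,
    pub subject: String,
    pub dpop_thumbprint: String,
    pub scope: Option<String>,
    pub created_at: i64, // INT8, epoch seconds as in the old sqlx schema
    pub expires_at: i64,
    pub revoked: bool,
}
```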
-7 migrations/20250104202448_init.down.sql

-29 migrations/20250104202448_init.up.sql

```sql
CREATE TABLE IF NOT EXISTS accounts (
    did TEXT PRIMARY KEY NOT NULL,
    email TEXT NOT NULL UNIQUE,
    password TEXT NOT NULL,
    root TEXT NOT NULL,
    rev TEXT NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

CREATE TABLE IF NOT EXISTS handles (
    handle TEXT PRIMARY KEY NOT NULL,
    did TEXT NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (did) REFERENCES accounts(did)
);

CREATE TABLE IF NOT EXISTS invites (
    id TEXT PRIMARY KEY NOT NULL,
    did TEXT,
    count INTEGER NOT NULL DEFAULT 1,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

CREATE TABLE IF NOT EXISTS sessions (
    id TEXT PRIMARY KEY NOT NULL,
    did TEXT NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (did) REFERENCES accounts(did)
);
```
-1 migrations/20250217052304_repo_status.down.sql

```sql
ALTER TABLE accounts DROP COLUMN status;
```

-1 migrations/20250217052304_repo_status.up.sql

```sql
ALTER TABLE accounts ADD COLUMN status TEXT NOT NULL DEFAULT "active";
```

-1 migrations/20250219055555_account_plc_root.down.sql

```sql
ALTER TABLE accounts DROP COLUMN plc_root;
```

-1 migrations/20250219055555_account_plc_root.up.sql

```sql
ALTER TABLE accounts ADD COLUMN plc_root TEXT NOT NULL;
```

-1 migrations/20250220235950_private_data.down.sql

```sql
ALTER TABLE accounts DROP COLUMN private_prefs;
```

-1 migrations/20250220235950_private_data.up.sql

```sql
ALTER TABLE accounts ADD COLUMN private_prefs JSON;
```

-1 migrations/20250223015249_blob_ref.down.sql

```sql
DROP TABLE blob_ref;
```

-6 migrations/20250223015249_blob_ref.up.sql

-1 migrations/20250330074000_oauth.down.sql

```sql
DROP TABLE oauth_par_requests;
```
-37 migrations/20250330074000_oauth.up.sql

```sql
CREATE TABLE IF NOT EXISTS oauth_par_requests (
    request_uri TEXT PRIMARY KEY NOT NULL,
    client_id TEXT NOT NULL,
    response_type TEXT NOT NULL,
    code_challenge TEXT NOT NULL,
    code_challenge_method TEXT NOT NULL,
    state TEXT,
    login_hint TEXT,
    scope TEXT,
    redirect_uri TEXT,
    response_mode TEXT,
    display TEXT,
    created_at INTEGER NOT NULL,
    expires_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS oauth_authorization_codes (
    code TEXT PRIMARY KEY NOT NULL,
    client_id TEXT NOT NULL,
    subject TEXT NOT NULL,
    code_challenge TEXT NOT NULL,
    code_challenge_method TEXT NOT NULL,
    redirect_uri TEXT NOT NULL,
    scope TEXT,
    created_at INTEGER NOT NULL,
    expires_at INTEGER NOT NULL,
    used BOOLEAN NOT NULL DEFAULT FALSE
);
CREATE TABLE IF NOT EXISTS oauth_refresh_tokens (
    token TEXT PRIMARY KEY NOT NULL,
    client_id TEXT NOT NULL,
    subject TEXT NOT NULL,
    dpop_thumbprint TEXT NOT NULL,
    scope TEXT,
    created_at INTEGER NOT NULL,
    expires_at INTEGER NOT NULL,
    revoked BOOLEAN NOT NULL DEFAULT FALSE
);
```
-6 migrations/20250502032700_jti.down.sql

-13 migrations/20250502032700_jti.up.sql

```sql
-- Table for tracking used JTIs to prevent replay attacks
CREATE TABLE IF NOT EXISTS oauth_used_jtis (
    jti TEXT PRIMARY KEY NOT NULL,
    issuer TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    expires_at INTEGER NOT NULL
);

-- Create indexes for faster lookups and cleanup
CREATE INDEX IF NOT EXISTS idx_par_expires_at ON oauth_par_requests(expires_at);
CREATE INDEX IF NOT EXISTS idx_auth_codes_expires_at ON oauth_authorization_codes(expires_at);
CREATE INDEX IF NOT EXISTS idx_refresh_tokens_expires_at ON oauth_refresh_tokens(expires_at);
CREATE INDEX IF NOT EXISTS idx_jtis_expires_at ON oauth_used_jtis(expires_at);
```
-16 migrations/20250508251242_actor_store.down.sql

```sql
-- Drop indexes
DROP INDEX IF EXISTS idx_backlink_link_to;
DROP INDEX IF EXISTS idx_blob_tempkey;
DROP INDEX IF EXISTS idx_record_repo_rev;
DROP INDEX IF EXISTS idx_record_collection;
DROP INDEX IF EXISTS idx_record_cid;
DROP INDEX IF EXISTS idx_repo_block_repo_rev;

-- Drop tables
DROP TABLE IF EXISTS account_pref;
DROP TABLE IF EXISTS backlink;
DROP TABLE IF EXISTS record_blob;
DROP TABLE IF EXISTS blob;
DROP TABLE IF EXISTS record;
DROP TABLE IF EXISTS repo_block;
DROP TABLE IF EXISTS repo_root;
```
-70 migrations/20250508251242_actor_store.up.sql

```sql
-- Actor store schema matching TypeScript implementation

-- Repository root information
CREATE TABLE IF NOT EXISTS repo_root (
    did TEXT PRIMARY KEY NOT NULL,
    cid TEXT NOT NULL,
    rev TEXT NOT NULL,
    indexedAt TEXT NOT NULL
);

-- Repository blocks (IPLD blocks)
CREATE TABLE IF NOT EXISTS repo_block (
    cid TEXT PRIMARY KEY NOT NULL,
    repoRev TEXT NOT NULL,
    size INTEGER NOT NULL,
    content BLOB NOT NULL
);

-- Record index
CREATE TABLE IF NOT EXISTS record (
    uri TEXT PRIMARY KEY NOT NULL,
    cid TEXT NOT NULL,
    collection TEXT NOT NULL,
    rkey TEXT NOT NULL,
    repoRev TEXT NOT NULL,
    indexedAt TEXT NOT NULL,
    takedownRef TEXT
);

-- Blob storage metadata
CREATE TABLE IF NOT EXISTS blob (
    cid TEXT PRIMARY KEY NOT NULL,
    mimeType TEXT NOT NULL,
    size INTEGER NOT NULL,
    tempKey TEXT,
    width INTEGER,
    height INTEGER,
    createdAt TEXT NOT NULL,
    takedownRef TEXT
);

-- Record-blob associations
CREATE TABLE IF NOT EXISTS record_blob (
    blobCid TEXT NOT NULL,
    recordUri TEXT NOT NULL,
    PRIMARY KEY (blobCid, recordUri)
);

-- Backlinks between records
CREATE TABLE IF NOT EXISTS backlink (
    uri TEXT NOT NULL,
    path TEXT NOT NULL,
    linkTo TEXT NOT NULL,
    PRIMARY KEY (uri, path)
);

-- User preferences
CREATE TABLE IF NOT EXISTS account_pref (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL,
    valueJson TEXT NOT NULL
);

-- Create indexes
CREATE INDEX IF NOT EXISTS idx_repo_block_repo_rev ON repo_block(repoRev, cid);
CREATE INDEX IF NOT EXISTS idx_record_cid ON record(cid);
CREATE INDEX IF NOT EXISTS idx_record_collection ON record(collection);
CREATE INDEX IF NOT EXISTS idx_record_repo_rev ON record(repoRev);
CREATE INDEX IF NOT EXISTS idx_blob_tempkey ON blob(tempKey);
CREATE INDEX IF NOT EXISTS idx_backlink_link_to ON backlink(path, linkTo);
```
-15 migrations/20250508252057_blockstore.up.sql

```sql
CREATE TABLE IF NOT EXISTS blocks (
    cid TEXT PRIMARY KEY NOT NULL,
    data BLOB NOT NULL,
    multicodec INTEGER NOT NULL,
    multihash INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS tree_nodes (
    repo_did TEXT NOT NULL,
    key TEXT NOT NULL,
    value_cid TEXT NOT NULL,
    PRIMARY KEY (repo_did, key),
    FOREIGN KEY (value_cid) REFERENCES blocks(cid)
);
CREATE INDEX IF NOT EXISTS idx_blocks_cid ON blocks(cid);
CREATE INDEX IF NOT EXISTS idx_tree_nodes_repo ON tree_nodes(repo_did);
```
-5 migrations/20250510222500_actor_migration.up.sql
+540 src/account_manager/helpers/account.rs

```rust
//! Based on https://github.com/blacksky-algorithms/rsky/blob/main/rsky-pds/src/account_manager/helpers/account.rs
//! blacksky-algorithms/rsky is licensed under the Apache License 2.0
//!
//! Modified for SQLite backend
use crate::schema::pds::account::dsl as AccountSchema;
use crate::schema::pds::account::table as AccountTable;
use crate::schema::pds::actor::dsl as ActorSchema;
use crate::schema::pds::actor::table as ActorTable;
use anyhow::Result;
use chrono::DateTime;
use chrono::offset::Utc as UtcOffset;
use diesel::result::{DatabaseErrorKind, Error as DieselError};
use diesel::*;
use rsky_common::RFC3339_VARIANT;
use rsky_lexicon::com::atproto::admin::StatusAttr;
#[expect(unused_imports)]
pub(crate) use rsky_pds::account_manager::helpers::account::{
    AccountStatus, ActorAccount, AvailabilityFlags, FormattedAccountStatus,
    GetAccountAdminStatusOutput, format_account_status,
};
use std::ops::Add;
use std::time::SystemTime;
use thiserror::Error;

use diesel::dsl::{LeftJoinOn, exists, not};
use diesel::helper_types::Eq;

#[derive(Error, Debug)]
pub enum AccountHelperError {
    #[error("UserAlreadyExistsError")]
    UserAlreadyExistsError,
    #[error("DatabaseError: `{0}`")]
    DieselError(String),
}
pub type ActorJoinAccount =
    LeftJoinOn<ActorTable, AccountTable, Eq<ActorSchema::did, AccountSchema::did>>;
pub type BoxedQuery<'life> = dsl::IntoBoxed<'life, ActorJoinAccount, sqlite::Sqlite>;
pub fn select_account_qb(flags: Option<AvailabilityFlags>) -> BoxedQuery<'static> {
    let AvailabilityFlags {
        include_taken_down,
        include_deactivated,
    } = flags.unwrap_or(AvailabilityFlags {
        include_taken_down: Some(false),
        include_deactivated: Some(false),
    });
    let include_taken_down = include_taken_down.unwrap_or(false);
    let include_deactivated = include_deactivated.unwrap_or(false);

    let mut builder = ActorSchema::actor
        .left_join(AccountSchema::account.on(ActorSchema::did.eq(AccountSchema::did)))
        .into_boxed();
    if !include_taken_down {
        builder = builder.filter(ActorSchema::takedownRef.is_null());
    }
    if !include_deactivated {
        builder = builder.filter(ActorSchema::deactivatedAt.is_null());
    }
    builder
}

pub async fn get_account(
    _handle_or_did: &str,
    flags: Option<AvailabilityFlags>,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<Option<ActorAccount>> {
    let handle_or_did = _handle_or_did.to_owned();
    let found = db
        .get()
        .await?
        .interact(move |conn| {
            let mut builder = select_account_qb(flags);
            if handle_or_did.starts_with("did:") {
                builder = builder.filter(ActorSchema::did.eq(handle_or_did));
            } else {
                builder = builder.filter(ActorSchema::handle.eq(handle_or_did));
            }

            builder
                .select((
                    ActorSchema::did,
                    ActorSchema::handle,
                    ActorSchema::createdAt,
                    ActorSchema::takedownRef,
                    ActorSchema::deactivatedAt,
                    ActorSchema::deleteAfter,
                    AccountSchema::email.nullable(),
                    AccountSchema::emailConfirmedAt.nullable(),
                    AccountSchema::invitesDisabled.nullable(),
                ))
                .first::<(
                    String,
                    Option<String>,
                    String,
                    Option<String>,
                    Option<String>,
                    Option<String>,
                    Option<String>,
                    Option<String>,
                    Option<i16>,
                )>(conn)
                .map(|res| ActorAccount {
                    did: res.0,
                    handle: res.1,
                    created_at: res.2,
                    takedown_ref: res.3,
                    deactivated_at: res.4,
                    delete_after: res.5,
                    email: res.6,
                    email_confirmed_at: res.7,
                    invites_disabled: res.8,
                })
                .optional()
        })
        .await
        .expect("Failed to get account")?;
    Ok(found)
}

pub async fn get_account_by_email(
    _email: &str,
    flags: Option<AvailabilityFlags>,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<Option<ActorAccount>> {
    let email = _email.to_owned();
    let found = db
        .get()
        .await?
        .interact(move |conn| {
            select_account_qb(flags)
                .select((
                    ActorSchema::did,
                    ActorSchema::handle,
                    ActorSchema::createdAt,
                    ActorSchema::takedownRef,
                    ActorSchema::deactivatedAt,
                    ActorSchema::deleteAfter,
                    AccountSchema::email.nullable(),
                    AccountSchema::emailConfirmedAt.nullable(),
                    AccountSchema::invitesDisabled.nullable(),
                ))
                .filter(AccountSchema::email.eq(email.to_lowercase()))
                .first::<(
                    String,
                    Option<String>,
                    String,
                    Option<String>,
                    Option<String>,
                    Option<String>,
                    Option<String>,
                    Option<String>,
                    Option<i16>,
                )>(conn)
                .map(|res| ActorAccount {
                    did: res.0,
                    handle: res.1,
                    created_at: res.2,
                    takedown_ref: res.3,
                    deactivated_at: res.4,
                    delete_after: res.5,
                    email: res.6,
                    email_confirmed_at: res.7,
                    invites_disabled: res.8,
                })
                .optional()
        })
        .await
        .expect("Failed to get account")?;
    Ok(found)
}

pub async fn register_actor(
    did: String,
    handle: String,
    deactivated: Option<bool>,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    let system_time = SystemTime::now();
    let dt: DateTime<UtcOffset> = system_time.into();
    let created_at = format!("{}", dt.format(RFC3339_VARIANT));
    let deactivate_at = match deactivated {
        Some(true) => Some(created_at.clone()),
        _ => None,
    };
    let deactivate_after = match deactivated {
        Some(true) => {
            let exp = dt.add(chrono::Duration::days(3));
            Some(format!("{}", exp.format(RFC3339_VARIANT)))
        }
        _ => None,
    };

    let _: String = db
        .get()
        .await?
        .interact(move |conn| {
            insert_into(ActorSchema::actor)
                .values((
                    ActorSchema::did.eq(did),
                    ActorSchema::handle.eq(handle),
                    ActorSchema::createdAt.eq(created_at),
                    ActorSchema::deactivatedAt.eq(deactivate_at),
                    ActorSchema::deleteAfter.eq(deactivate_after),
                ))
                .on_conflict_do_nothing()
                .returning(ActorSchema::did)
                .get_result(conn)
        })
        .await
        .expect("Failed to register actor")?;
    Ok(())
}

pub async fn register_account(
    did: String,
    email: String,
    password: String,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    let created_at = rsky_common::now();

    // @TODO record recovery key for bring your own recovery key
    let _: String = db
        .get()
        .await?
        .interact(move |conn| {
            insert_into(AccountSchema::account)
                .values((
                    AccountSchema::did.eq(did),
                    AccountSchema::email.eq(email),
                    AccountSchema::password.eq(password),
                    AccountSchema::createdAt.eq(created_at),
                ))
                .on_conflict_do_nothing()
                .returning(AccountSchema::did)
                .get_result(conn)
        })
        .await
        .expect("Failed to register account")?;
    Ok(())
}

pub async fn delete_account(
    did: &str,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
    actor_db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    use crate::schema::actor_store::repo_root::dsl as RepoRootSchema;
    use crate::schema::pds::email_token::dsl as EmailTokenSchema;
    use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;

    let did_clone = did.to_owned();
    _ = actor_db
        .get()
        .await?
        .interact(move |conn| {
            delete(RepoRootSchema::repo_root)
                .filter(RepoRootSchema::did.eq(&did_clone))
                .execute(conn)
        })
        .await
        .expect("Failed to delete actor")?;
    let did_clone = did.to_owned();
    _ = db
        .get()
        .await?
        .interact(move |conn| {
            _ = delete(EmailTokenSchema::email_token)
                .filter(EmailTokenSchema::did.eq(&did_clone))
                .execute(conn)?;
            _ = delete(RefreshTokenSchema::refresh_token)
                .filter(RefreshTokenSchema::did.eq(&did_clone))
                .execute(conn)?;
            _ = delete(AccountSchema::account)
                .filter(AccountSchema::did.eq(&did_clone))
                .execute(conn)?;
            delete(ActorSchema::actor)
                .filter(ActorSchema::did.eq(&did_clone))
                .execute(conn)
        })
        .await
        .expect("Failed to delete account")?;

    let data_repo_file = format!("data/repo/{}.db", did.to_owned());
    let data_blob_path = format!("data/blob/{}", did);
    let data_blob_path = std::path::Path::new(&data_blob_path);
    let data_repo_file = std::path::Path::new(&data_repo_file);
    if data_repo_file.exists() {
        std::fs::remove_file(data_repo_file)?;
    };
    if data_blob_path.exists() {
        std::fs::remove_dir_all(data_blob_path)?;
    };
    Ok(())
}

pub async fn update_account_takedown_status(
    did: &str,
    takedown: StatusAttr,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    let takedown_ref: Option<String> = match takedown.applied {
        true => takedown
            .r#ref
            .map_or_else(|| Some(rsky_common::now()), Some),
        false => None,
    };
    let did = did.to_owned();
    _ = db
        .get()
        .await?
        .interact(move |conn| {
            update(ActorSchema::actor)
                .filter(ActorSchema::did.eq(did))
                .set((ActorSchema::takedownRef.eq(takedown_ref),))
                .execute(conn)
        })
        .await
        .expect("Failed to update account takedown status")?;
    Ok(())
}

pub async fn deactivate_account(
    did: &str,
    delete_after: Option<String>,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    let did = did.to_owned();
    _ = db
        .get()
        .await?
        .interact(move |conn| {
            update(ActorSchema::actor)
                .filter(ActorSchema::did.eq(did))
                .set((
                    ActorSchema::deactivatedAt.eq(rsky_common::now()),
                    ActorSchema::deleteAfter.eq(delete_after),
                ))
                .execute(conn)
        })
        .await
        .expect("Failed to deactivate account")?;
    Ok(())
}

pub async fn activate_account(
    did: &str,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    let did = did.to_owned();
    _ = db
        .get()
        .await?
        .interact(move |conn| {
            update(ActorSchema::actor)
                .filter(ActorSchema::did.eq(did))
                .set((
                    ActorSchema::deactivatedAt.eq::<Option<String>>(None),
                    ActorSchema::deleteAfter.eq::<Option<String>>(None),
                ))
                .execute(conn)
        })
        .await
        .expect("Failed to activate account")?;
    Ok(())
}

pub async fn update_email(
    did: &str,
    email: &str,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    let did = did.to_owned();
    let email = email.to_owned();
    let res = db
        .get()
        .await?
        .interact(move |conn| {
            update(AccountSchema::account)
                .filter(AccountSchema::did.eq(did))
                .set((
                    AccountSchema::email.eq(email.to_lowercase()),
                    AccountSchema::emailConfirmedAt.eq::<Option<String>>(None),
                ))
                .execute(conn)
        })
        .await
        .expect("Failed to update email");

    match res {
        Ok(_) => Ok(()),
        Err(DieselError::DatabaseError(kind, _)) => match kind {
            DatabaseErrorKind::UniqueViolation => Err(anyhow::Error::new(
                AccountHelperError::UserAlreadyExistsError,
            )),
            _ => Err(anyhow::Error::new(AccountHelperError::DieselError(
                format!("{:?}", kind),
            ))),
        },
        Err(e) => Err(anyhow::Error::new(e)),
    }
}

pub async fn update_handle(
    did: &str,
    handle: &str,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    use crate::schema::pds::actor;

    let actor2 = diesel::alias!(actor as actor2);

    let did = did.to_owned();
    let handle = handle.to_owned();
    let res = db
        .get()
        .await?
        .interact(move |conn| {
            update(ActorSchema::actor)
                .filter(ActorSchema::did.eq(did))
                .filter(not(exists(actor2.filter(ActorSchema::handle.eq(&handle)))))
                .set((ActorSchema::handle.eq(&handle),))
                .execute(conn)
        })
        .await
        .expect("Failed to update handle")?;

    if res < 1 {
        return Err(anyhow::Error::new(
            AccountHelperError::UserAlreadyExistsError,
        ));
    }
    Ok(())
}

pub async fn set_email_confirmed_at(
    did: &str,
    email_confirmed_at: String,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    let did = did.to_owned();
    _ = db
        .get()
        .await?
        .interact(move |conn| {
            update(AccountSchema::account)
                .filter(AccountSchema::did.eq(did))
                .set(AccountSchema::emailConfirmedAt.eq(email_confirmed_at))
                .execute(conn)
        })
        .await
        .expect("Failed to set email confirmed at")?;
    Ok(())
}

pub async fn get_account_admin_status(
    did: &str,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<Option<GetAccountAdminStatusOutput>> {
    let did = did.to_owned();
    let res: Option<(Option<String>, Option<String>)> = db
        .get()
        .await?
        .interact(move |conn| {
            ActorSchema::actor
                .filter(ActorSchema::did.eq(did))
                .select((ActorSchema::takedownRef, ActorSchema::deactivatedAt))
                .first(conn)
                .optional()
        })
        .await
        .expect("Failed to get account admin status")?;
    match res {
        None => Ok(None),
        Some(res) => {
            let takedown = res.0.map_or(
                StatusAttr {
                    applied: false,
                    r#ref: None,
                },
                |takedown_ref| StatusAttr {
                    applied: true,
                    r#ref: Some(takedown_ref),
                },
            );
            let deactivated = match res.1 {
                Some(_) => StatusAttr {
                    applied: true,
                    r#ref: None,
                },
                None => StatusAttr {
                    applied: false,
                    r#ref: None,
                },
            };
            Ok(Some(GetAccountAdminStatusOutput {
                takedown,
                deactivated,
            }))
        }
    }
}
```
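A short usage sketch of the helpers above. The pool construction follows deadpool-diesel's documented builder API, and `deadpool_diesel::sqlite::Pool` is assumed to be the alias for the pool type these functions accept; the database path and handle are placeholders:

```rust
use deadpool_diesel::Runtime;
use deadpool_diesel::sqlite::{Manager, Pool};

// Hedged sketch: build a small SQLite pool and resolve a handle to an
// ActorAccount via get_account(), excluding taken-down but including
// deactivated actors.
async fn lookup_example() -> anyhow::Result<()> {
    let manager = Manager::new("data/sqlite.db", Runtime::Tokio1); // placeholder path
    let pool: Pool = Pool::builder(manager).max_size(4).build()?;

    let account = get_account(
        "alice.test", // placeholder handle; a did:... string selects by DID instead
        Some(AvailabilityFlags {
            include_taken_down: Some(false),
            include_deactivated: Some(true),
        }),
        &pool,
    )
    .await?;

    if let Some(acc) = account {
        println!("resolved {} -> {:?}", acc.did, acc.handle);
    }
    Ok(())
}
```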
+206 src/account_manager/helpers/auth.rs

```rust
//! Based on https://github.com/blacksky-algorithms/rsky/blob/main/rsky-pds/src/account_manager/helpers/auth.rs
//! blacksky-algorithms/rsky is licensed under the Apache License 2.0
//!
//! Modified for SQLite backend
use crate::models::pds as models;
use anyhow::Result;
use diesel::*;
use rsky_common::time::from_micros_to_utc;
use rsky_common::{RFC3339_VARIANT, get_random_str};
#[expect(unused_imports)]
pub(crate) use rsky_pds::account_manager::helpers::auth::{
    AuthHelperError, AuthToken, CreateTokensOpts, CustomClaimObj, RefreshGracePeriodOpts,
    RefreshToken, ServiceJwtHeader, ServiceJwtParams, ServiceJwtPayload, create_access_token,
    create_refresh_token, create_service_jwt, create_tokens, decode_refresh_token,
};

pub async fn store_refresh_token(
    payload: RefreshToken,
    app_password_name: Option<String>,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;

    let exp = from_micros_to_utc((payload.exp.as_millis() / 1000) as i64);

    _ = db
        .get()
        .await?
        .interact(move |conn| {
            insert_into(RefreshTokenSchema::refresh_token)
                .values((
                    RefreshTokenSchema::id.eq(payload.jti),
                    RefreshTokenSchema::did.eq(payload.sub),
                    RefreshTokenSchema::appPasswordName.eq(app_password_name),
                    RefreshTokenSchema::expiresAt.eq(format!("{}", exp.format(RFC3339_VARIANT))),
                ))
                .on_conflict_do_nothing() // E.g. when re-granting during a refresh grace period
                .execute(conn)
        })
        .await
        .expect("Failed to store refresh token")?;

    Ok(())
}

pub async fn revoke_refresh_token(
    id: String,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<bool> {
    use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;
    db.get()
        .await?
        .interact(move |conn| {
            let deleted_rows = delete(RefreshTokenSchema::refresh_token)
                .filter(RefreshTokenSchema::id.eq(id))
                .get_results::<models::RefreshToken>(conn)?;

            Ok(!deleted_rows.is_empty())
        })
        .await
        .expect("Failed to revoke refresh token")
}

pub async fn revoke_refresh_tokens_by_did(
    did: &str,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<bool> {
    use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;
    let did = did.to_owned();
    db.get()
        .await?
        .interact(move |conn| {
            let deleted_rows = delete(RefreshTokenSchema::refresh_token)
                .filter(RefreshTokenSchema::did.eq(did))
                .get_results::<models::RefreshToken>(conn)?;

            Ok(!deleted_rows.is_empty())
        })
        .await
        .expect("Failed to revoke refresh tokens by DID")
}

pub async fn revoke_app_password_refresh_token(
    did: &str,
    app_pass_name: &str,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<bool> {
    use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;

    let did = did.to_owned();
    let app_pass_name = app_pass_name.to_owned();
    db.get()
        .await?
        .interact(move |conn| {
            let deleted_rows = delete(RefreshTokenSchema::refresh_token)
                .filter(RefreshTokenSchema::did.eq(did))
                .filter(RefreshTokenSchema::appPasswordName.eq(app_pass_name))
                .get_results::<models::RefreshToken>(conn)?;

            Ok(!deleted_rows.is_empty())
        })
        .await
        .expect("Failed to revoke app password refresh token")
}

pub async fn get_refresh_token(
    id: &str,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<Option<models::RefreshToken>> {
    use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;
    let id = id.to_owned();
    db.get()
        .await?
        .interact(move |conn| {
            Ok(RefreshTokenSchema::refresh_token
                .find(id)
                .first(conn)
                .optional()?)
        })
        .await
        .expect("Failed to get refresh token")
}

pub async fn delete_expired_refresh_tokens(
    did: &str,
    now: String,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;
    let did = did.to_owned();

    db.get()
        .await?
        .interact(move |conn| {
            _ = delete(RefreshTokenSchema::refresh_token)
                .filter(RefreshTokenSchema::did.eq(did))
                .filter(RefreshTokenSchema::expiresAt.le(now))
                .execute(conn)?;
            Ok(())
        })
        .await
        .expect("Failed to delete expired refresh tokens")
}

pub async fn add_refresh_grace_period(
    opts: RefreshGracePeriodOpts,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    db.get()
        .await?
        .interact(move |conn| {
            let RefreshGracePeriodOpts {
                id,
                expires_at,
                next_id,
            } = opts;
            use crate::schema::pds::refresh_token::dsl as RefreshTokenSchema;

            drop(
                update(RefreshTokenSchema::refresh_token)
                    .filter(RefreshTokenSchema::id.eq(id))
                    .filter(
                        RefreshTokenSchema::nextId
                            .is_null()
                            .or(RefreshTokenSchema::nextId.eq(&next_id)),
                    )
                    .set((
                        RefreshTokenSchema::expiresAt.eq(expires_at),
                        RefreshTokenSchema::nextId.eq(&next_id),
                    ))
                    .returning(models::RefreshToken::as_select())
                    .get_results(conn)
                    .map_err(|error| {
                        anyhow::Error::new(AuthHelperError::ConcurrentRefresh).context(error)
                    })?,
            );
            Ok(())
        })
        .await
        .expect("Failed to add refresh grace period")
}

pub fn get_refresh_token_id() -> String {
    get_random_str()
}
```
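A hedged sketch of the token lifecycle these helpers support: persist a freshly minted refresh token, then revoke it by its `jti`. `RefreshToken` is the rsky-pds re-export above; only the `jti` and `sub` fields that `store_refresh_token()` itself reads are assumed here:

```rust
// Illustrative flow, not the PDS's actual session handler.
async fn rotate_example(
    pool: &deadpool_diesel::sqlite::Pool,
    token: RefreshToken, // minted elsewhere, e.g. via create_refresh_token
) -> anyhow::Result<()> {
    let jti = token.jti.clone();
    // Persist it; None means the session is not tied to an app password.
    store_refresh_token(token, None, pool).await?;
    // ... hand the token to the client; later, on logout or rotation:
    let removed = revoke_refresh_token(jti, pool).await?;
    anyhow::ensure!(removed, "token was already revoked");
    Ok(())
}
```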
+173 src/account_manager/helpers/email_token.rs

```rust
//! Based on https://github.com/blacksky-algorithms/rsky/blob/main/rsky-pds/src/account_manager/helpers/email_token.rs
//! blacksky-algorithms/rsky is licensed under the Apache License 2.0
//!
//! Modified for SQLite backend
use crate::models::pds::EmailToken;
use crate::models::pds::EmailTokenPurpose;
use anyhow::{Result, bail};
use diesel::*;
use rsky_common::time::{MINUTE, from_str_to_utc, less_than_ago_s};
use rsky_pds::apis::com::atproto::server::get_random_token;

pub async fn create_email_token(
    did: &str,
    purpose: EmailTokenPurpose,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<String> {
    use crate::schema::pds::email_token::dsl as EmailTokenSchema;
    let token = get_random_token().to_uppercase();
    let now = rsky_common::now();

    let did = did.to_owned();
    db.get()
        .await?
        .interact(move |conn| {
            _ = insert_into(EmailTokenSchema::email_token)
                .values((
                    EmailTokenSchema::purpose.eq(purpose),
                    EmailTokenSchema::did.eq(did),
                    EmailTokenSchema::token.eq(&token),
                    EmailTokenSchema::requestedAt.eq(&now),
                ))
                .on_conflict((EmailTokenSchema::purpose, EmailTokenSchema::did))
                .do_update()
                .set((
                    EmailTokenSchema::token.eq(&token),
                    EmailTokenSchema::requestedAt.eq(&now),
                ))
                .execute(conn)?;
            Ok(token)
        })
        .await
        .expect("Failed to create email token")
}

pub async fn assert_valid_token(
    did: &str,
    purpose: EmailTokenPurpose,
    token: &str,
    expiration_len: Option<i32>,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    let expiration_len = expiration_len.unwrap_or(MINUTE * 15);
    use crate::schema::pds::email_token::dsl as EmailTokenSchema;

    let did = did.to_owned();
    let token = token.to_owned();
    let res = db
        .get()
        .await?
        .interact(move |conn| {
            EmailTokenSchema::email_token
                .filter(EmailTokenSchema::purpose.eq(purpose))
                .filter(EmailTokenSchema::did.eq(did))
                .filter(EmailTokenSchema::token.eq(token.to_uppercase()))
                .select(EmailToken::as_select())
                .first(conn)
                .optional()
        })
        .await
        .expect("Failed to assert token")?;
    if let Some(res) = res {
        let requested_at = from_str_to_utc(&res.requested_at);
        let expired = !less_than_ago_s(requested_at, expiration_len);
        if expired {
            bail!("Token is expired")
        }
        Ok(())
    } else {
        bail!("Token is invalid")
    }
}

pub async fn assert_valid_token_and_find_did(
    purpose: EmailTokenPurpose,
    token: &str,
    expiration_len: Option<i32>,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<String> {
    let expiration_len = expiration_len.unwrap_or(MINUTE * 15);
    use crate::schema::pds::email_token::dsl as EmailTokenSchema;

    let token = token.to_owned();
    let res = db
        .get()
        .await?
        .interact(move |conn| {
            EmailTokenSchema::email_token
                .filter(EmailTokenSchema::purpose.eq(purpose))
                .filter(EmailTokenSchema::token.eq(token.to_uppercase()))
                .select(EmailToken::as_select())
                .first(conn)
                .optional()
        })
        .await
        .expect("Failed to assert token")?;
    if let Some(res) = res {
        let requested_at = from_str_to_utc(&res.requested_at);
        let expired = !less_than_ago_s(requested_at, expiration_len);
        if expired {
            bail!("Token is expired")
        }
        Ok(res.did)
    } else {
        bail!("Token is invalid")
    }
}

pub async fn delete_email_token(
    did: &str,
    purpose: EmailTokenPurpose,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    use crate::schema::pds::email_token::dsl as EmailTokenSchema;
    let did = did.to_owned();
    _ = db
        .get()
        .await?
        .interact(move |conn| {
            delete(EmailTokenSchema::email_token)
                .filter(EmailTokenSchema::did.eq(did))
                .filter(EmailTokenSchema::purpose.eq(purpose))
                .execute(conn)
        })
        .await
        .expect("Failed to delete token")?;
    Ok(())
}

pub async fn delete_all_email_tokens(
    did: &str,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    use crate::schema::pds::email_token::dsl as EmailTokenSchema;

    let did = did.to_owned();
    _ = db
        .get()
        .await?
        .interact(move |conn| {
            delete(EmailTokenSchema::email_token)
                .filter(EmailTokenSchema::did.eq(did))
                .execute(conn)
        })
        .await
        .expect("Failed to delete all tokens")?;

    Ok(())
}
```
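A hedged sketch of the intended round trip: issue a token (the upsert keeps one row per `(purpose, did)`), validate what the user submits back within the default 15-minute window, then delete it so it is single-use. The `ResetPassword` variant name is a guess at `EmailTokenPurpose`'s shape, which this diff does not show:

```rust
// Illustrative flow only; purpose variant and delivery step are assumptions.
async fn reset_flow(
    did: &str,
    submitted: &str,
    pool: &deadpool_diesel::sqlite::Pool,
) -> anyhow::Result<()> {
    // 1. Issue (or re-issue) a token for this purpose/DID pair.
    let token = create_email_token(did, EmailTokenPurpose::ResetPassword, pool).await?;
    // ... `token` is emailed to the user out of band ...
    let _ = token;
    // 2. Validate the user's input; None keeps the default 15-minute expiry.
    assert_valid_token(did, EmailTokenPurpose::ResetPassword, submitted, None, pool).await?;
    // 3. Consume it.
    delete_email_token(did, EmailTokenPurpose::ResetPassword, pool).await
}
```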
+397 src/account_manager/helpers/invite.rs

```rust
//! Based on https://github.com/blacksky-algorithms/rsky/blob/main/rsky-pds/src/account_manager/helpers/invite.rs
//! blacksky-algorithms/rsky is licensed under the Apache License 2.0
//!
//! Modified for SQLite backend
use crate::models::pds as models;
use anyhow::{Result, bail};
use diesel::*;
use rsky_lexicon::com::atproto::server::AccountCodes;
use rsky_lexicon::com::atproto::server::{
    InviteCode as LexiconInviteCode, InviteCodeUse as LexiconInviteCodeUse,
};
use rsky_pds::account_manager::DisableInviteCodesOpts;
use std::collections::BTreeMap;
use std::mem;

pub type CodeUse = LexiconInviteCodeUse;
pub type CodeDetail = LexiconInviteCode;

pub async fn ensure_invite_is_available(
    invite_code: String,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    use crate::schema::pds::actor::dsl as ActorSchema;
    use crate::schema::pds::invite_code::dsl as InviteCodeSchema;
    use crate::schema::pds::invite_code_use::dsl as InviteCodeUseSchema;

    db.get().await?.interact(move |conn| {
        let invite: Option<models::InviteCode> = InviteCodeSchema::invite_code
            .left_join(
                ActorSchema::actor.on(InviteCodeSchema::forAccount
                    .eq(ActorSchema::did)
                    .and(ActorSchema::takedownRef.is_null())),
            )
            .filter(InviteCodeSchema::code.eq(&invite_code))
            .select(models::InviteCode::as_select())
            .first(conn)
            .optional()?;

        if let Some(invite) = invite {
            if invite.disabled > 0 {
                bail!("InvalidInviteCode: Disabled. Provided invite code not available `{invite_code:?}`");
            }

            let uses: i64 = InviteCodeUseSchema::invite_code_use
                .count()
                .filter(InviteCodeUseSchema::code.eq(&invite_code))
                .first(conn)?;

            if invite.available_uses as i64 <= uses {
                bail!("InvalidInviteCode: Not enough uses. Provided invite code not available `{invite_code:?}`");
            }
        } else {
            bail!("InvalidInviteCode: None. Provided invite code not available `{invite_code:?}`");
        }

        Ok(())
    }).await.expect("Failed to check invite code availability")?;

    Ok(())
}

pub async fn record_invite_use(
    did: String,
    invite_code: Option<String>,
    now: String,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    if let Some(invite_code) = invite_code {
        use crate::schema::pds::invite_code_use::dsl as InviteCodeUseSchema;

        _ = db
            .get()
            .await?
            .interact(move |conn| {
                insert_into(InviteCodeUseSchema::invite_code_use)
                    .values((
                        InviteCodeUseSchema::code.eq(invite_code),
                        InviteCodeUseSchema::usedBy.eq(did),
                        InviteCodeUseSchema::usedAt.eq(now),
                    ))
                    .execute(conn)
            })
            .await
            .expect("Failed to record invite code use")?;
    }
    Ok(())
}

pub async fn create_invite_codes(
    to_create: Vec<AccountCodes>,
    use_count: i32,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    use crate::schema::pds::invite_code::dsl as InviteCodeSchema;
    let created_at = rsky_common::now();

    _ = db
        .get()
        .await?
        .interact(move |conn| {
            let rows: Vec<models::InviteCode> = to_create
                .into_iter()
                .flat_map(|account| {
                    let for_account = account.account;
                    account
                        .codes
                        .iter()
                        .map(|code| models::InviteCode {
                            code: code.clone(),
                            available_uses: use_count,
                            disabled: 0,
                            for_account: for_account.clone(),
                            created_by: "admin".to_owned(),
                            created_at: created_at.clone(),
                        })
                        .collect::<Vec<models::InviteCode>>()
                })
                .collect();
            insert_into(InviteCodeSchema::invite_code)
                .values(&rows)
                .execute(conn)
        })
        .await
        .expect("Failed to create invite codes")?;
    Ok(())
}

pub async fn create_account_invite_codes(
    for_account: &str,
    codes: Vec<String>,
    expected_total: usize,
    disabled: bool,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<Vec<CodeDetail>> {
    use crate::schema::pds::invite_code::dsl as InviteCodeSchema;

    let for_account = for_account.to_owned();
    let rows = db
        .get()
        .await?
        .interact(move |conn| {
            let now = rsky_common::now();

            let rows: Vec<models::InviteCode> = codes
                .into_iter()
                .map(|code| models::InviteCode {
                    code,
                    available_uses: 1,
                    disabled: if disabled { 1 } else { 0 },
                    for_account: for_account.clone(),
                    created_by: for_account.clone(),
                    created_at: now.clone(),
                })
                .collect();

            _ = insert_into(InviteCodeSchema::invite_code)
                .values(&rows)
                .execute(conn)?;

            let final_routine_invite_codes: Vec<models::InviteCode> = InviteCodeSchema::invite_code
                .filter(InviteCodeSchema::forAccount.eq(for_account))
                .filter(InviteCodeSchema::createdBy.ne("admin")) // don't count admin-gifted codes against the user
                .select(models::InviteCode::as_select())
                .get_results(conn)?;

            if final_routine_invite_codes.len() > expected_total {
                bail!("DuplicateCreate: attempted to create additional codes in another request")
            }

            Ok(rows.into_iter().map(|row| CodeDetail {
                code: row.code,
                available: 1,
                disabled: row.disabled == 1,
                for_account: row.for_account,
                created_by: row.created_by,
                created_at: row.created_at,
                uses: Vec::new(),
            }))
        })
        .await
        .expect("Failed to create account invite codes")?;
    Ok(rows.collect())
}

pub async fn get_account_invite_codes(
    did: &str,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<Vec<CodeDetail>> {
    use crate::schema::pds::invite_code::dsl as InviteCodeSchema;

    let did = did.to_owned();
    let res: Vec<models::InviteCode> = db
        .get()
        .await?
        .interact(move |conn| {
            InviteCodeSchema::invite_code
                .filter(InviteCodeSchema::forAccount.eq(did))
                .select(models::InviteCode::as_select())
                .get_results(conn)
        })
        .await
        .expect("Failed to get account invite codes")?;

    let codes: Vec<String> = res.iter().map(|row| row.code.clone()).collect();
    let mut uses = get_invite_codes_uses_v2(codes, db).await?;
    Ok(res
        .into_iter()
        .map(|row| CodeDetail {
            code: row.code.clone(),
            available: row.available_uses,
            disabled: row.disabled == 1,
            for_account: row.for_account,
            created_by: row.created_by,
            created_at: row.created_at,
            uses: mem::take(uses.get_mut(&row.code).unwrap_or(&mut Vec::new())),
        })
        .collect::<Vec<CodeDetail>>())
}

pub async fn get_invite_codes_uses_v2(
    codes: Vec<String>,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<BTreeMap<String, Vec<CodeUse>>> {
    use crate::schema::pds::invite_code_use::dsl as InviteCodeUseSchema;

    let mut uses: BTreeMap<String, Vec<CodeUse>> = BTreeMap::new();
    if !codes.is_empty() {
        let uses_res: Vec<models::InviteCodeUse> = db
            .get()
            .await?
            .interact(|conn| {
                InviteCodeUseSchema::invite_code_use
                    .filter(InviteCodeUseSchema::code.eq_any(codes))
                    .order_by(InviteCodeUseSchema::usedAt.desc())
                    .select(models::InviteCodeUse::as_select())
                    .get_results(conn)
            })
            .await
            .expect("Failed to get invite code uses")?;
        for invite_code_use in uses_res {
            let models::InviteCodeUse {
                code,
                used_by,
                used_at,
            } = invite_code_use;
            match uses.get_mut(&code) {
                None => {
                    drop(uses.insert(code, vec![CodeUse { used_by, used_at }]));
                }
                Some(matched_uses) => matched_uses.push(CodeUse { used_by, used_at }),
            };
        }
    }
    Ok(uses)
}

pub async fn get_invited_by_for_accounts(
    dids: Vec<String>,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<BTreeMap<String, CodeDetail>> {
    if dids.is_empty() {
        return Ok(BTreeMap::new());
    }
    use crate::schema::pds::invite_code::dsl as InviteCodeSchema;
    use crate::schema::pds::invite_code_use::dsl as InviteCodeUseSchema;

    let dids = dids.clone();
    let res: Vec<models::InviteCode> = db
        .get()
        .await?
        .interact(|conn| {
            InviteCodeSchema::invite_code
                .filter(
                    InviteCodeSchema::forAccount.eq_any(
                        InviteCodeUseSchema::invite_code_use
                            .filter(InviteCodeUseSchema::usedBy.eq_any(dids))
                            .select(InviteCodeUseSchema::code)
                            .distinct(),
                    ),
                )
                .select(models::InviteCode::as_select())
                .get_results(conn)
        })
        .await
        .expect("Failed to get account invite codes")?;
    let codes: Vec<String> = res.iter().map(|row| row.code.clone()).collect();
    let mut uses = get_invite_codes_uses_v2(codes, db).await?;

    let code_details = res
        .into_iter()
        .map(|row| CodeDetail {
            code: row.code.clone(),
            available: row.available_uses,
            disabled: row.disabled == 1,
            for_account: row.for_account,
            created_by: row.created_by,
            created_at: row.created_at,
            uses: mem::take(uses.get_mut(&row.code).unwrap_or(&mut Vec::new())),
        })
        .collect::<Vec<CodeDetail>>();

    Ok(code_details.iter().fold(
        BTreeMap::new(),
        |mut acc: BTreeMap<String, CodeDetail>, cur| {
            for code_use in &cur.uses {
                drop(acc.insert(code_use.used_by.clone(), cur.clone()));
            }
            acc
        },
    ))
}

pub async fn set_account_invites_disabled(
    did: &str,
    disabled: bool,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    use crate::schema::pds::account::dsl as AccountSchema;

    let disabled: i16 = if disabled { 1 } else { 0 };
    let did = did.to_owned();
    _ = db
        .get()
        .await?
        .interact(move |conn| {
            update(AccountSchema::account)
                .filter(AccountSchema::did.eq(did))
                .set((AccountSchema::invitesDisabled.eq(disabled),))
                .execute(conn)
        })
        .await
        .expect("Failed to set account invites disabled")?;
    Ok(())
}

pub async fn disable_invite_codes(
    opts: DisableInviteCodesOpts,
    db: &deadpool_diesel::Pool<
        deadpool_diesel::Manager<SqliteConnection>,
        deadpool_diesel::sqlite::Object,
    >,
) -> Result<()> {
    use crate::schema::pds::invite_code::dsl as InviteCodeSchema;

    let DisableInviteCodesOpts { codes, accounts } = opts;
    if !codes.is_empty() {
        _ = db
            .get()
            .await?
            .interact(move |conn| {
                update(InviteCodeSchema::invite_code)
                    .filter(InviteCodeSchema::code.eq_any(&codes))
                    .set((InviteCodeSchema::disabled.eq(1),))
                    .execute(conn)
            })
            .await
            .expect("Failed to disable invite codes")?;
    }
    if !accounts.is_empty() {
        _ = db
            .get()
            .await?
            .interact(move |conn| {
                update(InviteCodeSchema::invite_code)
                    .filter(InviteCodeSchema::forAccount.eq_any(&accounts))
                    .set((InviteCodeSchema::disabled.eq(1),))
                    .execute(conn)
            })
            .await
            .expect("Failed to disable invite codes")?;
    }
    Ok(())
}
```
+192  src/account_manager/helpers/password.rs
···
+//! Based on https://github.com/blacksky-algorithms/rsky/blob/main/rsky-pds/src/account_manager/helpers/password.rs
+//! blacksky-algorithms/rsky is licensed under the Apache License 2.0
+//!
+//! Modified for SQLite backend
+use crate::models::pds as models;
+use crate::models::pds::AppPassword;
+use anyhow::{Result, bail};
+use diesel::*;
+use rsky_common::{get_random_str, now};
+use rsky_lexicon::com::atproto::server::CreateAppPasswordOutput;
+#[expect(unused_imports)]
+pub(crate) use rsky_pds::account_manager::helpers::password::{
+    UpdateUserPasswordOpts, gen_salt_and_hash, hash_app_password, hash_with_salt, verify,
+};
+
+pub async fn verify_account_password(
+    did: &str,
+    password: &String,
+    db: &deadpool_diesel::Pool<
+        deadpool_diesel::Manager<SqliteConnection>,
+        deadpool_diesel::sqlite::Object,
+    >,
+) -> Result<bool> {
+    use crate::schema::pds::account::dsl as AccountSchema;
+
+    let did = did.to_owned();
+    let found = db
+        .get()
+        .await?
+        .interact(move |conn| {
+            AccountSchema::account
+                .filter(AccountSchema::did.eq(did))
+                .select(models::Account::as_select())
+                .first(conn)
+                .optional()
+        })
+        .await
+        .expect("Failed to get account")?;
+    if let Some(found) = found {
+        verify(password, &found.password)
+    } else {
+        Ok(false)
+    }
+}
+
+pub async fn verify_app_password(
+    did: &str,
+    password: &str,
+    db: &deadpool_diesel::Pool<
+        deadpool_diesel::Manager<SqliteConnection>,
+        deadpool_diesel::sqlite::Object,
+    >,
+) -> Result<Option<String>> {
+    use crate::schema::pds::app_password::dsl as AppPasswordSchema;
+
+    let did = did.to_owned();
+    let password = password.to_owned();
+    let password_encrypted = hash_app_password(&did, &password).await?;
+    let found = db
+        .get()
+        .await?
+        .interact(move |conn| {
+            AppPasswordSchema::app_password
+                .filter(AppPasswordSchema::did.eq(did))
+                .filter(AppPasswordSchema::password.eq(password_encrypted))
+                .select(AppPassword::as_select())
+                .first(conn)
+                .optional()
+        })
+        .await
+        .expect("Failed to get app password")?;
+    if let Some(found) = found {
+        Ok(Some(found.name))
+    } else {
+        Ok(None)
+    }
+}
+
+/// create an app password with format:
+/// 1234-abcd-5678-efgh
+pub async fn create_app_password(
+    did: String,
+    name: String,
+    db: &deadpool_diesel::Pool<
+        deadpool_diesel::Manager<SqliteConnection>,
+        deadpool_diesel::sqlite::Object,
+    >,
+) -> Result<CreateAppPasswordOutput> {
+    let str = &get_random_str()[0..16].to_lowercase();
+    let chunks = [&str[0..4], &str[4..8], &str[8..12], &str[12..16]];
+    let password = chunks.join("-");
+    let password_encrypted = hash_app_password(&did, &password).await?;
+
+    use crate::schema::pds::app_password::dsl as AppPasswordSchema;
+
+    let created_at = now();
+
+    db.get()
+        .await?
+        .interact(move |conn| {
+            let got: Option<AppPassword> = insert_into(AppPasswordSchema::app_password)
+                .values((
+                    AppPasswordSchema::did.eq(did),
+                    AppPasswordSchema::name.eq(&name),
+                    AppPasswordSchema::password.eq(password_encrypted),
+                    AppPasswordSchema::createdAt.eq(&created_at),
+                ))
+                .returning(AppPassword::as_select())
+                .get_result(conn)
+                .optional()?;
+            if got.is_some() {
+                Ok(CreateAppPasswordOutput {
+                    name,
+                    password,
+                    created_at,
+                })
+            } else {
+                bail!("could not create app-specific password")
+            }
+        })
+        .await
+        .expect("Failed to create app password")
+}
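
create_app_password derives the user-facing secret purely from a random string: sixteen lowercase characters split into four dash-separated groups. A standalone sketch of that formatting step (the hard-coded seed stands in for get_random_str, which is not reproduced here):

```rust
/// Format a 16-character secret as `xxxx-xxxx-xxxx-xxxx`,
/// mirroring the chunking in `create_app_password`.
fn format_app_password(raw: &str) -> String {
    let s = raw[0..16].to_lowercase();
    let chunks = [&s[0..4], &s[4..8], &s[8..12], &s[12..16]];
    chunks.join("-")
}

fn main() {
    // Stand-in for the random string; the real code calls get_random_str().
    let raw = "1234ABCD5678EFGH";
    assert_eq!(format_app_password(raw), "1234-abcd-5678-efgh");
}
```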
+
+pub async fn list_app_passwords(
+    did: &str,
+    db: &deadpool_diesel::Pool<
+        deadpool_diesel::Manager<SqliteConnection>,
+        deadpool_diesel::sqlite::Object,
+    >,
+) -> Result<Vec<(String, String)>> {
+    use crate::schema::pds::app_password::dsl as AppPasswordSchema;
+
+    let did = did.to_owned();
+    db.get()
+        .await?
+        .interact(move |conn| {
+            Ok(AppPasswordSchema::app_password
+                .filter(AppPasswordSchema::did.eq(did))
+                .select((AppPasswordSchema::name, AppPasswordSchema::createdAt))
+                .get_results(conn)?)
+        })
+        .await
+        .expect("Failed to list app passwords")
+}
+
+pub async fn update_user_password(
+    opts: UpdateUserPasswordOpts,
+    db: &deadpool_diesel::Pool<
+        deadpool_diesel::Manager<SqliteConnection>,
+        deadpool_diesel::sqlite::Object,
+    >,
+) -> Result<()> {
+    use crate::schema::pds::account::dsl as AccountSchema;
+
+    db.get()
+        .await?
+        .interact(move |conn| {
+            _ = update(AccountSchema::account)
+                .filter(AccountSchema::did.eq(opts.did))
+                .set(AccountSchema::password.eq(opts.password_encrypted))
+                .execute(conn)?;
+            Ok(())
+        })
+        .await
+        .expect("Failed to update user password")
+}
+
+pub async fn delete_app_password(
+    did: &str,
+    name: &str,
+    db: &deadpool_diesel::Pool<
+        deadpool_diesel::Manager<SqliteConnection>,
+        deadpool_diesel::sqlite::Object,
+    >,
+) -> Result<()> {
+    use crate::schema::pds::app_password::dsl as AppPasswordSchema;
+
+    let did = did.to_owned();
+    let name = name.to_owned();
+    db.get()
+        .await?
+        .interact(move |conn| {
+            _ = delete(AppPasswordSchema::app_password)
+                .filter(AppPasswordSchema::did.eq(did))
+                .filter(AppPasswordSchema::name.eq(name))
+                .execute(conn)?;
+            Ok(())
+        })
+        .await
+        .expect("Failed to delete app password")
+}
+44  src/account_manager/helpers/repo.rs
···
+//! Based on https://github.com/blacksky-algorithms/rsky/blob/main/rsky-pds/src/account_manager/helpers/repo.rs
+//! blacksky-algorithms/rsky is licensed under the Apache License 2.0
+//!
+//! Modified for SQLite backend
+use anyhow::Result;
+use cidv10::Cid;
+use deadpool_diesel::{Manager, Pool, sqlite::Object};
+use diesel::*;
+
+pub async fn update_root(
+    did: String,
+    cid: Cid,
+    rev: String,
+    db: &Pool<Manager<SqliteConnection>, Object>,
+) -> Result<()> {
+    // @TODO balance risk of a race in the case of a long retry
+    use crate::schema::actor_store::repo_root::dsl as RepoRootSchema;
+
+    let now = rsky_common::now();
+
+    _ = db
+        .get()
+        .await?
+        .interact(move |conn| {
+            insert_into(RepoRootSchema::repo_root)
+                .values((
+                    RepoRootSchema::did.eq(did),
+                    RepoRootSchema::cid.eq(cid.to_string()),
+                    RepoRootSchema::rev.eq(rev.clone()),
+                    RepoRootSchema::indexedAt.eq(now),
+                ))
+                .on_conflict(RepoRootSchema::did)
+                .do_update()
+                .set((
+                    RepoRootSchema::cid.eq(cid.to_string()),
+                    RepoRootSchema::rev.eq(rev),
+                ))
+                .execute(conn)
+        })
+        .await
+        .expect("Failed to update repo root")?;
+
+    Ok(())
+}
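
update_root leans on SQLite's ON CONFLICT ... DO UPDATE upsert so a repo root row is created on the first commit and overwritten in place on every later one. A minimal illustration of the same statement against an in-memory database, using rusqlite purely for the sketch (the project itself goes through Diesel, and the table shape here is abbreviated):

```rust
use rusqlite::{Connection, params};

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE repo_root (did TEXT PRIMARY KEY, cid TEXT, rev TEXT, indexedAt TEXT);",
    )?;
    // Same upsert shape the Diesel builder compiles to: insert, or
    // update cid/rev in place when the did row already exists.
    let upsert = "INSERT INTO repo_root (did, cid, rev, indexedAt)
                  VALUES (?1, ?2, ?3, ?4)
                  ON CONFLICT (did) DO UPDATE SET cid = excluded.cid, rev = excluded.rev";
    conn.execute(upsert, params!["did:plc:abc", "bafy...1", "rev-1", "2024-01-01"])?;
    conn.execute(upsert, params!["did:plc:abc", "bafy...2", "rev-2", "2024-01-02"])?;
    let cid: String = conn.query_row(
        "SELECT cid FROM repo_root WHERE did = ?1",
        ["did:plc:abc"],
        |r| r.get(0),
    )?;
    assert_eq!(cid, "bafy...2");
    Ok(())
}
```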
+558  src/account_manager/mod.rs
···
+//! Based on https://github.com/blacksky-algorithms/rsky/blob/main/rsky-pds/src/account_manager/mod.rs
+//! blacksky-algorithms/rsky is licensed under the Apache License 2.0
+//!
+//! Modified for SQLite backend
+use crate::account_manager::helpers::account::{
+    AccountStatus, ActorAccount, AvailabilityFlags, GetAccountAdminStatusOutput,
+};
+use crate::account_manager::helpers::auth::{
+    AuthHelperError, CreateTokensOpts, RefreshGracePeriodOpts,
+};
+use crate::account_manager::helpers::invite::CodeDetail;
+use crate::account_manager::helpers::password::UpdateUserPasswordOpts;
+use crate::models::pds::EmailTokenPurpose;
+use crate::serve::ActorStorage;
+use anyhow::Result;
+use chrono::DateTime;
+use chrono::offset::Utc as UtcOffset;
+use cidv10::Cid;
+use diesel::*;
+use futures::try_join;
+use helpers::{account, auth, email_token, invite, password, repo};
+use rsky_common::RFC3339_VARIANT;
+use rsky_common::time::{HOUR, from_micros_to_str, from_str_to_micros};
+use rsky_lexicon::com::atproto::admin::StatusAttr;
+use rsky_lexicon::com::atproto::server::{AccountCodes, CreateAppPasswordOutput};
+use rsky_pds::account_manager::{
+    ConfirmEmailOpts, CreateAccountOpts, DisableInviteCodesOpts, ResetPasswordOpts,
+    UpdateAccountPasswordOpts, UpdateEmailOpts,
+};
+use rsky_pds::auth_verifier::AuthScope;
+use secp256k1::{Keypair, Secp256k1, SecretKey};
+use std::collections::BTreeMap;
+use std::env;
+use std::time::SystemTime;
+use tokio::sync::RwLock;
+
+pub(crate) mod helpers {
+    pub mod account;
+    pub mod auth;
+    pub mod email_token;
+    pub mod invite;
+    pub mod password;
+    pub mod repo;
+}
+
+#[derive(Clone)]
+pub struct AccountManager {
+    pub db: deadpool_diesel::Pool<
+        deadpool_diesel::Manager<SqliteConnection>,
+        deadpool_diesel::sqlite::Object,
+    >,
+}
+impl std::fmt::Debug for AccountManager {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("AccountManager").finish()
+    }
+}
+
+pub type AccountManagerCreator = Box<
+    dyn Fn(
+            deadpool_diesel::Pool<
+                deadpool_diesel::Manager<SqliteConnection>,
+                deadpool_diesel::sqlite::Object,
+            >,
+        ) -> AccountManager
+        + Send
+        + Sync,
+>;
+
+impl AccountManager {
+    pub const fn new(
+        db: deadpool_diesel::Pool<
+            deadpool_diesel::Manager<SqliteConnection>,
+            deadpool_diesel::sqlite::Object,
+        >,
+    ) -> Self {
+        Self { db }
+    }
+
+    pub fn creator() -> AccountManagerCreator {
+        Box::new(
+            move |db: deadpool_diesel::Pool<
+                deadpool_diesel::Manager<SqliteConnection>,
+                deadpool_diesel::sqlite::Object,
+            >|
+                  -> Self { Self::new(db) },
+        )
+    }
+
+    pub async fn get_account(
+        &self,
+        handle_or_did: &str,
+        flags: Option<AvailabilityFlags>,
+    ) -> Result<Option<ActorAccount>> {
+        account::get_account(handle_or_did, flags, &self.db).await
+    }
+
+    pub async fn get_account_by_email(
+        &self,
+        email: &str,
+        flags: Option<AvailabilityFlags>,
+    ) -> Result<Option<ActorAccount>> {
+        account::get_account_by_email(email, flags, &self.db).await
+    }
+
+    pub async fn is_account_activated(&self, did: &str) -> Result<bool> {
+        let account = self
+            .get_account(
+                did,
+                Some(AvailabilityFlags {
+                    include_taken_down: None,
+                    include_deactivated: Some(true),
+                }),
+            )
+            .await?;
+        if let Some(account) = account {
+            Ok(account.deactivated_at.is_none())
+        } else {
+            Ok(false)
+        }
+    }
+
+    pub async fn get_did_for_actor(
+        &self,
+        handle_or_did: &str,
+        flags: Option<AvailabilityFlags>,
+    ) -> Result<Option<String>> {
+        match self.get_account(handle_or_did, flags).await {
+            Ok(Some(got)) => Ok(Some(got.did)),
+            _ => Ok(None),
+        }
+    }
+
+    pub async fn create_account(
+        &self,
+        opts: CreateAccountOpts,
+        actor_pools: &mut std::collections::HashMap<String, ActorStorage>,
+    ) -> Result<(String, String)> {
+        let CreateAccountOpts {
+            did,
+            handle,
+            email,
+            password,
+            repo_cid,
+            repo_rev,
+            invite_code,
+            deactivated,
+        } = opts;
+        let password_encrypted: Option<String> = match password {
+            Some(password) => Some(password::gen_salt_and_hash(password)?),
+            None => None,
+        };
+        // Should be a global var so this only happens once
+        let secp = Secp256k1::new();
+        let private_key = env::var("PDS_JWT_KEY_K256_PRIVATE_KEY_HEX")?;
+        let secret_key = SecretKey::from_slice(&hex::decode(private_key.as_bytes())?)?;
+        let jwt_key = Keypair::from_secret_key(&secp, &secret_key);
+        let (access_jwt, refresh_jwt) = auth::create_tokens(CreateTokensOpts {
+            did: did.clone(),
+            jwt_key,
+            service_did: env::var("PDS_SERVICE_DID").expect("PDS_SERVICE_DID not set"),
+            scope: Some(AuthScope::Access),
+            jti: None,
+            expires_in: None,
+        })?;
+        let refresh_payload = auth::decode_refresh_token(refresh_jwt.clone(), jwt_key)?;
+        let now = rsky_common::now();
+
+        if let Some(invite_code) = invite_code.clone() {
+            invite::ensure_invite_is_available(invite_code, &self.db).await?;
+        }
+        account::register_actor(did.clone(), handle, deactivated, &self.db).await?;
+        if let (Some(email), Some(password_encrypted)) = (email, password_encrypted) {
+            account::register_account(did.clone(), email, password_encrypted, &self.db).await?;
+        }
+        invite::record_invite_use(did.clone(), invite_code, now, &self.db).await?;
+        auth::store_refresh_token(refresh_payload, None, &self.db).await?;
+
+        let did_path = did
+            .strip_prefix("did:plc:")
+            .ok_or_else(|| anyhow::anyhow!("Invalid DID"))?;
+        let repo_path = format!("sqlite://data/repo/{}.db", did_path);
+        let actor_repo_pool =
+            crate::db::establish_pool(repo_path.as_str()).expect("Failed to establish pool");
+        let blob_path = std::path::Path::new("data/blob").to_path_buf();
+        let actor_pool = ActorStorage {
+            repo: actor_repo_pool,
+            blob: blob_path.clone(),
+        };
+        let blob_path = blob_path.join(did_path);
+        tokio::fs::create_dir_all(&blob_path)
+            .await
+            .map_err(|_| anyhow::anyhow!("Failed to create blob path"))?;
+        // `insert` returns the previous value for this DID; a fresh account has
+        // none, so the old value is simply discarded rather than unwrapped.
+        drop(actor_pools.insert(did.clone(), actor_pool));
+        let db = actor_pools
+            .get(&did)
+            .ok_or_else(|| anyhow::anyhow!("Actor not found"))?
+            .repo
+            .clone();
+        repo::update_root(did, repo_cid, repo_rev, &db).await?;
+        Ok((access_jwt, refresh_jwt))
+    }
+
+    pub async fn get_account_admin_status(
+        &self,
+        did: &str,
+    ) -> Result<Option<GetAccountAdminStatusOutput>> {
+        account::get_account_admin_status(did, &self.db).await
+    }
+
+    pub async fn update_repo_root(
+        &self,
+        did: String,
+        cid: Cid,
+        rev: String,
+        actor_pools: &std::collections::HashMap<String, ActorStorage>,
+    ) -> Result<()> {
+        let db = actor_pools
+            .get(&did)
+            .ok_or_else(|| anyhow::anyhow!("Actor not found"))?
+            .repo
+            .clone();
+        repo::update_root(did, cid, rev, &db).await
+    }
+
+    pub async fn delete_account(
+        &self,
+        did: &str,
+        actor_pools: &std::collections::HashMap<String, ActorStorage>,
+    ) -> Result<()> {
+        let db = actor_pools
+            .get(did)
+            .ok_or_else(|| anyhow::anyhow!("Actor not found"))?
+            .repo
+            .clone();
+        account::delete_account(did, &self.db, &db).await
+    }
+
+    pub async fn takedown_account(&self, did: &str, takedown: StatusAttr) -> Result<()> {
+        (_, _) = try_join!(
+            account::update_account_takedown_status(did, takedown, &self.db),
+            auth::revoke_refresh_tokens_by_did(did, &self.db)
+        )?;
+        Ok(())
+    }
+
+    // @NOTE should always be paired with a sequenceHandle().
+    pub async fn update_handle(&self, did: &str, handle: &str) -> Result<()> {
+        account::update_handle(did, handle, &self.db).await
+    }
+
+    pub async fn deactivate_account(&self, did: &str, delete_after: Option<String>) -> Result<()> {
+        account::deactivate_account(did, delete_after, &self.db).await
+    }
+
+    pub async fn activate_account(&self, did: &str) -> Result<()> {
+        account::activate_account(did, &self.db).await
+    }
+
+    pub async fn get_account_status(&self, handle_or_did: &str) -> Result<AccountStatus> {
+        let got = account::get_account(
+            handle_or_did,
+            Some(AvailabilityFlags {
+                include_deactivated: Some(true),
+                include_taken_down: Some(true),
+            }),
+            &self.db,
+        )
+        .await?;
+        let res = account::format_account_status(got);
+        match res.active {
+            true => Ok(AccountStatus::Active),
+            false => Ok(res.status.expect("Account status not properly formatted.")),
+        }
+    }
+
+    // Auth
+    // ----------
+    pub async fn create_session(
+        &self,
+        did: String,
+        app_password_name: Option<String>,
+    ) -> Result<(String, String)> {
+        let secp = Secp256k1::new();
+        let private_key = env::var("PDS_JWT_KEY_K256_PRIVATE_KEY_HEX")?;
+        let secret_key = SecretKey::from_slice(&hex::decode(private_key.as_bytes())?)?;
+        let jwt_key = Keypair::from_secret_key(&secp, &secret_key);
+        let scope = if app_password_name.is_none() {
+            AuthScope::Access
+        } else {
+            AuthScope::AppPass
+        };
+        let (access_jwt, refresh_jwt) = auth::create_tokens(CreateTokensOpts {
+            did,
+            jwt_key,
+            service_did: env::var("PDS_SERVICE_DID").expect("PDS_SERVICE_DID not set"),
+            scope: Some(scope),
+            jti: None,
+            expires_in: None,
+        })?;
+        let refresh_payload = auth::decode_refresh_token(refresh_jwt.clone(), jwt_key)?;
+        auth::store_refresh_token(refresh_payload, app_password_name, &self.db).await?;
+        Ok((access_jwt, refresh_jwt))
+    }
+
+    pub async fn rotate_refresh_token(&self, id: &String) -> Result<Option<(String, String)>> {
+        let token = auth::get_refresh_token(id, &self.db).await?;
+        if let Some(token) = token {
+            let system_time = SystemTime::now();
+            let dt: DateTime<UtcOffset> = system_time.into();
+            let now = format!("{}", dt.format(RFC3339_VARIANT));
+
+            // take the chance to tidy all of a user's expired tokens
+            // does not need to be transactional since this is just best-effort
+            auth::delete_expired_refresh_tokens(&token.did, now, &self.db).await?;
+
+            // Shorten the refresh token lifespan down from its
+            // original expiration time to its revocation grace period.
+            let prev_expires_at = from_str_to_micros(&token.expires_at);
+
+            const REFRESH_GRACE_MS: i32 = 2 * HOUR;
+            // The grace period is in milliseconds while the timestamps are in
+            // microseconds, so scale before adding.
+            let grace_expires_at = dt.timestamp_micros() + i64::from(REFRESH_GRACE_MS) * 1_000;
+
+            let expires_at = if grace_expires_at < prev_expires_at {
+                grace_expires_at
+            } else {
+                prev_expires_at
+            };
+
+            if expires_at <= dt.timestamp_micros() {
+                return Ok(None);
+            }
+
+            // Determine the next refresh token id: upon refresh token
+            // reuse you always receive a refresh token with the same id.
+            let next_id = token.next_id.unwrap_or_else(auth::get_refresh_token_id);
+
+            let secp = Secp256k1::new();
+            let private_key = env::var("PDS_JWT_KEY_K256_PRIVATE_KEY_HEX")
+                .expect("PDS_JWT_KEY_K256_PRIVATE_KEY_HEX not set");
+            let secret_key =
+                SecretKey::from_slice(&hex::decode(private_key.as_bytes()).expect("Invalid key"))?;
+            let jwt_key = Keypair::from_secret_key(&secp, &secret_key);
+
+            let (access_jwt, refresh_jwt) = auth::create_tokens(CreateTokensOpts {
+                did: token.did,
+                jwt_key,
+                service_did: env::var("PDS_SERVICE_DID").expect("PDS_SERVICE_DID not set"),
+                scope: Some(if token.app_password_name.is_none() {
+                    AuthScope::Access
+                } else {
+                    AuthScope::AppPass
+                }),
+                jti: Some(next_id.clone()),
+                expires_in: None,
+            })?;
+            let refresh_payload = auth::decode_refresh_token(refresh_jwt.clone(), jwt_key)?;
+            match try_join!(
+                auth::add_refresh_grace_period(
+                    RefreshGracePeriodOpts {
+                        id: id.clone(),
+                        expires_at: from_micros_to_str(expires_at),
+                        next_id
+                    },
+                    &self.db
+                ),
+                auth::store_refresh_token(refresh_payload, token.app_password_name, &self.db)
+            ) {
+                Ok(_) => Ok(Some((access_jwt, refresh_jwt))),
+                Err(e) => match e.downcast_ref() {
+                    Some(AuthHelperError::ConcurrentRefresh) => {
+                        Box::pin(self.rotate_refresh_token(id)).await
+                    }
+                    _ => Err(e),
+                },
+            }
+        } else {
+            Ok(None)
+        }
+    }
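
The rotation math above works entirely in microsecond timestamps: the effective expiry is the earlier of the stored expiry and "now plus the grace window", and a token already past that point rotates to nothing. A standalone sketch of that clamp with plain integers (REFRESH_GRACE_MS matches the constant above; the timestamps are illustrative):

```rust
/// Effective expiry for a rotated refresh token, in microseconds:
/// the earlier of the original expiry and `now + grace`.
/// Returns None when the token is already past that point.
fn rotated_expiry_micros(now_us: i64, prev_expires_us: i64) -> Option<i64> {
    const REFRESH_GRACE_MS: i64 = 2 * 60 * 60 * 1_000; // 2 hours, in ms
    let grace_expires_us = now_us + REFRESH_GRACE_MS * 1_000; // ms -> µs
    let expires_us = grace_expires_us.min(prev_expires_us);
    (expires_us > now_us).then_some(expires_us)
}

fn main() {
    let now = 1_700_000_000_000_000_i64; // illustrative epoch µs
    let day = 24 * 60 * 60 * 1_000_000_i64;
    // A token with a day left gets clamped to the two-hour grace window.
    assert_eq!(rotated_expiry_micros(now, now + day), Some(now + 7_200_000_000));
    // An already-expired token rotates to nothing.
    assert_eq!(rotated_expiry_micros(now, now - 1), None);
    println!("ok");
}
```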
+
+    pub async fn revoke_refresh_token(&self, id: String) -> Result<bool> {
+        auth::revoke_refresh_token(id, &self.db).await
+    }
+
+    // Invites
+    // ----------
+
+    pub async fn create_invite_codes(
+        &self,
+        to_create: Vec<AccountCodes>,
+        use_count: i32,
+    ) -> Result<()> {
+        invite::create_invite_codes(to_create, use_count, &self.db).await
+    }
+
+    pub async fn create_account_invite_codes(
+        &self,
+        for_account: &str,
+        codes: Vec<String>,
+        expected_total: usize,
+        disabled: bool,
+    ) -> Result<Vec<CodeDetail>> {
+        invite::create_account_invite_codes(for_account, codes, expected_total, disabled, &self.db)
+            .await
+    }
+
+    pub async fn get_account_invite_codes(&self, did: &str) -> Result<Vec<CodeDetail>> {
+        invite::get_account_invite_codes(did, &self.db).await
+    }
+
+    pub async fn get_invited_by_for_accounts(
+        &self,
+        dids: Vec<String>,
+    ) -> Result<BTreeMap<String, CodeDetail>> {
+        invite::get_invited_by_for_accounts(dids, &self.db).await
+    }
+
+    pub async fn set_account_invites_disabled(&self, did: &str, disabled: bool) -> Result<()> {
+        invite::set_account_invites_disabled(did, disabled, &self.db).await
+    }
+
+    pub async fn disable_invite_codes(&self, opts: DisableInviteCodesOpts) -> Result<()> {
+        invite::disable_invite_codes(opts, &self.db).await
+    }
+
+    // Passwords
+    // ----------
+
+    pub async fn create_app_password(
+        &self,
+        did: String,
+        name: String,
+    ) -> Result<CreateAppPasswordOutput> {
+        password::create_app_password(did, name, &self.db).await
+    }
+
+    pub async fn list_app_passwords(&self, did: &str) -> Result<Vec<(String, String)>> {
+        password::list_app_passwords(did, &self.db).await
+    }
+
+    pub async fn verify_account_password(&self, did: &str, password_str: &String) -> Result<bool> {
+        password::verify_account_password(did, password_str, &self.db).await
+    }
+
+    pub async fn verify_app_password(
+        &self,
+        did: &str,
+        password_str: &str,
+    ) -> Result<Option<String>> {
+        password::verify_app_password(did, password_str, &self.db).await
+    }
+
+    pub async fn reset_password(&self, opts: ResetPasswordOpts) -> Result<()> {
+        let did = email_token::assert_valid_token_and_find_did(
+            EmailTokenPurpose::ResetPassword,
+            &opts.token,
+            None,
+            &self.db,
+        )
+        .await?;
+        self.update_account_password(UpdateAccountPasswordOpts {
+            did,
+            password: opts.password,
+        })
+        .await
+    }
+
+    pub async fn update_account_password(&self, opts: UpdateAccountPasswordOpts) -> Result<()> {
+        let UpdateAccountPasswordOpts { did, .. } = opts;
+        let password_encrypted = password::gen_salt_and_hash(opts.password)?;
+        try_join!(
+            password::update_user_password(
+                UpdateUserPasswordOpts {
+                    did: did.clone(),
+                    password_encrypted
+                },
+                &self.db
+            ),
+            email_token::delete_email_token(&did, EmailTokenPurpose::ResetPassword, &self.db),
+            auth::revoke_refresh_tokens_by_did(&did, &self.db)
+        )?;
+        Ok(())
+    }
+
+    pub async fn revoke_app_password(&self, did: String, name: String) -> Result<()> {
+        try_join!(
+            password::delete_app_password(&did, &name, &self.db),
+            auth::revoke_app_password_refresh_token(&did, &name, &self.db)
+        )?;
+        Ok(())
+    }
+
+    // Email Tokens
+    // ----------
+    pub async fn confirm_email(&self, opts: ConfirmEmailOpts<'_>) -> Result<()> {
+        let ConfirmEmailOpts { did, token } = opts;
+        email_token::assert_valid_token(
+            did,
+            EmailTokenPurpose::ConfirmEmail,
+            token,
+            None,
+            &self.db,
+        )
+        .await?;
+        let now = rsky_common::now();
+        try_join!(
+            email_token::delete_email_token(did, EmailTokenPurpose::ConfirmEmail, &self.db),
+            account::set_email_confirmed_at(did, now, &self.db)
+        )?;
+        Ok(())
+    }
+
+    pub async fn update_email(&self, opts: UpdateEmailOpts) -> Result<()> {
+        let UpdateEmailOpts { did, email } = opts;
+        try_join!(
+            account::update_email(&did, &email, &self.db),
+            email_token::delete_all_email_tokens(&did, &self.db)
+        )?;
+        Ok(())
+    }
+
+    pub async fn assert_valid_email_token(
+        &self,
+        did: &str,
+        purpose: EmailTokenPurpose,
+        token: &str,
+    ) -> Result<()> {
+        email_token::assert_valid_token(did, purpose, token, None, &self.db).await
+    }
+
+    pub async fn assert_valid_email_token_and_cleanup(
+        &self,
+        did: &str,
+        purpose: EmailTokenPurpose,
+        token: &str,
+    ) -> Result<()> {
+        email_token::assert_valid_token(did, purpose, token, None, &self.db).await?;
+        email_token::delete_email_token(did, purpose, &self.db).await
+    }
+
+    pub async fn create_email_token(
+        &self,
+        did: &str,
+        purpose: EmailTokenPurpose,
+    ) -> Result<String> {
+        email_token::create_email_token(did, purpose, &self.db).await
+    }
+}
+
+pub struct SharedAccountManager {
+    pub account_manager: RwLock<AccountManager>,
+}
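
AccountManagerCreator packages construction behind a boxed closure, so startup wiring can be handed a factory before the pool exists and apply it later. A reduced sketch of the same dependency-injection shape with stand-in Pool and Manager types (the real aliases are the deadpool_diesel types above):

```rust
// Stand-in for the connection pool; the real code uses deadpool_diesel's Pool.
#[derive(Clone)]
struct Pool;

struct Manager {
    _db: Pool,
}

// Boxed factory, mirroring `AccountManagerCreator`.
type Creator = Box<dyn Fn(Pool) -> Manager + Send + Sync>;

impl Manager {
    const fn new(db: Pool) -> Self {
        Self { _db: db }
    }

    fn creator() -> Creator {
        Box::new(|db: Pool| -> Self { Self::new(db) })
    }
}

fn main() {
    // Wiring code can hold the factory and invoke it once the pool is ready.
    let make = Manager::creator();
    let _manager = make(Pool);
}
```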
+112  src/actor_endpoints.rs
···
+/// HACK: store private user preferences in the PDS.
+///
+/// We shouldn't have to know about any bsky endpoints to store private user data.
+/// This will _very likely_ be changed in the future.
+use atrium_api::app::bsky::actor;
+use axum::{
+    Json, Router,
+    extract::State,
+    routing::{get, post},
+};
+use constcat::concat;
+
+use crate::auth::AuthenticatedUser;
+
+use super::serve::*;
+
+async fn put_preferences(
+    user: AuthenticatedUser,
+    State(actor_pools): State<std::collections::HashMap<String, ActorStorage>>,
+    Json(input): Json<actor::put_preferences::Input>,
+) -> Result<()> {
+    let did = user.did();
+    // let json_string =
+    //     serde_json::to_string(&input.preferences).context("failed to serialize preferences")?;
+
+    // let conn = &mut actor_pools
+    //     .get(&did)
+    //     .context("failed to get actor pool")?
+    //     .repo
+    //     .get()
+    //     .await
+    //     .expect("failed to get database connection");
+    // conn.interact(move |conn| {
+    //     diesel::update(accounts::table)
+    //         .filter(accounts::did.eq(did))
+    //         .set(accounts::private_prefs.eq(json_string))
+    //         .execute(conn)
+    //         .context("failed to update user preferences")
+    // });
+    todo!("Use actor_store's preferences writer instead");
+    // let mut actor_store = ActorStore::from_actor_pools(&did, &actor_pools).await;
+    // let values = actor::defs::Preferences {
+    //     private_prefs: Some(json_string),
+    //     ..Default::default()
+    // };
+    // let namespace = actor::defs::PreferencesNamespace::Private;
+    // let scope = actor::defs::PreferencesScope::User;
+    // actor_store.pref.put_preferences(values, namespace, scope);
+
+    Ok(())
+}
+
+async fn get_preferences(
+    user: AuthenticatedUser,
+    State(actor_pools): State<std::collections::HashMap<String, ActorStorage>>,
+) -> Result<Json<actor::get_preferences::Output>> {
+    let did = user.did();
+    // let conn = &mut actor_pools
+    //     .get(&did)
+    //     .context("failed to get actor pool")?
+    //     .repo
+    //     .get()
+    //     .await
+    //     .expect("failed to get database connection");
+
+    // #[derive(QueryableByName)]
+    // struct Prefs {
+    //     #[diesel(sql_type = diesel::sql_types::Text)]
+    //     private_prefs: Option<String>,
+    // }
+
+    // let result = conn
+    //     .interact(move |conn| {
+    //         diesel::sql_query("SELECT private_prefs FROM accounts WHERE did = ?")
+    //             .bind::<diesel::sql_types::Text, _>(did)
+    //             .get_result::<Prefs>(conn)
+    //     })
+    //     .await
+    //     .expect("failed to fetch preferences");
+
+    // if let Some(prefs_json) = result.private_prefs {
+    //     let prefs: actor::defs::Preferences =
+    //         serde_json::from_str(&prefs_json).context("failed to deserialize preferences")?;
+
+    //     Ok(Json(
+    //         actor::get_preferences::OutputData { preferences: prefs }.into(),
+    //     ))
+    // } else {
+    //     Ok(Json(
+    //         actor::get_preferences::OutputData {
+    //             preferences: Vec::new(),
+    //         }
+    //         .into(),
+    //     ))
+    // }
+    todo!("Use actor_store's preferences writer instead");
+}
+
+/// Register all actor endpoints.
+pub(crate) fn routes() -> Router<AppState> {
+    // AP /xrpc/app.bsky.actor.putPreferences
+    // AG /xrpc/app.bsky.actor.getPreferences
+    Router::new()
+        .route(
+            concat!("/", actor::put_preferences::NSID),
+            post(put_preferences),
+        )
+        .route(
+            concat!("/", actor::get_preferences::NSID),
+            get(get_preferences),
+        )
+}
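
routes() builds each path from the lexicon NSID constant at compile time, so the XRPC paths can never drift from the atrium definitions. A self-contained sketch of the same pattern with a hand-written NSID constant and a trivial handler (axum 0.7-style API; AppState, auth extraction, and the real NSIDs are left out):

```rust
use axum::{Json, Router, routing::get};
use serde_json::{Value, json};

// Stand-in for the lexicon-provided NSID constant.
const GET_PREFERENCES_NSID: &str = "app.bsky.actor.getPreferences";

async fn get_preferences() -> Json<Value> {
    Json(json!({ "preferences": [] }))
}

#[tokio::main]
async fn main() {
    // The real code joins "/" and the NSID at compile time with `concat!`;
    // `format!` keeps this sketch dependency-free.
    let path = format!("/xrpc/{GET_PREFERENCES_NSID}");
    let app: Router = Router::new().route(&path, get(get_preferences));

    let listener = tokio::net::TcpListener::bind("127.0.0.1:3000").await.unwrap();
    axum::serve(listener, app).await.unwrap();
}
```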
-472  src/actor_store/actor_store.rs
···
-//! Based on https://github.com/blacksky-algorithms/rsky/blob/main/rsky-pds/src/actor_store/mod.rs
-//! Which is based on https://github.com/bluesky-social/atproto/blob/main/packages/repo/src/repo.ts
-//! and also adds components from https://github.com/bluesky-social/atproto/blob/main/packages/pds/src/actor-store/repo/transactor.ts
-//! blacksky-algorithms/rsky is licensed under the Apache License 2.0
-//!
-//! Modified for SQLite backend
-
-use anyhow::Result;
-use cidv10::Cid;
-use diesel::*;
-use futures::stream::{self, StreamExt};
-use rsky_common;
-use rsky_pds::actor_store::repo::types::SyncEvtData;
-use rsky_repo::repo::Repo;
-use rsky_repo::storage::readable_blockstore::ReadableBlockstore;
-use rsky_repo::storage::types::RepoStorage;
-use rsky_repo::types::{
-    CommitAction, CommitData, CommitDataWithOps, CommitOp, PreparedCreateOrUpdate, PreparedWrite,
-    RecordCreateOrUpdateOp, RecordWriteEnum, RecordWriteOp, WriteOpAction, write_to_op,
-};
-use rsky_repo::util::format_data_key;
-use rsky_syntax::aturi::AtUri;
-use secp256k1::{Keypair, Secp256k1, SecretKey};
-use std::env;
-use std::fmt;
-use std::str::FromStr;
-use std::sync::Arc;
-use tokio::sync::RwLock;
-
-use super::ActorDb;
-use super::blob::BlobReader;
-use super::preference::PreferenceReader;
-use super::record::RecordReader;
-use super::sql_blob::BlobStoreSql;
-use super::sql_repo::SqlRepoReader;
-
-#[derive(Debug)]
-enum FormatCommitError {
-    BadRecordSwap(String),
-    RecordSwapMismatch(String),
-    BadCommitSwap(String),
-    MissingRepoRoot(String),
-}
-
-pub struct ActorStore {
-    pub did: String,
-    pub storage: Arc<RwLock<SqlRepoReader>>, // get ipld blocks from db
-    pub record: RecordReader,                // get lexicon records from db
-    pub blob: BlobReader,                    // get blobs
-    pub pref: PreferenceReader,              // get preferences
-}
-
-// Combination of RepoReader/Transactor, BlobReader/Transactor, SqlRepoReader/Transactor
-impl ActorStore {
-    /// Concrete reader of an individual repo (hence BlobStoreSql which takes `did` param)
-    pub fn new(did: String, blobstore: BlobStoreSql, db: ActorDb) -> Self {
-        let db = Arc::new(db);
-        ActorStore {
-            storage: Arc::new(RwLock::new(SqlRepoReader::new(
-                did.clone(),
-                None,
-                db.clone(),
-            ))),
-            record: RecordReader::new(did.clone(), db.clone()),
-            pref: PreferenceReader::new(did.clone(), db.clone()),
-            did,
-            blob: BlobReader::new(blobstore, db.clone()), // Unlike TS impl, just use blob reader vs generator
-        }
-    }
-
-    pub async fn get_repo_root(&self) -> Option<Cid> {
-        let storage_guard = self.storage.read().await;
-        storage_guard.get_root().await
-    }
-
-    // Transactors
-    // -------------------
-
-    #[deprecated]
-    pub async fn create_repo_legacy(
-        &self,
-        keypair: Keypair,
-        writes: Vec<PreparedCreateOrUpdate>,
-    ) -> Result<CommitData> {
-        let write_ops = writes
-            .clone()
-            .into_iter()
-            .map(|prepare| {
-                let at_uri: AtUri = prepare.uri.try_into()?;
-                Ok(RecordCreateOrUpdateOp {
-                    action: WriteOpAction::Create,
-                    collection: at_uri.get_collection(),
-                    rkey: at_uri.get_rkey(),
-                    record: prepare.record,
-                })
-            })
-            .collect::<Result<Vec<RecordCreateOrUpdateOp>>>()?;
-        let commit = Repo::format_init_commit(
-            self.storage.clone(),
-            self.did.clone(),
-            keypair,
-            Some(write_ops),
-        )
-        .await?;
-        let storage_guard = self.storage.read().await;
-        storage_guard.apply_commit(commit.clone(), None).await?;
-        let writes = writes
-            .into_iter()
-            .map(PreparedWrite::Create)
-            .collect::<Vec<PreparedWrite>>();
-        self.blob.process_write_blobs(writes).await?;
-        Ok(commit)
-    }
-
-    pub async fn create_repo(
-        &self,
-        keypair: Keypair,
-        writes: Vec<PreparedCreateOrUpdate>,
-    ) -> Result<CommitDataWithOps> {
-        let write_ops = writes
-            .clone()
-            .into_iter()
-            .map(|prepare| {
-                let at_uri: AtUri = prepare.uri.try_into()?;
-                Ok(RecordCreateOrUpdateOp {
-                    action: WriteOpAction::Create,
-                    collection: at_uri.get_collection(),
-                    rkey: at_uri.get_rkey(),
-                    record: prepare.record,
-                })
-            })
-            .collect::<Result<Vec<RecordCreateOrUpdateOp>>>()?;
-        let commit = Repo::format_init_commit(
-            self.storage.clone(),
-            self.did.clone(),
-            keypair,
-            Some(write_ops),
-        )
-        .await?;
-        let storage_guard = self.storage.read().await;
-        storage_guard.apply_commit(commit.clone(), None).await?;
-        let write_commit_ops = writes.iter().try_fold(
-            Vec::with_capacity(writes.len()),
-            |mut acc, w| -> Result<Vec<CommitOp>> {
-                let aturi: AtUri = w.uri.clone().try_into()?;
-                acc.push(CommitOp {
-                    action: CommitAction::Create,
-                    path: format_data_key(aturi.get_collection(), aturi.get_rkey()),
-                    cid: Some(w.cid.clone()),
-                    prev: None,
-                });
-                Ok(acc)
-            },
-        )?;
-        let writes = writes
-            .into_iter()
-            .map(PreparedWrite::Create)
-            .collect::<Vec<PreparedWrite>>();
-        self.blob.process_write_blobs(writes).await?;
-        Ok(CommitDataWithOps {
-            commit_data: commit,
-            ops: write_commit_ops,
-            prev_data: None,
-        })
-    }
-
-    pub async fn process_import_repo(
-        &mut self,
-        commit: CommitData,
-        writes: Vec<PreparedWrite>,
-    ) -> Result<()> {
-        {
-            let immutable_borrow = &self;
-            // & send to indexing
-            immutable_borrow
-                .index_writes(writes.clone(), &commit.rev)
-                .await?;
-        }
-        // persist the commit to repo storage
-        let storage_guard = self.storage.read().await;
-        storage_guard.apply_commit(commit.clone(), None).await?;
-        // process blobs
-        self.blob.process_write_blobs(writes).await?;
-        Ok(())
-    }
-
-    pub async fn process_writes(
-        &mut self,
-        writes: Vec<PreparedWrite>,
-        swap_commit_cid: Option<Cid>,
-    ) -> Result<CommitDataWithOps> {
-        // NOTE: In the typescript PR on sync v1.1
-        // there are some safeguards added for adding
-        // very large commits and very many commits
-        // for which I'm sure we could safeguard on
-        // but may not be necessary.
-        // https://github.com/bluesky-social/atproto/pull/3585/files#diff-7627844a4a6b50190014e947d1331a96df3c64d4c5273fa0ce544f85c3c1265f
-        let commit = self.format_commit(writes.clone(), swap_commit_cid).await?;
-        {
-            let immutable_borrow = &self;
-            // & send to indexing
-            immutable_borrow
-                .index_writes(writes.clone(), &commit.commit_data.rev)
-                .await?;
-        }
-        // persist the commit to repo storage
-        let storage_guard = self.storage.read().await;
-        storage_guard
-            .apply_commit(commit.commit_data.clone(), None)
-            .await?;
-        // process blobs
-        self.blob.process_write_blobs(writes).await?;
-        Ok(commit)
-    }
-
-    pub async fn get_sync_event_data(&mut self) -> Result<SyncEvtData> {
-        let storage_guard = self.storage.read().await;
-        let current_root = storage_guard.get_root_detailed().await?;
-        let blocks_and_missing = storage_guard.get_blocks(vec![current_root.cid]).await?;
-        Ok(SyncEvtData {
-            cid: current_root.cid,
-            rev: current_root.rev,
-            blocks: blocks_and_missing.blocks,
-        })
-    }
-
-    pub async fn format_commit(
-        &mut self,
-        writes: Vec<PreparedWrite>,
-        swap_commit: Option<Cid>,
-    ) -> Result<CommitDataWithOps> {
-        let current_root = {
-            let storage_guard = self.storage.read().await;
-            storage_guard.get_root_detailed().await
-        };
-        if let Ok(current_root) = current_root {
-            if let Some(swap_commit) = swap_commit {
-                if !current_root.cid.eq(&swap_commit) {
-                    return Err(
-                        FormatCommitError::BadCommitSwap(current_root.cid.to_string()).into(),
-                    );
-                }
-            }
-            {
-                let mut storage_guard = self.storage.write().await;
-                storage_guard.cache_rev(current_root.rev).await?;
-            }
-            let mut new_record_cids: Vec<Cid> = vec![];
-            let mut delete_and_update_uris = vec![];
-            let mut commit_ops = vec![];
-            for write in &writes {
-                let commit_action: CommitAction = write.action().into();
-                match write.clone() {
-                    PreparedWrite::Create(c) => new_record_cids.push(c.cid),
-                    PreparedWrite::Update(u) => {
-                        new_record_cids.push(u.cid);
-                        let u_at_uri: AtUri = u.uri.try_into()?;
-                        delete_and_update_uris.push(u_at_uri);
-                    }
-                    PreparedWrite::Delete(d) => {
-                        let d_at_uri: AtUri = d.uri.try_into()?;
-                        delete_and_update_uris.push(d_at_uri)
-                    }
-                }
-                if write.swap_cid().is_none() {
-                    continue;
-                }
-                let write_at_uri: &AtUri = &write.uri().try_into()?;
-                let record = self
-                    .record
-                    .get_record(write_at_uri, None, Some(true))
-                    .await?;
-                let current_record = match record {
-                    Some(record) => Some(Cid::from_str(&record.cid)?),
-                    None => None,
-                };
-                let cid = match &write {
-                    &PreparedWrite::Delete(_) => None,
-                    &PreparedWrite::Create(w) | &PreparedWrite::Update(w) => Some(w.cid),
-                };
-                let mut op = CommitOp {
-                    action: commit_action,
-                    path: format_data_key(write_at_uri.get_collection(), write_at_uri.get_rkey()),
-                    cid,
-                    prev: None,
-                };
-                if let Some(_) = current_record {
-                    op.prev = current_record;
-                };
-                commit_ops.push(op);
-                match write {
-                    // There should be no current record for a create
-                    PreparedWrite::Create(_) if write.swap_cid().is_some() => {
-                        Err::<(), anyhow::Error>(
-                            FormatCommitError::BadRecordSwap(format!("{:?}", current_record))
-                                .into(),
-                        )
-                    }
-                    // There should be a current record for an update
-                    PreparedWrite::Update(_) if write.swap_cid().is_none() => {
-                        Err::<(), anyhow::Error>(
-                            FormatCommitError::BadRecordSwap(format!("{:?}", current_record))
-                                .into(),
-                        )
-                    }
-                    // There should be a current record for a delete
-                    PreparedWrite::Delete(_) if write.swap_cid().is_none() => {
-                        Err::<(), anyhow::Error>(
-                            FormatCommitError::BadRecordSwap(format!("{:?}", current_record))
-                                .into(),
-                        )
-                    }
-                    _ => Ok::<(), anyhow::Error>(()),
-                }?;
-                match (current_record, write.swap_cid()) {
-                    (Some(current_record), Some(swap_cid)) if current_record.eq(swap_cid) => {
-                        Ok::<(), anyhow::Error>(())
-                    }
-                    _ => Err::<(), anyhow::Error>(
-                        FormatCommitError::RecordSwapMismatch(format!("{:?}", current_record))
-                            .into(),
-                    ),
-                }?;
-            }
-            let mut repo = Repo::load(self.storage.clone(), Some(current_root.cid)).await?;
-            let previous_data = repo.commit.data;
-            let write_ops: Vec<RecordWriteOp> = writes
-                .into_iter()
-                .map(write_to_op)
-                .collect::<Result<Vec<RecordWriteOp>>>()?;
-            // @TODO: Use repo signing key global config
-            let secp = Secp256k1::new();
-            let repo_private_key = env::var("PDS_REPO_SIGNING_KEY_K256_PRIVATE_KEY_HEX").unwrap();
-            let repo_secret_key =
-                SecretKey::from_slice(&hex::decode(repo_private_key.as_bytes()).unwrap()).unwrap();
-            let repo_signing_key = Keypair::from_secret_key(&secp, &repo_secret_key);
-
-            let mut commit = repo
-                .format_commit(RecordWriteEnum::List(write_ops), repo_signing_key)
-                .await?;
-
-            // find blocks that would be deleted but are referenced by another record
-            let duplicate_record_cids = self
-                .get_duplicate_record_cids(commit.removed_cids.to_list(), delete_and_update_uris)
-                .await?;
-            for cid in duplicate_record_cids {
-                commit.removed_cids.delete(cid)
-            }
-
-            // find blocks that are relevant to ops but not included in diff
-            // (for instance a record that was moved but cid stayed the same)
-            let new_record_blocks = commit.relevant_blocks.get_many(new_record_cids)?;
-            if !new_record_blocks.missing.is_empty() {
-                let missing_blocks = {
-                    let storage_guard = self.storage.read().await;
-                    storage_guard.get_blocks(new_record_blocks.missing).await?
-                };
-                commit.relevant_blocks.add_map(missing_blocks.blocks)?;
-            }
-            let commit_with_data_ops = CommitDataWithOps {
-                ops: commit_ops,
-                commit_data: commit,
-                prev_data: Some(previous_data),
-            };
-            Ok(commit_with_data_ops)
-        } else {
-            Err(FormatCommitError::MissingRepoRoot(self.did.clone()).into())
-        }
-    }
-
-    pub async fn index_writes(&self, writes: Vec<PreparedWrite>, rev: &str) -> Result<()> {
-        let now: &str = &rsky_common::now();
-
-        let _ = stream::iter(writes)
-            .then(|write| async move {
-                Ok::<(), anyhow::Error>(match write {
-                    PreparedWrite::Create(write) => {
-                        let write_at_uri: AtUri = write.uri.try_into()?;
-                        self.record
-                            .index_record(
-                                write_at_uri.clone(),
-                                write.cid,
-                                Some(write.record),
-                                Some(write.action),
-                                rev.to_owned(),
-                                Some(now.to_string()),
-                            )
-                            .await?
-                    }
-                    PreparedWrite::Update(write) => {
-                        let write_at_uri: AtUri = write.uri.try_into()?;
-                        self.record
-                            .index_record(
-                                write_at_uri.clone(),
-                                write.cid,
-                                Some(write.record),
-                                Some(write.action),
-                                rev.to_owned(),
-                                Some(now.to_string()),
-                            )
-                            .await?
-                    }
-                    PreparedWrite::Delete(write) => {
-                        let write_at_uri: AtUri = write.uri.try_into()?;
-                        self.record.delete_record(&write_at_uri).await?
-                    }
-                })
-            })
-            .collect::<Vec<_>>()
-            .await
-            .into_iter()
-            .collect::<Result<Vec<_>, _>>()?;
-        Ok(())
-    }
-
-    pub async fn destroy(&mut self) -> Result<()> {
-        let did: String = self.did.clone();
-        let storage_guard = self.storage.read().await;
-        let db: Arc<ActorDb> = storage_guard.db.clone();
-        use rsky_pds::schema::pds::blob::dsl as BlobSchema;
-
-        let blob_rows: Vec<String> = db
-            .run(move |conn| {
-                BlobSchema::blob
-                    .filter(BlobSchema::did.eq(did))
-                    .select(BlobSchema::cid)
-                    .get_results(conn)
-            })
-            .await?;
-        let cids = blob_rows
-            .into_iter()
-            .map(|row| Ok(Cid::from_str(&row)?))
-            .collect::<Result<Vec<Cid>>>()?;
-        let _ = stream::iter(cids.chunks(500))
-            .then(|chunk| async { self.blob.blobstore.delete_many(chunk.to_vec()).await })
-            .collect::<Vec<_>>()
-            .await
-            .into_iter()
-            .collect::<Result<Vec<_>, _>>()?;
-        Ok(())
-    }
-
-    pub async fn get_duplicate_record_cids(
-        &self,
-        cids: Vec<Cid>,
-        touched_uris: Vec<AtUri>,
-    ) -> Result<Vec<Cid>> {
-        if touched_uris.is_empty() || cids.is_empty() {
-            return Ok(vec![]);
-        }
-        let did: String = self.did.clone();
-        let storage_guard = self.storage.read().await;
-        let db: Arc<ActorDb> = storage_guard.db.clone();
-        use rsky_pds::schema::pds::record::dsl as RecordSchema;
-
-        let cid_strs: Vec<String> = cids.into_iter().map(|c| c.to_string()).collect();
-        let touched_uri_strs: Vec<String> = touched_uris.iter().map(|t| t.to_string()).collect();
-        let res: Vec<String> = db
-            .run(move |conn| {
-                RecordSchema::record
-                    .filter(RecordSchema::did.eq(did))
-                    .filter(RecordSchema::cid.eq_any(cid_strs))
-                    .filter(RecordSchema::uri.ne_all(touched_uri_strs))
-                    .select(RecordSchema::cid)
-                    .get_results(conn)
-            })
-            .await?;
-        res.into_iter()
-            .map(|row| Cid::from_str(&row).map_err(|error| anyhow::Error::new(error)))
-            .collect::<Result<Vec<Cid>>>()
-    }
-}
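
The removed get_duplicate_record_cids existed to keep a commit from garbage-collecting a block that another record still points at: of the CIDs slated for removal, any one still referenced by a record outside the touched URIs must survive. A pure-std sketch of that set subtraction (simplified types; the real check ran as a SQL filter over the record table):

```rust
use std::collections::HashSet;

/// Of `removed`, keep only the CIDs still referenced by a record
/// whose URI was *not* touched by this commit.
fn still_referenced(
    removed: &[String],
    records: &[(String, String)], // (uri, cid) pairs
    touched_uris: &HashSet<String>,
) -> Vec<String> {
    removed
        .iter()
        .filter(|cid| {
            records
                .iter()
                .any(|(uri, c)| c == *cid && !touched_uris.contains(uri))
        })
        .cloned()
        .collect()
}

fn main() {
    let records = vec![
        ("at://a".to_string(), "cid1".to_string()),
        ("at://b".to_string(), "cid1".to_string()),
    ];
    let touched: HashSet<String> = HashSet::from(["at://a".to_string()]);
    // cid1 is removed via at://a, but at://b still points at it, so keep it.
    assert_eq!(
        still_referenced(&["cid1".to_string()], &records, &touched),
        vec!["cid1".to_string()]
    );
}
```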
+357 -210  src/actor_store/blob.rs
···
-//! Blob storage and retrieval for the actor store.
+//! Blob operations for the actor store
 //! Based on https://github.com/blacksky-algorithms/rsky/blob/main/rsky-pds/src/actor_store/blob/mod.rs
 //! blacksky-algorithms/rsky is licensed under the Apache License 2.0
 //!
 //! Modified for SQLite backend
 
+use crate::models::actor_store as models;
 use anyhow::{Result, bail};
+use axum::body::Bytes;
 use cidv10::Cid;
 use diesel::dsl::{count_distinct, exists, not};
-use diesel::result::Error;
 use diesel::sql_types::{Integer, Nullable, Text};
 use diesel::*;
-use futures::stream::{self, StreamExt};
-use futures::try_join;
-use rsky_pds::actor_store::blob::sha256_stream;
-// use rocket::data::{Data, ToByteUnit};
-// use rocket::form::validate::Contains;
+use futures::{
+    stream::{self, StreamExt},
+    try_join,
+};
 use rsky_common::ipld::sha256_raw_to_cid;
 use rsky_common::now;
 use rsky_lexicon::blob_refs::BlobRef;
 use rsky_lexicon::com::atproto::admin::StatusAttr;
 use rsky_lexicon::com::atproto::repo::ListMissingBlobsRefRecordBlob;
 use rsky_pds::actor_store::blob::{
-    BlobMetadata, GetBlobMetadataOutput, GetBlobOutput, ListBlobsOpts, ListMissingBlobsOpts,
-    verify_blob,
+    BlobMetadata, GetBlobMetadataOutput, ListBlobsOpts, ListMissingBlobsOpts, accepted_mime,
+    sha256_stream,
 };
 use rsky_pds::image;
-use rsky_pds::models::models;
 use rsky_repo::error::BlobError;
 use rsky_repo::types::{PreparedBlobRef, PreparedWrite};
-use sha2::{Digest, Sha256};
+use std::str::FromStr as _;
 
-use super::ActorDb;
-use super::sql_blob::BlobStoreSql;
+use super::blob_fs::{BlobStoreFs, ByteStream};
 
+pub struct GetBlobOutput {
+    pub size: i32,
+    pub mime_type: Option<String>,
+    pub stream: ByteStream,
+}
+
+/// Handles blob operations for an actor store
 pub struct BlobReader {
-    pub blobstore: BlobStoreSql,
+    /// Filesystem-backed blob storage
+    pub blobstore: BlobStoreFs,
+    /// DID of the actor
     pub did: String,
-    pub db: ActorDb,
+    /// Database connection
+    pub db: deadpool_diesel::Pool<
+        deadpool_diesel::Manager<SqliteConnection>,
+        deadpool_diesel::sqlite::Object,
+    >,
 }
 
-// Basically handles getting blob records from db
 impl BlobReader {
-    pub fn new(blobstore: BlobStoreSql, db: ActorDb) -> Self {
-        BlobReader {
-            did: blobstore.bucket.clone(),
+    /// Create a new blob reader
+    pub fn new(
+        blobstore: BlobStoreFs,
+        db: deadpool_diesel::Pool<
+            deadpool_diesel::Manager<SqliteConnection>,
+            deadpool_diesel::sqlite::Object,
+        >,
+    ) -> Self {
+        Self {
+            did: blobstore.did.clone(),
             blobstore,
             db,
         }
     }
 
+    /// Get metadata for a blob by CID
     pub async fn get_blob_metadata(&self, cid: Cid) -> Result<GetBlobMetadataOutput> {
-        use rsky_pds::schema::pds::blob::dsl as BlobSchema;
+        use crate::schema::actor_store::blob::dsl as BlobSchema;
 
         let did = self.did.clone();
         let found = self
             .db
-            .run(move |conn| {
+            .get()
+            .await?
+            .interact(move |conn| {
                 BlobSchema::blob
                     .filter(BlobSchema::did.eq(did))
                     .filter(BlobSchema::cid.eq(cid.to_string()))
···
                     .first(conn)
                     .optional()
             })
-            .await?;
+            .await
+            .expect("Failed to get blob metadata")?;
 
         match found {
             None => bail!("Blob not found"),
···
         }
     }
 
+    /// Get a blob by CID with metadata and content
     pub async fn get_blob(&self, cid: Cid) -> Result<GetBlobOutput> {
         let metadata = self.get_blob_metadata(cid).await?;
         let blob_stream = match self.blobstore.get_stream(cid).await {
-            Ok(res) => res,
-            Err(e) => {
-                return match e.downcast_ref() {
-                    Some(GetObjectError::NoSuchKey(key)) => {
-                        Err(anyhow::Error::new(GetObjectError::NoSuchKey(key.clone())))
-                    }
-                    _ => bail!(e.to_string()),
-                };
-            }
+            Ok(stream) => stream,
+            Err(e) => bail!("Failed to get blob: {}", e),
         };
+
         Ok(GetBlobOutput {
             size: metadata.size,
             mime_type: metadata.mime_type,
···
         })
     }
 
+    /// Get all records that reference a specific blob
     pub async fn get_records_for_blob(&self, cid: Cid) -> Result<Vec<String>> {
-        use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
+        use crate::schema::actor_store::record_blob::dsl as RecordBlobSchema;
 
         let did = self.did.clone();
         let res = self
             .db
-            .run(move |conn| {
+            .get()
+            .await?
+            .interact(move |conn| {
                 let results = RecordBlobSchema::record_blob
                     .filter(RecordBlobSchema::blobCid.eq(cid.to_string()))
                     .filter(RecordBlobSchema::did.eq(did))
                     .select(models::RecordBlob::as_select())
                     .get_results(conn)?;
-                Ok::<_, Error>(results.into_iter().map(|row| row.record_uri))
+                Ok::<_, result::Error>(results.into_iter().map(|row| row.record_uri))
             })
-            .await?
+            .await
+            .expect("Failed to get records for blob")?
             .collect::<Vec<String>>();
 
         Ok(res)
     }
 
+    /// Upload a blob and get its metadata
     pub async fn upload_blob_and_get_metadata(
         &self,
         user_suggested_mime: String,
-        blob: Data<'_>,
+        blob: Bytes,
     ) -> Result<BlobMetadata> {
-        let blob_stream = blob.open(100.mebibytes());
-        let bytes = blob_stream.into_bytes().await?;
-        let size = bytes.n.written;
-        let bytes = bytes.into_inner();
+        let bytes = blob;
+        let size = bytes.len() as i64;
+
         let (temp_key, sha256, img_info, sniffed_mime) = try_join!(
             self.blobstore.put_temp(bytes.clone()),
-            sha256_stream(bytes.clone()),
-            image::maybe_get_info(bytes.clone()),
-            image::mime_type_from_bytes(bytes.clone())
+            // TODO: reimpl funcs to use Bytes instead of Vec<u8>
+            sha256_stream(bytes.to_vec()),
+            image::maybe_get_info(bytes.to_vec()),
+            image::mime_type_from_bytes(bytes.to_vec())
         )?;
 
         let cid = sha256_raw_to_cid(sha256);
···
 
         Ok(BlobMetadata {
             temp_key,
-            size: size as i64,
+            size,
             cid,
             mime_type,
-            width: if let Some(ref info) = img_info {
-                Some(info.width as i32)
-            } else {
-                None
-            },
+            width: img_info.as_ref().map(|info| info.width as i32),
             height: if let Some(info) = img_info {
                 Some(info.height as i32)
             } else {
···
         })
     }
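
The upload path derives a blob's identity from its bytes: the SHA-256 digest becomes a CID, which is what makes the later content-addressed dedup and ON CONFLICT upsert safe. A sketch of that derivation using the cid and multihash crates directly (sha256_raw_to_cid wraps the equivalent in rsky_common; the exact crate versions and APIs here are assumptions, not the project's pinned dependencies):

```rust
use cid::Cid;
use multihash::{Code, MultihashDigest};

// 0x55 is the multicodec code for raw bytes.
const RAW_CODEC: u64 = 0x55;

/// Derive a CIDv1 (raw codec, sha2-256) from blob bytes,
/// analogous to what `sha256_raw_to_cid` produces.
fn cid_for_blob(bytes: &[u8]) -> Cid {
    let digest = Code::Sha2_256.digest(bytes);
    Cid::new_v1(RAW_CODEC, digest)
}

fn main() {
    let cid = cid_for_blob(b"hello blob");
    // Same bytes -> same CID: the store can dedup purely on content.
    assert_eq!(cid, cid_for_blob(b"hello blob"));
    println!("{cid}");
}
```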
 
+    /// Track a blob that hasn't been associated with any records yet
     pub async fn track_untethered_blob(&self, metadata: BlobMetadata) -> Result<BlobRef> {
-        use rsky_pds::schema::pds::blob::dsl as BlobSchema;
+        use crate::schema::actor_store::blob::dsl as BlobSchema;
 
         let did = self.did.clone();
-        self.db.run(move |conn| {
+        self.db.get().await?.interact(move |conn| {
             let BlobMetadata {
                 temp_key,
                 size,
···
             ON CONFLICT (cid, did) DO UPDATE \
             SET \"tempKey\" = EXCLUDED.\"tempKey\" \
             WHERE pds.blob.\"tempKey\" is not null;");
-            upsert
+            #[expect(trivial_casts)]
+            let _ = upsert
                 .bind::<Text, _>(&cid.to_string())
                 .bind::<Text, _>(&did)
                 .bind::<Text, _>(&mime_type)
                 .bind::<Integer, _>(size as i32)
-                .bind::<Nullable<Text>, _>(Some(temp_key.clone()))
+                .bind::<Nullable<Text>, _>(Some(temp_key))
                 .bind::<Nullable<Integer>, _>(width)
                 .bind::<Nullable<Integer>, _>(height)
                 .bind::<Text, _>(created_at)
···
                 .execute(conn)?;
 
             Ok(BlobRef::new(cid, mime_type, size, None))
-        }).await
+        }).await.expect("Failed to track untethered blob")
     }
 
+    /// Process blobs associated with writes
     pub async fn process_write_blobs(&self, writes: Vec<PreparedWrite>) -> Result<()> {
         self.delete_dereferenced_blobs(writes.clone()).await?;
-        let _ = stream::iter(writes)
-            .then(|write| async move {
-                Ok::<(), anyhow::Error>(match write {
-                    PreparedWrite::Create(w) => {
-                        for blob in w.blobs {
-                            self.verify_blob_and_make_permanent(blob.clone()).await?;
-                            self.associate_blob(blob, w.uri.clone()).await?;
-                        }
-                    }
-                    PreparedWrite::Update(w) => {
-                        for blob in w.blobs {
-                            self.verify_blob_and_make_permanent(blob.clone()).await?;
-                            self.associate_blob(blob, w.uri.clone()).await?;
-                        }
-                    }
-                    _ => (),
-                })
-            })
-            .collect::<Vec<_>>()
-            .await
-            .into_iter()
-            .collect::<Result<Vec<_>, _>>()?;
+
+        drop(
+            stream::iter(writes)
+                .then(async move |write| {
+                    match write {
+                        PreparedWrite::Create(w) => {
+                            for blob in w.blobs {
+                                self.verify_blob_and_make_permanent(blob.clone()).await?;
+                                self.associate_blob(blob, w.uri.clone()).await?;
+                            }
+                        }
+                        PreparedWrite::Update(w) => {
+                            for blob in w.blobs {
+                                self.verify_blob_and_make_permanent(blob.clone()).await?;
+                                self.associate_blob(blob, w.uri.clone()).await?;
+                            }
+                        }
+                        _ => (),
+                    };
+                    Ok::<(), anyhow::Error>(())
+                })
+                .collect::<Vec<_>>()
+                .await
+                .into_iter()
+                .collect::<Result<Vec<_>, _>>()?,
+        );
+
         Ok(())
     }
 
+    /// Delete blobs that are no longer referenced by any records
     pub async fn delete_dereferenced_blobs(&self, writes: Vec<PreparedWrite>) -> Result<()> {
-        use rsky_pds::schema::pds::blob::dsl as BlobSchema;
-        use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
+        use crate::schema::actor_store::blob::dsl as BlobSchema;
+        use crate::schema::actor_store::record_blob::dsl as RecordBlobSchema;
 
+        // Extract URIs
         let uris: Vec<String> = writes
-            .clone()
-            .into_iter()
+            .iter()
             .filter_map(|w| match w {
-                PreparedWrite::Delete(w) => Some(w.uri),
-                PreparedWrite::Update(w) => Some(w.uri),
+                PreparedWrite::Delete(w) => Some(w.uri.clone()),
+                PreparedWrite::Update(w) => Some(w.uri.clone()),
                 _ => None,
             })
             .collect();
+
         if uris.is_empty() {
             return Ok(());
         }
 
+        // Diesel's SQLite backend can't express DELETE ... RETURNING here,
+        // so fetch the matching records first, then delete them.
+        let did = self.did.clone();
+        let uris_clone = uris.clone();
         let deleted_repo_blobs: Vec<models::RecordBlob> = self
             .db
-            .run(move |conn| {
-                delete(RecordBlobSchema::record_blob)
-                    .filter(RecordBlobSchema::recordUri.eq_any(uris))
-                    .get_results(conn)
+            .get()
+            .await?
+            .interact(move |conn| {
+                RecordBlobSchema::record_blob
+                    .filter(RecordBlobSchema::recordUri.eq_any(&uris_clone))
+                    .filter(RecordBlobSchema::did.eq(&did))
+                    .load::<models::RecordBlob>(conn)
             })
-            .await?
-            .into_iter()
-            .collect::<Vec<models::RecordBlob>>();
+            .await
+            .expect("Failed to get deleted repo blobs")?;
+
         if deleted_repo_blobs.is_empty() {
             return Ok(());
         }
 
+        // Now perform the delete
+        let uris_clone = uris.clone();
+        _ = self
+            .db
+            .get()
+            .await?
+            .interact(move |conn| {
+                delete(RecordBlobSchema::record_blob)
+                    .filter(RecordBlobSchema::recordUri.eq_any(uris_clone))
+                    .execute(conn)
+            })
+            .await
+            .expect("Failed to delete repo blobs")?;
+
+        // Extract blob cids from the deleted records
         let deleted_repo_blob_cids: Vec<String> = deleted_repo_blobs
             .into_iter()
             .map(|row| row.blob_cid)
-            .collect::<Vec<String>>();
+            .collect();
 
-        let x = deleted_repo_blob_cids.clone();
-        let mut duplicated_cids: Vec<String> = self
+        // Find duplicates (blobs referenced by other records)
+        let cids_clone = deleted_repo_blob_cids.clone();
+        let did_clone = self.did.clone();
+        let duplicated_cids: Vec<String> = self
             .db
-            .run(move |conn| {
+            .get()
+            .await?
+            .interact(move |conn| {
                 RecordBlobSchema::record_blob
+                    .filter(RecordBlobSchema::blobCid.eq_any(cids_clone))
+                    .filter(RecordBlobSchema::did.eq(did_clone))
                     .select(RecordBlobSchema::blobCid)
-                    .filter(RecordBlobSchema::blobCid.eq_any(&x))
-                    .load(conn)
+                    .load::<String>(conn)
             })
-            .await?
-            .into_iter()
-            .collect::<Vec<String>>();
+            .await
+            .expect("Failed to get duplicated cids")?;
 
-        let mut new_blob_cids: Vec<String> = writes
-            .into_iter()
-            .map(|w| match w {
-                PreparedWrite::Create(w) => w.blobs,
-                PreparedWrite::Update(w) => w.blobs,
-                PreparedWrite::Delete(_) => Vec::new(),
+        // Extract new blob cids from writes (creates and updates)
+        let new_blob_cids: Vec<String> = writes
+            .iter()
+            .flat_map(|w| match w {
+                PreparedWrite::Create(w) => w.blobs.clone(),
+                PreparedWrite::Update(w) => w.blobs.clone(),
+                _ => Vec::new(),
             })
-            .collect::<Vec<Vec<PreparedBlobRef>>>()
-            .into_iter()
-            .flat_map(|v: Vec<PreparedBlobRef>| v.into_iter().map(|b| b.cid.to_string()))
+            .map(|b| b.cid.to_string())
             .collect();
-        let mut cids_to_keep = Vec::new();
-        cids_to_keep.append(&mut new_blob_cids);
-        cids_to_keep.append(&mut duplicated_cids);
 
-        let cids_to_delete = deleted_repo_blob_cids
+        // Determine which blobs to keep vs delete
+        let cids_to_keep: Vec<String> = [&new_blob_cids[..], &duplicated_cids[..]].concat();
+        let cids_to_delete: Vec<String> = deleted_repo_blob_cids
             .into_iter()
-            .filter_map(|cid: String| match cids_to_keep.contains(&cid) {
-                true => Some(cid),
-                false => None,
-            })
-            .collect::<Vec<String>>();
+            .filter(|cid| !cids_to_keep.contains(cid))
+            .collect();
+
         if cids_to_delete.is_empty() {
             return Ok(());
         }
 
-        let y = cids_to_delete.clone();
-        self.db
-            .run(move |conn| {
+        // Delete from the blob table
+        let cids = cids_to_delete.clone();
+        let did_clone = self.did.clone();
+        _ = self
+            .db
+            .get()
+            .await?
+            .interact(move |conn| {
                 delete(BlobSchema::blob)
-                    .filter(BlobSchema::cid.eq_any(&y))
+                    .filter(BlobSchema::cid.eq_any(cids))
+                    .filter(BlobSchema::did.eq(did_clone))
                     .execute(conn)
             })
-            .await?;
-
-        // Original code queues a background job to delete by CID from S3 compatible blobstore
-        let _ = stream::iter(cids_to_delete)
-            .then(|cid| async { self.blobstore.delete(cid).await })
319
-
.collect::<Vec<_>>()
320
369
.await
321
-
.into_iter()
322
-
.collect::<Result<Vec<_>, _>>()?;
370
+
.expect("Failed to delete blobs")?;
371
+
372
+
// Delete from blob storage
373
+
// Ideally we'd use a background queue here, but for now:
374
+
drop(
375
+
stream::iter(cids_to_delete)
376
+
.then(async move |cid| match Cid::from_str(&cid) {
377
+
Ok(cid) => self.blobstore.delete(cid.to_string()).await,
378
+
Err(e) => Err(anyhow::Error::new(e)),
379
+
})
380
+
.collect::<Vec<_>>()
381
+
.await
382
+
.into_iter()
383
+
.collect::<Result<Vec<_>, _>>()?,
384
+
);
385
+
323
386
Ok(())
324
387
}
325
388
389
+
/// Verify a blob and make it permanent
326
390
pub async fn verify_blob_and_make_permanent(&self, blob: PreparedBlobRef) -> Result<()> {
327
-
use rsky_pds::schema::pds::blob::dsl as BlobSchema;
391
+
use crate::schema::actor_store::blob::dsl as BlobSchema;
328
392
329
393
let found = self
330
394
.db
331
-
.run(move |conn| {
395
+
.get()
396
+
.await?
397
+
.interact(move |conn| {
332
398
BlobSchema::blob
333
399
.filter(
334
400
BlobSchema::cid
···
339
405
.first(conn)
340
406
.optional()
341
407
})
342
-
.await?;
408
+
.await
409
+
.expect("Failed to verify blob")?;
410
+
343
411
if let Some(found) = found {
344
412
verify_blob(&blob, &found).await?;
345
413
if let Some(ref temp_key) = found.temp_key {
···
347
415
.make_permanent(temp_key.clone(), blob.cid)
348
416
.await?;
349
417
}
350
-
self.db
351
-
.run(move |conn| {
418
+
_ = self
419
+
.db
420
+
.get()
421
+
.await?
422
+
.interact(move |conn| {
352
423
update(BlobSchema::blob)
353
424
.filter(BlobSchema::tempKey.eq(found.temp_key))
354
425
.set(BlobSchema::tempKey.eq::<Option<String>>(None))
355
426
.execute(conn)
356
427
})
357
-
.await?;
428
+
.await
429
+
.expect("Failed to update blob")?;
358
430
Ok(())
359
431
} else {
360
-
bail!("Cound not find blob: {:?}", blob.cid.to_string())
432
+
bail!("Could not find blob: {:?}", blob.cid.to_string())
361
433
}
362
434
}
363
435
364
-
pub async fn associate_blob(&self, blob: PreparedBlobRef, _record_uri: String) -> Result<()> {
365
-
use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
436
+
/// Associate a blob with a record
437
+
pub async fn associate_blob(&self, blob: PreparedBlobRef, record_uri: String) -> Result<()> {
438
+
use crate::schema::actor_store::record_blob::dsl as RecordBlobSchema;
366
439
367
440
let cid = blob.cid.to_string();
368
-
let record_uri = _record_uri;
369
441
let did = self.did.clone();
370
-
self.db
371
-
.run(move |conn| {
442
+
443
+
_ = self
444
+
.db
445
+
.get()
446
+
.await?
447
+
.interact(move |conn| {
372
448
insert_into(RecordBlobSchema::record_blob)
373
449
.values((
374
450
RecordBlobSchema::blobCid.eq(cid),
···
378
454
.on_conflict_do_nothing()
379
455
.execute(conn)
380
456
})
381
-
.await?;
457
+
.await
458
+
.expect("Failed to associate blob")?;
459
+
382
460
Ok(())
383
461
}
384
462
463
+
/// Count all blobs for this actor
385
464
pub async fn blob_count(&self) -> Result<i64> {
386
-
use rsky_pds::schema::pds::blob::dsl as BlobSchema;
465
+
use crate::schema::actor_store::blob::dsl as BlobSchema;
387
466
388
467
let did = self.did.clone();
389
468
self.db
390
-
.run(move |conn| {
469
+
.get()
470
+
.await?
471
+
.interact(move |conn| {
391
472
let res = BlobSchema::blob
392
473
.filter(BlobSchema::did.eq(&did))
393
474
.count()
···
395
476
Ok(res)
396
477
})
397
478
.await
479
+
.expect("Failed to count blobs")
398
480
}
399
481
482
+
/// Count blobs associated with records
400
483
pub async fn record_blob_count(&self) -> Result<i64> {
401
-
use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
484
+
use crate::schema::actor_store::record_blob::dsl as RecordBlobSchema;
402
485
403
486
let did = self.did.clone();
404
487
self.db
405
-
.run(move |conn| {
488
+
.get()
489
+
.await?
490
+
.interact(move |conn| {
406
491
let res: i64 = RecordBlobSchema::record_blob
407
492
.filter(RecordBlobSchema::did.eq(&did))
408
493
.select(count_distinct(RecordBlobSchema::blobCid))
···
410
495
Ok(res)
411
496
})
412
497
.await
498
+
.expect("Failed to count record blobs")
413
499
}
414
500
501
+
/// List blobs that are referenced but missing
415
502
pub async fn list_missing_blobs(
416
503
&self,
417
504
opts: ListMissingBlobsOpts,
418
505
) -> Result<Vec<ListMissingBlobsRefRecordBlob>> {
419
-
use rsky_pds::schema::pds::blob::dsl as BlobSchema;
420
-
use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
506
+
use crate::schema::actor_store::blob::dsl as BlobSchema;
507
+
use crate::schema::actor_store::record_blob::dsl as RecordBlobSchema;
421
508
422
509
let did = self.did.clone();
423
510
self.db
424
-
.run(move |conn| {
511
+
.get()
512
+
.await?
513
+
.interact(move |conn| {
425
514
let ListMissingBlobsOpts { cursor, limit } = opts;
426
515
427
516
if limit > 1000 {
428
517
bail!("Limit too high. Max: 1000.");
429
518
}
430
519
431
-
let res: Vec<models::RecordBlob> = if let Some(cursor) = cursor {
432
-
RecordBlobSchema::record_blob
433
-
.limit(limit as i64)
434
-
.filter(not(exists(
435
-
BlobSchema::blob
436
-
.filter(BlobSchema::cid.eq(RecordBlobSchema::blobCid))
437
-
.filter(BlobSchema::did.eq(&did))
438
-
.select(models::Blob::as_select()),
439
-
)))
440
-
.filter(RecordBlobSchema::blobCid.gt(cursor))
441
-
.filter(RecordBlobSchema::did.eq(&did))
442
-
.select(models::RecordBlob::as_select())
443
-
.order(RecordBlobSchema::blobCid.asc())
444
-
.distinct_on(RecordBlobSchema::blobCid)
445
-
.get_results(conn)?
520
+
// TODO: Improve this query
521
+
522
+
// SQLite doesn't support DISTINCT ON, so we use GROUP BY instead
523
+
let query = RecordBlobSchema::record_blob
524
+
.filter(not(exists(
525
+
BlobSchema::blob
526
+
.filter(BlobSchema::cid.eq(RecordBlobSchema::blobCid))
527
+
.filter(BlobSchema::did.eq(&did)),
528
+
)))
529
+
.filter(RecordBlobSchema::did.eq(&did))
530
+
.into_boxed();
531
+
532
+
// Apply cursor filtering if provided
533
+
let query = if let Some(cursor) = cursor {
534
+
query.filter(RecordBlobSchema::blobCid.gt(cursor))
446
535
} else {
447
-
RecordBlobSchema::record_blob
448
-
.limit(limit as i64)
449
-
.filter(not(exists(
450
-
BlobSchema::blob
451
-
.filter(BlobSchema::cid.eq(RecordBlobSchema::blobCid))
452
-
.filter(BlobSchema::did.eq(&did))
453
-
.select(models::Blob::as_select()),
454
-
)))
455
-
.filter(RecordBlobSchema::did.eq(&did))
456
-
.select(models::RecordBlob::as_select())
457
-
.order(RecordBlobSchema::blobCid.asc())
458
-
.distinct_on(RecordBlobSchema::blobCid)
459
-
.get_results(conn)?
536
+
query
460
537
};
461
538
462
-
Ok(res
463
-
.into_iter()
464
-
.map(|row| ListMissingBlobsRefRecordBlob {
465
-
cid: row.blob_cid,
466
-
record_uri: row.record_uri,
467
-
})
468
-
.collect())
539
+
// For SQLite, use a simplified approach without GROUP BY to avoid recursion limit issues
540
+
let res = query
541
+
.select((RecordBlobSchema::blobCid, RecordBlobSchema::recordUri))
542
+
.order(RecordBlobSchema::blobCid.asc())
543
+
.limit(limit as i64)
544
+
.load::<(String, String)>(conn)?;
545
+
546
+
// Process results to get distinct cids with their first record URI
547
+
let mut result = Vec::new();
548
+
let mut last_cid = None;
549
+
550
+
for (cid, uri) in res {
551
+
if last_cid.as_ref() != Some(&cid) {
552
+
result.push(ListMissingBlobsRefRecordBlob {
553
+
cid: cid.clone(),
554
+
record_uri: uri,
555
+
});
556
+
last_cid = Some(cid);
557
+
}
558
+
}
559
+
560
+
Ok(result)
469
561
})
470
562
.await
563
+
.expect("Failed to list missing blobs")
471
564
}
472
565
566
+
/// List all blobs with optional filtering
473
567
pub async fn list_blobs(&self, opts: ListBlobsOpts) -> Result<Vec<String>> {
474
-
use rsky_pds::schema::pds::record::dsl as RecordSchema;
475
-
use rsky_pds::schema::pds::record_blob::dsl as RecordBlobSchema;
568
+
use crate::schema::actor_store::record::dsl as RecordSchema;
569
+
use crate::schema::actor_store::record_blob::dsl as RecordBlobSchema;
570
+
476
571
let ListBlobsOpts {
477
572
since,
478
573
cursor,
···
494
589
if let Some(cursor) = cursor {
495
590
builder = builder.filter(RecordBlobSchema::blobCid.gt(cursor));
496
591
}
497
-
self.db.run(move |conn| builder.load(conn)).await?
592
+
self.db
593
+
.get()
594
+
.await?
595
+
.interact(move |conn| builder.load(conn))
596
+
.await
597
+
.expect("Failed to list blobs")?
498
598
} else {
499
599
let mut builder = RecordBlobSchema::record_blob
500
600
.select(RecordBlobSchema::blobCid)
···
506
606
if let Some(cursor) = cursor {
507
607
builder = builder.filter(RecordBlobSchema::blobCid.gt(cursor));
508
608
}
509
-
self.db.run(move |conn| builder.load(conn)).await?
609
+
self.db
610
+
.get()
611
+
.await?
612
+
.interact(move |conn| builder.load(conn))
613
+
.await
614
+
.expect("Failed to list blobs")?
510
615
};
616
+
511
617
Ok(res)
512
618
}
513
619
620
+
/// Get the takedown status of a blob
514
621
pub async fn get_blob_takedown_status(&self, cid: Cid) -> Result<Option<StatusAttr>> {
515
-
use rsky_pds::schema::pds::blob::dsl as BlobSchema;
622
+
use crate::schema::actor_store::blob::dsl as BlobSchema;
516
623
517
624
self.db
518
-
.run(move |conn| {
625
+
.get()
626
+
.await?
627
+
.interact(move |conn| {
519
628
let res = BlobSchema::blob
520
629
.filter(BlobSchema::cid.eq(cid.to_string()))
521
630
.select(models::Blob::as_select())
522
631
.first(conn)
523
632
.optional()?;
633
+
524
634
match res {
525
635
None => Ok(None),
526
-
Some(res) => match res.takedown_ref {
527
-
None => Ok(Some(StatusAttr {
528
-
applied: false,
529
-
r#ref: None,
530
-
})),
531
-
Some(takedown_ref) => Ok(Some(StatusAttr {
532
-
applied: true,
533
-
r#ref: Some(takedown_ref),
534
-
})),
535
-
},
636
+
Some(res) => res.takedown_ref.map_or_else(
637
+
|| {
638
+
Ok(Some(StatusAttr {
639
+
applied: false,
640
+
r#ref: None,
641
+
}))
642
+
},
643
+
|takedown_ref| {
644
+
Ok(Some(StatusAttr {
645
+
applied: true,
646
+
r#ref: Some(takedown_ref),
647
+
}))
648
+
},
649
+
),
536
650
}
537
651
})
538
652
.await
653
+
.expect("Failed to get blob takedown status")
539
654
}
540
655
541
-
// Transactors
542
-
// -------------------
543
-
656
+
/// Update the takedown status of a blob
544
657
pub async fn update_blob_takedown_status(&self, blob: Cid, takedown: StatusAttr) -> Result<()> {
545
-
use rsky_pds::schema::pds::blob::dsl as BlobSchema;
658
+
use crate::schema::actor_store::blob::dsl as BlobSchema;
546
659
547
660
let takedown_ref: Option<String> = match takedown.applied {
548
-
true => match takedown.r#ref {
549
-
Some(takedown_ref) => Some(takedown_ref),
550
-
None => Some(now()),
551
-
},
661
+
true => takedown.r#ref.map_or_else(|| Some(now()), Some),
552
662
false => None,
553
663
};
554
664
555
-
let blob = self
665
+
let blob_cid = blob.to_string();
666
+
let did_clone = self.did.clone();
667
+
668
+
_ = self
556
669
.db
557
-
.run(move |conn| {
558
-
update(BlobSchema::blob)
559
-
.filter(BlobSchema::cid.eq(blob.to_string()))
670
+
.get()
671
+
.await?
672
+
.interact(move |conn| {
673
+
_ = update(BlobSchema::blob)
674
+
.filter(BlobSchema::cid.eq(blob_cid))
675
+
.filter(BlobSchema::did.eq(did_clone))
560
676
.set(BlobSchema::takedownRef.eq(takedown_ref))
561
677
.execute(conn)?;
562
-
Ok::<_, Error>(blob)
678
+
Ok::<_, result::Error>(blob)
563
679
})
564
-
.await?;
680
+
.await
681
+
.expect("Failed to update blob takedown status")?;
565
682
566
683
let res = match takedown.applied {
567
684
true => self.blobstore.quarantine(blob).await,
568
685
false => self.blobstore.unquarantine(blob).await,
569
686
};
687
+
570
688
match res {
571
689
Ok(_) => Ok(()),
572
690
Err(e) => match e.downcast_ref() {
···
576
694
}
577
695
}
578
696
}
697
+
698
+
pub async fn verify_blob(blob: &PreparedBlobRef, found: &models::Blob) -> Result<()> {
699
+
if let Some(max_size) = blob.constraints.max_size {
700
+
if found.size as usize > max_size {
701
+
bail!(
702
+
"BlobTooLarge: This file is too large. It is {:?} but the maximum size is {:?}",
703
+
found.size,
704
+
max_size
705
+
)
706
+
}
707
+
}
708
+
if blob.mime_type != found.mime_type {
709
+
bail!(
710
+
"InvalidMimeType: Referenced MimeType does not match stored blob. Expected: {:?}, Got: {:?}",
711
+
found.mime_type,
712
+
blob.mime_type
713
+
)
714
+
}
715
+
if let Some(ref accept) = blob.constraints.accept {
716
+
if !accepted_mime(blob.mime_type.clone(), accept.clone()).await {
717
+
bail!(
718
+
"Wrong type of file. It is {:?} but it must match {:?}.",
719
+
blob.mime_type,
720
+
accept
721
+
)
722
+
}
723
+
}
724
+
Ok(())
725
+
}
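
A note on the recurring pattern above: every former `self.db.run(...)` call (rsky's `DbConn`) becomes a deadpool-diesel checkout followed by `interact`, which runs the blocking Diesel query on a dedicated thread and yields a nested `Result` (the outer one reports a panicked or aborted interact task). A minimal self-contained sketch of that pattern, assuming a deadpool-diesel SQLite pool; the database path and the `select_one` helper are illustrative, not part of this change:

    use anyhow::Result;
    use deadpool_diesel::{Runtime, sqlite::{Manager, Pool}};
    use diesel::prelude::*;

    // Checkout + interact: blocking Diesel work never stalls the async executor.
    async fn select_one(pool: &Pool) -> Result<i64> {
        let conn = pool.get().await?; // deadpool_diesel::sqlite::Object
        let n = conn
            .interact(|conn| {
                // any Diesel query works here; SELECT 1 keeps the sketch standalone
                diesel::select(diesel::dsl::sql::<diesel::sql_types::BigInt>("1"))
                    .get_result::<i64>(conn)
            })
            .await
            .expect("interact task panicked or was aborted")?;
        Ok(n)
    }

    #[tokio::main]
    async fn main() -> Result<()> {
        let manager = Manager::new("data/sqlite.db", Runtime::Tokio1);
        let pool = Pool::builder(manager).max_size(4).build()?;
        println!("{}", select_one(&pool).await?);
        Ok(())
    }

The `.expect(...)` on the outer result mirrors how this file treats an `InteractError` as fatal rather than threading it into `anyhow::Result`.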
+287  src/actor_store/blob_fs.rs
···
+//! File system implementation of blob storage
+//! Based on the S3 implementation but using local file system instead
+use anyhow::Result;
+use axum::body::Bytes;
+use cidv10::Cid;
+use rsky_common::get_random_str;
+use rsky_repo::error::BlobError;
+use std::path::PathBuf;
+use std::str::FromStr;
+use tokio::fs as async_fs;
+use tokio::io::AsyncWriteExt;
+use tracing::{debug, error, warn};
+
+/// ByteStream implementation for blob data
+pub struct ByteStream {
+    pub bytes: Bytes,
+}
+
+impl ByteStream {
+    /// Create a new ByteStream with the given bytes
+    pub const fn new(bytes: Bytes) -> Self {
+        Self { bytes }
+    }
+
+    /// Collect the bytes from the stream
+    pub async fn collect(self) -> Result<Bytes> {
+        Ok(self.bytes)
+    }
+}
+
+/// Path information for moving a blob
+struct MoveObject {
+    from: PathBuf,
+    to: PathBuf,
+}
+
+/// File system implementation of blob storage
+pub struct BlobStoreFs {
+    /// Base directory for storing blobs
+    pub base_dir: PathBuf,
+    /// DID of the actor
+    pub did: String,
+}
+
+impl BlobStoreFs {
+    /// Create a new file system blob store for the given DID and base directory
+    pub const fn new(did: String, base_dir: PathBuf) -> Self {
+        Self { base_dir, did }
+    }
+
+    /// Create a factory function for blob stores
+    pub fn creator(base_dir: PathBuf) -> Box<dyn Fn(String) -> Self> {
+        let base_dir_clone = base_dir;
+        Box::new(move |did: String| Self::new(did, base_dir_clone.clone()))
+    }
+
+    /// Generate a random key for temporary storage
+    fn gen_key(&self) -> String {
+        get_random_str()
+    }
+
+    /// Get path to the temporary blob storage
+    fn get_tmp_path(&self, key: &str) -> PathBuf {
+        self.base_dir.join("tmp").join(&self.did).join(key)
+    }
+
+    /// Get path to the stored blob with appropriate sharding
+    fn get_stored_path(&self, cid: Cid) -> PathBuf {
+        let cid_str = cid.to_string();
+
+        // Create two-level sharded structure based on CID
+        // First 10 chars for level 1, next 10 chars for level 2
+        let first_level = if cid_str.len() >= 10 {
+            &cid_str[0..10]
+        } else {
+            "short"
+        };
+
+        let second_level = if cid_str.len() >= 20 {
+            &cid_str[10..20]
+        } else {
+            "short"
+        };
+
+        self.base_dir
+            .join("blocks")
+            .join(&self.did)
+            .join(first_level)
+            .join(second_level)
+            .join(&cid_str)
+    }
+
+    /// Get path to the quarantined blob
+    fn get_quarantined_path(&self, cid: Cid) -> PathBuf {
+        let cid_str = cid.to_string();
+        self.base_dir
+            .join("quarantine")
+            .join(&self.did)
+            .join(&cid_str)
+    }
+
+    /// Store a blob temporarily
+    pub async fn put_temp(&self, bytes: Bytes) -> Result<String> {
+        let key = self.gen_key();
+        let temp_path = self.get_tmp_path(&key);
+
+        // Ensure the directory exists
+        if let Some(parent) = temp_path.parent() {
+            async_fs::create_dir_all(parent).await?;
+        }
+
+        // Write the temporary blob
+        let mut file = async_fs::File::create(&temp_path).await?;
+        file.write_all(&bytes).await?;
+        file.flush().await?;
+
+        debug!("Stored temp blob at: {:?}", temp_path);
+        Ok(key)
+    }
+
+    /// Make a temporary blob permanent by moving it to the blob store
+    pub async fn make_permanent(&self, key: String, cid: Cid) -> Result<()> {
+        let already_has = self.has_stored(cid).await?;
+
+        if !already_has {
+            // Move the temporary blob to permanent storage
+            self.move_object(MoveObject {
+                from: self.get_tmp_path(&key),
+                to: self.get_stored_path(cid),
+            })
+            .await?;
+            debug!("Moved temp blob to permanent: {} -> {}", key, cid);
+        } else {
+            // Already saved, so just delete the temp
+            let temp_path = self.get_tmp_path(&key);
+            if temp_path.exists() {
+                async_fs::remove_file(temp_path).await?;
+                debug!("Deleted temp blob as permanent already exists: {}", key);
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Store a blob directly as permanent
+    pub async fn put_permanent(&self, cid: Cid, bytes: Bytes) -> Result<()> {
+        let target_path = self.get_stored_path(cid);
+
+        // Ensure the directory exists
+        if let Some(parent) = target_path.parent() {
+            async_fs::create_dir_all(parent).await?;
+        }
+
+        // Write the blob
+        let mut file = async_fs::File::create(&target_path).await?;
+        file.write_all(&bytes).await?;
+        file.flush().await?;
+
+        debug!("Stored permanent blob: {}", cid);
+        Ok(())
+    }
+
+    /// Quarantine a blob by moving it to the quarantine area
+    pub async fn quarantine(&self, cid: Cid) -> Result<()> {
+        self.move_object(MoveObject {
+            from: self.get_stored_path(cid),
+            to: self.get_quarantined_path(cid),
+        })
+        .await?;
+
+        debug!("Quarantined blob: {}", cid);
+        Ok(())
+    }
+
+    /// Unquarantine a blob by moving it back to regular storage
+    pub async fn unquarantine(&self, cid: Cid) -> Result<()> {
+        self.move_object(MoveObject {
+            from: self.get_quarantined_path(cid),
+            to: self.get_stored_path(cid),
+        })
+        .await?;
+
+        debug!("Unquarantined blob: {}", cid);
+        Ok(())
+    }
+
+    /// Get a blob as a stream
+    async fn get_object(&self, cid: Cid) -> Result<ByteStream> {
+        let blob_path = self.get_stored_path(cid);
+
+        match async_fs::read(&blob_path).await {
+            Ok(bytes) => Ok(ByteStream::new(Bytes::from(bytes))),
+            Err(e) => {
+                error!("Failed to read blob at path {:?}: {}", blob_path, e);
+                Err(anyhow::Error::new(BlobError::BlobNotFoundError))
+            }
+        }
+    }
+
+    /// Get blob bytes
+    pub async fn get_bytes(&self, cid: Cid) -> Result<Bytes> {
+        let stream = self.get_object(cid).await?;
+        stream.collect().await
+    }
+
+    /// Get a blob as a stream
+    pub async fn get_stream(&self, cid: Cid) -> Result<ByteStream> {
+        self.get_object(cid).await
+    }
+
+    /// Delete a blob by CID string
+    pub async fn delete(&self, cid_str: String) -> Result<()> {
+        match Cid::from_str(&cid_str) {
+            Ok(cid) => self.delete_path(self.get_stored_path(cid)).await,
+            Err(e) => {
+                warn!("Invalid CID: {} - {}", cid_str, e);
+                Err(anyhow::anyhow!("Invalid CID: {}", e))
+            }
+        }
+    }
+
+    /// Delete multiple blobs by CID
+    pub async fn delete_many(&self, cids: Vec<Cid>) -> Result<()> {
+        let mut futures = Vec::with_capacity(cids.len());
+
+        for cid in cids {
+            futures.push(self.delete_path(self.get_stored_path(cid)));
+        }
+
+        // Execute all delete operations concurrently
+        let results = futures::future::join_all(futures).await;
+
+        // Count errors but don't fail the operation
+        let error_count = results.iter().filter(|r| r.is_err()).count();
+        if error_count > 0 {
+            warn!(
+                "{} errors occurred while deleting {} blobs",
+                error_count,
+                results.len()
+            );
+        }
+
+        Ok(())
+    }
+
+    /// Check if a blob is stored in the regular storage
+    pub async fn has_stored(&self, cid: Cid) -> Result<bool> {
+        let blob_path = self.get_stored_path(cid);
+        Ok(blob_path.exists())
+    }
+
+    /// Check if a temporary blob exists
+    pub async fn has_temp(&self, key: String) -> Result<bool> {
+        let temp_path = self.get_tmp_path(&key);
+        Ok(temp_path.exists())
+    }
+
+    /// Helper function to delete a file at the given path
+    async fn delete_path(&self, path: PathBuf) -> Result<()> {
+        if path.exists() {
+            async_fs::remove_file(&path).await?;
+            debug!("Deleted file at: {:?}", path);
+            Ok(())
+        } else {
+            Err(anyhow::Error::new(BlobError::BlobNotFoundError))
+        }
+    }
+
+    /// Move a blob from one path to another
+    async fn move_object(&self, mov: MoveObject) -> Result<()> {
+        // Ensure the source exists
+        if !mov.from.exists() {
+            return Err(anyhow::Error::new(BlobError::BlobNotFoundError));
+        }
+
+        // Ensure the target directory exists
+        if let Some(parent) = mov.to.parent() {
+            async_fs::create_dir_all(parent).await?;
+        }
+
+        // Move the file
+        async_fs::rename(&mov.from, &mov.to).await?;
+
+        debug!("Moved blob: {:?} -> {:?}", mov.from, mov.to);
+        Ok(())
+    }
+}
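
For orientation, the layout this store produces is: temporary uploads under `tmp/<did>/<key>`, promoted blobs under `blocks/<did>/<first10>/<next10>/<cid>`, and quarantined blobs under `quarantine/<did>/<cid>`. A hypothetical round trip (the DID, directory, and `demo` wrapper are illustrative; the caller is assumed to have already computed the blob's CID):

    use anyhow::Result;
    use axum::body::Bytes;
    use cidv10::Cid;
    use std::path::PathBuf;

    async fn demo(cid: Cid, payload: Bytes) -> Result<()> {
        let store = BlobStoreFs::new(
            "did:example:alice".to_string(),     // illustrative DID
            PathBuf::from("/tmp/pds-blob-demo"), // illustrative base dir
        );
        let key = store.put_temp(payload).await?; // lands in tmp/<did>/<key>
        store.make_permanent(key, cid).await?;    // moved to blocks/<did>/...
        assert!(store.has_stored(cid).await?);
        let _bytes = store.get_bytes(cid).await?; // read back
        Ok(())
    }

Because promotion is a rename within one filesystem, `make_permanent` is effectively atomic, which is the main advantage over a copy-then-delete scheme.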
-52  src/actor_store/db.rs
···
-//! Database schema and connection management for the actor store.
-
-use crate::db::DatabaseConnection;
-use anyhow::{Context as _, Result};
-
-/// Type alias for the actor database.
-pub(crate) type ActorDb = DatabaseConnection;
-
-/// Gets a database connection for the actor store.
-///
-/// # Arguments
-///
-/// * `location` - The file path or URI for the SQLite database.
-/// * `disable_wal_auto_checkpoint` - Whether to disable the WAL auto-checkpoint.
-///
-/// # Returns
-///
-/// A `Result` containing the `ActorDb` instance or an error.
-pub async fn get_db(location: &str, disable_wal_auto_checkpoint: bool) -> Result<ActorDb> {
-    let pragmas = if disable_wal_auto_checkpoint {
-        Some(
-            &[
-                ("wal_autocheckpoint", "0"),
-                ("journal_mode", "WAL"),
-                ("synchronous", "NORMAL"),
-                ("foreign_keys", "ON"),
-            ][..],
-        )
-    } else {
-        Some(
-            &[
-                ("journal_mode", "WAL"),
-                ("synchronous", "NORMAL"),
-                ("foreign_keys", "ON"),
-            ][..],
-        )
-    };
-
-    let db = DatabaseConnection::new(location, pragmas)
-        .await
-        .context("Failed to initialize the actor database")?;
-
-    // Ensure WAL mode is properly set up
-    db.ensure_wal().await?;
-
-    // Run migrations
-    // TODO: make sure the migrations are populated?
-    db.run_migrations()
-        .context("Failed to run migrations on the actor database")?;
-
-    Ok(db)
-}
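
The deleted `get_db` was also where the SQLite pragmas (WAL journaling, `synchronous = NORMAL`, foreign keys, optional `wal_autocheckpoint = 0`) were applied. If that responsibility now sits with whoever builds the per-actor pools, something like the following sketch would reproduce the old behavior; `apply_pragmas` is hypothetical, not a function introduced by this PR:

    use anyhow::Result;
    use deadpool_diesel::sqlite::Pool;
    use diesel::connection::SimpleConnection;

    // Re-apply the pragmas the removed get_db() used to set on a connection.
    async fn apply_pragmas(pool: &Pool) -> Result<()> {
        let conn = pool.get().await?;
        conn.interact(|conn| {
            conn.batch_execute(
                "PRAGMA journal_mode = WAL;
                 PRAGMA synchronous = NORMAL;
                 PRAGMA foreign_keys = ON;",
            )
        })
        .await
        .expect("interact task panicked or was aborted")?;
        Ok(())
    }

Note that most pragmas are per-connection in SQLite (`journal_mode = WAL` persists in the database file; the others do not), so a real implementation would want this as a post-create hook on the pool rather than a one-off call.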
+540 -6  src/actor_store/mod.rs
···
 //! Actor store implementation for ATProto PDS.
+//! Based on https://github.com/blacksky-algorithms/rsky/blob/main/rsky-pds/src/actor_store/mod.rs
+//! Which is based on https://github.com/bluesky-social/atproto/blob/main/packages/repo/src/repo.ts
+//! and also adds components from https://github.com/bluesky-social/atproto/blob/main/packages/pds/src/actor-store/repo/transactor.ts
+//! blacksky-algorithms/rsky is licensed under the Apache License 2.0
+//!
+//! Modified for SQLite backend

-mod actor_store;
 mod blob;
-mod db;
+pub(crate) mod blob_fs;
 mod preference;
 mod record;
-mod sql_blob;
+pub(crate) mod sql_blob;
 mod sql_repo;

-pub(crate) use actor_store::ActorStore;
-pub(crate) use db::ActorDb;
-pub(crate) use sql_blob::BlobStoreSql;
+use anyhow::Result;
+use cidv10::Cid;
+use diesel::*;
+use futures::stream::{self, StreamExt};
+use rsky_pds::actor_store::repo::types::SyncEvtData;
+use rsky_repo::repo::Repo;
+use rsky_repo::storage::readable_blockstore::ReadableBlockstore;
+use rsky_repo::storage::types::RepoStorage;
+use rsky_repo::types::{
+    CommitAction, CommitData, CommitDataWithOps, CommitOp, PreparedCreateOrUpdate, PreparedWrite,
+    RecordCreateOrUpdateOp, RecordWriteEnum, RecordWriteOp, WriteOpAction, write_to_op,
+};
+use rsky_repo::util::format_data_key;
+use rsky_syntax::aturi::AtUri;
+use secp256k1::{Keypair, Secp256k1, SecretKey};
+use std::str::FromStr;
+use std::sync::Arc;
+use std::{env, fmt};
+use tokio::sync::RwLock;
+
+use blob::BlobReader;
+use blob_fs::BlobStoreFs;
+use preference::PreferenceReader;
+use record::RecordReader;
+use sql_repo::SqlRepoReader;
+
+use crate::serve::ActorStorage;
+
+#[derive(Debug)]
+enum FormatCommitError {
+    BadRecordSwap(String),
+    RecordSwapMismatch(String),
+    BadCommitSwap(String),
+    MissingRepoRoot(String),
+}
+
+impl fmt::Display for FormatCommitError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::BadRecordSwap(record) => write!(f, "BadRecordSwapError: `{:?}`", record),
+            Self::RecordSwapMismatch(record) => {
+                write!(f, "BadRecordSwapError: current record is `{:?}`", record)
+            }
+            Self::BadCommitSwap(cid) => write!(f, "BadCommitSwapError: {}", cid),
+            Self::MissingRepoRoot(did) => write!(f, "No repo root found for `{}`", did),
+        }
+    }
+}
+
+impl std::error::Error for FormatCommitError {}
+
+pub struct ActorStore {
+    pub did: String,
+    pub storage: Arc<RwLock<SqlRepoReader>>, // get ipld blocks from db
+    pub record: RecordReader,                // get lexicon records from db
+    pub blob: BlobReader,                    // get blobs
+    pub pref: PreferenceReader,              // get preferences
+}
+
+// Combination of RepoReader/Transactor, BlobReader/Transactor, SqlRepoReader/Transactor
+impl ActorStore {
+    /// Concrete reader of an individual repo (hence BlobStoreFs which takes `did` param)
+    pub fn new(
+        did: String,
+        blobstore: BlobStoreFs,
+        db: deadpool_diesel::Pool<
+            deadpool_diesel::Manager<SqliteConnection>,
+            deadpool_diesel::sqlite::Object,
+        >,
+        conn: deadpool_diesel::sqlite::Object,
+    ) -> Self {
+        Self {
+            storage: Arc::new(RwLock::new(SqlRepoReader::new(did.clone(), None, conn))),
+            record: RecordReader::new(did.clone(), db.clone()),
+            pref: PreferenceReader::new(did.clone(), db.clone()),
+            did,
+            blob: BlobReader::new(blobstore, db),
+        }
+    }
+
+    /// Create a new ActorStore taking ActorPools HashMap as input
+    pub async fn from_actor_pools(
+        did: &String,
+        hashmap_actor_pools: &std::collections::HashMap<String, ActorStorage>,
+    ) -> Self {
+        let actor_pool = hashmap_actor_pools
+            .get(did)
+            .expect("Actor pool not found")
+            .clone();
+        let blobstore = BlobStoreFs::new(did.clone(), actor_pool.blob);
+        let conn = actor_pool
+            .repo
+            .clone()
+            .get()
+            .await
+            .expect("Failed to get connection");
+        Self::new(did.clone(), blobstore, actor_pool.repo, conn)
+    }
+
+    pub async fn get_repo_root(&self) -> Option<Cid> {
+        let storage_guard = self.storage.read().await;
+        storage_guard.get_root().await
+    }
+
+    // Transactors
+    // -------------------
+
+    #[deprecated]
+    pub async fn create_repo_legacy(
+        &self,
+        keypair: Keypair,
+        writes: Vec<PreparedCreateOrUpdate>,
+    ) -> Result<CommitData> {
+        let write_ops = writes
+            .clone()
+            .into_iter()
+            .map(|prepare| {
+                let at_uri: AtUri = prepare.uri.try_into()?;
+                Ok(RecordCreateOrUpdateOp {
+                    action: WriteOpAction::Create,
+                    collection: at_uri.get_collection(),
+                    rkey: at_uri.get_rkey(),
+                    record: prepare.record,
+                })
+            })
+            .collect::<Result<Vec<RecordCreateOrUpdateOp>>>()?;
+        let commit = Repo::format_init_commit(
+            self.storage.clone(),
+            self.did.clone(),
+            keypair,
+            Some(write_ops),
+        )
+        .await?;
+        self.storage
+            .read()
+            .await
+            .apply_commit(commit.clone(), None)
+            .await?;
+        let writes = writes
+            .into_iter()
+            .map(PreparedWrite::Create)
+            .collect::<Vec<PreparedWrite>>();
+        self.blob.process_write_blobs(writes).await?;
+        Ok(commit)
+    }
+
+    pub async fn create_repo(
+        &self,
+        keypair: Keypair,
+        writes: Vec<PreparedCreateOrUpdate>,
+    ) -> Result<CommitDataWithOps> {
+        let write_ops = writes
+            .clone()
+            .into_iter()
+            .map(|prepare| {
+                let at_uri: AtUri = prepare.uri.try_into()?;
+                Ok(RecordCreateOrUpdateOp {
+                    action: WriteOpAction::Create,
+                    collection: at_uri.get_collection(),
+                    rkey: at_uri.get_rkey(),
+                    record: prepare.record,
+                })
+            })
+            .collect::<Result<Vec<RecordCreateOrUpdateOp>>>()?;
+        let commit = Repo::format_init_commit(
+            self.storage.clone(),
+            self.did.clone(),
+            keypair,
+            Some(write_ops),
+        )
+        .await?;
+        self.storage
+            .read()
+            .await
+            .apply_commit(commit.clone(), None)
+            .await?;
+        let write_commit_ops = writes.iter().try_fold(
+            Vec::with_capacity(writes.len()),
+            |mut acc, w| -> Result<Vec<CommitOp>> {
+                let aturi: AtUri = w.uri.clone().try_into()?;
+                acc.push(CommitOp {
+                    action: CommitAction::Create,
+                    path: format_data_key(aturi.get_collection(), aturi.get_rkey()),
+                    cid: Some(w.cid),
+                    prev: None,
+                });
+                Ok(acc)
+            },
+        )?;
+        let writes = writes
+            .into_iter()
+            .map(PreparedWrite::Create)
+            .collect::<Vec<PreparedWrite>>();
+        self.blob.process_write_blobs(writes).await?;
+        Ok(CommitDataWithOps {
+            commit_data: commit,
+            ops: write_commit_ops,
+            prev_data: None,
+        })
+    }
+
+    pub async fn process_import_repo(
+        &mut self,
+        commit: CommitData,
+        writes: Vec<PreparedWrite>,
+    ) -> Result<()> {
+        {
+            let immutable_borrow = &self;
+            // & send to indexing
+            immutable_borrow
+                .index_writes(writes.clone(), &commit.rev)
+                .await?;
+        }
+        // persist the commit to repo storage
+        self.storage
+            .read()
+            .await
+            .apply_commit(commit.clone(), None)
+            .await?;
+        // process blobs
+        self.blob.process_write_blobs(writes).await?;
+        Ok(())
+    }
+
+    pub async fn process_writes(
+        &mut self,
+        writes: Vec<PreparedWrite>,
+        swap_commit_cid: Option<Cid>,
+    ) -> Result<CommitDataWithOps> {
+        // NOTE: In the typescript PR on sync v1.1
+        // there are some safeguards added for adding
+        // very large commits and very many commits
+        // for which I'm sure we could safeguard on
+        // but may not be necessary.
+        // https://github.com/bluesky-social/atproto/pull/3585/files#diff-7627844a4a6b50190014e947d1331a96df3c64d4c5273fa0ce544f85c3c1265f
+        let commit = self.format_commit(writes.clone(), swap_commit_cid).await?;
+        {
+            let immutable_borrow = &self;
+            // & send to indexing
+            immutable_borrow
+                .index_writes(writes.clone(), &commit.commit_data.rev)
+                .await?;
+        }
+        // persist the commit to repo storage
+        self.storage
+            .read()
+            .await
+            .apply_commit(commit.commit_data.clone(), None)
+            .await?;
+        // process blobs
+        self.blob.process_write_blobs(writes).await?;
+        Ok(commit)
+    }
+
+    pub async fn get_sync_event_data(&mut self) -> Result<SyncEvtData> {
+        let current_root = self.storage.read().await.get_root_detailed().await?;
+        let blocks_and_missing = self
+            .storage
+            .read()
+            .await
+            .get_blocks(vec![current_root.cid])
+            .await?;
+        Ok(SyncEvtData {
+            cid: current_root.cid,
+            rev: current_root.rev,
+            blocks: blocks_and_missing.blocks,
+        })
+    }
+
+    pub async fn format_commit(
+        &mut self,
+        writes: Vec<PreparedWrite>,
+        swap_commit: Option<Cid>,
+    ) -> Result<CommitDataWithOps> {
+        let current_root = {
+            let storage_guard = self.storage.read().await;
+            storage_guard.get_root_detailed().await
+        };
+        if let Ok(current_root) = current_root {
+            if let Some(swap_commit) = swap_commit {
+                if !current_root.cid.eq(&swap_commit) {
+                    return Err(
+                        FormatCommitError::BadCommitSwap(current_root.cid.to_string()).into(),
+                    );
+                }
+            }
+            {
+                self.storage
+                    .write()
+                    .await
+                    .cache_rev(current_root.rev)
+                    .await?;
+            }
+            let mut new_record_cids: Vec<Cid> = vec![];
+            let mut delete_and_update_uris = vec![];
+            let mut commit_ops = vec![];
+            for write in &writes {
+                let commit_action: CommitAction = write.action().into();
+                match write.clone() {
+                    PreparedWrite::Create(c) => new_record_cids.push(c.cid),
+                    PreparedWrite::Update(u) => {
+                        new_record_cids.push(u.cid);
+                        let u_at_uri: AtUri = u.uri.try_into()?;
+                        delete_and_update_uris.push(u_at_uri);
+                    }
+                    PreparedWrite::Delete(d) => {
+                        let d_at_uri: AtUri = d.uri.try_into()?;
+                        delete_and_update_uris.push(d_at_uri)
+                    }
+                }
+                if write.swap_cid().is_none() {
+                    continue;
+                }
+                let write_at_uri: &AtUri = &write.uri().try_into()?;
+                let record = self
+                    .record
+                    .get_record(write_at_uri, None, Some(true))
+                    .await?;
+                let current_record = match record {
+                    Some(record) => Some(Cid::from_str(&record.cid)?),
+                    None => None,
+                };
+                let cid = match &write {
+                    &PreparedWrite::Delete(_) => None,
+                    &PreparedWrite::Create(w) | &PreparedWrite::Update(w) => Some(w.cid),
+                };
+                let mut op = CommitOp {
+                    action: commit_action,
+                    path: format_data_key(write_at_uri.get_collection(), write_at_uri.get_rkey()),
+                    cid,
+                    prev: None,
+                };
+                if current_record.is_some() {
+                    op.prev = current_record;
+                };
+                commit_ops.push(op);
+                match write {
+                    // There should be no current record for a create
+                    PreparedWrite::Create(_) if write.swap_cid().is_some() => {
+                        Err::<(), anyhow::Error>(
+                            FormatCommitError::BadRecordSwap(format!("{:?}", current_record))
+                                .into(),
+                        )
+                    }
+                    // There should be a current record for an update
+                    PreparedWrite::Update(_) if write.swap_cid().is_none() => {
+                        Err::<(), anyhow::Error>(
+                            FormatCommitError::BadRecordSwap(format!("{:?}", current_record))
+                                .into(),
+                        )
+                    }
+                    // There should be a current record for a delete
+                    PreparedWrite::Delete(_) if write.swap_cid().is_none() => {
+                        Err::<(), anyhow::Error>(
+                            FormatCommitError::BadRecordSwap(format!("{:?}", current_record))
+                                .into(),
+                        )
+                    }
+                    _ => Ok::<(), anyhow::Error>(()),
+                }?;
+                match (current_record, write.swap_cid()) {
+                    (Some(current_record), Some(swap_cid)) if current_record.eq(swap_cid) => {
+                        Ok::<(), anyhow::Error>(())
+                    }
+                    _ => Err::<(), anyhow::Error>(
+                        FormatCommitError::RecordSwapMismatch(format!("{:?}", current_record))
+                            .into(),
+                    ),
+                }?;
+            }
+            let mut repo = Repo::load(self.storage.clone(), Some(current_root.cid)).await?;
+            let previous_data = repo.commit.data;
+            let write_ops: Vec<RecordWriteOp> = writes
+                .into_iter()
+                .map(write_to_op)
+                .collect::<Result<Vec<RecordWriteOp>>>()?;
+            // @TODO: Use repo signing key global config
+            let secp = Secp256k1::new();
+            let repo_private_key = env::var("PDS_REPO_SIGNING_KEY_K256_PRIVATE_KEY_HEX")
+                .expect("PDS_REPO_SIGNING_KEY_K256_PRIVATE_KEY_HEX not set");
+            let repo_secret_key = SecretKey::from_slice(
+                &hex::decode(repo_private_key.as_bytes()).expect("Failed to decode hex"),
+            )
+            .expect("Failed to create secret key from hex");
+            let repo_signing_key = Keypair::from_secret_key(&secp, &repo_secret_key);
+
+            let mut commit = repo
+                .format_commit(RecordWriteEnum::List(write_ops), repo_signing_key)
+                .await?;
+
+            // find blocks that would be deleted but are referenced by another record
+            let duplicate_record_cids = self
+                .get_duplicate_record_cids(commit.removed_cids.to_list(), delete_and_update_uris)
+                .await?;
+            for cid in duplicate_record_cids {
+                commit.removed_cids.delete(cid)
+            }
+
+            // find blocks that are relevant to ops but not included in diff
+            // (for instance a record that was moved but cid stayed the same)
+            let new_record_blocks = commit.relevant_blocks.get_many(new_record_cids)?;
+            if !new_record_blocks.missing.is_empty() {
+                let missing_blocks = {
+                    let storage_guard = self.storage.read().await;
+                    storage_guard.get_blocks(new_record_blocks.missing).await?
+                };
+                commit.relevant_blocks.add_map(missing_blocks.blocks)?;
+            }
+            let commit_with_data_ops = CommitDataWithOps {
+                ops: commit_ops,
+                commit_data: commit,
+                prev_data: Some(previous_data),
+            };
+            Ok(commit_with_data_ops)
+        } else {
+            Err(FormatCommitError::MissingRepoRoot(self.did.clone()).into())
+        }
+    }
+
+    pub async fn index_writes(&self, writes: Vec<PreparedWrite>, rev: &str) -> Result<()> {
+        let now: &str = &rsky_common::now();
+
+        drop(
+            stream::iter(writes)
+                .then(async move |write| {
+                    match write {
+                        PreparedWrite::Create(write) => {
+                            let write_at_uri: AtUri = write.uri.try_into()?;
+                            self.record
+                                .index_record(
+                                    write_at_uri.clone(),
+                                    write.cid,
+                                    Some(write.record),
+                                    Some(write.action),
+                                    rev.to_owned(),
+                                    Some(now.to_owned()),
+                                )
+                                .await?;
+                        }
+                        PreparedWrite::Update(write) => {
+                            let write_at_uri: AtUri = write.uri.try_into()?;
+                            self.record
+                                .index_record(
+                                    write_at_uri.clone(),
+                                    write.cid,
+                                    Some(write.record),
+                                    Some(write.action),
+                                    rev.to_owned(),
+                                    Some(now.to_owned()),
+                                )
+                                .await?;
+                        }
+                        PreparedWrite::Delete(write) => {
+                            let write_at_uri: AtUri = write.uri.try_into()?;
+                            self.record.delete_record(&write_at_uri).await?;
+                        }
+                    }
+                    Ok::<(), anyhow::Error>(())
+                })
+                .collect::<Vec<_>>()
+                .await
+                .into_iter()
+                .collect::<Result<Vec<_>, _>>()?,
+        );
+        Ok(())
+    }
+
+    pub async fn destroy(&mut self) -> Result<()> {
+        let did: String = self.did.clone();
+        use crate::schema::actor_store::blob::dsl as BlobSchema;
+
+        let blob_rows: Vec<String> = self
+            .storage
+            .read()
+            .await
+            .db
+            .interact(move |conn| {
+                BlobSchema::blob
+                    .filter(BlobSchema::did.eq(did))
+                    .select(BlobSchema::cid)
+                    .get_results(conn)
+            })
+            .await
+            .expect("Failed to get blob rows")?;
+        let cids = blob_rows
+            .into_iter()
+            .map(|row| Ok(Cid::from_str(&row)?))
+            .collect::<Result<Vec<Cid>>>()?;
+        drop(
+            stream::iter(cids.chunks(500))
+                .then(|chunk| async { self.blob.blobstore.delete_many(chunk.to_vec()).await })
+                .collect::<Vec<_>>()
+                .await
+                .into_iter()
+                .collect::<Result<Vec<_>, _>>()?,
+        );
+        Ok(())
+    }
+
+    pub async fn get_duplicate_record_cids(
+        &self,
+        cids: Vec<Cid>,
+        touched_uris: Vec<AtUri>,
+    ) -> Result<Vec<Cid>> {
+        if touched_uris.is_empty() || cids.is_empty() {
+            return Ok(vec![]);
+        }
+        let did: String = self.did.clone();
+        use crate::schema::actor_store::record::dsl as RecordSchema;
+
+        let cid_strs: Vec<String> = cids.into_iter().map(|c| c.to_string()).collect();
+        let touched_uri_strs: Vec<String> = touched_uris.iter().map(|t| t.to_string()).collect();
+        let res: Vec<String> = self
+            .storage
+            .read()
+            .await
+            .db
+            .interact(move |conn| {
+                RecordSchema::record
+                    .filter(RecordSchema::did.eq(did))
+                    .filter(RecordSchema::cid.eq_any(cid_strs))
+                    .filter(RecordSchema::uri.ne_all(touched_uri_strs))
+                    .select(RecordSchema::cid)
+                    .get_results(conn)
+            })
+            .await
+            .expect("Failed to get duplicate record cids")?;
+        res.into_iter()
+            .map(|row| Cid::from_str(&row).map_err(anyhow::Error::new))
+            .collect::<Result<Vec<Cid>>>()
+    }
+}
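
End to end, a record write now flows through `format_commit` (swap checks, signing, de-duplication of removed blocks), `index_writes`, `apply_commit`, and `process_write_blobs`. A sketch of the happy path for a caller, assuming the server has already built the per-actor pool map and prepared its writes, and that `PDS_REPO_SIGNING_KEY_K256_PRIVATE_KEY_HEX` is set, since `format_commit` reads it:

    use anyhow::Result;
    use std::collections::HashMap;

    // Hypothetical driver; ActorStorage, PreparedWrite, and CommitDataWithOps
    // are the types used by this module.
    async fn commit_prepared_writes(
        pools: &HashMap<String, ActorStorage>,
        did: &String,
        writes: Vec<PreparedWrite>,
    ) -> Result<CommitDataWithOps> {
        let mut store = ActorStore::from_actor_pools(did, pools).await;
        // None = no swap-commit precondition; pass Some(cid) for compare-and-swap
        store.process_writes(writes, None).await
    }

Passing `Some(cid)` as the second argument turns the write into a compare-and-swap against the current repo root, failing with `BadCommitSwapError` on a mismatch.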
+31 -23  src/actor_store/preference.rs
···
 //!
 //! Modified for SQLite backend

+use crate::models::actor_store::AccountPref;
 use anyhow::{Result, bail};
 use diesel::*;
 use rsky_lexicon::app::bsky::actor::RefPreferences;
 use rsky_pds::actor_store::preference::pref_match_namespace;
 use rsky_pds::actor_store::preference::util::pref_in_scope;
 use rsky_pds::auth_verifier::AuthScope;
-use rsky_pds::db::DbConn;
-use rsky_pds::models;
-use rsky_pds::models::AccountPref;
-use std::sync::Arc;

 pub struct PreferenceReader {
     pub did: String,
-    pub db: Arc<DbConn>,
+    pub db: deadpool_diesel::Pool<
+        deadpool_diesel::Manager<SqliteConnection>,
+        deadpool_diesel::sqlite::Object,
+    >,
 }

 impl PreferenceReader {
-    pub fn new(did: String, db: Arc<DbConn>) -> Self {
-        PreferenceReader { did, db }
+    pub const fn new(
+        did: String,
+        db: deadpool_diesel::Pool<
+            deadpool_diesel::Manager<SqliteConnection>,
+            deadpool_diesel::sqlite::Object,
+        >,
+    ) -> Self {
+        Self { did, db }
     }

     pub async fn get_preferences(
···
         namespace: Option<String>,
         scope: AuthScope,
     ) -> Result<Vec<RefPreferences>> {
-        use rsky_pds::schema::pds::account_pref::dsl as AccountPrefSchema;
+        use crate::schema::actor_store::account_pref::dsl as AccountPrefSchema;

         let did = self.did.clone();
         self.db
-            .run(move |conn| {
+            .get()
+            .await?
+            .interact(move |conn| {
                 let prefs_res = AccountPrefSchema::account_pref
                     .filter(AccountPrefSchema::did.eq(&did))
                     .select(AccountPref::as_select())
···
                     .load(conn)?;
                 let account_prefs = prefs_res
                     .into_iter()
-                    .filter(|pref| match &namespace {
-                        None => true,
-                        Some(namespace) => pref_match_namespace(namespace, &pref.name),
+                    .filter(|pref| {
+                        namespace
+                            .as_ref()
+                            .is_none_or(|namespace| pref_match_namespace(namespace, &pref.name))
                     })
                     .filter(|pref| pref_in_scope(scope.clone(), pref.name.clone()))
                     .map(|pref| {
···
                 Ok(account_prefs)
             })
             .await
+            .expect("Failed to get preferences")
     }

     #[tracing::instrument(skip_all)]
···
         scope: AuthScope,
     ) -> Result<()> {
         let did = self.did.clone();
-        self.db
-            .run(move |conn| {
+        self.db.get().await?
+            .interact(move |conn| {
                 match values
                     .iter()
                     .all(|value| pref_match_namespace(&namespace, &value.get_type()))
                 {
                     false => bail!("Some preferences are not in the {namespace} namespace"),
                     true => {
-                        let not_in_scope = values
-                            .iter()
-                            .filter(|value| !pref_in_scope(scope.clone(), value.get_type()))
-                            .collect::<Vec<&RefPreferences>>();
-                        if !not_in_scope.is_empty() {
+                        if values
+                            .iter().any(|value| !pref_in_scope(scope.clone(), value.get_type())) {
                             tracing::info!(
                                 "@LOG: PreferenceReader::put_preferences() debug scope: {:?}, values: {:?}",
                                 scope,
···
                             bail!("Do not have authorization to set preferences.");
                         }
                         // get all current prefs for user and prep new pref rows
-                        use rsky_pds::schema::pds::account_pref::dsl as AccountPrefSchema;
+                        use crate::schema::actor_store::account_pref::dsl as AccountPrefSchema;
                         let all_prefs = AccountPrefSchema::account_pref
                             .filter(AccountPrefSchema::did.eq(&did))
-                            .select(models::AccountPref::as_select())
+                            .select(AccountPref::as_select())
                             .load(conn)?;
                         let put_prefs = values
                             .into_iter()
···
                             .collect::<Vec<i32>>();
                         // replace all prefs in given namespace
                         if !all_pref_ids_in_namespace.is_empty() {
-                            delete(AccountPrefSchema::account_pref)
+                            _ = delete(AccountPrefSchema::account_pref)
                                 .filter(AccountPrefSchema::id.eq_any(all_pref_ids_in_namespace))
                                 .execute(conn)?;
                         }
                         if !put_prefs.is_empty() {
-                            insert_into(AccountPrefSchema::account_pref)
+                            _ = insert_into(AccountPrefSchema::account_pref)
                                 .values(
                                     put_prefs
                                         .into_iter()
···
             }
         })
         .await
+        .expect("Failed to put preferences")
     }
 }
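
For reference, the namespace gate used by both methods has prefix semantics: a preference belongs to a namespace when its `$type` equals the namespace or starts with it plus a dot. A standalone sketch of that shape (the real check is rsky's `pref_match_namespace`; `Option::is_none_or`, used above, is std and stable since Rust 1.82):

    // `None` means "no namespace filter": every preference matches.
    fn in_namespace(namespace: Option<&str>, pref_name: &str) -> bool {
        namespace.is_none_or(|ns| pref_name == ns || pref_name.starts_with(&format!("{ns}.")))
    }

    fn main() {
        assert!(in_namespace(None, "app.bsky.actor.defs#savedFeedsPref"));
        assert!(in_namespace(Some("app.bsky"), "app.bsky.actor.defs#savedFeedsPref"));
        assert!(!in_namespace(Some("chat.bsky"), "app.bsky.actor.defs#savedFeedsPref"));
    }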
+178
-98
src/actor_store/record.rs
+178
-98
src/actor_store/record.rs
···
4
4
//!
5
5
//! Modified for SQLite backend
6
6
7
-
use anyhow::{Error, Result, bail};
7
+
use crate::models::actor_store::{Backlink, Record, RepoBlock};
8
+
use anyhow::{Result, bail};
8
9
use cidv10::Cid;
10
+
use diesel::result::Error;
9
11
use diesel::*;
10
12
use futures::stream::{self, StreamExt};
11
13
use rsky_lexicon::com::atproto::admin::StatusAttr;
12
-
use rsky_pds::actor_store::record::{GetRecord, RecordsForCollection, get_backlinks};
13
-
use rsky_pds::models::{Backlink, Record};
14
-
use rsky_repo::types::{RepoRecord, WriteOpAction};
14
+
use rsky_pds::actor_store::record::{GetRecord, RecordsForCollection};
15
+
use rsky_repo::storage::Ipld;
16
+
use rsky_repo::types::{Ids, Lex, RepoRecord, WriteOpAction};
15
17
use rsky_repo::util::cbor_to_lex_record;
16
18
use rsky_syntax::aturi::AtUri;
19
+
use rsky_syntax::aturi_validation::ensure_valid_at_uri;
20
+
use rsky_syntax::did::ensure_valid_did;
21
+
use serde_json::Value as JsonValue;
17
22
use std::env;
18
23
use std::str::FromStr;
19
24
20
-
use crate::actor_store::db::ActorDb;
25
+
// @NOTE in the future this can be replaced with a more generic routine that pulls backlinks based on lex docs.
26
+
// For now, we just want to ensure we're tracking links from follows, blocks, likes, and reposts.
27
+
pub fn get_backlinks(uri: &AtUri, record: &RepoRecord) -> Result<Vec<Backlink>> {
28
+
if let Some(Lex::Ipld(Ipld::Json(JsonValue::String(record_type)))) = record.get("$type") {
29
+
if record_type == Ids::AppBskyGraphFollow.as_str()
30
+
|| record_type == Ids::AppBskyGraphBlock.as_str()
31
+
{
32
+
if let Some(Lex::Ipld(Ipld::Json(JsonValue::String(subject)))) = record.get("subject") {
33
+
match ensure_valid_did(uri) {
34
+
Ok(_) => {
35
+
return Ok(vec![Backlink {
36
+
uri: uri.to_string(),
37
+
path: "subject".to_owned(),
38
+
link_to: subject.clone(),
39
+
}]);
40
+
}
41
+
Err(e) => bail!("get_backlinks Error: invalid did {}", e),
42
+
};
43
+
}
44
+
} else if record_type == Ids::AppBskyFeedLike.as_str()
45
+
|| record_type == Ids::AppBskyFeedRepost.as_str()
46
+
{
47
+
if let Some(Lex::Map(ref_object)) = record.get("subject") {
48
+
if let Some(Lex::Ipld(Ipld::Json(JsonValue::String(subject_uri)))) =
49
+
ref_object.get("uri")
50
+
{
51
+
match ensure_valid_at_uri(uri) {
52
+
Ok(_) => {
53
+
return Ok(vec![Backlink {
54
+
uri: uri.to_string(),
55
+
path: "subject.uri".to_owned(),
56
+
link_to: subject_uri.clone(),
57
+
}]);
58
+
}
59
+
Err(e) => bail!("get_backlinks Error: invalid AtUri {}", e),
60
+
};
61
+
}
62
+
}
63
+
}
64
+
}
65
+
Ok(Vec::new())
66
+
}
21
67
22
68
/// Combined handler for record operations with both read and write capabilities.
23
69
pub(crate) struct RecordReader {
24
70
/// Database connection.
25
-
pub db: ActorDb,
71
+
pub db: deadpool_diesel::Pool<
72
+
deadpool_diesel::Manager<SqliteConnection>,
73
+
deadpool_diesel::sqlite::Object,
74
+
>,
26
75
/// DID of the actor.
27
76
pub did: String,
28
77
}
29
78
30
79
impl RecordReader {
31
80
/// Create a new record handler.
32
-
pub(crate) fn new(did: String, db: ActorDb) -> Self {
81
+
pub(crate) const fn new(
82
+
did: String,
83
+
db: deadpool_diesel::Pool<
84
+
deadpool_diesel::Manager<SqliteConnection>,
85
+
deadpool_diesel::sqlite::Object,
86
+
>,
87
+
) -> Self {
33
88
Self { did, db }
34
89
}
35
90
36
91
/// Count the total number of records.
37
92
pub(crate) async fn record_count(&mut self) -> Result<i64> {
38
-
use rsky_pds::schema::pds::record::dsl::*;
93
+
use crate::schema::actor_store::record::dsl::*;
39
94
40
95
let other_did = self.did.clone();
41
96
self.db
42
-
.run(move |conn| {
97
+
.get()
98
+
.await?
99
+
.interact(move |conn| {
43
100
let res: i64 = record.filter(did.eq(&other_did)).count().get_result(conn)?;
44
101
Ok(res)
45
102
})
46
103
.await
104
+
.expect("Failed to count records")
47
105
}
48
106
49
107
/// List all collections in the repository.
50
108
pub(crate) async fn list_collections(&self) -> Result<Vec<String>> {
51
-
use rsky_pds::schema::pds::record::dsl::*;
109
+
use crate::schema::actor_store::record::dsl::*;
52
110
53
111
let other_did = self.did.clone();
54
112
self.db
55
-
.run(move |conn| {
113
+
.get()
114
+
.await?
115
+
.interact(move |conn| {
56
116
let collections = record
57
117
.filter(did.eq(&other_did))
58
118
.select(collection)
···
63
123
Ok(collections)
64
124
})
65
125
.await
126
+
.expect("Failed to list collections")
66
127
}
67
128
68
129
/// List records for a specific collection.
···
76
137
rkey_end: Option<String>,
77
138
include_soft_deleted: Option<bool>,
78
139
) -> Result<Vec<RecordsForCollection>> {
79
-
use rsky_pds::schema::pds::record::dsl as RecordSchema;
80
-
use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
140
+
use crate::schema::actor_store::record::dsl as RecordSchema;
141
+
use crate::schema::actor_store::repo_block::dsl as RepoBlockSchema;
81
142
82
-
let include_soft_deleted: bool = if let Some(include_soft_deleted) = include_soft_deleted {
83
-
include_soft_deleted
84
-
} else {
85
-
false
86
-
};
143
+
let include_soft_deleted: bool = include_soft_deleted.unwrap_or(false);
87
144
let mut builder = RecordSchema::record
88
145
             .inner_join(RepoBlockSchema::repo_block.on(RepoBlockSchema::cid.eq(RecordSchema::cid)))
             .limit(limit)
-            .select((
-                rsky_pds::models::Record::as_select(),
-                rsky_pds::models::RepoBlock::as_select(),
-            ))
+            .select((Record::as_select(), RepoBlock::as_select()))
             .filter(RecordSchema::did.eq(self.did.clone()))
             .filter(RecordSchema::collection.eq(collection))
             .into_boxed();
···
                 builder = builder.filter(RecordSchema::rkey.lt(rkey_end));
             }
         }
-        let res: Vec<(rsky_pds::models::Record, rsky_pds::models::RepoBlock)> =
-            self.db.run(move |conn| builder.load(conn)).await?;
+        let res: Vec<(Record, RepoBlock)> = self
+            .db
+            .get()
+            .await?
+            .interact(move |conn| builder.load(conn))
+            .await
+            .expect("Failed to load records")?;
         res.into_iter()
             .map(|row| {
                 Ok(RecordsForCollection {
···
         cid: Option<String>,
         include_soft_deleted: Option<bool>,
     ) -> Result<Option<GetRecord>> {
-        use rsky_pds::schema::pds::record::dsl as RecordSchema;
-        use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
+        use crate::schema::actor_store::record::dsl as RecordSchema;
+        use crate::schema::actor_store::repo_block::dsl as RepoBlockSchema;

-        let include_soft_deleted: bool = if let Some(include_soft_deleted) = include_soft_deleted {
-            include_soft_deleted
-        } else {
-            false
-        };
+        let include_soft_deleted: bool = include_soft_deleted.unwrap_or(false);
         let mut builder = RecordSchema::record
             .inner_join(RepoBlockSchema::repo_block.on(RepoBlockSchema::cid.eq(RecordSchema::cid)))
-            .select((
-                rsky_pds::models::Record::as_select(),
-                rsky_pds::models::RepoBlock::as_select(),
-            ))
+            .select((Record::as_select(), RepoBlock::as_select()))
             .filter(RecordSchema::uri.eq(uri.to_string()))
             .into_boxed();
         if !include_soft_deleted {
···
         if let Some(cid) = cid {
             builder = builder.filter(RecordSchema::cid.eq(cid));
         }
-        let record: Option<(rsky_pds::models::Record, rsky_pds::models::RepoBlock)> = self
+        let record: Option<(Record, RepoBlock)> = self
             .db
-            .run(move |conn| builder.first(conn).optional())
-            .await?;
+            .get()
+            .await?
+            .interact(move |conn| builder.first(conn).optional())
+            .await
+            .expect("Failed to load record")?;
         if let Some(record) = record {
             Ok(Some(GetRecord {
                 uri: record.0.uri,
···
         cid: Option<String>,
         include_soft_deleted: Option<bool>,
     ) -> Result<bool> {
-        use rsky_pds::schema::pds::record::dsl as RecordSchema;
+        use crate::schema::actor_store::record::dsl as RecordSchema;

-        let include_soft_deleted: bool = if let Some(include_soft_deleted) = include_soft_deleted {
-            include_soft_deleted
-        } else {
-            false
-        };
+        let include_soft_deleted: bool = include_soft_deleted.unwrap_or(false);
         let mut builder = RecordSchema::record
             .select(RecordSchema::uri)
             .filter(RecordSchema::uri.eq(uri))
···
         }
         let record_uri = self
             .db
-            .run(move |conn| builder.first::<String>(conn).optional())
-            .await?;
-        Ok(!!record_uri.is_some())
+            .get()
+            .await?
+            .interact(move |conn| builder.first::<String>(conn).optional())
+            .await
+            .expect("Failed to check record")?;
+        Ok(record_uri.is_some())
     }

     /// Get the takedown status of a record.
···
         &self,
         uri: String,
     ) -> Result<Option<StatusAttr>> {
-        use rsky_pds::schema::pds::record::dsl as RecordSchema;
+        use crate::schema::actor_store::record::dsl as RecordSchema;

         let res = self
             .db
-            .run(move |conn| {
+            .get()
+            .await?
+            .interact(move |conn| {
                 RecordSchema::record
                     .select(RecordSchema::takedownRef)
                     .filter(RecordSchema::uri.eq(uri))
                     .first::<Option<String>>(conn)
                     .optional()
             })
-            .await?;
-        if let Some(res) = res {
-            if let Some(takedown_ref) = res {
-                Ok(Some(StatusAttr {
-                    applied: true,
-                    r#ref: Some(takedown_ref),
-                }))
-            } else {
-                Ok(Some(StatusAttr {
-                    applied: false,
-                    r#ref: None,
-                }))
-            }
-        } else {
-            Ok(None)
-        }
+            .await
+            .expect("Failed to get takedown status")?;
+        res.map_or_else(
+            || Ok(None),
+            |res| {
+                res.map_or_else(
+                    || {
+                        Ok(Some(StatusAttr {
+                            applied: false,
+                            r#ref: None,
+                        }))
+                    },
+                    |takedown_ref| {
+                        Ok(Some(StatusAttr {
+                            applied: true,
+                            r#ref: Some(takedown_ref),
+                        }))
+                    },
+                )
+            },
+        )
     }
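
A note on the hunk above: the nested `map_or_else` flattens the `Option<Option<String>>` produced by selecting the nullable `takedownRef` column with `.optional()`. Purely as an illustrative sketch (not part of this change), a plain `match` over the same value can be easier to scan:

```rust
// Sketch only: equivalent handling of `res: Option<Option<String>>`,
// returning `Ok(status)` from the surrounding function.
let status = match res {
    // No record row for this URI at all.
    None => None,
    // Row exists but carries no takedown reference.
    Some(None) => Some(StatusAttr { applied: false, r#ref: None }),
    // Row exists and is taken down.
    Some(Some(takedown_ref)) => Some(StatusAttr { applied: true, r#ref: Some(takedown_ref) }),
};
```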

     /// Get the current CID for a record URI.
     pub(crate) async fn get_current_record_cid(&self, uri: String) -> Result<Option<Cid>> {
-        use rsky_pds::schema::pds::record::dsl as RecordSchema;
+        use crate::schema::actor_store::record::dsl as RecordSchema;

         let res = self
             .db
-            .run(move |conn| {
+            .get()
+            .await?
+            .interact(move |conn| {
                 RecordSchema::record
                     .select(RecordSchema::cid)
                     .filter(RecordSchema::uri.eq(uri))
                     .first::<String>(conn)
                     .optional()
             })
-            .await?;
+            .await
+            .expect("Failed to get current CID")?;
         if let Some(res) = res {
             Ok(Some(Cid::from_str(&res)?))
         } else {
···
         path: String,
         link_to: String,
     ) -> Result<Vec<Record>> {
-        use rsky_pds::schema::pds::backlink::dsl as BacklinkSchema;
-        use rsky_pds::schema::pds::record::dsl as RecordSchema;
+        use crate::schema::actor_store::backlink::dsl as BacklinkSchema;
+        use crate::schema::actor_store::record::dsl as RecordSchema;

         let res = self
             .db
-            .run(move |conn| {
+            .get()
+            .await?
+            .interact(move |conn| {
                 RecordSchema::record
                     .inner_join(
                         BacklinkSchema::backlink.on(BacklinkSchema::uri.eq(RecordSchema::uri)),
···
                     .filter(RecordSchema::collection.eq(collection))
                     .load::<Record>(conn)
             })
-            .await?;
+            .await
+            .expect("Failed to get backlinks")?;
         Ok(res)
     }

···
         let rkey = uri.get_rkey();
         let hostname = uri.get_hostname().to_string();
         let action = action.unwrap_or(WriteOpAction::Create);
-        let indexed_at = timestamp.unwrap_or_else(|| rsky_common::now());
+        let indexed_at = timestamp.unwrap_or_else(rsky_common::now);
         let row = Record {
             did: self.did.clone(),
             uri: uri.to_string(),
···
             bail!("Expected indexed URI to contain a record key")
         }

-        use rsky_pds::schema::pds::record::dsl as RecordSchema;
+        use crate::schema::actor_store::record::dsl as RecordSchema;

         // Track current version of record
         let (record, uri) = self
             .db
-            .run(move |conn| {
-                insert_into(RecordSchema::record)
+            .get()
+            .await?
+            .interact(move |conn| {
+                _ = insert_into(RecordSchema::record)
                     .values(row)
                     .on_conflict(RecordSchema::uri)
                     .do_update()
···
                     .execute(conn)?;
                 Ok::<_, Error>((record, uri))
             })
-            .await?;
+            .await
+            .expect("Failed to index record")?;

         if let Some(record) = record {
             // Maintain backlinks
             let backlinks = get_backlinks(&uri, &record)?;
-            if let WriteOpAction::Update = action {
+            if action == WriteOpAction::Update {
                 // On update just recreate backlinks from scratch for the record, so we can clear out
                 // the old ones. E.g. for weird cases like updating a follow to be for a different did.
                 self.remove_backlinks_by_uri(&uri).await?;
···
     #[tracing::instrument(skip_all)]
     pub(crate) async fn delete_record(&self, uri: &AtUri) -> Result<()> {
         tracing::debug!("@LOG DEBUG RecordReader::delete_record, deleting indexed record {uri}");
-        use rsky_pds::schema::pds::backlink::dsl as BacklinkSchema;
-        use rsky_pds::schema::pds::record::dsl as RecordSchema;
+        use crate::schema::actor_store::backlink::dsl as BacklinkSchema;
+        use crate::schema::actor_store::record::dsl as RecordSchema;
         let uri = uri.to_string();
         self.db
-            .run(move |conn| {
-                delete(RecordSchema::record)
+            .get()
+            .await?
+            .interact(move |conn| {
+                _ = delete(RecordSchema::record)
                     .filter(RecordSchema::uri.eq(&uri))
                     .execute(conn)?;
-                delete(BacklinkSchema::backlink)
+                _ = delete(BacklinkSchema::backlink)
                     .filter(BacklinkSchema::uri.eq(&uri))
                     .execute(conn)?;
                 tracing::debug!(
···
                 Ok(())
             })
             .await
+            .expect("Failed to delete record")
     }

     /// Remove backlinks for a URI.
     pub(crate) async fn remove_backlinks_by_uri(&self, uri: &AtUri) -> Result<()> {
-        use rsky_pds::schema::pds::backlink::dsl as BacklinkSchema;
+        use crate::schema::actor_store::backlink::dsl as BacklinkSchema;
         let uri = uri.to_string();
         self.db
-            .run(move |conn| {
-                delete(BacklinkSchema::backlink)
+            .get()
+            .await?
+            .interact(move |conn| {
+                _ = delete(BacklinkSchema::backlink)
                     .filter(BacklinkSchema::uri.eq(uri))
                     .execute(conn)?;
                 Ok(())
             })
             .await
+            .expect("Failed to remove backlinks")
     }

     /// Add backlinks to the database.
     pub(crate) async fn add_backlinks(&self, backlinks: Vec<Backlink>) -> Result<()> {
-        if backlinks.len() == 0 {
+        if backlinks.is_empty() {
             Ok(())
         } else {
-            use rsky_pds::schema::pds::backlink::dsl as BacklinkSchema;
+            use crate::schema::actor_store::backlink::dsl as BacklinkSchema;
             self.db
-                .run(move |conn| {
-                    insert_into(BacklinkSchema::backlink)
+                .get()
+                .await?
+                .interact(move |conn| {
+                    _ = insert_or_ignore_into(BacklinkSchema::backlink)
                         .values(&backlinks)
-                        .on_conflict_do_nothing()
                         .execute(conn)?;
                     Ok(())
                 })
                 .await
+                .expect("Failed to add backlinks")
         }
     }

···
         uri: &AtUri,
         takedown: StatusAttr,
     ) -> Result<()> {
-        use rsky_pds::schema::pds::record::dsl as RecordSchema;
+        use crate::schema::actor_store::record::dsl as RecordSchema;

         let takedown_ref: Option<String> = match takedown.applied {
-            true => match takedown.r#ref {
-                Some(takedown_ref) => Some(takedown_ref),
-                None => Some(rsky_common::now()),
-            },
+            true => takedown
+                .r#ref
+                .map_or_else(|| Some(rsky_common::now()), Some),
             false => None,
         };
         let uri_string = uri.to_string();

         self.db
-            .run(move |conn| {
-                update(RecordSchema::record)
+            .get()
+            .await?
+            .interact(move |conn| {
+                _ = update(RecordSchema::record)
                     .filter(RecordSchema::uri.eq(uri_string))
                     .set(RecordSchema::takedownRef.eq(takedown_ref))
                     .execute(conn)?;
                 Ok(())
             })
             .await
+            .expect("Failed to update takedown status")
     }
 }
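
The pattern repeated through this file is the heart of the migration: the old `db.run(...)` helper gives way to checking a connection out of a deadpool-diesel pool and running the synchronous diesel query on a blocking thread via `interact`. A minimal self-contained sketch of that flow, assuming a tokio runtime; the database path, pool size, and query here are placeholders, not values from this repo:

```rust
use deadpool_diesel::Runtime;
use deadpool_diesel::sqlite::{Manager, Pool};
use diesel::prelude::*;

async fn demo() -> anyhow::Result<()> {
    // A pool of blocking SQLite connections, driven from async code.
    let manager = Manager::new("data/sqlite.db", Runtime::Tokio1);
    let pool = Pool::builder(manager).max_size(4).build()?;

    let affected = pool
        .get() // async checkout; fails if the pool cannot produce a connection
        .await?
        .interact(|conn| {
            // Runs on a dedicated blocking thread: plain synchronous diesel.
            diesel::sql_query("SELECT 1").execute(conn)
        })
        .await
        .expect("interact task panicked")?; // outer Err means the closure panicked or was aborted
    println!("affected rows: {affected}");
    Ok(())
}
```

Note the two error layers the refactored code now handles on every call: the task-level `InteractError` (treated as fatal with `expect`, as in the hunks above) and the inner diesel `QueryResult`, which keeps flowing through `?`.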
+279 -172 src/actor_store/sql_blob.rs
···
-use std::{path::PathBuf, str::FromStr as _};
-
-use anyhow::Result;
+//! SQL-based blob storage implementation
+#![expect(
+    clippy::pub_use,
+    clippy::single_char_lifetime_names,
+    unused_qualifications,
+    unnameable_types
+)]
+use anyhow::{Context, Result};
 use cidv10::Cid;
-use rsky_common::get_random_str;
+use diesel::prelude::*;

-use crate::db::DatabaseConnection;
-
-/// Type for stream of blob data
-pub type BlobStream = Box<dyn std::io::Read + Send>;
-
-/// Placeholder implementation for blob store
-#[derive(Clone)]
-pub(crate) struct BlobStoreSql {
-    client: DatabaseConnection,
-    path: PathBuf,
+/// ByteStream implementation for blob data
+pub struct ByteStream {
+    pub bytes: Vec<u8>,
 }

-impl BlobStoreSql {
-    pub fn new(did: String, cfg: &SdkConfig) -> Self {
-        // let client = aws_sdk_s3::Client::new(cfg);
-        // BlobStorePlaceholder {
-        //     client,
-        //     bucket: did,
-        // }
-        todo!();
+impl ByteStream {
+    pub const fn new(bytes: Vec<u8>) -> Self {
+        Self { bytes }
     }

-    pub fn creator(cfg: &SdkConfig) -> Box<dyn Fn(String) -> BlobStoreSql + '_> {
-        Box::new(move |did: String| BlobStoreSql::new(did, cfg))
+    pub async fn collect(self) -> Result<Vec<u8>> {
+        Ok(self.bytes)
     }
+}

-    fn gen_key(&self) -> String {
-        get_random_str()
-    }
+/// SQL-based implementation of blob storage
+pub struct BlobStoreSql {
+    /// Database connection for metadata
+    pub db: deadpool_diesel::Pool<
+        deadpool_diesel::Manager<SqliteConnection>,
+        deadpool_diesel::sqlite::Object,
+    >,
+    /// DID of the actor
+    pub did: String,
+}
+
+/// Blob table structure for SQL operations
+#[derive(Queryable, Insertable, Debug)]
+#[diesel(table_name = blobs)]
+struct BlobEntry {
+    cid: String,
+    did: String,
+    data: Vec<u8>,
+    size: i32,
+    mime_type: String,
+    quarantined: bool,
+}

-    fn get_tmp_path(&self, key: &String) -> String {
-        // format!("tmp/{0}/{1}", self.bucket, key)
-        todo!();
+// Table definition for blobs
+table! {
+    blobs (cid, did) {
+        cid -> Text,
+        did -> Text,
+        data -> Binary,
+        size -> Integer,
+        mime_type -> Text,
+        quarantined -> Bool,
     }
+}

-    fn get_stored_path(&self, cid: Cid) -> String {
-        // format!("blocks/{0}/{1}", self.bucket, cid)
-        todo!();
+impl BlobStoreSql {
+    /// Create a new SQL-based blob store for the given DID
+    pub const fn new(
+        did: String,
+        db: deadpool_diesel::Pool<
+            deadpool_diesel::Manager<SqliteConnection>,
+            deadpool_diesel::sqlite::Object,
+        >,
+    ) -> Self {
+        Self { db, did }
     }

-    fn get_quarantined_path(&self, cid: Cid) -> String {
-        // format!("quarantine/{0}/{1}", self.bucket, cid)
-        todo!();
+    /// Create a factory function for blob stores
+    pub fn creator(
+        db: deadpool_diesel::Pool<
+            deadpool_diesel::Manager<SqliteConnection>,
+            deadpool_diesel::sqlite::Object,
+        >,
+    ) -> Box<dyn Fn(String) -> BlobStoreSql> {
+        let db_clone = db.clone();
+        Box::new(move |did: String| BlobStoreSql::new(did, db_clone.clone()))
     }

+    /// Store a blob temporarily - now just stores permanently with a key returned for API compatibility
     pub async fn put_temp(&self, bytes: Vec<u8>) -> Result<String> {
-        let key = self.gen_key();
-        // let body = ByteStream::from(bytes);
-        // self.client
-        //     .put_object()
-        //     .body(body)
-        //     .bucket(&self.bucket)
-        //     .key(self.get_tmp_path(&key))
-        //     .acl(ObjectCannedAcl::PublicRead)
-        //     .send()
-        //     .await?;
-        // Ok(key)
-        todo!();
+        // Generate a unique key as a CID based on the data
+        // use sha2::{Digest, Sha256};
+        // let digest = Sha256::digest(&bytes);
+        // let key = hex::encode(digest);
+        let key = rsky_common::get_random_str();
+
+        // Just store the blob directly
+        self.put_permanent_with_mime(
+            Cid::try_from(format!("bafy{}", key)).unwrap_or_else(|_| Cid::default()),
+            bytes,
+            "application/octet-stream".to_owned(),
+        )
+        .await?;
+
+        // Return the key for API compatibility
+        Ok(key)
     }

-    pub async fn make_permanent(&self, key: String, cid: Cid) -> Result<()> {
-        // let already_has = self.has_stored(cid).await?;
-        // if !already_has {
-        //     Ok(self
-        //         .move_object(MoveObject {
-        //             from: self.get_tmp_path(&key),
-        //             to: self.get_stored_path(cid),
-        //         })
-        //         .await?)
-        // } else {
-        //     // already saved, so we no-op & just delete the temp
-        //     Ok(self.delete_key(self.get_tmp_path(&key)).await?)
-        // }
-        todo!();
+    /// Make a temporary blob permanent - just a no-op for API compatibility
+    pub async fn make_permanent(&self, _key: String, _cid: Cid) -> Result<()> {
+        // No-op since we don't have temporary blobs anymore
+        Ok(())
+    }
+
+    /// Store a blob with specific mime type
+    pub async fn put_permanent_with_mime(
+        &self,
+        cid: Cid,
+        bytes: Vec<u8>,
+        mime_type: String,
+    ) -> Result<()> {
+        let cid_str = cid.to_string();
+        let did_clone = self.did.clone();
+        let bytes_len = bytes.len() as i32;
+
+        // Store directly in the database
+        _ = self
+            .db
+            .get()
+            .await?
+            .interact(move |conn| {
+                let data_clone = bytes.clone();
+                let entry = BlobEntry {
+                    cid: cid_str.clone(),
+                    did: did_clone.clone(),
+                    data: bytes,
+                    size: bytes_len,
+                    mime_type,
+                    quarantined: false,
+                };
+
+                diesel::insert_into(blobs::table)
+                    .values(&entry)
+                    .on_conflict((blobs::cid, blobs::did))
+                    .do_update()
+                    .set(blobs::data.eq(data_clone))
+                    .execute(conn)
+                    .context("Failed to insert blob data")
+            })
+            .await
+            .expect("Failed to store blob data")?;
+
+        Ok(())
     }

+    /// Store a blob directly as permanent
     pub async fn put_permanent(&self, cid: Cid, bytes: Vec<u8>) -> Result<()> {
-        // let body = ByteStream::from(bytes);
-        // self.client
-        //     .put_object()
-        //     .body(body)
-        //     .bucket(&self.bucket)
-        //     .key(self.get_stored_path(cid))
-        //     .acl(ObjectCannedAcl::PublicRead)
-        //     .send()
-        //     .await?;
-        // Ok(())
-        todo!();
+        self.put_permanent_with_mime(cid, bytes, "application/octet-stream".to_owned())
+            .await
     }

+    /// Quarantine a blob
     pub async fn quarantine(&self, cid: Cid) -> Result<()> {
-        // self.move_object(MoveObject {
-        //     from: self.get_stored_path(cid),
-        //     to: self.get_quarantined_path(cid),
-        // })
-        // .await
-        todo!();
+        let cid_str = cid.to_string();
+        let did_clone = self.did.clone();
+
+        // Update the quarantine flag in the database
+        _ = self
+            .db
+            .get()
+            .await?
+            .interact(move |conn| {
+                diesel::update(blobs::table)
+                    .filter(blobs::cid.eq(&cid_str))
+                    .filter(blobs::did.eq(&did_clone))
+                    .set(blobs::quarantined.eq(true))
+                    .execute(conn)
+                    .context("Failed to quarantine blob")
+            })
+            .await
+            .expect("Failed to update quarantine status")?;
+
+        Ok(())
     }

+    /// Unquarantine a blob
     pub async fn unquarantine(&self, cid: Cid) -> Result<()> {
-        // self.move_object(MoveObject {
-        //     from: self.get_quarantined_path(cid),
-        //     to: self.get_stored_path(cid),
-        // })
-        // .await
-        todo!();
+        let cid_str = cid.to_string();
+        let did_clone = self.did.clone();
+
+        // Update the quarantine flag in the database
+        _ = self
+            .db
+            .get()
+            .await?
+            .interact(move |conn| {
+                diesel::update(blobs::table)
+                    .filter(blobs::cid.eq(&cid_str))
+                    .filter(blobs::did.eq(&did_clone))
+                    .set(blobs::quarantined.eq(false))
+                    .execute(conn)
+                    .context("Failed to unquarantine blob")
+            })
+            .await
+            .expect("Failed to update unquarantine status")?;
+
+        Ok(())
     }

-    async fn get_object(&self, cid: Cid) -> Result<ByteStream> {
-        // let res = self
-        //     .client
-        //     .get_object()
-        //     .bucket(&self.bucket)
-        //     .key(self.get_stored_path(cid))
-        //     .send()
-        //     .await;
-        // match res {
-        //     Ok(res) => Ok(res.body),
-        //     Err(SdkError::ServiceError(s)) => Err(anyhow::Error::new(s.into_err())),
-        //     Err(e) => Err(anyhow::Error::new(e.into_service_error())),
-        // }
-        todo!();
+    /// Get a blob as a stream
+    pub async fn get_object(&self, blob_cid: Cid) -> Result<ByteStream> {
+        use self::blobs::dsl::*;
+
+        let cid_str = blob_cid.to_string();
+        let did_clone = self.did.clone();
+
+        // Get the blob data from the database
+        let blob_data = self
+            .db
+            .get()
+            .await?
+            .interact(move |conn| {
+                blobs
+                    .filter(self::blobs::cid.eq(&cid_str))
+                    .filter(did.eq(&did_clone))
+                    .filter(quarantined.eq(false))
+                    .select(data)
+                    .first::<Vec<u8>>(conn)
+                    .optional()
+                    .context("Failed to query blob data")
+            })
+            .await
+            .expect("Failed to get blob data")?;
+
+        if let Some(bytes) = blob_data {
+            Ok(ByteStream::new(bytes))
+        } else {
+            anyhow::bail!("Blob not found: {}", blob_cid)
+        }
     }

+    /// Get blob bytes
     pub async fn get_bytes(&self, cid: Cid) -> Result<Vec<u8>> {
-        let res = self.get_object(cid).await?;
-        let bytes = res.collect().await.map(|data| data.into_bytes())?;
-        Ok(bytes.to_vec())
+        let stream = self.get_object(cid).await?;
+        stream.collect().await
     }

+    /// Get a blob as a stream
     pub async fn get_stream(&self, cid: Cid) -> Result<ByteStream> {
         self.get_object(cid).await
     }

-    pub async fn delete(&self, cid: String) -> Result<()> {
-        self.delete_key(self.get_stored_path(Cid::from_str(&cid)?))
+    /// Delete a blob by CID string
+    pub async fn delete(&self, blob_cid: String) -> Result<()> {
+        use self::blobs::dsl::*;
+
+        let did_clone = self.did.clone();
+
+        // Delete from database
+        _ = self
+            .db
+            .get()
+            .await?
+            .interact(move |conn| {
+                diesel::delete(blobs)
+                    .filter(self::blobs::cid.eq(&blob_cid))
+                    .filter(did.eq(&did_clone))
+                    .execute(conn)
+                    .context("Failed to delete blob")
+            })
             .await
+            .expect("Failed to delete blob")?;
+
+        Ok(())
     }

+    /// Delete multiple blobs by CID
     pub async fn delete_many(&self, cids: Vec<Cid>) -> Result<()> {
-        let keys: Vec<String> = cids
-            .into_iter()
-            .map(|cid| self.get_stored_path(cid))
-            .collect();
-        self.delete_many_keys(keys).await
-    }
+        use self::blobs::dsl::*;
+
+        let cid_strings: Vec<String> = cids.into_iter().map(|c| c.to_string()).collect();
+        let did_clone = self.did.clone();

-    pub async fn has_stored(&self, cid: Cid) -> Result<bool> {
-        Ok(self.has_key(self.get_stored_path(cid)).await)
-    }
+        // Delete all blobs in one operation
+        _ = self
+            .db
+            .get()
+            .await?
+            .interact(move |conn| {
+                diesel::delete(blobs)
+                    .filter(self::blobs::cid.eq_any(cid_strings))
+                    .filter(did.eq(&did_clone))
+                    .execute(conn)
+                    .context("Failed to delete multiple blobs")
+            })
+            .await
+            .expect("Failed to delete multiple blobs")?;

-    pub async fn has_temp(&self, key: String) -> Result<bool> {
-        Ok(self.has_key(self.get_tmp_path(&key)).await)
+        Ok(())
     }

-    async fn has_key(&self, key: String) -> bool {
-        // let res = self
-        //     .client
-        //     .head_object()
-        //     .bucket(&self.bucket)
-        //     .key(key)
-        //     .send()
-        //     .await;
-        // res.is_ok()
-        todo!();
-    }
+    /// Check if a blob is stored
+    pub async fn has_stored(&self, blob_cid: Cid) -> Result<bool> {
+        use self::blobs::dsl::*;

-    async fn delete_key(&self, key: String) -> Result<()> {
-        // self.client
-        //     .delete_object()
-        //     .bucket(&self.bucket)
-        //     .key(key)
-        //     .send()
-        //     .await?;
-        // Ok(())
-        todo!();
-    }
+        let cid_str = blob_cid.to_string();
+        let did_clone = self.did.clone();

-    async fn delete_many_keys(&self, keys: Vec<String>) -> Result<()> {
-        // let objects: Vec<ObjectIdentifier> = keys
-        //     .into_iter()
-        //     .map(|key| Ok(ObjectIdentifier::builder().key(key).build()?))
-        //     .collect::<Result<Vec<ObjectIdentifier>>>()?;
-        // let deletes = Delete::builder().set_objects(Some(objects)).build()?;
-        // self.client
-        //     .delete_objects()
-        //     .bucket(&self.bucket)
-        //     .delete(deletes)
-        //     .send()
-        //     .await?;
-        // Ok(())
-        todo!();
+        let exists = self
+            .db
+            .get()
+            .await?
+            .interact(move |conn| {
+                diesel::select(diesel::dsl::exists(
+                    blobs
+                        .filter(self::blobs::cid.eq(&cid_str))
+                        .filter(did.eq(&did_clone)),
+                ))
+                .get_result::<bool>(conn)
+                .context("Failed to check if blob exists")
+            })
+            .await
+            .expect("Failed to check blob existence")?;
+
+        Ok(exists)
     }

-    async fn move_object(&self, keys: MoveObject) -> Result<()> {
-        // self.client
-        //     .copy_object()
-        //     .bucket(&self.bucket)
-        //     .copy_source(format!(
-        //         "{0}/{1}/{2}",
-        //         env_str("AWS_ENDPOINT_BUCKET").unwrap(),
-        //         self.bucket,
-        //         keys.from
-        //     ))
-        //     .key(keys.to)
-        //     .acl(ObjectCannedAcl::PublicRead)
-        //     .send()
-        //     .await?;
-        // self.client
-        //     .delete_object()
-        //     .bucket(&self.bucket)
-        //     .key(keys.from)
-        //     .send()
-        //     .await?;
-        // Ok(())
-        todo!();
+    /// Check if a temporary blob exists - now just checks if any blob exists with the key pattern
+    pub async fn has_temp(&self, key: String) -> Result<bool> {
+        // We don't have temporary blobs anymore, but for compatibility we'll check if
+        // there's a blob with a similar CID pattern
+        let temp_cid = Cid::try_from(format!("bafy{}", key)).unwrap_or_else(|_| Cid::default());
+        self.has_stored(temp_cid).await
     }
 }
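
For orientation, a hypothetical round trip through the store above. `deadpool_diesel::sqlite::Pool` is the crate's alias for the pool type spelled out in the struct definition, the pool is assumed to be built at startup, and the default (zero) CID stands in for a real content hash:

```rust
async fn blob_roundtrip(pool: deadpool_diesel::sqlite::Pool, did: String) -> anyhow::Result<()> {
    let store = BlobStoreSql::new(did, pool);
    let cid = cidv10::Cid::default(); // placeholder; real callers derive the CID from the bytes

    store.put_permanent(cid, b"hello".to_vec()).await?;
    assert!(store.has_stored(cid).await?);

    // get_object filters on `quarantined = false`, so reads fail while quarantined...
    store.quarantine(cid).await?;
    assert!(store.get_bytes(cid).await.is_err());

    // ...and succeed again once the flag is cleared.
    store.unquarantine(cid).await?;
    assert_eq!(store.get_bytes(cid).await?, b"hello".to_vec());
    Ok(())
}
```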
+151 -136 src/actor_store/sql_repo.rs
···
-//! Based on https://github.com/blacksky-algorithms/rsky/blob/main/rsky-pds/src/actor_store/repo/sql_repo.rs
+//! Based on https://github.com/blacksky-algorithms/rsky/blob/main/rsky-pds/src/actor_store/record/mod.rs
 //! blacksky-algorithms/rsky is licensed under the Apache License 2.0
 //!
 //! Modified for SQLite backend

+use crate::models::actor_store as models;
+use crate::models::actor_store::RepoBlock;
 use anyhow::Result;
 use cidv10::Cid;
 use diesel::dsl::sql;
···
 use diesel::sql_types::{Bool, Text};
 use diesel::*;
 use futures::{StreamExt, TryStreamExt, stream};
-use rsky_pds::models;
-use rsky_pds::models::RepoBlock;
 use rsky_repo::block_map::{BlockMap, BlocksAndMissing};
 use rsky_repo::car::blocks_to_car_file;
 use rsky_repo::cid_set::CidSet;
···
 use std::sync::Arc;
 use tokio::sync::RwLock;

-use super::ActorDb;
-
-#[derive(Clone, Debug)]
 pub struct SqlRepoReader {
     pub cache: Arc<RwLock<BlockMap>>,
-    pub db: ActorDb,
+    pub db: deadpool_diesel::sqlite::Object,
     pub root: Option<Cid>,
     pub rev: Option<String>,
     pub now: String,
     pub did: String,
 }

+impl std::fmt::Debug for SqlRepoReader {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("SqlRepoReader")
+            .field("did", &self.did)
+            .field("root", &self.root)
+            .field("rev", &self.rev)
+            .finish()
+    }
+}
+
 impl ReadableBlockstore for SqlRepoReader {
-    fn get_bytes<'a>(
-        &'a self,
-        cid: &'a Cid,
-    ) -> Pin<Box<dyn Future<Output = Result<Option<Vec<u8>>>> + Send + Sync + 'a>> {
+    fn get_bytes<'life>(
+        &'life self,
+        cid: &'life Cid,
+    ) -> Pin<Box<dyn Future<Output = Result<Option<Vec<u8>>>> + Send + Sync + 'life>> {
         let did: String = self.did.clone();
-        let db: ActorDb = self.db.clone();
-        let cid = cid.clone();
+        let cid = *cid;

         Box::pin(async move {
-            use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
+            use crate::schema::actor_store::repo_block::dsl as RepoBlockSchema;
             let cached = {
                 let cache_guard = self.cache.read().await;
-                cache_guard.get(cid).map(|v| v.clone())
+                cache_guard.get(cid).cloned()
             };
             if let Some(cached_result) = cached {
-                return Ok(Some(cached_result.clone()));
+                return Ok(Some(cached_result));
             }

-            let found: Option<Vec<u8>> = db
-                .run(move |conn| {
+            let found: Option<Vec<u8>> = self
+                .db
+                .interact(move |conn| {
                     RepoBlockSchema::repo_block
                         .filter(RepoBlockSchema::cid.eq(cid.to_string()))
                         .filter(RepoBlockSchema::did.eq(did))
···
                         .first(conn)
                         .optional()
                 })
-                .await?;
+                .await
+                .expect("Failed to get block")?;
             match found {
                 None => Ok(None),
                 Some(result) => {
···
         })
     }

-    fn has<'a>(
-        &'a self,
+    fn has<'life>(
+        &'life self,
         cid: Cid,
-    ) -> Pin<Box<dyn Future<Output = Result<bool>> + Send + Sync + 'a>> {
+    ) -> Pin<Box<dyn Future<Output = Result<bool>> + Send + Sync + 'life>> {
         Box::pin(async move {
             let got = <Self as ReadableBlockstore>::get_bytes(self, &cid).await?;
             Ok(got.is_some())
         })
     }

-    fn get_blocks<'a>(
-        &'a self,
+    fn get_blocks<'life>(
+        &'life self,
         cids: Vec<Cid>,
-    ) -> Pin<Box<dyn Future<Output = Result<BlocksAndMissing>> + Send + Sync + 'a>> {
+    ) -> Pin<Box<dyn Future<Output = Result<BlocksAndMissing>> + Send + Sync + 'life>> {
         let did: String = self.did.clone();
-        let db: ActorDb = self.db.clone();

         Box::pin(async move {
-            use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
+            use crate::schema::actor_store::repo_block::dsl as RepoBlockSchema;
             let cached = {
                 let mut cache_guard = self.cache.write().await;
                 cache_guard.get_many(cids)?
···
             let blocks = Arc::new(tokio::sync::Mutex::new(BlockMap::new()));
             let missing_set = Arc::new(tokio::sync::Mutex::new(missing));

-            let _: Vec<_> = stream::iter(missing_strings.chunks(500))
+            let stream: Vec<_> = stream::iter(missing_strings.chunks(500))
                 .then(|batch| {
-                    let this_db = db.clone();
                     let this_did = did.clone();
                     let blocks = Arc::clone(&blocks);
                     let missing = Arc::clone(&missing_set);
···

                     async move {
                         // Database query
-                        let rows: Vec<(String, Vec<u8>)> = this_db
-                            .run(move |conn| {
+                        let rows: Vec<(String, Vec<u8>)> = self
+                            .db
+                            .interact(move |conn| {
                                 RepoBlockSchema::repo_block
                                     .filter(RepoBlockSchema::cid.eq_any(batch))
                                     .filter(RepoBlockSchema::did.eq(this_did))
                                     .select((RepoBlockSchema::cid, RepoBlockSchema::content))
                                     .load(conn)
                             })
-                            .await?;
+                            .await
+                            .expect("Failed to get blocks")?;

                         // Process rows with locked access
                         let mut blocks = blocks.lock().await;
···
                 })
                 .try_collect()
                 .await?;
+            drop(stream);

             // Extract values from synchronization primitives
             let mut blocks = Arc::try_unwrap(blocks)
···
 }

 impl RepoStorage for SqlRepoReader {
-    fn get_root<'a>(&'a self) -> Pin<Box<dyn Future<Output = Option<Cid>> + Send + Sync + 'a>> {
+    fn get_root<'life>(
+        &'life self,
+    ) -> Pin<Box<dyn Future<Output = Option<Cid>> + Send + Sync + 'life>> {
         Box::pin(async move {
             match self.get_root_detailed().await {
                 Ok(root) => Some(root.cid),
···
         })
     }

-    fn put_block<'a>(
-        &'a self,
+    fn put_block<'life>(
+        &'life self,
         cid: Cid,
         bytes: Vec<u8>,
         rev: String,
-    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'a>> {
+    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'life>> {
         let did: String = self.did.clone();
-        let db: ActorDb = self.db.clone();
         let bytes_cloned = bytes.clone();
         Box::pin(async move {
-            use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
+            use crate::schema::actor_store::repo_block::dsl as RepoBlockSchema;

-            db.run(move |conn| {
-                insert_into(RepoBlockSchema::repo_block)
-                    .values((
-                        RepoBlockSchema::did.eq(did),
-                        RepoBlockSchema::cid.eq(cid.to_string()),
-                        RepoBlockSchema::repoRev.eq(rev),
-                        RepoBlockSchema::size.eq(bytes.len() as i32),
-                        RepoBlockSchema::content.eq(bytes),
-                    ))
-                    .execute(conn)
-            })
-            .await?;
+            _ = self
+                .db
+                .interact(move |conn| {
+                    insert_into(RepoBlockSchema::repo_block)
+                        .values((
+                            RepoBlockSchema::did.eq(did),
+                            RepoBlockSchema::cid.eq(cid.to_string()),
+                            RepoBlockSchema::repoRev.eq(rev),
+                            RepoBlockSchema::size.eq(bytes.len() as i32),
+                            RepoBlockSchema::content.eq(bytes),
+                        ))
+                        .execute(conn)
+                })
+                .await
+                .expect("Failed to put block")?;
             {
                 let mut cache_guard = self.cache.write().await;
                 cache_guard.set(cid, bytes_cloned);
···
         })
     }

-    fn put_many<'a>(
-        &'a self,
+    fn put_many<'life>(
+        &'life self,
         to_put: BlockMap,
         rev: String,
-    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'a>> {
+    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'life>> {
         let did: String = self.did.clone();
-        let db: ActorDb = self.db.clone();

         Box::pin(async move {
-            use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
+            use crate::schema::actor_store::repo_block::dsl as RepoBlockSchema;

             let blocks: Vec<RepoBlock> = to_put
                 .map
···
             let chunks: Vec<Vec<RepoBlock>> =
                 blocks.chunks(50).map(|chunk| chunk.to_vec()).collect();

-            let _: Vec<_> = stream::iter(chunks)
-                .then(|batch| {
-                    let db = db.clone();
-                    async move {
-                        db.run(move |conn| {
-                            insert_into(RepoBlockSchema::repo_block)
-                                .values(batch)
-                                .on_conflict_do_nothing()
-                                .execute(conn)
-                                .map(|_| ())
-                        })
-                        .await
-                        .map_err(anyhow::Error::from)
-                    }
-                })
-                .collect::<Vec<_>>()
-                .await
-                .into_iter()
-                .collect::<Result<Vec<()>>>()?;
+            for batch in chunks {
+                _ = self
+                    .db
+                    .interact(move |conn| {
+                        insert_or_ignore_into(RepoBlockSchema::repo_block)
+                            .values(&batch)
+                            .execute(conn)
+                    })
+                    .await
+                    .expect("Failed to insert blocks")?;
+            }

             Ok(())
         })
     }
-    fn update_root<'a>(
-        &'a self,
+    fn update_root<'life>(
+        &'life self,
         cid: Cid,
         rev: String,
         is_create: Option<bool>,
-    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'a>> {
+    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'life>> {
         let did: String = self.did.clone();
-        let db: ActorDb = self.db.clone();
         let now: String = self.now.clone();

         Box::pin(async move {
-            use rsky_pds::schema::pds::repo_root::dsl as RepoRootSchema;
+            use crate::schema::actor_store::repo_root::dsl as RepoRootSchema;

             let is_create = is_create.unwrap_or(false);
             if is_create {
-                db.run(move |conn| {
-                    insert_into(RepoRootSchema::repo_root)
-                        .values((
-                            RepoRootSchema::did.eq(did),
-                            RepoRootSchema::cid.eq(cid.to_string()),
-                            RepoRootSchema::rev.eq(rev),
-                            RepoRootSchema::indexedAt.eq(now),
-                        ))
-                        .execute(conn)
-                })
-                .await?;
+                _ = self
+                    .db
+                    .interact(move |conn| {
+                        insert_into(RepoRootSchema::repo_root)
+                            .values((
+                                RepoRootSchema::did.eq(did),
+                                RepoRootSchema::cid.eq(cid.to_string()),
+                                RepoRootSchema::rev.eq(rev),
+                                RepoRootSchema::indexedAt.eq(now),
+                            ))
+                            .execute(conn)
+                    })
+                    .await
+                    .expect("Failed to create root")?;
             } else {
-                db.run(move |conn| {
-                    update(RepoRootSchema::repo_root)
-                        .filter(RepoRootSchema::did.eq(did))
-                        .set((
-                            RepoRootSchema::cid.eq(cid.to_string()),
-                            RepoRootSchema::rev.eq(rev),
-                            RepoRootSchema::indexedAt.eq(now),
-                        ))
-                        .execute(conn)
-                })
-                .await?;
+                _ = self
+                    .db
+                    .interact(move |conn| {
+                        update(RepoRootSchema::repo_root)
+                            .filter(RepoRootSchema::did.eq(did))
+                            .set((
+                                RepoRootSchema::cid.eq(cid.to_string()),
+                                RepoRootSchema::rev.eq(rev),
+                                RepoRootSchema::indexedAt.eq(now),
+                            ))
+                            .execute(conn)
+                    })
+                    .await
+                    .expect("Failed to update root")?;
             }
             Ok(())
         })
     }

-    fn apply_commit<'a>(
-        &'a self,
+    fn apply_commit<'life>(
+        &'life self,
         commit: CommitData,
         is_create: Option<bool>,
-    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'a>> {
+    ) -> Pin<Box<dyn Future<Output = Result<()>> + Send + Sync + 'life>> {
         Box::pin(async move {
             self.update_root(commit.cid, commit.rev.clone(), is_create)
                 .await?;
···

 // Basically handles getting ipld blocks from db
 impl SqlRepoReader {
-    pub fn new(did: String, now: Option<String>, db: ActorDb) -> Self {
+    pub fn new(did: String, now: Option<String>, db: deadpool_diesel::sqlite::Object) -> Self {
         let now = now.unwrap_or_else(rsky_common::now);
-        SqlRepoReader {
+        Self {
             cache: Arc::new(RwLock::new(BlockMap::new())),
             root: None,
             rev: None,
···
         cursor: &Option<CidAndRev>,
     ) -> Result<Vec<RepoBlock>> {
         let did: String = self.did.clone();
-        let db: ActorDb = self.db.clone();
         let since = since.clone();
         let cursor = cursor.clone();
-        use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
+        use crate::schema::actor_store::repo_block::dsl as RepoBlockSchema;

-        Ok(db
-            .run(move |conn| {
+        Ok(self
+            .db
+            .interact(move |conn| {
                 let mut builder = RepoBlockSchema::repo_block
                     .select(RepoBlock::as_select())
                     .order((RepoBlockSchema::repoRev.desc(), RepoBlockSchema::cid.desc()))
···
                 }
                 builder.load(conn)
             })
-            .await?)
+            .await
+            .expect("Failed to get block range")?)
     }

     pub async fn count_blocks(&self) -> Result<i64> {
         let did: String = self.did.clone();
-        let db: ActorDb = self.db.clone();
-        use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
+        use crate::schema::actor_store::repo_block::dsl as RepoBlockSchema;

-        let res = db
-            .run(move |conn| {
+        let res = self
+            .db
+            .interact(move |conn| {
                 RepoBlockSchema::repo_block
                     .filter(RepoBlockSchema::did.eq(did))
                     .count()
                     .get_result(conn)
             })
-            .await?;
+            .await
+            .expect("Failed to count blocks")?;
         Ok(res)
     }
···
     /// Proactively cache all blocks from a particular commit (to prevent multiple roundtrips)
     pub async fn cache_rev(&mut self, rev: String) -> Result<()> {
         let did: String = self.did.clone();
-        let db: ActorDb = self.db.clone();
-        use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
+        use crate::schema::actor_store::repo_block::dsl as RepoBlockSchema;

-        let res: Vec<(String, Vec<u8>)> = db
-            .run(move |conn| {
+        let result: Vec<(String, Vec<u8>)> = self
+            .db
+            .interact(move |conn| {
                 RepoBlockSchema::repo_block
                     .filter(RepoBlockSchema::did.eq(did))
                     .filter(RepoBlockSchema::repoRev.eq(rev))
···
                     .limit(15)
                     .get_results::<(String, Vec<u8>)>(conn)
             })
-            .await?;
-        for row in res {
+            .await
+            .expect("Failed to cache rev")?;
+        for row in result {
             let mut cache_guard = self.cache.write().await;
             cache_guard.set(Cid::from_str(&row.0)?, row.1)
         }
···
             return Ok(());
         }
         let did: String = self.did.clone();
-        let db: ActorDb = self.db.clone();
-        use rsky_pds::schema::pds::repo_block::dsl as RepoBlockSchema;
+        use crate::schema::actor_store::repo_block::dsl as RepoBlockSchema;

         let cid_strings: Vec<String> = cids.into_iter().map(|c| c.to_string()).collect();
-        db.run(move |conn| {
-            delete(RepoBlockSchema::repo_block)
-                .filter(RepoBlockSchema::did.eq(did))
-                .filter(RepoBlockSchema::cid.eq_any(cid_strings))
-                .execute(conn)
-        })
-        .await?;
+        _ = self
+            .db
+            .interact(move |conn| {
+                delete(RepoBlockSchema::repo_block)
+                    .filter(RepoBlockSchema::did.eq(did))
+                    .filter(RepoBlockSchema::cid.eq_any(cid_strings))
+                    .execute(conn)
+            })
+            .await
+            .expect("Failed to delete many")?;
         Ok(())
     }

     pub async fn get_root_detailed(&self) -> Result<CidAndRev> {
         let did: String = self.did.clone();
-        let db: ActorDb = self.db.clone();
-        use rsky_pds::schema::pds::repo_root::dsl as RepoRootSchema;
+        use crate::schema::actor_store::repo_root::dsl as RepoRootSchema;

-        let res = db
-            .run(move |conn| {
+        let res = self
+            .db
+            .interact(move |conn| {
                 RepoRootSchema::repo_root
                     .filter(RepoRootSchema::did.eq(did))
                     .select(models::RepoRoot::as_select())
                     .first(conn)
             })
-            .await?;
+            .await
+            .expect("Failed to get root")?;

         Ok(CidAndRev {
             cid: Cid::from_str(&res.cid)?,
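
One small semantic change rides along with the pooling work in both this file and the record store: `insert_into(...).on_conflict_do_nothing()` becomes `insert_or_ignore_into(...)`, which diesel renders as SQLite's native `INSERT OR IGNORE`. A minimal sketch against a throwaway table (the table and column names are illustrative, not from this repo):

```rust
use diesel::prelude::*;

diesel::table! {
    demo (id) {
        id -> Integer,
    }
}

fn insert_ignoring_dupes(conn: &mut SqliteConnection, ids: &[i32]) -> QueryResult<usize> {
    let rows: Vec<_> = ids.iter().map(|i| demo::id.eq(*i)).collect();
    // Rows that would violate a constraint are skipped instead of erroring.
    diesel::insert_or_ignore_into(demo::table)
        .values(&rows)
        .execute(conn)
}
```

On SQLite, `INSERT OR IGNORE` skips rows violating any constraint, so it is at least as permissive as the `ON CONFLICT DO NOTHING` form it replaces here.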
+245 src/apis/com/atproto/identity/identity.rs
···
+//! Identity endpoints (/xrpc/com.atproto.identity.*)
+use std::collections::HashMap;
+
+use anyhow::{Context as _, anyhow};
+use atrium_api::{
+    com::atproto::identity,
+    types::string::{Datetime, Handle},
+};
+use atrium_crypto::keypair::Did as _;
+use atrium_repo::blockstore::{AsyncBlockStoreWrite as _, CarStore, DAG_CBOR, SHA2_256};
+use axum::{
+    Json, Router,
+    extract::{Query, State},
+    http::StatusCode,
+    routing::{get, post},
+};
+use constcat::concat;
+
+use crate::{
+    AppState, Client, Db, Error, Result, RotationKey, SigningKey,
+    auth::AuthenticatedUser,
+    config::AppConfig,
+    did,
+    firehose::FirehoseProducer,
+    plc::{self, PlcOperation, PlcService},
+};
+
+/// (GET) Resolves an atproto handle (hostname) to a DID. Does not necessarily bi-directionally verify against the DID document.
+/// ### Query Parameters
+/// - handle: The handle to resolve.
+/// ### Responses
+/// - 200 OK: {did: did}
+/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `HandleNotFound`]}
+/// - 401 Unauthorized
+async fn resolve_handle(
+    State(db): State<Db>,
+    State(client): State<Client>,
+    Query(input): Query<identity::resolve_handle::ParametersData>,
+) -> Result<Json<identity::resolve_handle::Output>> {
+    let handle = input.handle.as_str();
+    if let Ok(did) = sqlx::query_scalar!(r#"SELECT did FROM handles WHERE handle = ?"#, handle)
+        .fetch_one(&db)
+        .await
+    {
+        return Ok(Json(
+            identity::resolve_handle::OutputData {
+                did: atrium_api::types::string::Did::new(did).expect("should be valid DID format"),
+            }
+            .into(),
+        ));
+    }
+
+    // HACK: Query bsky to see if they have this handle cached.
+    let response = client
+        .get(format!(
+            "https://api.bsky.app/xrpc/com.atproto.identity.resolveHandle?handle={handle}"
+        ))
+        .send()
+        .await
+        .context("failed to query upstream server")?
+        .json()
+        .await
+        .context("failed to decode response as JSON")?;
+
+    Ok(Json(response))
+}
+
+#[expect(unused_variables, clippy::todo, reason = "Not yet implemented")]
+/// Request an email with a code in order to request a signed PLC operation. Requires Auth.
+/// - POST /xrpc/com.atproto.identity.requestPlcOperationSignature
+/// ### Responses
+/// - 200 OK
+/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
+/// - 401 Unauthorized
+async fn request_plc_operation_signature(user: AuthenticatedUser) -> Result<()> {
+    todo!()
+}
+
+#[expect(unused_variables, clippy::todo, reason = "Not yet implemented")]
+/// Signs a PLC operation to update some value(s) in the requesting DID's document.
+/// - POST /xrpc/com.atproto.identity.signPlcOperation
+/// ### Request Body
+/// - token: string // A token received through com.atproto.identity.requestPlcOperationSignature
+/// - rotationKeys: string[]
+/// - alsoKnownAs: string[]
+/// - verificationMethods: services
+/// ### Responses
+/// - 200 OK: {operation: string}
+/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
+/// - 401 Unauthorized
+async fn sign_plc_operation(
+    user: AuthenticatedUser,
+    State(skey): State<SigningKey>,
+    State(rkey): State<RotationKey>,
+    State(config): State<AppConfig>,
+    Json(input): Json<identity::sign_plc_operation::Input>,
+) -> Result<Json<identity::sign_plc_operation::Output>> {
+    todo!()
+}
+
+#[expect(
+    clippy::too_many_arguments,
+    reason = "Many parameters are required for this endpoint"
+)]
+/// Updates the current account's handle. Verifies handle validity, and updates did:plc document if necessary. Implemented by PDS, and requires auth.
+/// - POST /xrpc/com.atproto.identity.updateHandle
+/// ### Query Parameters
+/// - handle: handle // The new handle.
+/// ### Responses
+/// - 200 OK
+/// ## Errors
+/// - If the handle is already in use.
+/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
+/// - 401 Unauthorized
+/// ## Panics
+/// - If the handle is not valid.
+async fn update_handle(
+    user: AuthenticatedUser,
+    State(skey): State<SigningKey>,
+    State(rkey): State<RotationKey>,
+    State(client): State<Client>,
+    State(config): State<AppConfig>,
+    State(db): State<Db>,
+    State(fhp): State<FirehoseProducer>,
+    Json(input): Json<identity::update_handle::Input>,
+) -> Result<()> {
+    let handle = input.handle.as_str();
+    let did_str = user.did();
+    let did = atrium_api::types::string::Did::new(user.did()).expect("should be valid DID format");
+
+    if let Some(existing_did) =
+        sqlx::query_scalar!(r#"SELECT did FROM handles WHERE handle = ?"#, handle)
+            .fetch_optional(&db)
+            .await
+            .context("failed to query did count")?
+    {
+        if existing_did != did_str {
+            return Err(Error::with_status(
+                StatusCode::BAD_REQUEST,
+                anyhow!("attempted to update handle to one that is already in use"),
+            ));
+        }
+    }
+
+    // Ensure the existing DID is resolvable.
+    // If not, we need to register the original handle.
+    let _did = did::resolve(&client, did.clone())
+        .await
+        .with_context(|| format!("failed to resolve DID for {did_str}"))
+        .context("should be able to resolve DID")?;
+
+    let op = plc::sign_op(
+        &rkey,
+        PlcOperation {
+            typ: "plc_operation".to_owned(),
+            rotation_keys: vec![rkey.did()],
+            verification_methods: HashMap::from([("atproto".to_owned(), skey.did())]),
+            also_known_as: vec![input.handle.as_str().to_owned()],
+            services: HashMap::from([(
+                "atproto_pds".to_owned(),
+                PlcService::Pds {
+                    endpoint: config.host_name.clone(),
+                },
+            )]),
+            prev: Some(
+                sqlx::query_scalar!(r#"SELECT plc_root FROM accounts WHERE did = ?"#, did_str)
+                    .fetch_one(&db)
+                    .await
+                    .context("failed to fetch user PLC root")?,
+            ),
+        },
+    )
+    .context("failed to sign plc op")?;
+
+    if !config.test {
+        plc::submit(&client, did.as_str(), &op)
+            .await
+            .context("failed to submit PLC operation")?;
+    }
+
+    // FIXME: Properly abstract these implementation details.
+    let did_hash = did_str
+        .strip_prefix("did:plc:")
+        .context("should be valid DID format")?;
+    let doc = tokio::fs::File::options()
+        .read(true)
+        .write(true)
+        .open(config.plc.path.join(format!("{did_hash}.car")))
+        .await
+        .context("failed to open did doc")?;
+
+    let op_bytes = serde_ipld_dagcbor::to_vec(&op).context("failed to encode plc op")?;
+
+    let plc_cid = CarStore::open(doc)
+        .await
+        .context("failed to open did carstore")?
+        .write_block(DAG_CBOR, SHA2_256, &op_bytes)
+        .await
+        .context("failed to write genesis commit")?;
+
+    let cid_str = plc_cid.to_string();
+
+    _ = sqlx::query!(
+        r#"UPDATE accounts SET plc_root = ? WHERE did = ?"#,
+        cid_str,
+        did_str
+    )
+    .execute(&db)
+    .await
+    .context("failed to update account PLC root")?;
+
+    // Broadcast the identity event now that the new identity is resolvable on the public directory.
+    fhp.identity(
+        atrium_api::com::atproto::sync::subscribe_repos::IdentityData {
+            did: did.clone(),
+            handle: Some(Handle::new(handle.to_owned()).expect("should be valid handle")),
+            seq: 0, // Filled by firehose later.
+            time: Datetime::now(),
+        },
+    )
+    .await;
+
+    Ok(())
+}
+
+async fn todo() -> Result<()> {
+    Err(Error::unimplemented(anyhow!("not implemented")))
+}
+
+#[rustfmt::skip]
+/// Identity endpoints (/xrpc/com.atproto.identity.*)
+/// ### Routes
+/// - AP /xrpc/com.atproto.identity.updateHandle -> [`update_handle`]
+/// - AP /xrpc/com.atproto.identity.requestPlcOperationSignature -> [`request_plc_operation_signature`]
+/// - AP /xrpc/com.atproto.identity.signPlcOperation -> [`sign_plc_operation`]
+/// - UG /xrpc/com.atproto.identity.resolveHandle -> [`resolve_handle`]
+pub(super) fn routes() -> Router<AppState> {
+    Router::new()
+        .route(concat!("/", identity::get_recommended_did_credentials::NSID), get(todo))
+        .route(concat!("/", identity::request_plc_operation_signature::NSID), post(request_plc_operation_signature))
+        .route(concat!("/", identity::resolve_handle::NSID), get(resolve_handle))
+        .route(concat!("/", identity::sign_plc_operation::NSID), post(sign_plc_operation))
+        .route(concat!("/", identity::submit_plc_operation::NSID), post(todo))
+        .route(concat!("/", identity::update_handle::NSID), post(update_handle))
+}
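
A quick client-side check of the resolve route registered above; the base URL and handle are placeholders for a locally running instance:

```rust
async fn check_resolve(client: &reqwest::Client) -> anyhow::Result<()> {
    let resp: serde_json::Value = client
        .get("http://localhost:8000/xrpc/com.atproto.identity.resolveHandle")
        .query(&[("handle", "alice.example.com")])
        .send()
        .await?
        .error_for_status()? // a 400 such as HandleNotFound surfaces here
        .json()
        .await?;
    println!("{resp}"); // expected shape: {"did": "did:plc:..."}
    Ok(())
}
```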
+5 src/apis/com/atproto/mod.rs
+142 src/apis/com/atproto/repo/apply_writes.rs
···
+//! Apply a batch transaction of repository creates, updates, and deletes. Requires auth, implemented by PDS.
+
+use super::*;
+
+async fn inner_apply_writes(
+    body: ApplyWritesInput,
+    auth: AuthenticatedUser,
+    sequencer: Arc<RwLock<Sequencer>>,
+    actor_pools: HashMap<String, ActorStorage>,
+    account_manager: Arc<RwLock<AccountManager>>,
+) -> Result<()> {
+    let tx: ApplyWritesInput = body;
+    let ApplyWritesInput {
+        repo,
+        validate,
+        swap_commit,
+        ..
+    } = tx;
+    let account = account_manager
+        .read()
+        .await
+        .get_account(
+            &repo,
+            Some(AvailabilityFlags {
+                include_deactivated: Some(true),
+                include_taken_down: None,
+            }),
+        )
+        .await?;
+
+    if let Some(account) = account {
+        if account.deactivated_at.is_some() {
+            bail!("Account is deactivated")
+        }
+        let did = account.did;
+        if did != auth.did() {
+            bail!("AuthRequiredError")
+        }
+        let did: &String = &did;
+        if tx.writes.len() > 200 {
+            bail!("Too many writes. Max: 200")
+        }
+
+        let writes: Vec<PreparedWrite> = stream::iter(tx.writes)
+            .then(async |write| {
+                Ok::<PreparedWrite, anyhow::Error>(match write {
+                    ApplyWritesInputRefWrite::Create(write) => PreparedWrite::Create(
+                        prepare_create(PrepareCreateOpts {
+                            did: did.clone(),
+                            collection: write.collection,
+                            rkey: write.rkey,
+                            swap_cid: None,
+                            record: serde_json::from_value(write.value)?,
+                            validate,
+                        })
+                        .await?,
+                    ),
+                    ApplyWritesInputRefWrite::Update(write) => PreparedWrite::Update(
+                        prepare_update(PrepareUpdateOpts {
+                            did: did.clone(),
+                            collection: write.collection,
+                            rkey: write.rkey,
+                            swap_cid: None,
+                            record: serde_json::from_value(write.value)?,
+                            validate,
+                        })
+                        .await?,
+                    ),
+                    ApplyWritesInputRefWrite::Delete(write) => {
+                        PreparedWrite::Delete(prepare_delete(PrepareDeleteOpts {
+                            did: did.clone(),
+                            collection: write.collection,
+                            rkey: write.rkey,
+                            swap_cid: None,
+                        })?)
+                    }
+                })
+            })
+            .collect::<Vec<_>>()
+            .await
+            .into_iter()
+            .collect::<Result<Vec<PreparedWrite>, _>>()?;
+
+        let swap_commit_cid = match swap_commit {
+            Some(swap_commit) => Some(Cid::from_str(&swap_commit)?),
+            None => None,
+        };
+
+        let mut actor_store = ActorStore::from_actor_pools(did, &actor_pools).await;
+
+        let commit = actor_store
+            .process_writes(writes.clone(), swap_commit_cid)
+            .await?;
+
+        _ = sequencer
+            .write()
+            .await
+            .sequence_commit(did.clone(), commit.clone())
+            .await?;
+        account_manager
+            .write()
+            .await
+            .update_repo_root(
+                did.to_string(),
+                commit.commit_data.cid,
+                commit.commit_data.rev,
+                &actor_pools,
+            )
+            .await?;
+        Ok(())
+    } else {
+        bail!("Could not find repo: `{repo}`")
+    }
+}
+
+/// Apply a batch transaction of repository creates, updates, and deletes. Requires auth, implemented by PDS.
+/// - POST /xrpc/com.atproto.repo.applyWrites
+/// ### Request Body
+/// - `repo`: `at-identifier` // The handle or DID of the repo (aka, current account).
+/// - `validate`: `boolean` // Can be set to 'false' to skip Lexicon schema validation of record data across all operations, 'true' to require it, or leave unset to validate only for known Lexicons.
+/// - `writes`: `object[]` // One of:
+/// - - com.atproto.repo.applyWrites.create
+/// - - com.atproto.repo.applyWrites.update
+/// - - com.atproto.repo.applyWrites.delete
+/// - `swap_commit`: `cid` // If provided, the entire operation will fail if the current repo commit CID does not match this value. Used to prevent conflicting repo mutations.
+#[axum::debug_handler(state = AppState)]
+pub(crate) async fn apply_writes(
+    auth: AuthenticatedUser,
+    State(actor_pools): State<HashMap<String, ActorStorage, RandomState>>,
+    State(account_manager): State<Arc<RwLock<AccountManager>>>,
+    State(sequencer): State<Arc<RwLock<Sequencer>>>,
+    Json(body): Json<ApplyWritesInput>,
+) -> Result<(), ApiError> {
+    tracing::debug!("@LOG: debug apply_writes {body:#?}");
+    match inner_apply_writes(body, auth, sequencer, actor_pools, account_manager).await {
+        Ok(()) => Ok(()),
+        Err(error) => {
+            tracing::error!("@LOG: ERROR: {error}");
+            Err(ApiError::RuntimeError)
+        }
+    }
+}
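
To make the request-body description above concrete, here is a hypothetical applyWrites payload; the DID, rkeys, and record values are placeholders, and the `$type` discriminators select the create/update/delete variants of the lexicon:

```rust
// Illustrative payload only; none of these identifiers come from this codebase.
fn example_apply_writes_body() -> serde_json::Value {
    serde_json::json!({
        "repo": "did:plc:exampleexampleexample",
        "validate": true,
        "writes": [
            {
                "$type": "com.atproto.repo.applyWrites#create",
                "collection": "app.bsky.feed.post",
                "rkey": "3jzfcijpj2z2a",
                "value": { "text": "hello", "createdAt": "2024-01-01T00:00:00Z" }
            },
            {
                "$type": "com.atproto.repo.applyWrites#delete",
                "collection": "app.bsky.feed.like",
                "rkey": "3jzfcijpj2z2b"
            }
        ]
    })
}
```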
+140 src/apis/com/atproto/repo/create_record.rs
···
+//! Create a single new repository record. Requires auth, implemented by PDS.
+
+use super::*;
+
+async fn inner_create_record(
+    body: CreateRecordInput,
+    user: AuthenticatedUser,
+    sequencer: Arc<RwLock<Sequencer>>,
+    actor_pools: HashMap<String, ActorStorage>,
+    account_manager: Arc<RwLock<AccountManager>>,
+) -> Result<CreateRecordOutput> {
+    let CreateRecordInput {
+        repo,
+        collection,
+        record,
+        rkey,
+        validate,
+        swap_commit,
+    } = body;
+    let account = account_manager
+        .read()
+        .await
+        .get_account(
+            &repo,
+            Some(AvailabilityFlags {
+                include_deactivated: Some(true),
+                include_taken_down: None,
+            }),
+        )
+        .await?;
+    if let Some(account) = account {
+        if account.deactivated_at.is_some() {
+            bail!("Account is deactivated")
+        }
+        let did = account.did;
+        // if did != auth.access.credentials.unwrap().did.unwrap() {
+        if did != user.did() {
+            bail!("AuthRequiredError")
+        }
+        let swap_commit_cid = match swap_commit {
+            Some(swap_commit) => Some(Cid::from_str(&swap_commit)?),
+            None => None,
+        };
+        let write = prepare_create(PrepareCreateOpts {
+            did: did.clone(),
+            collection: collection.clone(),
+            record: serde_json::from_value(record)?,
+            rkey,
+            validate,
+            swap_cid: None,
+        })
+        .await?;
+
+        let did: &String = &did;
+        let mut actor_store = ActorStore::from_actor_pools(did, &actor_pools).await;
+        let backlink_conflicts: Vec<AtUri> = match validate {
+            Some(true) => {
+                let write_at_uri: AtUri = write.uri.clone().try_into()?;
+                actor_store
+                    .record
+                    .get_backlink_conflicts(&write_at_uri, &write.record)
+                    .await?
+            }
+            _ => Vec::new(),
+        };
+
+        let backlink_deletions: Vec<PreparedDelete> = backlink_conflicts
+            .iter()
+            .map(|at_uri| {
+                prepare_delete(PrepareDeleteOpts {
+                    did: at_uri.get_hostname().to_string(),
+                    collection: at_uri.get_collection(),
+                    rkey: at_uri.get_rkey(),
+                    swap_cid: None,
+                })
+            })
+            .collect::<Result<Vec<PreparedDelete>>>()?;
+        let mut writes: Vec<PreparedWrite> = vec![PreparedWrite::Create(write.clone())];
+        for delete in backlink_deletions {
+            writes.push(PreparedWrite::Delete(delete));
+        }
+        let commit = actor_store
+            .process_writes(writes.clone(), swap_commit_cid)
+            .await?;
+
+        _ = sequencer
+            .write()
+            .await
+            .sequence_commit(did.clone(), commit.clone())
+            .await?;
+        account_manager
+            .write()
+            .await
+            .update_repo_root(
+                did.to_string(),
+                commit.commit_data.cid,
+                commit.commit_data.rev,
+                &actor_pools,
+            )
+            .await?;
+
+        Ok(CreateRecordOutput {
+            uri: write.uri.clone(),
+            cid: write.cid.to_string(),
+        })
+    } else {
+        bail!("Could not find repo: `{repo}`")
+    }
+}
+
+/// Create a single new repository record. Requires auth, implemented by PDS.
+/// - POST /xrpc/com.atproto.repo.createRecord
+/// ### Request Body
+/// - `repo`: `at-identifier` // The handle or DID of the repo (aka, current account).
+/// - `collection`: `nsid` // The NSID of the record collection.
+/// - `rkey`: `string` // The record key. <= 512 characters.
+/// - `validate`: `boolean` // Can be set to 'false' to skip Lexicon schema validation of record data, 'true' to require it, or leave unset to validate only for known Lexicons.
+/// - `record`
+/// - `swap_commit`: `cid` // Compare and swap with the previous commit by CID.
+/// ### Responses
+/// - 200 OK: {`cid`: `cid`, `uri`: `at-uri`, `commit`: {`cid`: `cid`, `rev`: `tid`}, `validation_status`: [`valid`, `unknown`]}
+/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `InvalidSwap`]}
+/// - 401 Unauthorized
+#[axum::debug_handler(state = AppState)]
+pub async fn create_record(
+    user: AuthenticatedUser,
+    State(db_actors): State<HashMap<String, ActorStorage, RandomState>>,
+    State(account_manager): State<Arc<RwLock<AccountManager>>>,
+    State(sequencer): State<Arc<RwLock<Sequencer>>>,
+    Json(body): Json<CreateRecordInput>,
+) -> Result<Json<CreateRecordOutput>, ApiError> {
+    tracing::debug!("@LOG: debug create_record {body:#?}");
+    match inner_create_record(body, user, sequencer, db_actors, account_manager).await {
+        Ok(res) => Ok(Json(res)),
+        Err(error) => {
+            tracing::error!("@LOG: ERROR: {error}");
+            Err(ApiError::RuntimeError)
+        }
+    }
+}
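
Both this handler and deleteRecord below thread `swap_commit` through to `process_writes` as an optional compare-and-swap guard against concurrent repo mutations. Its semantics in miniature (an illustrative helper, not code from this repo):

```rust
use cidv10::Cid;

/// A write may proceed when no guard was supplied, or when the guard
/// still matches the current repo head; otherwise the swap has been lost.
fn swap_commit_ok(current_head: Cid, swap_commit: Option<Cid>) -> bool {
    swap_commit.map_or(true, |expected| expected == current_head)
}
```

A failed guard is what the response tables above report as `InvalidSwap`.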
+117 src/apis/com/atproto/repo/delete_record.rs
···
+//! Delete a repository record, or ensure it doesn't exist. Requires auth, implemented by PDS.
+use super::*;
+
+async fn inner_delete_record(
+    body: DeleteRecordInput,
+    user: AuthenticatedUser,
+    sequencer: Arc<RwLock<Sequencer>>,
+    actor_pools: HashMap<String, ActorStorage>,
+    account_manager: Arc<RwLock<AccountManager>>,
+) -> Result<()> {
+    let DeleteRecordInput {
+        repo,
+        collection,
+        rkey,
+        swap_record,
+        swap_commit,
+    } = body;
+    let account = account_manager
+        .read()
+        .await
+        .get_account(
+            &repo,
+            Some(AvailabilityFlags {
+                include_deactivated: Some(true),
+                include_taken_down: None,
+            }),
+        )
+        .await?;
+    match account {
+        None => bail!("Could not find repo: `{repo}`"),
+        Some(account) if account.deactivated_at.is_some() => bail!("Account is deactivated"),
+        Some(account) => {
+            let did = account.did;
+            // if did != auth.access.credentials.unwrap().did.unwrap() {
+            if did != user.did() {
+                bail!("AuthRequiredError")
+            }
+
+            let swap_commit_cid = match swap_commit {
+                Some(swap_commit) => Some(Cid::from_str(&swap_commit)?),
+                None => None,
+            };
+            let swap_record_cid = match swap_record {
+                Some(swap_record) => Some(Cid::from_str(&swap_record)?),
+                None => None,
+            };
+
+            let write = prepare_delete(PrepareDeleteOpts {
+                did: did.clone(),
+                collection,
+                rkey,
+                swap_cid: swap_record_cid,
+            })?;
+            let mut actor_store = ActorStore::from_actor_pools(&did, &actor_pools).await;
+            let write_at_uri: AtUri = write.uri.clone().try_into()?;
+            let record = actor_store
+                .record
+                .get_record(&write_at_uri, None, Some(true))
+                .await?;
+            let commit = match record {
+                None => return Ok(()), // No-op if record already doesn't exist
+                Some(_) => {
+                    actor_store
+                        .process_writes(vec![PreparedWrite::Delete(write.clone())], swap_commit_cid)
+                        .await?
+                }
+            };
+
+            _ = sequencer
+                .write()
+                .await
+                .sequence_commit(did.clone(), commit.clone())
+                .await?;
+            account_manager
+                .write()
+                .await
+                .update_repo_root(
+                    did,
+                    commit.commit_data.cid,
+                    commit.commit_data.rev,
+                    &actor_pools,
+                )
+                .await?;
+
+            Ok(())
+        }
+    }
+}
+
+/// Delete a repository record, or ensure it doesn't exist. Requires auth, implemented by PDS.
+/// - POST /xrpc/com.atproto.repo.deleteRecord
+/// ### Request Body
+/// - `repo`: `at-identifier` // The handle or DID of the repo (aka, current account).
+/// - `collection`: `nsid` // The NSID of the record collection.
+/// - `rkey`: `string` // The record key. <= 512 characters.
+/// - `swap_record`: `cid` // Compare and swap with the previous record by CID.
+/// - `swap_commit`: `cid` // Compare and swap with the previous commit by CID.
+/// ### Responses
+/// - 200 OK: {"commit": {"cid": "string","rev": "string"}}
+/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `InvalidSwap`]}
+/// - 401 Unauthorized
+#[axum::debug_handler(state = AppState)]
+pub async fn delete_record(
+    user: AuthenticatedUser,
+    State(db_actors): State<HashMap<String, ActorStorage, RandomState>>,
+    State(account_manager): State<Arc<RwLock<AccountManager>>>,
+    State(sequencer): State<Arc<RwLock<Sequencer>>>,
+    Json(body): Json<DeleteRecordInput>,
+) -> Result<(), ApiError> {
+    match inner_delete_record(body, user, sequencer, db_actors, account_manager).await {
+        Ok(()) => Ok(()),
+        Err(error) => {
+            tracing::error!("@LOG: ERROR: {error}");
114
+
Err(ApiError::RuntimeError)
115
+
}
116
+
}
117
+
}
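
Review note: the `None => return Ok(())` branch makes deletes idempotent, so deleting an absent record returns 200 without sequencing a commit. A minimal client-side sketch under the same placeholder host/token assumptions as the createRecord example:

```rust
use serde_json::json;

// Hypothetical check that POST /xrpc/com.atproto.repo.deleteRecord is idempotent.
async fn delete_twice() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    for attempt in 1..=2 {
        let status = client
            .post("http://localhost:8000/xrpc/com.atproto.repo.deleteRecord")
            .bearer_auth("ACCESS_JWT") // placeholder access token
            .json(&json!({
                "repo": "did:plc:example",
                "collection": "app.bsky.feed.post",
                "rkey": "3jzfcijpj2z2a" // placeholder rkey
            }))
            .send()
            .await?
            .status();
        // Both attempts should report success; the second hits the no-op branch.
        println!("attempt {attempt}: {status}");
    }
    Ok(())
}
```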
+70 src/apis/com/atproto/repo/describe_repo.rs
···
//! Get information about an account and repository, including the list of collections. Does not require auth.
use super::*;

async fn inner_describe_repo(
    repo: String,
    id_resolver: Arc<RwLock<IdResolver>>,
    actor_pools: HashMap<String, ActorStorage>,
    account_manager: Arc<RwLock<AccountManager>>,
) -> Result<DescribeRepoOutput> {
    let account = account_manager
        .read()
        .await
        .get_account(&repo, None)
        .await?;
    match account {
        None => bail!("Could not find user: `{repo}`"),
        Some(account) => {
            let did_doc: DidDocument = match id_resolver
                .write()
                .await
                .did
                .ensure_resolve(&account.did, None)
                .await
            {
                Err(err) => bail!("Could not resolve DID: `{err}`"),
                Ok(res) => res,
            };
            let handle = rsky_common::get_handle(&did_doc);
            let handle_is_correct = handle == account.handle;

            let actor_store = ActorStore::from_actor_pools(&account.did, &actor_pools).await;
            let collections = actor_store.record.list_collections().await?;

            Ok(DescribeRepoOutput {
                handle: account.handle.unwrap_or_else(|| INVALID_HANDLE.to_owned()),
                did: account.did,
                did_doc: serde_json::to_value(did_doc)?,
                collections,
                handle_is_correct,
            })
        }
    }
}

/// Get information about an account and repository, including the list of collections. Does not require auth.
/// - GET /xrpc/com.atproto.repo.describeRepo
/// ### Query Parameters
/// - `repo`: `at-identifier` // The handle or DID of the repo.
/// ### Responses
/// - 200 OK: {"handle": "string","did": "string","didDoc": {},"collections": [string],"handleIsCorrect": true} \
///   handleIsCorrect - boolean - Indicates if handle is currently valid (resolves bi-directionally)
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
/// - 401 Unauthorized
#[tracing::instrument(skip_all)]
#[axum::debug_handler(state = AppState)]
pub async fn describe_repo(
    Query(input): Query<atrium_repo::describe_repo::ParametersData>,
    State(db_actors): State<HashMap<String, ActorStorage, RandomState>>,
    State(account_manager): State<Arc<RwLock<AccountManager>>>,
    State(id_resolver): State<Arc<RwLock<IdResolver>>>,
) -> Result<Json<DescribeRepoOutput>, ApiError> {
    match inner_describe_repo(input.repo.into(), id_resolver, db_actors, account_manager).await {
        Ok(res) => Ok(Json(res)),
        Err(error) => {
            tracing::error!("{error:?}");
            Err(ApiError::RuntimeError)
        }
    }
}
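
Review note: since this endpoint is unauthenticated, it is a convenient probe for the bi-directional handle check. A minimal sketch, same placeholder host assumption:

```rust
// Hypothetical probe of GET /xrpc/com.atproto.repo.describeRepo.
async fn check_handle(repo: &str) -> Result<bool, Box<dyn std::error::Error>> {
    let res: serde_json::Value = reqwest::Client::new()
        .get("http://localhost:8000/xrpc/com.atproto.repo.describeRepo")
        .query(&[("repo", repo)])
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    // `handleIsCorrect` is false when the handle in the DID document no longer
    // resolves back to this account.
    Ok(res["handleIsCorrect"].as_bool().unwrap_or(false))
}
```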
+37 src/apis/com/atproto/repo/ex.rs
···
//! Scratch template for new `com.atproto.repo.*` endpoint handlers; not wired into any router.
#![allow(unused_imports)]
use crate::account_manager::AccountManager;
use crate::auth::AuthenticatedUser;
use crate::serve::ActorStorage;
use crate::{actor_store::ActorStore, error::ApiError, serve::AppState};
use anyhow::{Result, bail};
use axum::extract::Query;
use axum::{Json, extract::State};
use rsky_identity::IdResolver;
use rsky_lexicon::com::atproto::repo::ApplyWritesInput;
use rsky_pds::sequencer::Sequencer;
use std::collections::HashMap;
use std::hash::RandomState;
use std::sync::Arc;
use tokio::sync::RwLock;

/// Business-logic half of the template; swap in the real output type and body.
async fn inner_fun(
    actor_pools: HashMap<String, ActorStorage>,
    account_manager: Arc<RwLock<AccountManager>>,
    id_resolver: Arc<RwLock<IdResolver>>,
    sequencer: Arc<RwLock<Sequencer>>,
) -> Result<()> {
    todo!();
}

/// Axum handler half of the template; swap in the real parameter and output types.
#[tracing::instrument(skip_all)]
#[axum::debug_handler(state = AppState)]
pub async fn fun(
    auth: AuthenticatedUser,
    Query(input): Query<atrium_api::com::atproto::repo::describe_repo::ParametersData>,
    State(actor_pools): State<HashMap<String, ActorStorage, RandomState>>,
    State(account_manager): State<Arc<RwLock<AccountManager>>>,
    State(id_resolver): State<Arc<RwLock<IdResolver>>>,
    State(sequencer): State<Arc<RwLock<Sequencer>>>,
    Json(body): Json<ApplyWritesInput>,
) -> Result<Json<()>, ApiError> {
    todo!();
}
+102 src/apis/com/atproto/repo/get_record.rs
···
//! Get a single record from a repository. Does not require auth.

use crate::pipethrough::{ProxyRequest, pipethrough};

use super::*;

use rsky_pds::pipethrough::OverrideOpts;

async fn inner_get_record(
    repo: String,
    collection: String,
    rkey: String,
    cid: Option<String>,
    req: ProxyRequest,
    actor_pools: HashMap<String, ActorStorage>,
    account_manager: Arc<RwLock<AccountManager>>,
) -> Result<GetRecordOutput> {
    let did = account_manager
        .read()
        .await
        .get_did_for_actor(&repo, None)
        .await?;

    // fetch from pds if available, if not then fetch from appview
    if let Some(did) = did {
        let uri = AtUri::make(did.clone(), Some(collection), Some(rkey))?;

        let mut actor_store = ActorStore::from_actor_pools(&did, &actor_pools).await;

        match actor_store.record.get_record(&uri, cid, None).await {
            Ok(Some(record)) if record.takedown_ref.is_none() => Ok(GetRecordOutput {
                uri: uri.to_string(),
                cid: Some(record.cid),
                value: serde_json::to_value(record.value)?,
            }),
            _ => bail!("Could not locate record: `{uri}`"),
        }
    } else {
        match req.cfg.bsky_app_view {
            None => bail!("Could not locate record"),
            Some(_) => match pipethrough(
                &req,
                None,
                OverrideOpts {
                    aud: None,
                    lxm: None,
                },
            )
            .await
            {
                Err(error) => {
                    tracing::error!("@LOG: ERROR: {error}");
                    bail!("Could not locate record")
                }
                Ok(res) => {
                    let output: GetRecordOutput = serde_json::from_slice(res.buffer.as_slice())?;
                    Ok(output)
                }
            },
        }
    }
}

/// Get a single record from a repository. Does not require auth.
/// - GET /xrpc/com.atproto.repo.getRecord
/// ### Query Parameters
/// - `repo`: `at-identifier` // The handle or DID of the repo.
/// - `collection`: `nsid` // The NSID of the record collection.
/// - `rkey`: `string` // The record key. <= 512 characters.
/// - `cid`: `cid` // The CID of the version of the record. If not specified, then return the most recent version.
/// ### Responses
/// - 200 OK: {"uri": "string","cid": "string","value": {}}
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `RecordNotFound`]}
/// - 401 Unauthorized
#[tracing::instrument(skip_all)]
#[axum::debug_handler(state = AppState)]
pub async fn get_record(
    Query(input): Query<ParametersData>,
    State(db_actors): State<HashMap<String, ActorStorage, RandomState>>,
    State(account_manager): State<Arc<RwLock<AccountManager>>>,
    req: ProxyRequest,
) -> Result<Json<GetRecordOutput>, ApiError> {
    let repo = input.repo;
    let collection = input.collection;
    let rkey = input.rkey;
    let cid = input.cid;
    match inner_get_record(repo, collection, rkey, cid, req, db_actors, account_manager).await {
        Ok(res) => Ok(Json(res)),
        Err(error) => {
            tracing::error!("@LOG: ERROR: {error}");
            Err(ApiError::RecordNotFound)
        }
    }
}

#[derive(serde::Deserialize, Debug)]
pub struct ParametersData {
    pub cid: Option<String>,
    pub collection: String,
    pub repo: String,
    pub rkey: String,
}
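
Review note: the handler serves locally hosted repos and falls back to the configured AppView via `pipethrough` otherwise, so the same call works for local and remote actors. A minimal sketch, placeholder host:

```rust
// Hypothetical fetch via GET /xrpc/com.atproto.repo.getRecord. If `repo` is not
// hosted on this PDS, the handler above proxies the request to the AppView.
async fn fetch_record(
    repo: &str,
    collection: &str,
    rkey: &str,
) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
    let res = reqwest::Client::new()
        .get("http://localhost:8000/xrpc/com.atproto.repo.getRecord")
        .query(&[("repo", repo), ("collection", collection), ("rkey", rkey)])
        .send()
        .await?
        .error_for_status()?
        .json::<serde_json::Value>()
        .await?;
    Ok(res["value"].clone())
}
```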
+183 src/apis/com/atproto/repo/import_repo.rs
···
//! Import a repo in the form of a CAR file. Requires auth, implemented by PDS.
use anyhow::Context as _;
use axum::{body::Bytes, http::HeaderMap};
use reqwest::header;
use rsky_common::env::env_int;
use rsky_repo::block_map::BlockMap;
use rsky_repo::car::{CarWithRoot, read_stream_car_with_root};
use rsky_repo::parse::get_and_parse_record;
use rsky_repo::repo::Repo;
use rsky_repo::sync::consumer::{VerifyRepoInput, verify_diff};
use rsky_repo::types::{RecordWriteDescript, VerifiedDiff};
use ubyte::ToByteUnit;

use super::*;

async fn from_data(bytes: Bytes) -> Result<CarWithRoot, ApiError> {
    let max_import_size = env_int("IMPORT_REPO_LIMIT").unwrap_or(100).megabytes();
    if bytes.len() > max_import_size {
        return Err(ApiError::InvalidRequest(format!(
            "CAR file is larger than maximum of {max_import_size}"
        )));
    }

    let mut cursor = std::io::Cursor::new(bytes);
    match read_stream_car_with_root(&mut cursor).await {
        Ok(car_with_root) => Ok(car_with_root),
        Err(error) => {
            tracing::error!("Error reading stream car with root\n{error}");
            Err(ApiError::InvalidRequest("Invalid CAR file".to_owned()))
        }
    }
}

#[tracing::instrument(skip_all)]
#[axum::debug_handler(state = AppState)]
/// Import a repo in the form of a CAR file. Requires Content-Length HTTP header to be set.
/// ### Request
/// - mime: application/vnd.ipld.car
/// - Body - required
pub async fn import_repo(
    // auth: AccessFullImport,
    auth: AuthenticatedUser,
    headers: HeaderMap,
    State(actor_pools): State<HashMap<String, ActorStorage, RandomState>>,
    body: Bytes,
) -> Result<(), ApiError> {
    // let requester = auth.access.credentials.unwrap().did.unwrap();
    let requester = auth.did();
    let mut actor_store = ActorStore::from_actor_pools(&requester, &actor_pools).await;

    // Check headers. Reject (rather than panic on) a missing or malformed Content-Length.
    let content_length = headers
        .get(header::CONTENT_LENGTH)
        .context("no content length provided")?
        .to_str()
        .map_err(anyhow::Error::from)
        .and_then(|content_length| content_length.parse::<u64>().map_err(anyhow::Error::from))
        .context("invalid content-length header")?;
    if content_length > env_int("IMPORT_REPO_LIMIT").unwrap_or(100).megabytes() {
        return Err(ApiError::InvalidRequest(format!(
            "Content-Length is greater than maximum of {}",
            env_int("IMPORT_REPO_LIMIT").unwrap_or(100).megabytes()
        )));
    };

    // Get current repo if it exists
    let curr_root: Option<Cid> = actor_store.get_repo_root().await;
    let curr_repo: Option<Repo> = match curr_root {
        None => None,
        Some(_root) => Some(Repo::load(actor_store.storage.clone(), curr_root).await?),
    };

    // Process imported car
    // let car_with_root = import_repo_input.car_with_root;
    let car_with_root: CarWithRoot = match from_data(body).await {
        Ok(car) => car,
        Err(error) => {
            tracing::error!("Error importing repo\n{error:?}");
            return Err(ApiError::InvalidRequest("Invalid CAR file".to_owned()));
        }
    };

    // Get verified difference from current repo and imported repo
    let mut imported_blocks: BlockMap = car_with_root.blocks;
    let imported_root: Cid = car_with_root.root;
    let opts = VerifyRepoInput {
        ensure_leaves: Some(false),
    };

    let diff: VerifiedDiff = match verify_diff(
        curr_repo,
        &mut imported_blocks,
        imported_root,
        None,
        None,
        Some(opts),
    )
    .await
    {
        Ok(res) => res,
        Err(error) => {
            tracing::error!("{:?}", error);
            return Err(ApiError::RuntimeError);
        }
    };

    let commit_data = diff.commit;
    let prepared_writes: Vec<PreparedWrite> =
        prepare_import_repo_writes(requester, diff.writes, &imported_blocks).await?;
    match actor_store
        .process_import_repo(commit_data, prepared_writes)
        .await
    {
        Ok(_res) => {}
        Err(error) => {
            tracing::error!("Error importing repo\n{error}");
            return Err(ApiError::RuntimeError);
        }
    }

    Ok(())
}

/// Converts a list of RecordWriteDescripts into a list of PreparedWrites
async fn prepare_import_repo_writes(
    did: String,
    writes: Vec<RecordWriteDescript>,
    blocks: &BlockMap,
) -> Result<Vec<PreparedWrite>, ApiError> {
    match stream::iter(writes)
        .then(|write| {
            let did = did.clone();
            async move {
                Ok::<PreparedWrite, anyhow::Error>(match write {
                    RecordWriteDescript::Create(write) => {
                        let parsed_record = get_and_parse_record(blocks, write.cid)?;
                        PreparedWrite::Create(
                            prepare_create(PrepareCreateOpts {
                                did: did.clone(),
                                collection: write.collection,
                                rkey: Some(write.rkey),
                                swap_cid: None,
                                record: parsed_record.record,
                                validate: Some(true),
                            })
                            .await?,
                        )
                    }
                    RecordWriteDescript::Update(write) => {
                        let parsed_record = get_and_parse_record(blocks, write.cid)?;
                        PreparedWrite::Update(
                            prepare_update(PrepareUpdateOpts {
                                did: did.clone(),
                                collection: write.collection,
                                rkey: write.rkey,
                                swap_cid: None,
                                record: parsed_record.record,
                                validate: Some(true),
                            })
                            .await?,
                        )
                    }
                    RecordWriteDescript::Delete(write) => {
                        PreparedWrite::Delete(prepare_delete(PrepareDeleteOpts {
                            did: did.clone(),
                            collection: write.collection,
                            rkey: write.rkey,
                            swap_cid: None,
                        })?)
                    }
                })
            }
        })
        .collect::<Vec<_>>()
        .await
        .into_iter()
        .collect::<Result<Vec<PreparedWrite>, _>>()
    {
        Ok(res) => Ok(res),
        Err(error) => {
            tracing::error!("Error preparing import repo writes\n{error}");
            Err(ApiError::RuntimeError)
        }
    }
}
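
Review note: a minimal driver for this endpoint, assuming the same placeholder host and token; `reqwest` sets Content-Length automatically for a `Vec<u8>` body, which satisfies the header check above.

```rust
// Hypothetical upload of a previously exported CAR file to
// POST /xrpc/com.atproto.repo.importRepo.
async fn import_car(path: &str) -> Result<(), Box<dyn std::error::Error>> {
    let car_bytes = tokio::fs::read(path).await?;
    reqwest::Client::new()
        .post("http://localhost:8000/xrpc/com.atproto.repo.importRepo")
        .bearer_auth("ACCESS_JWT") // placeholder access token
        .header(reqwest::header::CONTENT_TYPE, "application/vnd.ipld.car")
        .body(car_bytes)
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}
```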
+48 src/apis/com/atproto/repo/list_missing_blobs.rs
···
//! Returns a list of missing blobs for the requesting account. Intended to be used in the account migration flow.
use rsky_lexicon::com::atproto::repo::ListMissingBlobsOutput;
use rsky_pds::actor_store::blob::ListMissingBlobsOpts;

use super::*;

/// Returns a list of missing blobs for the requesting account. Intended to be used in the account migration flow.
/// - GET /xrpc/com.atproto.repo.listMissingBlobs
/// ### Query Parameters
/// - `limit`: `integer` // Possible values: >= 1 and <= 1000. Default value: 500.
/// - `cursor`: `string`
/// ### Responses
/// - 200 OK: {"cursor": "string","blobs": [object]}
#[tracing::instrument(skip_all)]
#[axum::debug_handler(state = AppState)]
pub async fn list_missing_blobs(
    user: AuthenticatedUser,
    Query(input): Query<atrium_repo::list_missing_blobs::ParametersData>,
    State(actor_pools): State<HashMap<String, ActorStorage, RandomState>>,
) -> Result<Json<ListMissingBlobsOutput>, ApiError> {
    let cursor = input.cursor;
    let limit = input.limit;
    let default_limit: atrium_api::types::LimitedNonZeroU16<1000> =
        atrium_api::types::LimitedNonZeroU16::try_from(500).expect("default limit");
    let limit: u16 = limit.unwrap_or(default_limit).into();
    // let did = auth.access.credentials.unwrap().did.unwrap();
    let did = user.did();

    let actor_store = ActorStore::from_actor_pools(&did, &actor_pools).await;

    match actor_store
        .blob
        .list_missing_blobs(ListMissingBlobsOpts { cursor, limit })
        .await
    {
        Ok(blobs) => {
            let cursor = blobs.last().map(|last_blob| last_blob.cid.clone());
            Ok(Json(ListMissingBlobsOutput { cursor, blobs }))
        }
        Err(error) => {
            tracing::error!("{error:?}");
            Err(ApiError::RuntimeError)
        }
    }
}
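
Review note: the handler sets the cursor to the CID of the last blob in each page, so a migration client pages until an empty result. A minimal sketch, placeholder host and token:

```rust
// Hypothetical migration helper: page through listMissingBlobs until exhausted.
async fn all_missing_blob_cids() -> Result<Vec<String>, Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    let mut cids = Vec::new();
    let mut cursor: Option<String> = None;
    loop {
        let mut req = client
            .get("http://localhost:8000/xrpc/com.atproto.repo.listMissingBlobs")
            .bearer_auth("ACCESS_JWT") // placeholder access token
            .query(&[("limit", "500")]);
        if let Some(ref c) = cursor {
            req = req.query(&[("cursor", c.as_str())]);
        }
        let page: serde_json::Value = req.send().await?.error_for_status()?.json().await?;
        let blobs = page["blobs"].as_array().cloned().unwrap_or_default();
        if blobs.is_empty() {
            break;
        }
        cids.extend(blobs.iter().filter_map(|b| b["cid"].as_str().map(String::from)));
        cursor = page["cursor"].as_str().map(String::from);
    }
    Ok(cids)
}
```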
+146 src/apis/com/atproto/repo/list_records.rs
···
//! List a range of records in a repository, matching a specific collection. Does not require auth.
use super::*;

// #[derive(serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq, Eq)]
// #[serde(rename_all = "camelCase")]
// /// Parameters for [`list_records`].
// pub(super) struct ListRecordsParameters {
//     /// The NSID of the record type.
//     pub collection: Nsid,
//     /// The cursor to start from.
//     #[serde(skip_serializing_if = "core::option::Option::is_none")]
//     pub cursor: Option<String>,
//     /// The number of records to return.
//     #[serde(skip_serializing_if = "core::option::Option::is_none")]
//     pub limit: Option<String>,
//     /// The handle or DID of the repo.
//     pub repo: AtIdentifier,
//     /// Flag to reverse the order of the returned records.
//     #[serde(skip_serializing_if = "core::option::Option::is_none")]
//     pub reverse: Option<bool>,
//     /// DEPRECATED: The highest sort-ordered rkey to stop at (exclusive)
//     #[serde(skip_serializing_if = "core::option::Option::is_none")]
//     pub rkey_end: Option<String>,
//     /// DEPRECATED: The lowest sort-ordered rkey to start from (exclusive)
//     #[serde(skip_serializing_if = "core::option::Option::is_none")]
//     pub rkey_start: Option<String>,
// }

#[expect(non_snake_case, clippy::too_many_arguments)]
async fn inner_list_records(
    // The handle or DID of the repo.
    repo: String,
    // The NSID of the record type.
    collection: String,
    // The number of records to return.
    limit: u16,
    cursor: Option<String>,
    // DEPRECATED: The lowest sort-ordered rkey to start from (exclusive)
    rkeyStart: Option<String>,
    // DEPRECATED: The highest sort-ordered rkey to stop at (exclusive)
    rkeyEnd: Option<String>,
    // Flag to reverse the order of the returned records.
    reverse: bool,
    // The actor pools
    actor_pools: HashMap<String, ActorStorage>,
    account_manager: Arc<RwLock<AccountManager>>,
) -> Result<ListRecordsOutput> {
    if limit > 100 {
        bail!("Error: limit can not be greater than 100")
    }
    let did = account_manager
        .read()
        .await
        .get_did_for_actor(&repo, None)
        .await?;
    if let Some(did) = did {
        let mut actor_store = ActorStore::from_actor_pools(&did, &actor_pools).await;

        let records: Vec<Record> = actor_store
            .record
            .list_records_for_collection(
                collection,
                limit as i64,
                reverse,
                cursor,
                rkeyStart,
                rkeyEnd,
                None,
            )
            .await?
            .into_iter()
            .map(|record| {
                Ok(Record {
                    uri: record.uri.clone(),
                    cid: record.cid.clone(),
                    value: serde_json::to_value(record)?,
                })
            })
            .collect::<Result<Vec<Record>>>()?;

        let cursor: Option<String> = if let Some(last_record) = records.last() {
            let last_at_uri: AtUri = last_record.uri.clone().try_into()?;
            Some(last_at_uri.get_rkey())
        } else {
            None
        };
        Ok(ListRecordsOutput { records, cursor })
    } else {
        bail!("Could not find repo: {repo}")
    }
}

/// List a range of records in a repository, matching a specific collection. Does not require auth.
/// - GET /xrpc/com.atproto.repo.listRecords
/// ### Query Parameters
/// - `repo`: `at-identifier` // The handle or DID of the repo.
/// - `collection`: `nsid` // The NSID of the record type.
/// - `limit`: `integer` // The maximum number of records to return. Default 50, >=1 and <=100.
/// - `cursor`: `string`
/// - `reverse`: `boolean` // Flag to reverse the order of the returned records.
/// ### Responses
/// - 200 OK: {"cursor": "string","records": [{"uri": "string","cid": "string","value": {}}]}
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
/// - 401 Unauthorized
#[tracing::instrument(skip_all)]
#[allow(non_snake_case)]
#[axum::debug_handler(state = AppState)]
pub async fn list_records(
    Query(input): Query<atrium_repo::list_records::ParametersData>,
    State(actor_pools): State<HashMap<String, ActorStorage, RandomState>>,
    State(account_manager): State<Arc<RwLock<AccountManager>>>,
) -> Result<Json<ListRecordsOutput>, ApiError> {
    let repo = input.repo;
    let collection = input.collection;
    let limit: Option<u8> = input.limit.map(u8::from);
    let limit: Option<u16> = limit.map(|x| x.into());
    let cursor = input.cursor;
    let reverse = input.reverse;
    let rkeyStart = None;
    let rkeyEnd = None;

    let limit = limit.unwrap_or(50);
    let reverse = reverse.unwrap_or(false);

    match inner_list_records(
        repo.into(),
        collection.into(),
        limit,
        cursor,
        rkeyStart,
        rkeyEnd,
        reverse,
        actor_pools,
        account_manager,
    )
    .await
    {
        Ok(res) => Ok(Json(res)),
        Err(error) => {
            tracing::error!("@LOG: ERROR: {error}");
            Err(ApiError::RuntimeError)
        }
    }
}
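
Review note: the output cursor is the rkey of the last returned record, so clients walk a collection by feeding it back. A minimal sketch, placeholder host, repo, and collection:

```rust
use serde_json::Value;

// Hypothetical pagination walk for GET /xrpc/com.atproto.repo.listRecords.
async fn dump_collection() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    let mut cursor: Option<String> = None;
    loop {
        let mut query = vec![
            ("repo", "did:plc:example".to_owned()),
            ("collection", "app.bsky.feed.post".to_owned()),
            ("limit", "100".to_owned()),
        ];
        if let Some(ref c) = cursor {
            query.push(("cursor", c.clone()));
        }
        let page: Value = client
            .get("http://localhost:8000/xrpc/com.atproto.repo.listRecords")
            .query(&query)
            .send()
            .await?
            .error_for_status()?
            .json()
            .await?;
        let records = page["records"].as_array().cloned().unwrap_or_default();
        for record in &records {
            println!("{}", record["uri"]);
        }
        // The handler sets the cursor to the rkey of the last returned record.
        cursor = page["cursor"].as_str().map(str::to_owned);
        if cursor.is_none() || records.is_empty() {
            break;
        }
    }
    Ok(())
}
```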
+111 src/apis/com/atproto/repo/mod.rs
···
use atrium_api::com::atproto::repo as atrium_repo;
use axum::{
    Router,
    routing::{get, post},
};
use constcat::concat;

pub mod apply_writes;
pub mod create_record;
pub mod delete_record;
pub mod describe_repo;
pub mod get_record;
pub mod import_repo;
pub mod list_missing_blobs;
pub mod list_records;
pub mod put_record;
pub mod upload_blob;

use crate::account_manager::AccountManager;
use crate::account_manager::helpers::account::AvailabilityFlags;
use crate::{
    actor_store::ActorStore,
    auth::AuthenticatedUser,
    error::ApiError,
    serve::{ActorStorage, AppState},
};
use anyhow::{Result, bail};
use axum::extract::Query;
use axum::{Json, extract::State};
use cidv10::Cid;
use futures::stream::{self, StreamExt};
use rsky_identity::IdResolver;
use rsky_identity::types::DidDocument;
use rsky_lexicon::com::atproto::repo::DeleteRecordInput;
use rsky_lexicon::com::atproto::repo::DescribeRepoOutput;
use rsky_lexicon::com::atproto::repo::GetRecordOutput;
use rsky_lexicon::com::atproto::repo::{ApplyWritesInput, ApplyWritesInputRefWrite};
use rsky_lexicon::com::atproto::repo::{CreateRecordInput, CreateRecordOutput};
use rsky_lexicon::com::atproto::repo::{ListRecordsOutput, Record};
// use rsky_pds::pipethrough::{OverrideOpts, ProxyRequest, pipethrough};
use rsky_pds::repo::prepare::{
    PrepareCreateOpts, PrepareDeleteOpts, PrepareUpdateOpts, prepare_create, prepare_delete,
    prepare_update,
};
use rsky_pds::sequencer::Sequencer;
use rsky_repo::types::PreparedDelete;
use rsky_repo::types::PreparedWrite;
use rsky_syntax::aturi::AtUri;
use rsky_syntax::handle::INVALID_HANDLE;
use std::collections::HashMap;
use std::hash::RandomState;
use std::str::FromStr;
use std::sync::Arc;
use tokio::sync::RwLock;

/// These endpoints are part of the atproto PDS repository management APIs. \
/// Requests usually require authentication (unlike the com.atproto.sync.* endpoints), and are made directly to the user's own PDS instance.
/// ### Routes
/// - AP /xrpc/com.atproto.repo.applyWrites -> [`apply_writes`]
/// - AP /xrpc/com.atproto.repo.createRecord -> [`create_record`]
/// - AP /xrpc/com.atproto.repo.putRecord -> [`put_record`]
/// - AP /xrpc/com.atproto.repo.deleteRecord -> [`delete_record`]
/// - AP /xrpc/com.atproto.repo.uploadBlob -> [`upload_blob`]
/// - AP /xrpc/com.atproto.repo.importRepo -> [`import_repo`]
/// - UG /xrpc/com.atproto.repo.describeRepo -> [`describe_repo`]
/// - UG /xrpc/com.atproto.repo.getRecord -> [`get_record`]
/// - UG /xrpc/com.atproto.repo.listRecords -> [`list_records`]
/// - AG /xrpc/com.atproto.repo.listMissingBlobs -> [`list_missing_blobs`]
pub(crate) fn routes() -> Router<AppState> {
    Router::new()
        .route(
            concat!("/", atrium_repo::apply_writes::NSID),
            post(apply_writes::apply_writes),
        )
        .route(
            concat!("/", atrium_repo::create_record::NSID),
            post(create_record::create_record),
        )
        .route(
            concat!("/", atrium_repo::put_record::NSID),
            post(put_record::put_record),
        )
        .route(
            concat!("/", atrium_repo::delete_record::NSID),
            post(delete_record::delete_record),
        )
        .route(
            concat!("/", atrium_repo::upload_blob::NSID),
            post(upload_blob::upload_blob),
        )
        .route(
            concat!("/", atrium_repo::describe_repo::NSID),
            get(describe_repo::describe_repo),
        )
        .route(
            concat!("/", atrium_repo::get_record::NSID),
            get(get_record::get_record),
        )
        .route(
            concat!("/", atrium_repo::import_repo::NSID),
            post(import_repo::import_repo),
        )
        .route(
            concat!("/", atrium_repo::list_missing_blobs::NSID),
            get(list_missing_blobs::list_missing_blobs),
        )
        .route(
            concat!("/", atrium_repo::list_records::NSID),
            get(list_records::list_records),
        )
}
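
Review note: a minimal sketch of how `routes()` is presumably mounted; each route above registers as `/{nsid}`, so nesting under `/xrpc` yields `/xrpc/com.atproto.repo.*`. The nesting path and `state` value are assumptions based on the file layout, not code from this diff.

```rust
use axum::Router;

use crate::serve::AppState;

// Hypothetical top-level wiring of the repo XRPC router.
fn app(state: AppState) -> Router {
    Router::new()
        .nest("/xrpc", crate::apis::com::atproto::repo::routes())
        .with_state(state)
}
```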
+157 src/apis/com/atproto/repo/put_record.rs
···
//! Write a repository record, creating or updating it as needed. Requires auth, implemented by PDS.
use anyhow::bail;
use rsky_lexicon::com::atproto::repo::{PutRecordInput, PutRecordOutput};
use rsky_repo::types::CommitDataWithOps;

use super::*;

#[tracing::instrument(skip_all)]
async fn inner_put_record(
    body: PutRecordInput,
    auth: AuthenticatedUser,
    sequencer: Arc<RwLock<Sequencer>>,
    actor_pools: HashMap<String, ActorStorage>,
    account_manager: Arc<RwLock<AccountManager>>,
) -> Result<PutRecordOutput> {
    let PutRecordInput {
        repo,
        collection,
        rkey,
        validate,
        record,
        swap_record,
        swap_commit,
    } = body;
    let account = account_manager
        .read()
        .await
        .get_account(
            &repo,
            Some(AvailabilityFlags {
                include_deactivated: Some(true),
                include_taken_down: None,
            }),
        )
        .await?;
    if let Some(account) = account {
        if account.deactivated_at.is_some() {
            bail!("Account is deactivated")
        }
        let did = account.did;
        // if did != auth.access.credentials.unwrap().did.unwrap() {
        if did != auth.did() {
            bail!("AuthRequiredError")
        }
        let uri = AtUri::make(did.clone(), Some(collection.clone()), Some(rkey.clone()))?;
        let swap_commit_cid = match swap_commit {
            Some(swap_commit) => Some(Cid::from_str(&swap_commit)?),
            None => None,
        };
        let swap_record_cid = match swap_record {
            Some(swap_record) => Some(Cid::from_str(&swap_record)?),
            None => None,
        };
        let (commit, write): (Option<CommitDataWithOps>, PreparedWrite) = {
            let mut actor_store = ActorStore::from_actor_pools(&did, &actor_pools).await;

            let current = actor_store
                .record
                .get_record(&uri, None, Some(true))
                .await?;
            tracing::debug!("@LOG: debug inner_put_record, current: {current:?}");
            let write: PreparedWrite = if current.is_some() {
                PreparedWrite::Update(
                    prepare_update(PrepareUpdateOpts {
                        did: did.clone(),
                        collection,
                        rkey,
                        swap_cid: swap_record_cid,
                        record: serde_json::from_value(record)?,
                        validate,
                    })
                    .await?,
                )
            } else {
                PreparedWrite::Create(
                    prepare_create(PrepareCreateOpts {
                        did: did.clone(),
                        collection,
                        rkey: Some(rkey),
                        swap_cid: swap_record_cid,
                        record: serde_json::from_value(record)?,
                        validate,
                    })
                    .await?,
                )
            };

            match current {
                Some(current) if current.cid == write.cid().expect("write cid").to_string() => {
                    (None, write)
                }
                _ => {
                    let commit = actor_store
                        .process_writes(vec![write.clone()], swap_commit_cid)
                        .await?;
                    (Some(commit), write)
                }
            }
        };

        if let Some(commit) = commit {
            _ = sequencer
                .write()
                .await
                .sequence_commit(did.clone(), commit.clone())
                .await?;
            account_manager
                .write()
                .await
                .update_repo_root(
                    did,
                    commit.commit_data.cid,
                    commit.commit_data.rev,
                    &actor_pools,
                )
                .await?;
        }
        Ok(PutRecordOutput {
            uri: write.uri().to_string(),
            cid: write.cid().expect("write cid").to_string(),
        })
    } else {
        bail!("Could not find repo: `{repo}`")
    }
}

/// Write a repository record, creating or updating it as needed. Requires auth, implemented by PDS.
/// - POST /xrpc/com.atproto.repo.putRecord
/// ### Request Body
/// - `repo`: `at-identifier` // The handle or DID of the repo (aka, current account).
/// - `collection`: `nsid` // The NSID of the record collection.
/// - `rkey`: `string` // The record key. <= 512 characters.
/// - `validate`: `boolean` // Can be set to 'false' to skip Lexicon schema validation of record data, 'true' to require it, or leave unset to validate only for known Lexicons.
/// - `record`
/// - `swap_record`: `cid` // Compare and swap with the previous record by CID. WARNING: nullable and optional field; may cause problems with golang implementation
/// - `swap_commit`: `cid` // Compare and swap with the previous commit by CID.
/// ### Responses
/// - 200 OK: {"uri": "string","cid": "string","commit": {"cid": "string","rev": "string"},"validationStatus": "valid | unknown"}
/// - 400 Bad Request: {error:"`InvalidRequest` | `ExpiredToken` | `InvalidToken` | `InvalidSwap`"}
/// - 401 Unauthorized
#[tracing::instrument(skip_all)]
pub async fn put_record(
    auth: AuthenticatedUser,
    State(sequencer): State<Arc<RwLock<Sequencer>>>,
    State(actor_pools): State<HashMap<String, ActorStorage, RandomState>>,
    State(account_manager): State<Arc<RwLock<AccountManager>>>,
    Json(body): Json<PutRecordInput>,
) -> Result<Json<PutRecordOutput>, ApiError> {
    tracing::debug!("@LOG: debug put_record {body:#?}");
    match inner_put_record(body, auth, sequencer, actor_pools, account_manager).await {
        Ok(res) => Ok(Json(res)),
        Err(error) => {
            tracing::error!("@LOG: ERROR: {error}");
            Err(ApiError::RuntimeError)
        }
    }
}
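
Review note: putRecord is create-or-update, and the handler only sequences a new commit when the prepared write's CID differs from the stored record, so re-putting identical content is cheap. A minimal sketch, placeholder host, token, and DID:

```rust
use serde_json::json;

// Hypothetical upsert via POST /xrpc/com.atproto.repo.putRecord. Putting the
// same record twice should return the same CID without a second commit.
async fn upsert_profile() -> Result<(), Box<dyn std::error::Error>> {
    let body = json!({
        "repo": "did:plc:example",
        "collection": "app.bsky.actor.profile",
        "rkey": "self",
        "record": { "$type": "app.bsky.actor.profile", "displayName": "Example" }
    });
    let client = reqwest::Client::new();
    for _ in 0..2 {
        let res: serde_json::Value = client
            .post("http://localhost:8000/xrpc/com.atproto.repo.putRecord")
            .bearer_auth("ACCESS_JWT") // placeholder access token
            .json(&body)
            .send()
            .await?
            .error_for_status()?
            .json()
            .await?;
        println!("cid = {}", res["cid"]);
    }
    Ok(())
}
```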
+117 src/apis/com/atproto/repo/upload_blob.rs
···
//! Upload a new blob, to be referenced from a repository record.
use crate::config::AppConfig;
use anyhow::Context as _;
use axum::{
    body::Bytes,
    http::{self, HeaderMap},
};
use rsky_lexicon::com::atproto::repo::{Blob, BlobOutput};
use rsky_repo::types::{BlobConstraint, PreparedBlobRef};
// use rsky_common::BadContentTypeError;

use super::*;

async fn inner_upload_blob(
    auth: AuthenticatedUser,
    blob: Bytes,
    content_type: String,
    actor_pools: HashMap<String, ActorStorage>,
) -> Result<BlobOutput> {
    // let requester = auth.access.credentials.unwrap().did.unwrap();
    let requester = auth.did();

    let actor_store = ActorStore::from_actor_pools(&requester, &actor_pools).await;

    let metadata = actor_store
        .blob
        .upload_blob_and_get_metadata(content_type, blob)
        .await?;
    let blobref = actor_store.blob.track_untethered_blob(metadata).await?;

    // make the blob permanent if an associated record is already indexed
    let records_for_blob = actor_store
        .blob
        .get_records_for_blob(blobref.get_cid()?)
        .await?;

    if !records_for_blob.is_empty() {
        actor_store
            .blob
            .verify_blob_and_make_permanent(PreparedBlobRef {
                cid: blobref.get_cid()?,
                mime_type: blobref.get_mime_type().to_string(),
                constraints: BlobConstraint {
                    max_size: None,
                    accept: None,
                },
            })
            .await?;
    }

    Ok(BlobOutput {
        blob: Blob {
            r#type: Some("blob".to_owned()),
            r#ref: Some(blobref.get_cid()?),
            cid: None,
            mime_type: blobref.get_mime_type().to_string(),
            size: blobref.get_size(),
            original: None,
        },
    })
}

/// Upload a new blob, to be referenced from a repository record. \
/// The blob will be deleted if it is not referenced within a time window (eg, minutes). \
/// Blob restrictions (mimetype, size, etc) are enforced when the reference is created. \
/// Requires auth, implemented by PDS.
/// - POST /xrpc/com.atproto.repo.uploadBlob
/// ### Request Body
/// ### Responses
/// - 200 OK: {"blob": "binary"}
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
/// - 401 Unauthorized
#[tracing::instrument(skip_all)]
#[axum::debug_handler(state = AppState)]
pub async fn upload_blob(
    auth: AuthenticatedUser,
    headers: HeaderMap,
    State(config): State<AppConfig>,
    State(actor_pools): State<HashMap<String, ActorStorage, RandomState>>,
    blob: Bytes,
) -> Result<Json<BlobOutput>, ApiError> {
    let content_length = headers
        .get(http::header::CONTENT_LENGTH)
        .context("no content length provided")?
        .to_str()
        .map_err(anyhow::Error::from)
        .and_then(|content_length| content_length.parse::<u64>().map_err(anyhow::Error::from))
        .context("invalid content-length header")?;
    let content_type = headers
        .get(http::header::CONTENT_TYPE)
        .context("no content-type provided")?
        .to_str()
        // .map_err(BadContentTypeError::MissingType)
        .context("invalid content-type provided")?
        .to_owned();

    if content_length > config.blob.limit {
        return Err(ApiError::InvalidRequest(format!(
            "Content-Length is greater than maximum of {}",
            config.blob.limit
        )));
    };
    if blob.len() as u64 > config.blob.limit {
        return Err(ApiError::InvalidRequest(format!(
            "Blob size is greater than maximum of {} despite content-length header",
            config.blob.limit
        )));
    };

    match inner_upload_blob(auth, blob, content_type, actor_pools).await {
        Ok(res) => Ok(Json(res)),
        Err(error) => {
            tracing::error!("{error:?}");
            Err(ApiError::RuntimeError)
        }
    }
}
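
Review note: an uploaded blob stays untethered until a record references it, at which point the handler's `verify_blob_and_make_permanent` path (or the reference-time check) pins it. A minimal two-step sketch, placeholder host, token, and DID:

```rust
// Hypothetical flow: upload a blob, then reference it from a record before the
// unreferenced-blob window expires.
async fn upload_avatar(bytes: Vec<u8>) -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    let blob: serde_json::Value = client
        .post("http://localhost:8000/xrpc/com.atproto.repo.uploadBlob")
        .bearer_auth("ACCESS_JWT") // placeholder access token
        .header(reqwest::header::CONTENT_TYPE, "image/png")
        .body(bytes)
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;

    // Reference the returned blob from a profile record so it becomes permanent.
    client
        .post("http://localhost:8000/xrpc/com.atproto.repo.putRecord")
        .bearer_auth("ACCESS_JWT")
        .json(&serde_json::json!({
            "repo": "did:plc:example",
            "collection": "app.bsky.actor.profile",
            "rkey": "self",
            "record": { "$type": "app.bsky.actor.profile", "avatar": blob["blob"] }
        }))
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}
```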
+791 src/apis/com/atproto/server/server.rs
···
//! Server endpoints. (/xrpc/com.atproto.server.*)
use std::{collections::HashMap, str::FromStr as _};

use anyhow::{Context as _, anyhow};
use argon2::{
    Argon2, PasswordHash, PasswordHasher as _, PasswordVerifier as _, password_hash::SaltString,
};
use atrium_api::{
    com::atproto::server,
    types::string::{Datetime, Did, Handle, Tid},
};
use atrium_crypto::keypair::Did as _;
use atrium_repo::{
    Cid, Repository,
    blockstore::{AsyncBlockStoreWrite as _, CarStore, DAG_CBOR, SHA2_256},
};
use axum::{
    Json, Router,
    extract::{Query, Request, State},
    http::StatusCode,
    routing::{get, post},
};
use constcat::concat;
use metrics::counter;
use rand::Rng as _;
use sha2::Digest as _;
use uuid::Uuid;

use crate::{
    AppState, Client, Db, Error, Result, RotationKey, SigningKey,
    auth::{self, AuthenticatedUser},
    config::AppConfig,
    firehose::{Commit, FirehoseProducer},
    metrics::AUTH_FAILED,
    plc::{self, PlcOperation, PlcService},
    storage,
};

/// This is a dummy password hash that can be used in absence of a real password.
const DUMMY_PASSWORD: &str = "$argon2id$v=19$m=19456,t=2,p=1$En2LAfHjeO0SZD5IUU1Abg$RpS8nHhhqY4qco2uyd41p9Y/1C+Lvi214MAWukzKQMI";

/// Create an invite code.
/// - POST /xrpc/com.atproto.server.createInviteCode
/// ### Request Body
/// - `useCount`: integer
/// - `forAccount`: string (optional)
/// ### Responses
/// - 200 OK: {code: string}
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
/// - 401 Unauthorized
async fn create_invite_code(
    _user: AuthenticatedUser,
    State(db): State<Db>,
    Json(input): Json<server::create_invite_code::Input>,
) -> Result<Json<server::create_invite_code::Output>> {
    let uuid = Uuid::new_v4().to_string();
    let did = input.for_account.as_deref();
    let count = std::cmp::min(input.use_count, 100); // Maximum of 100 uses for any code.

    if count <= 0 {
        return Err(anyhow!("use_count must be greater than 0").into());
    }

    Ok(Json(
        server::create_invite_code::OutputData {
            code: sqlx::query_scalar!(
                r#"
                INSERT INTO invites (id, did, count, created_at)
                VALUES (?, ?, ?, datetime('now'))
                RETURNING id
                "#,
                uuid,
                did,
                count,
            )
            .fetch_one(&db)
            .await
            .context("failed to create new invite code")?,
        }
        .into(),
    ))
}
+
84
+
#[expect(clippy::too_many_lines, reason = "TODO: refactor")]
85
+
/// Create an account. Implemented by PDS.
86
+
/// - POST /xrpc/com.atproto.server.createAccount
87
+
/// ### Request Body
88
+
/// - `email`: string
89
+
/// - `handle`: string (required)
90
+
/// - `did`: string - Pre-existing atproto DID, being imported to a new account.
91
+
/// - `inviteCode`: string
92
+
/// - `verificationCode`: string
93
+
/// - `verificationPhone`: string
94
+
/// - `password`: string - Initial account password. May need to meet instance-specific password strength requirements.
95
+
/// - `recoveryKey`: string - DID PLC rotation key (aka, recovery key) to be included in PLC creation operation.
96
+
/// - `plcOp`: object
97
+
/// ## Responses
98
+
/// - 200 OK: {"accessJwt": "string","refreshJwt": "string","handle": "string","did": "string","didDoc": {}}
99
+
/// - 400 Bad Request: {error: [`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `InvalidHandle`, `InvalidPassword`, \
100
+
/// `InvalidInviteCode`, `HandleNotAvailable`, `UnsupportedDomain`, `UnresolvableDid`, `IncompatibleDidDoc`)}
101
+
/// - 401 Unauthorized
102
+
async fn create_account(
103
+
State(db): State<Db>,
104
+
State(skey): State<SigningKey>,
105
+
State(rkey): State<RotationKey>,
106
+
State(client): State<Client>,
107
+
State(config): State<AppConfig>,
108
+
State(fhp): State<FirehoseProducer>,
109
+
Json(input): Json<server::create_account::Input>,
110
+
) -> Result<Json<server::create_account::Output>> {
111
+
let email = input.email.as_deref().context("no email provided")?;
112
+
// Hash the user's password.
113
+
let pass = Argon2::default()
114
+
.hash_password(
115
+
input
116
+
.password
117
+
.as_deref()
118
+
.context("no password provided")?
119
+
.as_bytes(),
120
+
SaltString::generate(&mut rand::thread_rng()).as_salt(),
121
+
)
122
+
.context("failed to hash password")?
123
+
.to_string();
124
+
let handle = input.handle.as_str().to_owned();
125
+
126
+
// TODO: Handle the account migration flow.
127
+
// Users will hit this endpoint with a service-level authentication token.
128
+
//
129
+
// https://github.com/bluesky-social/pds/blob/main/ACCOUNT_MIGRATION.md
130
+
131
+
// TODO: `input.plc_op`
132
+
if input.plc_op.is_some() {
133
+
return Err(Error::unimplemented(anyhow!("plc_op")));
134
+
}
135
+
136
+
let recovery_keys = if let Some(ref key) = input.recovery_key {
137
+
// Ensure the provided recovery key is valid.
138
+
if let Err(error) = atrium_crypto::did::parse_did_key(key) {
139
+
return Err(Error::with_status(
140
+
StatusCode::BAD_REQUEST,
141
+
anyhow::Error::new(error).context("provided recovery key is in invalid format"),
142
+
));
143
+
}
144
+
145
+
// Enroll the user-provided recovery key at a higher priority than our own.
146
+
vec![key.clone(), rkey.did()]
147
+
} else {
148
+
vec![rkey.did()]
149
+
};
150
+
151
+
// Begin a new transaction to actually create the user's profile.
152
+
// Unless committed, the transaction will be automatically rolled back.
153
+
let mut tx = db.begin().await.context("failed to begin transaction")?;
154
+
155
+
// TODO: Make this its own toggle instead of tied to test mode
156
+
if !config.test {
157
+
let _invite = match input.invite_code {
158
+
Some(ref code) => {
159
+
let invite: Option<String> = sqlx::query_scalar!(
160
+
r#"
161
+
UPDATE invites
162
+
SET count = count - 1
163
+
WHERE id = ?
164
+
AND count > 0
165
+
RETURNING id
166
+
"#,
167
+
code
168
+
)
169
+
.fetch_optional(&mut *tx)
170
+
.await
171
+
.context("failed to check invite code")?;
172
+
173
+
invite.context("invalid invite code")?
174
+
}
175
+
None => {
176
+
return Err(anyhow!("invite code required").into());
177
+
}
178
+
};
179
+
}
180
+
181
+
// Account can be created. Synthesize a new DID for the user.
182
+
// https://github.com/did-method-plc/did-method-plc?tab=readme-ov-file#did-creation
183
+
let op = plc::sign_op(
184
+
&rkey,
185
+
PlcOperation {
186
+
typ: "plc_operation".to_owned(),
187
+
rotation_keys: recovery_keys,
188
+
verification_methods: HashMap::from([("atproto".to_owned(), skey.did())]),
189
+
also_known_as: vec![format!("at://{}", input.handle.as_str())],
190
+
services: HashMap::from([(
191
+
"atproto_pds".to_owned(),
192
+
PlcService::Pds {
193
+
endpoint: format!("https://{}", config.host_name),
194
+
},
195
+
)]),
196
+
prev: None,
197
+
},
198
+
)
199
+
.context("failed to sign genesis op")?;
200
+
let op_bytes = serde_ipld_dagcbor::to_vec(&op).context("failed to encode genesis op")?;
201
+
202
+
let did_hash = {
203
+
let digest = base32::encode(
204
+
base32::Alphabet::Rfc4648Lower { padding: false },
205
+
sha2::Sha256::digest(&op_bytes).as_slice(),
206
+
);
207
+
if digest.len() < 24 {
208
+
return Err(anyhow!("digest too short").into());
209
+
}
210
+
#[expect(clippy::string_slice, reason = "digest length confirmed")]
211
+
digest[..24].to_owned()
212
+
};
213
+
let did = format!("did:plc:{did_hash}");
214
+
215
+
let doc = tokio::fs::File::create(config.plc.path.join(format!("{did_hash}.car")))
216
+
.await
217
+
.context("failed to create did doc")?;
218
+
219
+
let mut plc_doc = CarStore::create(doc)
220
+
.await
221
+
.context("failed to create did doc")?;
222
+
223
+
let plc_cid = plc_doc
224
+
.write_block(DAG_CBOR, SHA2_256, &op_bytes)
225
+
.await
226
+
.context("failed to write genesis commit")?
227
+
.to_string();
228
+
229
+
if !config.test {
230
+
// Send the new account's data to the PLC directory.
231
+
plc::submit(&client, &did, &op)
232
+
.await
233
+
.context("failed to submit PLC operation to directory")?;
234
+
}
235
+
236
+
// Write out an initial commit for the user.
237
+
// https://atproto.com/guides/account-lifecycle
238
+
let (cid, rev, store) = async {
239
+
let store = storage::create_storage_for_did(&config.repo, &did_hash)
240
+
.await
241
+
.context("failed to create storage")?;
242
+
243
+
// Initialize the repository with the storage
244
+
let repo_builder = Repository::create(
245
+
store,
246
+
Did::from_str(&did).expect("should be valid DID format"),
247
+
)
248
+
.await
249
+
.context("failed to initialize user repo")?;
250
+
251
+
// Sign the root commit.
252
+
let sig = skey
253
+
.sign(&repo_builder.bytes())
254
+
.context("failed to sign root commit")?;
255
+
let mut repo = repo_builder
256
+
.finalize(sig)
257
+
.await
258
+
.context("failed to attach signature to root commit")?;
259
+
260
+
let root = repo.root();
261
+
let rev = repo.commit().rev();
262
+
263
+
// Create a temporary CAR store for firehose events
264
+
let mut mem = Vec::new();
265
+
let mut firehose_store =
266
+
CarStore::create_with_roots(std::io::Cursor::new(&mut mem), [repo.root()])
267
+
.await
268
+
.context("failed to create temp carstore")?;
269
+
270
+
repo.export_into(&mut firehose_store)
271
+
.await
272
+
.context("failed to export repository")?;
273
+
274
+
Ok::<(Cid, Tid, Vec<u8>), anyhow::Error>((root, rev, mem))
275
+
}
276
+
.await
277
+
.context("failed to create user repo")?;
278
+
279
+
let cid_str = cid.to_string();
280
+
let rev_str = rev.as_str();
281
+
282
+
_ = sqlx::query!(
283
+
r#"
284
+
INSERT INTO accounts (did, email, password, root, plc_root, rev, created_at)
285
+
VALUES (?, ?, ?, ?, ?, ?, datetime('now'));
286
+
287
+
INSERT INTO handles (did, handle, created_at)
288
+
VALUES (?, ?, datetime('now'));
289
+
290
+
-- Cleanup stale invite codes
291
+
DELETE FROM invites
292
+
WHERE count <= 0;
293
+
"#,
294
+
did,
295
+
email,
296
+
pass,
297
+
cid_str,
298
+
plc_cid,
299
+
rev_str,
300
+
did,
301
+
handle
302
+
)
303
+
.execute(&mut *tx)
304
+
.await
305
+
.context("failed to create new account")?;
306
+
307
+
// The account is fully created. Commit the SQL transaction to the database.
308
+
tx.commit().await.context("failed to commit transaction")?;
309
+
310
+
// Broadcast the identity event now that the new identity is resolvable on the public directory.
311
+
fhp.identity(
312
+
atrium_api::com::atproto::sync::subscribe_repos::IdentityData {
313
+
did: Did::from_str(&did).expect("should be valid DID format"),
314
+
handle: Some(Handle::new(handle).expect("should be valid handle")),
315
+
seq: 0, // Filled by firehose later.
316
+
time: Datetime::now(),
317
+
},
318
+
)
319
+
.await;
320
+
321
+
// The new account is now active on this PDS, so we can broadcast the account firehose event.
322
+
fhp.account(
323
+
atrium_api::com::atproto::sync::subscribe_repos::AccountData {
324
+
active: true,
325
+
did: Did::from_str(&did).expect("should be valid DID format"),
326
+
seq: 0, // Filled by firehose later.
327
+
status: None, // "takedown" / "suspended" / "deactivated"
328
+
time: Datetime::now(),
329
+
},
330
+
)
331
+
.await;
332
+
333
+
let did = Did::from_str(&did).expect("should be valid DID format");
334
+
335
+
fhp.commit(Commit {
336
+
car: store,
337
+
ops: Vec::new(),
338
+
cid,
339
+
rev: rev.to_string(),
340
+
did: did.clone(),
341
+
pcid: None,
342
+
blobs: Vec::new(),
343
+
})
344
+
.await;
345
+
346
+
// Finally, sign some authentication tokens for the new user.
347
+
let token = auth::sign(
348
+
&skey,
349
+
"at+jwt",
350
+
&serde_json::json!({
351
+
"scope": "com.atproto.access",
352
+
"sub": did,
353
+
"iat": chrono::Utc::now().timestamp(),
354
+
"exp": chrono::Utc::now().checked_add_signed(chrono::Duration::hours(4)).context("should be valid time")?.timestamp(),
355
+
"aud": format!("did:web:{}", config.host_name)
356
+
}),
357
+
)
358
+
.context("failed to sign jwt")?;
359
+
360
+
let refresh_token = auth::sign(
361
+
&skey,
362
+
"refresh+jwt",
363
+
&serde_json::json!({
364
+
"scope": "com.atproto.refresh",
365
+
"sub": did,
366
+
"iat": chrono::Utc::now().timestamp(),
367
+
"exp": chrono::Utc::now().checked_add_days(chrono::Days::new(90)).context("should be valid time")?.timestamp(),
368
+
"aud": format!("did:web:{}", config.host_name)
369
+
}),
370
+
)
371
+
.context("failed to sign refresh jwt")?;
372
+
373
+
Ok(Json(
374
+
server::create_account::OutputData {
375
+
access_jwt: token,
376
+
did,
377
+
did_doc: None,
378
+
handle: input.handle.clone(),
379
+
refresh_jwt: refresh_token,
380
+
}
381
+
.into(),
382
+
))
383
+
}
384
+
385
+
/// Create an authentication session.
386
+
/// - POST /xrpc/com.atproto.server.createSession
387
+
/// ### Request Body
388
+
/// - `identifier`: string - Handle or other identifier supported by the server for the authenticating user.
389
+
/// - `password`: string - Password for the authenticating user.
390
+
/// - `authFactorToken` - string (optional)
391
+
/// - `allowTakedown` - boolean (optional) - When true, instead of throwing error for takendown accounts, a valid response with a narrow scoped token will be returned
392
+
/// ### Responses
393
+
/// - 200 OK: {"accessJwt": "string","refreshJwt": "string","handle": "string","did": "string","didDoc": {},"email": "string","emailConfirmed": true,"emailAuthFactor": true,"active": true,"status": "takendown"}
394
+
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `AccountTakedown`, `AuthFactorTokenRequired`]}
395
+
/// - 401 Unauthorized
396
+
async fn create_session(
397
+
State(db): State<Db>,
398
+
State(skey): State<SigningKey>,
399
+
State(config): State<AppConfig>,
400
+
Json(input): Json<server::create_session::Input>,
401
+
) -> Result<Json<server::create_session::Output>> {
402
+
let handle = &input.identifier;
403
+
let password = &input.password;
404
+
405
+
// TODO: `input.allow_takedown`
406
+
// TODO: `input.auth_factor_token`
407
+
408
+
let Some(account) = sqlx::query!(
409
+
r#"
410
+
WITH LatestHandles AS (
411
+
SELECT did, handle
412
+
FROM handles
413
+
WHERE (did, created_at) IN (
414
+
SELECT did, MAX(created_at) AS max_created_at
415
+
FROM handles
416
+
GROUP BY did
417
+
)
418
+
)
419
+
SELECT a.did, a.password, h.handle
420
+
FROM accounts a
421
+
LEFT JOIN LatestHandles h ON a.did = h.did
422
+
WHERE h.handle = ?
423
+
"#,
424
+
handle
425
+
)
426
+
.fetch_optional(&db)
427
+
.await
428
+
.context("failed to authenticate")?
429
+
else {
430
+
counter!(AUTH_FAILED).increment(1);
431
+
432
+
// SEC: Call argon2's `verify_password` to simulate password verification and discard the result.
433
+
// We do this to avoid exposing a timing attack where attackers can measure the response time to
434
+
// determine whether or not an account exists.
435
+
_ = Argon2::default().verify_password(
436
+
password.as_bytes(),
437
+
&PasswordHash::new(DUMMY_PASSWORD).context("should be valid password hash")?,
438
+
);
439
+
440
+
return Err(Error::with_status(
441
+
StatusCode::UNAUTHORIZED,
442
+
anyhow!("failed to validate credentials"),
443
+
));
444
+
};
445
+
446
+
match Argon2::default().verify_password(
447
+
password.as_bytes(),
448
+
&PasswordHash::new(account.password.as_str()).context("invalid password hash in db")?,
449
+
) {
450
+
Ok(()) => {}
451
+
Err(_e) => {
452
+
counter!(AUTH_FAILED).increment(1);
453
+
454
+
return Err(Error::with_status(
455
+
StatusCode::UNAUTHORIZED,
456
+
anyhow!("failed to validate credentials"),
457
+
));
458
+
}
459
+
}
460
+
461
+
let did = account.did;
462
+
463
+
let token = auth::sign(
464
+
&skey,
465
+
"at+jwt",
466
+
&serde_json::json!({
467
+
"scope": "com.atproto.access",
468
+
"sub": did,
469
+
"iat": chrono::Utc::now().timestamp(),
470
+
"exp": chrono::Utc::now().checked_add_signed(chrono::Duration::hours(4)).context("should be valid time")?.timestamp(),
471
+
"aud": format!("did:web:{}", config.host_name)
472
+
}),
473
+
)
474
+
.context("failed to sign jwt")?;
475
+
476
+
let refresh_token = auth::sign(
477
+
&skey,
478
+
"refresh+jwt",
479
+
&serde_json::json!({
480
+
"scope": "com.atproto.refresh",
481
+
"sub": did,
482
+
"iat": chrono::Utc::now().timestamp(),
483
+
"exp": chrono::Utc::now().checked_add_days(chrono::Days::new(90)).context("should be valid time")?.timestamp(),
484
+
"aud": format!("did:web:{}", config.host_name)
485
+
}),
486
+
)
487
+
.context("failed to sign refresh jwt")?;
488
+
489
+
Ok(Json(
490
+
server::create_session::OutputData {
491
+
access_jwt: token,
492
+
refresh_jwt: refresh_token,
493
+
494
+
active: Some(true),
495
+
did: Did::from_str(&did).expect("should be valid DID format"),
496
+
did_doc: None,
497
+
email: None,
498
+
email_auth_factor: None,
499
+
email_confirmed: None,
500
+
handle: Handle::new(account.handle).expect("should be valid handle"),
501
+
status: None,
502
+
}
503
+
.into(),
504
+
))
505
+
}
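
Review note: a minimal login sketch; per the signing code above, the access JWT expires after 4 hours and the refresh JWT after 90 days, so clients should keep both. Host and credentials are placeholders.

```rust
use serde_json::json;

// Hypothetical login via POST /xrpc/com.atproto.server.createSession.
async fn login(
    identifier: &str,
    password: &str,
) -> Result<(String, String), Box<dyn std::error::Error>> {
    let res: serde_json::Value = reqwest::Client::new()
        .post("http://localhost:8000/xrpc/com.atproto.server.createSession")
        .json(&json!({ "identifier": identifier, "password": password }))
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    let access = res["accessJwt"].as_str().unwrap_or_default().to_owned();
    let refresh = res["refreshJwt"].as_str().unwrap_or_default().to_owned();
    Ok((access, refresh))
}
```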

/// Refresh an authentication session. Requires auth using the 'refreshJwt' (not the 'accessJwt').
/// - POST /xrpc/com.atproto.server.refreshSession
/// ### Responses
/// - 200 OK: {"accessJwt": "string","refreshJwt": "string","handle": "string","did": "string","didDoc": {},"active": true,"status": "takendown"}
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `AccountTakedown`]}
/// - 401 Unauthorized
async fn refresh_session(
    State(db): State<Db>,
    State(skey): State<SigningKey>,
    State(config): State<AppConfig>,
    req: Request,
) -> Result<Json<server::refresh_session::Output>> {
    // TODO: store hashes of refresh tokens and enforce single-use
    let auth_token = req
        .headers()
        .get(axum::http::header::AUTHORIZATION)
        .context("no authorization header provided")?
        .to_str()
        .ok()
        .and_then(|auth| auth.strip_prefix("Bearer "))
        .context("invalid authentication token")?;

    let (typ, claims) =
        auth::verify(&skey.did(), auth_token).context("failed to verify refresh token")?;
    if typ != "refresh+jwt" {
        return Err(Error::with_status(
            StatusCode::UNAUTHORIZED,
            anyhow!("invalid refresh token"),
        ));
    }
    if claims
        .get("exp")
        .and_then(serde_json::Value::as_i64)
        .context("failed to get `exp`")?
        < chrono::Utc::now().timestamp()
    {
        return Err(Error::with_status(
            StatusCode::UNAUTHORIZED,
            anyhow!("refresh token expired"),
        ));
    }
    if claims
        .get("aud")
        .and_then(|audience| audience.as_str())
        .context("invalid jwt")?
        != format!("did:web:{}", config.host_name)
    {
        return Err(Error::with_status(
            StatusCode::UNAUTHORIZED,
            anyhow!("invalid audience"),
        ));
    }

    let did = claims
        .get("sub")
        .and_then(|subject| subject.as_str())
        .context("invalid jwt")?;

    let user = sqlx::query!(
        r#"
        SELECT a.status, h.handle
        FROM accounts a
        JOIN handles h ON a.did = h.did
        WHERE a.did = ?
        ORDER BY h.created_at ASC
        LIMIT 1
        "#,
        did
    )
    .fetch_one(&db)
    .await
    .context("failed to fetch user account")?;

    let token = auth::sign(
        &skey,
        "at+jwt",
        &serde_json::json!({
            "scope": "com.atproto.access",
            "sub": did,
            "iat": chrono::Utc::now().timestamp(),
            "exp": chrono::Utc::now().checked_add_signed(chrono::Duration::hours(4)).context("should be valid time")?.timestamp(),
            "aud": format!("did:web:{}", config.host_name)
        }),
    )
    .context("failed to sign jwt")?;

    let refresh_token = auth::sign(
        &skey,
        "refresh+jwt",
        &serde_json::json!({
            "scope": "com.atproto.refresh",
            "sub": did,
            "iat": chrono::Utc::now().timestamp(),
            "exp": chrono::Utc::now().checked_add_days(chrono::Days::new(90)).context("should be valid time")?.timestamp(),
            "aud": format!("did:web:{}", config.host_name)
        }),
    )
    .context("failed to sign refresh jwt")?;

    let active = user.status == "active";
    let status = if active { None } else { Some(user.status) };

    Ok(Json(
        server::refresh_session::OutputData {
            access_jwt: token,
            refresh_jwt: refresh_token,

            active: Some(active), // TODO?
            did: Did::new(did.to_owned()).expect("should be valid DID format"),
            did_doc: None,
            handle: Handle::new(user.handle).expect("should be valid handle"),
            status,
        }
        .into(),
    ))
}
623
+
624
+
/// Get a signed token on behalf of the requesting DID for the requested service.
625
+
/// - GET /xrpc/com.atproto.server.getServiceAuth
626
+
/// ### Request Query Parameters
627
+
/// - `aud`: string - The DID of the service that the token will be used to authenticate with
628
+
/// - `exp`: integer (optional) - The time in Unix Epoch seconds that the JWT expires. Defaults to 60 seconds in the future. The service may enforce certain time bounds on tokens depending on the requested scope.
629
+
/// - `lxm`: string (optional) - Lexicon (XRPC) method to bind the requested token to
630
+
/// ### Responses
631
+
/// - 200 OK: {token: string}
632
+
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `BadExpiration`]}
633
+
/// - 401 Unauthorized
634
+
async fn get_service_auth(
635
+
user: AuthenticatedUser,
636
+
State(skey): State<SigningKey>,
637
+
Query(input): Query<server::get_service_auth::ParametersData>,
638
+
) -> Result<Json<server::get_service_auth::Output>> {
639
+
let user_did = user.did();
640
+
let aud = input.aud.as_str();
641
+
642
+
let exp = (chrono::Utc::now().checked_add_signed(chrono::Duration::minutes(1)))
643
+
.context("should be valid expiration datetime")?
644
+
.timestamp();
645
+
let jti = rand::thread_rng()
646
+
.sample_iter(rand::distributions::Alphanumeric)
647
+
.take(10)
648
+
.map(char::from)
649
+
.collect::<String>();
650
+
651
+
let mut claims = serde_json::json!({
652
+
"iss": user_did.as_str(),
653
+
"aud": aud,
654
+
"exp": exp,
655
+
"jti": jti,
656
+
});
657
+
658
+
if let Some(ref lxm) = input.lxm {
659
+
claims = claims
660
+
.as_object_mut()
661
+
.context("should be a valid object")?
662
+
.insert("lxm".to_owned(), serde_json::Value::String(lxm.to_string()))
663
+
.context("should be able to insert lxm into claims")?;
664
+
}
665
+
666
+
// Mint a bearer token by signing a JSON web token.
667
+
let token = auth::sign(&skey, "JWT", &claims).context("failed to sign jwt")?;
668
+
669
+
Ok(Json(server::get_service_auth::OutputData { token }.into()))
670
+
}
671
+
672
+
/// Get information about the current auth session. Requires auth.
673
+
/// - GET /xrpc/com.atproto.server.getSession
674
+
/// ### Responses
675
+
/// - 200 OK: {"handle": "string","did": "string","email": "string","emailConfirmed": true,"emailAuthFactor": true,"didDoc": {},"active": true,"status": "takendown"}
676
+
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
677
+
/// - 401 Unauthorized
678
+
async fn get_session(
679
+
user: AuthenticatedUser,
680
+
State(db): State<Db>,
681
+
) -> Result<Json<server::get_session::Output>> {
682
+
let did = user.did();
683
+
#[expect(clippy::shadow_unrelated, reason = "is related")]
684
+
if let Some(user) = sqlx::query!(
685
+
r#"
686
+
SELECT a.email, a.status, (
687
+
SELECT h.handle
688
+
FROM handles h
689
+
WHERE h.did = a.did
690
+
ORDER BY h.created_at ASC
691
+
LIMIT 1
692
+
) AS handle
693
+
FROM accounts a
694
+
WHERE a.did = ?
695
+
"#,
696
+
did
697
+
)
698
+
.fetch_optional(&db)
699
+
.await
700
+
.context("failed to fetch session")?
701
+
{
702
+
let active = user.status == "active";
703
+
let status = if active { None } else { Some(user.status) };
704
+
705
+
Ok(Json(
706
+
server::get_session::OutputData {
707
+
active: Some(active),
708
+
did: Did::from_str(&did).expect("should be valid DID format"),
709
+
did_doc: None,
710
+
email: Some(user.email),
711
+
email_auth_factor: None,
712
+
email_confirmed: None,
713
+
handle: Handle::new(user.handle).expect("should be valid handle"),
714
+
status,
715
+
}
716
+
.into(),
717
+
))
718
+
} else {
719
+
Err(Error::with_status(
720
+
StatusCode::UNAUTHORIZED,
721
+
anyhow!("user not found"),
722
+
))
723
+
}
724
+
}
725
+
726
+
/// Describes the server's account creation requirements and capabilities. Implemented by PDS.
727
+
/// - GET /xrpc/com.atproto.server.describeServer
728
+
/// ### Responses
729
+
/// - 200 OK: {"inviteCodeRequired": true,"phoneVerificationRequired": true,"availableUserDomains": [`string`],"links": {"privacyPolicy": "string","termsOfService": "string"},"contact": {"email": "string"},"did": "string"}
730
+
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
731
+
/// - 401 Unauthorized
732
+
async fn describe_server(
733
+
State(config): State<AppConfig>,
734
+
) -> Result<Json<server::describe_server::Output>> {
735
+
Ok(Json(
736
+
server::describe_server::OutputData {
737
+
available_user_domains: vec![],
738
+
contact: None,
739
+
did: Did::from_str(&format!("did:web:{}", config.host_name))
740
+
.expect("should be valid DID format"),
741
+
invite_code_required: Some(true),
742
+
links: None,
743
+
phone_verification_required: Some(false), // email verification
744
+
}
745
+
.into(),
746
+
))
747
+
}
748
+
749
+
async fn todo() -> Result<()> {
750
+
Err(Error::unimplemented(anyhow!("not implemented")))
751
+
}
752
+
753
+
#[rustfmt::skip]
754
+
/// These endpoints are part of the atproto PDS server and account management APIs. \
755
+
/// Requests often require authentication and are made directly to the user's own PDS instance.
756
+
/// ### Routes
757
+
/// - `POST /xrpc/com.atproto.server.createAccount` -> [`create_account`]
758
+
/// - `POST /xrpc/com.atproto.server.createInviteCode` -> [`create_invite_code`]
759
+
/// - `POST /xrpc/com.atproto.server.createSession` -> [`create_session`]
760
+
/// - `GET /xrpc/com.atproto.server.describeServer` -> [`describe_server`]
761
+
/// - `GET /xrpc/com.atproto.server.getServiceAuth` -> [`get_service_auth`]
762
+
/// - `GET /xrpc/com.atproto.server.getSession` -> [`get_session`]
763
+
/// - `POST /xrpc/com.atproto.server.refreshSession` -> [`refresh_session`]
764
+
pub(super) fn routes() -> Router<AppState> {
765
+
Router::new()
766
+
.route(concat!("/", server::activate_account::NSID), post(todo))
767
+
.route(concat!("/", server::check_account_status::NSID), post(todo))
768
+
.route(concat!("/", server::confirm_email::NSID), post(todo))
769
+
.route(concat!("/", server::create_account::NSID), post(create_account))
770
+
.route(concat!("/", server::create_app_password::NSID), post(todo))
771
+
.route(concat!("/", server::create_invite_code::NSID), post(create_invite_code))
772
+
.route(concat!("/", server::create_invite_codes::NSID), post(todo))
773
+
.route(concat!("/", server::create_session::NSID), post(create_session))
774
+
.route(concat!("/", server::deactivate_account::NSID), post(todo))
775
+
.route(concat!("/", server::delete_account::NSID), post(todo))
776
+
.route(concat!("/", server::delete_session::NSID), post(todo))
777
+
.route(concat!("/", server::describe_server::NSID), get(describe_server))
778
+
.route(concat!("/", server::get_account_invite_codes::NSID), post(todo))
779
+
.route(concat!("/", server::get_service_auth::NSID), get(get_service_auth))
780
+
.route(concat!("/", server::get_session::NSID), get(get_session))
781
+
.route(concat!("/", server::list_app_passwords::NSID), post(todo))
782
+
.route(concat!("/", server::refresh_session::NSID), post(refresh_session))
783
+
.route(concat!("/", server::request_account_delete::NSID), post(todo))
784
+
.route(concat!("/", server::request_email_confirmation::NSID), post(todo))
785
+
.route(concat!("/", server::request_email_update::NSID), post(todo))
786
+
.route(concat!("/", server::request_password_reset::NSID), post(todo))
787
+
.route(concat!("/", server::reserve_signing_key::NSID), post(todo))
788
+
.route(concat!("/", server::reset_password::NSID), post(todo))
789
+
.route(concat!("/", server::revoke_app_password::NSID), post(todo))
790
+
.route(concat!("/", server::update_email::NSID), post(todo))
791
+
}
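
A note on usage: the session endpoints above are plain XRPC over HTTP. A minimal client-side sketch of the flow (not part of this change; `PDS_URL`, the helper name, and a reqwest dependency with its `json` feature are illustrative assumptions):

```rust
use anyhow::{Context as _, Result};

const PDS_URL: &str = "https://pds.example.com"; // placeholder host

async fn login_and_refresh(identifier: &str, password: &str) -> Result<()> {
    let client = reqwest::Client::new();

    // POST com.atproto.server.createSession with account credentials;
    // the response carries the access/refresh JWT pair minted above.
    let session: serde_json::Value = client
        .post(format!("{PDS_URL}/xrpc/com.atproto.server.createSession"))
        .json(&serde_json::json!({ "identifier": identifier, "password": password }))
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;

    let refresh_jwt = session["refreshJwt"]
        .as_str()
        .context("missing refreshJwt")?;

    // POST com.atproto.server.refreshSession, authenticated with the
    // *refresh* JWT rather than the access JWT, to rotate the pair.
    let refreshed: serde_json::Value = client
        .post(format!("{PDS_URL}/xrpc/com.atproto.server.refreshSession"))
        .bearer_auth(refresh_jwt)
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;

    println!("new access token: {}", refreshed["accessJwt"]);
    Ok(())
}
```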
+428 src/apis/com/atproto/sync/sync.rs
···
+//! Endpoints for the `ATProto` sync API. (/xrpc/com.atproto.sync.*)
+use std::str::FromStr as _;
+
+use anyhow::{Context as _, anyhow};
+use atrium_api::{
+    com::atproto::sync,
+    types::{LimitedNonZeroU16, string::Did},
+};
+use atrium_repo::{
+    Cid,
+    blockstore::{
+        AsyncBlockStoreRead as _, AsyncBlockStoreWrite as _, CarStore, DAG_CBOR, SHA2_256,
+    },
+};
+use axum::{
+    Json, Router,
+    body::Body,
+    extract::{Query, State, WebSocketUpgrade},
+    http::{self, Response, StatusCode},
+    response::IntoResponse,
+    routing::get,
+};
+use constcat::concat;
+use futures::stream::TryStreamExt as _;
+use tokio_util::io::ReaderStream;
+
+use crate::{
+    AppState, Db, Error, Result,
+    config::AppConfig,
+    firehose::FirehoseProducer,
+    storage::{open_repo_db, open_store},
+};
+
+#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq, Eq)]
+#[serde(rename_all = "camelCase")]
+/// Parameters for `/xrpc/com.atproto.sync.listBlobs` \
+/// HACK: `limit` may be passed as a string, so we must treat it as one.
+pub(super) struct ListBlobsParameters {
+    #[serde(skip_serializing_if = "core::option::Option::is_none")]
+    /// Optional cursor to paginate through blobs.
+    pub cursor: Option<String>,
+    /// The DID of the repo.
+    pub did: Did,
+    #[serde(skip_serializing_if = "core::option::Option::is_none")]
+    /// Optional limit of blobs to return.
+    pub limit: Option<String>,
+    /// Optional revision of the repo to list blobs since.
+    #[serde(skip_serializing_if = "core::option::Option::is_none")]
+    pub since: Option<String>,
+}
+#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq, Eq)]
+#[serde(rename_all = "camelCase")]
+/// Parameters for `/xrpc/com.atproto.sync.listRepos` \
+/// HACK: `limit` may be passed as a string, so we must treat it as one.
+pub(super) struct ListReposParameters {
+    #[serde(skip_serializing_if = "core::option::Option::is_none")]
+    /// Optional cursor to paginate through repos.
+    pub cursor: Option<String>,
+    #[serde(skip_serializing_if = "core::option::Option::is_none")]
+    /// Optional limit of repos to return.
+    pub limit: Option<String>,
+}
+#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq, Eq)]
+#[serde(rename_all = "camelCase")]
+/// Parameters for `/xrpc/com.atproto.sync.subscribeRepos` \
+/// HACK: `cursor` may be passed as a string, so we must treat it as one.
+pub(super) struct SubscribeReposParametersData {
+    /// The last known event seq number to backfill from.
+    #[serde(skip_serializing_if = "core::option::Option::is_none")]
+    pub cursor: Option<String>,
+}
+
+async fn get_blob(
+    State(config): State<AppConfig>,
+    Query(input): Query<sync::get_blob::ParametersData>,
+) -> Result<Response<Body>> {
+    let blob = config
+        .blob
+        .path
+        .join(format!("{}.blob", input.cid.as_ref()));
+
+    let f = tokio::fs::File::open(blob)
+        .await
+        .context("blob not found")?;
+    let len = f
+        .metadata()
+        .await
+        .context("failed to query file metadata")?
+        .len();
+
+    let s = ReaderStream::new(f);
+
+    Ok(Response::builder()
+        .header(http::header::CONTENT_LENGTH, format!("{len}"))
+        .body(Body::from_stream(s))
+        .context("failed to construct response")?)
+}
+
+/// Get data blocks from a given repo, by CID. Does not require auth.
+/// - GET /xrpc/com.atproto.sync.getBlocks
+/// ### Query Parameters
+/// - `did`: The DID of the repo.
+/// - `cids`: The CIDs of the blocks to fetch.
+/// ### Responses
+/// - 200 OK: ...
+/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
+/// - 401 Unauthorized
+async fn get_blocks(
+    State(config): State<AppConfig>,
+    Query(input): Query<sync::get_blocks::ParametersData>,
+) -> Result<Response<Body>> {
+    let mut repo = open_store(&config.repo, input.did.as_str())
+        .await
+        .context("failed to open repository")?;
+
+    let mut mem = Vec::new();
+    let mut store = CarStore::create(std::io::Cursor::new(&mut mem))
+        .await
+        .context("failed to create intermediate carstore")?;
+
+    for cid in &input.cids {
+        // SEC: This can potentially fetch stale blocks from a repository (e.g. those that were deleted).
+        // We'll want to prevent accesses to stale blocks eventually just to respect a user's right to be forgotten.
+        _ = store
+            .write_block(
+                DAG_CBOR,
+                SHA2_256,
+                &repo
+                    .read_block(*cid.as_ref())
+                    .await
+                    .context("failed to read block")?,
+            )
+            .await
+            .context("failed to write block")?;
+    }
+
+    Ok(Response::builder()
+        .header(http::header::CONTENT_TYPE, "application/vnd.ipld.car")
+        .body(Body::from(mem))
+        .context("failed to construct response")?)
+}
+
+/// Get the current commit CID & revision of the specified repo. Does not require auth.
+/// ### Query Parameters
+/// - `did`: The DID of the repo.
+/// ### Responses
+/// - 200 OK: {"cid": "string","rev": "string"}
+/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `RepoTakendown`, `RepoSuspended`, `RepoDeactivated`]}
+async fn get_latest_commit(
+    State(config): State<AppConfig>,
+    State(db): State<Db>,
+    Query(input): Query<sync::get_latest_commit::ParametersData>,
+) -> Result<Json<sync::get_latest_commit::Output>> {
+    let repo = open_repo_db(&config.repo, &db, input.did.as_str())
+        .await
+        .context("failed to open repository")?;
+
+    let cid = repo.root();
+    let commit = repo.commit();
+
+    Ok(Json(
+        sync::get_latest_commit::OutputData {
+            cid: atrium_api::types::string::Cid::new(cid),
+            rev: commit.rev(),
+        }
+        .into(),
+    ))
+}
+
+/// Get data blocks needed to prove the existence or non-existence of record in the current version of repo. Does not require auth.
+/// ### Query Parameters
+/// - `did`: The DID of the repo.
+/// - `collection`: nsid
+/// - `rkey`: record-key
+/// ### Responses
+/// - 200 OK: ...
+/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `RecordNotFound`, `RepoNotFound`, `RepoTakendown`,
+///   `RepoSuspended`, `RepoDeactivated`]}
+async fn get_record(
+    State(config): State<AppConfig>,
+    State(db): State<Db>,
+    Query(input): Query<sync::get_record::ParametersData>,
+) -> Result<Response<Body>> {
+    let mut repo = open_repo_db(&config.repo, &db, input.did.as_str())
+        .await
+        .context("failed to open repo")?;
+
+    let key = format!("{}/{}", input.collection.as_str(), input.rkey.as_str());
+
+    let mut contents = Vec::new();
+    let mut ret_store =
+        CarStore::create_with_roots(std::io::Cursor::new(&mut contents), [repo.root()])
+            .await
+            .context("failed to create car store")?;
+
+    repo.extract_raw_into(&key, &mut ret_store)
+        .await
+        .context("failed to extract records")?;
+
+    Ok(Response::builder()
+        .header(http::header::CONTENT_TYPE, "application/vnd.ipld.car")
+        .body(Body::from(contents))
+        .context("failed to construct response")?)
+}
+
+/// Get the hosting status for a repository, on this server. Expected to be implemented by PDS and Relay.
+/// ### Query Parameters
+/// - `did`: The DID of the repo.
+/// ### Responses
+/// - 200 OK: {"did": "string","active": true,"status": "takendown","rev": "string"}
+/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `RepoNotFound`]}
+async fn get_repo_status(
+    State(db): State<Db>,
+    Query(input): Query<sync::get_repo::ParametersData>,
+) -> Result<Json<sync::get_repo_status::Output>> {
+    let did = input.did.as_str();
+    let r = sqlx::query!(r#"SELECT rev, status FROM accounts WHERE did = ?"#, did)
+        .fetch_optional(&db)
+        .await
+        .context("failed to execute query")?;
+
+    let Some(r) = r else {
+        return Err(Error::with_status(
+            StatusCode::NOT_FOUND,
+            anyhow!("account not found"),
+        ));
+    };
+
+    let active = r.status == "active";
+    let status = if active { None } else { Some(r.status) };
+
+    Ok(Json(
+        sync::get_repo_status::OutputData {
+            active,
+            status,
+            did: input.did.clone(),
+            rev: Some(
+                atrium_api::types::string::Tid::new(r.rev).expect("should be able to convert Tid"),
+            ),
+        }
+        .into(),
+    ))
+}
+
+/// Download a repository export as CAR file. Optionally only a 'diff' since a previous revision.
+/// Does not require auth; implemented by PDS.
+/// ### Query Parameters
+/// - `did`: The DID of the repo.
+/// - `since`: The revision ('rev') of the repo to create a diff from.
+/// ### Responses
+/// - 200 OK: ...
+/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `RepoNotFound`,
+///   `RepoTakendown`, `RepoSuspended`, `RepoDeactivated`]}
+async fn get_repo(
+    State(config): State<AppConfig>,
+    State(db): State<Db>,
+    Query(input): Query<sync::get_repo::ParametersData>,
+) -> Result<Response<Body>> {
+    let mut repo = open_repo_db(&config.repo, &db, input.did.as_str())
+        .await
+        .context("failed to open repo")?;
+
+    let mut contents = Vec::new();
+    let mut store = CarStore::create_with_roots(std::io::Cursor::new(&mut contents), [repo.root()])
+        .await
+        .context("failed to create car store")?;
+
+    repo.export_into(&mut store)
+        .await
+        .context("failed to extract records")?;
+
+    Ok(Response::builder()
+        .header(http::header::CONTENT_TYPE, "application/vnd.ipld.car")
+        .body(Body::from(contents))
+        .context("failed to construct response")?)
+}
+
+/// List blob CIDs for an account, since some repo revision. Does not require auth; implemented by PDS.
+/// ### Query Parameters
+/// - `did`: The DID of the repo. Required.
+/// - `since`: Optional revision of the repo to list blobs since.
+/// - `limit`: >= 1 and <= 1000, default 500
+/// - `cursor`: string
+/// ### Responses
+/// - 200 OK: {"cursor": "string","cids": [string]}
+/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `RepoNotFound`, `RepoTakendown`,
+///   `RepoSuspended`, `RepoDeactivated`]}
+async fn list_blobs(
+    State(db): State<Db>,
+    Query(input): Query<sync::list_blobs::ParametersData>,
+) -> Result<Json<sync::list_blobs::Output>> {
+    let did_str = input.did.as_str();
+
+    // TODO: `input.since`
+    // TODO: `input.limit`
+    // TODO: `input.cursor`
+
+    let cids = sqlx::query_scalar!(r#"SELECT cid FROM blob_ref WHERE did = ?"#, did_str)
+        .fetch_all(&db)
+        .await
+        .context("failed to query blobs")?;
+
+    let cids = cids
+        .into_iter()
+        .map(|c| {
+            Cid::from_str(&c)
+                .map(atrium_api::types::string::Cid::new)
+                .map_err(anyhow::Error::new)
+        })
+        .collect::<anyhow::Result<Vec<_>>>()
+        .context("failed to convert cids")?;
+
+    Ok(Json(
+        sync::list_blobs::OutputData { cursor: None, cids }.into(),
+    ))
+}
+
+/// Enumerates all the DID, rev, and commit CID for all repos hosted by this service.
+/// Does not require auth; implemented by PDS and Relay.
+/// ### Query Parameters
+/// - `limit`: >= 1 and <= 1000, default 500
+/// - `cursor`: string
+/// ### Responses
+/// - 200 OK: {"cursor": "string","repos": [{"did": "string","head": "string","rev": "string","active": true,"status": "takendown"}]}
+/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
+async fn list_repos(
+    State(db): State<Db>,
+    Query(input): Query<sync::list_repos::ParametersData>,
+) -> Result<Json<sync::list_repos::Output>> {
+    struct Record {
+        /// The DID of the repo.
+        did: String,
+        /// The revision (TID) of the repo.
+        rev: String,
+        /// The root commit CID of the repo.
+        root: String,
+    }
+
+    let limit: u16 = input.limit.unwrap_or(LimitedNonZeroU16::MAX).into();
+
+    let r = if let Some(ref cursor) = input.cursor {
+        let r = sqlx::query_as!(
+            Record,
+            r#"SELECT did, root, rev FROM accounts WHERE did > ? LIMIT ?"#,
+            cursor,
+            limit
+        )
+        .fetch(&db);
+
+        r.try_collect::<Vec<_>>()
+            .await
+            .context("failed to fetch profiles")?
+    } else {
+        let r = sqlx::query_as!(
+            Record,
+            r#"SELECT did, root, rev FROM accounts LIMIT ?"#,
+            limit
+        )
+        .fetch(&db);
+
+        r.try_collect::<Vec<_>>()
+            .await
+            .context("failed to fetch profiles")?
+    };
+
+    let cursor = r.last().map(|r| r.did.clone());
+    let repos = r
+        .into_iter()
+        .map(|r| {
+            sync::list_repos::RepoData {
+                active: Some(true),
+                did: Did::new(r.did).expect("should be a valid DID"),
+                head: atrium_api::types::string::Cid::new(
+                    Cid::from_str(&r.root).expect("should be a valid CID"),
+                ),
+                rev: atrium_api::types::string::Tid::new(r.rev)
+                    .expect("should be able to convert Tid"),
+                status: None,
+            }
+            .into()
+        })
+        .collect::<Vec<_>>();
+
+    Ok(Json(sync::list_repos::OutputData { cursor, repos }.into()))
+}
+
+/// Repository event stream, aka Firehose endpoint. Outputs repo commits with diff data, and identity update events,
+/// for all repositories on the current server. See the atproto specifications for details around stream sequencing,
+/// repo versioning, CAR diff format, and more. Public and does not require auth; implemented by PDS and Relay.
+/// ### Query Parameters
+/// - `cursor`: The last known event seq number to backfill from.
+/// ### Responses
+/// - 200 OK: ...
+async fn subscribe_repos(
+    ws_up: WebSocketUpgrade,
+    State(fh): State<FirehoseProducer>,
+    Query(input): Query<sync::subscribe_repos::ParametersData>,
+) -> impl IntoResponse {
+    ws_up.on_upgrade(async move |ws| {
+        fh.client_connection(ws, input.cursor).await;
+    })
+}
+
+#[rustfmt::skip]
+/// These endpoints are part of the atproto repository synchronization APIs. Requests usually do not require authentication,
+/// and can be made to PDS instances or Relay instances.
+/// ### Routes
+/// - `GET /xrpc/com.atproto.sync.getBlob` -> [`get_blob`]
+/// - `GET /xrpc/com.atproto.sync.getBlocks` -> [`get_blocks`]
+/// - `GET /xrpc/com.atproto.sync.getLatestCommit` -> [`get_latest_commit`]
+/// - `GET /xrpc/com.atproto.sync.getRecord` -> [`get_record`]
+/// - `GET /xrpc/com.atproto.sync.getRepoStatus` -> [`get_repo_status`]
+/// - `GET /xrpc/com.atproto.sync.getRepo` -> [`get_repo`]
+/// - `GET /xrpc/com.atproto.sync.listBlobs` -> [`list_blobs`]
+/// - `GET /xrpc/com.atproto.sync.listRepos` -> [`list_repos`]
+/// - `GET /xrpc/com.atproto.sync.subscribeRepos` -> [`subscribe_repos`]
+pub(super) fn routes() -> Router<AppState> {
+    Router::new()
+        .route(concat!("/", sync::get_blob::NSID), get(get_blob))
+        .route(concat!("/", sync::get_blocks::NSID), get(get_blocks))
+        .route(concat!("/", sync::get_latest_commit::NSID), get(get_latest_commit))
+        .route(concat!("/", sync::get_record::NSID), get(get_record))
+        .route(concat!("/", sync::get_repo_status::NSID), get(get_repo_status))
+        .route(concat!("/", sync::get_repo::NSID), get(get_repo))
+        .route(concat!("/", sync::list_blobs::NSID), get(list_blobs))
+        .route(concat!("/", sync::list_repos::NSID), get(list_repos))
+        .route(concat!("/", sync::subscribe_repos::NSID), get(subscribe_repos))
+}
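
For reference, pulling a full repository export from the sync API above is a single GET returning a CAR stream. A hedged client sketch (host and helper name are illustrative, not part of the diff):

```rust
use anyhow::Result;

// Fetch com.atproto.sync.getRepo for a given DID and return the raw CAR bytes.
async fn download_repo_car(did: &str) -> Result<Vec<u8>> {
    let url = format!(
        "https://pds.example.com/xrpc/com.atproto.sync.getRepo?did={did}"
    );
    let bytes = reqwest::get(&url)
        .await?
        .error_for_status()?
        .bytes()
        .await?;
    // The body is `application/vnd.ipld.car`; it can be handed to a CAR
    // reader (e.g. atrium_repo's CarStore) for further processing.
    Ok(bytes.to_vec())
}
```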
+1 src/apis/com/mod.rs
···
+pub mod atproto;
+27 src/apis/mod.rs
···
+//! Root module for all endpoints.
+// mod identity;
+mod com;
+// mod server;
+// mod sync;
+
+use axum::{Json, Router, routing::get};
+use serde_json::json;
+
+use crate::serve::{AppState, Result};
+
+/// Health check endpoint. Returns name and version of the service.
+pub(crate) async fn health() -> Result<Json<serde_json::Value>> {
+    Ok(Json(json!({
+        "version": concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION")),
+    })))
+}
+
+/// Register all root routes.
+pub(crate) fn routes() -> Router<AppState> {
+    Router::new()
+        .route("/_health", get(health))
+        // .merge(identity::routes()) // com.atproto.identity
+        .merge(com::atproto::repo::routes()) // com.atproto.repo
+        // .merge(server::routes()) // com.atproto.server
+        // .merge(sync::routes()) // com.atproto.sync
+}
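
A sketch of how this router could be mounted and served (assumes axum 0.7's `serve` and an already-constructed `AppState`; this wiring is not part of the diff):

```rust
use axum::Router;

// Bind the root router to a TCP listener and run it.
async fn run(state: AppState) -> anyhow::Result<()> {
    let app: Router = routes().with_state(state);
    let listener = tokio::net::TcpListener::bind("0.0.0.0:3000").await?;
    axum::serve(listener, app).await?;
    Ok(())
}
```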
+75 -27 src/auth.rs
···
 };
 use axum::{extract::FromRequestParts, http::StatusCode};
 use base64::Engine as _;
+use diesel::prelude::*;
 use sha2::{Digest as _, Sha256};

-use crate::{AppState, Error, error::ErrorMessage};
+use crate::{
+    error::{Error, ErrorMessage},
+    serve::AppState,
+};

 /// Request extractor for authenticated users.
 /// If specified in an API endpoint, this guarantees the API can only be called
···
     // Extract subject (DID)
     if let Some(did) = claims.get("sub").and_then(serde_json::Value::as_str) {
-        let _status = sqlx::query_scalar!(r#"SELECT status FROM accounts WHERE did = ?"#, did)
-            .fetch_one(&state.db)
+        use crate::schema::pds::account::dsl as AccountSchema;
+        let did_clone = did.to_owned();
+
+        let _did = state
+            .db
+            .get()
+            .await
+            .expect("failed to get db connection")
+            .interact(move |conn| {
+                AccountSchema::account
+                    .filter(AccountSchema::did.eq(did_clone))
+                    .select(AccountSchema::did)
+                    .first::<String>(conn)
+            })
             .await
-            .with_context(|| format!("failed to query account {did}"))
-            .context("should fetch account status")?;
+            .expect("failed to query account");

         Ok(AuthenticatedUser {
             did: did.to_owned(),
···
     let timestamp = chrono::Utc::now().timestamp();

+    use crate::schema::pds::oauth_used_jtis::dsl as JtiSchema;
+
     // Check if JTI has been used before
-    let jti_used =
-        sqlx::query_scalar!(r#"SELECT COUNT(*) FROM oauth_used_jtis WHERE jti = ?"#, jti)
-            .fetch_one(&state.db)
-            .await
-            .context("failed to check JTI")?;
+    let jti_string = jti.to_owned();
+    let jti_used = state
+        .db
+        .get()
+        .await
+        .expect("failed to get db connection")
+        .interact(move |conn| {
+            JtiSchema::oauth_used_jtis
+                .filter(JtiSchema::jti.eq(jti_string))
+                .count()
+                .get_result::<i64>(conn)
+        })
+        .await
+        .expect("failed to query JTI")
+        .expect("failed to get JTI count");

     if jti_used > 0 {
         return Err(Error::with_status(
···
         .and_then(serde_json::Value::as_i64)
         .unwrap_or_else(|| timestamp.checked_add(60).unwrap_or(timestamp));

-    _ = sqlx::query!(
-        r#"
-        INSERT INTO oauth_used_jtis (jti, issuer, created_at, expires_at)
-        VALUES (?, ?, ?, ?)
-        "#,
-        jti,
-        calculated_thumbprint, // Use thumbprint as issuer identifier
-        timestamp,
-        exp
-    )
-    .execute(&state.db)
-    .await
-    .context("failed to store JTI")?;
+    // Convert SQLx INSERT to Diesel
+    let jti_str = jti.to_owned();
+    let thumbprint_str = calculated_thumbprint.to_string();
+    let _ = state
+        .db
+        .get()
+        .await
+        .expect("failed to get db connection")
+        .interact(move |conn| {
+            diesel::insert_into(JtiSchema::oauth_used_jtis)
+                .values((
+                    JtiSchema::jti.eq(jti_str),
+                    JtiSchema::issuer.eq(thumbprint_str),
+                    JtiSchema::created_at.eq(timestamp),
+                    JtiSchema::expires_at.eq(exp),
+                ))
+                .execute(conn)
+        })
+        .await
+        .expect("failed to insert JTI")
+        .expect("failed to insert JTI");

     // Extract subject (DID) from access token
     if let Some(did) = claims.get("sub").and_then(|v| v.as_str()) {
-        let _status = sqlx::query_scalar!(r#"SELECT status FROM accounts WHERE did = ?"#, did)
-            .fetch_one(&state.db)
+        use crate::schema::pds::account::dsl as AccountSchema;
+
+        let did_clone = did.to_owned();
+
+        let _did = state
+            .db
+            .get()
+            .await
+            .expect("failed to get db connection")
+            .interact(move |conn| {
+                AccountSchema::account
+                    .filter(AccountSchema::did.eq(did_clone))
+                    .select(AccountSchema::did)
+                    .first::<String>(conn)
+            })
             .await
-            .with_context(|| format!("failed to query account {did}"))
-            .context("should fetch account status")?;
+            .expect("failed to query account")
+            .expect("failed to get account");

         Ok(AuthenticatedUser {
             did: did.to_owned(),
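
The Diesel rewrite above funnels every query through deadpool's `interact`, which runs the closure on a blocking thread and yields a nested result: the pool's `InteractError` wrapping Diesel's `QueryResult`. The `.expect(..).expect(..)` chains collapse both layers by panicking; a sketch of propagating them instead (illustrative helper, reusing the same schema module as the diff):

```rust
use anyhow::{Context as _, Result};
use deadpool_diesel::sqlite::Pool;
use diesel::prelude::*;

// Count matching account rows, surfacing pool and query errors separately.
async fn account_exists(pool: &Pool, did: String) -> Result<bool> {
    use crate::schema::pds::account::dsl as AccountSchema;

    let conn = pool.get().await.context("failed to get db connection")?;
    let n: i64 = conn
        .interact(move |conn| {
            AccountSchema::account
                .filter(AccountSchema::did.eq(did))
                .count()
                .get_result::<i64>(conn)
        })
        .await
        .map_err(|e| anyhow::anyhow!("interact panicked or was aborted: {e}"))? // pool layer
        .context("count query failed")?; // Diesel layer
    Ok(n > 0)
}
```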
-176 src/db/mod.rs
···
-use anyhow::{Context, Result};
-use diesel::connection::SimpleConnection;
-use diesel::r2d2::{ConnectionManager, Pool, PooledConnection};
-use diesel::sqlite::Sqlite;
-use diesel::*;
-use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations};
-use std::path::Path;
-use std::time::Duration;
-
-pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");
-pub type SqlitePool = Pool<ConnectionManager<SqliteConnection>>;
-pub type SqlitePooledConnection = PooledConnection<ConnectionManager<SqliteConnection>>;
-
-/// Database type for all queries
-pub type DbType = Sqlite;
-
-/// Database connection wrapper
-#[derive(Clone, Debug)]
-pub struct DatabaseConnection {
-    pub pool: SqlitePool,
-}
-
-impl DatabaseConnection {
-    /// Create a new database connection with optional pragmas
-    pub async fn new(path: &str, pragmas: Option<&[(&str, &str)]>) -> Result<Self> {
-        // Create the database directory if it doesn't exist
-        if let Some(parent) = Path::new(path).parent() {
-            if !parent.exists() {
-                tokio::fs::create_dir_all(parent)
-                    .await
-                    .context(format!("Failed to create directory: {:?}", parent))?;
-            }
-        }
-
-        // Sanitize the path for connection string
-        let database_url = format!("sqlite:{}", path);
-
-        // Create a connection manager
-        let manager = ConnectionManager::<SqliteConnection>::new(database_url);
-
-        // Create the connection pool with SQLite-specific configurations
-        let pool = Pool::builder()
-            .max_size(10)
-            .connection_timeout(Duration::from_secs(30))
-            .test_on_check_out(true)
-            .build(manager)
-            .context("Failed to create connection pool")?;
-
-        // Initialize the database with pragmas
-        if let Some(pragmas) = pragmas {
-            let conn = &mut pool.get().context("Failed to get connection from pool")?;
-
-            // Apply all pragmas
-            for (pragma, value) in pragmas {
-                let sql = format!("PRAGMA {} = {}", pragma, value);
-                conn.batch_execute(&sql)
-                    .context(format!("Failed to set pragma {}", pragma))?;
-            }
-        }
-
-        let db = DatabaseConnection { pool };
-        Ok(db)
-    }
-
-    /// Run migrations on the database
-    pub fn run_migrations(&self) -> Result<()> {
-        let mut conn = self
-            .pool
-            .get()
-            .context("Failed to get connection for migrations")?;
-
-        conn.run_pending_migrations(MIGRATIONS)
-            .map_err(|e| anyhow::anyhow!("Failed to run migrations: {}", e))?;
-
-        Ok(())
-    }
-
-    /// Ensure WAL mode is enabled
-    pub async fn ensure_wal(&self) -> Result<()> {
-        let conn = &mut self.pool.get().context("Failed to get connection")?;
-        conn.batch_execute("PRAGMA journal_mode = WAL;")?;
-        conn.batch_execute("PRAGMA synchronous = NORMAL;")?;
-        conn.batch_execute("PRAGMA foreign_keys = ON;")?;
-        Ok(())
-    }
-
-    /// Execute a database operation with retries for busy errors
-    pub async fn run<F, T>(&self, operation: F) -> Result<T>
-    where
-        F: FnOnce(&mut SqliteConnection) -> QueryResult<T> + Send,
-        T: Send + 'static,
-    {
-        let mut retries = 0;
-        let max_retries = 5;
-        let mut last_error = None;
-
-        while retries < max_retries {
-            let mut conn = self.pool.get().context("Failed to get connection")?;
-            match operation(&mut conn) {
-                Ok(result) => return Ok(result),
-                // TODO: Busy error handling
-                // Err(diesel::result::Error::DatabaseError(
-                //     diesel::result::DatabaseErrorKind::DatabaseIsLocked,
-                //     _,
-                // )) => {
-                //     retries += 1;
-                //     let backoff_ms = 10 * (1 << retries); // Exponential backoff
-                //     last_error = Some(diesel::result::Error::DatabaseError(
-                //         diesel::result::DatabaseErrorKind::DatabaseIsLocked,
-                //         Box::new("Database is locked".to_string()),
-                //     ));
-                //     tokio::time::sleep(Duration::from_millis(backoff_ms)).await;
-                // }
-                Err(e) => return Err(e.into()),
-            }
-        }
-
-        Err(anyhow::anyhow!(
-            "Max retries exceeded: {}",
-            last_error.unwrap_or_else(|| result::Error::RollbackTransaction)
-        ))
-    }
-
-    /// Check if currently in a transaction
-    pub fn assert_transaction(&self) -> Result<()> {
-        // SQLite doesn't have a straightforward way to check transaction state
-        // We'll implement a simplified version that just returns Ok for now
-        Ok(())
-    }
-
-    /// Run a transaction with retry logic for busy database errors
-    pub async fn transaction<T, F>(&self, f: F) -> Result<T>
-    where
-        F: FnOnce(&mut SqliteConnection) -> Result<T> + Send,
-        T: Send + 'static,
-    {
-        self.run(|conn| {
-            conn.transaction(|tx| f(tx).map_err(|e| result::Error::RollbackTransaction))
-        })
-        .await
-    }
-
-    /// Run a transaction with no retry logic
-    pub async fn transaction_no_retry<T, F>(&self, f: F) -> Result<T>
-    where
-        F: FnOnce(&mut SqliteConnection) -> std::result::Result<T, result::Error> + Send,
-        T: Send + 'static,
-    {
-        let mut conn = self
-            .pool
-            .get()
-            .context("Failed to get connection for transaction")?;
-
-        conn.transaction(|tx| f(tx))
-            .map_err(|e| anyhow::anyhow!("Transaction error: {:?}", e))
-    }
-}
-
-/// Create a connection pool for SQLite
-pub async fn create_sqlite_pool(database_url: &str) -> Result<SqlitePool> {
-    let manager = ConnectionManager::<SqliteConnection>::new(database_url);
-    let pool = Pool::builder()
-        .max_size(10)
-        .connection_timeout(Duration::from_secs(30))
-        .test_on_check_out(true)
-        .build(manager)
-        .context("Failed to create connection pool")?;
-
-    // Apply recommended SQLite settings
-    let conn = &mut pool.get()?;
-    conn.batch_execute(
-        "PRAGMA journal_mode = WAL; PRAGMA synchronous = NORMAL; PRAGMA foreign_keys = ON;",
-    )?;
-
-    Ok(pool)
-}
+16 src/db.rs
···
+use anyhow::Result;
+use deadpool_diesel::sqlite::{Manager, Pool, Runtime};
+
+#[tracing::instrument(skip_all)]
+/// Establish a connection to the database
+/// Takes a database URL as an argument (like "sqlite://data/sqlite.db")
+pub(crate) fn establish_pool(database_url: &str) -> Result<Pool> {
+    tracing::debug!("Establishing database connection");
+    let manager = Manager::new(database_url, Runtime::Tokio1);
+    let pool = Pool::builder(manager)
+        .max_size(8)
+        .build()
+        .expect("should be able to create connection pool");
+    tracing::debug!("Database connection established");
+    Ok(pool)
+}
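
Usage sketch for `establish_pool` (the URL mirrors the doc comment above; the pragma statement is illustrative, not part of the diff):

```rust
// Build the pool, check out a connection, and run one statement through it.
async fn init_db() -> anyhow::Result<()> {
    let pool = establish_pool("sqlite://data/sqlite.db")?;
    let conn = pool
        .get()
        .await
        .map_err(|e| anyhow::anyhow!("pool error: {e}"))?;
    conn.interact(|conn| {
        use diesel::prelude::*;
        diesel::sql_query("PRAGMA journal_mode = WAL;").execute(conn)
    })
    .await
    .map_err(|e| anyhow::anyhow!("interact failed: {e}"))??;
    Ok(())
}
```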
+1 -1 src/did.rs
-245 src/endpoints/identity.rs
···
-//! Identity endpoints (/xrpc/com.atproto.identity.*)
-use std::collections::HashMap;
-
-use anyhow::{Context as _, anyhow};
-use atrium_api::{
-    com::atproto::identity,
-    types::string::{Datetime, Handle},
-};
-use atrium_crypto::keypair::Did as _;
-use atrium_repo::blockstore::{AsyncBlockStoreWrite as _, CarStore, DAG_CBOR, SHA2_256};
-use axum::{
-    Json, Router,
-    extract::{Query, State},
-    http::StatusCode,
-    routing::{get, post},
-};
-use constcat::concat;
-
-use crate::{
-    AppState, Client, Db, Error, Result, RotationKey, SigningKey,
-    auth::AuthenticatedUser,
-    config::AppConfig,
-    did,
-    firehose::FirehoseProducer,
-    plc::{self, PlcOperation, PlcService},
-};
-
-/// (GET) Resolves an atproto handle (hostname) to a DID. Does not necessarily bi-directionally verify against the DID document.
-/// ### Query Parameters
-/// - handle: The handle to resolve.
-/// ### Responses
-/// - 200 OK: {did: did}
-/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `HandleNotFound`]}
-/// - 401 Unauthorized
-async fn resolve_handle(
-    State(db): State<Db>,
-    State(client): State<Client>,
-    Query(input): Query<identity::resolve_handle::ParametersData>,
-) -> Result<Json<identity::resolve_handle::Output>> {
-    let handle = input.handle.as_str();
-    if let Ok(did) = sqlx::query_scalar!(r#"SELECT did FROM handles WHERE handle = ?"#, handle)
-        .fetch_one(&db)
-        .await
-    {
-        return Ok(Json(
-            identity::resolve_handle::OutputData {
-                did: atrium_api::types::string::Did::new(did).expect("should be valid DID format"),
-            }
-            .into(),
-        ));
-    }
-
-    // HACK: Query bsky to see if they have this handle cached.
-    let response = client
-        .get(format!(
-            "https://api.bsky.app/xrpc/com.atproto.identity.resolveHandle?handle={handle}"
-        ))
-        .send()
-        .await
-        .context("failed to query upstream server")?
-        .json()
-        .await
-        .context("failed to decode response as JSON")?;
-
-    Ok(Json(response))
-}
-
-#[expect(unused_variables, clippy::todo, reason = "Not yet implemented")]
-/// Request an email with a code to in order to request a signed PLC operation. Requires Auth.
-/// - POST /xrpc/com.atproto.identity.requestPlcOperationSignature
-/// ### Responses
-/// - 200 OK
-/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
-/// - 401 Unauthorized
-async fn request_plc_operation_signature(user: AuthenticatedUser) -> Result<()> {
-    todo!()
-}
-
-#[expect(unused_variables, clippy::todo, reason = "Not yet implemented")]
-/// Signs a PLC operation to update some value(s) in the requesting DID's document.
-/// - POST /xrpc/com.atproto.identity.signPlcOperation
-/// ### Request Body
-/// - token: string // A token received through com.atproto.identity.requestPlcOperationSignature
-/// - rotationKeys: string[]
-/// - alsoKnownAs: string[]
-/// - verificationMethods: services
-/// ### Responses
-/// - 200 OK: {operation: string}
-/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
-/// - 401 Unauthorized
-async fn sign_plc_operation(
-    user: AuthenticatedUser,
-    State(skey): State<SigningKey>,
-    State(rkey): State<RotationKey>,
-    State(config): State<AppConfig>,
-    Json(input): Json<identity::sign_plc_operation::Input>,
-) -> Result<Json<identity::sign_plc_operation::Output>> {
-    todo!()
-}
-
-#[expect(
-    clippy::too_many_arguments,
-    reason = "Many parameters are required for this endpoint"
-)]
-/// Updates the current account's handle. Verifies handle validity, and updates did:plc document if necessary. Implemented by PDS, and requires auth.
-/// - POST /xrpc/com.atproto.identity.updateHandle
-/// ### Query Parameters
-/// - handle: handle // The new handle.
-/// ### Responses
-/// - 200 OK
-/// ## Errors
-/// - If the handle is already in use.
-/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
-/// - 401 Unauthorized
-/// ## Panics
-/// - If the handle is not valid.
-async fn update_handle(
-    user: AuthenticatedUser,
-    State(skey): State<SigningKey>,
-    State(rkey): State<RotationKey>,
-    State(client): State<Client>,
-    State(config): State<AppConfig>,
-    State(db): State<Db>,
-    State(fhp): State<FirehoseProducer>,
-    Json(input): Json<identity::update_handle::Input>,
-) -> Result<()> {
-    let handle = input.handle.as_str();
-    let did_str = user.did();
-    let did = atrium_api::types::string::Did::new(user.did()).expect("should be valid DID format");
-
-    if let Some(existing_did) =
-        sqlx::query_scalar!(r#"SELECT did FROM handles WHERE handle = ?"#, handle)
-            .fetch_optional(&db)
-            .await
-            .context("failed to query did count")?
-    {
-        if existing_did != did_str {
-            return Err(Error::with_status(
-                StatusCode::BAD_REQUEST,
-                anyhow!("attempted to update handle to one that is already in use"),
-            ));
-        }
-    }
-
-    // Ensure the existing DID is resolvable.
-    // If not, we need to register the original handle.
-    let _did = did::resolve(&client, did.clone())
-        .await
-        .with_context(|| format!("failed to resolve DID for {did_str}"))
-        .context("should be able to resolve DID")?;
-
-    let op = plc::sign_op(
-        &rkey,
-        PlcOperation {
-            typ: "plc_operation".to_owned(),
-            rotation_keys: vec![rkey.did()],
-            verification_methods: HashMap::from([("atproto".to_owned(), skey.did())]),
-            also_known_as: vec![input.handle.as_str().to_owned()],
-            services: HashMap::from([(
-                "atproto_pds".to_owned(),
-                PlcService::Pds {
-                    endpoint: config.host_name.clone(),
-                },
-            )]),
-            prev: Some(
-                sqlx::query_scalar!(r#"SELECT plc_root FROM accounts WHERE did = ?"#, did_str)
-                    .fetch_one(&db)
-                    .await
-                    .context("failed to fetch user PLC root")?,
-            ),
-        },
-    )
-    .context("failed to sign plc op")?;
-
-    if !config.test {
-        plc::submit(&client, did.as_str(), &op)
-            .await
-            .context("failed to submit PLC operation")?;
-    }
-
-    // FIXME: Properly abstract these implementation details.
-    let did_hash = did_str
-        .strip_prefix("did:plc:")
-        .context("should be valid DID format")?;
-    let doc = tokio::fs::File::options()
-        .read(true)
-        .write(true)
-        .open(config.plc.path.join(format!("{did_hash}.car")))
-        .await
-        .context("failed to open did doc")?;
-
-    let op_bytes = serde_ipld_dagcbor::to_vec(&op).context("failed to encode plc op")?;
-
-    let plc_cid = CarStore::open(doc)
-        .await
-        .context("failed to open did carstore")?
-        .write_block(DAG_CBOR, SHA2_256, &op_bytes)
-        .await
-        .context("failed to write genesis commit")?;
-
-    let cid_str = plc_cid.to_string();
-
-    _ = sqlx::query!(
-        r#"UPDATE accounts SET plc_root = ? WHERE did = ?"#,
-        cid_str,
-        did_str
-    )
-    .execute(&db)
-    .await
-    .context("failed to update account PLC root")?;
-
-    // Broadcast the identity event now that the new identity is resolvable on the public directory.
-    fhp.identity(
-        atrium_api::com::atproto::sync::subscribe_repos::IdentityData {
-            did: did.clone(),
-            handle: Some(Handle::new(handle.to_owned()).expect("should be valid handle")),
-            seq: 0, // Filled by firehose later.
-            time: Datetime::now(),
-        },
-    )
-    .await;
-
-    Ok(())
-}
-
-async fn todo() -> Result<()> {
-    Err(Error::unimplemented(anyhow!("not implemented")))
-}
-
-#[rustfmt::skip]
-/// Identity endpoints (/xrpc/com.atproto.identity.*)
-/// ### Routes
-/// - AP /xrpc/com.atproto.identity.updateHandle -> [`update_handle`]
-/// - AP /xrpc/com.atproto.identity.requestPlcOperationSignature -> [`request_plc_operation_signature`]
-/// - AP /xrpc/com.atproto.identity.signPlcOperation -> [`sign_plc_operation`]
-/// - UG /xrpc/com.atproto.identity.resolveHandle -> [`resolve_handle`]
-pub(super) fn routes() -> Router<AppState> {
-    Router::new()
-        .route(concat!("/", identity::get_recommended_did_credentials::NSID), get(todo))
-        .route(concat!("/", identity::request_plc_operation_signature::NSID), post(request_plc_operation_signature))
-        .route(concat!("/", identity::resolve_handle::NSID), get(resolve_handle))
-        .route(concat!("/", identity::sign_plc_operation::NSID), post(sign_plc_operation))
-        .route(concat!("/", identity::submit_plc_operation::NSID), post(todo))
-        .route(concat!("/", identity::update_handle::NSID), post(update_handle))
-}
-27 src/endpoints/mod.rs
···
-//! Root module for all endpoints.
-mod identity;
-// mod repo;
-mod server;
-mod sync;
-
-use axum::{Json, Router, routing::get};
-use serde_json::json;
-
-use crate::{AppState, Result};
-
-/// Health check endpoint. Returns name and version of the service.
-pub(crate) async fn health() -> Result<Json<serde_json::Value>> {
-    Ok(Json(json!({
-        "version": concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION")),
-    })))
-}
-
-/// Register all root routes.
-pub(crate) fn routes() -> Router<AppState> {
-    Router::new()
-        .route("/_health", get(health))
-        .merge(identity::routes()) // com.atproto.identity
-        // .merge(repo::routes()) // com.atproto.repo
-        .merge(server::routes()) // com.atproto.server
-        .merge(sync::routes()) // com.atproto.sync
-}
-484
src/endpoints/repo/apply_writes.rs
-484
src/endpoints/repo/apply_writes.rs
···
1
-
//! Apply a batch transaction of repository creates, updates, and deletes. Requires auth, implemented by PDS.
2
-
use std::{collections::HashSet, str::FromStr};
3
-
4
-
use anyhow::{Context as _, anyhow};
5
-
use atrium_api::com::atproto::repo::apply_writes::{self, InputWritesItem, OutputResultsItem};
6
-
use atrium_api::{
7
-
com::atproto::repo::{self, defs::CommitMetaData},
8
-
types::{
9
-
LimitedU32, Object, TryFromUnknown as _, TryIntoUnknown as _, Unknown,
10
-
string::{AtIdentifier, Nsid, Tid},
11
-
},
12
-
};
13
-
use atrium_repo::{Cid, blockstore::CarStore};
14
-
use axum::{
15
-
Json, Router,
16
-
body::Body,
17
-
extract::{Query, Request, State},
18
-
http::{self, StatusCode},
19
-
routing::{get, post},
20
-
};
21
-
use constcat::concat;
22
-
use futures::TryStreamExt as _;
23
-
use metrics::counter;
24
-
use rsky_syntax::aturi::AtUri;
25
-
use serde::Deserialize;
26
-
use tokio::io::AsyncWriteExt as _;
27
-
28
-
use crate::repo::block_map::cid_for_cbor;
29
-
use crate::repo::types::PreparedCreateOrUpdate;
30
-
use crate::{
31
-
AppState, Db, Error, Result, SigningKey,
32
-
actor_store::{ActorStoreTransactor, ActorStoreWriter},
33
-
auth::AuthenticatedUser,
34
-
config::AppConfig,
35
-
error::ErrorMessage,
36
-
firehose::{self, FirehoseProducer, RepoOp},
37
-
metrics::{REPO_COMMITS, REPO_OP_CREATE, REPO_OP_DELETE, REPO_OP_UPDATE},
38
-
repo::types::{PreparedWrite, WriteOpAction},
39
-
storage,
40
-
};
41
-
42
-
use super::resolve_did;
43
-
44
-
/// Apply a batch transaction of repository creates, updates, and deletes. Requires auth, implemented by PDS.
45
-
/// - POST /xrpc/com.atproto.repo.applyWrites
46
-
/// ### Request Body
47
-
/// - `repo`: `at-identifier` // The handle or DID of the repo (aka, current account).
48
-
/// - `validate`: `boolean` // Can be set to 'false' to skip Lexicon schema validation of record data across all operations, 'true' to require it, or leave unset to validate only for known Lexicons.
49
-
/// - `writes`: `object[]` // One of:
50
-
/// - - com.atproto.repo.applyWrites.create
51
-
/// - - com.atproto.repo.applyWrites.update
52
-
/// - - com.atproto.repo.applyWrites.delete
53
-
/// - `swap_commit`: `cid` // If provided, the entire operation will fail if the current repo commit CID does not match this value. Used to prevent conflicting repo mutations.
54
-
pub(crate) async fn apply_writes(
55
-
user: AuthenticatedUser,
56
-
State(skey): State<SigningKey>,
57
-
State(config): State<AppConfig>,
58
-
State(db): State<Db>,
59
-
State(fhp): State<FirehoseProducer>,
60
-
Json(input): Json<repo::apply_writes::Input>,
61
-
) -> Result<Json<repo::apply_writes::Output>> {
62
-
todo!();
63
-
// // TODO: `input.validate`
64
-
65
-
// // Resolve DID from identifier
66
-
// let (target_did, _) = resolve_did(&db, &input.repo)
67
-
// .await
68
-
// .context("failed to resolve did")?;
69
-
70
-
// // Ensure that we are updating the correct repository
71
-
// if target_did.as_str() != user.did() {
72
-
// return Err(Error::with_status(
73
-
// StatusCode::BAD_REQUEST,
74
-
// anyhow!("repo did not match the authenticated user"),
75
-
// ));
76
-
// }
77
-
78
-
// // Validate writes count
79
-
// if input.writes.len() > 200 {
80
-
// return Err(Error::with_status(
81
-
// StatusCode::BAD_REQUEST,
82
-
// anyhow!("Too many writes. Max: 200"),
83
-
// ));
84
-
// }
85
-
86
-
// // Convert input writes to prepared format
87
-
// let mut prepared_writes = Vec::with_capacity(input.writes.len());
88
-
// for write in input.writes.iter() {
89
-
// match write {
90
-
// InputWritesItem::Create(create) => {
91
-
// let uri = AtUri::make(
92
-
// user.did(),
93
-
// &create.collection.as_str(),
94
-
// create
95
-
// .rkey
96
-
// .as_deref()
97
-
// .unwrap_or(&Tid::now(LimitedU32::MIN).to_string()),
98
-
// );
99
-
100
-
// let cid = match cid_for_cbor(&create.value) {
101
-
// Ok(cid) => cid,
102
-
// Err(e) => {
103
-
// return Err(Error::with_status(
104
-
// StatusCode::BAD_REQUEST,
105
-
// anyhow!("Failed to encode record: {}", e),
106
-
// ));
107
-
// }
108
-
// };
109
-
110
-
// let blobs = scan_blobs(&create.value)
111
-
// .unwrap_or_default()
112
-
// .into_iter()
113
-
// .map(|cid| {
114
-
// // TODO: Create BlobRef from cid with proper metadata
115
-
// BlobRef {
116
-
// cid,
117
-
// mime_type: "application/octet-stream".to_string(), // Default
118
-
// size: 0, // Unknown at this point
119
-
// }
120
-
// })
121
-
// .collect();
122
-
123
-
// prepared_writes.push(PreparedCreateOrUpdate {
124
-
// action: WriteOpAction::Create,
125
-
// uri: uri?.to_string(),
126
-
// cid,
127
-
// record: create.value.clone(),
128
-
// blobs,
129
-
// swap_cid: None,
130
-
// });
131
-
// }
132
-
// InputWritesItem::Update(update) => {
133
-
// let uri = AtUri::make(
134
-
// user.did(),
135
-
// Some(update.collection.to_string()),
136
-
// Some(update.rkey.to_string()),
137
-
// );
138
-
139
-
// let cid = match cid_for_cbor(&update.value) {
140
-
// Ok(cid) => cid,
141
-
// Err(e) => {
142
-
// return Err(Error::with_status(
143
-
// StatusCode::BAD_REQUEST,
144
-
// anyhow!("Failed to encode record: {}", e),
145
-
// ));
146
-
// }
147
-
// };
148
-
149
-
// let blobs = scan_blobs(&update.value)
150
-
// .unwrap_or_default()
151
-
// .into_iter()
152
-
// .map(|cid| {
153
-
// // TODO: Create BlobRef from cid with proper metadata
154
-
// BlobRef {
155
-
// cid,
156
-
// mime_type: "application/octet-stream".to_string(),
157
-
// size: 0,
158
-
// }
159
-
// })
160
-
// .collect();
161
-
162
-
// prepared_writes.push(PreparedCreateOrUpdate {
163
-
// action: WriteOpAction::Update,
164
-
// uri: uri?.to_string(),
165
-
// cid,
166
-
// record: update.value.clone(),
167
-
// blobs,
168
-
// swap_cid: None,
169
-
// });
170
-
// }
171
-
// InputWritesItem::Delete(delete) => {
172
-
// let uri = AtUri::make(user.did(), &delete.collection.as_str(), &delete.rkey);
173
-
174
-
// prepared_writes.push(PreparedCreateOrUpdate {
175
-
// action: WriteOpAction::Delete,
176
-
// uri: uri?.to_string(),
177
-
// cid: Cid::default(), // Not needed for delete
178
-
// record: serde_json::Value::Null,
179
-
// blobs: vec![],
180
-
// swap_cid: None,
181
-
// });
182
-
// }
183
-
// }
184
-
// }
185
-
186
-
// // Get swap commit CID if provided
187
-
// let swap_commit_cid = input.swap_commit.as_ref().map(|cid| *cid.as_ref());
188
-
189
-
// let did_str = user.did();
190
-
// let mut repo = storage::open_repo_db(&config.repo, &db, did_str)
191
-
// .await
192
-
// .context("failed to open user repo")?;
193
-
// let orig_cid = repo.root();
194
-
// let orig_rev = repo.commit().rev();
195
-
196
-
// let mut blobs = vec![];
197
-
// let mut res = vec![];
198
-
// let mut ops = vec![];
199
-
200
-
// for write in &prepared_writes {
201
-
// let (builder, key) = match write.action {
202
-
// WriteOpAction::Create => {
203
-
// let key = format!("{}/{}", write.uri.collection, write.uri.rkey);
204
-
// let uri = format!("at://{}/{}", user.did(), key);
205
-
206
-
// let (builder, cid) = repo
207
-
// .add_raw(&key, &write.record)
208
-
// .await
209
-
// .context("failed to add record")?;
210
-
211
-
// // Extract and track blobs
212
-
// if let Ok(new_blobs) = scan_blobs(&write.record) {
213
-
// blobs.extend(
214
-
// new_blobs
215
-
// .into_iter()
216
-
// .map(|blob_cid| (key.clone(), blob_cid)),
217
-
// );
218
-
// }
219
-
220
-
// ops.push(RepoOp::Create {
221
-
// cid,
222
-
// path: key.clone(),
223
-
// });
224
-
225
-
// res.push(OutputResultsItem::CreateResult(Box::new(
226
-
// apply_writes::CreateResultData {
227
-
// cid: atrium_api::types::string::Cid::new(cid),
228
-
// uri,
229
-
// validation_status: None,
230
-
// }
231
-
// .into(),
232
-
// )));
233
-
234
-
// (builder, key)
235
-
// }
236
-
// WriteOpAction::Update => {
237
-
// let key = format!("{}/{}", write.uri.collection, write.uri.rkey);
238
-
// let uri = format!("at://{}/{}", user.did(), key);
239
-
240
-
// let prev = repo
241
-
// .tree()
242
-
// .get(&key)
243
-
// .await
244
-
// .context("failed to search MST")?;
245
-
246
-
// if prev.is_none() {
247
-
// // No existing record, treat as create
248
-
// let (create_builder, cid) = repo
249
-
// .add_raw(&key, &write.record)
250
-
// .await
251
-
// .context("failed to add record")?;
252
-
253
-
// if let Ok(new_blobs) = scan_blobs(&write.record) {
254
-
// blobs.extend(
255
-
// new_blobs
256
-
// .into_iter()
257
-
// .map(|blob_cid| (key.clone(), blob_cid)),
258
-
// );
259
-
// }
260
-
261
-
// ops.push(RepoOp::Create {
262
-
// cid,
263
-
// path: key.clone(),
264
-
// });
265
-
266
-
// res.push(OutputResultsItem::CreateResult(Box::new(
267
-
// apply_writes::CreateResultData {
268
-
// cid: atrium_api::types::string::Cid::new(cid),
269
-
// uri,
270
-
// validation_status: None,
271
-
// }
272
-
// .into(),
273
-
// )));
274
-
275
-
// (create_builder, key)
276
-
// } else {
277
-
// // Update existing record
278
-
// let prev = prev.context("should be able to find previous record")?;
279
-
// let (update_builder, cid) = repo
280
-
// .update_raw(&key, &write.record)
281
-
// .await
282
-
// .context("failed to add record")?;
283
-
284
-
// if let Ok(new_blobs) = scan_blobs(&write.record) {
285
-
// blobs.extend(
286
-
// new_blobs
287
-
// .into_iter()
288
-
// .map(|blob_cid| (key.clone(), blob_cid)),
289
-
// );
290
-
// }
291
-
292
-
// ops.push(RepoOp::Update {
293
-
// cid,
294
-
// path: key.clone(),
295
-
// prev,
296
-
// });
297
-
298
-
// res.push(OutputResultsItem::UpdateResult(Box::new(
299
-
// apply_writes::UpdateResultData {
300
-
// cid: atrium_api::types::string::Cid::new(cid),
301
-
// uri,
302
-
// validation_status: None,
303
-
// }
304
-
// .into(),
305
-
// )));
306
-
307
-
// (update_builder, key)
308
-
// }
309
-
// }
310
-
// WriteOpAction::Delete => {
311
-
// let key = format!("{}/{}", write.uri.collection, write.uri.rkey);
312
-
313
-
// let prev = repo
314
-
// .tree()
315
-
// .get(&key)
316
-
// .await
317
-
// .context("failed to search MST")?
318
-
// .context("previous record does not exist")?;
319
-
320
-
// ops.push(RepoOp::Delete {
321
-
// path: key.clone(),
322
-
// prev,
323
-
// });
324
-
325
-
// res.push(OutputResultsItem::DeleteResult(Box::new(
326
-
// apply_writes::DeleteResultData {}.into(),
327
-
// )));
328
-
329
-
// let builder = repo
330
-
// .delete_raw(&key)
331
-
// .await
332
-
// .context("failed to add record")?;
333
-
334
-
// (builder, key)
335
-
// }
336
-
// };
337
-
338
-
// let sig = skey
339
-
// .sign(&builder.bytes())
340
-
// .context("failed to sign commit")?;
341
-
342
-
// _ = builder
343
-
// .finalize(sig)
344
-
// .await
345
-
// .context("failed to write signed commit")?;
346
-
// }
347
-
348
-
// // Construct a firehose record
349
-
// let mut mem = Vec::new();
350
-
// let mut store = CarStore::create_with_roots(std::io::Cursor::new(&mut mem), [repo.root()])
351
-
// .await
352
-
// .context("failed to create temp store")?;
353
-
354
-
// // Extract the records out of the user's repository
355
-
// for write in &prepared_writes {
356
-
// let key = format!("{}/{}", write.uri.collection, write.uri.rkey);
357
-
// repo.extract_raw_into(&key, &mut store)
358
-
// .await
359
-
// .context("failed to extract key")?;
360
-
// }
361
-
362
-
// let mut tx = db.begin().await.context("failed to begin transaction")?;
363
-
364
-
// if !swap_commit(
365
-
// &mut *tx,
366
-
// repo.root(),
367
-
// repo.commit().rev(),
368
-
// input.swap_commit.as_ref().map(|cid| *cid.as_ref()),
369
-
// &user.did(),
370
-
// )
371
-
// .await
372
-
// .context("failed to swap commit")?
373
-
// {
374
-
// // This should always succeed.
375
-
// let old = input
376
-
// .swap_commit
377
-
// .clone()
378
-
// .context("swap_commit should always be Some")?;
379
-
380
-
// // The swap failed. Return the old commit and do not update the repository.
381
-
// return Ok(Json(
382
-
// apply_writes::OutputData {
383
-
// results: None,
384
-
// commit: Some(
385
-
// CommitMetaData {
386
-
// cid: old,
387
-
// rev: orig_rev,
388
-
// }
389
-
// .into(),
390
-
// ),
391
-
// }
392
-
// .into(),
393
-
// ));
394
-
// }
395
-
396
-
// // For updates and removals, unlink the old/deleted record from the blob_ref table
397
-
// for op in &ops {
398
-
// match op {
399
-
// &RepoOp::Update { ref path, .. } | &RepoOp::Delete { ref path, .. } => {
400
-
// // FIXME: This may cause issues if a user deletes more than one record referencing the same blob.
401
-
// _ = &sqlx::query!(
402
-
// r#"UPDATE blob_ref SET record = NULL WHERE did = ? AND record = ?"#,
403
-
// did_str,
404
-
// path
405
-
// )
406
-
// .execute(&mut *tx)
407
-
// .await
408
-
// .context("failed to remove blob_ref")?;
409
-
// }
410
-
// &RepoOp::Create { .. } => {}
411
-
// }
412
-
// }
413
-
414
-
// // Process blobs
415
-
// for (key, cid) in &blobs {
416
-
// let cid_str = cid.to_string();
417
-
418
-
// // Handle the case where a new record references an existing blob
419
-
// if sqlx::query!(
420
-
// r#"UPDATE blob_ref SET record = ? WHERE cid = ? AND did = ? AND record IS NULL"#,
421
-
// key,
422
-
// cid_str,
423
-
// did_str,
424
-
// )
425
-
// .execute(&mut *tx)
426
-
// .await
427
-
// .context("failed to update blob_ref")?
428
-
// .rows_affected()
429
-
// == 0
430
-
// {
431
-
// _ = sqlx::query!(
432
-
// r#"INSERT INTO blob_ref (record, cid, did) VALUES (?, ?, ?)"#,
433
-
// key,
434
-
// cid_str,
435
-
// did_str,
436
-
// )
437
-
// .execute(&mut *tx)
438
-
// .await
439
-
// .context("failed to update blob_ref")?;
440
-
// }
441
-
// }
442
-
443
-
// tx.commit()
444
-
// .await
445
-
// .context("failed to commit blob ref to database")?;
446
-
447
-
// // Update counters
448
-
// counter!(REPO_COMMITS).increment(1);
449
-
// for op in &ops {
450
-
// match *op {
451
-
// RepoOp::Create { .. } => counter!(REPO_OP_CREATE).increment(1),
452
-
// RepoOp::Update { .. } => counter!(REPO_OP_UPDATE).increment(1),
453
-
// RepoOp::Delete { .. } => counter!(REPO_OP_DELETE).increment(1),
454
-
// }
455
-
// }
456
-
457
-
// // We've committed the transaction to the database, and the commit is now stored in the user's
458
-
// // canonical repository.
459
-
// // We can now broadcast this on the firehose.
460
-
// fhp.commit(firehose::Commit {
461
-
// car: mem,
462
-
// ops,
463
-
// cid: repo.root(),
464
-
// rev: repo.commit().rev().to_string(),
465
-
// did: atrium_api::types::string::Did::new(user.did()).expect("should be valid DID"),
466
-
// pcid: Some(orig_cid),
467
-
// blobs: blobs.into_iter().map(|(_, cid)| cid).collect::<Vec<_>>(),
468
-
// })
469
-
// .await;
470
-
471
-
// Ok(Json(
472
-
// apply_writes::OutputData {
473
-
// results: Some(res),
474
-
// commit: Some(
475
-
// CommitMetaData {
476
-
// cid: atrium_api::types::string::Cid::new(repo.root()),
477
-
// rev: repo.commit().rev(),
478
-
// }
479
-
// .into(),
480
-
// ),
481
-
// }
482
-
// .into(),
483
-
// ))
484
-
}
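The commented-out flow above hinges on `swap_commit` for its optimistic-concurrency check: the repo root only advances if the caller-supplied CID still matches the stored one. A minimal sketch of that compare-and-swap, assuming the `accounts` table's `root`/`rev` columns seen in `create_account` (the helper name and exact signature here are illustrative, not this crate's API):

```rust
use anyhow::Context as _;

// Hypothetical sketch of the swap-commit guard: advance the stored repo root
// only if it still matches the expected CID (compare-and-swap).
async fn swap_commit_guard(
    db: &sqlx::SqlitePool,
    did: &str,
    new_root: &str,
    new_rev: &str,
    expected_root: Option<&str>,
) -> anyhow::Result<bool> {
    let query = match expected_root {
        // No swap requested: advance the root unconditionally.
        None => sqlx::query("UPDATE accounts SET root = ?, rev = ? WHERE did = ?")
            .bind(new_root)
            .bind(new_rev)
            .bind(did),
        // Swap requested: only advance if the stored root is unchanged.
        Some(old) => sqlx::query("UPDATE accounts SET root = ?, rev = ? WHERE did = ? AND root = ?")
            .bind(new_root)
            .bind(new_rev)
            .bind(did)
            .bind(old),
    };

    let rows = query
        .execute(db)
        .await
        .context("failed to update repo root")?
        .rows_affected();

    // Zero rows affected under a swap means another writer won the race.
    Ok(rows == 1)
}
```

When the guard returns `false`, the handler above returns the old commit metadata instead of applying the writes, which is exactly the `InvalidSwap` path the lexicon describes.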
-514
src/endpoints/repo.rs
-514
src/endpoints/repo.rs
···
1
-
//! PDS repository endpoints. (/xrpc/com.atproto.repo.*)
2
-
mod apply_writes;
3
-
pub(crate) use apply_writes::apply_writes;
4
-
5
-
use std::{collections::HashSet, str::FromStr};
6
-
7
-
use anyhow::{Context as _, anyhow};
8
-
use atrium_api::com::atproto::repo::apply_writes::{
9
-
self as atrium_apply_writes, InputWritesItem, OutputResultsItem,
10
-
};
11
-
use atrium_api::{
12
-
com::atproto::repo::{self, defs::CommitMetaData},
13
-
types::{
14
-
LimitedU32, Object, TryFromUnknown as _, TryIntoUnknown as _, Unknown,
15
-
string::{AtIdentifier, Nsid, Tid},
16
-
},
17
-
};
18
-
use atrium_repo::{Cid, blockstore::CarStore};
19
-
use axum::{
20
-
Json, Router,
21
-
body::Body,
22
-
extract::{Query, Request, State},
23
-
http::{self, StatusCode},
24
-
routing::{get, post},
25
-
};
26
-
use constcat::concat;
27
-
use futures::TryStreamExt as _;
28
-
use metrics::counter;
29
-
use rsky_syntax::aturi::AtUri;
30
-
use serde::Deserialize;
31
-
use tokio::io::AsyncWriteExt as _;
32
-
33
-
use crate::repo::block_map::cid_for_cbor;
34
-
use crate::repo::types::PreparedCreateOrUpdate;
35
-
use crate::{
36
-
AppState, Db, Error, Result, SigningKey,
37
-
actor_store::{ActorStoreTransactor, ActorStoreWriter},
38
-
auth::AuthenticatedUser,
39
-
config::AppConfig,
40
-
error::ErrorMessage,
41
-
firehose::{self, FirehoseProducer, RepoOp},
42
-
metrics::{REPO_COMMITS, REPO_OP_CREATE, REPO_OP_DELETE, REPO_OP_UPDATE},
43
-
repo::types::{PreparedWrite, WriteOpAction},
44
-
storage,
45
-
};
46
-
47
-
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq, Eq)]
48
-
#[serde(rename_all = "camelCase")]
49
-
/// Parameters for [`list_records`].
50
-
pub(super) struct ListRecordsParameters {
51
-
///The NSID of the record type.
52
-
pub collection: Nsid,
53
-
/// The cursor to start from.
54
-
#[serde(skip_serializing_if = "core::option::Option::is_none")]
55
-
pub cursor: Option<String>,
56
-
///The number of records to return.
57
-
#[serde(skip_serializing_if = "core::option::Option::is_none")]
58
-
pub limit: Option<String>,
59
-
///The handle or DID of the repo.
60
-
pub repo: AtIdentifier,
61
-
///Flag to reverse the order of the returned records.
62
-
#[serde(skip_serializing_if = "core::option::Option::is_none")]
63
-
pub reverse: Option<bool>,
64
-
///DEPRECATED: The highest sort-ordered rkey to stop at (exclusive)
65
-
#[serde(skip_serializing_if = "core::option::Option::is_none")]
66
-
pub rkey_end: Option<String>,
67
-
///DEPRECATED: The lowest sort-ordered rkey to start from (exclusive)
68
-
#[serde(skip_serializing_if = "core::option::Option::is_none")]
69
-
pub rkey_start: Option<String>,
70
-
}
71
-
72
-
/// Resolve DID to DID document. Does not bi-directionally verify handle.
73
-
/// - GET /xrpc/com.atproto.repo.resolveDid
74
-
/// ### Query Parameters
75
-
/// - `did`: DID to resolve.
76
-
/// ### Responses
77
-
/// - 200 OK: {`did_doc`: `did_doc`}
78
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `DidNotFound`, `DidDeactivated`]}
79
-
async fn resolve_did(
80
-
db: &Db,
81
-
identifier: &AtIdentifier,
82
-
) -> anyhow::Result<(
83
-
atrium_api::types::string::Did,
84
-
atrium_api::types::string::Handle,
85
-
)> {
86
-
let (handle, did) = match *identifier {
87
-
AtIdentifier::Handle(ref handle) => {
88
-
let handle_as_str = &handle.as_str();
89
-
(
90
-
&handle.to_owned(),
91
-
&atrium_api::types::string::Did::new(
92
-
sqlx::query_scalar!(
93
-
r#"SELECT did FROM handles WHERE handle = ?"#,
94
-
handle_as_str
95
-
)
96
-
.fetch_one(db)
97
-
.await
98
-
.context("failed to query did")?,
99
-
)
100
-
.expect("should be valid DID"),
101
-
)
102
-
}
103
-
AtIdentifier::Did(ref did) => {
104
-
let did_as_str = &did.as_str();
105
-
(
106
-
&atrium_api::types::string::Handle::new(
107
-
sqlx::query_scalar!(r#"SELECT handle FROM handles WHERE did = ?"#, did_as_str)
108
-
.fetch_one(db)
109
-
.await
110
-
.context("failed to query did")?,
111
-
)
112
-
.expect("should be valid handle"),
113
-
&did.to_owned(),
114
-
)
115
-
}
116
-
};
117
-
118
-
Ok((did.to_owned(), handle.to_owned()))
119
-
}
120
-
121
-
/// Create a single new repository record. Requires auth, implemented by PDS.
122
-
/// - POST /xrpc/com.atproto.repo.createRecord
123
-
/// ### Request Body
124
-
/// - `repo`: `at-identifier` // The handle or DID of the repo (aka, current account).
125
-
/// - `collection`: `nsid` // The NSID of the record collection.
126
-
/// - `rkey`: `string` // The record key. <= 512 characters.
127
-
/// - `validate`: `boolean` // Can be set to 'false' to skip Lexicon schema validation of record data, 'true' to require it, or leave unset to validate only for known Lexicons.
128
-
/// - `record`
129
-
/// - `swap_commit`: `cid` // Compare and swap with the previous commit by CID.
130
-
/// ### Responses
131
-
/// - 200 OK: {`cid`: `cid`, `uri`: `at-uri`, `commit`: {`cid`: `cid`, `rev`: `tid`}, `validation_status`: [`valid`, `unknown`]}
132
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `InvalidSwap`]}
133
-
/// - 401 Unauthorized
134
-
async fn create_record(
135
-
user: AuthenticatedUser,
136
-
State(actor_store): State<ActorStore>,
137
-
State(skey): State<SigningKey>,
138
-
State(config): State<AppConfig>,
139
-
State(db): State<Db>,
140
-
State(fhp): State<FirehoseProducer>,
141
-
Json(input): Json<repo::create_record::Input>,
142
-
) -> Result<Json<repo::create_record::Output>> {
143
-
todo!();
144
-
// let write_result = apply_writes::apply_writes(
145
-
// user,
146
-
// State(actor_store),
147
-
// State(skey),
148
-
// State(config),
149
-
// State(db),
150
-
// State(fhp),
151
-
// Json(
152
-
// repo::apply_writes::InputData {
153
-
// repo: input.repo.clone(),
154
-
// validate: input.validate,
155
-
// swap_commit: input.swap_commit.clone(),
156
-
// writes: vec![repo::apply_writes::InputWritesItem::Create(Box::new(
157
-
// repo::apply_writes::CreateData {
158
-
// collection: input.collection.clone(),
159
-
// rkey: input.rkey.clone(),
160
-
// value: input.record.clone(),
161
-
// }
162
-
// .into(),
163
-
// ))],
164
-
// }
165
-
// .into(),
166
-
// ),
167
-
// )
168
-
// .await
169
-
// .context("failed to apply writes")?;
170
-
171
-
// let create_result = if let repo::apply_writes::OutputResultsItem::CreateResult(create_result) =
172
-
// write_result
173
-
// .results
174
-
// .clone()
175
-
// .and_then(|result| result.first().cloned())
176
-
// .context("unexpected output from apply_writes")?
177
-
// {
178
-
// Some(create_result)
179
-
// } else {
180
-
// None
181
-
// }
182
-
// .context("unexpected result from apply_writes")?;
183
-
184
-
// Ok(Json(
185
-
// repo::create_record::OutputData {
186
-
// cid: create_result.cid.clone(),
187
-
// commit: write_result.commit.clone(),
188
-
// uri: create_result.uri.clone(),
189
-
// validation_status: Some("unknown".to_owned()),
190
-
// }
191
-
// .into(),
192
-
// ))
193
-
}
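When the caller omits `rkey`, `createRecord` implementations conventionally mint a TID (timestamp identifier) as the record key. A self-contained sketch of the TID encoding from the atproto spec — 13 base32-sortable characters over a zero bit, a 53-bit microsecond timestamp, and a 10-bit clock id; this is illustrative, not an API exposed by this crate:

```rust
// Illustrative TID (record key) generation per the atproto spec.
const B32_SORTABLE: &[u8; 32] = b"234567abcdefghijklmnopqrstuvwxyz";

fn tid_from_parts(micros_since_epoch: u64, clock_id: u16) -> String {
    // Layout: 1 zero bit, 53 bits of microseconds, 10 bits of clock id.
    let n = (micros_since_epoch << 10) | u64::from(clock_id & 0x3ff);
    // Emit 13 characters, most-significant 5-bit group first.
    (0..13)
        .rev()
        .map(|i| B32_SORTABLE[((n >> (i * 5)) & 0x1f) as usize] as char)
        .collect()
}
```

Because the alphabet is sort-ordered, lexicographic ordering of rkeys matches creation order, which is what `listRecords` pagination relies on.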
194
-
195
-
/// Write a repository record, creating or updating it as needed. Requires auth, implemented by PDS.
196
-
/// - POST /xrpc/com.atproto.repo.putRecord
197
-
/// ### Request Body
198
-
/// - `repo`: `at-identifier` // The handle or DID of the repo (aka, current account).
199
-
/// - `collection`: `nsid` // The NSID of the record collection.
200
-
/// - `rkey`: `string` // The record key. <= 512 characters.
201
-
/// - `validate`: `boolean` // Can be set to 'false' to skip Lexicon schema validation of record data, 'true' to require it, or leave unset to validate only for known Lexicons.
202
-
/// - `record`
203
-
/// - `swap_record`: `cid` // Compare and swap with the previous record by CID. WARNING: nullable and optional field; may cause problems with golang implementation
204
-
/// - `swap_commit`: `cid` // Compare and swap with the previous commit by CID.
205
-
/// ### Responses
206
-
/// - 200 OK: {"uri": "string","cid": "string","commit": {"cid": "string","rev": "string"},"validationStatus": "valid | unknown"}
207
-
/// - 400 Bad Request: {error:"`InvalidRequest` | `ExpiredToken` | `InvalidToken` | `InvalidSwap`"}
208
-
/// - 401 Unauthorized
209
-
async fn put_record(
210
-
user: AuthenticatedUser,
211
-
State(actor_store): State<ActorStore>,
212
-
State(skey): State<SigningKey>,
213
-
State(config): State<AppConfig>,
214
-
State(db): State<Db>,
215
-
State(fhp): State<FirehoseProducer>,
216
-
Json(input): Json<repo::put_record::Input>,
217
-
) -> Result<Json<repo::put_record::Output>> {
218
-
todo!();
219
-
// // TODO: `input.swap_record`
220
-
// // FIXME: "put" implies that we will create the record if it does not exist.
221
-
// // We currently only update existing records and/or throw an error if one doesn't exist.
222
-
// let input = (*input).clone();
223
-
// let input = repo::apply_writes::InputData {
224
-
// repo: input.repo,
225
-
// validate: input.validate,
226
-
// swap_commit: input.swap_commit,
227
-
// writes: vec![repo::apply_writes::InputWritesItem::Update(Box::new(
228
-
// repo::apply_writes::UpdateData {
229
-
// collection: input.collection,
230
-
// rkey: input.rkey,
231
-
// value: input.record,
232
-
// }
233
-
// .into(),
234
-
// ))],
235
-
// }
236
-
// .into();
237
-
238
-
// let write_result = apply_writes::apply_writes(
239
-
// user,
240
-
// State(actor_store),
241
-
// State(skey),
242
-
// State(config),
243
-
// State(db),
244
-
// State(fhp),
245
-
// Json(input),
246
-
// )
247
-
// .await
248
-
// .context("failed to apply writes")?;
249
-
250
-
// let update_result = write_result
251
-
// .results
252
-
// .clone()
253
-
// .and_then(|result| result.first().cloned())
254
-
// .context("unexpected output from apply_writes")?;
255
-
// let (cid, uri) = match update_result {
256
-
// repo::apply_writes::OutputResultsItem::CreateResult(create_result) => (
257
-
// Some(create_result.cid.clone()),
258
-
// Some(create_result.uri.clone()),
259
-
// ),
260
-
// repo::apply_writes::OutputResultsItem::UpdateResult(update_result) => (
261
-
// Some(update_result.cid.clone()),
262
-
// Some(update_result.uri.clone()),
263
-
// ),
264
-
// repo::apply_writes::OutputResultsItem::DeleteResult(_) => (None, None),
265
-
// };
266
-
// Ok(Json(
267
-
// repo::put_record::OutputData {
268
-
// cid: cid.context("missing cid")?,
269
-
// commit: write_result.commit.clone(),
270
-
// uri: uri.context("missing uri")?,
271
-
// validation_status: Some("unknown".to_owned()),
272
-
// }
273
-
// .into(),
274
-
// ))
275
-
}
276
-
277
-
/// Delete a repository record, or ensure it doesn't exist. Requires auth, implemented by PDS.
278
-
/// - POST /xrpc/com.atproto.repo.deleteRecord
279
-
/// ### Request Body
280
-
/// - `repo`: `at-identifier` // The handle or DID of the repo (aka, current account).
281
-
/// - `collection`: `nsid` // The NSID of the record collection.
282
-
/// - `rkey`: `string` // The record key. <= 512 characters.
283
-
/// - `swap_record`: `cid` // Compare and swap with the previous record by CID.
284
-
/// - `swap_commit`: `cid` // Compare and swap with the previous commit by CID.
285
-
/// ### Responses
286
-
/// - 200 OK: {"commit": {"cid": "string","rev": "string"}}
287
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `InvalidSwap`]}
288
-
/// - 401 Unauthorized
289
-
async fn delete_record(
290
-
user: AuthenticatedUser,
291
-
State(actor_store): State<ActorStore>,
292
-
State(skey): State<SigningKey>,
293
-
State(config): State<AppConfig>,
294
-
State(db): State<Db>,
295
-
State(fhp): State<FirehoseProducer>,
296
-
Json(input): Json<repo::delete_record::Input>,
297
-
) -> Result<Json<repo::delete_record::Output>> {
298
-
todo!();
299
-
// // TODO: `input.swap_record`
300
-
301
-
// Ok(Json(
302
-
// repo::delete_record::OutputData {
303
-
// commit: apply_writes::apply_writes(
304
-
// user,
305
-
// State(actor_store),
306
-
// State(skey),
307
-
// State(config),
308
-
// State(db),
309
-
// State(fhp),
310
-
// Json(
311
-
// repo::apply_writes::InputData {
312
-
// repo: input.repo.clone(),
313
-
// swap_commit: input.swap_commit.clone(),
314
-
// validate: None,
315
-
// writes: vec![repo::apply_writes::InputWritesItem::Delete(Box::new(
316
-
// repo::apply_writes::DeleteData {
317
-
// collection: input.collection.clone(),
318
-
// rkey: input.rkey.clone(),
319
-
// }
320
-
// .into(),
321
-
// ))],
322
-
// }
323
-
// .into(),
324
-
// ),
325
-
// )
326
-
// .await
327
-
// .context("failed to apply writes")?
328
-
// .commit
329
-
// .clone(),
330
-
// }
331
-
// .into(),
332
-
// ))
333
-
}
334
-
335
-
/// Get information about an account and repository, including the list of collections. Does not require auth.
336
-
/// - GET /xrpc/com.atproto.repo.describeRepo
337
-
/// ### Query Parameters
338
-
/// - `repo`: `at-identifier` // The handle or DID of the repo.
339
-
/// ### Responses
340
-
/// - 200 OK: {"handle": "string","did": "string","didDoc": {},"collections": [string],"handleIsCorrect": true} \
341
-
/// handleIsCorrect - boolean - Indicates if handle is currently valid (resolves bi-directionally)
342
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
343
-
/// - 401 Unauthorized
344
-
async fn describe_repo(
345
-
State(actor_store): State<ActorStore>,
346
-
State(config): State<AppConfig>,
347
-
State(db): State<Db>,
348
-
Query(input): Query<repo::describe_repo::ParametersData>,
349
-
) -> Result<Json<repo::describe_repo::Output>> {
350
-
// Lookup the DID by the provided handle.
351
-
let (did, handle) = resolve_did(&db, &input.repo)
352
-
.await
353
-
.context("failed to resolve handle")?;
354
-
355
-
// Use Actor Store to get the collections
356
-
todo!();
357
-
}
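For the `collections` list this endpoint still needs, the repository's MST keys already carry the answer: keys have the shape `collection/rkey` (see the `format!("{}/{}", ...)` calls in `apply_writes` above), so the collection set is just the distinct key prefixes. A hedged sketch:

```rust
use std::collections::BTreeSet;

// Illustrative helper: derive the `collections` field from MST keys of the
// form "collection/rkey". BTreeSet gives a sorted, de-duplicated result.
fn collections_from_keys<'a>(keys: impl IntoIterator<Item = &'a str>) -> Vec<String> {
    keys.into_iter()
        .filter_map(|k| k.split_once('/').map(|(coll, _rkey)| coll.to_owned()))
        .collect::<BTreeSet<_>>()
        .into_iter()
        .collect()
}
```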
358
-
359
-
/// Get a single record from a repository. Does not require auth.
360
-
/// - GET /xrpc/com.atproto.repo.getRecord
361
-
/// ### Query Parameters
362
-
/// - `repo`: `at-identifier` // The handle or DID of the repo.
363
-
/// - `collection`: `nsid` // The NSID of the record collection.
364
-
/// - `rkey`: `string` // The record key. <= 512 characters.
365
-
/// - `cid`: `cid` // The CID of the version of the record. If not specified, then return the most recent version.
366
-
/// ### Responses
367
-
/// - 200 OK: {"uri": "string","cid": "string","value": {}}
368
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `RecordNotFound`]}
369
-
/// - 401 Unauthorized
370
-
async fn get_record(
371
-
State(actor_store): State<ActorStore>,
372
-
State(config): State<AppConfig>,
373
-
State(db): State<Db>,
374
-
Query(input): Query<repo::get_record::ParametersData>,
375
-
) -> Result<Json<repo::get_record::Output>> {
376
-
if input.cid.is_some() {
377
-
return Err(Error::unimplemented(anyhow!(
378
-
"looking up old records is unsupported"
379
-
)));
380
-
}
381
-
382
-
// Lookup the DID by the provided handle.
383
-
let (did, _handle) = resolve_did(&db, &input.repo)
384
-
.await
385
-
.context("failed to resolve handle")?;
386
-
387
-
// Create a URI from the parameters
388
-
let uri = format!(
389
-
"at://{}/{}/{}",
390
-
did.as_str(),
391
-
input.collection.as_str(),
392
-
input.rkey.as_str()
393
-
);
394
-
395
-
// Use Actor Store to get the record
396
-
todo!();
397
-
}
398
-
399
-
/// List a range of records in a repository, matching a specific collection. Does not require auth.
400
-
/// - GET /xrpc/com.atproto.repo.listRecords
401
-
/// ### Query Parameters
402
-
/// - `repo`: `at-identifier` // The handle or DID of the repo.
403
-
/// - `collection`: `nsid` // The NSID of the record type.
404
-
/// - `limit`: `integer` // The maximum number of records to return. Default 50, >=1 and <=100.
405
-
/// - `cursor`: `string`
406
-
/// - `reverse`: `boolean` // Flag to reverse the order of the returned records.
407
-
/// ### Responses
408
-
/// - 200 OK: {"cursor": "string","records": [{"uri": "string","cid": "string","value": {}}]}
409
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
410
-
/// - 401 Unauthorized
411
-
async fn list_records(
412
-
State(actor_store): State<ActorStore>,
413
-
State(config): State<AppConfig>,
414
-
State(db): State<Db>,
415
-
Query(input): Query<Object<ListRecordsParameters>>,
416
-
) -> Result<Json<repo::list_records::Output>> {
417
-
// Lookup the DID by the provided handle.
418
-
let (did, _handle) = resolve_did(&db, &input.repo)
419
-
.await
420
-
.context("failed to resolve handle")?;
421
-
422
-
// Use Actor Store to list records for the collection
423
-
todo!();
424
-
}
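The `limit` parameter arrives as an optional string (see `ListRecordsParameters` above) and is documented as defaulting to 50 within 1..=100. One way to normalize it — illustrative only; clamping rather than rejecting out-of-range values is a design choice:

```rust
// Sketch of the documented limit handling: default 50, clamped to 1..=100.
fn parse_limit(limit: Option<&str>) -> anyhow::Result<u16> {
    let n = match limit {
        None => 50,
        Some(s) => s
            .parse::<u16>()
            .map_err(|e| anyhow::anyhow!("invalid limit: {e}"))?,
    };
    Ok(n.clamp(1, 100))
}
```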
425
-
426
-
/// Upload a new blob, to be referenced from a repository record. \
427
-
/// The blob will be deleted if it is not referenced within a time window (eg, minutes). \
428
-
/// Blob restrictions (mimetype, size, etc) are enforced when the reference is created. \
429
-
/// Requires auth, implemented by PDS.
430
-
/// - POST /xrpc/com.atproto.repo.uploadBlob
431
-
/// ### Request Body
432
-
/// ### Responses
433
-
/// - 200 OK: {"blob": "binary"}
434
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
435
-
/// - 401 Unauthorized
436
-
async fn upload_blob(
437
-
user: AuthenticatedUser,
438
-
State(actor_store): State<ActorStore>,
439
-
State(config): State<AppConfig>,
440
-
State(db): State<Db>,
441
-
request: Request<Body>,
442
-
) -> Result<Json<repo::upload_blob::Output>> {
443
-
let length = request
444
-
.headers()
445
-
.get(http::header::CONTENT_LENGTH)
446
-
.context("no content length provided")?
447
-
.to_str()
448
-
.map_err(anyhow::Error::from)
449
-
.and_then(|content_length| content_length.parse::<u64>().map_err(anyhow::Error::from))
450
-
.context("invalid content-length header")?;
451
-
let mime = request
452
-
.headers()
453
-
.get(http::header::CONTENT_TYPE)
454
-
.context("no content-type provided")?
455
-
.to_str()
456
-
.context("invalid content-type provided")?
457
-
.to_owned();
458
-
459
-
if length > config.blob.limit {
460
-
return Err(Error::with_status(
461
-
StatusCode::PAYLOAD_TOO_LARGE,
462
-
anyhow!("size {} above limit {}", length, config.blob.limit),
463
-
));
464
-
}
465
-
466
-
// Read the blob data
467
-
let mut body_data = Vec::new();
468
-
let mut stream = request.into_body().into_data_stream();
469
-
while let Some(bytes) = stream.try_next().await.context("failed to receive file")? {
470
-
body_data.extend_from_slice(&bytes);
471
-
472
-
// Check size limit incrementally
473
-
if body_data.len() as u64 > config.blob.limit {
474
-
return Err(Error::with_status(
475
-
StatusCode::PAYLOAD_TOO_LARGE,
476
-
anyhow!("size above limit and content-length header was wrong"),
477
-
));
478
-
}
479
-
}
480
-
481
-
// Use Actor Store to upload the blob
482
-
todo!();
483
-
}
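Once the bytes are buffered, the actor store will need a CID for the blob. atproto blobs conventionally use CIDv1 with the raw multicodec (0x55) and a sha2-256 multihash; a self-contained sketch (the helper name is hypothetical):

```rust
use cid::Cid;
use multihash::Multihash;
use sha2::{Digest as _, Sha256};

// Hypothetical helper: derive a blob CID as CIDv1(raw, sha2-256).
fn blob_cid(data: &[u8]) -> anyhow::Result<Cid> {
    let digest = Sha256::digest(data);
    // 0x12 is the sha2-256 multihash code; 0x55 is the raw multicodec.
    let hash = Multihash::<64>::wrap(0x12, &digest)?;
    Ok(Cid::new_v1(0x55, hash))
}
```

This CID is what the later `blob_ref` bookkeeping in `apply_writes` keys on, and what `getBlob` uses to name the file on disk.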
484
-
485
-
async fn todo() -> Result<()> {
486
-
Err(Error::unimplemented(anyhow!("not implemented")))
487
-
}
488
-
489
-
/// These endpoints are part of the atproto PDS repository management APIs. \
490
-
/// Requests usually require authentication (unlike the com.atproto.sync.* endpoints), and are made directly to the user's own PDS instance.
491
-
/// ### Routes
492
-
/// - AP /xrpc/com.atproto.repo.applyWrites -> [`apply_writes`]
493
-
/// - AP /xrpc/com.atproto.repo.createRecord -> [`create_record`]
494
-
/// - AP /xrpc/com.atproto.repo.putRecord -> [`put_record`]
495
-
/// - AP /xrpc/com.atproto.repo.deleteRecord -> [`delete_record`]
496
-
/// - AP /xrpc/com.atproto.repo.uploadBlob -> [`upload_blob`]
497
-
/// - UG /xrpc/com.atproto.repo.describeRepo -> [`describe_repo`]
498
-
/// - UG /xrpc/com.atproto.repo.getRecord -> [`get_record`]
499
-
/// - UG /xrpc/com.atproto.repo.listRecords -> [`list_records`]
500
-
/// - [ ] xx /xrpc/com.atproto.repo.importRepo
501
-
/// - [ ] xx /xrpc/com.atproto.repo.listMissingBlobs
502
-
pub(super) fn routes() -> Router<AppState> {
503
-
Router::new()
504
-
.route(concat!("/", repo::apply_writes::NSID), post(apply_writes))
505
-
// .route(concat!("/", repo::create_record::NSID), post(create_record))
506
-
// .route(concat!("/", repo::put_record::NSID), post(put_record))
507
-
// .route(concat!("/", repo::delete_record::NSID), post(delete_record))
508
-
// .route(concat!("/", repo::upload_blob::NSID), post(upload_blob))
509
-
// .route(concat!("/", repo::describe_repo::NSID), get(describe_repo))
510
-
// .route(concat!("/", repo::get_record::NSID), get(get_record))
511
-
.route(concat!("/", repo::import_repo::NSID), post(todo))
512
-
.route(concat!("/", repo::list_missing_blobs::NSID), get(todo))
513
-
// .route(concat!("/", repo::list_records::NSID), get(list_records))
514
-
}
-791
src/endpoints/server.rs
-791
src/endpoints/server.rs
···
1
-
//! Server endpoints. (/xrpc/com.atproto.server.*)
2
-
use std::{collections::HashMap, str::FromStr as _};
3
-
4
-
use anyhow::{Context as _, anyhow};
5
-
use argon2::{
6
-
Argon2, PasswordHash, PasswordHasher as _, PasswordVerifier as _, password_hash::SaltString,
7
-
};
8
-
use atrium_api::{
9
-
com::atproto::server,
10
-
types::string::{Datetime, Did, Handle, Tid},
11
-
};
12
-
use atrium_crypto::keypair::Did as _;
13
-
use atrium_repo::{
14
-
Cid, Repository,
15
-
blockstore::{AsyncBlockStoreWrite as _, CarStore, DAG_CBOR, SHA2_256},
16
-
};
17
-
use axum::{
18
-
Json, Router,
19
-
extract::{Query, Request, State},
20
-
http::StatusCode,
21
-
routing::{get, post},
22
-
};
23
-
use constcat::concat;
24
-
use metrics::counter;
25
-
use rand::Rng as _;
26
-
use sha2::Digest as _;
27
-
use uuid::Uuid;
28
-
29
-
use crate::{
30
-
AppState, Client, Db, Error, Result, RotationKey, SigningKey,
31
-
auth::{self, AuthenticatedUser},
32
-
config::AppConfig,
33
-
firehose::{Commit, FirehoseProducer},
34
-
metrics::AUTH_FAILED,
35
-
plc::{self, PlcOperation, PlcService},
36
-
storage,
37
-
};
38
-
39
-
/// This is a dummy password that can be used in absence of a real password.
40
-
const DUMMY_PASSWORD: &str = "$argon2id$v=19$m=19456,t=2,p=1$En2LAfHjeO0SZD5IUU1Abg$RpS8nHhhqY4qco2uyd41p9Y/1C+Lvi214MAWukzKQMI";
41
-
42
-
/// Create an invite code.
43
-
/// - POST /xrpc/com.atproto.server.createInviteCode
44
-
/// ### Request Body
45
-
/// - `useCount`: integer
46
-
/// - `forAccount`: string (optional)
47
-
/// ### Responses
48
-
/// - 200 OK: {code: string}
49
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
50
-
/// - 401 Unauthorized
51
-
async fn create_invite_code(
52
-
_user: AuthenticatedUser,
53
-
State(db): State<Db>,
54
-
Json(input): Json<server::create_invite_code::Input>,
55
-
) -> Result<Json<server::create_invite_code::Output>> {
56
-
let uuid = Uuid::new_v4().to_string();
57
-
let did = input.for_account.as_deref();
58
-
let count = std::cmp::min(input.use_count, 100); // Maximum of 100 uses for any code.
59
-
60
-
if count <= 0 {
61
-
return Err(anyhow!("use_count must be greater than 0").into());
62
-
}
63
-
64
-
Ok(Json(
65
-
server::create_invite_code::OutputData {
66
-
code: sqlx::query_scalar!(
67
-
r#"
68
-
INSERT INTO invites (id, did, count, created_at)
69
-
VALUES (?, ?, ?, datetime('now'))
70
-
RETURNING id
71
-
"#,
72
-
uuid,
73
-
did,
74
-
count,
75
-
)
76
-
.fetch_one(&db)
77
-
.await
78
-
.context("failed to create new invite code")?,
79
-
}
80
-
.into(),
81
-
))
82
-
}
83
-
84
-
#[expect(clippy::too_many_lines, reason = "TODO: refactor")]
85
-
/// Create an account. Implemented by PDS.
86
-
/// - POST /xrpc/com.atproto.server.createAccount
87
-
/// ### Request Body
88
-
/// - `email`: string
89
-
/// - `handle`: string (required)
90
-
/// - `did`: string - Pre-existing atproto DID, being imported to a new account.
91
-
/// - `inviteCode`: string
92
-
/// - `verificationCode`: string
93
-
/// - `verificationPhone`: string
94
-
/// - `password`: string - Initial account password. May need to meet instance-specific password strength requirements.
95
-
/// - `recoveryKey`: string - DID PLC rotation key (aka, recovery key) to be included in PLC creation operation.
96
-
/// - `plcOp`: object
97
-
/// ### Responses
98
-
/// - 200 OK: {"accessJwt": "string","refreshJwt": "string","handle": "string","did": "string","didDoc": {}}
99
-
/// - 400 Bad Request: {error: [`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `InvalidHandle`, `InvalidPassword`, \
100
-
/// `InvalidInviteCode`, `HandleNotAvailable`, `UnsupportedDomain`, `UnresolvableDid`, `IncompatibleDidDoc`)}
101
-
/// - 401 Unauthorized
102
-
async fn create_account(
103
-
State(db): State<Db>,
104
-
State(skey): State<SigningKey>,
105
-
State(rkey): State<RotationKey>,
106
-
State(client): State<Client>,
107
-
State(config): State<AppConfig>,
108
-
State(fhp): State<FirehoseProducer>,
109
-
Json(input): Json<server::create_account::Input>,
110
-
) -> Result<Json<server::create_account::Output>> {
111
-
let email = input.email.as_deref().context("no email provided")?;
112
-
// Hash the user's password.
113
-
let pass = Argon2::default()
114
-
.hash_password(
115
-
input
116
-
.password
117
-
.as_deref()
118
-
.context("no password provided")?
119
-
.as_bytes(),
120
-
SaltString::generate(&mut rand::thread_rng()).as_salt(),
121
-
)
122
-
.context("failed to hash password")?
123
-
.to_string();
124
-
let handle = input.handle.as_str().to_owned();
125
-
126
-
// TODO: Handle the account migration flow.
127
-
// Users will hit this endpoint with a service-level authentication token.
128
-
//
129
-
// https://github.com/bluesky-social/pds/blob/main/ACCOUNT_MIGRATION.md
130
-
131
-
// TODO: `input.plc_op`
132
-
if input.plc_op.is_some() {
133
-
return Err(Error::unimplemented(anyhow!("plc_op")));
134
-
}
135
-
136
-
let recovery_keys = if let Some(ref key) = input.recovery_key {
137
-
// Ensure the provided recovery key is valid.
138
-
if let Err(error) = atrium_crypto::did::parse_did_key(key) {
139
-
return Err(Error::with_status(
140
-
StatusCode::BAD_REQUEST,
141
-
anyhow::Error::new(error).context("provided recovery key is in invalid format"),
142
-
));
143
-
}
144
-
145
-
// Enroll the user-provided recovery key at a higher priority than our own.
146
-
vec![key.clone(), rkey.did()]
147
-
} else {
148
-
vec![rkey.did()]
149
-
};
150
-
151
-
// Begin a new transaction to actually create the user's profile.
152
-
// Unless committed, the transaction will be automatically rolled back.
153
-
let mut tx = db.begin().await.context("failed to begin transaction")?;
154
-
155
-
// TODO: Make this its own toggle instead of tied to test mode
156
-
if !config.test {
157
-
let _invite = match input.invite_code {
158
-
Some(ref code) => {
159
-
let invite: Option<String> = sqlx::query_scalar!(
160
-
r#"
161
-
UPDATE invites
162
-
SET count = count - 1
163
-
WHERE id = ?
164
-
AND count > 0
165
-
RETURNING id
166
-
"#,
167
-
code
168
-
)
169
-
.fetch_optional(&mut *tx)
170
-
.await
171
-
.context("failed to check invite code")?;
172
-
173
-
invite.context("invalid invite code")?
174
-
}
175
-
None => {
176
-
return Err(anyhow!("invite code required").into());
177
-
}
178
-
};
179
-
}
180
-
181
-
// Account can be created. Synthesize a new DID for the user.
182
-
// https://github.com/did-method-plc/did-method-plc?tab=readme-ov-file#did-creation
183
-
let op = plc::sign_op(
184
-
&rkey,
185
-
PlcOperation {
186
-
typ: "plc_operation".to_owned(),
187
-
rotation_keys: recovery_keys,
188
-
verification_methods: HashMap::from([("atproto".to_owned(), skey.did())]),
189
-
also_known_as: vec![format!("at://{}", input.handle.as_str())],
190
-
services: HashMap::from([(
191
-
"atproto_pds".to_owned(),
192
-
PlcService::Pds {
193
-
endpoint: format!("https://{}", config.host_name),
194
-
},
195
-
)]),
196
-
prev: None,
197
-
},
198
-
)
199
-
.context("failed to sign genesis op")?;
200
-
let op_bytes = serde_ipld_dagcbor::to_vec(&op).context("failed to encode genesis op")?;
201
-
202
-
let did_hash = {
203
-
let digest = base32::encode(
204
-
base32::Alphabet::Rfc4648Lower { padding: false },
205
-
sha2::Sha256::digest(&op_bytes).as_slice(),
206
-
);
207
-
if digest.len() < 24 {
208
-
return Err(anyhow!("digest too short").into());
209
-
}
210
-
#[expect(clippy::string_slice, reason = "digest length confirmed")]
211
-
digest[..24].to_owned()
212
-
};
213
-
let did = format!("did:plc:{did_hash}");
214
-
215
-
let doc = tokio::fs::File::create(config.plc.path.join(format!("{did_hash}.car")))
216
-
.await
217
-
.context("failed to create did doc")?;
218
-
219
-
let mut plc_doc = CarStore::create(doc)
220
-
.await
221
-
.context("failed to create did doc")?;
222
-
223
-
let plc_cid = plc_doc
224
-
.write_block(DAG_CBOR, SHA2_256, &op_bytes)
225
-
.await
226
-
.context("failed to write genesis commit")?
227
-
.to_string();
228
-
229
-
if !config.test {
230
-
// Send the new account's data to the PLC directory.
231
-
plc::submit(&client, &did, &op)
232
-
.await
233
-
.context("failed to submit PLC operation to directory")?;
234
-
}
235
-
236
-
// Write out an initial commit for the user.
237
-
// https://atproto.com/guides/account-lifecycle
238
-
let (cid, rev, store) = async {
239
-
let store = storage::create_storage_for_did(&config.repo, &did_hash)
240
-
.await
241
-
.context("failed to create storage")?;
242
-
243
-
// Initialize the repository with the storage
244
-
let repo_builder = Repository::create(
245
-
store,
246
-
Did::from_str(&did).expect("should be valid DID format"),
247
-
)
248
-
.await
249
-
.context("failed to initialize user repo")?;
250
-
251
-
// Sign the root commit.
252
-
let sig = skey
253
-
.sign(&repo_builder.bytes())
254
-
.context("failed to sign root commit")?;
255
-
let mut repo = repo_builder
256
-
.finalize(sig)
257
-
.await
258
-
.context("failed to attach signature to root commit")?;
259
-
260
-
let root = repo.root();
261
-
let rev = repo.commit().rev();
262
-
263
-
// Create a temporary CAR store for firehose events
264
-
let mut mem = Vec::new();
265
-
let mut firehose_store =
266
-
CarStore::create_with_roots(std::io::Cursor::new(&mut mem), [repo.root()])
267
-
.await
268
-
.context("failed to create temp carstore")?;
269
-
270
-
repo.export_into(&mut firehose_store)
271
-
.await
272
-
.context("failed to export repository")?;
273
-
274
-
Ok::<(Cid, Tid, Vec<u8>), anyhow::Error>((root, rev, mem))
275
-
}
276
-
.await
277
-
.context("failed to create user repo")?;
278
-
279
-
let cid_str = cid.to_string();
280
-
let rev_str = rev.as_str();
281
-
282
-
_ = sqlx::query!(
283
-
r#"
284
-
INSERT INTO accounts (did, email, password, root, plc_root, rev, created_at)
285
-
VALUES (?, ?, ?, ?, ?, ?, datetime('now'));
286
-
287
-
INSERT INTO handles (did, handle, created_at)
288
-
VALUES (?, ?, datetime('now'));
289
-
290
-
-- Cleanup stale invite codes
291
-
DELETE FROM invites
292
-
WHERE count <= 0;
293
-
"#,
294
-
did,
295
-
email,
296
-
pass,
297
-
cid_str,
298
-
plc_cid,
299
-
rev_str,
300
-
did,
301
-
handle
302
-
)
303
-
.execute(&mut *tx)
304
-
.await
305
-
.context("failed to create new account")?;
306
-
307
-
// The account is fully created. Commit the SQL transaction to the database.
308
-
tx.commit().await.context("failed to commit transaction")?;
309
-
310
-
// Broadcast the identity event now that the new identity is resolvable on the public directory.
311
-
fhp.identity(
312
-
atrium_api::com::atproto::sync::subscribe_repos::IdentityData {
313
-
did: Did::from_str(&did).expect("should be valid DID format"),
314
-
handle: Some(Handle::new(handle).expect("should be valid handle")),
315
-
seq: 0, // Filled by firehose later.
316
-
time: Datetime::now(),
317
-
},
318
-
)
319
-
.await;
320
-
321
-
// The new account is now active on this PDS, so we can broadcast the account firehose event.
322
-
fhp.account(
323
-
atrium_api::com::atproto::sync::subscribe_repos::AccountData {
324
-
active: true,
325
-
did: Did::from_str(&did).expect("should be valid DID format"),
326
-
seq: 0, // Filled by firehose later.
327
-
status: None, // "takedown" / "suspended" / "deactivated"
328
-
time: Datetime::now(),
329
-
},
330
-
)
331
-
.await;
332
-
333
-
let did = Did::from_str(&did).expect("should be valid DID format");
334
-
335
-
fhp.commit(Commit {
336
-
car: store,
337
-
ops: Vec::new(),
338
-
cid,
339
-
rev: rev.to_string(),
340
-
did: did.clone(),
341
-
pcid: None,
342
-
blobs: Vec::new(),
343
-
})
344
-
.await;
345
-
346
-
// Finally, sign some authentication tokens for the new user.
347
-
let token = auth::sign(
348
-
&skey,
349
-
"at+jwt",
350
-
&serde_json::json!({
351
-
"scope": "com.atproto.access",
352
-
"sub": did,
353
-
"iat": chrono::Utc::now().timestamp(),
354
-
"exp": chrono::Utc::now().checked_add_signed(chrono::Duration::hours(4)).context("should be valid time")?.timestamp(),
355
-
"aud": format!("did:web:{}", config.host_name)
356
-
}),
357
-
)
358
-
.context("failed to sign jwt")?;
359
-
360
-
let refresh_token = auth::sign(
361
-
&skey,
362
-
"refresh+jwt",
363
-
&serde_json::json!({
364
-
"scope": "com.atproto.refresh",
365
-
"sub": did,
366
-
"iat": chrono::Utc::now().timestamp(),
367
-
"exp": chrono::Utc::now().checked_add_days(chrono::Days::new(90)).context("should be valid time")?.timestamp(),
368
-
"aud": format!("did:web:{}", config.host_name)
369
-
}),
370
-
)
371
-
.context("failed to sign refresh jwt")?;
372
-
373
-
Ok(Json(
374
-
server::create_account::OutputData {
375
-
access_jwt: token,
376
-
did,
377
-
did_doc: None,
378
-
handle: input.handle.clone(),
379
-
refresh_jwt: refresh_token,
380
-
}
381
-
.into(),
382
-
))
383
-
}
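Pulled out of the flow above, the did:plc derivation is simply: sha2-256 over the DAG-CBOR genesis operation, base32 (RFC 4648, lowercase, unpadded), truncated to 24 characters. As a worked, self-contained example mirroring the code in `create_account`:

```rust
use sha2::Digest as _;

// Worked example of the did:plc derivation performed in `create_account`.
fn did_plc_from_genesis_op(op_bytes: &[u8]) -> String {
    let digest = base32::encode(
        base32::Alphabet::Rfc4648Lower { padding: false },
        sha2::Sha256::digest(op_bytes).as_slice(),
    );
    // A 32-byte sha2-256 digest encodes to 52 base32 characters,
    // so the 24-character slice is always in bounds.
    format!("did:plc:{}", &digest[..24])
}
```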
384
-
385
-
/// Create an authentication session.
386
-
/// - POST /xrpc/com.atproto.server.createSession
387
-
/// ### Request Body
388
-
/// - `identifier`: string - Handle or other identifier supported by the server for the authenticating user.
389
-
/// - `password`: string - Password for the authenticating user.
390
-
/// - `authFactorToken` - string (optional)
391
-
/// - `allowTakedown` - boolean (optional) - When true, instead of throwing error for takendown accounts, a valid response with a narrow scoped token will be returned
392
-
/// ### Responses
393
-
/// - 200 OK: {"accessJwt": "string","refreshJwt": "string","handle": "string","did": "string","didDoc": {},"email": "string","emailConfirmed": true,"emailAuthFactor": true,"active": true,"status": "takendown"}
394
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `AccountTakedown`, `AuthFactorTokenRequired`]}
395
-
/// - 401 Unauthorized
396
-
async fn create_session(
397
-
State(db): State<Db>,
398
-
State(skey): State<SigningKey>,
399
-
State(config): State<AppConfig>,
400
-
Json(input): Json<server::create_session::Input>,
401
-
) -> Result<Json<server::create_session::Output>> {
402
-
let handle = &input.identifier;
403
-
let password = &input.password;
404
-
405
-
// TODO: `input.allow_takedown`
406
-
// TODO: `input.auth_factor_token`
407
-
408
-
let Some(account) = sqlx::query!(
409
-
r#"
410
-
WITH LatestHandles AS (
411
-
SELECT did, handle
412
-
FROM handles
413
-
WHERE (did, created_at) IN (
414
-
SELECT did, MAX(created_at) AS max_created_at
415
-
FROM handles
416
-
GROUP BY did
417
-
)
418
-
)
419
-
SELECT a.did, a.password, h.handle
420
-
FROM accounts a
421
-
LEFT JOIN LatestHandles h ON a.did = h.did
422
-
WHERE h.handle = ?
423
-
"#,
424
-
handle
425
-
)
426
-
.fetch_optional(&db)
427
-
.await
428
-
.context("failed to authenticate")?
429
-
else {
430
-
counter!(AUTH_FAILED).increment(1);
431
-
432
-
// SEC: Call argon2's `verify_password` to simulate password verification and discard the result.
433
-
// We do this to avoid exposing a timing attack where attackers can measure the response time to
434
-
// determine whether or not an account exists.
435
-
_ = Argon2::default().verify_password(
436
-
password.as_bytes(),
437
-
&PasswordHash::new(DUMMY_PASSWORD).context("should be valid password hash")?,
438
-
);
439
-
440
-
return Err(Error::with_status(
441
-
StatusCode::UNAUTHORIZED,
442
-
anyhow!("failed to validate credentials"),
443
-
));
444
-
};
445
-
446
-
match Argon2::default().verify_password(
447
-
password.as_bytes(),
448
-
&PasswordHash::new(account.password.as_str()).context("invalid password hash in db")?,
449
-
) {
450
-
Ok(()) => {}
451
-
Err(_e) => {
452
-
counter!(AUTH_FAILED).increment(1);
453
-
454
-
return Err(Error::with_status(
455
-
StatusCode::UNAUTHORIZED,
456
-
anyhow!("failed to validate credentials"),
457
-
));
458
-
}
459
-
}
460
-
461
-
let did = account.did;
462
-
463
-
let token = auth::sign(
464
-
&skey,
465
-
"at+jwt",
466
-
&serde_json::json!({
467
-
"scope": "com.atproto.access",
468
-
"sub": did,
469
-
"iat": chrono::Utc::now().timestamp(),
470
-
"exp": chrono::Utc::now().checked_add_signed(chrono::Duration::hours(4)).context("should be valid time")?.timestamp(),
471
-
"aud": format!("did:web:{}", config.host_name)
472
-
}),
473
-
)
474
-
.context("failed to sign jwt")?;
475
-
476
-
let refresh_token = auth::sign(
477
-
&skey,
478
-
"refresh+jwt",
479
-
&serde_json::json!({
480
-
"scope": "com.atproto.refresh",
481
-
"sub": did,
482
-
"iat": chrono::Utc::now().timestamp(),
483
-
"exp": chrono::Utc::now().checked_add_days(chrono::Days::new(90)).context("should be valid time")?.timestamp(),
484
-
"aud": format!("did:web:{}", config.host_name)
485
-
}),
486
-
)
487
-
.context("failed to sign refresh jwt")?;
488
-
489
-
Ok(Json(
490
-
server::create_session::OutputData {
491
-
access_jwt: token,
492
-
refresh_jwt: refresh_token,
493
-
494
-
active: Some(true),
495
-
did: Did::from_str(&did).expect("should be valid DID format"),
496
-
did_doc: None,
497
-
email: None,
498
-
email_auth_factor: None,
499
-
email_confirmed: None,
500
-
handle: Handle::new(account.handle).expect("should be valid handle"),
501
-
status: None,
502
-
}
503
-
.into(),
504
-
))
505
-
}
506
-
507
-
/// Refresh an authentication session. Requires auth using the 'refreshJwt' (not the 'accessJwt').
508
-
/// - POST /xrpc/com.atproto.server.refreshSession
509
-
/// ### Responses
510
-
/// - 200 OK: {"accessJwt": "string","refreshJwt": "string","handle": "string","did": "string","didDoc": {},"active": true,"status": "takendown"}
511
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `AccountTakedown`]}
512
-
/// - 401 Unauthorized
513
-
async fn refresh_session(
514
-
State(db): State<Db>,
515
-
State(skey): State<SigningKey>,
516
-
State(config): State<AppConfig>,
517
-
req: Request,
518
-
) -> Result<Json<server::refresh_session::Output>> {
519
-
// TODO: store hashes of refresh tokens and enforce single-use
520
-
let auth_token = req
521
-
.headers()
522
-
.get(axum::http::header::AUTHORIZATION)
523
-
.context("no authorization header provided")?
524
-
.to_str()
525
-
.ok()
526
-
.and_then(|auth| auth.strip_prefix("Bearer "))
527
-
.context("invalid authentication token")?;
528
-
529
-
let (typ, claims) =
530
-
auth::verify(&skey.did(), auth_token).context("failed to verify refresh token")?;
531
-
if typ != "refresh+jwt" {
532
-
return Err(Error::with_status(
533
-
StatusCode::UNAUTHORIZED,
534
-
anyhow!("invalid refresh token"),
535
-
));
536
-
}
537
-
if claims
538
-
.get("exp")
539
-
.and_then(serde_json::Value::as_i64)
540
-
.context("failed to get `exp`")?
541
-
< chrono::Utc::now().timestamp()
542
-
{
543
-
return Err(Error::with_status(
544
-
StatusCode::UNAUTHORIZED,
545
-
anyhow!("refresh token expired"),
546
-
));
547
-
}
548
-
if claims
549
-
.get("aud")
550
-
.and_then(|audience| audience.as_str())
551
-
.context("invalid jwt")?
552
-
!= format!("did:web:{}", config.host_name)
553
-
{
554
-
return Err(Error::with_status(
555
-
StatusCode::UNAUTHORIZED,
556
-
anyhow!("invalid audience"),
557
-
));
558
-
}
559
-
560
-
let did = claims
561
-
.get("sub")
562
-
.and_then(|subject| subject.as_str())
563
-
.context("invalid jwt")?;
564
-
565
-
let user = sqlx::query!(
566
-
r#"
567
-
SELECT a.status, h.handle
568
-
FROM accounts a
569
-
JOIN handles h ON a.did = h.did
570
-
WHERE a.did = ?
571
-
ORDER BY h.created_at ASC
572
-
LIMIT 1
573
-
"#,
574
-
did
575
-
)
576
-
.fetch_one(&db)
577
-
.await
578
-
.context("failed to fetch user account")?;
579
-
580
-
let token = auth::sign(
581
-
&skey,
582
-
"at+jwt",
583
-
&serde_json::json!({
584
-
"scope": "com.atproto.access",
585
-
"sub": did,
586
-
"iat": chrono::Utc::now().timestamp(),
587
-
"exp": chrono::Utc::now().checked_add_signed(chrono::Duration::hours(4)).context("should be valid time")?.timestamp(),
588
-
"aud": format!("did:web:{}", config.host_name)
589
-
}),
590
-
)
591
-
.context("failed to sign jwt")?;
592
-
593
-
let refresh_token = auth::sign(
594
-
&skey,
595
-
"refresh+jwt",
596
-
&serde_json::json!({
597
-
"scope": "com.atproto.refresh",
598
-
"sub": did,
599
-
"iat": chrono::Utc::now().timestamp(),
600
-
"exp": chrono::Utc::now().checked_add_days(chrono::Days::new(90)).context("should be valid time")?.timestamp(),
601
-
"aud": format!("did:web:{}", config.host_name)
602
-
}),
603
-
)
604
-
.context("failed to sign refresh jwt")?;
605
-
606
-
let active = user.status == "active";
607
-
let status = if active { None } else { Some(user.status) };
608
-
609
-
Ok(Json(
610
-
server::refresh_session::OutputData {
611
-
access_jwt: token,
612
-
refresh_jwt: refresh_token,
613
-
614
-
active: Some(active), // TODO?
615
-
did: Did::new(did.to_owned()).expect("should be valid DID format"),
616
-
did_doc: None,
617
-
handle: Handle::new(user.handle).expect("should be valid handle"),
618
-
status,
619
-
}
620
-
.into(),
621
-
))
622
-
}
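The access/refresh claim pair above is built identically in `create_account`, `create_session`, and here. A deduplicating helper could look like the following sketch — the function itself is hypothetical, but the claim values mirror the code:

```rust
use anyhow::Context as _;

// Hypothetical helper mirroring the claim sets minted in three places above.
fn session_claims(did: &str, host_name: &str) -> anyhow::Result<(serde_json::Value, serde_json::Value)> {
    let now = chrono::Utc::now();
    let aud = format!("did:web:{host_name}");

    // Short-lived access token: 4 hours.
    let access = serde_json::json!({
        "scope": "com.atproto.access",
        "sub": did,
        "iat": now.timestamp(),
        "exp": now.checked_add_signed(chrono::Duration::hours(4)).context("should be valid time")?.timestamp(),
        "aud": aud.clone()
    });

    // Long-lived refresh token: 90 days.
    let refresh = serde_json::json!({
        "scope": "com.atproto.refresh",
        "sub": did,
        "iat": now.timestamp(),
        "exp": now.checked_add_days(chrono::Days::new(90)).context("should be valid time")?.timestamp(),
        "aud": aud
    });

    Ok((access, refresh))
}
```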
623
-
624
-
/// Get a signed token on behalf of the requesting DID for the requested service.
625
-
/// - GET /xrpc/com.atproto.server.getServiceAuth
626
-
/// ### Request Query Parameters
627
-
/// - `aud`: string - The DID of the service that the token will be used to authenticate with
628
-
/// - `exp`: integer (optional) - The time in Unix Epoch seconds that the JWT expires. Defaults to 60 seconds in the future. The service may enforce certain time bounds on tokens depending on the requested scope.
629
-
/// - `lxm`: string (optional) - Lexicon (XRPC) method to bind the requested token to
630
-
/// ### Responses
631
-
/// - 200 OK: {token: string}
632
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `BadExpiration`]}
633
-
/// - 401 Unauthorized
634
-
async fn get_service_auth(
635
-
user: AuthenticatedUser,
636
-
State(skey): State<SigningKey>,
637
-
Query(input): Query<server::get_service_auth::ParametersData>,
638
-
) -> Result<Json<server::get_service_auth::Output>> {
639
-
let user_did = user.did();
640
-
let aud = input.aud.as_str();
641
-
642
-
let exp = (chrono::Utc::now().checked_add_signed(chrono::Duration::minutes(1)))
643
-
.context("should be valid expiration datetime")?
644
-
.timestamp();
645
-
let jti = rand::thread_rng()
646
-
.sample_iter(rand::distributions::Alphanumeric)
647
-
.take(10)
648
-
.map(char::from)
649
-
.collect::<String>();
650
-
651
-
let mut claims = serde_json::json!({
652
-
"iss": user_did.as_str(),
653
-
"aud": aud,
654
-
"exp": exp,
655
-
"jti": jti,
656
-
});
657
-
658
-
if let Some(ref lxm) = input.lxm {
659
-
// `Map::insert` returns the prior value (`None` for a new key); discard it.
660
-
_ = claims
661
-
.as_object_mut()
662
-
.context("should be a valid object")?
663
-
.insert("lxm".to_owned(), serde_json::Value::String(lxm.to_string()));
664
-
}
665
-
666
-
// Mint a bearer token by signing a JSON web token.
667
-
let token = auth::sign(&skey, "JWT", &claims).context("failed to sign jwt")?;
668
-
669
-
Ok(Json(server::get_service_auth::OutputData { token }.into()))
670
-
}
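On the consuming side, a service that receives this token would enforce the binding: if an `lxm` claim is present, it must equal the NSID of the invoked XRPC method. An illustrative check (not an API of this crate):

```rust
// Sketch of the method-binding check a receiving service would perform.
fn lxm_allows(claims: &serde_json::Value, invoked_nsid: &str) -> bool {
    match claims.get("lxm").and_then(|v| v.as_str()) {
        // No `lxm` claim: the token is not method-bound.
        None => true,
        Some(lxm) => lxm == invoked_nsid,
    }
}
```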
671
-
672
-
/// Get information about the current auth session. Requires auth.
673
-
/// - GET /xrpc/com.atproto.server.getSession
674
-
/// ### Responses
675
-
/// - 200 OK: {"handle": "string","did": "string","email": "string","emailConfirmed": true,"emailAuthFactor": true,"didDoc": {},"active": true,"status": "takendown"}
676
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
677
-
/// - 401 Unauthorized
678
-
async fn get_session(
679
-
user: AuthenticatedUser,
680
-
State(db): State<Db>,
681
-
) -> Result<Json<server::get_session::Output>> {
682
-
let did = user.did();
683
-
#[expect(clippy::shadow_unrelated, reason = "is related")]
684
-
if let Some(user) = sqlx::query!(
685
-
r#"
686
-
SELECT a.email, a.status, (
687
-
SELECT h.handle
688
-
FROM handles h
689
-
WHERE h.did = a.did
690
-
ORDER BY h.created_at ASC
691
-
LIMIT 1
692
-
) AS handle
693
-
FROM accounts a
694
-
WHERE a.did = ?
695
-
"#,
696
-
did
697
-
)
698
-
.fetch_optional(&db)
699
-
.await
700
-
.context("failed to fetch session")?
701
-
{
702
-
let active = user.status == "active";
703
-
let status = if active { None } else { Some(user.status) };
704
-
705
-
Ok(Json(
706
-
server::get_session::OutputData {
707
-
active: Some(active),
708
-
did: Did::from_str(&did).expect("should be valid DID format"),
709
-
did_doc: None,
710
-
email: Some(user.email),
711
-
email_auth_factor: None,
712
-
email_confirmed: None,
713
-
handle: Handle::new(user.handle).expect("should be valid handle"),
714
-
status,
715
-
}
716
-
.into(),
717
-
))
718
-
} else {
719
-
Err(Error::with_status(
720
-
StatusCode::UNAUTHORIZED,
721
-
anyhow!("user not found"),
722
-
))
723
-
}
724
-
}
725
-
726
-
/// Describes the server's account creation requirements and capabilities. Implemented by PDS.
727
-
/// - GET /xrpc/com.atproto.server.describeServer
728
-
/// ### Responses
729
-
/// - 200 OK: {"inviteCodeRequired": true,"phoneVerificationRequired": true,"availableUserDomains": [`string`],"links": {"privacyPolicy": "string","termsOfService": "string"},"contact": {"email": "string"},"did": "string"}
730
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
731
-
/// - 401 Unauthorized
732
-
async fn describe_server(
733
-
State(config): State<AppConfig>,
734
-
) -> Result<Json<server::describe_server::Output>> {
735
-
Ok(Json(
736
-
server::describe_server::OutputData {
737
-
available_user_domains: vec![],
738
-
contact: None,
739
-
did: Did::from_str(&format!("did:web:{}", config.host_name))
740
-
.expect("should be valid DID format"),
741
-
invite_code_required: Some(true),
742
-
links: None,
743
-
phone_verification_required: Some(false), // email verification
744
-
}
745
-
.into(),
746
-
))
747
-
}
748
-
749
-
async fn todo() -> Result<()> {
750
-
Err(Error::unimplemented(anyhow!("not implemented")))
751
-
}
752
-
753
-
#[rustfmt::skip]
754
-
/// These endpoints are part of the atproto PDS server and account management APIs. \
755
-
/// Requests often require authentication and are made directly to the user's own PDS instance.
756
-
/// ### Routes
757
-
/// - `POST /xrpc/com.atproto.server.createAccount` -> [`create_account`]
758
-
/// - `POST /xrpc/com.atproto.server.createInviteCode` -> [`create_invite_code`]
759
-
/// - `POST /xrpc/com.atproto.server.createSession` -> [`create_session`]
760
-
/// - `GET /xrpc/com.atproto.server.describeServer` -> [`describe_server`]
761
-
/// - `GET /xrpc/com.atproto.server.getServiceAuth` -> [`get_service_auth`]
762
-
/// - `GET /xrpc/com.atproto.server.getSession` -> [`get_session`]
763
-
/// - `POST /xrpc/com.atproto.server.refreshSession` -> [`refresh_session`]
764
-
pub(super) fn routes() -> Router<AppState> {
765
-
Router::new()
766
-
.route(concat!("/", server::activate_account::NSID), post(todo))
767
-
.route(concat!("/", server::check_account_status::NSID), post(todo))
768
-
.route(concat!("/", server::confirm_email::NSID), post(todo))
769
-
.route(concat!("/", server::create_account::NSID), post(create_account))
770
-
.route(concat!("/", server::create_app_password::NSID), post(todo))
771
-
.route(concat!("/", server::create_invite_code::NSID), post(create_invite_code))
772
-
.route(concat!("/", server::create_invite_codes::NSID), post(todo))
773
-
.route(concat!("/", server::create_session::NSID), post(create_session))
774
-
.route(concat!("/", server::deactivate_account::NSID), post(todo))
775
-
.route(concat!("/", server::delete_account::NSID), post(todo))
776
-
.route(concat!("/", server::delete_session::NSID), post(todo))
777
-
.route(concat!("/", server::describe_server::NSID), get(describe_server))
778
-
.route(concat!("/", server::get_account_invite_codes::NSID), post(todo))
779
-
.route(concat!("/", server::get_service_auth::NSID), get(get_service_auth))
780
-
.route(concat!("/", server::get_session::NSID), get(get_session))
781
-
.route(concat!("/", server::list_app_passwords::NSID), post(todo))
782
-
.route(concat!("/", server::refresh_session::NSID), post(refresh_session))
783
-
.route(concat!("/", server::request_account_delete::NSID), post(todo))
784
-
.route(concat!("/", server::request_email_confirmation::NSID), post(todo))
785
-
.route(concat!("/", server::request_email_update::NSID), post(todo))
786
-
.route(concat!("/", server::request_password_reset::NSID), post(todo))
787
-
.route(concat!("/", server::reserve_signing_key::NSID), post(todo))
788
-
.route(concat!("/", server::reset_password::NSID), post(todo))
789
-
.route(concat!("/", server::revoke_app_password::NSID), post(todo))
790
-
.route(concat!("/", server::update_email::NSID), post(todo))
791
-
}
-428
src/endpoints/sync.rs
-428
src/endpoints/sync.rs
···
1
-
//! Endpoints for the `ATProto` sync API. (/xrpc/com.atproto.sync.*)
2
-
use std::str::FromStr as _;
3
-
4
-
use anyhow::{Context as _, anyhow};
5
-
use atrium_api::{
6
-
com::atproto::sync,
7
-
types::{LimitedNonZeroU16, string::Did},
8
-
};
9
-
use atrium_repo::{
10
-
Cid,
11
-
blockstore::{
12
-
AsyncBlockStoreRead as _, AsyncBlockStoreWrite as _, CarStore, DAG_CBOR, SHA2_256,
13
-
},
14
-
};
15
-
use axum::{
16
-
Json, Router,
17
-
body::Body,
18
-
extract::{Query, State, WebSocketUpgrade},
19
-
http::{self, Response, StatusCode},
20
-
response::IntoResponse,
21
-
routing::get,
22
-
};
23
-
use constcat::concat;
24
-
use futures::stream::TryStreamExt as _;
25
-
use tokio_util::io::ReaderStream;
26
-
27
-
use crate::{
28
-
AppState, Db, Error, Result,
29
-
config::AppConfig,
30
-
firehose::FirehoseProducer,
31
-
storage::{open_repo_db, open_store},
32
-
};
33
-
34
-
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq, Eq)]
35
-
#[serde(rename_all = "camelCase")]
36
-
/// Parameters for `/xrpc/com.atproto.sync.listBlobs` \
37
-
/// HACK: `limit` may be passed as a string, so we must treat it as one.
38
-
pub(super) struct ListBlobsParameters {
39
-
#[serde(skip_serializing_if = "core::option::Option::is_none")]
40
-
/// Optional cursor to paginate through blobs.
41
-
pub cursor: Option<String>,
42
-
///The DID of the repo.
43
-
pub did: Did,
44
-
#[serde(skip_serializing_if = "core::option::Option::is_none")]
45
-
/// Optional limit of blobs to return.
46
-
pub limit: Option<String>,
47
-
///Optional revision of the repo to list blobs since.
48
-
#[serde(skip_serializing_if = "core::option::Option::is_none")]
49
-
pub since: Option<String>,
50
-
}
51
-
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq, Eq)]
52
-
#[serde(rename_all = "camelCase")]
53
-
/// Parameters for `/xrpc/com.atproto.sync.listRepos` \
54
-
/// HACK: `limit` may be passed as a string, so we must treat it as one.
55
-
pub(super) struct ListReposParameters {
56
-
#[serde(skip_serializing_if = "core::option::Option::is_none")]
57
-
/// Optional cursor to paginate through repos.
58
-
pub cursor: Option<String>,
59
-
#[serde(skip_serializing_if = "core::option::Option::is_none")]
60
-
/// Optional limit of repos to return.
61
-
pub limit: Option<String>,
62
-
}
63
-
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq, Eq)]
64
-
#[serde(rename_all = "camelCase")]
65
-
/// Parameters for `/xrpc/com.atproto.sync.subscribeRepos` \
66
-
/// HACK: `cursor` may be passed as a string, so we must treat it as one.
67
-
pub(super) struct SubscribeReposParametersData {
68
-
/// The last known event seq number to backfill from.
69
-
#[serde(skip_serializing_if = "core::option::Option::is_none")]
70
-
pub cursor: Option<String>,
71
-
}
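
The structs above work around clients that send numeric query parameters (`limit`, and `subscribeRepos`'s `cursor`) as query-string text; the values still need to be parsed and bounds-checked before use. A sketch of that normalization, using the default and maximum from the `listBlobs`/`listRepos` doc comments below (the helper name is illustrative):

```rust
/// Parse a string-typed `limit` query parameter, applying the documented
/// default (500) and bounds (1..=1000).
fn parse_limit(limit: Option<&str>) -> Result<u16, &'static str> {
    const DEFAULT: u16 = 500;
    const MAX: u16 = 1000;

    let Some(s) = limit else { return Ok(DEFAULT) };
    let n: u16 = s.parse().map_err(|_| "limit is not a valid integer")?;
    if (1..=MAX).contains(&n) {
        Ok(n)
    } else {
        Err("limit out of range")
    }
}
```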
72
-
73
-
async fn get_blob(
74
-
State(config): State<AppConfig>,
75
-
Query(input): Query<sync::get_blob::ParametersData>,
76
-
) -> Result<Response<Body>> {
77
-
let blob = config
78
-
.blob
79
-
.path
80
-
.join(format!("{}.blob", input.cid.as_ref()));
81
-
82
-
let f = tokio::fs::File::open(blob)
83
-
.await
84
-
.context("blob not found")?;
85
-
let len = f
86
-
.metadata()
87
-
.await
88
-
.context("failed to query file metadata")?
89
-
.len();
90
-
91
-
let s = ReaderStream::new(f);
92
-
93
-
Ok(Response::builder()
94
-
.header(http::header::CONTENT_LENGTH, format!("{len}"))
95
-
.body(Body::from_stream(s))
96
-
.context("failed to construct response")?)
97
-
}
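
For reference, a client-side call against `get_blob` might look like the sketch below (the host, DID, and CID are placeholders; `reqwest` is an assumed client dependency):

```rust
async fn fetch_blob(client: &reqwest::Client) -> anyhow::Result<Vec<u8>> {
    let bytes = client
        .get("https://pds.example/xrpc/com.atproto.sync.getBlob")
        .query(&[("did", "did:plc:example"), ("cid", "bafy-placeholder")])
        .send()
        .await?
        .error_for_status()?
        .bytes()
        .await?;
    Ok(bytes.to_vec())
}
```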
98
-
99
-
/// Get data blocks from a given repo, by CID. Does not require auth; implemented by PDS.
100
-
/// - GET /xrpc/com.atproto.sync.getBlocks
101
-
/// ### Query Parameters
102
-
/// - `did`: The DID of the repo.
103
-
/// - `cids`: The CIDs of the blocks to fetch.
104
-
/// ### Responses
105
-
/// - 200 OK: ...
106
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `BlockNotFound`, `RepoNotFound`,
107
-
/// `RepoTakendown`, `RepoSuspended`, `RepoDeactivated`]}
108
-
async fn get_blocks(
109
-
State(config): State<AppConfig>,
110
-
Query(input): Query<sync::get_blocks::ParametersData>,
111
-
) -> Result<Response<Body>> {
112
-
let mut repo = open_store(&config.repo, input.did.as_str())
113
-
.await
114
-
.context("failed to open repository")?;
115
-
116
-
let mut mem = Vec::new();
117
-
let mut store = CarStore::create(std::io::Cursor::new(&mut mem))
118
-
.await
119
-
.context("failed to create intermediate carstore")?;
120
-
121
-
for cid in &input.cids {
122
-
// SEC: This can potentially fetch stale blocks from a repository (e.g. those that were deleted).
123
-
// We'll want to prevent accesses to stale blocks eventually just to respect a user's right to be forgotten.
124
-
_ = store
125
-
.write_block(
126
-
DAG_CBOR,
127
-
SHA2_256,
128
-
&repo
129
-
.read_block(*cid.as_ref())
130
-
.await
131
-
.context("failed to read block")?,
132
-
)
133
-
.await
134
-
.context("failed to write block")?;
135
-
}
136
-
137
-
Ok(Response::builder()
138
-
.header(http::header::CONTENT_TYPE, "application/vnd.ipld.car")
139
-
.body(Body::from(mem))
140
-
.context("failed to construct response")?)
141
-
}
142
-
143
-
/// Get the current commit CID & revision of the specified repo. Does not require auth.
144
-
/// ### Query Parameters
145
-
/// - `did`: The DID of the repo.
146
-
/// ### Responses
147
-
/// - 200 OK: {"cid": "string","rev": "string"}
148
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `RepoTakendown`, `RepoSuspended`, `RepoDeactivated`]}
149
-
async fn get_latest_commit(
150
-
State(config): State<AppConfig>,
151
-
State(db): State<Db>,
152
-
Query(input): Query<sync::get_latest_commit::ParametersData>,
153
-
) -> Result<Json<sync::get_latest_commit::Output>> {
154
-
let repo = open_repo_db(&config.repo, &db, input.did.as_str())
155
-
.await
156
-
.context("failed to open repository")?;
157
-
158
-
let cid = repo.root();
159
-
let commit = repo.commit();
160
-
161
-
Ok(Json(
162
-
sync::get_latest_commit::OutputData {
163
-
cid: atrium_api::types::string::Cid::new(cid),
164
-
rev: commit.rev(),
165
-
}
166
-
.into(),
167
-
))
168
-
}
169
-
170
-
/// Get data blocks needed to prove the existence or non-existence of record in the current version of repo. Does not require auth.
171
-
/// ### Query Parameters
172
-
/// - `did`: The DID of the repo.
173
-
/// - `collection`: nsid
174
-
/// - `rkey`: record-key
175
-
/// ### Responses
176
-
/// - 200 OK: ...
177
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `RecordNotFound`, `RepoNotFound`, `RepoTakendown`,
178
-
/// `RepoSuspended`, `RepoDeactivated`]}
179
-
async fn get_record(
180
-
State(config): State<AppConfig>,
181
-
State(db): State<Db>,
182
-
Query(input): Query<sync::get_record::ParametersData>,
183
-
) -> Result<Response<Body>> {
184
-
let mut repo = open_repo_db(&config.repo, &db, input.did.as_str())
185
-
.await
186
-
.context("failed to open repo")?;
187
-
188
-
let key = format!("{}/{}", input.collection.as_str(), input.rkey.as_str());
189
-
190
-
let mut contents = Vec::new();
191
-
let mut ret_store =
192
-
CarStore::create_with_roots(std::io::Cursor::new(&mut contents), [repo.root()])
193
-
.await
194
-
.context("failed to create car store")?;
195
-
196
-
repo.extract_raw_into(&key, &mut ret_store)
197
-
.await
198
-
.context("failed to extract records")?;
199
-
200
-
Ok(Response::builder()
201
-
.header(http::header::CONTENT_TYPE, "application/vnd.ipld.car")
202
-
.body(Body::from(contents))
203
-
.context("failed to construct response")?)
204
-
}
205
-
206
-
/// Get the hosting status for a repository, on this server. Expected to be implemented by PDS and Relay.
207
-
/// ### Query Parameters
208
-
/// - `did`: The DID of the repo.
209
-
/// ### Responses
210
-
/// - 200 OK: {"did": "string","active": true,"status": "takendown","rev": "string"}
211
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `RepoNotFound`]}
212
-
async fn get_repo_status(
213
-
State(db): State<Db>,
214
-
Query(input): Query<sync::get_repo_status::ParametersData>,
215
-
) -> Result<Json<sync::get_repo_status::Output>> {
216
-
let did = input.did.as_str();
217
-
let r = sqlx::query!(r#"SELECT rev, status FROM accounts WHERE did = ?"#, did)
218
-
.fetch_optional(&db)
219
-
.await
220
-
.context("failed to execute query")?;
221
-
222
-
let Some(r) = r else {
223
-
return Err(Error::with_status(
224
-
StatusCode::NOT_FOUND,
225
-
anyhow!("account not found"),
226
-
));
227
-
};
228
-
229
-
let active = r.status == "active";
230
-
let status = if active { None } else { Some(r.status) };
231
-
232
-
Ok(Json(
233
-
sync::get_repo_status::OutputData {
234
-
active,
235
-
status,
236
-
did: input.did.clone(),
237
-
rev: Some(
238
-
atrium_api::types::string::Tid::new(r.rev).expect("should be able to convert Tid"),
239
-
),
240
-
}
241
-
.into(),
242
-
))
243
-
}
244
-
245
-
/// Download a repository export as CAR file. Optionally only a 'diff' since a previous revision.
246
-
/// Does not require auth; implemented by PDS.
247
-
/// ### Query Parameters
248
-
/// - `did`: The DID of the repo.
249
-
/// - `since`: The revision ('rev') of the repo to create a diff from.
250
-
/// ### Responses
251
-
/// - 200 OK: ...
252
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `RepoNotFound`,
253
-
/// `RepoTakendown`, `RepoSuspended`, `RepoDeactivated`]}
254
-
async fn get_repo(
255
-
State(config): State<AppConfig>,
256
-
State(db): State<Db>,
257
-
Query(input): Query<sync::get_repo::ParametersData>,
258
-
) -> Result<Response<Body>> {
259
-
let mut repo = open_repo_db(&config.repo, &db, input.did.as_str())
260
-
.await
261
-
.context("failed to open repo")?;
262
-
263
-
let mut contents = Vec::new();
264
-
let mut store = CarStore::create_with_roots(std::io::Cursor::new(&mut contents), [repo.root()])
265
-
.await
266
-
.context("failed to create car store")?;
267
-
268
-
repo.export_into(&mut store)
269
-
.await
270
-
.context("failed to extract records")?;
271
-
272
-
Ok(Response::builder()
273
-
.header(http::header::CONTENT_TYPE, "application/vnd.ipld.car")
274
-
.body(Body::from(contents))
275
-
.context("failed to construct response")?)
276
-
}
277
-
278
-
/// List blob CIDs for an account, since some repo revision. Does not require auth; implemented by PDS.
279
-
/// ### Query Parameters
280
-
/// - `did`: The DID of the repo. Required.
281
-
/// - `since`: Optional revision of the repo to list blobs since.
282
-
/// - `limit`: >= 1 and <= 1000, default 500
283
-
/// - `cursor`: string
284
-
/// ### Responses
285
-
/// - 200 OK: {"cursor": "string","cids": [string]}
286
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`, `RepoNotFound`, `RepoTakendown`,
287
-
/// `RepoSuspended`, `RepoDeactivated`]}
288
-
async fn list_blobs(
289
-
State(db): State<Db>,
290
-
Query(input): Query<sync::list_blobs::ParametersData>,
291
-
) -> Result<Json<sync::list_blobs::Output>> {
292
-
let did_str = input.did.as_str();
293
-
294
-
// TODO: `input.since`
295
-
// TODO: `input.limit`
296
-
// TODO: `input.cursor`
297
-
298
-
let cids = sqlx::query_scalar!(r#"SELECT cid FROM blob_ref WHERE did = ?"#, did_str)
299
-
.fetch_all(&db)
300
-
.await
301
-
.context("failed to query blobs")?;
302
-
303
-
let cids = cids
304
-
.into_iter()
305
-
.map(|c| {
306
-
Cid::from_str(&c)
307
-
.map(atrium_api::types::string::Cid::new)
308
-
.map_err(anyhow::Error::new)
309
-
})
310
-
.collect::<anyhow::Result<Vec<_>>>()
311
-
.context("failed to convert cids")?;
312
-
313
-
Ok(Json(
314
-
sync::list_blobs::OutputData { cursor: None, cids }.into(),
315
-
))
316
-
}
317
-
318
-
/// Enumerates all the DID, rev, and commit CID for all repos hosted by this service.
319
-
/// Does not require auth; implemented by PDS and Relay.
320
-
/// ### Query Parameters
321
-
/// - `limit`: >= 1 and <= 1000, default 500
322
-
/// - `cursor`: string
323
-
/// ### Responses
324
-
/// - 200 OK: {"cursor": "string","repos": [{"did": "string","head": "string","rev": "string","active": true,"status": "takendown"}]}
325
-
/// - 400 Bad Request: {error:[`InvalidRequest`, `ExpiredToken`, `InvalidToken`]}
326
-
async fn list_repos(
327
-
State(db): State<Db>,
328
-
Query(input): Query<sync::list_repos::ParametersData>,
329
-
) -> Result<Json<sync::list_repos::Output>> {
330
-
struct Record {
331
-
/// The DID of the repo.
332
-
did: String,
333
-
/// The latest revision (TID) of the repo.
334
-
rev: String,
335
-
/// The root commit CID of the repo.
336
-
root: String,
337
-
}
338
-
339
-
let limit: u16 = input.limit.unwrap_or(LimitedNonZeroU16::MAX).into();
340
-
341
-
let r = if let Some(ref cursor) = input.cursor {
342
-
let r = sqlx::query_as!(
343
-
Record,
344
-
r#"SELECT did, root, rev FROM accounts WHERE did > ? LIMIT ?"#,
345
-
cursor,
346
-
limit
347
-
)
348
-
.fetch(&db);
349
-
350
-
r.try_collect::<Vec<_>>()
351
-
.await
352
-
.context("failed to fetch profiles")?
353
-
} else {
354
-
let r = sqlx::query_as!(
355
-
Record,
356
-
r#"SELECT did, root, rev FROM accounts LIMIT ?"#,
357
-
limit
358
-
)
359
-
.fetch(&db);
360
-
361
-
r.try_collect::<Vec<_>>()
362
-
.await
363
-
.context("failed to fetch profiles")?
364
-
};
365
-
366
-
let cursor = r.last().map(|r| r.did.clone());
367
-
let repos = r
368
-
.into_iter()
369
-
.map(|r| {
370
-
sync::list_repos::RepoData {
371
-
active: Some(true),
372
-
did: Did::new(r.did).expect("should be a valid DID"),
373
-
head: atrium_api::types::string::Cid::new(
374
-
Cid::from_str(&r.root).expect("should be a valid CID"),
375
-
),
376
-
rev: atrium_api::types::string::Tid::new(r.rev)
377
-
.expect("should be able to convert Tid"),
378
-
status: None,
379
-
}
380
-
.into()
381
-
})
382
-
.collect::<Vec<_>>();
383
-
384
-
Ok(Json(sync::list_repos::OutputData { cursor, repos }.into()))
385
-
}
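
The cursor returned by `list_repos` is simply the DID of the last row, and the `WHERE did > ?` query above resumes after it (keyset pagination). A hedged sketch of a client walking all pages (host is a placeholder):

```rust
async fn walk_repos(client: &reqwest::Client) -> anyhow::Result<Vec<String>> {
    let mut dids = Vec::new();
    let mut cursor: Option<String> = None;
    loop {
        let mut req = client.get("https://pds.example/xrpc/com.atproto.sync.listRepos");
        if let Some(ref c) = cursor {
            req = req.query(&[("cursor", c.as_str())]);
        }
        let page: serde_json::Value = req.send().await?.error_for_status()?.json().await?;

        let repos = page["repos"].as_array().cloned().unwrap_or_default();
        if repos.is_empty() {
            break; // an empty page means we walked past the last DID
        }
        dids.extend(repos.iter().filter_map(|r| r["did"].as_str().map(str::to_owned)));
        cursor = page["cursor"].as_str().map(str::to_owned);
        if cursor.is_none() {
            break;
        }
    }
    Ok(dids)
}
```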
386
-
387
-
/// Repository event stream, aka Firehose endpoint. Outputs repo commits with diff data, and identity update events,
388
-
/// for all repositories on the current server. See the atproto specifications for details around stream sequencing,
389
-
/// repo versioning, CAR diff format, and more. Public and does not require auth; implemented by PDS and Relay.
390
-
/// ### Query Parameters
391
-
/// - `cursor`: The last known event seq number to backfill from.
392
-
/// ### Responses
393
-
/// - 200 OK: ...
394
-
async fn subscribe_repos(
395
-
ws_up: WebSocketUpgrade,
396
-
State(fh): State<FirehoseProducer>,
397
-
Query(input): Query<sync::subscribe_repos::ParametersData>,
398
-
) -> impl IntoResponse {
399
-
ws_up.on_upgrade(async move |ws| {
400
-
fh.client_connection(ws, input.cursor).await;
401
-
})
402
-
}
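
On the wire, each event from `subscribe_repos` is a binary WebSocket frame: a DAG-CBOR header (`{op, t}`) immediately followed by a DAG-CBOR body. A sketch of a consumer, assuming `tokio-tungstenite` on the client side (host and cursor are placeholders):

```rust
use futures::StreamExt as _;
use tokio_tungstenite::tungstenite::Message;

async fn tail_firehose() -> anyhow::Result<()> {
    let (mut ws, _response) = tokio_tungstenite::connect_async(
        "wss://pds.example/xrpc/com.atproto.sync.subscribeRepos?cursor=0",
    )
    .await?;

    while let Some(frame) = ws.next().await {
        if let Message::Binary(bytes) = frame? {
            // Header and body are two concatenated DAG-CBOR values.
            println!("received event frame: {} bytes", bytes.len());
        }
    }
    Ok(())
}
```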
403
-
404
-
#[rustfmt::skip]
405
-
/// These endpoints are part of the atproto repository synchronization APIs. Requests usually do not require authentication,
406
-
/// and can be made to PDS instances or Relay instances.
407
-
/// ### Routes
408
-
/// - `GET /xrpc/com.atproto.sync.getBlob` -> [`get_blob`]
409
-
/// - `GET /xrpc/com.atproto.sync.getBlocks` -> [`get_blocks`]
410
-
/// - `GET /xrpc/com.atproto.sync.getLatestCommit` -> [`get_latest_commit`]
411
-
/// - `GET /xrpc/com.atproto.sync.getRecord` -> [`get_record`]
412
-
/// - `GET /xrpc/com.atproto.sync.getRepoStatus` -> [`get_repo_status`]
413
-
/// - `GET /xrpc/com.atproto.sync.getRepo` -> [`get_repo`]
414
-
/// - `GET /xrpc/com.atproto.sync.listBlobs` -> [`list_blobs`]
415
-
/// - `GET /xrpc/com.atproto.sync.listRepos` -> [`list_repos`]
416
-
/// - `GET /xrpc/com.atproto.sync.subscribeRepos` -> [`subscribe_repos`]
417
-
pub(super) fn routes() -> Router<AppState> {
418
-
Router::new()
419
-
.route(concat!("/", sync::get_blob::NSID), get(get_blob))
420
-
.route(concat!("/", sync::get_blocks::NSID), get(get_blocks))
421
-
.route(concat!("/", sync::get_latest_commit::NSID), get(get_latest_commit))
422
-
.route(concat!("/", sync::get_record::NSID), get(get_record))
423
-
.route(concat!("/", sync::get_repo_status::NSID), get(get_repo_status))
424
-
.route(concat!("/", sync::get_repo::NSID), get(get_repo))
425
-
.route(concat!("/", sync::list_blobs::NSID), get(list_blobs))
426
-
.route(concat!("/", sync::list_repos::NSID), get(list_repos))
427
-
.route(concat!("/", sync::subscribe_repos::NSID), get(subscribe_repos))
428
-
}
+151
src/error.rs
+151
src/error.rs
···
4
4
http::StatusCode,
5
5
response::{IntoResponse, Response},
6
6
};
7
+
use rsky_pds::handle::{self, errors::ErrorKind};
7
8
use thiserror::Error;
8
9
use tracing::error;
9
10
···
118
119
}
119
120
}
120
121
}
122
+
123
+
/// API error types that can be returned to clients
124
+
#[derive(Clone, Debug)]
125
+
pub enum ApiError {
126
+
RuntimeError,
127
+
InvalidLogin,
128
+
AccountTakendown,
129
+
InvalidRequest(String),
130
+
ExpiredToken,
131
+
InvalidToken,
132
+
RecordNotFound,
133
+
InvalidHandle,
134
+
InvalidEmail,
135
+
InvalidPassword,
136
+
InvalidInviteCode,
137
+
HandleNotAvailable,
138
+
EmailNotAvailable,
139
+
UnsupportedDomain,
140
+
UnresolvableDid,
141
+
IncompatibleDidDoc,
142
+
WellKnownNotFound,
143
+
AccountNotFound,
144
+
BlobNotFound,
145
+
BadRequest(String, String),
146
+
AuthRequiredError(String),
147
+
}
148
+
149
+
impl ApiError {
150
+
/// Get the appropriate HTTP status code for this error
151
+
const fn status_code(&self) -> StatusCode {
152
+
match self {
153
+
Self::RuntimeError => StatusCode::INTERNAL_SERVER_ERROR,
154
+
Self::InvalidLogin
155
+
| Self::ExpiredToken
156
+
| Self::InvalidToken
157
+
| Self::AuthRequiredError(_) => StatusCode::UNAUTHORIZED,
158
+
Self::AccountTakendown => StatusCode::FORBIDDEN,
159
+
Self::RecordNotFound
160
+
| Self::WellKnownNotFound
161
+
| Self::AccountNotFound
162
+
| Self::BlobNotFound => StatusCode::NOT_FOUND,
163
+
// All bad requests grouped together
164
+
_ => StatusCode::BAD_REQUEST,
165
+
}
166
+
}
167
+
168
+
/// Get the error type string for API responses
169
+
fn error_type(&self) -> String {
170
+
match self {
171
+
Self::RuntimeError => "InternalServerError",
172
+
Self::InvalidLogin => "InvalidLogin",
173
+
Self::AccountTakendown => "AccountTakendown",
174
+
Self::InvalidRequest(_) => "InvalidRequest",
175
+
Self::ExpiredToken => "ExpiredToken",
176
+
Self::InvalidToken => "InvalidToken",
177
+
Self::RecordNotFound => "RecordNotFound",
178
+
Self::InvalidHandle => "InvalidHandle",
179
+
Self::InvalidEmail => "InvalidEmail",
180
+
Self::InvalidPassword => "InvalidPassword",
181
+
Self::InvalidInviteCode => "InvalidInviteCode",
182
+
Self::HandleNotAvailable => "HandleNotAvailable",
183
+
Self::EmailNotAvailable => "EmailNotAvailable",
184
+
Self::UnsupportedDomain => "UnsupportedDomain",
185
+
Self::UnresolvableDid => "UnresolvableDid",
186
+
Self::IncompatibleDidDoc => "IncompatibleDidDoc",
187
+
Self::WellKnownNotFound => "WellKnownNotFound",
188
+
Self::AccountNotFound => "AccountNotFound",
189
+
Self::BlobNotFound => "BlobNotFound",
190
+
Self::BadRequest(error, _) => error,
191
+
Self::AuthRequiredError(_) => "AuthRequiredError",
192
+
}
193
+
.to_owned()
194
+
}
195
+
196
+
/// Get the user-facing error message
197
+
fn message(&self) -> String {
198
+
match self {
199
+
Self::RuntimeError => "Something went wrong",
200
+
Self::InvalidLogin => "Invalid identifier or password",
201
+
Self::AccountTakendown => "Account has been taken down",
202
+
Self::InvalidRequest(msg) => msg,
203
+
Self::ExpiredToken => "Token is expired",
204
+
Self::InvalidToken => "Token is invalid",
205
+
Self::RecordNotFound => "Record could not be found",
206
+
Self::InvalidHandle => "Handle is invalid",
207
+
Self::InvalidEmail => "Invalid email",
208
+
Self::InvalidPassword => "Invalid Password",
209
+
Self::InvalidInviteCode => "Invalid invite code",
210
+
Self::HandleNotAvailable => "Handle not available",
211
+
Self::EmailNotAvailable => "Email not available",
212
+
Self::UnsupportedDomain => "Unsupported domain",
213
+
Self::UnresolvableDid => "Unresolved Did",
214
+
Self::IncompatibleDidDoc => "IncompatibleDidDoc",
215
+
Self::WellKnownNotFound => "User not found",
216
+
Self::AccountNotFound => "Account could not be found",
217
+
Self::BlobNotFound => "Blob could not be found",
218
+
Self::BadRequest(_, msg) => msg,
219
+
Self::AuthRequiredError(msg) => msg,
220
+
}
221
+
.to_owned()
222
+
}
223
+
}
224
+
225
+
impl From<Error> for ApiError {
226
+
fn from(_value: Error) -> Self {
227
+
Self::RuntimeError
228
+
}
229
+
}
230
+
231
+
impl From<anyhow::Error> for ApiError {
232
+
fn from(_value: anyhow::Error) -> Self {
233
+
Self::RuntimeError
234
+
}
235
+
}
236
+
237
+
impl From<handle::errors::Error> for ApiError {
238
+
fn from(value: handle::errors::Error) -> Self {
239
+
match value.kind {
240
+
ErrorKind::InvalidHandle => Self::InvalidHandle,
241
+
ErrorKind::HandleNotAvailable => Self::HandleNotAvailable,
242
+
ErrorKind::UnsupportedDomain => Self::UnsupportedDomain,
243
+
ErrorKind::InternalError => Self::RuntimeError,
244
+
}
245
+
}
246
+
}
247
+
248
+
impl IntoResponse for ApiError {
249
+
fn into_response(self) -> Response {
250
+
let status = self.status_code();
251
+
let error_type = self.error_type();
252
+
let message = self.message();
253
+
254
+
if cfg!(debug_assertions) {
255
+
error!("API Error: {}: {}", error_type, message);
256
+
}
257
+
258
+
// Create the error message and serialize to JSON
259
+
let error_message = ErrorMessage::new(error_type, message);
260
+
let body = serde_json::to_string(&error_message).unwrap_or_else(|_| {
261
+
r#"{"error":"InternalServerError","message":"Error serializing response"}"#.to_owned()
262
+
});
263
+
264
+
// Build the response
265
+
Response::builder()
266
+
.status(status)
267
+
.header("Content-Type", "application/json")
268
+
.body(Body::new(body))
269
+
.expect("should be a valid response")
270
+
}
271
+
}
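
With the `IntoResponse` impl above, handlers can return `Result<_, ApiError>` and get correctly shaped XRPC error bodies for free. An illustrative handler (the lookup is a stand-in, not part of this diff):

```rust
use axum::Json;

async fn get_account_status() -> Result<Json<serde_json::Value>, ApiError> {
    let found = false; // stand-in for a real database lookup
    if !found {
        // Rendered as 404 {"error":"AccountNotFound","message":"Account could not be found"}.
        return Err(ApiError::AccountNotFound);
    }
    Ok(Json(serde_json::json!({ "active": true })))
}
```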
-426
src/firehose.rs
-426
src/firehose.rs
···
1
-
//! The firehose module.
2
-
use std::{collections::VecDeque, time::Duration};
3
-
4
-
use anyhow::{Result, bail};
5
-
use atrium_api::{
6
-
com::atproto::sync::{self},
7
-
types::string::{Datetime, Did, Tid},
8
-
};
9
-
use atrium_repo::Cid;
10
-
use axum::extract::ws::{Message, WebSocket};
11
-
use metrics::{counter, gauge};
12
-
use rand::Rng as _;
13
-
use serde::{Serialize, ser::SerializeMap as _};
14
-
use tracing::{debug, error, info, warn};
15
-
16
-
use crate::{
17
-
Client,
18
-
config::AppConfig,
19
-
metrics::{FIREHOSE_HISTORY, FIREHOSE_LISTENERS, FIREHOSE_MESSAGES, FIREHOSE_SEQUENCE},
20
-
};
21
-
22
-
enum FirehoseMessage {
23
-
Broadcast(sync::subscribe_repos::Message),
24
-
Connect(Box<(WebSocket, Option<i64>)>),
25
-
}
26
-
27
-
enum FrameHeader {
28
-
Error,
29
-
Message(String),
30
-
}
31
-
32
-
impl Serialize for FrameHeader {
33
-
#[expect(clippy::question_mark_used, reason = "returns a Result")]
34
-
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
35
-
where
36
-
S: serde::Serializer,
37
-
{
38
-
let mut map = serializer.serialize_map(None)?;
39
-
40
-
match *self {
41
-
Self::Message(ref s) => {
42
-
map.serialize_key("op")?;
43
-
map.serialize_value(&1_i32)?;
44
-
map.serialize_key("t")?;
45
-
map.serialize_value(s.as_str())?;
46
-
}
47
-
Self::Error => {
48
-
map.serialize_key("op")?;
49
-
map.serialize_value(&-1_i32)?;
50
-
}
51
-
}
52
-
53
-
map.end()
54
-
}
55
-
}
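
The two header shapes this impl produces are easiest to see in JSON form (the DAG-CBOR encoding carries the same structure); a small check, using `serde_json` purely to make the output readable:

```rust
fn demo_frame_headers() {
    let msg = serde_json::to_value(&FrameHeader::Message("#commit".to_owned()))
        .expect("header should serialize");
    assert_eq!(msg, serde_json::json!({ "op": 1, "t": "#commit" }));

    let err = serde_json::to_value(&FrameHeader::Error).expect("header should serialize");
    assert_eq!(err, serde_json::json!({ "op": -1 }));
}
```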
56
-
57
-
/// A repository operation.
58
-
pub(crate) enum RepoOp {
59
-
/// Create a new record.
60
-
Create {
61
-
/// The CID of the record.
62
-
cid: Cid,
63
-
/// The path of the record.
64
-
path: String,
65
-
},
66
-
/// Delete an existing record.
67
-
Delete {
68
-
/// The path of the record.
69
-
path: String,
70
-
/// The previous CID of the record.
71
-
prev: Cid,
72
-
},
73
-
/// Update an existing record.
74
-
Update {
75
-
/// The CID of the record.
76
-
cid: Cid,
77
-
/// The path of the record.
78
-
path: String,
79
-
/// The previous CID of the record.
80
-
prev: Cid,
81
-
},
82
-
}
83
-
84
-
impl From<RepoOp> for sync::subscribe_repos::RepoOp {
85
-
fn from(val: RepoOp) -> Self {
86
-
let (action, cid, prev, path) = match val {
87
-
RepoOp::Create { cid, path } => ("create", Some(cid), None, path),
88
-
RepoOp::Update { cid, path, prev } => ("update", Some(cid), Some(prev), path),
89
-
RepoOp::Delete { path, prev } => ("delete", None, Some(prev), path),
90
-
};
91
-
92
-
sync::subscribe_repos::RepoOpData {
93
-
action: action.to_owned(),
94
-
cid: cid.map(atrium_api::types::CidLink),
95
-
prev: prev.map(atrium_api::types::CidLink),
96
-
path,
97
-
}
98
-
.into()
99
-
}
100
-
}
101
-
102
-
/// A commit to the repository.
103
-
pub(crate) struct Commit {
104
-
/// Blobs that were created in this commit.
105
-
pub blobs: Vec<Cid>,
106
-
/// The CAR file containing the commit blocks.
107
-
pub car: Vec<u8>,
108
-
/// The CID of the commit.
109
-
pub cid: Cid,
110
-
/// The DID of the repository that changed.
111
-
pub did: Did,
112
-
/// The operations performed in this commit.
113
-
pub ops: Vec<RepoOp>,
114
-
/// The previous commit's CID (if applicable).
115
-
pub pcid: Option<Cid>,
116
-
/// The revision of the commit.
117
-
pub rev: String,
118
-
}
119
-
120
-
impl From<Commit> for sync::subscribe_repos::Commit {
121
-
fn from(val: Commit) -> Self {
122
-
sync::subscribe_repos::CommitData {
123
-
blobs: val
124
-
.blobs
125
-
.into_iter()
126
-
.map(atrium_api::types::CidLink)
127
-
.collect::<Vec<_>>(),
128
-
blocks: val.car,
129
-
commit: atrium_api::types::CidLink(val.cid),
130
-
ops: val.ops.into_iter().map(Into::into).collect::<Vec<_>>(),
131
-
prev_data: val.pcid.map(atrium_api::types::CidLink),
132
-
rebase: false,
133
-
repo: val.did,
134
-
rev: Tid::new(val.rev).expect("should be valid revision"),
135
-
seq: 0,
136
-
since: None,
137
-
time: Datetime::now(),
138
-
too_big: false,
139
-
}
140
-
.into()
141
-
}
142
-
}
143
-
144
-
/// A firehose producer. This is used to transmit messages to the firehose for broadcast.
145
-
#[derive(Clone, Debug)]
146
-
pub(crate) struct FirehoseProducer {
147
-
/// The channel to send messages to the firehose.
148
-
tx: tokio::sync::mpsc::Sender<FirehoseMessage>,
149
-
}
150
-
151
-
impl FirehoseProducer {
152
-
/// Broadcast an `#account` event.
153
-
pub(crate) async fn account(&self, account: impl Into<sync::subscribe_repos::Account>) {
154
-
drop(
155
-
self.tx
156
-
.send(FirehoseMessage::Broadcast(
157
-
sync::subscribe_repos::Message::Account(Box::new(account.into())),
158
-
))
159
-
.await,
160
-
);
161
-
}
162
-
/// Handle client connection.
163
-
pub(crate) async fn client_connection(&self, ws: WebSocket, cursor: Option<i64>) {
164
-
drop(
165
-
self.tx
166
-
.send(FirehoseMessage::Connect(Box::new((ws, cursor))))
167
-
.await,
168
-
);
169
-
}
170
-
/// Broadcast a `#commit` event.
171
-
pub(crate) async fn commit(&self, commit: impl Into<sync::subscribe_repos::Commit>) {
172
-
drop(
173
-
self.tx
174
-
.send(FirehoseMessage::Broadcast(
175
-
sync::subscribe_repos::Message::Commit(Box::new(commit.into())),
176
-
))
177
-
.await,
178
-
);
179
-
}
180
-
/// Broadcast an `#identity` event.
181
-
pub(crate) async fn identity(&self, identity: impl Into<sync::subscribe_repos::Identity>) {
182
-
drop(
183
-
self.tx
184
-
.send(FirehoseMessage::Broadcast(
185
-
sync::subscribe_repos::Message::Identity(Box::new(identity.into())),
186
-
))
187
-
.await,
188
-
);
189
-
}
190
-
}
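
As a usage sketch, broadcasting an `#identity` event after a handle change could look like the following (the DID and handle are placeholders; `seq` is overwritten with the real sequence number when the message is serialized for broadcast):

```rust
use atrium_api::com::atproto::sync::subscribe_repos;
use atrium_api::types::string::{Datetime, Did, Handle};

async fn announce_handle_change(fhp: &FirehoseProducer) {
    fhp.identity(subscribe_repos::IdentityData {
        did: Did::new("did:plc:example".to_owned()).expect("should be a valid DID"),
        handle: Some(Handle::new("alice.example.com".to_owned()).expect("should be a valid handle")),
        seq: 0, // replaced by the firehose task in serialize_message()
        time: Datetime::now(),
    })
    .await;
}
```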
191
-
192
-
#[expect(
193
-
clippy::as_conversions,
194
-
clippy::cast_possible_truncation,
195
-
clippy::cast_sign_loss,
196
-
clippy::cast_precision_loss,
197
-
clippy::arithmetic_side_effects
198
-
)]
199
-
/// Convert a `usize` to an `f64`, failing if the conversion would be lossy.
200
-
const fn convert_usize_f64(x: usize) -> Result<f64, &'static str> {
201
-
let result = x as f64;
202
-
if result as usize != x {
203
-
return Err("cannot convert");
204
-
}
205
-
Ok(result)
206
-
}
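
A quick check of where the helper above starts failing: `f64` has a 53-bit significand, so every value up to 2^53 round-trips exactly, and the first odd value beyond that does not (this sketch assumes a 64-bit `usize`):

```rust
fn demo_lossless_bound() {
    assert!(convert_usize_f64(1_usize << 53).is_ok()); // 2^53 itself is exact
    assert!(convert_usize_f64((1_usize << 53) + 1).is_err()); // rounds back to 2^53
}
```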
207
-
208
-
/// Serialize a message.
209
-
fn serialize_message(seq: u64, mut msg: sync::subscribe_repos::Message) -> (&'static str, Vec<u8>) {
210
-
let mut dummy_seq = 0_i64;
211
-
#[expect(clippy::pattern_type_mismatch)]
212
-
let (ty, nseq) = match &mut msg {
213
-
sync::subscribe_repos::Message::Account(m) => ("#account", &mut m.seq),
214
-
sync::subscribe_repos::Message::Commit(m) => ("#commit", &mut m.seq),
215
-
sync::subscribe_repos::Message::Identity(m) => ("#identity", &mut m.seq),
216
-
sync::subscribe_repos::Message::Sync(m) => ("#sync", &mut m.seq),
217
-
sync::subscribe_repos::Message::Info(_m) => ("#info", &mut dummy_seq),
218
-
};
219
-
// Set the sequence number.
220
-
*nseq = i64::try_from(seq).expect("should find seq");
221
-
222
-
let hdr = FrameHeader::Message(ty.to_owned());
223
-
224
-
let mut frame = Vec::new();
225
-
serde_ipld_dagcbor::to_writer(&mut frame, &hdr).expect("should serialize header");
226
-
serde_ipld_dagcbor::to_writer(&mut frame, &msg).expect("should serialize message");
227
-
228
-
(ty, frame)
229
-
}
230
-
231
-
/// Broadcast a message out to all clients.
232
-
async fn broadcast_message(clients: &mut Vec<WebSocket>, msg: Message) -> Result<()> {
233
-
counter!(FIREHOSE_MESSAGES).increment(1);
234
-
235
-
for i in (0..clients.len()).rev() {
236
-
let client = clients.get_mut(i).expect("should find client");
237
-
if let Err(e) = client.send(msg.clone()).await {
238
-
debug!("Firehose client disconnected: {e}");
239
-
drop(clients.remove(i));
240
-
}
241
-
}
242
-
243
-
gauge!(FIREHOSE_LISTENERS)
244
-
.set(convert_usize_f64(clients.len()).expect("should find clients length"));
245
-
Ok(())
246
-
}
247
-
248
-
/// Handle a new connection from a websocket client created by subscribeRepos.
249
-
async fn handle_connect(
250
-
mut ws: WebSocket,
251
-
seq: u64,
252
-
history: &VecDeque<(u64, &str, sync::subscribe_repos::Message)>,
253
-
cursor: Option<i64>,
254
-
) -> Result<WebSocket> {
255
-
if let Some(cursor) = cursor {
256
-
let mut frame = Vec::new();
257
-
let cursor = u64::try_from(cursor);
258
-
if cursor.is_err() {
259
-
tracing::warn!("cursor is not a valid u64");
260
-
return Ok(ws);
261
-
}
262
-
let cursor = cursor.expect("should be valid u64");
263
-
// Cursor specified; attempt to backfill the consumer.
264
-
if cursor > seq {
265
-
let hdr = FrameHeader::Error;
266
-
let msg = sync::subscribe_repos::Error::FutureCursor(Some(format!(
267
-
"cursor {cursor} is greater than the current sequence number {seq}"
268
-
)));
269
-
serde_ipld_dagcbor::to_writer(&mut frame, &hdr).expect("should serialize header");
270
-
serde_ipld_dagcbor::to_writer(&mut frame, &msg).expect("should serialize message");
271
-
// Drop the connection.
272
-
drop(ws.send(Message::binary(frame)).await);
273
-
bail!(
274
-
"connection dropped: cursor {cursor} is greater than the current sequence number {seq}"
275
-
);
276
-
}
277
-
278
-
for &(historical_seq, ty, ref msg) in history {
279
-
if cursor > historical_seq {
280
-
continue;
281
-
}
282
-
let hdr = FrameHeader::Message(ty.to_owned());
283
-
serde_ipld_dagcbor::to_writer(&mut frame, &hdr).expect("should serialize header");
284
-
serde_ipld_dagcbor::to_writer(&mut frame, msg).expect("should serialize message");
285
-
if let Err(e) = ws.send(Message::binary(frame.clone())).await {
286
-
debug!("Firehose client disconnected during backfill: {e}");
287
-
break;
288
-
}
289
-
// Clear out the frame to begin a new one.
290
-
frame.clear();
291
-
}
292
-
}
293
-
294
-
Ok(ws)
295
-
}
296
-
297
-
/// Reconnect to upstream relays.
298
-
pub(crate) async fn reconnect_relays(client: &Client, config: &AppConfig) {
299
-
// Avoid connecting to upstream relays in test mode.
300
-
if config.test {
301
-
return;
302
-
}
303
-
304
-
info!("attempting to reconnect to upstream relays");
305
-
for relay in &config.firehose.relays {
306
-
let Some(host) = relay.host_str() else {
307
-
warn!("relay {} has no host specified", relay);
308
-
continue;
309
-
};
310
-
311
-
let r = client
312
-
.post(format!("https://{host}/xrpc/com.atproto.sync.requestCrawl"))
313
-
.json(&serde_json::json!({
314
-
"hostname": format!("https://{}", config.host_name)
315
-
}))
316
-
.send()
317
-
.await;
318
-
319
-
let r = match r {
320
-
Ok(r) => r,
321
-
Err(e) => {
322
-
error!("failed to hit upstream relay {host}: {e}");
323
-
continue;
324
-
}
325
-
};
326
-
327
-
let s = r.status();
328
-
if let Err(e) = r.error_for_status_ref() {
329
-
error!("failed to hit upstream relay {host}: {e}");
330
-
}
331
-
332
-
let b = r.json::<serde_json::Value>().await;
333
-
if let Ok(b) = b {
334
-
info!("relay {host}: {} {}", s, b);
335
-
} else {
336
-
info!("relay {host}: {}", s);
337
-
}
338
-
}
339
-
}
340
-
341
-
/// The main entrypoint for the firehose.
342
-
///
343
-
/// This will broadcast all updates in this PDS out to anyone who is listening.
344
-
///
345
-
/// Reference: <https://atproto.com/specs/sync>
346
-
pub(crate) fn spawn(
347
-
client: Client,
348
-
config: AppConfig,
349
-
) -> (tokio::task::JoinHandle<()>, FirehoseProducer) {
350
-
let (tx, mut rx) = tokio::sync::mpsc::channel(1000);
351
-
let handle = tokio::spawn(async move {
352
-
fn time_since_inception() -> u64 {
353
-
chrono::Utc::now()
354
-
.timestamp_micros()
355
-
.checked_sub(1_743_442_000_000_000)
356
-
.expect("should not wrap")
357
-
.unsigned_abs()
358
-
}
359
-
let mut clients: Vec<WebSocket> = Vec::new();
360
-
let mut history = VecDeque::with_capacity(1000);
361
-
let mut seq = time_since_inception();
362
-
363
-
loop {
364
-
if let Ok(msg) = tokio::time::timeout(Duration::from_secs(30), rx.recv()).await {
365
-
match msg {
366
-
Some(FirehoseMessage::Broadcast(msg)) => {
367
-
let (ty, by) = serialize_message(seq, msg.clone());
368
-
369
-
history.push_back((seq, ty, msg));
370
-
gauge!(FIREHOSE_HISTORY).set(
371
-
convert_usize_f64(history.len()).expect("should find history length"),
372
-
);
373
-
374
-
info!(
375
-
"Broadcasting message {} {} to {} clients",
376
-
seq,
377
-
ty,
378
-
clients.len()
379
-
);
380
-
381
-
counter!(FIREHOSE_SEQUENCE).absolute(seq);
382
-
let now = time_since_inception();
383
-
if now > seq {
384
-
seq = now;
385
-
} else {
386
-
seq = seq.checked_add(1).expect("should not wrap");
387
-
}
388
-
389
-
drop(broadcast_message(&mut clients, Message::binary(by)).await);
390
-
}
391
-
Some(FirehoseMessage::Connect(ws_cursor)) => {
392
-
let (ws, cursor) = *ws_cursor;
393
-
match handle_connect(ws, seq, &history, cursor).await {
394
-
Ok(r) => {
395
-
gauge!(FIREHOSE_LISTENERS).increment(1_i32);
396
-
clients.push(r);
397
-
}
398
-
Err(e) => {
399
-
error!("failed to connect new client: {e}");
400
-
}
401
-
}
402
-
}
403
-
// All producers have been destroyed.
404
-
None => break,
405
-
}
406
-
} else {
407
-
if clients.is_empty() {
408
-
reconnect_relays(&client, &config).await;
409
-
}
410
-
411
-
let contents = rand::thread_rng()
412
-
.sample_iter(rand::distributions::Alphanumeric)
413
-
.take(15)
414
-
.map(char::from)
415
-
.collect::<String>();
416
-
417
-
// Send a websocket ping message.
418
-
// Reference: https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API/Writing_WebSocket_servers#pings_and_pongs_the_heartbeat_of_websockets
419
-
let message = Message::Ping(axum::body::Bytes::from_owner(contents));
420
-
drop(broadcast_message(&mut clients, message).await);
421
-
}
422
-
}
423
-
});
424
-
425
-
(handle, FirehoseProducer { tx })
426
-
}
+42
src/lib.rs
+42
src/lib.rs
···
1
+
//! PDS implementation.
2
+
mod account_manager;
3
+
mod actor_endpoints;
4
+
mod actor_store;
5
+
mod apis;
6
+
mod auth;
7
+
mod config;
8
+
mod db;
9
+
mod did;
10
+
pub mod error;
11
+
mod metrics;
12
+
mod models;
13
+
mod oauth;
14
+
mod pipethrough;
15
+
mod schema;
16
+
mod serve;
17
+
mod service_proxy;
18
+
19
+
pub use serve::run;
20
+
21
+
/// The index (/) route.
22
+
async fn index() -> impl axum::response::IntoResponse {
23
+
r"
24
+
__ __
25
+
/\ \__ /\ \__
26
+
__ \ \ ,_\ _____ _ __ ___\ \ ,_\ ___
27
+
/'__'\ \ \ \/ /\ '__'\/\''__\/ __'\ \ \/ / __'\
28
+
/\ \L\.\_\ \ \_\ \ \L\ \ \ \//\ \L\ \ \ \_/\ \L\ \
29
+
\ \__/.\_\\ \__\\ \ ,__/\ \_\\ \____/\ \__\ \____/
30
+
\/__/\/_/ \/__/ \ \ \/ \/_/ \/___/ \/__/\/___/
31
+
\ \_\
32
+
\/_/
33
+
34
+
35
+
This is an AT Protocol Personal Data Server (aka, an atproto PDS)
36
+
37
+
Most API routes are under /xrpc/
38
+
39
+
Code: https://github.com/DrChat/bluepds
40
+
Protocol: https://atproto.com
41
+
"
42
+
}
+3
-554
src/main.rs
+3
-554
src/main.rs
···
1
-
//! PDS implementation.
2
-
mod actor_store;
3
-
mod auth;
4
-
mod config;
5
-
mod db;
6
-
mod did;
7
-
mod endpoints;
8
-
mod error;
9
-
mod firehose;
10
-
mod metrics;
11
-
mod mmap;
12
-
mod oauth;
13
-
mod plc;
14
-
mod storage;
15
-
#[cfg(test)]
16
-
mod tests;
17
-
18
-
/// HACK: store private user preferences in the PDS.
19
-
///
20
-
/// We shouldn't have to know about any bsky endpoints to store private user data.
21
-
/// This will _very likely_ be changed in the future.
22
-
mod actor_endpoints {
23
-
use atrium_api::app::bsky::actor;
24
-
use axum::{Json, routing::post};
25
-
use constcat::concat;
26
-
27
-
use super::*;
28
-
29
-
async fn put_preferences(
30
-
user: AuthenticatedUser,
31
-
State(db): State<Db>,
32
-
Json(input): Json<actor::put_preferences::Input>,
33
-
) -> Result<()> {
34
-
let did = user.did();
35
-
let prefs = sqlx::types::Json(input.preferences.clone());
36
-
_ = sqlx::query!(
37
-
r#"UPDATE accounts SET private_prefs = ? WHERE did = ?"#,
38
-
prefs,
39
-
did
40
-
)
41
-
.execute(&db)
42
-
.await
43
-
.context("failed to update user preferences")?;
44
-
45
-
Ok(())
46
-
}
47
-
48
-
async fn get_preferences(
49
-
user: AuthenticatedUser,
50
-
State(db): State<Db>,
51
-
) -> Result<Json<actor::get_preferences::Output>> {
52
-
let did = user.did();
53
-
let json: Option<sqlx::types::Json<actor::defs::Preferences>> =
54
-
sqlx::query_scalar("SELECT private_prefs FROM accounts WHERE did = ?")
55
-
.bind(did)
56
-
.fetch_one(&db)
57
-
.await
58
-
.context("failed to fetch preferences")?;
59
-
60
-
if let Some(prefs) = json {
61
-
Ok(Json(
62
-
actor::get_preferences::OutputData {
63
-
preferences: prefs.0,
64
-
}
65
-
.into(),
66
-
))
67
-
} else {
68
-
Ok(Json(
69
-
actor::get_preferences::OutputData {
70
-
preferences: Vec::new(),
71
-
}
72
-
.into(),
73
-
))
74
-
}
75
-
}
76
-
77
-
/// Register all actor endpoints.
78
-
pub(crate) fn routes() -> Router<AppState> {
79
-
// AP /xrpc/app.bsky.actor.putPreferences
80
-
// AG /xrpc/app.bsky.actor.getPreferences
81
-
Router::new()
82
-
.route(
83
-
concat!("/", actor::put_preferences::NSID),
84
-
post(put_preferences),
85
-
)
86
-
.route(
87
-
concat!("/", actor::get_preferences::NSID),
88
-
get(get_preferences),
89
-
)
90
-
}
91
-
}
92
-
93
-
use anyhow::{Context as _, anyhow};
94
-
use atrium_api::types::string::Did;
95
-
use atrium_crypto::keypair::{Export as _, Secp256k1Keypair};
96
-
use auth::AuthenticatedUser;
97
-
use axum::{
98
-
Router,
99
-
body::Body,
100
-
extract::{FromRef, Request, State},
101
-
http::{self, HeaderMap, Response, StatusCode, Uri},
102
-
response::IntoResponse,
103
-
routing::get,
104
-
};
105
-
use azure_core::credentials::TokenCredential;
106
-
use clap::Parser;
107
-
use clap_verbosity_flag::{InfoLevel, Verbosity, log::LevelFilter};
108
-
use config::AppConfig;
109
-
#[expect(clippy::pub_use, clippy::useless_attribute)]
110
-
pub use error::Error;
111
-
use figment::{Figment, providers::Format as _};
112
-
use firehose::FirehoseProducer;
113
-
use http_cache_reqwest::{CacheMode, HttpCacheOptions, MokaManager};
114
-
use rand::Rng as _;
115
-
use serde::{Deserialize, Serialize};
116
-
use sqlx::{SqlitePool, sqlite::SqliteConnectOptions};
117
-
use std::{
118
-
net::{IpAddr, Ipv4Addr, SocketAddr},
119
-
path::PathBuf,
120
-
str::FromStr as _,
121
-
sync::Arc,
122
-
};
123
-
use tokio::net::TcpListener;
124
-
use tower_http::{cors::CorsLayer, trace::TraceLayer};
125
-
use tracing::{info, warn};
126
-
use uuid::Uuid;
1
+
//! BluePDS binary entry point.
127
2
128
-
/// The application user agent. Concatenates the package name and version. e.g. `bluepds/0.0.0`.
129
-
pub const APP_USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"),);
130
-
131
-
/// The application-wide result type.
132
-
pub type Result<T> = std::result::Result<T, Error>;
133
-
/// The reqwest client type with middleware.
134
-
pub type Client = reqwest_middleware::ClientWithMiddleware;
135
-
/// The database connection pool.
136
-
pub type Db = SqlitePool;
137
-
/// The Azure credential type.
138
-
pub type Cred = Arc<dyn TokenCredential>;
139
-
140
-
#[expect(
141
-
clippy::arbitrary_source_item_ordering,
142
-
reason = "serialized data might be structured"
143
-
)]
144
-
#[derive(Serialize, Deserialize, Debug, Clone)]
145
-
/// The key data structure.
146
-
struct KeyData {
147
-
/// Primary signing key for all repo operations.
148
-
skey: Vec<u8>,
149
-
/// Primary signing (rotation) key for all PLC operations.
150
-
rkey: Vec<u8>,
151
-
}
152
-
153
-
// FIXME: We should use P256Keypair instead. secp256k1 is primarily used for cryptocurrencies,
154
-
// and the implementations of this algorithm are much more limited as compared to P256.
155
-
//
156
-
// Reference: https://soatok.blog/2022/05/19/guidance-for-choosing-an-elliptic-curve-signature-algorithm-in-2022/
157
-
#[derive(Clone)]
158
-
/// The signing key for PLC/DID operations.
159
-
pub struct SigningKey(Arc<Secp256k1Keypair>);
160
-
#[derive(Clone)]
161
-
/// The rotation key for PLC operations.
162
-
pub struct RotationKey(Arc<Secp256k1Keypair>);
163
-
164
-
impl std::ops::Deref for SigningKey {
165
-
type Target = Secp256k1Keypair;
166
-
167
-
fn deref(&self) -> &Self::Target {
168
-
&self.0
169
-
}
170
-
}
171
-
172
-
impl SigningKey {
173
-
/// Import from a private key.
174
-
pub fn import(key: &[u8]) -> Result<Self> {
175
-
let key = Secp256k1Keypair::import(key).context("failed to import signing key")?;
176
-
Ok(Self(Arc::new(key)))
177
-
}
178
-
}
179
-
180
-
impl std::ops::Deref for RotationKey {
181
-
type Target = Secp256k1Keypair;
182
-
183
-
fn deref(&self) -> &Self::Target {
184
-
&self.0
185
-
}
186
-
}
187
-
188
-
#[derive(Parser, Debug, Clone)]
189
-
/// Command line arguments.
190
-
struct Args {
191
-
/// Path to the configuration file
192
-
#[arg(short, long, default_value = "default.toml")]
193
-
config: PathBuf,
194
-
/// The verbosity level.
195
-
#[command(flatten)]
196
-
verbosity: Verbosity<InfoLevel>,
197
-
}
198
-
199
-
#[expect(clippy::arbitrary_source_item_ordering, reason = "arbitrary")]
200
-
#[derive(Clone, FromRef)]
201
-
struct AppState {
202
-
/// The application configuration.
203
-
config: AppConfig,
204
-
/// The Azure credential.
205
-
cred: Cred,
206
-
/// The database connection pool.
207
-
db: Db,
208
-
209
-
/// The HTTP client with middleware.
210
-
client: Client,
211
-
/// The simple HTTP client.
212
-
simple_client: reqwest::Client,
213
-
/// The firehose producer.
214
-
firehose: FirehoseProducer,
215
-
216
-
/// The signing key.
217
-
signing_key: SigningKey,
218
-
/// The rotation key.
219
-
rotation_key: RotationKey,
220
-
}
221
-
222
-
/// The index (/) route.
223
-
async fn index() -> impl IntoResponse {
224
-
r"
225
-
__ __
226
-
/\ \__ /\ \__
227
-
__ \ \ ,_\ _____ _ __ ___\ \ ,_\ ___
228
-
/'__'\ \ \ \/ /\ '__'\/\''__\/ __'\ \ \/ / __'\
229
-
/\ \L\.\_\ \ \_\ \ \L\ \ \ \//\ \L\ \ \ \_/\ \L\ \
230
-
\ \__/.\_\\ \__\\ \ ,__/\ \_\\ \____/\ \__\ \____/
231
-
\/__/\/_/ \/__/ \ \ \/ \/_/ \/___/ \/__/\/___/
232
-
\ \_\
233
-
\/_/
234
-
235
-
236
-
This is an AT Protocol Personal Data Server (aka, an atproto PDS)
237
-
238
-
Most API routes are under /xrpc/
239
-
240
-
Code: https://github.com/DrChat/bluepds
241
-
Protocol: https://atproto.com
242
-
"
243
-
}
244
-
245
-
/// Service proxy.
246
-
///
247
-
/// Reference: <https://atproto.com/specs/xrpc#service-proxying>
248
-
async fn service_proxy(
249
-
uri: Uri,
250
-
user: AuthenticatedUser,
251
-
State(skey): State<SigningKey>,
252
-
State(client): State<reqwest::Client>,
253
-
headers: HeaderMap,
254
-
request: Request<Body>,
255
-
) -> Result<Response<Body>> {
256
-
let url_path = uri.path_and_query().context("invalid service proxy url")?;
257
-
let lxm = url_path
258
-
.path()
259
-
.strip_prefix("/")
260
-
.with_context(|| format!("invalid service proxy url prefix: {}", url_path.path()))?;
261
-
262
-
let user_did = user.did();
263
-
let (did, id) = match headers.get("atproto-proxy") {
264
-
Some(val) => {
265
-
let val =
266
-
std::str::from_utf8(val.as_bytes()).context("proxy header not valid utf-8")?;
267
-
268
-
let (did, id) = val.split_once('#').context("invalid proxy header")?;
269
-
270
-
let did =
271
-
Did::from_str(did).map_err(|e| anyhow!("atproto proxy not a valid DID: {e}"))?;
272
-
273
-
(did, format!("#{id}"))
274
-
}
275
-
// HACK: Assume the bluesky appview by default.
276
-
None => (
277
-
Did::new("did:web:api.bsky.app".to_owned())
278
-
.expect("service proxy should be a valid DID"),
279
-
"#bsky_appview".to_owned(),
280
-
),
281
-
};
282
-
283
-
let did_doc = did::resolve(&Client::new(client.clone(), []), did.clone())
284
-
.await
285
-
.with_context(|| format!("failed to resolve did document {}", did.as_str()))?;
286
-
287
-
let Some(service) = did_doc.service.iter().find(|s| s.id == id) else {
288
-
return Err(Error::with_status(
289
-
StatusCode::BAD_REQUEST,
290
-
anyhow!("could not find resolve service #{id}"),
291
-
));
292
-
};
293
-
294
-
let target_url: url::Url = service
295
-
.service_endpoint
296
-
.join(&format!("/xrpc{url_path}"))
297
-
.context("failed to construct target url")?;
298
-
299
-
let exp = (chrono::Utc::now().checked_add_signed(chrono::Duration::minutes(1)))
300
-
.context("should be valid expiration datetime")?
301
-
.timestamp();
302
-
let jti = rand::thread_rng()
303
-
.sample_iter(rand::distributions::Alphanumeric)
304
-
.take(10)
305
-
.map(char::from)
306
-
.collect::<String>();
307
-
308
-
// Mint a bearer token by signing a JSON web token.
309
-
// https://github.com/DavidBuchanan314/millipds/blob/5c7529a739d394e223c0347764f1cf4e8fd69f94/src/millipds/appview_proxy.py#L47-L59
310
-
let token = auth::sign(
311
-
&skey,
312
-
"JWT",
313
-
&serde_json::json!({
314
-
"iss": user_did.as_str(),
315
-
"aud": did.as_str(),
316
-
"lxm": lxm,
317
-
"exp": exp,
318
-
"jti": jti,
319
-
}),
320
-
)
321
-
.context("failed to sign jwt")?;
322
-
323
-
let mut h = HeaderMap::new();
324
-
if let Some(hdr) = request.headers().get("atproto-accept-labelers") {
325
-
drop(h.insert("atproto-accept-labelers", hdr.clone()));
326
-
}
327
-
if let Some(hdr) = request.headers().get(http::header::CONTENT_TYPE) {
328
-
drop(h.insert(http::header::CONTENT_TYPE, hdr.clone()));
329
-
}
330
-
331
-
let r = client
332
-
.request(request.method().clone(), target_url)
333
-
.headers(h)
334
-
.header(http::header::AUTHORIZATION, format!("Bearer {token}"))
335
-
.body(reqwest::Body::wrap_stream(
336
-
request.into_body().into_data_stream(),
337
-
))
338
-
.send()
339
-
.await
340
-
.context("failed to send request")?;
341
-
342
-
let mut resp = Response::builder().status(r.status());
343
-
if let Some(hdrs) = resp.headers_mut() {
344
-
*hdrs = r.headers().clone();
345
-
}
346
-
347
-
let resp = resp
348
-
.body(Body::from_stream(r.bytes_stream()))
349
-
.context("failed to construct response")?;
350
-
351
-
Ok(resp)
352
-
}
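
From the client's perspective, selecting a proxy target is just a header; the PDS does the DID-document resolution and JWT minting shown above. An illustrative request (host, DID, and token are placeholders):

```rust
async fn call_via_appview(client: &reqwest::Client, access_token: &str) -> anyhow::Result<String> {
    let resp = client
        .get("https://pds.example/xrpc/app.bsky.actor.getProfile")
        .query(&[("actor", "did:plc:example")])
        .header("atproto-proxy", "did:web:api.bsky.app#bsky_appview")
        .bearer_auth(access_token)
        .send()
        .await?
        .error_for_status()?;
    Ok(resp.text().await?)
}
```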
353
-
354
-
/// The main application entry point.
355
-
#[expect(
356
-
clippy::cognitive_complexity,
357
-
clippy::too_many_lines,
358
-
reason = "main function has high complexity"
359
-
)]
360
-
async fn run() -> anyhow::Result<()> {
361
-
let args = Args::parse();
362
-
363
-
// Set up trace logging to console and account for the user-provided verbosity flag.
364
-
if args.verbosity.log_level_filter() != LevelFilter::Off {
365
-
let lvl = match args.verbosity.log_level_filter() {
366
-
LevelFilter::Error => tracing::Level::ERROR,
367
-
LevelFilter::Warn => tracing::Level::WARN,
368
-
LevelFilter::Info | LevelFilter::Off => tracing::Level::INFO,
369
-
LevelFilter::Debug => tracing::Level::DEBUG,
370
-
LevelFilter::Trace => tracing::Level::TRACE,
371
-
};
372
-
tracing_subscriber::fmt().with_max_level(lvl).init();
373
-
}
374
-
375
-
if !args.config.exists() {
376
-
// Throw up a warning if the config file does not exist.
377
-
//
378
-
// This is not fatal because users can specify all configuration settings via
379
-
// the environment, but the most likely scenario here is that a user accidentally
380
-
// omitted the config file for some reason (e.g. forgot to mount it into Docker).
381
-
warn!(
382
-
"configuration file {} does not exist",
383
-
args.config.display()
384
-
);
385
-
}
386
-
387
-
// Read and parse the user-provided configuration.
388
-
let config: AppConfig = Figment::new()
389
-
.admerge(figment::providers::Toml::file(args.config))
390
-
.admerge(figment::providers::Env::prefixed("BLUEPDS_"))
391
-
.extract()
392
-
.context("failed to load configuration")?;
393
-
394
-
if config.test {
395
-
warn!("BluePDS starting up in TEST mode.");
396
-
warn!("This means the application will not federate with the rest of the network.");
397
-
warn!(
398
-
"If you want to turn this off, either set `test` to false in the config or define `BLUEPDS_TEST = false`"
399
-
);
400
-
}
401
-
402
-
// Initialize metrics reporting.
403
-
metrics::setup(config.metrics.as_ref()).context("failed to set up metrics exporter")?;
404
-
405
-
// Create a reqwest client that will be used for all outbound requests.
406
-
let simple_client = reqwest::Client::builder()
407
-
.user_agent(APP_USER_AGENT)
408
-
.build()
409
-
.context("failed to build requester client")?;
410
-
let client = reqwest_middleware::ClientBuilder::new(simple_client.clone())
411
-
.with(http_cache_reqwest::Cache(http_cache_reqwest::HttpCache {
412
-
mode: CacheMode::Default,
413
-
manager: MokaManager::default(),
414
-
options: HttpCacheOptions::default(),
415
-
}))
416
-
.build();
417
-
418
-
tokio::fs::create_dir_all(&config.key.parent().context("should have parent")?)
419
-
.await
420
-
.context("failed to create key directory")?;
421
-
422
-
// Check if crypto keys exist. If not, create new ones.
423
-
let (skey, rkey) = if let Ok(f) = std::fs::File::open(&config.key) {
424
-
let keys: KeyData = serde_ipld_dagcbor::from_reader(std::io::BufReader::new(f))
425
-
.context("failed to deserialize crypto keys")?;
426
-
427
-
let skey = Secp256k1Keypair::import(&keys.skey).context("failed to import signing key")?;
428
-
let rkey = Secp256k1Keypair::import(&keys.rkey).context("failed to import rotation key")?;
429
-
430
-
(SigningKey(Arc::new(skey)), RotationKey(Arc::new(rkey)))
431
-
} else {
432
-
info!("signing keys not found, generating new ones");
433
-
434
-
let skey = Secp256k1Keypair::create(&mut rand::thread_rng());
435
-
let rkey = Secp256k1Keypair::create(&mut rand::thread_rng());
436
-
437
-
let keys = KeyData {
438
-
skey: skey.export(),
439
-
rkey: rkey.export(),
440
-
};
441
-
442
-
let mut f = std::fs::File::create(&config.key).context("failed to create key file")?;
443
-
serde_ipld_dagcbor::to_writer(&mut f, &keys).context("failed to serialize crypto keys")?;
444
-
445
-
(SigningKey(Arc::new(skey)), RotationKey(Arc::new(rkey)))
446
-
};
447
-
448
-
tokio::fs::create_dir_all(&config.repo.path).await?;
449
-
tokio::fs::create_dir_all(&config.plc.path).await?;
450
-
tokio::fs::create_dir_all(&config.blob.path).await?;
451
-
452
-
let cred = azure_identity::DefaultAzureCredential::new()
453
-
.context("failed to create Azure credential")?;
454
-
let opts = SqliteConnectOptions::from_str(&config.db)
455
-
.context("failed to parse database options")?
456
-
.create_if_missing(true);
457
-
let db = SqlitePool::connect_with(opts).await?;
458
-
459
-
sqlx::migrate!()
460
-
.run(&db)
461
-
.await
462
-
.context("failed to apply migrations")?;
463
-
464
-
let (_fh, fhp) = firehose::spawn(client.clone(), config.clone());
465
-
466
-
let addr = config
467
-
.listen_address
468
-
.unwrap_or(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8000));
469
-
470
-
let app = Router::new()
471
-
.route("/", get(index))
472
-
.merge(oauth::routes())
473
-
.nest(
474
-
"/xrpc",
475
-
endpoints::routes()
476
-
.merge(actor_endpoints::routes())
477
-
.fallback(service_proxy),
478
-
)
479
-
// .layer(RateLimitLayer::new(30, Duration::from_secs(30)))
480
-
.layer(CorsLayer::permissive())
481
-
.layer(TraceLayer::new_for_http())
482
-
.with_state(AppState {
483
-
cred,
484
-
config: config.clone(),
485
-
db: db.clone(),
486
-
client: client.clone(),
487
-
simple_client,
488
-
firehose: fhp,
489
-
signing_key: skey,
490
-
rotation_key: rkey,
491
-
});
492
-
493
-
info!("listening on {addr}");
494
-
info!("connect to: http://127.0.0.1:{}", addr.port());
495
-
496
-
// Determine whether or not this was the first startup (i.e. no accounts exist and no invite codes were created).
497
-
// If so, create an invite code and share it via the console.
498
-
let c = sqlx::query_scalar!(
499
-
r#"
500
-
SELECT
501
-
(SELECT COUNT(*) FROM accounts) + (SELECT COUNT(*) FROM invites)
502
-
AS total_count
503
-
"#
504
-
)
505
-
.fetch_one(&db)
506
-
.await
507
-
.context("failed to query database")?;
508
-
509
-
#[expect(clippy::print_stdout)]
510
-
if c == 0 {
511
-
let uuid = Uuid::new_v4().to_string();
512
-
513
-
_ = sqlx::query!(
514
-
r#"
515
-
INSERT INTO invites (id, did, count, created_at)
516
-
VALUES (?, NULL, 1, datetime('now'))
517
-
"#,
518
-
uuid,
519
-
)
520
-
.execute(&db)
521
-
.await
522
-
.context("failed to create new invite code")?;
523
-
524
-
// N.B: This is a sensitive message, so we're bypassing `tracing` here and
525
-
// logging it directly to console.
526
-
println!("=====================================");
527
-
println!(" FIRST STARTUP ");
528
-
println!("=====================================");
529
-
println!("Use this code to create an account:");
530
-
println!("{uuid}");
531
-
println!("=====================================");
532
-
}
533
-
534
-
let listener = TcpListener::bind(&addr)
535
-
.await
536
-
.context("failed to bind address")?;
537
-
538
-
// Serve the app, and request crawling from upstream relays.
539
-
let serve = tokio::spawn(async move {
540
-
axum::serve(listener, app.into_make_service())
541
-
.await
542
-
.context("failed to serve app")
543
-
});
544
-
545
-
// Now that the app is live, request a crawl from upstream relays.
546
-
firehose::reconnect_relays(&client, &config).await;
547
-
548
-
serve
549
-
.await
550
-
.map_err(Into::into)
551
-
.and_then(|r| r)
552
-
.context("failed to serve app")
553
-
}
3
+
use anyhow::Context as _;
554
4
555
5
#[tokio::main(flavor = "multi_thread")]
556
6
async fn main() -> anyhow::Result<()> {
557
-
// Dispatch out to a separate function without a derive macro to help rust-analyzer along.
558
-
run().await
7
+
bluepds::run().await.context("failed to run application")
559
8
}
-274
src/mmap.rs
-274
src/mmap.rs
···
1
-
#![allow(clippy::arbitrary_source_item_ordering)]
2
-
use std::io::{ErrorKind, Read as _, Seek as _, Write as _};
3
-
4
-
#[cfg(unix)]
5
-
use std::os::fd::AsRawFd as _;
6
-
#[cfg(windows)]
7
-
use std::os::windows::io::AsRawHandle;
8
-
9
-
use memmap2::{MmapMut, MmapOptions};
10
-
11
-
pub(crate) struct MappedFile {
12
-
/// The underlying file handle.
13
-
file: std::fs::File,
14
-
/// The length of the file.
15
-
len: u64,
16
-
/// The mapped memory region.
17
-
map: MmapMut,
18
-
/// Our current offset into the file.
19
-
off: u64,
20
-
}
21
-
22
-
impl MappedFile {
23
-
-    pub(crate) fn new(mut f: std::fs::File) -> std::io::Result<Self> {
-        let len = f.seek(std::io::SeekFrom::End(0))?;
-
-        #[cfg(windows)]
-        let raw = f.as_raw_handle();
-        #[cfg(unix)]
-        let raw = f.as_raw_fd();
-
-        #[expect(unsafe_code)]
-        Ok(Self {
-            // SAFETY:
-            // All file-backed memory map constructors are marked \
-            // unsafe because of the potential for Undefined Behavior (UB) \
-            // using the map if the underlying file is subsequently modified, in or out of process.
-            map: unsafe { MmapOptions::new().map_mut(raw)? },
-            file: f,
-            len,
-            off: 0,
-        })
-    }
-
-    /// Resize the memory-mapped file. This will reallocate the memory mapping.
-    #[expect(unsafe_code)]
-    fn resize(&mut self, len: u64) -> std::io::Result<()> {
-        // Resize the file.
-        self.file.set_len(len)?;
-
-        #[cfg(windows)]
-        let raw = self.file.as_raw_handle();
-        #[cfg(unix)]
-        let raw = self.file.as_raw_fd();
-
-        // SAFETY:
-        // All file-backed memory map constructors are marked \
-        // unsafe because of the potential for Undefined Behavior (UB) \
-        // using the map if the underlying file is subsequently modified, in or out of process.
-        self.map = unsafe { MmapOptions::new().map_mut(raw)? };
-        self.len = len;
-
-        Ok(())
-    }
-}
-
-impl std::io::Read for MappedFile {
-    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
-        if self.off == self.len {
-            // If we're at EOF, return an EOF error code. `Ok(0)` tends to trip up some implementations.
-            return Err(std::io::Error::new(ErrorKind::UnexpectedEof, "eof"));
-        }
-
-        // Calculate the number of bytes we're going to read.
-        let remaining_bytes = self.len.saturating_sub(self.off);
-        let buf_len = u64::try_from(buf.len()).unwrap_or(u64::MAX);
-        let len = usize::try_from(std::cmp::min(remaining_bytes, buf_len)).unwrap_or(usize::MAX);
-
-        let off = usize::try_from(self.off).map_err(|e| {
-            std::io::Error::new(
-                ErrorKind::InvalidInput,
-                format!("offset too large for this platform: {e}"),
-            )
-        })?;
-
-        if let (Some(dest), Some(src)) = (
-            buf.get_mut(..len),
-            self.map.get(off..off.saturating_add(len)),
-        ) {
-            dest.copy_from_slice(src);
-            self.off = self.off.saturating_add(u64::try_from(len).unwrap_or(0));
-            Ok(len)
-        } else {
-            Err(std::io::Error::new(
-                ErrorKind::InvalidInput,
-                "invalid buffer range",
-            ))
-        }
-    }
-}
-
-impl std::io::Write for MappedFile {
-    fn flush(&mut self) -> std::io::Result<()> {
-        // This is done by the system.
-        Ok(())
-    }
-    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
-        // Determine if we need to resize the file.
-        let buf_len = u64::try_from(buf.len()).map_err(|e| {
-            std::io::Error::new(
-                ErrorKind::InvalidInput,
-                format!("buffer length too large for this platform: {e}"),
-            )
-        })?;
-
-        if self.off.saturating_add(buf_len) >= self.len {
-            self.resize(self.off.saturating_add(buf_len))?;
-        }
-
-        let off = usize::try_from(self.off).map_err(|e| {
-            std::io::Error::new(
-                ErrorKind::InvalidInput,
-                format!("offset too large for this platform: {e}"),
-            )
-        })?;
-        let len = buf.len();
-
-        if let Some(dest) = self.map.get_mut(off..off.saturating_add(len)) {
-            dest.copy_from_slice(buf);
-            self.off = self.off.saturating_add(buf_len);
-            Ok(len)
-        } else {
-            Err(std::io::Error::new(
-                ErrorKind::InvalidInput,
-                "invalid buffer range",
-            ))
-        }
-    }
-}
-
-impl std::io::Seek for MappedFile {
-    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
-        let off = match pos {
-            std::io::SeekFrom::Start(i) => i,
-            std::io::SeekFrom::End(i) => {
-                if i <= 0 {
-                    // If i is negative or zero, we're seeking backwards from the end
-                    // or exactly at the end
-                    self.len.saturating_sub(i.unsigned_abs())
-                } else {
-                    // If i is positive, we're seeking beyond the end, which is allowed
-                    // but requires extending the file
-                    self.len.saturating_add(i.unsigned_abs())
-                }
-            }
-            std::io::SeekFrom::Current(i) => {
-                if i >= 0 {
-                    self.off.saturating_add(i.unsigned_abs())
-                } else {
-                    self.off.saturating_sub(i.unsigned_abs())
-                }
-            }
-        };
-
-        // If the offset is beyond EOF, extend the file to the new size.
-        if off > self.len {
-            self.resize(off)?;
-        }
-
-        self.off = off;
-        Ok(off)
-    }
-}
-
-impl tokio::io::AsyncRead for MappedFile {
-    fn poll_read(
-        mut self: std::pin::Pin<&mut Self>,
-        _cx: &mut std::task::Context<'_>,
-        buf: &mut tokio::io::ReadBuf<'_>,
-    ) -> std::task::Poll<std::io::Result<()>> {
-        let wbuf = buf.initialize_unfilled();
-        let len = wbuf.len();
-
-        std::task::Poll::Ready(match self.read(wbuf) {
-            Ok(_) => {
-                buf.advance(len);
-                Ok(())
-            }
-            Err(e) => Err(e),
-        })
-    }
-}
-
-impl tokio::io::AsyncWrite for MappedFile {
-    fn poll_flush(
-        self: std::pin::Pin<&mut Self>,
-        _cx: &mut std::task::Context<'_>,
-    ) -> std::task::Poll<Result<(), std::io::Error>> {
-        std::task::Poll::Ready(Ok(()))
-    }
-
-    fn poll_shutdown(
-        self: std::pin::Pin<&mut Self>,
-        _cx: &mut std::task::Context<'_>,
-    ) -> std::task::Poll<Result<(), std::io::Error>> {
-        std::task::Poll::Ready(Ok(()))
-    }
-
-    fn poll_write(
-        mut self: std::pin::Pin<&mut Self>,
-        _cx: &mut std::task::Context<'_>,
-        buf: &[u8],
-    ) -> std::task::Poll<Result<usize, std::io::Error>> {
-        std::task::Poll::Ready(self.write(buf))
-    }
-}
-
-impl tokio::io::AsyncSeek for MappedFile {
-    fn poll_complete(
-        self: std::pin::Pin<&mut Self>,
-        _cx: &mut std::task::Context<'_>,
-    ) -> std::task::Poll<std::io::Result<u64>> {
-        std::task::Poll::Ready(Ok(self.off))
-    }
-
-    fn start_seek(
-        mut self: std::pin::Pin<&mut Self>,
-        position: std::io::SeekFrom,
-    ) -> std::io::Result<()> {
-        self.seek(position).map(|_p| ())
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use rand::Rng as _;
-    use std::io::Write as _;
-
-    use super::*;
-
-    #[test]
-    fn basic_rw() {
-        let tmp = std::env::temp_dir().join(
-            rand::thread_rng()
-                .sample_iter(rand::distributions::Alphanumeric)
-                .take(10)
-                .map(char::from)
-                .collect::<String>(),
-        );
-
-        let mut m = MappedFile::new(
-            std::fs::File::options()
-                .create(true)
-                .truncate(true)
-                .read(true)
-                .write(true)
-                .open(&tmp)
-                .expect("Failed to open temporary file"),
-        )
-        .expect("Failed to create MappedFile");
-
-        m.write_all(b"abcd123").expect("Failed to write data");
-        let _: u64 = m
-            .seek(std::io::SeekFrom::Start(0))
-            .expect("Failed to seek to start");
-
-        let mut buf = [0_u8; 7];
-        m.read_exact(&mut buf).expect("Failed to read data");
-
-        assert_eq!(&buf, b"abcd123");
-
-        drop(m);
-        std::fs::remove_file(tmp).expect("Failed to remove temporary file");
-    }
-}
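Note: the lines above delete a custom MappedFile type, an mmap-backed file handle that implemented std::io::{Read, Write, Seek} plus the tokio async counterparts. For context, a minimal sketch of the mapping pattern it wrapped, assuming the `MmapOptions` in the removed code comes from the memmap2 crate; the path and length below are illustrative only:

use memmap2::MmapOptions;

fn main() -> std::io::Result<()> {
    let file = std::fs::OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .open(std::env::temp_dir().join("mmap-demo"))?;
    file.set_len(16)?; // the mapping cannot outgrow the file on its own

    // SAFETY: nothing else may mutate the file while the map is alive;
    // this is the same invariant the removed MappedFile documented.
    let mut map = unsafe { MmapOptions::new().map_mut(&file)? };
    map[..5].copy_from_slice(b"hello");
    map.flush()?; // persist dirty pages back to the file
    Ok(())
}

Also worth noting: the removed Read impl returned Err(UnexpectedEof) at end-of-file, whereas the std convention is Ok(0); callers such as read_to_end rely on the latter.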
+809 src/models.rs
···
+// Generated by diesel_ext
+
+#![allow(unused, non_snake_case)]
+#![allow(clippy::all)]
+
+pub mod pds {
+
+    #![allow(unnameable_types, unused_qualifications)]
+    use anyhow::{Result, bail};
+    use chrono::DateTime;
+    use chrono::offset::Utc;
+    use diesel::backend::Backend;
+    use diesel::deserialize::FromSql;
+    use diesel::prelude::*;
+    use diesel::serialize::{Output, ToSql};
+    use diesel::sql_types::Text;
+    use diesel::sqlite::Sqlite;
+    use diesel::*;
+    use serde::{Deserialize, Serialize};
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(request_uri))]
+    #[diesel(table_name = crate::schema::pds::oauth_par_requests)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct OauthParRequest {
+        pub request_uri: String,
+        pub client_id: String,
+        pub response_type: String,
+        pub code_challenge: String,
+        pub code_challenge_method: String,
+        pub state: Option<String>,
+        pub login_hint: Option<String>,
+        pub scope: Option<String>,
+        pub redirect_uri: Option<String>,
+        pub response_mode: Option<String>,
+        pub display: Option<String>,
+        pub created_at: i64,
+        pub expires_at: i64,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(code))]
+    #[diesel(table_name = crate::schema::pds::oauth_authorization_codes)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct OauthAuthorizationCode {
+        pub code: String,
+        pub client_id: String,
+        pub subject: String,
+        pub code_challenge: String,
+        pub code_challenge_method: String,
+        pub redirect_uri: String,
+        pub scope: Option<String>,
+        pub created_at: i64,
+        pub expires_at: i64,
+        pub used: bool,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(token))]
+    #[diesel(table_name = crate::schema::pds::oauth_refresh_tokens)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct OauthRefreshToken {
+        pub token: String,
+        pub client_id: String,
+        pub subject: String,
+        pub dpop_thumbprint: String,
+        pub scope: Option<String>,
+        pub created_at: i64,
+        pub expires_at: i64,
+        pub revoked: bool,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(jti))]
+    #[diesel(table_name = crate::schema::pds::oauth_used_jtis)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct OauthUsedJti {
+        pub jti: String,
+        pub issuer: String,
+        pub created_at: i64,
+        pub expires_at: i64,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(did))]
+    #[diesel(table_name = crate::schema::pds::account)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct Account {
+        pub did: String,
+        pub email: String,
+        #[diesel(column_name = recoveryKey)]
+        #[serde(rename = "recoveryKey")]
+        pub recovery_key: Option<String>,
+        pub password: String,
+        #[diesel(column_name = createdAt)]
+        #[serde(rename = "createdAt")]
+        pub created_at: String,
+        #[diesel(column_name = invitesDisabled)]
+        #[serde(rename = "invitesDisabled")]
+        pub invites_disabled: i16,
+        #[diesel(column_name = emailConfirmedAt)]
+        #[serde(rename = "emailConfirmedAt")]
+        pub email_confirmed_at: Option<String>,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(did))]
+    #[diesel(table_name = crate::schema::pds::actor)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct Actor {
+        pub did: String,
+        pub handle: Option<String>,
+        #[diesel(column_name = createdAt)]
+        #[serde(rename = "createdAt")]
+        pub created_at: String,
+        #[diesel(column_name = takedownRef)]
+        #[serde(rename = "takedownRef")]
+        pub takedown_ref: Option<String>,
+        #[diesel(column_name = deactivatedAt)]
+        #[serde(rename = "deactivatedAt")]
+        pub deactivated_at: Option<String>,
+        #[diesel(column_name = deleteAfter)]
+        #[serde(rename = "deleteAfter")]
+        pub delete_after: Option<String>,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(did, name))]
+    #[diesel(table_name = crate::schema::pds::app_password)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct AppPassword {
+        pub did: String,
+        pub name: String,
+        pub password: String,
+        #[diesel(column_name = createdAt)]
+        #[serde(rename = "createdAt")]
+        pub created_at: String,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(did))]
+    #[diesel(table_name = crate::schema::pds::did_doc)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct DidDoc {
+        pub did: String,
+        pub doc: String,
+        #[diesel(column_name = updatedAt)]
+        #[serde(rename = "updatedAt")]
+        pub updated_at: i64,
+    }
+
+    #[derive(
+        Clone, Copy, Debug, PartialEq, Eq, Hash, Default, Serialize, Deserialize, AsExpression,
+    )]
+    #[diesel(sql_type = Text)]
+    pub enum EmailTokenPurpose {
+        #[default]
+        ConfirmEmail,
+        UpdateEmail,
+        ResetPassword,
+        DeleteAccount,
+        PlcOperation,
+    }
+
+    impl EmailTokenPurpose {
+        pub fn as_str(&self) -> &'static str {
+            match self {
+                EmailTokenPurpose::ConfirmEmail => "confirm_email",
+                EmailTokenPurpose::UpdateEmail => "update_email",
+                EmailTokenPurpose::ResetPassword => "reset_password",
+                EmailTokenPurpose::DeleteAccount => "delete_account",
+                EmailTokenPurpose::PlcOperation => "plc_operation",
+            }
+        }
+
+        pub fn from_str(s: &str) -> Result<Self> {
+            match s {
+                "confirm_email" => Ok(EmailTokenPurpose::ConfirmEmail),
+                "update_email" => Ok(EmailTokenPurpose::UpdateEmail),
+                "reset_password" => Ok(EmailTokenPurpose::ResetPassword),
+                "delete_account" => Ok(EmailTokenPurpose::DeleteAccount),
+                "plc_operation" => Ok(EmailTokenPurpose::PlcOperation),
+                _ => bail!("Unable to parse as EmailTokenPurpose: `{s:?}`"),
+            }
+        }
+    }
+
+    impl<DB> Queryable<sql_types::Text, DB> for EmailTokenPurpose
+    where
+        DB: backend::Backend,
+        String: deserialize::FromSql<sql_types::Text, DB>,
+    {
+        type Row = String;
+
+        fn build(s: String) -> deserialize::Result<Self> {
+            Ok(Self::from_str(&s)?)
+        }
+    }
+
+    impl serialize::ToSql<sql_types::Text, sqlite::Sqlite> for EmailTokenPurpose
+    where
+        String: serialize::ToSql<sql_types::Text, sqlite::Sqlite>,
+    {
+        fn to_sql<'lifetime>(
+            &'lifetime self,
+            out: &mut serialize::Output<'lifetime, '_, sqlite::Sqlite>,
+        ) -> serialize::Result {
+            serialize::ToSql::<sql_types::Text, sqlite::Sqlite>::to_sql(
+                match self {
+                    Self::ConfirmEmail => "confirm_email",
+                    Self::UpdateEmail => "update_email",
+                    Self::ResetPassword => "reset_password",
+                    Self::DeleteAccount => "delete_account",
+                    Self::PlcOperation => "plc_operation",
+                },
+                out,
+            )
+        }
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(purpose, did))]
+    #[diesel(table_name = crate::schema::pds::email_token)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct EmailToken {
+        pub purpose: EmailTokenPurpose,
+        pub did: String,
+        pub token: String,
+        #[diesel(column_name = requestedAt)]
+        #[serde(rename = "requestedAt")]
+        pub requested_at: String,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Insertable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(code))]
+    #[diesel(table_name = crate::schema::pds::invite_code)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct InviteCode {
+        pub code: String,
+        #[diesel(column_name = availableUses)]
+        #[serde(rename = "availableUses")]
+        pub available_uses: i32,
+        pub disabled: i16,
+        #[diesel(column_name = forAccount)]
+        #[serde(rename = "forAccount")]
+        pub for_account: String,
+        #[diesel(column_name = createdBy)]
+        #[serde(rename = "createdBy")]
+        pub created_by: String,
+        #[diesel(column_name = createdAt)]
+        #[serde(rename = "createdAt")]
+        pub created_at: String,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(code, usedBy))]
+    #[diesel(table_name = crate::schema::pds::invite_code_use)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct InviteCodeUse {
+        pub code: String,
+        #[diesel(column_name = usedBy)]
+        #[serde(rename = "usedBy")]
+        pub used_by: String,
+        #[diesel(column_name = usedAt)]
+        #[serde(rename = "usedAt")]
+        pub used_at: String,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(table_name = crate::schema::pds::refresh_token)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct RefreshToken {
+        pub id: String,
+        pub did: String,
+        #[diesel(column_name = expiresAt)]
+        #[serde(rename = "expiresAt")]
+        pub expires_at: String,
+        #[diesel(column_name = nextId)]
+        #[serde(rename = "nextId")]
+        pub next_id: Option<String>,
+        #[diesel(column_name = appPasswordName)]
+        #[serde(rename = "appPasswordName")]
+        pub app_password_name: Option<String>,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Insertable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(seq))]
+    #[diesel(table_name = crate::schema::pds::repo_seq)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct RepoSeq {
+        #[diesel(deserialize_as = i64)]
+        pub seq: Option<i64>,
+        pub did: String,
+        #[diesel(column_name = eventType)]
+        #[serde(rename = "eventType")]
+        pub event_type: String,
+        #[diesel(sql_type = Bytea)]
+        pub event: Vec<u8>,
+        #[diesel(deserialize_as = i16)]
+        pub invalidated: Option<i16>,
+        #[diesel(column_name = sequencedAt)]
+        #[serde(rename = "sequencedAt")]
+        pub sequenced_at: String,
+    }
+
+    impl RepoSeq {
+        pub fn new(did: String, event_type: String, event: Vec<u8>, sequenced_at: String) -> Self {
+            RepoSeq {
+                did,
+                event_type,
+                event,
+                sequenced_at,
+                invalidated: None, // default values used on insert
+                seq: None,         // default values used on insert
+            }
+        }
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Insertable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(id))]
+    #[diesel(table_name = crate::schema::pds::token)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct Token {
+        pub id: String,
+        pub did: String,
+        #[diesel(column_name = tokenId)]
+        #[serde(rename = "tokenId")]
+        pub token_id: String,
+        #[diesel(column_name = createdAt)]
+        #[serde(rename = "createdAt")]
+        pub created_at: DateTime<Utc>,
+        #[diesel(column_name = updatedAt)]
+        #[serde(rename = "updatedAt")]
+        pub updated_at: DateTime<Utc>,
+        #[diesel(column_name = expiresAt)]
+        #[serde(rename = "expiresAt")]
+        pub expires_at: DateTime<Utc>,
+        #[diesel(column_name = clientId)]
+        #[serde(rename = "clientId")]
+        pub client_id: String,
+        #[diesel(column_name = clientAuth)]
+        #[serde(rename = "clientAuth")]
+        pub client_auth: String,
+        #[diesel(column_name = deviceId)]
+        #[serde(rename = "deviceId")]
+        pub device_id: Option<String>,
+        pub parameters: String,
+        pub details: Option<String>,
+        pub code: Option<String>,
+        #[diesel(column_name = currentRefreshToken)]
+        #[serde(rename = "currentRefreshToken")]
+        pub current_refresh_token: Option<String>,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Insertable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(id))]
+    #[diesel(table_name = crate::schema::pds::device)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct Device {
+        pub id: String,
+        #[diesel(column_name = sessionId)]
+        #[serde(rename = "sessionId")]
+        pub session_id: Option<String>,
+        #[diesel(column_name = userAgent)]
+        #[serde(rename = "userAgent")]
+        pub user_agent: Option<String>,
+        #[diesel(column_name = ipAddress)]
+        #[serde(rename = "ipAddress")]
+        pub ip_address: String,
+        #[diesel(column_name = lastSeenAt)]
+        #[serde(rename = "lastSeenAt")]
+        pub last_seen_at: DateTime<Utc>,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Insertable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(did))]
+    #[diesel(table_name = crate::schema::pds::device_account)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct DeviceAccount {
+        pub did: String,
+        #[diesel(column_name = deviceId)]
+        #[serde(rename = "deviceId")]
+        pub device_id: String,
+        #[diesel(column_name = authenticatedAt)]
+        #[serde(rename = "authenticatedAt")]
+        pub authenticated_at: DateTime<Utc>,
+        pub remember: bool,
+        #[diesel(column_name = authorizedClients)]
+        #[serde(rename = "authorizedClients")]
+        pub authorized_clients: String,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Insertable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(id))]
+    #[diesel(table_name = crate::schema::pds::authorization_request)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct AuthorizationRequest {
+        pub id: String,
+        pub did: Option<String>,
+        #[diesel(column_name = deviceId)]
+        #[serde(rename = "deviceId")]
+        pub device_id: Option<String>,
+        #[diesel(column_name = clientId)]
+        #[serde(rename = "clientId")]
+        pub client_id: String,
+        #[diesel(column_name = clientAuth)]
+        #[serde(rename = "clientAuth")]
+        pub client_auth: String,
+        pub parameters: String,
+        #[diesel(column_name = expiresAt)]
+        #[serde(rename = "expiresAt")]
+        pub expires_at: DateTime<Utc>,
+        pub code: Option<String>,
+    }
+
+    #[derive(
+        Queryable, Insertable, Selectable, Clone, Debug, PartialEq, Default, Serialize, Deserialize,
+    )]
+    #[diesel(table_name = crate::schema::pds::used_refresh_token)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct UsedRefreshToken {
+        #[diesel(column_name = tokenId)]
+        #[serde(rename = "tokenId")]
+        pub token_id: String,
+        #[diesel(column_name = refreshToken)]
+        #[serde(rename = "refreshToken")]
+        pub refresh_token: String,
+    }
+}
+
+pub mod actor_store {
+
+    #![allow(unnameable_types, unused_qualifications)]
+    use anyhow::{Result, bail};
+    use chrono::DateTime;
+    use chrono::offset::Utc;
+    use diesel::backend::Backend;
+    use diesel::deserialize::FromSql;
+    use diesel::prelude::*;
+    use diesel::serialize::{Output, ToSql};
+    use diesel::sql_types::Text;
+    use diesel::sqlite::Sqlite;
+    use diesel::*;
+    use serde::{Deserialize, Serialize};
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Insertable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(table_name = crate::schema::actor_store::account_pref)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct AccountPref {
+        pub id: i32,
+        pub name: String,
+        #[diesel(column_name = valueJson)]
+        #[serde(rename = "valueJson")]
+        pub value_json: Option<String>,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Insertable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(uri, path))]
+    #[diesel(table_name = crate::schema::actor_store::backlink)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct Backlink {
+        pub uri: String,
+        pub path: String,
+        #[diesel(column_name = linkTo)]
+        #[serde(rename = "linkTo")]
+        pub link_to: String,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(treat_none_as_null = true)]
+    #[diesel(primary_key(cid))]
+    #[diesel(table_name = crate::schema::actor_store::blob)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct Blob {
+        pub cid: String,
+        pub did: String,
+        #[diesel(column_name = mimeType)]
+        #[serde(rename = "mimeType")]
+        pub mime_type: String,
+        pub size: i32,
+        #[diesel(column_name = tempKey)]
+        #[serde(rename = "tempKey")]
+        pub temp_key: Option<String>,
+        pub width: Option<i32>,
+        pub height: Option<i32>,
+        #[diesel(column_name = createdAt)]
+        #[serde(rename = "createdAt")]
+        pub created_at: String,
+        #[diesel(column_name = takedownRef)]
+        #[serde(rename = "takedownRef")]
+        pub takedown_ref: Option<String>,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Insertable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(uri))]
+    #[diesel(table_name = crate::schema::actor_store::record)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct Record {
+        pub uri: String,
+        pub cid: String,
+        pub did: String,
+        pub collection: String,
+        pub rkey: String,
+        #[diesel(column_name = repoRev)]
+        #[serde(rename = "repoRev")]
+        pub repo_rev: Option<String>,
+        #[diesel(column_name = indexedAt)]
+        #[serde(rename = "indexedAt")]
+        pub indexed_at: String,
+        #[diesel(column_name = takedownRef)]
+        #[serde(rename = "takedownRef")]
+        pub takedown_ref: Option<String>,
+    }
+
+    #[derive(
+        QueryableByName,
+        Queryable,
+        Identifiable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(blobCid, recordUri))]
+    #[diesel(table_name = crate::schema::actor_store::record_blob)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct RecordBlob {
+        #[diesel(column_name = blobCid, sql_type = Text)]
+        #[serde(rename = "blobCid")]
+        pub blob_cid: String,
+        #[diesel(column_name = recordUri, sql_type = Text)]
+        #[serde(rename = "recordUri")]
+        pub record_uri: String,
+        #[diesel(sql_type = Text)]
+        pub did: String,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Insertable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(cid))]
+    #[diesel(table_name = crate::schema::actor_store::repo_block)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct RepoBlock {
+        #[diesel(sql_type = Text)]
+        pub cid: String,
+        pub did: String,
+        #[diesel(column_name = repoRev)]
+        #[serde(rename = "repoRev")]
+        pub repo_rev: String,
+        pub size: i32,
+        #[diesel(sql_type = Bytea)]
+        pub content: Vec<u8>,
+    }
+
+    #[derive(
+        Queryable,
+        Identifiable,
+        Selectable,
+        Clone,
+        Debug,
+        PartialEq,
+        Default,
+        Serialize,
+        Deserialize,
+    )]
+    #[diesel(primary_key(did))]
+    #[diesel(table_name = crate::schema::actor_store::repo_root)]
+    #[diesel(check_for_backend(Sqlite))]
+    pub struct RepoRoot {
+        pub did: String,
+        pub cid: String,
+        pub rev: String,
+        #[diesel(column_name = indexedAt)]
+        #[serde(rename = "indexedAt")]
+        pub indexed_at: String,
+    }
+}
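These generated models are plain Diesel `Queryable`/`Selectable` structs, and every read in this PR goes through a deadpool-diesel pool. A hedged usage sketch for loading one of them; the `crate::schema` and `crate::models` paths are the ones the derives above assume:

use deadpool_diesel::sqlite::Pool;
use diesel::prelude::*;

async fn find_account(
    db: &Pool,
    email: String,
) -> anyhow::Result<Option<crate::models::pds::Account>> {
    use crate::schema::pds::account::dsl as AccountSchema;
    // Check out a connection, then run the blocking query on the pool's
    // dedicated thread via `interact`.
    let conn = db.get().await?;
    conn.interact(move |conn| {
        AccountSchema::account
            .filter(AccountSchema::email.eq(email))
            .first::<crate::models::pds::Account>(conn)
            .optional()
    })
    .await
    .map_err(|e| anyhow::anyhow!("interact failed: {e}"))? // pool-side error
    .map_err(Into::into) // diesel-side error
}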
+451 -240 src/oauth.rs
···
 //! OAuth endpoints
-
+#![allow(unnameable_types, unused_qualifications)]
+use crate::config::AppConfig;
+use crate::error::Error;
 use crate::metrics::AUTH_FAILED;
-use crate::{AppConfig, AppState, Client, Db, Error, Result, SigningKey};
+use crate::serve::{AppState, Client, Result, SigningKey};
 use anyhow::{Context as _, anyhow};
 use argon2::{Argon2, PasswordHash, PasswordVerifier as _};
 use atrium_crypto::keypair::Did as _;
···
     routing::{get, post},
 };
 use base64::Engine as _;
+use deadpool_diesel::sqlite::Pool;
+use diesel::*;
 use metrics::counter;
 use rand::distributions::Alphanumeric;
 use rand::{Rng as _, thread_rng};
···
 /// POST `/oauth/par`
 #[expect(clippy::too_many_lines)]
 async fn par(
-    State(db): State<Db>,
+    State(db): State<Pool>,
     State(client): State<Client>,
     Json(form_data): Json<HashMap<String, String>>,
 ) -> Result<Json<Value>> {
···
         .context("failed to compute expiration time")?
         .timestamp();
 
-    _ = sqlx::query!(
-        r#"
-        INSERT INTO oauth_par_requests (
-            request_uri, client_id, response_type, code_challenge, code_challenge_method,
-            state, login_hint, scope, redirect_uri, response_mode, display,
-            created_at, expires_at
-        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
-        "#,
-        request_uri,
-        client_id,
-        response_type,
-        code_challenge,
-        code_challenge_method,
-        state,
-        login_hint,
-        scope,
-        redirect_uri,
-        response_mode,
-        display,
-        created_at,
-        expires_at
-    )
-    .execute(&db)
-    .await
-    .context("failed to store PAR request")?;
+    use crate::schema::pds::oauth_par_requests::dsl as ParRequestSchema;
+    let client_id = client_id.to_owned();
+    let request_uri_cloned = request_uri.to_owned();
+    let response_type = response_type.to_owned();
+    let code_challenge = code_challenge.to_owned();
+    let code_challenge_method = code_challenge_method.to_owned();
+    _ = db
+        .get()
+        .await
+        .expect("Failed to get database connection")
+        .interact(move |conn| {
+            insert_into(ParRequestSchema::oauth_par_requests)
+                .values((
+                    ParRequestSchema::request_uri.eq(&request_uri_cloned),
+                    ParRequestSchema::client_id.eq(client_id),
+                    ParRequestSchema::response_type.eq(response_type),
+                    ParRequestSchema::code_challenge.eq(code_challenge),
+                    ParRequestSchema::code_challenge_method.eq(code_challenge_method),
+                    ParRequestSchema::state.eq(state),
+                    ParRequestSchema::login_hint.eq(login_hint),
+                    ParRequestSchema::scope.eq(scope),
+                    ParRequestSchema::redirect_uri.eq(redirect_uri),
+                    ParRequestSchema::response_mode.eq(response_mode),
+                    ParRequestSchema::display.eq(display),
+                    ParRequestSchema::created_at.eq(created_at),
+                    ParRequestSchema::expires_at.eq(expires_at),
+                ))
+                .execute(conn)
+        })
+        .await
+        .expect("Failed to store PAR request")
+        .expect("Failed to store PAR request");
 
     Ok(Json(json!({
         "request_uri": request_uri,
···
 /// OAuth Authorization endpoint
 /// GET `/oauth/authorize`
 async fn authorize(
-    State(db): State<Db>,
+    State(db): State<Pool>,
     State(client): State<Client>,
     Query(params): Query<HashMap<String, String>>,
 ) -> Result<impl IntoResponse> {
···
     let timestamp = chrono::Utc::now().timestamp();
 
     // Retrieve the PAR request from the database
-    let par_request = sqlx::query!(
-        r#"
-        SELECT * FROM oauth_par_requests
-        WHERE request_uri = ? AND client_id = ? AND expires_at > ?
-        "#,
-        request_uri,
-        client_id,
-        timestamp
-    )
-    .fetch_optional(&db)
-    .await
-    .context("failed to query PAR request")?
-    .context("PAR request not found or expired")?;
+    use crate::schema::pds::oauth_par_requests::dsl as ParRequestSchema;
+
+    let request_uri_clone = request_uri.to_owned();
+    let client_id_clone = client_id.to_owned();
+    let timestamp_clone = timestamp.clone();
+    let login_hint = db
+        .get()
+        .await
+        .expect("Failed to get database connection")
+        .interact(move |conn| {
+            ParRequestSchema::oauth_par_requests
+                .select(ParRequestSchema::login_hint)
+                .filter(ParRequestSchema::request_uri.eq(request_uri_clone))
+                .filter(ParRequestSchema::client_id.eq(client_id_clone))
+                .filter(ParRequestSchema::expires_at.gt(timestamp_clone))
+                .first::<Option<String>>(conn)
+                .optional()
+        })
+        .await
+        .expect("Failed to query PAR request")
+        .expect("Failed to query PAR request")
+        .expect("Failed to query PAR request");
 
     // Validate client metadata
     let client_metadata = fetch_client_metadata(&client, client_id).await?;
 
     // Authorization page with login form
-    let login_hint = par_request.login_hint.unwrap_or_default();
+    let login_hint = login_hint.unwrap_or_default();
     let html = format!(
         r#"<!DOCTYPE html>
 <html>
···
 /// POST `/oauth/authorize/sign-in`
 #[expect(clippy::too_many_lines)]
 async fn authorize_signin(
-    State(db): State<Db>,
+    State(db): State<Pool>,
     State(config): State<AppConfig>,
     State(client): State<Client>,
     extract::Form(form_data): extract::Form<HashMap<String, String>>,
···
     let timestamp = chrono::Utc::now().timestamp();
 
     // Retrieve the PAR request
-    let par_request = sqlx::query!(
-        r#"
-        SELECT * FROM oauth_par_requests
-        WHERE request_uri = ? AND client_id = ? AND expires_at > ?
-        "#,
-        request_uri,
-        client_id,
-        timestamp
-    )
-    .fetch_optional(&db)
-    .await
-    .context("failed to query PAR request")?
-    .context("PAR request not found or expired")?;
+    use crate::schema::pds::oauth_par_requests::dsl as ParRequestSchema;
+    #[derive(Queryable, Selectable)]
+    #[diesel(table_name = crate::schema::pds::oauth_par_requests)]
+    #[diesel(check_for_backend(sqlite::Sqlite))]
+    struct ParRequest {
+        request_uri: String,
+        client_id: String,
+        response_type: String,
+        code_challenge: String,
+        code_challenge_method: String,
+        state: Option<String>,
+        login_hint: Option<String>,
+        scope: Option<String>,
+        redirect_uri: Option<String>,
+        response_mode: Option<String>,
+        display: Option<String>,
+        created_at: i64,
+        expires_at: i64,
+    }
+    let request_uri_clone = request_uri.to_owned();
+    let client_id_clone = client_id.to_owned();
+    let timestamp_clone = timestamp.clone();
+    let par_request = db
+        .get()
+        .await
+        .expect("Failed to get database connection")
+        .interact(move |conn| {
+            ParRequestSchema::oauth_par_requests
+                .filter(ParRequestSchema::request_uri.eq(request_uri_clone))
+                .filter(ParRequestSchema::client_id.eq(client_id_clone))
+                .filter(ParRequestSchema::expires_at.gt(timestamp_clone))
+                .first::<ParRequest>(conn)
+                .optional()
+        })
+        .await
+        .expect("Failed to query PAR request")
+        .expect("Failed to query PAR request")
+        .expect("Failed to query PAR request");
 
     // Authenticate the user
-    let account = sqlx::query!(
-        r#"
-        WITH LatestHandles AS (
-            SELECT did, handle
-            FROM handles
-            WHERE (did, created_at) IN (
-                SELECT did, MAX(created_at) AS max_created_at
-                FROM handles
-                GROUP BY did
-            )
-        )
-        SELECT a.did, a.email, a.password, h.handle
-        FROM accounts a
-        LEFT JOIN LatestHandles h ON a.did = h.did
-        WHERE h.handle = ?
-        "#,
-        username
-    )
-    .fetch_optional(&db)
-    .await
-    .context("failed to query database")?
-    .context("user not found")?;
+    use crate::schema::pds::account::dsl as AccountSchema;
+    use crate::schema::pds::actor::dsl as ActorSchema;
+    let username_clone = username.to_owned();
+    let account = db
+        .get()
+        .await
+        .expect("Failed to get database connection")
+        .interact(move |conn| {
+            AccountSchema::account
+                .filter(AccountSchema::email.eq(username_clone))
+                .first::<crate::models::pds::Account>(conn)
+                .optional()
+        })
+        .await
+        .expect("Failed to query account")
+        .expect("Failed to query account")
+        .expect("Failed to query account");
+    // let actor = db
+    //     .get()
+    //     .await
+    //     .expect("Failed to get database connection")
+    //     .interact(move |conn| {
+    //         ActorSchema::actor
+    //             .filter(ActorSchema::did.eq(did))
+    //             .first::<rsky_pds::models::Actor>(conn)
+    //             .optional()
+    //     })
+    //     .await
+    //     .expect("Failed to query actor")
+    //     .expect("Failed to query actor")
+    //     .expect("Failed to query actor");
 
     // Verify password - fixed to use equality check instead of pattern matching
     if Argon2::default().verify_password(
···
         .context("failed to compute expiration time")?
         .timestamp();
 
-    _ = sqlx::query!(
-        r#"
-        INSERT INTO oauth_authorization_codes (
-            code, client_id, subject, code_challenge, code_challenge_method,
-            redirect_uri, scope, created_at, expires_at, used
-        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
-        "#,
-        code,
-        client_id,
-        account.did,
-        par_request.code_challenge,
-        par_request.code_challenge_method,
-        redirect_uri,
-        par_request.scope,
-        created_at,
-        expires_at,
-        false
-    )
-    .execute(&db)
-    .await
-    .context("failed to store authorization code")?;
+    use crate::schema::pds::oauth_authorization_codes::dsl as AuthCodeSchema;
+    let code_cloned = code.to_owned();
+    let client_id = client_id.to_owned();
+    let subject = account.did.to_owned();
+    let code_challenge = par_request.code_challenge.to_owned();
+    let code_challenge_method = par_request.code_challenge_method.to_owned();
+    let redirect_uri_cloned = redirect_uri.to_owned();
+    let scope = par_request.scope.to_owned();
+    let used = false;
+    _ = db
+        .get()
+        .await
+        .expect("Failed to get database connection")
+        .interact(move |conn| {
+            insert_into(AuthCodeSchema::oauth_authorization_codes)
+                .values((
+                    AuthCodeSchema::code.eq(code_cloned),
+                    AuthCodeSchema::client_id.eq(client_id),
+                    AuthCodeSchema::subject.eq(subject),
+                    AuthCodeSchema::code_challenge.eq(code_challenge),
+                    AuthCodeSchema::code_challenge_method.eq(code_challenge_method),
+                    AuthCodeSchema::redirect_uri.eq(redirect_uri_cloned),
+                    AuthCodeSchema::scope.eq(scope),
+                    AuthCodeSchema::created_at.eq(created_at),
+                    AuthCodeSchema::expires_at.eq(expires_at),
+                    AuthCodeSchema::used.eq(used),
+                ))
+                .execute(conn)
+        })
+        .await
+        .expect("Failed to store authorization code")
+        .expect("Failed to store authorization code");
 
     // Use state from the PAR request or generate one
     let state = par_request.state.unwrap_or_else(|| {
···
     dpop_token: &str,
     http_method: &str,
     http_uri: &str,
-    db: &Db,
+    db: &Pool,
     access_token: Option<&str>,
     bound_key_thumbprint: Option<&str>,
 ) -> Result<String> {
···
     }
 
     // 11. Check for replay attacks via JTI tracking
-    let jti_used =
-        sqlx::query_scalar!(r#"SELECT COUNT(*) FROM oauth_used_jtis WHERE jti = ?"#, jti)
-            .fetch_one(db)
-            .await
-            .context("failed to check JTI")?;
+    use crate::schema::pds::oauth_used_jtis::dsl as JtiSchema;
+    let jti_clone = jti.to_owned();
+    let jti_used = db
+        .get()
+        .await
+        .expect("Failed to get database connection")
+        .interact(move |conn| {
+            JtiSchema::oauth_used_jtis
+                .filter(JtiSchema::jti.eq(jti_clone))
+                .count()
+                .get_result::<i64>(conn)
+                .optional()
+        })
+        .await
+        .expect("Failed to check JTI")
+        .expect("Failed to check JTI")
+        .unwrap_or(0);
 
     if jti_used > 0 {
         return Err(Error::with_status(
···
     }
 
     // 12. Store the JTI to prevent replay attacks
-    _ = sqlx::query!(
-        r#"
-        INSERT INTO oauth_used_jtis (jti, issuer, created_at, expires_at)
-        VALUES (?, ?, ?, ?)
-        "#,
-        jti,
-        thumbprint, // Use thumbprint as issuer identifier
-        now,
-        exp
-    )
-    .execute(db)
-    .await
-    .context("failed to store JTI")?;
+    let jti_cloned = jti.to_owned();
+    let issuer = thumbprint.to_owned();
+    let created_at = now;
+    let expires_at = exp;
+    _ = db
+        .get()
+        .await
+        .expect("Failed to get database connection")
+        .interact(move |conn| {
+            insert_into(JtiSchema::oauth_used_jtis)
+                .values((
+                    JtiSchema::jti.eq(jti_cloned),
+                    JtiSchema::issuer.eq(issuer),
+                    JtiSchema::created_at.eq(created_at),
+                    JtiSchema::expires_at.eq(expires_at),
+                ))
+                .execute(conn)
+        })
+        .await
+        .expect("Failed to store JTI")
+        .expect("Failed to store JTI");
 
     // 13. Cleanup expired JTIs periodically (1% chance on each request)
     if thread_rng().gen_range(0_i32..100_i32) == 0_i32 {
-        _ = sqlx::query!(r#"DELETE FROM oauth_used_jtis WHERE expires_at < ?"#, now)
-            .execute(db)
+        let now_clone = now.to_owned();
+        _ = db
+            .get()
+            .await
+            .expect("Failed to get database connection")
+            .interact(move |conn| {
+                delete(JtiSchema::oauth_used_jtis)
+                    .filter(JtiSchema::expires_at.lt(now_clone))
+                    .execute(conn)
+            })
             .await
-            .context("failed to clean up expired JTIs")?;
+            .expect("Failed to clean up expired JTIs")
+            .expect("Failed to clean up expired JTIs");
     }
 
     Ok(thumbprint)
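The `thumbprint` returned here is the DPoP key's JWK thumbprint, which the token endpoint later stores as the `jkt` confirmation claim. Its computation falls outside this diff, so the following is a hedged sketch of the RFC 7638 calculation for an EC key, not the PR's actual helper (sha2 is an assumed dependency; base64 is already imported in this file):

use base64::Engine as _;
use sha2::{Digest as _, Sha256};

// RFC 7638: serialize only the required JWK members, in lexicographic order,
// with no whitespace, then base64url-encode the SHA-256 without padding.
fn ec_jwk_thumbprint(crv: &str, x: &str, y: &str) -> String {
    let canonical = format!(r#"{{"crv":"{crv}","kty":"EC","x":"{x}","y":"{y}"}}"#);
    base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(Sha256::digest(canonical.as_bytes()))
}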
···
 /// Handles both `authorization_code` and `refresh_token` grants
 #[expect(clippy::too_many_lines)]
 async fn token(
-    State(db): State<Db>,
+    State(db): State<Pool>,
     State(skey): State<SigningKey>,
     State(config): State<AppConfig>,
     State(client): State<Client>,
···
         == "private_key_jwt";
 
     // Verify DPoP proof
-    let dpop_thumbprint = verify_dpop_proof(
+    let dpop_thumbprint_res = verify_dpop_proof(
         dpop_token,
         "POST",
         &format!("https://{}/oauth/token", config.host_name),
···
         // }
     } else {
         // Rule 2: For public clients, check if this DPoP key has been used before
-        let is_key_reused = sqlx::query_scalar!(
-            r#"SELECT COUNT(*) FROM oauth_refresh_tokens WHERE dpop_thumbprint = ? AND client_id = ?"#,
-            dpop_thumbprint,
-            client_id
-        )
-        .fetch_one(&db)
-        .await
-        .context("failed to check key usage history")? > 0;
+        use crate::schema::pds::oauth_refresh_tokens::dsl as RefreshTokenSchema;
+        let dpop_thumbprint_clone = dpop_thumbprint_res.to_owned();
+        let client_id_clone = client_id.to_owned();
+        let is_key_reused = db
+            .get()
+            .await
+            .expect("Failed to get database connection")
+            .interact(move |conn| {
+                RefreshTokenSchema::oauth_refresh_tokens
+                    .filter(RefreshTokenSchema::dpop_thumbprint.eq(dpop_thumbprint_clone))
+                    .filter(RefreshTokenSchema::client_id.eq(client_id_clone))
+                    .count()
+                    .get_result::<i64>(conn)
+                    .optional()
+            })
+            .await
+            .expect("Failed to check key usage history")
+            .expect("Failed to check key usage history")
+            .unwrap_or(0)
+            > 0;
 
         if is_key_reused && grant_type == "authorization_code" {
             return Err(Error::with_status(
···
     let timestamp = chrono::Utc::now().timestamp();
 
     // Retrieve and validate the authorization code
-    let auth_code = sqlx::query!(
-        r#"
-        SELECT * FROM oauth_authorization_codes
-        WHERE code = ? AND client_id = ? AND redirect_uri = ? AND expires_at > ? AND used = FALSE
-        "#,
-        code,
-        client_id,
-        redirect_uri,
-        timestamp
-    )
-    .fetch_optional(&db)
-    .await
-    .context("failed to query authorization code")?
-    .context("authorization code not found, expired, or already used")?;
+    use crate::schema::pds::oauth_authorization_codes::dsl as AuthCodeSchema;
+    #[derive(Queryable, Selectable, Serialize)]
+    #[diesel(table_name = crate::schema::pds::oauth_authorization_codes)]
+    #[diesel(check_for_backend(sqlite::Sqlite))]
+    struct AuthCode {
+        code: String,
+        client_id: String,
+        subject: String,
+        code_challenge: String,
+        code_challenge_method: String,
+        redirect_uri: String,
+        scope: Option<String>,
+        created_at: i64,
+        expires_at: i64,
+        used: bool,
+    }
+    let code_clone = code.to_owned();
+    let client_id_clone = client_id.to_owned();
+    let redirect_uri_clone = redirect_uri.to_owned();
+    let auth_code = db
+        .get()
+        .await
+        .expect("Failed to get database connection")
+        .interact(move |conn| {
+            AuthCodeSchema::oauth_authorization_codes
+                .filter(AuthCodeSchema::code.eq(code_clone))
+                .filter(AuthCodeSchema::client_id.eq(client_id_clone))
+                .filter(AuthCodeSchema::redirect_uri.eq(redirect_uri_clone))
+                .filter(AuthCodeSchema::expires_at.gt(timestamp))
+                .filter(AuthCodeSchema::used.eq(false))
+                .first::<AuthCode>(conn)
+                .optional()
+        })
+        .await
+        .expect("Failed to query authorization code")
+        .expect("Failed to query authorization code")
+        .expect("Failed to query authorization code");
 
     // Verify PKCE code challenge
     verify_pkce(
···
     )?;
 
     // Mark the code as used
-    _ = sqlx::query!(
-        r#"UPDATE oauth_authorization_codes SET used = TRUE WHERE code = ?"#,
-        code
-    )
-    .execute(&db)
-    .await
-    .context("failed to mark code as used")?;
+    let code_cloned = code.to_owned();
+    _ = db
+        .get()
+        .await
+        .expect("Failed to get database connection")
+        .interact(move |conn| {
+            update(AuthCodeSchema::oauth_authorization_codes)
+                .filter(AuthCodeSchema::code.eq(code_cloned))
+                .set(AuthCodeSchema::used.eq(true))
+                .execute(conn)
+        })
+        .await
+        .expect("Failed to mark code as used")
+        .expect("Failed to mark code as used");
 
     // Generate tokens with appropriate lifetimes
     let now = chrono::Utc::now().timestamp();
···
         "exp": access_token_expires_at,
         "iat": now,
         "cnf": {
-            "jkt": dpop_thumbprint // Rule 1: Bind to DPoP key
+            "jkt": dpop_thumbprint_res // Rule 1: Bind to DPoP key
        },
         "scope": auth_code.scope
     });
···
         "exp": refresh_token_expires_at,
         "iat": now,
         "cnf": {
-            "jkt": dpop_thumbprint // Rule 1: Bind to DPoP key
+            "jkt": dpop_thumbprint_res // Rule 1: Bind to DPoP key
         },
         "scope": auth_code.scope
     });
···
         .context("failed to sign refresh token")?;
 
     // Store the refresh token with DPoP binding
-    _ = sqlx::query!(
-        r#"
-        INSERT INTO oauth_refresh_tokens (
-            token, client_id, subject, dpop_thumbprint, scope, created_at, expires_at, revoked
-        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
-        "#,
-        refresh_token,
-        client_id,
-        auth_code.subject,
-        dpop_thumbprint,
-        auth_code.scope,
-        now,
-        refresh_token_expires_at,
-        false
-    )
-    .execute(&db)
-    .await
-    .context("failed to store refresh token")?;
+    use crate::schema::pds::oauth_refresh_tokens::dsl as RefreshTokenSchema;
+    let refresh_token_cloned = refresh_token.to_owned();
+    let client_id_cloned = client_id.to_owned();
+    let subject = auth_code.subject.to_owned();
+    let dpop_thumbprint_cloned = dpop_thumbprint_res.to_owned();
+    let scope = auth_code.scope.to_owned();
+    let created_at = now;
+    let expires_at = refresh_token_expires_at;
+    _ = db
+        .get()
+        .await
+        .expect("Failed to get database connection")
+        .interact(move |conn| {
+            insert_into(RefreshTokenSchema::oauth_refresh_tokens)
+                .values((
+                    RefreshTokenSchema::token.eq(refresh_token_cloned),
+                    RefreshTokenSchema::client_id.eq(client_id_cloned),
+                    RefreshTokenSchema::subject.eq(subject),
+                    RefreshTokenSchema::dpop_thumbprint.eq(dpop_thumbprint_cloned),
+                    RefreshTokenSchema::scope.eq(scope),
+                    RefreshTokenSchema::created_at.eq(created_at),
+                    RefreshTokenSchema::expires_at.eq(expires_at),
+                    RefreshTokenSchema::revoked.eq(false),
+                ))
+                .execute(conn)
+        })
+        .await
+        .expect("Failed to store refresh token")
+        .expect("Failed to store refresh token");
 
     // Return token response with the subject claim
     Ok(Json(json!({
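`verify_pkce` is called at the top of this grant but its body falls outside the diff. For the S256 method the check is deterministic, so here is a hedged sketch; the function name and signature are assumptions, not the PR's actual code:

use base64::Engine as _;
use sha2::{Digest as _, Sha256};

// RFC 7636 S256: BASE64URL-ENCODE(SHA256(ASCII(code_verifier))) == code_challenge.
fn pkce_s256_matches(code_verifier: &str, code_challenge: &str) -> bool {
    let digest = Sha256::digest(code_verifier.as_bytes());
    base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(digest) == code_challenge
}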
···
 
     // Rules 7 & 8: Verify refresh token and DPoP consistency
     // Retrieve the refresh token
-    let token_data = sqlx::query!(
-        r#"
-        SELECT * FROM oauth_refresh_tokens
-        WHERE token = ? AND client_id = ? AND expires_at > ? AND revoked = FALSE AND dpop_thumbprint = ?
-        "#,
-        refresh_token,
-        client_id,
-        timestamp,
-        dpop_thumbprint // Rule 8: Must use same DPoP key
-    )
-    .fetch_optional(&db)
-    .await
-    .context("failed to query refresh token")?
-    .context("refresh token not found, expired, revoked, or invalid for this DPoP key")?;
+    use crate::schema::pds::oauth_refresh_tokens::dsl as RefreshTokenSchema;
+    #[derive(Queryable, Selectable, Serialize)]
+    #[diesel(table_name = crate::schema::pds::oauth_refresh_tokens)]
+    #[diesel(check_for_backend(sqlite::Sqlite))]
+    struct TokenData {
+        token: String,
+        client_id: String,
+        subject: String,
+        dpop_thumbprint: String,
+        scope: Option<String>,
+        created_at: i64,
+        expires_at: i64,
+        revoked: bool,
+    }
+    let dpop_thumbprint_clone = dpop_thumbprint_res.to_owned();
+    let refresh_token_clone = refresh_token.to_owned();
+    let client_id_clone = client_id.to_owned();
+    let token_data = db
+        .get()
+        .await
+        .expect("Failed to get database connection")
+        .interact(move |conn| {
+            RefreshTokenSchema::oauth_refresh_tokens
+                .filter(RefreshTokenSchema::token.eq(refresh_token_clone))
+                .filter(RefreshTokenSchema::client_id.eq(client_id_clone))
+                .filter(RefreshTokenSchema::expires_at.gt(timestamp))
+                .filter(RefreshTokenSchema::revoked.eq(false))
+                .filter(RefreshTokenSchema::dpop_thumbprint.eq(dpop_thumbprint_clone))
+                .first::<TokenData>(conn)
+                .optional()
+        })
+        .await
+        .expect("Failed to query refresh token")
+        .expect("Failed to query refresh token")
+        .expect("Failed to query refresh token");
 
     // Rule 10: For confidential clients, verify key is still advertised in their jwks
     if is_confidential_client {
         let client_still_advertises_key = true; // Implement actual check against client jwks
         if !client_still_advertises_key {
             // Revoke all tokens bound to this key
-            _ = sqlx::query!(
-                r#"UPDATE oauth_refresh_tokens SET revoked = TRUE
-                WHERE client_id = ? AND dpop_thumbprint = ?"#,
-                client_id,
-                dpop_thumbprint
-            )
-            .execute(&db)
-            .await
-            .context("failed to revoke tokens")?;
+            let client_id_cloned = client_id.to_owned();
+            let dpop_thumbprint_cloned = dpop_thumbprint_res.to_owned();
+            _ = db
+                .get()
+                .await
+                .expect("Failed to get database connection")
+                .interact(move |conn| {
+                    update(RefreshTokenSchema::oauth_refresh_tokens)
+                        .filter(RefreshTokenSchema::client_id.eq(client_id_cloned))
+                        .filter(
+                            RefreshTokenSchema::dpop_thumbprint.eq(dpop_thumbprint_cloned),
+                        )
+                        .set(RefreshTokenSchema::revoked.eq(true))
+                        .execute(conn)
+                })
+                .await
+                .expect("Failed to revoke tokens")
+                .expect("Failed to revoke tokens");
 
             return Err(Error::with_status(
                 StatusCode::BAD_REQUEST,
···
         }
     }
 
     // Rotate the refresh token
-    _ = sqlx::query!(
-        r#"UPDATE oauth_refresh_tokens SET revoked = TRUE WHERE token = ?"#,
-        refresh_token
-    )
-    .execute(&db)
-    .await
-    .context("failed to revoke old refresh token")?;
+    let refresh_token_cloned = refresh_token.to_owned();
+    _ = db
+        .get()
+        .await
+        .expect("Failed to get database connection")
+        .interact(move |conn| {
+            update(RefreshTokenSchema::oauth_refresh_tokens)
+                .filter(RefreshTokenSchema::token.eq(refresh_token_cloned))
+                .set(RefreshTokenSchema::revoked.eq(true))
+                .execute(conn)
+        })
+        .await
+        .expect("Failed to revoke old refresh token")
+        .expect("Failed to revoke old refresh token");
 
     // Generate new tokens
     let now = chrono::Utc::now().timestamp();
···
         "exp": access_token_expires_at,
         "iat": now,
         "cnf": {
-            "jkt": dpop_thumbprint
+            "jkt": dpop_thumbprint_res
         },
         "scope": token_data.scope
     });
···
         "exp": refresh_token_expires_at,
         "iat": now,
         "cnf": {
-            "jkt": dpop_thumbprint
+            "jkt": dpop_thumbprint_res
         },
         "scope": token_data.scope
     });
···
         .context("failed to sign refresh token")?;
 
     // Store the new refresh token
-    _ = sqlx::query!(
-        r#"
-        INSERT INTO oauth_refresh_tokens (
-            token, client_id, subject, dpop_thumbprint, scope, created_at, expires_at, revoked
-        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
-        "#,
-        new_refresh_token,
-        client_id,
-        token_data.subject,
-        dpop_thumbprint,
-        token_data.scope,
-        now,
-        refresh_token_expires_at,
-        false
-    )
-    .execute(&db)
-    .await
-    .context("failed to store refresh token")?;
+    let new_refresh_token_cloned = new_refresh_token.to_owned();
+    let client_id_cloned = client_id.to_owned();
+    let subject = token_data.subject.to_owned();
+    let dpop_thumbprint_cloned = dpop_thumbprint_res.to_owned();
+    let scope = token_data.scope.to_owned();
+    let created_at = now;
+    let expires_at = refresh_token_expires_at;
+    _ = db
+        .get()
+        .await
+        .expect("Failed to get database connection")
+        .interact(move |conn| {
+            insert_into(RefreshTokenSchema::oauth_refresh_tokens)
+                .values((
+                    RefreshTokenSchema::token.eq(new_refresh_token_cloned),
+                    RefreshTokenSchema::client_id.eq(client_id_cloned),
+                    RefreshTokenSchema::subject.eq(subject),
+                    RefreshTokenSchema::dpop_thumbprint.eq(dpop_thumbprint_cloned),
+                    RefreshTokenSchema::scope.eq(scope),
+                    RefreshTokenSchema::created_at.eq(created_at),
+                    RefreshTokenSchema::expires_at.eq(expires_at),
+                    RefreshTokenSchema::revoked.eq(false),
+                ))
+                .execute(conn)
+        })
+        .await
+        .expect("Failed to store refresh token")
+        .expect("Failed to store refresh token");
 
     // Return token response
     Ok(Json(json!({
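One design note on the rotation above: the old token is revoked and the new one inserted in two separate `interact` calls, so a crash between them leaves the client with no valid refresh token. A hedged sketch of doing both atomically inside one Diesel transaction (column names follow the schema used above; this is an alternative shape, not the PR's code):

use diesel::prelude::*;

fn rotate_refresh_token(
    conn: &mut SqliteConnection,
    old_token: &str,
    new_token: &str,
    client_id: &str,
    subject: &str,
    dpop_thumbprint: &str,
    scope: Option<String>,
    now: i64,
    expires_at: i64,
) -> QueryResult<()> {
    use crate::schema::pds::oauth_refresh_tokens::dsl as t;
    conn.transaction(|conn| {
        // Revoke the old token and insert its replacement in one unit.
        diesel::update(t::oauth_refresh_tokens)
            .filter(t::token.eq(old_token))
            .set(t::revoked.eq(true))
            .execute(conn)?;
        diesel::insert_into(t::oauth_refresh_tokens)
            .values((
                t::token.eq(new_token),
                t::client_id.eq(client_id),
                t::subject.eq(subject),
                t::dpop_thumbprint.eq(dpop_thumbprint),
                t::scope.eq(scope),
                t::created_at.eq(now),
                t::expires_at.eq(expires_at),
                t::revoked.eq(false),
            ))
            .execute(conn)?;
        Ok(())
    })
}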
···
 ///
 /// Implements RFC7009 for revoking refresh tokens
 async fn revoke(
-    State(db): State<Db>,
+    State(db): State<Pool>,
     Json(form_data): Json<HashMap<String, String>>,
 ) -> Result<Json<Value>> {
     // Extract required parameters
···
     }
 
     // Revoke the token
-    _ = sqlx::query!(
-        r#"UPDATE oauth_refresh_tokens SET revoked = TRUE WHERE token = ?"#,
-        token
-    )
-    .execute(&db)
-    .await
-    .context("failed to revoke token")?;
+    use crate::schema::pds::oauth_refresh_tokens::dsl as RefreshTokenSchema;
+    let token_cloned = token.to_owned();
+    _ = db
+        .get()
+        .await
+        .expect("Failed to get database connection")
+        .interact(move |conn| {
+            update(RefreshTokenSchema::oauth_refresh_tokens)
+                .filter(RefreshTokenSchema::token.eq(token_cloned))
+                .set(RefreshTokenSchema::revoked.eq(true))
+                .execute(conn)
+        })
+        .await
+        .expect("Failed to revoke token")
+        .expect("Failed to revoke token");
 
     // RFC7009 requires a 200 OK with an empty response
     Ok(Json(json!({})))
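From the client side this is a single POST. A hedged sketch with reqwest; the host and token are placeholders. Note that the handler above extracts a JSON body, whereas RFC 7009 specifies application/x-www-form-urlencoded, so a strictly spec-compliant client would need the server to also accept form encoding:

use std::collections::HashMap;

// Hypothetical client-side call; `pds.example` is illustrative only.
async fn revoke_refresh_token(client: &reqwest::Client, token: &str) -> reqwest::Result<()> {
    let mut body = HashMap::new();
    body.insert("token", token);
    let _ = client
        .post("https://pds.example/oauth/revoke")
        .json(&body) // matches the Json extractor above; needs reqwest's `json` feature
        .send()
        .await?
        .error_for_status()?; // expect 200 with an empty JSON object
    Ok(())
}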
···
 ///
 /// Implements RFC7662 for introspecting tokens
 async fn introspect(
-    State(db): State<Db>,
+    State(db): State<Pool>,
     State(skey): State<SigningKey>,
     Json(form_data): Json<HashMap<String, String>>,
 ) -> Result<Json<Value>> {
···
 
     // For refresh tokens, check if it's been revoked
     if is_refresh_token {
-        let is_revoked = sqlx::query_scalar!(
-            r#"SELECT revoked FROM oauth_refresh_tokens WHERE token = ?"#,
-            token
-        )
-        .fetch_optional(&db)
-        .await
-        .context("failed to query token")?
-        .unwrap_or(true);
+        use crate::schema::pds::oauth_refresh_tokens::dsl as RefreshTokenSchema;
+        let token_cloned = token.to_owned();
+        let is_revoked = db
+            .get()
+            .await
+            .expect("Failed to get database connection")
+            .interact(move |conn| {
+                RefreshTokenSchema::oauth_refresh_tokens
+                    .filter(RefreshTokenSchema::token.eq(token_cloned))
+                    .select(RefreshTokenSchema::revoked)
+                    .first::<bool>(conn)
+                    .optional()
+            })
+            .await
+            .expect("Failed to query token")
+            .expect("Failed to query token")
+            .unwrap_or(true);
 
         if is_revoked {
             return Ok(Json(json!({"active": false})));
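For the non-revoked case, RFC 7662 prescribes an `"active": true` object with optional metadata. Which optional fields this PR actually emits lies outside the diff; a hedged sketch of the response shape:

use serde_json::json;

// Hypothetical helper; field selection is an assumption based on RFC 7662.
fn introspection_ok(scope: Option<String>, client_id: &str, sub: &str, exp: i64) -> serde_json::Value {
    json!({
        "active": true,
        "scope": scope,
        "client_id": client_id,
        "sub": sub,
        "exp": exp,
        "token_type": "DPoP",
    })
}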
+606 src/pipethrough.rs
···
+//! Based on https://github.com/blacksky-algorithms/rsky/blob/main/rsky-pds/src/pipethrough.rs
+//! blacksky-algorithms/rsky is licensed under the Apache License 2.0
+//!
+//! Modified for Axum instead of Rocket
+
+use anyhow::{Result, bail};
+use axum::extract::{FromRequestParts, State};
+use rsky_identity::IdResolver;
+use rsky_pds::apis::ApiError;
+use rsky_pds::auth_verifier::{AccessOutput, AccessStandard};
+use rsky_pds::config::{ServerConfig, ServiceConfig, env_to_cfg};
+use rsky_pds::pipethrough::{OverrideOpts, ProxyHeader, UrlAndAud};
+use rsky_pds::xrpc_server::types::{HandlerPipeThrough, InvalidRequestError, XRPCError};
+use rsky_pds::{APP_USER_AGENT, SharedIdResolver, context};
+// use lazy_static::lazy_static;
+use reqwest::header::{CONTENT_TYPE, HeaderValue};
+use reqwest::{Client, Method, RequestBuilder, Response};
+// use rocket::data::ToByteUnit;
+// use rocket::http::{Method, Status};
+// use rocket::request::{FromRequest, Outcome, Request};
+// use rocket::{Data, State};
+use axum::{
+    body::Bytes,
+    http::{self, HeaderMap},
+};
+use rsky_common::{GetServiceEndpointOpts, get_service_endpoint};
+use rsky_repo::types::Ids;
+use serde::de::DeserializeOwned;
+use serde_json::Value as JsonValue;
+use std::collections::{BTreeMap, HashSet};
+use std::str::FromStr;
+use std::sync::Arc;
+use std::time::Duration;
+use ubyte::ToByteUnit as _;
+use url::Url;
+
+use crate::serve::AppState;
+
+// pub struct OverrideOpts {
+//     pub aud: Option<String>,
+//     pub lxm: Option<String>,
+// }
+
+// pub struct UrlAndAud {
+//     pub url: Url,
+//     pub aud: String,
+//     pub lxm: String,
+// }
+
+// pub struct ProxyHeader {
+//     pub did: String,
+//     pub service_url: String,
+// }
+
+pub struct ProxyRequest {
+    pub headers: BTreeMap<String, String>,
+    pub query: Option<String>,
+    pub path: String,
+    pub method: Method,
+    pub id_resolver: Arc<tokio::sync::RwLock<rsky_identity::IdResolver>>,
+    pub cfg: ServerConfig,
+}
+impl FromRequestParts<AppState> for ProxyRequest {
+    // type Rejection = ApiError;
+    type Rejection = axum::response::Response;
+
+    async fn from_request_parts(
+        parts: &mut axum::http::request::Parts,
+        state: &AppState,
+    ) -> Result<Self, Self::Rejection> {
+        let headers = parts
+            .headers
+            .iter()
+            .map(|(k, v)| (k.to_string(), v.to_str().unwrap_or("").to_string()))
+            .collect::<BTreeMap<String, String>>();
+        let query = parts.uri.query().map(|s| s.to_string());
+        let path = parts.uri.path().to_string();
+        let method = parts.method.clone();
+        let id_resolver = state.id_resolver.clone();
+        // let cfg = state.cfg.clone();
+        let cfg = env_to_cfg(); // TODO: use state.cfg.clone();
+
+        Ok(Self {
+            headers,
+            query,
+            path,
+            method,
+            id_resolver,
+            cfg,
+        })
+    }
+}
+
+// #[rocket::async_trait]
+// impl<'r> FromRequest<'r> for HandlerPipeThrough {
+//     type Error = anyhow::Error;
+
+//     #[tracing::instrument(skip_all)]
+//     async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
+//         match AccessStandard::from_request(req).await {
+//             Outcome::Success(output) => {
+//                 let AccessOutput { credentials, .. } = output.access;
+//                 let requester: Option<String> = match credentials {
+//                     None => None,
+//                     Some(credentials) => credentials.did,
+//                 };
+//                 let headers = req.headers().clone().into_iter().fold(
+//                     BTreeMap::new(),
+//                     |mut acc: BTreeMap<String, String>, cur| {
+//                         let _ = acc.insert(cur.name().to_string(), cur.value().to_string());
+//                         acc
+//                     },
+//                 );
+//                 let proxy_req = ProxyRequest {
+//                     headers,
+//                     query: match req.uri().query() {
+//                         None => None,
+//                         Some(query) => Some(query.to_string()),
+//                     },
+//                     path: req.uri().path().to_string(),
+//                     method: req.method(),
+//                     id_resolver: req.guard::<&State<SharedIdResolver>>().await.unwrap(),
+//                     cfg: req.guard::<&State<ServerConfig>>().await.unwrap(),
+//                 };
+//                 match pipethrough(
+//                     &proxy_req,
+//                     requester,
+//                     OverrideOpts {
+//                         aud: None,
+//                         lxm: None,
+//                     },
+//                 )
+//                 .await
+//                 {
+
// Ok(res) => Outcome::Success(res),
136
+
// Err(error) => match error.downcast_ref() {
137
+
// Some(InvalidRequestError::XRPCError(xrpc)) => {
138
+
// if let XRPCError::FailedResponse {
139
+
// status,
140
+
// error,
141
+
// message,
142
+
// headers,
143
+
// } = xrpc
144
+
// {
145
+
// tracing::error!(
146
+
// "@LOG: XRPC ERROR Status:{status}; Message: {message:?}; Error: {error:?}; Headers: {headers:?}"
147
+
// );
148
+
// }
149
+
// req.local_cache(|| Some(ApiError::InvalidRequest(error.to_string())));
150
+
// Outcome::Error((Status::BadRequest, error))
151
+
// }
152
+
// _ => {
153
+
// req.local_cache(|| Some(ApiError::InvalidRequest(error.to_string())));
154
+
// Outcome::Error((Status::BadRequest, error))
155
+
// }
156
+
// },
157
+
// }
158
+
// }
159
+
// Outcome::Error(err) => {
160
+
// req.local_cache(|| Some(ApiError::RuntimeError));
161
+
// Outcome::Error((
162
+
// Status::BadRequest,
163
+
// anyhow::Error::new(InvalidRequestError::AuthError(err.1)),
164
+
// ))
165
+
// }
166
+
// _ => panic!("Unexpected outcome during Pipethrough"),
167
+
// }
168
+
// }
169
+
// }
170
+
171
+
// #[rocket::async_trait]
172
+
// impl<'r> FromRequest<'r> for ProxyRequest<'r> {
173
+
// type Error = anyhow::Error;
174
+
175
+
// async fn from_request(req: &'r Request<'_>) -> Outcome<Self, Self::Error> {
176
+
// let headers = req.headers().clone().into_iter().fold(
177
+
// BTreeMap::new(),
178
+
// |mut acc: BTreeMap<String, String>, cur| {
179
+
// let _ = acc.insert(cur.name().to_string(), cur.value().to_string());
180
+
// acc
181
+
// },
182
+
// );
183
+
// Outcome::Success(Self {
184
+
// headers,
185
+
// query: match req.uri().query() {
186
+
// None => None,
187
+
// Some(query) => Some(query.to_string()),
188
+
// },
189
+
// path: req.uri().path().to_string(),
190
+
// method: req.method(),
191
+
// id_resolver: req.guard::<&State<SharedIdResolver>>().await.unwrap(),
192
+
// cfg: req.guard::<&State<ServerConfig>>().await.unwrap(),
193
+
// })
194
+
// }
195
+
// }
196
+
197
+
pub async fn pipethrough(
198
+
req: &ProxyRequest,
199
+
requester: Option<String>,
200
+
override_opts: OverrideOpts,
201
+
) -> Result<HandlerPipeThrough> {
202
+
let UrlAndAud {
203
+
url,
204
+
aud,
205
+
lxm: nsid,
206
+
} = format_url_and_aud(req, override_opts.aud).await?;
207
+
let lxm = override_opts.lxm.unwrap_or(nsid);
208
+
let headers = format_headers(req, aud, lxm, requester).await?;
209
+
let req_init = format_req_init(req, url, headers, None)?;
210
+
let res = make_request(req_init).await?;
211
+
parse_proxy_res(res).await
212
+
}
213
+
214
+
pub async fn pipethrough_procedure<T: serde::Serialize>(
215
+
req: &ProxyRequest,
216
+
requester: Option<String>,
217
+
body: Option<T>,
218
+
) -> Result<HandlerPipeThrough> {
219
+
let UrlAndAud {
220
+
url,
221
+
aud,
222
+
lxm: nsid,
223
+
} = format_url_and_aud(req, None).await?;
224
+
let headers = format_headers(req, aud, nsid, requester).await?;
225
+
let encoded_body: Option<Vec<u8>> = match body {
226
+
None => None,
227
+
Some(body) => Some(serde_json::to_string(&body)?.into_bytes()),
228
+
};
229
+
let req_init = format_req_init(req, url, headers, encoded_body)?;
230
+
let res = make_request(req_init).await?;
231
+
parse_proxy_res(res).await
232
+
}
233
+
234
+
#[tracing::instrument(skip_all)]
235
+
pub async fn pipethrough_procedure_post(
236
+
req: &ProxyRequest,
237
+
requester: Option<String>,
238
+
body: Option<Bytes>,
239
+
) -> Result<HandlerPipeThrough, ApiError> {
240
+
let UrlAndAud {
241
+
url,
242
+
aud,
243
+
lxm: nsid,
244
+
} = format_url_and_aud(req, None).await?;
245
+
let headers = format_headers(req, aud, nsid, requester).await?;
246
+
let encoded_body: Option<JsonValue>;
247
+
match body {
248
+
None => encoded_body = None,
249
+
Some(body) => {
250
+
// let res = match body.open(50.megabytes()).into_string().await {
251
+
// Ok(res1) => {
252
+
// tracing::info!(res1.value);
253
+
// res1.value
254
+
// }
255
+
// Err(error) => {
256
+
// tracing::error!("{error}");
257
+
// return Err(ApiError::RuntimeError);
258
+
// }
259
+
// };
260
+
let res = String::from_utf8(body.to_vec()).map_err(|_| ApiError::RuntimeError)?; // don't panic on invalid client UTF-8
261
+
262
+
match serde_json::from_str(res.as_str()) {
263
+
Ok(res) => {
264
+
encoded_body = Some(res);
265
+
}
266
+
Err(error) => {
267
+
tracing::error!("{error}");
268
+
return Err(ApiError::RuntimeError);
269
+
}
270
+
}
271
+
}
272
+
};
273
+
let req_init = format_req_init_with_value(req, url, headers, encoded_body)?;
274
+
let res = make_request(req_init).await?;
275
+
Ok(parse_proxy_res(res).await?)
276
+
}
277
+
278
+
// Request setup/formatting
279
+
// -------------------
280
+
281
+
const REQ_HEADERS_TO_FORWARD: [&str; 4] = [
282
+
"accept-language",
283
+
"content-type",
284
+
"atproto-accept-labelers",
285
+
"x-bsky-topics",
286
+
];
287
+
288
+
#[tracing::instrument(skip_all)]
289
+
pub async fn format_url_and_aud(
290
+
req: &ProxyRequest,
291
+
aud_override: Option<String>,
292
+
) -> Result<UrlAndAud> {
293
+
let proxy_to = parse_proxy_header(req).await?;
294
+
let nsid = parse_req_nsid(req);
295
+
let default_proxy = default_service(req, &nsid).await;
296
+
let service_url = match proxy_to {
297
+
Some(ref proxy_to) => {
298
+
tracing::info!(
299
+
"@LOG: format_url_and_aud() proxy_to: {:?}",
300
+
proxy_to.service_url
301
+
);
302
+
Some(proxy_to.service_url.clone())
303
+
}
304
+
None => match default_proxy {
305
+
Some(ref default_proxy) => Some(default_proxy.url.clone()),
306
+
None => None,
307
+
},
308
+
};
309
+
let aud = match aud_override {
310
+
Some(_) => aud_override,
311
+
None => match proxy_to {
312
+
Some(proxy_to) => Some(proxy_to.did),
313
+
None => match default_proxy {
314
+
Some(default_proxy) => Some(default_proxy.did),
315
+
None => None,
316
+
},
317
+
},
318
+
};
319
+
match (service_url, aud) {
320
+
(Some(service_url), Some(aud)) => {
321
+
let mut url = Url::parse(format!("{0}{1}", service_url, req.path).as_str())?;
322
+
if let Some(ref params) = req.query {
323
+
url.set_query(Some(params.as_str()));
324
+
}
325
+
if !req.cfg.service.dev_mode && !is_safe_url(url.clone()) {
326
+
bail!(InvalidRequestError::InvalidServiceUrl(url.to_string()));
327
+
}
328
+
Ok(UrlAndAud {
329
+
url,
330
+
aud,
331
+
lxm: nsid,
332
+
})
333
+
}
334
+
_ => bail!(InvalidRequestError::NoServiceConfigured(req.path.clone())),
335
+
}
336
+
}
337
+
338
+
pub async fn format_headers(
339
+
req: &ProxyRequest,
340
+
aud: String,
341
+
lxm: String,
342
+
requester: Option<String>,
343
+
) -> Result<HeaderMap> {
344
+
let mut headers: HeaderMap = match requester {
345
+
Some(requester) => context::service_auth_headers(&requester, &aud, &lxm).await?,
346
+
None => HeaderMap::new(),
347
+
};
348
+
// forward select headers to upstream services
349
+
for header in REQ_HEADERS_TO_FORWARD {
350
+
let val = req.headers.get(header);
351
+
if let Some(val) = val {
352
+
headers.insert(header, HeaderValue::from_str(val)?);
353
+
}
354
+
}
355
+
Ok(headers)
356
+
}
357
+
358
+
pub fn format_req_init(
359
+
req: &ProxyRequest,
360
+
url: Url,
361
+
headers: HeaderMap,
362
+
body: Option<Vec<u8>>,
363
+
) -> Result<RequestBuilder> {
364
+
match req.method {
365
+
Method::GET => {
366
+
let client = Client::builder()
367
+
.user_agent(APP_USER_AGENT)
368
+
.http2_keep_alive_while_idle(true)
369
+
.http2_keep_alive_timeout(Duration::from_secs(5))
370
+
.default_headers(headers)
371
+
.build()?;
372
+
Ok(client.get(url))
373
+
}
374
+
Method::HEAD => {
375
+
let client = Client::builder()
376
+
.user_agent(APP_USER_AGENT)
377
+
.http2_keep_alive_while_idle(true)
378
+
.http2_keep_alive_timeout(Duration::from_secs(5))
379
+
.default_headers(headers)
380
+
.build()?;
381
+
Ok(client.head(url))
382
+
}
383
+
Method::POST => {
384
+
let client = Client::builder()
385
+
.user_agent(APP_USER_AGENT)
386
+
.http2_keep_alive_while_idle(true)
387
+
.http2_keep_alive_timeout(Duration::from_secs(5))
388
+
.default_headers(headers)
389
+
.build()?;
390
+
Ok(client.post(url).body(body.unwrap()))
391
+
}
392
+
_ => bail!(InvalidRequestError::MethodNotFound),
393
+
}
394
+
}
395
+
396
+
pub fn format_req_init_with_value(
397
+
req: &ProxyRequest,
398
+
url: Url,
399
+
headers: HeaderMap,
400
+
body: Option<JsonValue>,
401
+
) -> Result<RequestBuilder> {
402
+
match req.method {
403
+
Method::GET => {
404
+
let client = Client::builder()
405
+
.user_agent(APP_USER_AGENT)
406
+
.http2_keep_alive_while_idle(true)
407
+
.http2_keep_alive_timeout(Duration::from_secs(5))
408
+
.default_headers(headers)
409
+
.build()?;
410
+
Ok(client.get(url))
411
+
}
412
+
Method::HEAD => {
413
+
let client = Client::builder()
414
+
.user_agent(APP_USER_AGENT)
415
+
.http2_keep_alive_while_idle(true)
416
+
.http2_keep_alive_timeout(Duration::from_secs(5))
417
+
.default_headers(headers)
418
+
.build()?;
419
+
Ok(client.head(url))
420
+
}
421
+
Method::POST => {
422
+
let client = Client::builder()
423
+
.user_agent(APP_USER_AGENT)
424
+
.http2_keep_alive_while_idle(true)
425
+
.http2_keep_alive_timeout(Duration::from_secs(5))
426
+
.default_headers(headers)
427
+
.build()?;
428
+
Ok(client.post(url).json(&body.unwrap()))
429
+
}
430
+
_ => bail!(InvalidRequestError::MethodNotFound),
431
+
}
432
+
}
433
+
434
+
pub async fn parse_proxy_header(req: &ProxyRequest) -> Result<Option<ProxyHeader>> {
435
+
let headers = &req.headers;
436
+
let proxy_to: Option<&String> = headers.get("atproto-proxy");
437
+
match proxy_to {
438
+
None => Ok(None),
439
+
Some(proxy_to) => {
440
+
let parts: Vec<&str> = proxy_to.split("#").collect::<Vec<&str>>();
441
+
match (parts.get(0), parts.get(1), parts.get(2)) {
442
+
(Some(did), Some(service_id), None) => {
443
+
let did = did.to_string();
444
+
let mut lock = req.id_resolver.write().await;
445
+
match lock.did.resolve(did.clone(), None).await? {
446
+
None => bail!(InvalidRequestError::CannotResolveProxyDid),
447
+
Some(did_doc) => {
448
+
match get_service_endpoint(
449
+
did_doc,
450
+
GetServiceEndpointOpts {
451
+
id: format!("#{service_id}"),
452
+
r#type: None,
453
+
},
454
+
) {
455
+
None => bail!(InvalidRequestError::CannotResolveServiceUrl),
456
+
Some(service_url) => Ok(Some(ProxyHeader { did, service_url })),
457
+
}
458
+
}
459
+
}
460
+
}
461
+
(_, None, _) => bail!(InvalidRequestError::NoServiceId),
462
+
_ => bail!("error parsing atproto-proxy header"),
463
+
}
464
+
}
465
+
}
466
+
}
467
+
468
+
pub fn parse_req_nsid(req: &ProxyRequest) -> String {
469
+
let nsid = req.path.as_str().replace("/xrpc/", "");
470
+
match nsid.ends_with("/") {
471
+
false => nsid,
472
+
true => nsid
473
+
.trim_end_matches('/')
474
+
.to_string(),
475
+
}
476
+
}
477
+
478
+
// Sending request
479
+
// -------------------
480
+
#[tracing::instrument(skip_all)]
481
+
pub async fn make_request(req_init: RequestBuilder) -> Result<Response> {
482
+
let res = req_init.send().await;
483
+
match res {
484
+
Err(e) => {
485
+
tracing::error!("@LOG WARN: pipethrough network error {}", e.to_string());
486
+
bail!(InvalidRequestError::XRPCError(XRPCError::UpstreamFailure))
487
+
}
488
+
Ok(res) => match res.error_for_status_ref() {
489
+
Ok(_) => Ok(res),
490
+
Err(_) => {
491
+
let status = res.status().to_string();
492
+
let headers = res.headers().clone();
493
+
let error_body = res.json::<JsonValue>().await?;
494
+
bail!(InvalidRequestError::XRPCError(XRPCError::FailedResponse {
495
+
status,
496
+
headers,
497
+
error: match error_body["error"].as_str() {
498
+
None => None,
499
+
Some(error_body_error) => Some(error_body_error.to_string()),
500
+
},
501
+
message: match error_body["message"].as_str() {
502
+
None => None,
503
+
Some(error_body_message) => Some(error_body_message.to_string()),
504
+
}
505
+
}))
506
+
}
507
+
},
508
+
}
509
+
}
510
+
511
+
// Response parsing/forwarding
512
+
// -------------------
513
+
514
+
const RES_HEADERS_TO_FORWARD: [&str; 4] = [
515
+
"content-type",
516
+
"content-language",
517
+
"atproto-repo-rev",
518
+
"atproto-content-labelers",
519
+
];
520
+
521
+
pub async fn parse_proxy_res(res: Response) -> Result<HandlerPipeThrough> {
522
+
let encoding = match res.headers().get(CONTENT_TYPE) {
523
+
Some(content_type) => content_type.to_str()?,
524
+
None => "application/json",
525
+
};
526
+
// Release borrow
527
+
let encoding = encoding.to_string();
528
+
let res_headers = RES_HEADERS_TO_FORWARD.into_iter().fold(
529
+
BTreeMap::new(),
530
+
|mut acc: BTreeMap<String, String>, cur| {
531
+
let _ = match res.headers().get(cur) {
532
+
Some(res_header_val) => acc.insert(
533
+
cur.to_string(),
534
+
res_header_val.clone().to_str().unwrap().to_string(),
535
+
),
536
+
None => None,
537
+
};
538
+
acc
539
+
},
540
+
);
541
+
let buffer = read_array_buffer_res(res).await?;
542
+
Ok(HandlerPipeThrough {
543
+
encoding,
544
+
buffer,
545
+
headers: Some(res_headers),
546
+
})
547
+
}
548
+
549
+
// Utils
550
+
// -------------------
551
+
552
+
pub async fn default_service(req: &ProxyRequest, nsid: &str) -> Option<ServiceConfig> {
553
+
let cfg = req.cfg.clone();
554
+
match Ids::from_str(nsid) {
555
+
Ok(Ids::ToolsOzoneTeamAddMember) => cfg.mod_service,
556
+
Ok(Ids::ToolsOzoneTeamDeleteMember) => cfg.mod_service,
557
+
Ok(Ids::ToolsOzoneTeamUpdateMember) => cfg.mod_service,
558
+
Ok(Ids::ToolsOzoneTeamListMembers) => cfg.mod_service,
559
+
Ok(Ids::ToolsOzoneCommunicationCreateTemplate) => cfg.mod_service,
560
+
Ok(Ids::ToolsOzoneCommunicationDeleteTemplate) => cfg.mod_service,
561
+
Ok(Ids::ToolsOzoneCommunicationUpdateTemplate) => cfg.mod_service,
562
+
Ok(Ids::ToolsOzoneCommunicationListTemplates) => cfg.mod_service,
563
+
Ok(Ids::ToolsOzoneModerationEmitEvent) => cfg.mod_service,
564
+
Ok(Ids::ToolsOzoneModerationGetEvent) => cfg.mod_service,
565
+
Ok(Ids::ToolsOzoneModerationGetRecord) => cfg.mod_service,
566
+
Ok(Ids::ToolsOzoneModerationGetRepo) => cfg.mod_service,
567
+
Ok(Ids::ToolsOzoneModerationQueryEvents) => cfg.mod_service,
568
+
Ok(Ids::ToolsOzoneModerationQueryStatuses) => cfg.mod_service,
569
+
Ok(Ids::ToolsOzoneModerationSearchRepos) => cfg.mod_service,
570
+
Ok(Ids::ComAtprotoModerationCreateReport) => cfg.report_service,
571
+
_ => cfg.bsky_app_view,
572
+
}
573
+
}
574
+
575
+
pub fn parse_res<T: DeserializeOwned>(_nsid: String, res: HandlerPipeThrough) -> Result<T> {
576
+
let buffer = res.buffer;
577
+
let record = serde_json::from_slice::<T>(buffer.as_slice())?;
578
+
Ok(record)
579
+
}
580
+
581
+
#[tracing::instrument(skip_all)]
582
+
pub async fn read_array_buffer_res(res: Response) -> Result<Vec<u8>> {
583
+
match res.bytes().await {
584
+
Ok(bytes) => Ok(bytes.to_vec()),
585
+
Err(err) => {
586
+
tracing::error!("@LOG WARN: pipethrough network error {}", err.to_string());
587
+
bail!("UpstreamFailure")
588
+
}
589
+
}
590
+
}
591
+
592
+
pub fn is_safe_url(url: Url) -> bool {
593
+
if url.scheme() != "https" {
594
+
return false;
595
+
}
596
+
match url.host_str() {
597
+
None => false,
598
+
Some(hostname) if hostname == "localhost" => false,
599
+
Some(hostname) => {
600
+
if std::net::IpAddr::from_str(hostname).is_ok() {
601
+
return false;
602
+
}
603
+
true
604
+
}
605
+
}
606
+
}
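`is_safe_url` is the guard that keeps non-dev proxying on public HTTPS hosts: it rejects plain HTTP, `localhost`, and raw IP literals. A small test sketch of that contract (module and test names are illustrative):

#[cfg(test)]
mod is_safe_url_tests {
    use super::is_safe_url;
    use url::Url;

    #[test]
    fn rejects_http_localhost_and_ip_literals() {
        // Public HTTPS hostname: allowed.
        assert!(is_safe_url(Url::parse("https://bsky.social/xrpc").unwrap()));
        // Plain HTTP, localhost, and IP literals: all rejected.
        assert!(!is_safe_url(Url::parse("http://bsky.social/xrpc").unwrap()));
        assert!(!is_safe_url(Url::parse("https://localhost/xrpc").unwrap()));
        assert!(!is_safe_url(Url::parse("https://127.0.0.1/xrpc").unwrap()));
    }
}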
-114
src/plc.rs
···
1
-
//! PLC operations.
2
-
use std::collections::HashMap;
3
-
4
-
use anyhow::{Context as _, bail};
5
-
use base64::Engine as _;
6
-
use serde::{Deserialize, Serialize};
7
-
use tracing::debug;
8
-
9
-
use crate::{Client, RotationKey};
10
-
11
-
/// The URL of the public PLC directory.
12
-
const PLC_DIRECTORY: &str = "https://plc.directory/";
13
-
14
-
#[derive(Debug, Deserialize, Serialize, Clone)]
15
-
#[serde(rename_all = "camelCase", tag = "type")]
16
-
/// A PLC service.
17
-
pub(crate) enum PlcService {
18
-
#[serde(rename = "AtprotoPersonalDataServer")]
19
-
/// A personal data server.
20
-
Pds {
21
-
/// The URL of the PDS.
22
-
endpoint: String,
23
-
},
24
-
}
25
-
26
-
#[expect(
27
-
clippy::arbitrary_source_item_ordering,
28
-
reason = "serialized data might be structured"
29
-
)]
30
-
#[derive(Debug, Deserialize, Serialize, Clone)]
31
-
#[serde(rename_all = "camelCase")]
32
-
pub(crate) struct PlcOperation {
33
-
#[serde(rename = "type")]
34
-
pub typ: String,
35
-
pub rotation_keys: Vec<String>,
36
-
pub verification_methods: HashMap<String, String>,
37
-
pub also_known_as: Vec<String>,
38
-
pub services: HashMap<String, PlcService>,
39
-
pub prev: Option<String>,
40
-
}
41
-
42
-
impl PlcOperation {
43
-
/// Sign an operation with the provided signature.
44
-
pub(crate) fn sign(self, sig: Vec<u8>) -> SignedPlcOperation {
45
-
SignedPlcOperation {
46
-
typ: self.typ,
47
-
rotation_keys: self.rotation_keys,
48
-
verification_methods: self.verification_methods,
49
-
also_known_as: self.also_known_as,
50
-
services: self.services,
51
-
prev: self.prev,
52
-
sig: base64::prelude::BASE64_URL_SAFE_NO_PAD.encode(sig),
53
-
}
54
-
}
55
-
}
56
-
57
-
#[expect(
58
-
clippy::arbitrary_source_item_ordering,
59
-
reason = "serialized data might be structured"
60
-
)]
61
-
#[derive(Debug, Deserialize, Serialize, Clone)]
62
-
#[serde(rename_all = "camelCase")]
63
-
/// A signed PLC operation.
64
-
pub(crate) struct SignedPlcOperation {
65
-
#[serde(rename = "type")]
66
-
pub typ: String,
67
-
pub rotation_keys: Vec<String>,
68
-
pub verification_methods: HashMap<String, String>,
69
-
pub also_known_as: Vec<String>,
70
-
pub services: HashMap<String, PlcService>,
71
-
pub prev: Option<String>,
72
-
pub sig: String,
73
-
}
74
-
75
-
pub(crate) fn sign_op(rkey: &RotationKey, op: PlcOperation) -> anyhow::Result<SignedPlcOperation> {
76
-
let bytes = serde_ipld_dagcbor::to_vec(&op).context("failed to encode op")?;
77
-
let bytes = rkey.sign(&bytes).context("failed to sign op")?;
78
-
79
-
Ok(op.sign(bytes))
80
-
}
81
-
82
-
/// Submit a PLC operation to the public directory.
83
-
pub(crate) async fn submit(
84
-
client: &Client,
85
-
did: &str,
86
-
op: &SignedPlcOperation,
87
-
) -> anyhow::Result<()> {
88
-
debug!(
89
-
"submitting {} {}",
90
-
did,
91
-
serde_json::to_string(&op).context("should serialize")?
92
-
);
93
-
94
-
let res = client
95
-
.post(format!("{PLC_DIRECTORY}{did}"))
96
-
.json(&op)
97
-
.send()
98
-
.await
99
-
.context("failed to send directory request")?;
100
-
101
-
if res.status().is_success() {
102
-
Ok(())
103
-
} else {
104
-
let e = res
105
-
.json::<serde_json::Value>()
106
-
.await
107
-
.context("failed to read error response")?;
108
-
109
-
bail!(
110
-
"error from PLC directory: {}",
111
-
serde_json::to_string(&e).context("should serialize")?
112
-
);
113
-
}
114
-
}
+313
src/schema.rs
···
1
+
#![allow(unnameable_types, unused_qualifications)]
2
+
pub mod pds {
3
+
4
+
// Legacy tables
5
+
6
+
diesel::table! {
7
+
oauth_par_requests (request_uri) {
8
+
request_uri -> Varchar,
9
+
client_id -> Varchar,
10
+
response_type -> Varchar,
11
+
code_challenge -> Varchar,
12
+
code_challenge_method -> Varchar,
13
+
state -> Nullable<Varchar>,
14
+
login_hint -> Nullable<Varchar>,
15
+
scope -> Nullable<Varchar>,
16
+
redirect_uri -> Nullable<Varchar>,
17
+
response_mode -> Nullable<Varchar>,
18
+
display -> Nullable<Varchar>,
19
+
created_at -> Int8,
20
+
expires_at -> Int8,
21
+
}
22
+
}
23
+
diesel::table! {
24
+
oauth_authorization_codes (code) {
25
+
code -> Varchar,
26
+
client_id -> Varchar,
27
+
subject -> Varchar,
28
+
code_challenge -> Varchar,
29
+
code_challenge_method -> Varchar,
30
+
redirect_uri -> Varchar,
31
+
scope -> Nullable<Varchar>,
32
+
created_at -> Int8,
33
+
expires_at -> Int8,
34
+
used -> Bool,
35
+
}
36
+
}
37
+
diesel::table! {
38
+
oauth_refresh_tokens (token) {
39
+
token -> Varchar,
40
+
client_id -> Varchar,
41
+
subject -> Varchar,
42
+
dpop_thumbprint -> Varchar,
43
+
scope -> Nullable<Varchar>,
44
+
created_at -> Int8,
45
+
expires_at -> Int8,
46
+
revoked -> Bool,
47
+
}
48
+
}
49
+
diesel::table! {
50
+
oauth_used_jtis (jti) {
51
+
jti -> Varchar,
52
+
issuer -> Varchar,
53
+
created_at -> Int8,
54
+
expires_at -> Int8,
55
+
}
56
+
}
57
+
58
+
// Upcoming tables
59
+
60
+
diesel::table! {
61
+
account (did) {
62
+
did -> Varchar,
63
+
email -> Varchar,
64
+
recoveryKey -> Nullable<Varchar>,
65
+
password -> Varchar,
66
+
createdAt -> Varchar,
67
+
invitesDisabled -> Int2,
68
+
emailConfirmedAt -> Nullable<Varchar>,
69
+
}
70
+
}
71
+
72
+
diesel::table! {
73
+
actor (did) {
74
+
did -> Varchar,
75
+
handle -> Nullable<Varchar>,
76
+
createdAt -> Varchar,
77
+
takedownRef -> Nullable<Varchar>,
78
+
deactivatedAt -> Nullable<Varchar>,
79
+
deleteAfter -> Nullable<Varchar>,
80
+
}
81
+
}
82
+
83
+
diesel::table! {
84
+
app_password (did, name) {
85
+
did -> Varchar,
86
+
name -> Varchar,
87
+
password -> Varchar,
88
+
createdAt -> Varchar,
89
+
}
90
+
}
91
+
92
+
diesel::table! {
93
+
authorization_request (id) {
94
+
id -> Varchar,
95
+
did -> Nullable<Varchar>,
96
+
deviceId -> Nullable<Varchar>,
97
+
clientId -> Varchar,
98
+
clientAuth -> Varchar,
99
+
parameters -> Varchar,
100
+
expiresAt -> TimestamptzSqlite,
101
+
code -> Nullable<Varchar>,
102
+
}
103
+
}
104
+
105
+
diesel::table! {
106
+
device (id) {
107
+
id -> Varchar,
108
+
sessionId -> Nullable<Varchar>,
109
+
userAgent -> Nullable<Varchar>,
110
+
ipAddress -> Varchar,
111
+
lastSeenAt -> TimestamptzSqlite,
112
+
}
113
+
}
114
+
115
+
diesel::table! {
116
+
device_account (deviceId, did) {
117
+
did -> Varchar,
118
+
deviceId -> Varchar,
119
+
authenticatedAt -> TimestamptzSqlite,
120
+
remember -> Bool,
121
+
authorizedClients -> Varchar,
122
+
}
123
+
}
124
+
125
+
diesel::table! {
126
+
did_doc (did) {
127
+
did -> Varchar,
128
+
doc -> Text,
129
+
updatedAt -> Int8,
130
+
}
131
+
}
132
+
133
+
diesel::table! {
134
+
email_token (purpose, did) {
135
+
purpose -> Varchar,
136
+
did -> Varchar,
137
+
token -> Varchar,
138
+
requestedAt -> Varchar,
139
+
}
140
+
}
141
+
142
+
diesel::table! {
143
+
invite_code (code) {
144
+
code -> Varchar,
145
+
availableUses -> Int4,
146
+
disabled -> Int2,
147
+
forAccount -> Varchar,
148
+
createdBy -> Varchar,
149
+
createdAt -> Varchar,
150
+
}
151
+
}
152
+
153
+
diesel::table! {
154
+
invite_code_use (code, usedBy) {
155
+
code -> Varchar,
156
+
usedBy -> Varchar,
157
+
usedAt -> Varchar,
158
+
}
159
+
}
160
+
161
+
diesel::table! {
162
+
refresh_token (id) {
163
+
id -> Varchar,
164
+
did -> Varchar,
165
+
expiresAt -> Varchar,
166
+
nextId -> Nullable<Varchar>,
167
+
appPasswordName -> Nullable<Varchar>,
168
+
}
169
+
}
170
+
171
+
diesel::table! {
172
+
repo_seq (seq) {
173
+
seq -> Int8,
174
+
did -> Varchar,
175
+
eventType -> Varchar,
176
+
event -> Bytea,
177
+
invalidated -> Int2,
178
+
sequencedAt -> Varchar,
179
+
}
180
+
}
181
+
182
+
diesel::table! {
183
+
token (id) {
184
+
id -> Varchar,
185
+
did -> Varchar,
186
+
tokenId -> Varchar,
187
+
createdAt -> TimestamptzSqlite,
188
+
updatedAt -> TimestamptzSqlite,
189
+
expiresAt -> TimestamptzSqlite,
190
+
clientId -> Varchar,
191
+
clientAuth -> Varchar,
192
+
deviceId -> Nullable<Varchar>,
193
+
parameters -> Varchar,
194
+
details -> Nullable<Varchar>,
195
+
code -> Nullable<Varchar>,
196
+
currentRefreshToken -> Nullable<Varchar>,
197
+
}
198
+
}
199
+
200
+
diesel::table! {
201
+
used_refresh_token (refreshToken) {
202
+
refreshToken -> Varchar,
203
+
tokenId -> Varchar,
204
+
}
205
+
}
206
+
207
+
diesel::allow_tables_to_appear_in_same_query!(
208
+
account,
209
+
actor,
210
+
app_password,
211
+
authorization_request,
212
+
device,
213
+
device_account,
214
+
did_doc,
215
+
email_token,
216
+
invite_code,
217
+
invite_code_use,
218
+
refresh_token,
219
+
repo_seq,
220
+
token,
221
+
used_refresh_token,
222
+
);
223
+
}
224
+
225
+
pub mod actor_store {
226
+
// Actor Store
227
+
228
+
// Blob
229
+
diesel::table! {
230
+
blob (cid, did) {
231
+
cid -> Varchar,
232
+
did -> Varchar,
233
+
mimeType -> Varchar,
234
+
size -> Int4,
235
+
tempKey -> Nullable<Varchar>,
236
+
width -> Nullable<Int4>,
237
+
height -> Nullable<Int4>,
238
+
createdAt -> Varchar,
239
+
takedownRef -> Nullable<Varchar>,
240
+
}
241
+
}
242
+
243
+
diesel::table! {
244
+
record_blob (blobCid, recordUri) {
245
+
blobCid -> Varchar,
246
+
recordUri -> Varchar,
247
+
did -> Varchar,
248
+
}
249
+
}
250
+
251
+
// Preference
252
+
253
+
diesel::table! {
254
+
account_pref (id) {
255
+
id -> Int4,
256
+
did -> Varchar,
257
+
name -> Varchar,
258
+
valueJson -> Nullable<Text>,
259
+
}
260
+
}
261
+
// Record
262
+
263
+
diesel::table! {
264
+
record (uri) {
265
+
uri -> Varchar,
266
+
cid -> Varchar,
267
+
did -> Varchar,
268
+
collection -> Varchar,
269
+
rkey -> Varchar,
270
+
repoRev -> Nullable<Varchar>,
271
+
indexedAt -> Varchar,
272
+
takedownRef -> Nullable<Varchar>,
273
+
}
274
+
}
275
+
276
+
diesel::table! {
277
+
repo_block (cid, did) {
278
+
cid -> Varchar,
279
+
did -> Varchar,
280
+
repoRev -> Varchar,
281
+
size -> Int4,
282
+
content -> Bytea,
283
+
}
284
+
}
285
+
286
+
diesel::table! {
287
+
backlink (uri, path) {
288
+
uri -> Varchar,
289
+
path -> Varchar,
290
+
linkTo -> Varchar,
291
+
}
292
+
}
293
+
// sql_repo
294
+
295
+
diesel::table! {
296
+
repo_root (did) {
297
+
did -> Varchar,
298
+
cid -> Varchar,
299
+
rev -> Varchar,
300
+
indexedAt -> Varchar,
301
+
}
302
+
}
303
+
304
+
diesel::allow_tables_to_appear_in_same_query!(
305
+
account_pref,
306
+
backlink,
307
+
blob,
308
+
record,
309
+
record_blob,
310
+
repo_block,
311
+
repo_root,
312
+
);
313
+
}
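Each `diesel::table!` invocation above generates a typed DSL module, so queries against these tables are checked at compile time. A minimal sketch of consuming the generated `pds` schema; the helper is hypothetical, and the connection type is assumed to be SQLite to match `deadpool_diesel::sqlite`:

use diesel::prelude::*;
use diesel::{QueryResult, sqlite::SqliteConnection};

// Hypothetical helper: count invite codes that are enabled and still have uses left.
fn count_open_invites(conn: &mut SqliteConnection) -> QueryResult<i64> {
    use crate::schema::pds::invite_code::dsl::*;
    invite_code
        .filter(disabled.eq(0_i16)) // Int2 column maps to i16
        .filter(availableUses.gt(0))
        .count()
        .get_result(conn)
}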
+429
src/serve.rs
···
1
+
use super::account_manager::AccountManager;
2
+
use super::config::AppConfig;
3
+
use super::db::establish_pool;
4
+
pub use super::error::Error;
5
+
use super::service_proxy::service_proxy;
6
+
use anyhow::Context as _;
7
+
use atrium_api::types::string::Did;
8
+
use atrium_crypto::keypair::{Export as _, Secp256k1Keypair};
9
+
use axum::{Router, extract::FromRef, routing::get};
10
+
use clap::Parser;
11
+
use clap_verbosity_flag::{InfoLevel, Verbosity, log::LevelFilter};
12
+
use deadpool_diesel::sqlite::Pool;
13
+
use diesel::prelude::*;
14
+
use diesel_migrations::{EmbeddedMigrations, embed_migrations};
15
+
use figment::{Figment, providers::Format as _};
16
+
use http_cache_reqwest::{CacheMode, HttpCacheOptions, MokaManager};
17
+
use rsky_common::env::env_list;
18
+
use rsky_identity::IdResolver;
19
+
use rsky_identity::types::{DidCache, IdentityResolverOpts};
20
+
use rsky_pds::{crawlers::Crawlers, sequencer::Sequencer};
21
+
use serde::{Deserialize, Serialize};
22
+
use std::env;
23
+
use std::{
24
+
net::{IpAddr, Ipv4Addr, SocketAddr},
25
+
path::PathBuf,
26
+
str::FromStr as _,
27
+
sync::Arc,
28
+
};
29
+
use tokio::{net::TcpListener, sync::RwLock};
30
+
use tower_http::{cors::CorsLayer, trace::TraceLayer};
31
+
use tracing::{info, warn};
32
+
use uuid::Uuid;
33
+
34
+
/// The application user agent. Concatenates the package name and version. e.g. `bluepds/0.0.0`.
35
+
pub const APP_USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"),);
36
+
37
+
/// Embedded migrations
38
+
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");
39
+
pub const MIGRATIONS_ACTOR: EmbeddedMigrations = embed_migrations!("./migrations_actor");
40
+
41
+
/// The application-wide result type.
42
+
pub type Result<T> = std::result::Result<T, Error>;
43
+
/// The reqwest client type with middleware.
44
+
pub type Client = reqwest_middleware::ClientWithMiddleware;
45
+
46
+
#[expect(
47
+
clippy::arbitrary_source_item_ordering,
48
+
reason = "serialized data might be structured"
49
+
)]
50
+
#[derive(Serialize, Deserialize, Debug, Clone)]
51
+
/// The key data structure.
52
+
struct KeyData {
53
+
/// Primary signing key for all repo operations.
54
+
skey: Vec<u8>,
55
+
/// Primary signing (rotation) key for all PLC operations.
56
+
rkey: Vec<u8>,
57
+
}
58
+
59
+
// FIXME: We should use P256Keypair instead. SecP256K1 is primarily used for cryptocurrencies,
60
+
// and the implementations of this algorithm are much more limited as compared to P256.
61
+
//
62
+
// Reference: https://soatok.blog/2022/05/19/guidance-for-choosing-an-elliptic-curve-signature-algorithm-in-2022/
63
+
#[derive(Clone)]
64
+
/// The signing key for PLC/DID operations.
65
+
pub struct SigningKey(Arc<Secp256k1Keypair>);
66
+
#[derive(Clone)]
67
+
/// The rotation key for PLC operations.
68
+
pub struct RotationKey(Arc<Secp256k1Keypair>);
69
+
70
+
impl std::ops::Deref for SigningKey {
71
+
type Target = Secp256k1Keypair;
72
+
73
+
fn deref(&self) -> &Self::Target {
74
+
&self.0
75
+
}
76
+
}
77
+
78
+
impl SigningKey {
79
+
/// Import from a private key.
80
+
pub fn import(key: &[u8]) -> Result<Self> {
81
+
let key = Secp256k1Keypair::import(key).context("failed to import signing key")?;
82
+
Ok(Self(Arc::new(key)))
83
+
}
84
+
}
85
+
86
+
impl std::ops::Deref for RotationKey {
87
+
type Target = Secp256k1Keypair;
88
+
89
+
fn deref(&self) -> &Self::Target {
90
+
&self.0
91
+
}
92
+
}
93
+
94
+
#[derive(Parser, Debug, Clone)]
95
+
/// Command line arguments.
96
+
pub struct Args {
97
+
/// Path to the configuration file
98
+
#[arg(short, long, default_value = "default.toml")]
99
+
pub config: PathBuf,
100
+
/// The verbosity level.
101
+
#[command(flatten)]
102
+
pub verbosity: Verbosity<InfoLevel>,
103
+
}
104
+
105
+
/// The actor pools for the database connections.
106
+
pub struct ActorStorage {
107
+
/// The database connection pool for the actor's repository.
108
+
pub repo: Pool,
109
+
/// The file storage path for the actor's blobs.
110
+
pub blob: PathBuf,
111
+
}
112
+
113
+
impl Clone for ActorStorage {
114
+
fn clone(&self) -> Self {
115
+
Self {
116
+
repo: self.repo.clone(),
117
+
blob: self.blob.clone(),
118
+
}
119
+
}
120
+
}
121
+
122
+
#[expect(clippy::arbitrary_source_item_ordering, reason = "arbitrary")]
123
+
#[derive(Clone, FromRef)]
124
+
/// The application state, shared across all routes.
125
+
pub struct AppState {
126
+
/// The application configuration.
127
+
pub(crate) config: AppConfig,
128
+
/// The main database connection pool. Used for common PDS data, like invite codes.
129
+
pub db: Pool,
130
+
/// Actor-specific database connection pools. Hashed by DID.
131
+
pub db_actors: std::collections::HashMap<String, ActorStorage>,
132
+
133
+
/// The HTTP client with middleware.
134
+
pub client: Client,
135
+
/// The simple HTTP client.
136
+
pub simple_client: reqwest::Client,
137
+
/// The firehose producer.
138
+
pub sequencer: Arc<RwLock<Sequencer>>,
139
+
/// The account manager.
140
+
pub account_manager: Arc<RwLock<AccountManager>>,
141
+
/// The ID resolver.
142
+
pub id_resolver: Arc<RwLock<IdResolver>>,
143
+
144
+
/// The signing key.
145
+
pub signing_key: SigningKey,
146
+
/// The rotation key.
147
+
pub rotation_key: RotationKey,
148
+
}
149
+
150
+
/// The main application entry point.
151
+
#[expect(
152
+
clippy::cognitive_complexity,
153
+
clippy::too_many_lines,
154
+
unused_qualifications,
155
+
reason = "main function has high complexity"
156
+
)]
157
+
pub async fn run() -> anyhow::Result<()> {
158
+
let args = Args::parse();
159
+
160
+
// Set up trace logging to console and account for the user-provided verbosity flag.
161
+
if args.verbosity.log_level_filter() != LevelFilter::Off {
162
+
let lvl = match args.verbosity.log_level_filter() {
163
+
LevelFilter::Error => tracing::Level::ERROR,
164
+
LevelFilter::Warn => tracing::Level::WARN,
165
+
LevelFilter::Info | LevelFilter::Off => tracing::Level::INFO,
166
+
LevelFilter::Debug => tracing::Level::DEBUG,
167
+
LevelFilter::Trace => tracing::Level::TRACE,
168
+
};
169
+
tracing_subscriber::fmt().with_max_level(lvl).init();
170
+
}
171
+
172
+
if !args.config.exists() {
173
+
// Throw up a warning if the config file does not exist.
174
+
//
175
+
// This is not fatal because users can specify all configuration settings via
176
+
// the environment, but the most likely scenario here is that a user accidentally
177
+
// omitted the config file for some reason (e.g. forgot to mount it into Docker).
178
+
warn!(
179
+
"configuration file {} does not exist",
180
+
args.config.display()
181
+
);
182
+
}
183
+
184
+
// Read and parse the user-provided configuration.
185
+
let config: AppConfig = Figment::new()
186
+
.admerge(figment::providers::Toml::file(args.config))
187
+
.admerge(figment::providers::Env::prefixed("BLUEPDS_"))
188
+
.extract()
189
+
.context("failed to load configuration")?;
190
+
191
+
if config.test {
192
+
warn!("BluePDS starting up in TEST mode.");
193
+
warn!("This means the application will not federate with the rest of the network.");
194
+
warn!(
195
+
"If you want to turn this off, either set `test` to false in the config or define `BLUEPDS_TEST = false`"
196
+
);
197
+
}
198
+
199
+
// Initialize metrics reporting.
200
+
super::metrics::setup(config.metrics.as_ref()).context("failed to set up metrics exporter")?;
201
+
202
+
// Create a reqwest client that will be used for all outbound requests.
203
+
let simple_client = reqwest::Client::builder()
204
+
.user_agent(APP_USER_AGENT)
205
+
.build()
206
+
.context("failed to build requester client")?;
207
+
let client = reqwest_middleware::ClientBuilder::new(simple_client.clone())
208
+
.with(http_cache_reqwest::Cache(http_cache_reqwest::HttpCache {
209
+
mode: CacheMode::Default,
210
+
manager: MokaManager::default(),
211
+
options: HttpCacheOptions::default(),
212
+
}))
213
+
.build();
214
+
215
+
tokio::fs::create_dir_all(&config.key.parent().context("should have parent")?)
216
+
.await
217
+
.context("failed to create key directory")?;
218
+
219
+
// Check if crypto keys exist. If not, create new ones.
220
+
let (skey, rkey) = if let Ok(f) = std::fs::File::open(&config.key) {
221
+
let keys: KeyData = serde_ipld_dagcbor::from_reader(std::io::BufReader::new(f))
222
+
.context("failed to deserialize crypto keys")?;
223
+
224
+
let skey = Secp256k1Keypair::import(&keys.skey).context("failed to import signing key")?;
225
+
let rkey = Secp256k1Keypair::import(&keys.rkey).context("failed to import rotation key")?;
226
+
227
+
(SigningKey(Arc::new(skey)), RotationKey(Arc::new(rkey)))
228
+
} else {
229
+
info!("signing keys not found, generating new ones");
230
+
231
+
let skey = Secp256k1Keypair::create(&mut rand::thread_rng());
232
+
let rkey = Secp256k1Keypair::create(&mut rand::thread_rng());
233
+
234
+
let keys = KeyData {
235
+
skey: skey.export(),
236
+
rkey: rkey.export(),
237
+
};
238
+
239
+
let mut f = std::fs::File::create(&config.key).context("failed to create key file")?;
240
+
serde_ipld_dagcbor::to_writer(&mut f, &keys).context("failed to serialize crypto keys")?;
241
+
242
+
(SigningKey(Arc::new(skey)), RotationKey(Arc::new(rkey)))
243
+
};
244
+
245
+
tokio::fs::create_dir_all(&config.repo.path).await?;
246
+
tokio::fs::create_dir_all(&config.plc.path).await?;
247
+
tokio::fs::create_dir_all(&config.blob.path).await?;
248
+
249
+
// Create a database connection manager and pool for the main database.
250
+
let pool =
251
+
establish_pool(&config.db).context("failed to establish database connection pool")?;
252
+
253
+
// Create a dictionary of database connection pools for each actor.
254
+
let mut actor_pools = std::collections::HashMap::new();
255
+
// We'll determine actors by looking in the data/repo dir for .db files.
256
+
let mut actor_dbs = tokio::fs::read_dir(&config.repo.path)
257
+
.await
258
+
.context("failed to read repo directory")?;
259
+
while let Some(entry) = actor_dbs
260
+
.next_entry()
261
+
.await
262
+
.context("failed to read repo dir")?
263
+
{
264
+
let path = entry.path();
265
+
if path.extension().and_then(|s| s.to_str()) == Some("db") {
266
+
let actor_repo_pool = establish_pool(&format!("sqlite://{}", path.display()))
267
+
.context("failed to create database connection pool")?;
268
+
269
+
let did = Did::from_str(&format!(
270
+
"did:plc:{}",
271
+
path.file_stem()
272
+
.and_then(|s| s.to_str())
273
+
.context("failed to get actor DID")?
274
+
))
275
+
.expect("should be able to parse actor DID")
276
+
.to_string();
277
+
let blob_path = config.blob.path.to_path_buf();
278
+
let actor_storage = ActorStorage {
279
+
repo: actor_repo_pool,
280
+
blob: blob_path.clone(),
281
+
};
282
+
drop(actor_pools.insert(did, actor_storage));
283
+
}
284
+
}
285
+
// Apply pending migrations
286
+
// let conn = pool.get().await?;
287
+
// conn.run_pending_migrations(MIGRATIONS)
288
+
// .expect("should be able to run migrations");
289
+
290
+
let hostname = config.host_name.clone();
291
+
let crawlers: Vec<String> = config
292
+
.firehose
293
+
.relays
294
+
.iter()
295
+
.map(|s| s.to_string())
296
+
.collect();
297
+
let sequencer = Arc::new(RwLock::new(Sequencer::new(
298
+
Crawlers::new(hostname, crawlers.clone()),
299
+
None,
300
+
)));
301
+
let account_manager = Arc::new(RwLock::new(AccountManager::new(pool.clone())));
302
+
let plc_url = if cfg!(debug_assertions) {
303
+
"http://localhost:8000".to_owned() // dummy for debug
304
+
} else {
305
+
env::var("PDS_DID_PLC_URL").unwrap_or("https://plc.directory".to_owned()) // TODO: toml config
306
+
};
307
+
let id_resolver = Arc::new(RwLock::new(IdResolver::new(IdentityResolverOpts {
308
+
timeout: None,
309
+
plc_url: Some(plc_url),
310
+
did_cache: Some(DidCache::new(None, None)),
311
+
backup_nameservers: Some(env_list("PDS_HANDLE_BACKUP_NAMESERVERS")),
312
+
})));
313
+
314
+
let addr = config
315
+
.listen_address
316
+
.unwrap_or(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8000));
317
+
318
+
let app = Router::new()
319
+
.route("/", get(super::index))
320
+
.merge(super::oauth::routes())
321
+
.nest(
322
+
"/xrpc",
323
+
super::apis::routes()
324
+
.merge(super::actor_endpoints::routes())
325
+
.fallback(service_proxy),
326
+
)
327
+
// .layer(RateLimitLayer::new(30, Duration::from_secs(30)))
328
+
.layer(CorsLayer::permissive())
329
+
.layer(TraceLayer::new_for_http())
330
+
.with_state(AppState {
331
+
config: config.clone(),
332
+
db: pool.clone(),
333
+
db_actors: actor_pools.clone(),
334
+
client: client.clone(),
335
+
simple_client,
336
+
sequencer: sequencer.clone(),
337
+
account_manager,
338
+
id_resolver,
339
+
signing_key: skey,
340
+
rotation_key: rkey,
341
+
});
342
+
343
+
info!("listening on {addr}");
344
+
info!("connect to: http://127.0.0.1:{}", addr.port());
345
+
346
+
// Determine whether or not this was the first startup (i.e. no accounts exist and no invite codes were created).
347
+
// If so, create an invite code and share it via the console.
348
+
let conn = pool.get().await.context("failed to get db connection")?;
349
+
350
+
#[derive(QueryableByName)]
351
+
struct TotalCount {
352
+
#[diesel(sql_type = diesel::sql_types::Integer)]
353
+
total_count: i32,
354
+
}
355
+
356
+
let result = conn.interact(move |conn| {
357
+
diesel::sql_query(
358
+
"SELECT (SELECT COUNT(*) FROM account) + (SELECT COUNT(*) FROM invite_code) AS total_count",
359
+
)
360
+
.get_result::<TotalCount>(conn)
361
+
})
362
+
.await
363
+
.expect("should be able to query database")?;
364
+
365
+
let c = result.total_count;
366
+
367
+
#[expect(clippy::print_stdout)]
368
+
if c == 0 {
369
+
let uuid = Uuid::new_v4().to_string();
370
+
371
+
use crate::models::pds as models;
372
+
use crate::schema::pds::invite_code::dsl as InviteCode;
373
+
let uuid_clone = uuid.clone();
374
+
drop(
375
+
conn.interact(move |conn| {
376
+
diesel::insert_into(InviteCode::invite_code)
377
+
.values(models::InviteCode {
378
+
code: uuid_clone,
379
+
available_uses: 1,
380
+
disabled: 0,
381
+
for_account: "None".to_owned(),
382
+
created_by: "None".to_owned(),
383
+
created_at: "None".to_owned(),
384
+
})
385
+
.execute(conn)
386
+
.context("failed to create new invite code")
387
+
})
388
+
.await
389
+
.expect("should be able to create invite code"),
390
+
);
391
+
392
+
// N.B: This is a sensitive message, so we're bypassing `tracing` here and
393
+
// logging it directly to console.
394
+
println!("=====================================");
395
+
println!(" FIRST STARTUP ");
396
+
println!("=====================================");
397
+
println!("Use this code to create an account:");
398
+
println!("{uuid}");
399
+
println!("=====================================");
400
+
}
401
+
402
+
let listener = TcpListener::bind(&addr)
403
+
.await
404
+
.context("failed to bind address")?;
405
+
406
+
// Serve the app, and request crawling from upstream relays.
407
+
let serve = tokio::spawn(async move {
408
+
axum::serve(listener, app.into_make_service())
409
+
.await
410
+
.context("failed to serve app")
411
+
});
412
+
413
+
// Now that the app is live, request a crawl from upstream relays.
414
+
if cfg!(debug_assertions) {
415
+
info!("debug mode: not requesting crawl");
416
+
} else {
417
+
info!("requesting crawl from upstream relays");
418
+
let mut background_sequencer = sequencer.write().await.clone();
419
+
drop(tokio::spawn(
420
+
async move { background_sequencer.start().await },
421
+
));
422
+
}
423
+
424
+
serve
425
+
.await
426
+
.map_err(Into::into)
427
+
.and_then(|r| r)
428
+
.context("failed to serve app")
429
+
}
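Because `run()` owns the whole startup sequence (config, keys, pools, router, listener), the binary entry point can stay a one-liner. A minimal sketch, assuming the crate is named `bluepds` (as `APP_USER_AGENT` suggests) and exposes this module as `serve`:

// Hypothetical main.rs for this crate layout.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    bluepds::serve::run().await
}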
+123
src/service_proxy.rs
···
1
+
//! Service proxy.
2
+
//!
3
+
//! Reference: <https://atproto.com/specs/xrpc#service-proxying>
4
+
use anyhow::{Context as _, anyhow};
5
+
use atrium_api::types::string::Did;
6
+
use axum::{
7
+
body::Body,
8
+
extract::{Request, State},
9
+
http::{self, HeaderMap, Response, StatusCode, Uri},
10
+
};
11
+
use rand::Rng as _;
12
+
use std::str::FromStr as _;
13
+
14
+
use super::{
15
+
auth::AuthenticatedUser,
16
+
serve::{Client, Error, Result, SigningKey},
17
+
};
18
+
19
+
pub(super) async fn service_proxy(
20
+
uri: Uri,
21
+
user: AuthenticatedUser,
22
+
State(skey): State<SigningKey>,
23
+
State(client): State<reqwest::Client>,
24
+
headers: HeaderMap,
25
+
request: Request<Body>,
26
+
) -> Result<Response<Body>> {
27
+
let url_path = uri.path_and_query().context("invalid service proxy url")?;
28
+
let lxm = url_path
29
+
.path()
30
+
.strip_prefix("/")
31
+
.with_context(|| format!("invalid service proxy url prefix: {}", url_path.path()))?;
32
+
33
+
let user_did = user.did();
34
+
let (did, id) = match headers.get("atproto-proxy") {
35
+
Some(val) => {
36
+
let val =
37
+
std::str::from_utf8(val.as_bytes()).context("proxy header not valid utf-8")?;
38
+
39
+
let (did, id) = val.split_once('#').context("invalid proxy header")?;
40
+
41
+
let did =
42
+
Did::from_str(did).map_err(|e| anyhow!("atproto proxy not a valid DID: {e}"))?;
43
+
44
+
(did, format!("#{id}"))
45
+
}
46
+
// HACK: Assume the bluesky appview by default.
47
+
None => (
48
+
Did::new("did:web:api.bsky.app".to_owned())
49
+
.expect("service proxy should be a valid DID"),
50
+
"#bsky_appview".to_owned(),
51
+
),
52
+
};
53
+
54
+
let did_doc = super::did::resolve(&Client::new(client.clone(), []), did.clone())
55
+
.await
56
+
.with_context(|| format!("failed to resolve did document {}", did.as_str()))?;
57
+
58
+
let Some(service) = did_doc.service.iter().find(|s| s.id == id) else {
59
+
return Err(Error::with_status(
60
+
StatusCode::BAD_REQUEST,
61
+
anyhow!("could not find resolve service #{id}"),
62
+
));
63
+
};
64
+
65
+
let target_url: url::Url = service
66
+
.service_endpoint
67
+
.join(&format!("/xrpc{url_path}"))
68
+
.context("failed to construct target url")?;
69
+
70
+
let exp = (chrono::Utc::now().checked_add_signed(chrono::Duration::minutes(1)))
71
+
.context("should be valid expiration datetime")?
72
+
.timestamp();
73
+
let jti = rand::thread_rng()
74
+
.sample_iter(rand::distributions::Alphanumeric)
75
+
.take(10)
76
+
.map(char::from)
77
+
.collect::<String>();
78
+
79
+
// Mint a bearer token by signing a JSON web token.
80
+
// https://github.com/DavidBuchanan314/millipds/blob/5c7529a739d394e223c0347764f1cf4e8fd69f94/src/millipds/appview_proxy.py#L47-L59
81
+
let token = super::auth::sign(
82
+
&skey,
83
+
"JWT",
84
+
&serde_json::json!({
85
+
"iss": user_did.as_str(),
86
+
"aud": did.as_str(),
87
+
"lxm": lxm,
88
+
"exp": exp,
89
+
"jti": jti,
90
+
}),
91
+
)
92
+
.context("failed to sign jwt")?;
93
+
94
+
let mut h = HeaderMap::new();
95
+
if let Some(hdr) = request.headers().get("atproto-accept-labelers") {
96
+
drop(h.insert("atproto-accept-labelers", hdr.clone()));
97
+
}
98
+
if let Some(hdr) = request.headers().get(http::header::CONTENT_TYPE) {
99
+
drop(h.insert(http::header::CONTENT_TYPE, hdr.clone()));
100
+
}
101
+
102
+
let r = client
103
+
.request(request.method().clone(), target_url)
104
+
.headers(h)
105
+
.header(http::header::AUTHORIZATION, format!("Bearer {token}"))
106
+
.body(reqwest::Body::wrap_stream(
107
+
request.into_body().into_data_stream(),
108
+
))
109
+
.send()
110
+
.await
111
+
.context("failed to send request")?;
112
+
113
+
let mut resp = Response::builder().status(r.status());
114
+
if let Some(hdrs) = resp.headers_mut() {
115
+
*hdrs = r.headers().clone();
116
+
}
117
+
118
+
let resp = resp
119
+
.body(Body::from_stream(r.bytes_stream()))
120
+
.context("failed to construct response")?;
121
+
122
+
Ok(resp)
123
+
}
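From a client's perspective the proxy is driven entirely by the `atproto-proxy` header: the handler resolves the DID document, picks the matching service endpoint, and re-signs the request with a short-lived JWT. A hedged sketch of driving it from a client; the host, handle, and access token are placeholders:

// Hypothetical client call routed through the service proxy.
async fn call_via_proxy(
    client: &reqwest::Client,
    access_token: &str,
) -> reqwest::Result<reqwest::Response> {
    client
        .get("https://pds.example.com/xrpc/app.bsky.actor.getProfile?actor=alice.example.com")
        // Explicit target service; the handler falls back to the bsky appview without it.
        .header("atproto-proxy", "did:web:api.bsky.app#bsky_appview")
        .bearer_auth(access_token)
        .send()
        .await
}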
-28
src/storage/car.rs
···
1
-
//! CAR file-based repository storage
2
-
3
-
use anyhow::{Context as _, Result};
4
-
use atrium_repo::blockstore::{AsyncBlockStoreRead, AsyncBlockStoreWrite, CarStore};
5
-
6
-
use crate::{config::RepoConfig, mmap::MappedFile};
7
-
8
-
/// Open a CAR block store for a given DID.
9
-
pub(crate) async fn open_car_store(
10
-
config: &RepoConfig,
11
-
did: impl AsRef<str>,
12
-
) -> Result<impl AsyncBlockStoreRead + AsyncBlockStoreWrite> {
13
-
let id = did
14
-
.as_ref()
15
-
.strip_prefix("did:plc:")
16
-
.context("did in unknown format")?;
17
-
18
-
let p = config.path.join(id).with_extension("car");
19
-
20
-
let f = std::fs::File::options()
21
-
.read(true)
22
-
.write(true)
23
-
.open(p)
24
-
.context("failed to open repository file")?;
25
-
let f = MappedFile::new(f).context("failed to map repo")?;
26
-
27
-
CarStore::open(f).await.context("failed to open car store")
28
-
}
-159
src/storage/mod.rs
···
1
-
//! `ATProto` user repository datastore functionality.
2
-
3
-
pub(crate) mod car;
4
-
mod sqlite;
5
-
6
-
use anyhow::{Context as _, Result};
7
-
use atrium_repo::{
8
-
Cid, Repository,
9
-
blockstore::{AsyncBlockStoreRead, AsyncBlockStoreWrite},
10
-
};
11
-
use std::str::FromStr as _;
12
-
13
-
use crate::{Db, config::RepoConfig};
14
-
15
-
// Re-export public items
16
-
pub(crate) use car::open_car_store;
17
-
pub(crate) use sqlite::{SQLiteStore, open_sqlite_store};
18
-
19
-
/// Open a repository for a given DID.
20
-
pub(crate) async fn open_repo_db(
21
-
config: &RepoConfig,
22
-
db: &Db,
23
-
did: impl Into<String>,
24
-
) -> Result<Repository<impl AsyncBlockStoreRead + AsyncBlockStoreWrite>> {
25
-
let did = did.into();
26
-
let cid = sqlx::query_scalar!(
27
-
r#"
28
-
SELECT root FROM accounts
29
-
WHERE did = ?
30
-
"#,
31
-
did
32
-
)
33
-
.fetch_one(db)
34
-
.await
35
-
.context("failed to query database")?;
36
-
37
-
open_repo(
38
-
config,
39
-
did,
40
-
Cid::from_str(&cid).context("should be valid CID")?,
41
-
)
42
-
.await
43
-
}
44
-
45
-
/// Open a repository for a given DID and CID.
46
-
pub(crate) async fn open_repo(
47
-
config: &RepoConfig,
48
-
did: impl Into<String>,
49
-
cid: Cid,
50
-
) -> Result<Repository<impl AsyncBlockStoreRead + AsyncBlockStoreWrite>> {
51
-
let store = open_car_store(config, did.into()).await?;
52
-
Repository::open(store, cid)
53
-
.await
54
-
.context("failed to open repo")
55
-
}
56
-
/// Open a repository for a given DID and CID.
57
-
/// SQLite backend.
58
-
pub(crate) async fn open_repo_sqlite(
59
-
config: &RepoConfig,
60
-
did: impl Into<String>,
61
-
cid: Cid,
62
-
) -> Result<Repository<impl AsyncBlockStoreRead + AsyncBlockStoreWrite>> {
63
-
let store = open_sqlite_store(config, did.into()).await?;
64
-
return Repository::open(store, cid)
65
-
.await
66
-
.context("failed to open repo");
67
-
}
68
-
69
-
/// Open a block store for a given DID.
70
-
pub(crate) async fn open_store(
71
-
config: &RepoConfig,
72
-
did: impl Into<String>,
73
-
) -> Result<impl AsyncBlockStoreRead + AsyncBlockStoreWrite> {
74
-
let did = did.into();
75
-
76
-
// if config.use_sqlite {
77
-
return open_sqlite_store(config, did.clone()).await;
78
-
// }
79
-
// Default to CAR store
80
-
// open_car_store(config, &did).await
81
-
}
82
-
83
-
/// Create a storage backend for a DID
84
-
pub(crate) async fn create_storage_for_did(
85
-
config: &RepoConfig,
86
-
did_hash: &str,
87
-
) -> Result<impl AsyncBlockStoreRead + AsyncBlockStoreWrite> {
88
-
// Use standard file structure but change extension based on type
89
-
// if config.use_sqlite {
90
-
// For SQLite, create a new database file
91
-
let db_path = config.path.join(format!("{}.db", did_hash));
92
-
93
-
// Ensure parent directory exists
94
-
if let Some(parent) = db_path.parent() {
95
-
tokio::fs::create_dir_all(parent)
96
-
.await
97
-
.context("failed to create directory")?;
98
-
}
99
-
100
-
// Create SQLite store
101
-
let pool = sqlx::sqlite::SqlitePoolOptions::new()
102
-
.max_connections(5)
103
-
.connect_with(
104
-
sqlx::sqlite::SqliteConnectOptions::new()
105
-
.filename(&db_path)
106
-
.create_if_missing(true),
107
-
)
108
-
.await
109
-
.context("failed to connect to SQLite database")?;
110
-
111
-
// Initialize tables
112
-
_ = sqlx::query(
113
-
"
114
-
CREATE TABLE IF NOT EXISTS blocks (
115
-
cid TEXT PRIMARY KEY NOT NULL,
116
-
data BLOB NOT NULL,
117
-
multicodec INTEGER NOT NULL,
118
-
multihash INTEGER NOT NULL
119
-
);
120
-
CREATE TABLE IF NOT EXISTS tree_nodes (
121
-
repo_did TEXT NOT NULL,
122
-
key TEXT NOT NULL,
123
-
value_cid TEXT NOT NULL,
124
-
PRIMARY KEY (repo_did, key),
125
-
FOREIGN KEY (value_cid) REFERENCES blocks(cid)
126
-
);
127
-
CREATE INDEX IF NOT EXISTS idx_blocks_cid ON blocks(cid);
128
-
CREATE INDEX IF NOT EXISTS idx_tree_nodes_repo ON tree_nodes(repo_did);
129
-
PRAGMA journal_mode=WAL;
130
-
",
131
-
)
132
-
.execute(&pool)
133
-
.await
134
-
.context("failed to create tables")?;
135
-
136
-
Ok(SQLiteStore {
137
-
pool,
138
-
did: format!("did:plc:{}", did_hash),
139
-
})
140
-
// } else {
141
-
// // For CAR files, create a new file
142
-
// let file_path = config.path.join(format!("{}.car", did_hash));
143
-
144
-
// // Ensure parent directory exists
145
-
// if let Some(parent) = file_path.parent() {
146
-
// tokio::fs::create_dir_all(parent)
147
-
// .await
148
-
// .context("failed to create directory")?;
149
-
// }
150
-
151
-
// let file = tokio::fs::File::create_new(file_path)
152
-
// .await
153
-
// .context("failed to create repo file")?;
154
-
155
-
// CarStore::create(file)
156
-
// .await
157
-
// .context("failed to create carstore")
158
-
// }
159
-
}
-149
src/storage/sqlite.rs
···
1
-
//! SQLite-based repository storage implementation.
2
-
3
-
use anyhow::{Context as _, Result};
4
-
use atrium_repo::{
5
-
Cid, Multihash,
6
-
blockstore::{AsyncBlockStoreRead, AsyncBlockStoreWrite, Error as BlockstoreError},
7
-
};
8
-
use sha2::Digest;
9
-
use sqlx::SqlitePool;
10
-
11
-
use crate::config::RepoConfig;
12
-
13
-
/// SQLite-based implementation of block storage.
14
-
pub(crate) struct SQLiteStore {
15
-
pub did: String,
16
-
pub pool: SqlitePool,
17
-
}
18
-
19
-
impl AsyncBlockStoreRead for SQLiteStore {
20
-
async fn read_block(&mut self, cid: Cid) -> Result<Vec<u8>, BlockstoreError> {
21
-
let mut contents = Vec::new();
22
-
self.read_block_into(cid, &mut contents).await?;
23
-
Ok(contents)
24
-
}
25
-
async fn read_block_into(
26
-
&mut self,
27
-
cid: Cid,
28
-
contents: &mut Vec<u8>,
29
-
) -> Result<(), BlockstoreError> {
30
-
let cid_str = cid.to_string();
31
-
let record = sqlx::query!(r#"SELECT data FROM blocks WHERE cid = ?"#, cid_str)
32
-
.fetch_optional(&self.pool)
33
-
.await
34
-
.map_err(|e| BlockstoreError::Other(Box::new(e)))?
35
-
.ok_or(BlockstoreError::CidNotFound)?;
36
-
37
-
contents.clear();
38
-
contents.extend_from_slice(&record.data);
39
-
Ok(())
40
-
}
41
-
}
42
-
43
-
impl AsyncBlockStoreWrite for SQLiteStore {
44
-
async fn write_block(
45
-
&mut self,
46
-
codec: u64,
47
-
hash: u64,
48
-
contents: &[u8],
49
-
) -> Result<Cid, BlockstoreError> {
50
-
let digest = match hash {
51
-
atrium_repo::blockstore::SHA2_256 => sha2::Sha256::digest(&contents),
52
-
_ => return Err(BlockstoreError::UnsupportedHash(hash)),
53
-
};
54
-
55
-
let multihash = Multihash::wrap(hash, digest.as_slice())
56
-
.map_err(|_| BlockstoreError::UnsupportedHash(hash))?;
57
-
58
-
let cid = Cid::new_v1(codec, multihash);
59
-
let cid_str = cid.to_string();
60
-
61
-
// Use a transaction for atomicity
62
-
let mut tx = self
63
-
.pool
64
-
.begin()
65
-
.await
66
-
.map_err(|e| BlockstoreError::Other(Box::new(e)))?;
67
-
68
-
// Check if block already exists
69
-
let exists = sqlx::query_scalar!(r#"SELECT COUNT(*) FROM blocks WHERE cid = ?"#, cid_str)
70
-
.fetch_one(&mut *tx)
71
-
.await
72
-
.map_err(|e| BlockstoreError::Other(Box::new(e)))?;
73
-
74
-
// Only insert if block doesn't exist
75
-
let codec = codec as i64;
76
-
let hash = hash as i64;
77
-
if exists == 0 {
78
-
_ = sqlx::query!(
79
-
r#"INSERT INTO blocks (cid, data, multicodec, multihash) VALUES (?, ?, ?, ?)"#,
80
-
cid_str,
81
-
contents,
82
-
codec,
83
-
hash
84
-
)
85
-
.execute(&mut *tx)
86
-
.await
87
-
.map_err(|e| BlockstoreError::Other(Box::new(e)))?;
88
-
}
89
-
90
-
tx.commit()
91
-
.await
92
-
.map_err(|e| BlockstoreError::Other(Box::new(e)))?;
93
-
94
-
Ok(cid)
95
-
}
96
-
}
97
-
98
-
/// Open a SQLite store for the given DID.
99
-
pub(crate) async fn open_sqlite_store(
100
-
config: &RepoConfig,
101
-
did: impl Into<String>,
102
-
) -> Result<impl AsyncBlockStoreRead + AsyncBlockStoreWrite> {
103
-
tracing::info!("Opening SQLite store for DID");
104
-
let did_str = did.into();
105
-
106
-
// Extract the PLC ID from the DID
107
-
let id = did_str
108
-
.strip_prefix("did:plc:")
109
-
.context("DID in unknown format")?;
110
-
111
-
// Create database connection pool
112
-
let db_path = config.path.join(format!("{id}.db"));
113
-
114
-
let pool = sqlx::sqlite::SqlitePoolOptions::new()
115
-
.max_connections(5)
116
-
.connect_with(
117
-
sqlx::sqlite::SqliteConnectOptions::new()
118
-
.filename(&db_path)
119
-
.create_if_missing(true),
120
-
)
121
-
.await
122
-
.context("failed to connect to SQLite database")?;
123
-
124
-
// Ensure tables exist
125
-
_ = sqlx::query(
126
-
"
127
-
CREATE TABLE IF NOT EXISTS blocks (
128
-
cid TEXT PRIMARY KEY NOT NULL,
129
-
data BLOB NOT NULL,
130
-
multicodec INTEGER NOT NULL,
131
-
multihash INTEGER NOT NULL
132
-
);
133
-
CREATE TABLE IF NOT EXISTS tree_nodes (
134
-
repo_did TEXT NOT NULL,
135
-
key TEXT NOT NULL,
136
-
value_cid TEXT NOT NULL,
137
-
PRIMARY KEY (repo_did, key),
138
-
FOREIGN KEY (value_cid) REFERENCES blocks(cid)
139
-
);
140
-
CREATE INDEX IF NOT EXISTS idx_blocks_cid ON blocks(cid);
141
-
CREATE INDEX IF NOT EXISTS idx_tree_nodes_repo ON tree_nodes(repo_did);
142
-
",
143
-
)
144
-
.execute(&pool)
145
-
.await
146
-
.context("failed to create tables")?;
147
-
148
-
Ok(SQLiteStore { pool, did: did_str })
149
-
}
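Note: for context on what the deleted store did, a minimal round trip through it would look like the sketch below. Not part of the diff: the module path crate::storage::sqlite and the DID value are illustrative, and 0x71 is the DAG-CBOR multicodec.

use anyhow::Result;
use atrium_repo::blockstore::{AsyncBlockStoreRead as _, AsyncBlockStoreWrite as _, SHA2_256};

use crate::{config::RepoConfig, storage::sqlite::open_sqlite_store};

async fn roundtrip(config: &RepoConfig) -> Result<()> {
    // Opens (or creates) <repo path>/<plc id>.db and ensures the tables exist.
    let mut store = open_sqlite_store(config, "did:plc:abc123").await?;

    // write_block hashes the contents, derives a CIDv1, and inserts the row
    // only if that CID is not already present.
    let data = b"hello blockstore".to_vec();
    let cid = store.write_block(0x71, SHA2_256, &data).await?;

    // read_block resolves the CID back to the stored bytes.
    assert_eq!(store.read_block(cid).await?, data);
    Ok(())
}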
-459 src/tests.rs
···
-//! Testing utilities for the PDS.
-#![expect(clippy::arbitrary_source_item_ordering)]
-use std::{
-    net::{IpAddr, Ipv4Addr, SocketAddr, TcpListener},
-    path::PathBuf,
-    time::{Duration, Instant},
-};
-
-use anyhow::Result;
-use atrium_api::{
-    com::atproto::server,
-    types::string::{AtIdentifier, Did, Handle, Nsid, RecordKey},
-};
-use figment::{Figment, providers::Format as _};
-use futures::future::join_all;
-use serde::{Deserialize, Serialize};
-use tokio::sync::OnceCell;
-use uuid::Uuid;
-
-use crate::config::AppConfig;
-
-/// Global test state, created once for all tests.
-pub(crate) static TEST_STATE: OnceCell<TestState> = OnceCell::const_new();
-
-/// A temporary test directory that will be cleaned up when the struct is dropped.
-struct TempDir {
-    /// The path to the directory.
-    path: PathBuf,
-}
-
-impl TempDir {
-    /// Create a new temporary directory.
-    fn new() -> Result<Self> {
-        let path = std::env::temp_dir().join(format!("bluepds-test-{}", Uuid::new_v4()));
-        std::fs::create_dir_all(&path)?;
-        Ok(Self { path })
-    }
-
-    /// Get the path to the directory.
-    fn path(&self) -> &PathBuf {
-        &self.path
-    }
-}
-
-impl Drop for TempDir {
-    fn drop(&mut self) {
-        drop(std::fs::remove_dir_all(&self.path));
-    }
-}
-
-/// Test state for the application.
-pub(crate) struct TestState {
-    /// The address the test server is listening on.
-    address: SocketAddr,
-    /// The HTTP client.
-    client: reqwest::Client,
-    /// The application configuration.
-    config: AppConfig,
-    /// The temporary directory for test data.
-    #[expect(dead_code)]
-    temp_dir: TempDir,
-}
-
-impl TestState {
-    /// Get a base URL for the test server.
-    pub(crate) fn base_url(&self) -> String {
-        format!("http://{}", self.address)
-    }
-
-    /// Create a test account.
-    pub(crate) async fn create_test_account(&self) -> Result<TestAccount> {
-        // Create the account
-        let handle = "test.handle";
-        let response = self
-            .client
-            .post(format!(
-                "http://{}/xrpc/com.atproto.server.createAccount",
-                self.address
-            ))
-            .json(&server::create_account::InputData {
-                did: None,
-                verification_code: None,
-                verification_phone: None,
-                email: Some(format!("{}@example.com", &handle)),
-                handle: Handle::new(handle.to_owned()).expect("should be able to create handle"),
-                password: Some("password123".to_owned()),
-                invite_code: None,
-                recovery_key: None,
-                plc_op: None,
-            })
-            .send()
-            .await?;
-
-        let account: server::create_account::Output = response.json().await?;
-
-        Ok(TestAccount {
-            handle: handle.to_owned(),
-            did: account.did.to_string(),
-            access_token: account.access_jwt.clone(),
-            refresh_token: account.refresh_jwt.clone(),
-        })
-    }
-
-    /// Create a new test state.
-    #[expect(clippy::unused_async)]
-    async fn new() -> Result<Self> {
-        // Configure the test app
-        #[derive(Serialize, Deserialize)]
-        struct TestConfigInput {
-            db: Option<String>,
-            host_name: Option<String>,
-            key: Option<PathBuf>,
-            listen_address: Option<SocketAddr>,
-            test: Option<bool>,
-        }
-        // Create a temporary directory for test data
-        let temp_dir = TempDir::new()?;
-
-        // Find a free port
-        let listener = TcpListener::bind(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0))?;
-        let address = listener.local_addr()?;
-        drop(listener);
-
-        let test_config = TestConfigInput {
-            db: Some(format!("sqlite://{}/test.db", temp_dir.path().display())),
-            host_name: Some(format!("localhost:{}", address.port())),
-            key: Some(temp_dir.path().join("test.key")),
-            listen_address: Some(address),
-            test: Some(true),
-        };
-
-        let config: AppConfig = Figment::new()
-            .admerge(figment::providers::Toml::file("default.toml"))
-            .admerge(figment::providers::Env::prefixed("BLUEPDS_"))
-            .merge(figment::providers::Serialized::defaults(test_config))
-            .merge(
-                figment::providers::Toml::string(
-                    r#"
-[firehose]
-relays = []
-
-[repo]
-path = "repo"
-
-[plc]
-path = "plc"
-
-[blob]
-path = "blob"
-limit = 10485760 # 10 MB
-"#,
-                )
-                .nested(),
-            )
-            .extract()?;
-
-        // Create directories
-        std::fs::create_dir_all(temp_dir.path().join("repo"))?;
-        std::fs::create_dir_all(temp_dir.path().join("plc"))?;
-        std::fs::create_dir_all(temp_dir.path().join("blob"))?;
-
-        // Create client
-        let client = reqwest::Client::builder()
-            .timeout(Duration::from_secs(30))
-            .build()?;
-
-        Ok(Self {
-            address,
-            client,
-            config,
-            temp_dir,
-        })
-    }
-
-    /// Start the application in a background task.
-    async fn start_app(&self) -> Result<()> {
-        // Get a reference to the config that can be moved into the task
-        let config = self.config.clone();
-        let address = self.address;
-
-        // Start the application in a background task
-        let _handle = tokio::spawn(async move {
-            // Set up the application
-            use crate::*;
-
-            // Initialize metrics (noop in test mode)
-            drop(metrics::setup(None));
-
-            // Create client
-            let simple_client = reqwest::Client::builder()
-                .user_agent(APP_USER_AGENT)
-                .build()
-                .context("failed to build requester client")?;
-            let client = reqwest_middleware::ClientBuilder::new(simple_client.clone())
-                .with(http_cache_reqwest::Cache(http_cache_reqwest::HttpCache {
-                    mode: CacheMode::Default,
-                    manager: MokaManager::default(),
-                    options: HttpCacheOptions::default(),
-                }))
-                .build();
-
-            // Create a test keypair
-            std::fs::create_dir_all(config.key.parent().context("should have parent")?)?;
-            let (skey, rkey) = {
-                let skey = Secp256k1Keypair::create(&mut rand::thread_rng());
-                let rkey = Secp256k1Keypair::create(&mut rand::thread_rng());
-
-                let keys = KeyData {
-                    skey: skey.export(),
-                    rkey: rkey.export(),
-                };
-
-                let mut f =
-                    std::fs::File::create(&config.key).context("failed to create key file")?;
-                serde_ipld_dagcbor::to_writer(&mut f, &keys)
-                    .context("failed to serialize crypto keys")?;
-
-                (SigningKey(Arc::new(skey)), RotationKey(Arc::new(rkey)))
-            };
-
-            // Set up database
-            let opts = SqliteConnectOptions::from_str(&config.db)
-                .context("failed to parse database options")?
-                .create_if_missing(true);
-            let db = SqlitePool::connect_with(opts).await?;
-
-            sqlx::migrate!()
-                .run(&db)
-                .await
-                .context("failed to apply migrations")?;
-
-            // Create firehose
-            let (_fh, fhp) = firehose::spawn(client.clone(), config.clone());
-
-            // Create the application state
-            let app_state = AppState {
-                cred: azure_identity::DefaultAzureCredential::new()?,
-                config: config.clone(),
-                db: db.clone(),
-                client: client.clone(),
-                simple_client,
-                firehose: fhp,
-                signing_key: skey,
-                rotation_key: rkey,
-            };
-
-            // Create the router
-            let app = Router::new()
-                .route("/", get(index))
-                .merge(oauth::routes())
-                .nest(
-                    "/xrpc",
-                    endpoints::routes()
-                        .merge(actor_endpoints::routes())
-                        .fallback(service_proxy),
-                )
-                .layer(CorsLayer::permissive())
-                .layer(TraceLayer::new_for_http())
-                .with_state(app_state);
-
-            // Listen for connections
-            let listener = TcpListener::bind(&address)
-                .await
-                .context("failed to bind address")?;
-
-            axum::serve(listener, app.into_make_service())
-                .await
-                .context("failed to serve app")
-        });
-
-        // Give the server a moment to start
-        tokio::time::sleep(Duration::from_millis(500)).await;
-
-        Ok(())
-    }
-}
-
-/// A test account that can be used for testing.
-pub(crate) struct TestAccount {
-    /// The access token for the account.
-    pub(crate) access_token: String,
-    /// The account DID.
-    pub(crate) did: String,
-    /// The account handle.
-    pub(crate) handle: String,
-    /// The refresh token for the account.
-    #[expect(dead_code)]
-    pub(crate) refresh_token: String,
-}
-
-/// Initialize the test state.
-pub(crate) async fn init_test_state() -> Result<&'static TestState> {
-    async fn init_test_state() -> std::result::Result<TestState, anyhow::Error> {
-        let state = TestState::new().await?;
-        state.start_app().await?;
-        Ok(state)
-    }
-    TEST_STATE.get_or_try_init(init_test_state).await
-}
-
-/// Create a record benchmark that creates records and measures the time it takes.
-#[expect(
-    clippy::arithmetic_side_effects,
-    clippy::integer_division,
-    clippy::integer_division_remainder_used,
-    clippy::use_debug,
-    clippy::print_stdout
-)]
-pub(crate) async fn create_record_benchmark(count: usize, concurrent: usize) -> Result<Duration> {
-    // Initialize the test state
-    let state = init_test_state().await?;
-
-    // Create a test account
-    let account = state.create_test_account().await?;
-
-    // Create the client with authorization
-    let client = reqwest::Client::builder()
-        .timeout(Duration::from_secs(30))
-        .build()?;
-
-    let start = Instant::now();
-
-    // Split the work into batches
-    let mut handles = Vec::new();
-    for batch_idx in 0..concurrent {
-        let batch_size = count / concurrent;
-        let client = client.clone();
-        let base_url = state.base_url();
-        let account_did = account.did.clone();
-        let account_handle = account.handle.clone();
-        let access_token = account.access_token.clone();
-
-        let handle = tokio::spawn(async move {
-            let mut results = Vec::new();
-
-            for i in 0..batch_size {
-                let request_start = Instant::now();
-                let record_idx = batch_idx * batch_size + i;
-
-                let result = client
-                    .post(format!("{base_url}/xrpc/com.atproto.repo.createRecord"))
-                    .header("Authorization", format!("Bearer {access_token}"))
-                    .json(&atrium_api::com::atproto::repo::create_record::InputData {
-                        repo: AtIdentifier::Did(Did::new(account_did.clone()).expect("valid DID")),
-                        collection: Nsid::new("app.bsky.feed.post".to_owned()).expect("valid NSID"),
-                        rkey: Some(
-                            RecordKey::new(format!("test-{record_idx}")).expect("valid record key"),
-                        ),
-                        validate: None,
-                        record: serde_json::from_str(
-                            &serde_json::json!({
-                                "$type": "app.bsky.feed.post",
-                                "text": format!("Test post {record_idx} from {account_handle}"),
-                                "createdAt": chrono::Utc::now().to_rfc3339(),
-                            })
-                            .to_string(),
-                        )
-                        .expect("valid JSON record"),
-                        swap_commit: None,
-                    })
-                    .send()
-                    .await;
-
-                // Fetch the record we just created
-                let get_response = client
-                    .get(format!(
-                        "{base_url}/xrpc/com.atproto.sync.getRecord?did={account_did}&collection=app.bsky.feed.post&rkey={record_idx}"
-                    ))
-                    .header("Authorization", format!("Bearer {access_token}"))
-                    .send()
-                    .await;
-                if get_response.is_err() {
-                    println!("Failed to fetch record {record_idx}: {get_response:?}");
-                    results.push(get_response);
-                    continue;
-                }
-
-                let request_duration = request_start.elapsed();
-                if record_idx % 10 == 0 {
-                    println!("Created record {record_idx} in {request_duration:?}");
-                }
-                results.push(result);
-            }
-
-            results
-        });
-
-        handles.push(handle);
-    }
-
-    // Wait for all batches to complete
-    let results = join_all(handles).await;
-
-    // Check for errors
-    for batch_result in results {
-        let batch_responses = batch_result?;
-        for response_result in batch_responses {
-            match response_result {
-                Ok(response) => {
-                    if !response.status().is_success() {
-                        return Err(anyhow::anyhow!(
-                            "Failed to create record: {}",
-                            response.status()
-                        ));
-                    }
-                }
-                Err(err) => {
-                    return Err(anyhow::anyhow!("Failed to create record: {}", err));
-                }
-            }
-        }
-    }
-
-    let duration = start.elapsed();
-    Ok(duration)
-}
-
-#[cfg(test)]
-#[expect(clippy::module_inception, clippy::use_debug, clippy::print_stdout)]
-mod tests {
-    use super::*;
-    use anyhow::anyhow;
-
-    #[tokio::test]
-    async fn test_create_account() -> Result<()> {
-        return Ok(());
-        #[expect(unreachable_code, reason = "Disabled")]
-        let state = init_test_state().await?;
-        let account = state.create_test_account().await?;
-
-        println!("Created test account: {}", account.handle);
-        if account.handle.is_empty() {
-            return Err(anyhow::anyhow!("Account handle is empty"));
-        }
-        if account.did.is_empty() {
-            return Err(anyhow::anyhow!("Account DID is empty"));
-        }
-        if account.access_token.is_empty() {
-            return Err(anyhow::anyhow!("Account access token is empty"));
-        }
-
-        Ok(())
-    }
-
-    #[tokio::test]
-    async fn test_create_record_benchmark() -> Result<()> {
-        return Ok(());
-        #[expect(unreachable_code, reason = "Disabled")]
-        let duration = create_record_benchmark(100, 1).await?;
-
-        println!("Created 100 records in {duration:?}");
-
-        if duration.as_secs() >= 10 {
-            return Err(anyhow!("Benchmark took too long"));
-        }
-
-        Ok(())
-    }
-}
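Note: once re-enabled, the disabled tests above would drive the removed harness roughly as sketched below. Not part of the diff; it assumes the tests module is still in scope and that tokio's test macro is available.

use anyhow::Result;

use crate::tests::{create_record_benchmark, init_test_state};

#[tokio::test]
async fn harness_smoke() -> Result<()> {
    // The first caller boots the server; later callers reuse the cached TestState.
    let state = init_test_state().await?;

    // Registers an account over HTTP and returns its DID plus session tokens.
    let account = state.create_test_account().await?;
    assert!(!account.did.is_empty());

    // 100 createRecord calls split across 4 concurrent writers, timed end to end.
    let elapsed = create_record_benchmark(100, 4).await?;
    println!("created 100 records in {elapsed:?}");
    Ok(())
}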