+1 -1 .github/workflows/checks.yml
···
 - name: get nightly toolchain for jetstream fmt
   run: rustup toolchain install nightly --allow-downgrade -c rustfmt
 - name: fmt
-  run: cargo fmt --package links --package constellation --package ufos --package spacedust --package who-am-i -- --check
+  run: cargo fmt --package links --package constellation --package ufos --package spacedust --package who-am-i --package slingshot -- --check
 - name: fmt jetstream (nightly)
   run: cargo +nightly fmt --package jetstream -- --check
 - name: clippy
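The only change here is registering the new `slingshot` crate with the stable-toolchain rustfmt check; `jetstream` keeps its separate nightly pass. A minimal sketch of reproducing the same check locally before pushing, assuming the workspace layout implied by this diff (the commands are copied verbatim from the workflow):

```bash
# stable rustfmt for the per-package crates, now including slingshot
cargo fmt --package links --package constellation --package ufos \
  --package spacedust --package who-am-i --package slingshot -- --check
# jetstream is formatted with nightly rustfmt, as in CI
cargo +nightly fmt --package jetstream -- --check
```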
+932 -132 Cargo.lock
···
 checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
 dependencies = [
  "cfg-if",
+ "getrandom 0.2.15",
  "once_cell",
  "version_check",
  "zerocopy 0.7.35",
···
 checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223"

 [[package]]
+name = "arc-swap"
+version = "1.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
+
+[[package]]
 name = "arrayvec"
 version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "proc-macro2",
  "quote",
  "serde",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "asn1-rs"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60"
+dependencies = [
+ "asn1-rs-derive",
+ "asn1-rs-impl",
+ "displaydoc",
+ "nom",
+ "num-traits",
+ "rusticata-macros",
+ "thiserror 2.0.12",
+ "time",
+]
+
+[[package]]
+name = "asn1-rs-derive"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.103",
+ "synstructure",
+]
+
+[[package]]
+name = "asn1-rs-impl"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.103",
+]
+
+[[package]]
+name = "async-channel"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2"
+dependencies = [
+ "concurrent-queue",
+ "event-listener-strategy",
+ "futures-core",
+ "pin-project-lite",
+]
+
+[[package]]
 name = "async-compression"
 version = "0.4.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
+name = "async-task"
+version = "4.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de"
+
+[[package]]
 name = "async-trait"
 version = "0.1.88"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···

 [[package]]
 name = "atrium-api"
-version = "0.25.3"
-source = "git+https://github.com/uniphil/atrium?branch=fix%2Fnsid-allow-nonleading-name-digits#c4364f318d337bbc3e3e3aaf97c9f971e95f5f7e"
-dependencies = [
- "atrium-common 0.1.2 (git+https://github.com/uniphil/atrium?branch=fix%2Fnsid-allow-nonleading-name-digits)",
- "atrium-xrpc 0.12.3 (git+https://github.com/uniphil/atrium?branch=fix%2Fnsid-allow-nonleading-name-digits)",
- "chrono",
- "http",
- "ipld-core",
- "langtag",
- "regex",
- "serde",
- "serde_bytes",
- "serde_json",
- "thiserror 1.0.69",
- "trait-variant",
-]
-
-[[package]]
-name = "atrium-api"
 version = "0.25.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "46355d3245edc7b3160b2a45fe55d09a6963ebd3eee0252feb6b72fb0eb71463"
 dependencies = [
- "atrium-common 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "atrium-xrpc 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-common",
+ "atrium-xrpc",
  "chrono",
  "http",
  "ipld-core",
···
 ]

 [[package]]
-name = "atrium-common"
-version = "0.1.2"
-source = "git+https://github.com/uniphil/atrium?branch=fix%2Fnsid-allow-nonleading-name-digits#c4364f318d337bbc3e3e3aaf97c9f971e95f5f7e"
-dependencies = [
- "dashmap",
- "lru",
- "moka",
- "thiserror 1.0.69",
- "tokio",
- "trait-variant",
- "web-time",
-]
-
-[[package]]
 name = "atrium-identity"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c9e2d42bb4dbea038f4f5f45e3af2a89d61a9894a75f06aa550b74a60d2be380"
 dependencies = [
- "atrium-api 0.25.4",
- "atrium-common 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "atrium-xrpc 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-api",
+ "atrium-common",
+ "atrium-xrpc",
  "serde",
  "serde_html_form",
  "serde_json",
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ca22dc4eaf77fd9bf050b21192ac58cd654a437d28e000ec114ebd93a51d36f5"
 dependencies = [
- "atrium-api 0.25.4",
- "atrium-common 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-api",
+ "atrium-common",
  "atrium-identity",
- "atrium-xrpc 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-xrpc",
  "base64 0.22.1",
  "chrono",
  "dashmap",
···
 ]

 [[package]]
-name = "atrium-xrpc"
-version = "0.12.3"
-source = "git+https://github.com/uniphil/atrium?branch=fix%2Fnsid-allow-nonleading-name-digits#c4364f318d337bbc3e3e3aaf97c9f971e95f5f7e"
+name = "auto_enums"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c170965892137a3a9aeb000b4524aa3cc022a310e709d848b6e1cdce4ab4781"
 dependencies = [
- "http",
- "serde",
- "serde_html_form",
- "serde_json",
- "thiserror 1.0.69",
- "trait-variant",
+ "derive_utils",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.103",
 ]

 [[package]]
···
  "regex",
  "rustc-hash 1.1.0",
  "shlex",
- "syn",
+ "syn 2.0.103",
  "which",
 ]

···
  "regex",
  "rustc-hash 1.1.0",
  "shlex",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
  "regex",
  "rustc-hash 2.1.1",
  "shlex",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···

 [[package]]
 name = "clap"
-version = "4.5.40"
+version = "4.5.41"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f"
+checksum = "be92d32e80243a54711e5d7ce823c35c41c9d929dc4ab58e1276f625841aadf9"
 dependencies = [
  "clap_builder",
  "clap_derive",
···

 [[package]]
 name = "clap_builder"
-version = "4.5.40"
+version = "4.5.41"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e"
+checksum = "707eab41e9622f9139419d573eca0900137718000c517d47da73045f54331c3d"
 dependencies = [
  "anstream",
  "anstyle",
  "clap_lex",
- "strsim",
+ "strsim 0.11.1",
 ]

 [[package]]
 name = "clap_derive"
-version = "4.5.40"
+version = "4.5.41"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2c7947ae4cc3d851207c1adb5b5e260ff0cca11446b1d6d1423788e442257ce"
+checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491"
 dependencies = [
  "heck",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0"
 dependencies = [
  "cc",
+]
+
+[[package]]
+name = "cmsketch"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "553c840ee51da812c6cd621f9f7e07dfb00a49f91283a8e6380c78cba4f61aba"
+dependencies = [
+ "paste",
 ]

 [[package]]
···
  "clap",
  "ctrlc",
  "flume",
- "fs4",
+ "fs4 0.12.0",
  "headers-accept",
  "links",
  "mediatype",
···

 [[package]]
 name = "darling"
+version = "0.14.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850"
+dependencies = [
+ "darling_core 0.14.4",
+ "darling_macro 0.14.4",
+]
+
+[[package]]
+name = "darling"
 version = "0.20.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
 dependencies = [
- "darling_core",
- "darling_macro",
+ "darling_core 0.20.11",
+ "darling_macro 0.20.11",
+]
+
+[[package]]
+name = "darling_core"
+version = "0.14.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0"
+dependencies = [
+ "fnv",
+ "ident_case",
+ "proc-macro2",
+ "quote",
+ "strsim 0.10.0",
+ "syn 1.0.109",
 ]

 [[package]]
···
  "ident_case",
  "proc-macro2",
  "quote",
- "strsim",
- "syn",
+ "strsim 0.11.1",
+ "syn 2.0.103",
+]
+
+[[package]]
+name = "darling_macro"
+version = "0.14.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e"
+dependencies = [
+ "darling_core 0.14.4",
+ "quote",
+ "syn 1.0.109",
 ]

 [[package]]
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
 dependencies = [
- "darling_core",
+ "darling_core 0.20.11",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 checksum = "18e4fdb82bd54a12e42fb58a800dcae6b9e13982238ce2296dc3570b92148e1f"
 dependencies = [
  "data-encoding",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "der-parser"
+version = "10.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6"
+dependencies = [
+ "asn1-rs",
+ "displaydoc",
+ "nom",
+ "num-bigint",
+ "num-traits",
+ "rusticata-macros",
+]
+
+[[package]]
 name = "deranged"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8"
 dependencies = [
- "darling",
+ "darling 0.20.11",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c"
 dependencies = [
  "derive_builder_core",
- "syn",
+ "syn 2.0.103",
+]
+
+[[package]]
+name = "derive_more"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678"
+dependencies = [
+ "derive_more-impl",
+]
+
+[[package]]
+name = "derive_more-impl"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.103",
+ "unicode-xid",
+]
+
+[[package]]
+name = "derive_utils"
+version = "0.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.103",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 checksum = "c0d05e1c0dbad51b52c38bda7adceef61b9efc2baf04acfe8726a8c4630a6f57"

 [[package]]
+name = "downcast-rs"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2"
+
+[[package]]
 name = "dropshot"
 version = "0.16.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "semver",
  "serde",
  "serde_tokenstream",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
  "heck",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
  "once_cell",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095"
 dependencies = [
+ "futures-core",
+ "futures-sink",
+ "nanorand",
  "spin",
 ]

···
 ]

 [[package]]
+name = "foyer"
+version = "0.18.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b4d8e96374206ff1b4265f2e2e6e1f80bc3048957b2a1e7fdeef929d68f318f"
+dependencies = [
+ "equivalent",
+ "foyer-common",
+ "foyer-memory",
+ "foyer-storage",
+ "madsim-tokio",
+ "mixtrics",
+ "pin-project",
+ "serde",
+ "thiserror 2.0.12",
+ "tokio",
+ "tracing",
+]
+
+[[package]]
+name = "foyer-common"
+version = "0.18.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "911b8e3f23d5fe55b0b240f75af1d2fa5cb7261d3f9b38ef1c57bbc9f0449317"
+dependencies = [
+ "bincode 1.3.3",
+ "bytes",
+ "cfg-if",
+ "itertools 0.14.0",
+ "madsim-tokio",
+ "mixtrics",
+ "parking_lot",
+ "pin-project",
+ "serde",
+ "thiserror 2.0.12",
+ "tokio",
+ "twox-hash",
+]
+
+[[package]]
+name = "foyer-intrusive-collections"
+version = "0.10.0-dev"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e4fee46bea69e0596130e3210e65d3424e0ac1e6df3bde6636304bdf1ca4a3b"
+dependencies = [
+ "memoffset",
+]
+
+[[package]]
+name = "foyer-memory"
+version = "0.18.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "506883d5a8500dea1b1662f7180f3534bdcbfa718d3253db7179552ef83612fa"
+dependencies = [
+ "arc-swap",
+ "bitflags",
+ "cmsketch",
+ "equivalent",
+ "foyer-common",
+ "foyer-intrusive-collections",
+ "hashbrown 0.15.2",
+ "itertools 0.14.0",
+ "madsim-tokio",
+ "mixtrics",
+ "parking_lot",
+ "pin-project",
+ "serde",
+ "thiserror 2.0.12",
+ "tokio",
+ "tracing",
+]
+
+[[package]]
+name = "foyer-storage"
+version = "0.18.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ba8403a54a2f2032fb647e49c442e5feeb33f3989f7024f1b178341a016f06d"
+dependencies = [
+ "allocator-api2",
+ "anyhow",
+ "auto_enums",
+ "bytes",
+ "equivalent",
+ "flume",
+ "foyer-common",
+ "foyer-memory",
+ "fs4 0.13.1",
+ "futures-core",
+ "futures-util",
+ "itertools 0.14.0",
+ "libc",
+ "lz4",
+ "madsim-tokio",
+ "ordered_hash_map",
+ "parking_lot",
+ "paste",
+ "pin-project",
+ "rand 0.9.1",
+ "serde",
+ "thiserror 2.0.12",
+ "tokio",
+ "tracing",
+ "twox-hash",
+ "zstd",
+]
+
+[[package]]
 name = "fs4"
 version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "fs4"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8640e34b88f7652208ce9e88b1a37a2ae95227d84abec377ccd3c5cfeb141ed4"
+dependencies = [
+ "rustix 1.0.5",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
 name = "fs_extra"
 version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
 dependencies = [
  "cfg-if",
+ "js-sys",
  "libc",
  "r-efi",
  "wasi 0.14.2+wasi-0.2.4",
+ "wasm-bindgen",
 ]

 [[package]]
···
 version = "0.12.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+
+[[package]]
+name = "hashbrown"
+version = "0.13.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e"
+dependencies = [
+ "ahash",
+]

 [[package]]
 name = "hashbrown"
···
  "http",
  "hyper",
  "hyper-util",
- "rustls 0.23.28",
+ "rustls 0.23.31",
  "rustls-native-certs",
  "rustls-pki-types",
  "tokio",
···

 [[package]]
 name = "hyper-util"
-version = "0.1.14"
+version = "0.1.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb"
+checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e"
 dependencies = [
  "base64 0.22.1",
  "bytes",
···
  "libc",
  "percent-encoding",
  "pin-project-lite",
- "socket2",
+ "socket2 0.6.0",
  "system-configuration",
  "tokio",
  "tower-service",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "io-uring"
+version = "0.7.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4"
+dependencies = [
+ "bitflags",
+ "cfg-if",
+ "libc",
+]
+
+[[package]]
 name = "ipconfig"
 version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f"
 dependencies = [
- "socket2",
+ "socket2 0.5.9",
  "widestring",
  "windows-sys 0.48.0",
  "winreg",
···
 ]

 [[package]]
+name = "itertools"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285"
+dependencies = [
+ "either",
+]
+
+[[package]]
 name = "itoa"
 version = "1.0.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 dependencies = [
  "anyhow",
  "async-trait",
- "atrium-api 0.25.3",
+ "atrium-api",
  "chrono",
  "clap",
  "futures-util",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···

 [[package]]
 name = "libc"
-version = "0.2.171"
+version = "0.2.174"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6"
+checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"

 [[package]]
 name = "libfuzzer-sys"
···
 ]

 [[package]]
+name = "lru-slab"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
+
+[[package]]
 name = "lsm-tree"
 version = "2.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "lz4"
+version = "1.28.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a20b523e860d03443e98350ceaac5e71c6ba89aea7d960769ec3ce37f4de5af4"
+dependencies = [
+ "lz4-sys",
+]
+
+[[package]]
 name = "lz4-sys"
 version = "1.11.1+lz4-1.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "madsim"
+version = "0.2.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db6694555643da293dfb89e33c2880a13b62711d64b6588bc7df6ce4110b27f1"
+dependencies = [
+ "ahash",
+ "async-channel",
+ "async-stream",
+ "async-task",
+ "bincode 1.3.3",
+ "bytes",
+ "downcast-rs",
+ "futures-util",
+ "lazy_static",
+ "libc",
+ "madsim-macros",
+ "naive-timer",
+ "panic-message",
+ "rand 0.8.5",
+ "rand_xoshiro 0.6.0",
+ "rustversion",
+ "serde",
+ "spin",
+ "tokio",
+ "tokio-util",
+ "toml",
+ "tracing",
+ "tracing-subscriber",
+]
+
+[[package]]
+name = "madsim-macros"
+version = "0.2.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3d248e97b1a48826a12c3828d921e8548e714394bf17274dd0a93910dc946e1"
+dependencies = [
+ "darling 0.14.4",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "madsim-tokio"
+version = "0.2.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d3eb2acc57c82d21d699119b859e2df70a91dbdb84734885a1e72be83bdecb5"
+dependencies = [
+ "madsim",
+ "spin",
+ "tokio",
+]
+
+[[package]]
 name = "match_cfg"
 version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"

 [[package]]
+name = "memoffset"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
 name = "metrics"
 version = "0.24.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "mixtrics"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "adbcddf5a90b959eea97ae505e0391f5c6dd411fbf546d43b9c59ad1c3bd4391"
+dependencies = [
+ "itertools 0.14.0",
+ "parking_lot",
+]
+
+[[package]]
 name = "moka"
 version = "0.12.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "memchr",
  "mime",
  "spin",
+ "tokio",
  "version_check",
 ]

···
 ]

 [[package]]
+name = "naive-timer"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "034a0ad7deebf0c2abcf2435950a6666c3c15ea9d8fad0c0f48efa8a7f843fed"
+
+[[package]]
+name = "nanorand"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3"
+dependencies = [
+ "getrandom 0.2.15",
+]
+
+[[package]]
 name = "native-tls"
 version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "oid-registry"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "12f40cff3dde1b6087cc5d5f5d4d65712f34016a03ed60e9c08dcc392736b5b7"
+dependencies = [
+ "asn1-rs",
+]
+
+[[package]]
 name = "once_cell"
 version = "1.21.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "ordered_hash_map"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab0e5f22bf6dd04abd854a8874247813a8fa2c8c1260eba6fbb150270ce7c176"
+dependencies = [
+ "hashbrown 0.13.2",
+]
+
+[[package]]
 name = "overload"
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "panic-message"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "384e52fd8fbd4cbe3c317e8216260c21a0f9134de108cea8a4dd4e7e152c472d"
+
+[[package]]
 name = "parking"
 version = "2.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "pest_meta",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "pin-project"
+version = "1.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a"
+dependencies = [
+ "pin-project-internal",
+]
+
+[[package]]
+name = "pin-project-internal"
+version = "1.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.103",
+]
+
+[[package]]
 name = "pin-project-lite"
 version = "0.2.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"

 [[package]]
+name = "poem"
+version = "3.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9f977080932c87287147dca052951c3e2696f8759863f6b4e4c0c9ffe7a4cc8b"
+dependencies = [
+ "base64 0.22.1",
+ "bytes",
+ "chrono",
+ "futures-util",
+ "headers",
+ "http",
+ "http-body-util",
+ "httpdate",
+ "hyper",
+ "hyper-util",
+ "mime",
+ "mime_guess",
+ "multer",
+ "nix",
+ "parking_lot",
+ "percent-encoding",
+ "pin-project-lite",
+ "poem-derive",
+ "quick-xml",
+ "rcgen",
+ "regex",
+ "reqwest",
+ "rfc7239",
+ "ring",
+ "rustls-pemfile",
+ "serde",
+ "serde_json",
+ "serde_urlencoded",
+ "serde_yaml",
+ "smallvec",
+ "sync_wrapper",
+ "tempfile",
+ "thiserror 2.0.12",
+ "tokio",
+ "tokio-rustls 0.26.2",
+ "tokio-stream",
+ "tokio-util",
+ "tracing",
+ "wildmatch",
+ "x509-parser",
+]
+
+[[package]]
+name = "poem-derive"
+version = "3.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "056e2fea6de1cb240ffe23cfc4fc370b629f8be83b5f27e16b7acd5231a72de4"
+dependencies = [
+ "proc-macro-crate",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.103",
+]
+
+[[package]]
+name = "poem-openapi"
+version = "5.1.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ccbcc395bf4dd03df1da32da351b6b6732e4074ce27ddec315650e52a2be44c"
+dependencies = [
+ "base64 0.22.1",
+ "bytes",
+ "derive_more",
+ "futures-util",
+ "indexmap 2.9.0",
+ "itertools 0.14.0",
+ "mime",
+ "num-traits",
+ "poem",
+ "poem-openapi-derive",
+ "quick-xml",
+ "regex",
+ "serde",
+ "serde_json",
+ "serde_urlencoded",
+ "serde_yaml",
+ "thiserror 2.0.12",
+ "tokio",
+]
+
+[[package]]
+name = "poem-openapi-derive"
+version = "5.1.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41273b691a3d467a8c44d05506afba9f7b6bd56c9cdf80123de13fe52d7ec587"
+dependencies = [
+ "darling 0.20.11",
+ "http",
+ "indexmap 2.9.0",
+ "mime",
+ "proc-macro-crate",
+ "proc-macro2",
+ "quote",
+ "regex",
+ "syn 2.0.103",
+ "thiserror 2.0.12",
+]
+
+[[package]]
 name = "portable-atomic"
 version = "1.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 checksum = "6837b9e10d61f45f987d50808f83d1ee3d206c66acf650c3e4ae2e1f6ddedf55"
 dependencies = [
  "proc-macro2",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "proc-macro-crate"
+version = "3.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35"
+dependencies = [
+ "toml_edit",
+]
+
+[[package]]
 name = "proc-macro2"
 version = "1.0.94"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "quick-xml"
+version = "0.36.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7649a7b4df05aed9ea7ec6f628c67c9953a43869b8bc50929569b2999d443fe"
+dependencies = [
+ "memchr",
+ "serde",
+]
+
+[[package]]
 name = "quick_cache"
 version = "0.6.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 dependencies = [
  "equivalent",
  "hashbrown 0.15.2",
+]
+
+[[package]]
+name = "quinn"
+version = "0.11.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8"
+dependencies = [
+ "bytes",
+ "cfg_aliases",
+ "pin-project-lite",
+ "quinn-proto",
+ "quinn-udp",
+ "rustc-hash 2.1.1",
+ "rustls 0.23.31",
+ "socket2 0.5.9",
+ "thiserror 2.0.12",
+ "tokio",
+ "tracing",
+ "web-time",
+]
+
+[[package]]
+name = "quinn-proto"
+version = "0.11.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e"
+dependencies = [
+ "bytes",
+ "getrandom 0.3.3",
+ "lru-slab",
+ "rand 0.9.1",
+ "ring",
+ "rustc-hash 2.1.1",
+ "rustls 0.23.31",
+ "rustls-pki-types",
+ "slab",
+ "thiserror 2.0.12",
+ "tinyvec",
+ "tracing",
+ "web-time",
+]
+
+[[package]]
+name = "quinn-udp"
+version = "0.5.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970"
+dependencies = [
+ "cfg_aliases",
+ "libc",
+ "once_cell",
+ "socket2 0.5.9",
+ "tracing",
+ "windows-sys 0.59.0",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "rcgen"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "48406db8ac1f3cbc7dcdb56ec355343817958a356ff430259bb07baf7607e1e1"
+dependencies = [
+ "pem",
+ "ring",
+ "time",
+ "yasna",
+]
+
+[[package]]
 name = "redox_syscall"
 version = "0.5.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
  "native-tls",
  "percent-encoding",
  "pin-project-lite",
+ "quinn",
+ "rustls 0.23.31",
+ "rustls-native-certs",
  "rustls-pki-types",
  "serde",
  "serde_json",
···
  "sync_wrapper",
  "tokio",
  "tokio-native-tls",
+ "tokio-rustls 0.26.2",
  "tokio-util",
  "tower",
  "tower-http",
···
 dependencies = [
  "hmac",
  "subtle",
+]
+
+[[package]]
+name = "rfc7239"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4a82f1d1e38e9a85bb58ffcfadf22ed6f2c94e8cd8581ec2b0f80a2a6858350f"
+dependencies = [
+ "uncased",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "rusticata-macros"
+version = "4.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632"
+dependencies = [
+ "nom",
+]
+
+[[package]]
 name = "rustix"
 version = "0.38.44"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···

 [[package]]
 name = "rustls"
-version = "0.23.28"
+version = "0.23.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643"
+checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc"
 dependencies = [
  "aws-lc-rs",
+ "log",
  "once_cell",
+ "ring",
  "rustls-pki-types",
- "rustls-webpki 0.103.3",
+ "rustls-webpki 0.103.4",
  "subtle",
  "zeroize",
 ]
···

 [[package]]
 name = "rustls-pki-types"
-version = "1.11.0"
+version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c"
+checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79"
+dependencies = [
+ "web-time",
+ "zeroize",
+]

 [[package]]
 name = "rustls-webpki"
···

 [[package]]
 name = "rustls-webpki"
-version = "0.103.3"
+version = "0.103.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435"
+checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc"
 dependencies = [
  "aws-lc-rs",
  "ring",
···
  "proc-macro2",
  "quote",
  "serde_derive_internals",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···

 [[package]]
 name = "serde_json"
-version = "1.0.140"
+version = "1.0.141"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373"
+checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3"
 dependencies = [
  "itoa",
  "memchr",
···
  "proc-macro2",
  "quote",
  "serde",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e"
 dependencies = [
- "darling",
+ "darling 0.20.11",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
+]
+
+[[package]]
+name = "serde_yaml"
+version = "0.9.34+deprecated"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
+dependencies = [
+ "indexmap 2.9.0",
+ "itoa",
+ "ryu",
+ "serde",
+ "unsafe-libyaml",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "slingshot"
+version = "0.1.0"
+dependencies = [
+ "atrium-api",
+ "atrium-common",
+ "atrium-identity",
+ "atrium-oauth",
+ "clap",
+ "ctrlc",
+ "foyer",
+ "hickory-resolver",
+ "jetstream",
+ "links",
+ "log",
+ "metrics",
+ "metrics-exporter-prometheus 0.17.2",
+ "poem",
+ "poem-openapi",
+ "reqwest",
+ "rustls 0.23.31",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.12",
+ "time",
+ "tokio",
+ "tokio-util",
+ "tracing-subscriber",
+ "url",
+]
+
+[[package]]
 name = "slog"
 version = "2.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "socket2"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807"
+dependencies = [
+ "libc",
+ "windows-sys 0.59.0",
+]
+
+[[package]]
 name = "spacedust"
 version = "0.1.0"
 dependencies = [
···

 [[package]]
 name = "strsim"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+
+[[package]]
+name = "strsim"
 version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
···
 version = "2.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]

 [[package]]
 name = "syn"
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···

 [[package]]
 name = "tokio"
-version = "1.45.1"
+version = "1.47.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779"
+checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35"
 dependencies = [
  "backtrace",
  "bytes",
+ "io-uring",
  "libc",
  "mio",
  "parking_lot",
  "pin-project-lite",
  "signal-hook-registry",
- "socket2",
+ "slab",
+ "socket2 0.6.0",
  "tokio-macros",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b"
 dependencies = [
- "rustls 0.23.28",
+ "rustls 0.23.31",
+ "tokio",
+]
+
+[[package]]
+name = "tokio-stream"
+version = "0.1.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
+dependencies = [
+ "futures-core",
+ "pin-project-lite",
  "tokio",
 ]

···
 dependencies = [
  "log",
  "pin-project-lite",
+ "tracing-attributes",
  "tracing-core",
 ]

 [[package]]
+name = "tracing-attributes"
+version = "0.1.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.103",
+]
+
+[[package]]
 name = "tracing-core"
 version = "0.1.33"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "twox-hash"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b907da542cbced5261bd3256de1b3a1bf340a3d37f93425a07362a1d687de56"
+dependencies = [
+ "rand 0.9.1",
+]
+
+[[package]]
 name = "typenum"
 version = "1.18.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "uncased"
+version = "0.9.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697"
+dependencies = [
+ "version_check",
+]
+
+[[package]]
 name = "unicase"
 version = "2.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"

 [[package]]
+name = "unicode-xid"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853"
+
+[[package]]
+name = "unsafe-libyaml"
+version = "0.2.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861"
+
+[[package]]
 name = "unsigned-varint"
 version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "log",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
  "wasm-bindgen-shared",
 ]

···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
  "wasm-bindgen-backend",
  "wasm-bindgen-shared",
 ]
···
 name = "who-am-i"
 version = "0.1.0"
 dependencies = [
- "atrium-api 0.25.4",
- "atrium-common 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-api",
+ "atrium-common",
  "atrium-identity",
  "atrium-oauth",
  "axum",
···
 checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d"

 [[package]]
+name = "wildmatch"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68ce1ab1f8c62655ebe1350f589c61e505cf94d385bc6a12899442d9081e71fd"
+
+[[package]]
 name = "winapi"
 version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "x509-parser"
+version = "0.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4569f339c0c402346d4a75a9e39cf8dad310e287eef1ff56d4c68e5067f53460"
+dependencies = [
+ "asn1-rs",
+ "data-encoding",
+ "der-parser",
+ "lazy_static",
+ "nom",
+ "oid-registry",
+ "rusticata-macros",
+ "thiserror 2.0.12",
+ "time",
+]
+
+[[package]]
 name = "xxhash-rust"
 version = "0.8.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3"

 [[package]]
+name = "yasna"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd"
+dependencies = [
+ "time",
+]
+
+[[package]]
 name = "yoke"
 version = "0.7.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
  "synstructure",
 ]

···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
  "synstructure",
 ]

···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.103",
 ]

 [[package]]
+1 -1 Makefile
···
 	cargo test --all-features

 fmt:
-	cargo fmt --package links --package constellation --package ufos --package spacedust --package who-am-i
+	cargo fmt --package links --package constellation --package ufos --package spacedust --package who-am-i --package slingshot
 	cargo +nightly fmt --package jetstream

 clippy:
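With the Makefile and CI kept in sync, the local workflow stays the usual one. A sketch (assuming `make clippy` mirrors the CI clippy step named in the workflow hunk above):

```bash
make fmt     # rewrites formatting in place, now covering slingshot too
make clippy  # lint pass; target defined just below this hunk
```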
-496
cozy-setup (move to another repo).md
-496
cozy-setup (move to another repo).md
···
1
-
cozy-ucosm
2
-
3
-
4
-
## gateway
5
-
6
-
- tailscale (exit node enabled)
7
-
-> allow ipv4 and ipv6 forwarding
8
-
- caddy
9
-
10
-
```bash
11
-
apt install golang
12
-
go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest
13
-
go/bin/xcaddy build \
14
-
--with github.com/caddyserver/cache-handler \
15
-
--with github.com/darkweak/storages/badger/caddy \
16
-
--with github.com/mholt/caddy-ratelimit
17
-
# then https://caddyserver.com/docs/running#manual-installation
18
-
19
-
mkdir /var/cache/caddy-badger
20
-
chown -R caddy:caddy /var/cache/caddy-badger/
21
-
```
22
-
23
-
- `/etc/caddy/Caddyfile`
24
-
25
-
```
26
-
{
27
-
cache {
28
-
badger
29
-
api {
30
-
prometheus
31
-
}
32
-
}
33
-
}
34
-
35
-
links.bsky.bad-example.com {
36
-
reverse_proxy link-aggregator:6789
37
-
38
-
@browser `{header.Origin.startsWith("Mozilla/5.0")`
39
-
rate_limit {
40
-
zone global_burst {
41
-
key {remote_host}
42
-
events 10
43
-
window 1s
44
-
}
45
-
zone global_general {
46
-
key {remote_host}
47
-
events 100
48
-
window 60s
49
-
log_key true
50
-
}
51
-
zone website_harsh_limit {
52
-
key {header.Origin}
53
-
match {
54
-
expression {header.User-Agent}.startsWith("Mozilla/5.0")
55
-
}
56
-
events 1000
57
-
window 30s
58
-
log_key true
59
-
}
60
-
}
61
-
respond /souin-api/metrics "denied" 403 # does not work
62
-
cache {
63
-
ttl 3s
64
-
stale 1h
65
-
default_cache_control public, s-maxage=3
66
-
badger {
67
-
path /var/cache/caddy-badger/links
68
-
}
69
-
}
70
-
}
71
-
72
-
gateway:80 {
73
-
metrics
74
-
cache
75
-
}
76
-
```
77
-
well... the gateway fell over IMMEDIATELY with like 2 req/sec from deletions, with that ^^ config. for now i removed everything except the reverse proxy config + normal caddy metrics and it's running fine on vanilla caddy. i did try reducing the rate-limiting configs to a single, fixed-key global limit but it still ate all the ram and died. maybe badger w/ the cache config was still a problem. maybe it would have been ok on a machine with more than 1GB mem.
78
-
79
-
80
-
alternative proxies:
81
-
82
-
- nginx. i should probably just use this. acme-client is a piece of cake to set up, and i know how to configure it.
83
-
- haproxy. also kind of familiar, it's old and stable. no idea how it handle low-mem (our 1gb) vs nginx.
84
-
- sozu. popular rust thing, fast. doesn't have rate-limiting or cache feature?
85
-
- rpxy. like caddy (auto-tls) but in rust and actually fast? has an "experimental" cache feature. but the cache feature looks good.
86
-
- rama. build-your-own proxy. not sure that it has both cache and limiter in their standard features?
87
-
- pingora. build-your-own cloudflare, so like, probably stable. has tools for cache and limiting. low-mem...?
88
-
- cache stuff in pingora seems a little... hit and miss (byeeeee). only a test impl for Storage for the main cache feature?
89
-
- but the rate-limiter has a guide: https://github.com/cloudflare/pingora/blob/main/docs/user_guide/rate_limiter.md
90
-
91
-
what i want is low-resource reverse proxy with built-in rate-limiting and caching. but maybe cache (and/or ratelimiting) could be external to the reverse proxy
92
-
- varnish is a dedicated cache. has https://github.com/varnish/varnish-modules/blob/master/src/vmod_vsthrottle.vcc
93
-
- apache traffic control has experimental rate-limiting plugins
94
-
95
-
96
-
- victoriametrics
97
-
98
-
```bash
99
-
curl -LO https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.109.1/victoria-metrics-linux-amd64-v1.109.1.tar.gz
100
-
tar xzf victoria-metrics-linux-amd64-v1.109.1.tar.gz
101
-
# and then https://docs.victoriametrics.com/quick-start/#starting-vm-single-from-a-binary
102
-
sudo mkdir /etc/victoria-metrics && sudo chown -R victoriametrics:victoriametrics /etc/victoria-metrics
103
-
104
-
```
105
-
106
-
- `/etc/victoria-metrics/prometheus.yml`
107
-
108
-
```yaml
109
-
global:
110
-
scrape_interval: '15s'
111
-
112
-
scrape_configs:
113
-
- job_name: 'link_aggregator'
114
-
static_configs:
115
-
- targets: ['link-aggregator:8765']
116
-
- job_name: 'gateway:caddy'
117
-
static_configs:
118
-
- targets: ['gateway:80/metrics']
119
-
- job_name: 'gateway:cache'
120
-
static_configs:
121
-
- targets: ['gateway:80/souin-api/metrics']
122
-
```
123
-
124
-
- `ExecStart` in `/etc/systemd/system/victoriametrics.service`:
125
-
126
-
```
127
-
ExecStart=/usr/local/bin/victoria-metrics-prod -storageDataPath=/var/lib/victoria-metrics -retentionPeriod=90d -selfScrapeInterval=1m -promscrape.config=/etc/victoria-metrics/prometheus.yml
128
-
```
129
-
130
-
- grafana
131
-
132
-
followed `https://grafana.com/docs/grafana/latest/setup-grafana/installation/debian/#install-grafana-on-debian-or-ubuntu`
133
-
134
-
something something something then
135
-
136
-
```
137
-
sudo grafana-cli --pluginUrl https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.11.1/victoriametrics-datasource-v0.11.1.zip plugins install victoriametrics
138
-
```
139
-
140
-
- raspi node_exporter
141
-
142
-
```bash
143
-
curl -LO https://github.com/prometheus/node_exporter/releases/download/v1.8.2/node_exporter-1.8.2.linux-armv7.tar.gz
144
-
tar xzf node_exporter-1.8.2.linux-armv7.tar.gz
145
-
sudo cp node_exporter-1.8.2.linux-armv7/node_exporter /usr/local/bin/
146
-
sudo useradd --no-create-home --shell /bin/false node_exporter
147
-
sudo nano /etc/systemd/system/node_exporter.service
148
-
# [Unit]
149
-
# Description=Node Exporter
150
-
# Wants=network-online.target
151
-
# After=network-online.target
152
-
153
-
# [Service]
154
-
# User=node_exporter
155
-
# Group=node_exporter
156
-
# Type=simple
157
-
# ExecStart=/usr/local/bin/node_exporter
158
-
# Restart=always
159
-
# RestartSec=3
160
-
161
-
# [Install]
162
-
# WantedBy=multi-user.target
163
-
sudo systemctl daemon-reload
164
-
sudo systemctl enable node_exporter.service
165
-
sudo systemctl start node_exporter.service
166
-
```
167
-
168
-
todo: get raspi vcgencmd outputs into metrics
169
-
170
-
- nginx on gateway
171
-
172
-
```nginx
173
-
# in http
174
-
175
-
##
176
-
# cozy cache
177
-
##
178
-
proxy_cache_path /var/cache/nginx keys_zone=cozy_zone:10m;
179
-
180
-
##
181
-
# cozy limit
182
-
##
183
-
limit_req_zone $binary_remote_addr zone=cozy_ip_limit:10m rate=50r/s;
184
-
limit_req_zone $server_name zone=cozy_global_limit:10m rate=1000r/s;
185
-
186
-
# in sites-available/constellation.microcosm.blue
187
-
188
-
upstream cozy_link_aggregator {
189
-
server link-aggregator:6789;
190
-
keepalive 16;
191
-
}
192
-
193
-
server {
194
-
listen 8080;
195
-
listen [::]:8080;
196
-
197
-
server_name constellation.microcosm.blue;
198
-
199
-
proxy_cache cozy_zone;
200
-
proxy_cache_background_update on;
201
-
proxy_cache_key "$scheme$proxy_host$uri$is_args$args$http_accept";
202
-
proxy_cache_lock on; # make simultaneous requests for the same uri wait for it to appear in cache instead of hitting origin
203
-
proxy_cache_lock_age 1s;
204
-
proxy_cache_lock_timeout 2s;
205
-
proxy_cache_valid 10s; # default -- should be explicitly set in the response headers
206
-
proxy_cache_valid any 15s; # non-200s default
207
-
proxy_read_timeout 5s;
208
-
proxy_send_timeout 15s;
209
-
proxy_socket_keepalive on;
210
-
211
-
limit_req zone=cozy_ip_limit nodelay burst=100;
212
-
limit_req zone=cozy_global_limit;
213
-
limit_req_status 429;
214
-
215
-
location / {
216
-
proxy_pass http://cozy_link_aggregator;
217
-
include proxy_params;
218
-
proxy_http_version 1.1;
219
-
proxy_set_header Connection ""; # for keepalive
220
-
}
221
-
}
222
-
```
223
-
224
-
also `systemctl edit nginx` and paste
225
-
226
-
```
227
-
[Service]
228
-
Restart=always
229
-
```
230
-
231
-
—https://serverfault.com/a/1003373
232
-
233
-
now making browsers redirect to the microcosm.blue url:
234
-
235
-
```
236
-
[...]
237
-
server_name links.bsky.bad-example.com;
238
-
239
-
add_header Access-Control-Allow-Origin * always; # bit of hack to have it here but nginx doesn't like it in the `if`
240
-
if ($http_user_agent ~ ^Mozilla/) {
241
-
# for now send *browsers* to the new location, hopefully without impacting api requests
242
-
# (yeah we're doing a UA test here and content-negotiation in the app. whatever.)
243
-
return 301 https://constellation.microcosm.blue$request_uri;
244
-
}
245
-
[...]
246
-
```
247
-
248
-
- nginx metrics
249
-
250
-
- download nginx-prometheus-exporter
251
-
https://github.com/nginx/nginx-prometheus-exporter/releases/download/v1.4.1/nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
252
-
253
-
- err actually going to make mistakes and try with snap
254
-
`snap install nginx-prometheus-exporter`
255
-
- so it got a binary for me but no systemd task set up. boooo.
256
-
`snap remove nginx-prometheus-exporter`
257
-
258
-
- ```bash
259
-
curl -LO https://github.com/nginx/nginx-prometheus-exporter/releases/download/v1.4.1/nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
260
-
tar xzf nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
261
-
mv nginx-prometheus-exporter /usr/local/bin
262
-
useradd --no-create-home --shell /bin/false nginx-prometheus-exporter
263
-
nano /etc/systemd/system/nginx-prometheus-exporter.service
264
-
# [Unit]
265
-
# Description=NGINX Exporter
266
-
# Wants=network-online.target
267
-
# After=network-online.target
268
-
269
-
# [Service]
270
-
# User=nginx-prometheus-exporter
271
-
# Group=nginx-prometheus-exporter
272
-
# Type=simple
273
-
# ExecStart=/usr/local/bin/nginx-prometheus-exporter --nginx.scrape-uri=http://gateway:8080/stub_status --web.listen-address=gateway:9113
274
-
# Restart=always
275
-
# RestartSec=3
276
-
277
-
# [Install]
278
-
# WantedBy=multi-user.target
279
-
systemctl daemon-reload
280
-
systemctl start nginx-prometheus-exporter.service
281
-
systemctl enable nginx-prometheus-exporter.service
282
-
```
283
-
284
-
- nginx `/etc/nginx/sites-available/gateway-nginx-status`
285
-
286
-
```nginx
287
-
server {
288
-
listen 8080;
289
-
listen [::]:8080;
290
-
291
-
server_name gateway;
292
-
293
-
location /stub_status {
294
-
stub_status;
295
-
}
296
-
location / {
297
-
return 404;
298
-
}
299
-
}
300
-
```
301
-
302
-
```bash
303
-
ln -s /etc/nginx/sites-available/gateway-nginx-status /etc/nginx/sites-enabled/
304
-
```
305
-
306
-
307
-
## bootes (pi5)
308
-
309
-
- mount sd card, touch `ssh` file, and `echo "pi:$(echo raspberry | openssl passwd -6 -stdin)" > userconf.txt`
310
-
- raspi-config: enable pcie 3, set hostname, enable ssh
311
-
- put ssh key into `.ssh/authorized_keys`
312
-
- put `PasswordAuthentication no` in `/etc/ssh/sshd_config`
313
-
- `sudo apt update && sudo apt upgrade`
314
-
- `sudo apt install xfsprogs`
315
-
- `sudo mkfs.xfs -L c11n-kv /dev/nvme0n1`
316
-
- `sudo mount /dev/nvme0n1 /mnt`
317
-
- set up tailscale
318
-
- `sudo tailscale up`
319
-
- `git clone https://github.com/atcosm/links.git`
320
-
- tailscale: disable bootes key expiry
321
-
- rustup `curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh`
322
-
- `cd links/constellation`
323
-
- `sudo apt install libssl-dev` needed
324
-
- `sudo apt install clang` needed for bindgen
325
-
- (in tmux) `cargo build --release`
326
-
- `mkdir ~/backup`
327
-
- `sudo mount.cifs "//truenas.local/folks data" /home/pi/backup -o user=phil,uid=pi`
328
-
- `sudo chown pi:pi /mnt/`
329
-
- `RUST_BACKTRACE=full cargo run --bin rocks-restore-from-backup --release -- --from-backup-dir "/home/pi/backup/constellation-index" --to-data-dir /mnt/constellation-index`
330
-
etc
331
-
- follow above `- raspi node_exporter`
332
-
- configure victoriametrics to scrape the new pi
333
-
- configure ulimit before starting! `ulimit -n 16384`
334
-
- `RUST_BACKTRACE=full cargo run --release -- --backend rocks --data /mnt/constellation-index/ --jetstream us-east-2 --backup /home/pi/backup/constellation-index --backup-interval 6 --max-old-backups 20`
335
-
- add server to nginx gateway upstream: ` server 100.123.79.12:6789; # bootes`
336
-
- stop backups from running on the older instance! `RUST_BACKTRACE=full cargo run --release -- --backend rocks --data /mnt/links-2.rocks/ --jetstream us-east-1`
337
-
- stop upstreaming requests to older instance in nginx
338
-
339
-
340
-
- systemd unit for running: `sudo nano /etc/systemd/system/constellation.service`
341
-
342
-
```ini
343
-
[Unit]
344
-
Description=Constellation backlinks index
345
-
After=network.target
346
-
347
-
[Service]
348
-
User=pi
349
-
WorkingDirectory=/home/pi/links/constellation
350
-
ExecStart=/home/pi/links/target/release/main --backend rocks --data /mnt/constellation-index/ --jetstream us-east-2 --backup /home/pi/backup/constellation-index --backup-interval 6 --max-old-backups 20
351
-
LimitNOFILE=16384
352
-
Restart=always
353
-
354
-
[Install]
355
-
WantedBy=multi-user.target
356
-
```
357
-
358
-
359
-
- todo: overlayfs? would need to figure out builds/updates still, also i guess logs are currently written to sd? (oof)
360
-
- todo: cross-compile for raspi?
361
-
362
-
---
363
-
364
-
some todos
365
-
366
-
- [x] tailscale: exit node
367
-
- [!] link_aggregator: use exit node
368
-
-> worked, but reverted for now: tailscale on raspi was consuming ~50% cpu for the jetstream traffic. this might be near its max since it would have been catching up at the time (max jetstream throughput) but it feels a bit too much. we have to trust the jetstream server and link_aggregator doesn't (yet) make any other external connections, so for now the raspi connects directly from my home again.
369
-
- [x] caddy: reverse proxy
370
-
- [x] build with cache and rate-limit plugins
371
-
- [x] configure systemd to keep it alive
372
-
- [x] configure caddy cache
373
-
- [x] configure caddy rate-limit
374
-
- [ ] configure ~caddy~ nginx to use a health check (once it's added)
375
-
- [ ] ~configure caddy to only expose cache metrics to tailnet :/~
376
-
- [x] make some grafana dashboards
377
-
- [ ] raspi: mount /dev/sda on boot
378
-
- [ ] raspi: run link_aggregator via systemd so it starts on startup (and restarts?)
379
-
380
-
- [x] use nginx instead of caddy
381
-
- [x] nginx: enable cache
382
-
- [x] nginx: rate-limit
383
-
- [ ] nginx: get metrics
384
-
385
-
386
-
387
-
388
-
---
389
-
390
-
nginx cors for constellation + small burst bump
391
-
392
-
```nginx
393
-
upstream cozy_constellation {
394
-
server <tailnet ip>:6789; # bootes; ip so that we don't race on reboot with tailscale coming up, which nginx doesn't like
395
-
keepalive 16;
396
-
}
397
-
398
-
server {
399
-
server_name constellation.microcosm.blue;
400
-
401
-
proxy_cache cozy_zone;
402
-
proxy_cache_background_update on;
403
-
proxy_cache_key "$scheme$proxy_host$uri$is_args$args$http_accept";
404
-
proxy_cache_lock on; # make simultaneous requests for the same uri wait for it to appear in cache instead of hitting origin
405
-
proxy_cache_lock_age 1s;
406
-
proxy_cache_lock_timeout 2s;
407
-
proxy_cache_valid 10s; # default -- should be explicitly set in the response headers
408
-
proxy_cache_valid any 2s; # non-200s default
409
-
proxy_read_timeout 5s;
410
-
proxy_send_timeout 15s;
411
-
proxy_socket_keepalive on;
412
-
413
-
# take over cors responsibility from upstream. `always` applies it to error responses.
414
-
proxy_hide_header 'Access-Control-Allow-Origin';
415
-
proxy_hide_header 'Access-Control-Allow-Methods';
416
-
proxy_hide_header 'Access-Control-Allow-Headers';
417
-
add_header 'Access-Control-Allow-Origin' '*' always;
418
-
add_header 'Access-Control-Allow-Methods' 'GET' always;
419
-
add_header 'Access-Control-Allow-Headers' '*' always;
420
-
421
-
422
-
limit_req zone=cozy_ip_limit nodelay burst=150;
423
-
limit_req zone=cozy_global_limit burst=1800;
424
-
limit_req_status 429;
425
-
426
-
location / {
427
-
proxy_pass http://cozy_constellation;
428
-
include proxy_params;
429
-
proxy_http_version 1.1;
430
-
proxy_set_header Connection ""; # for keepalive
431
-
}
432
-
433
-
434
-
listen 443 ssl; # managed by Certbot
435
-
ssl_certificate /etc/letsencrypt/live/constellation.microcosm.blue/fullchain.pem; # managed by Certbot
436
-
ssl_certificate_key /etc/letsencrypt/live/constellation.microcosm.blue/privkey.pem; # managed by Certbot
437
-
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
438
-
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
439
-
440
-
}
441
-
442
-
server {
443
-
if ($host = constellation.microcosm.blue) {
444
-
return 301 https://$host$request_uri;
445
-
} # managed by Certbot
446
-
447
-
448
-
server_name constellation.microcosm.blue;
449
-
listen 80;
450
-
return 404; # managed by Certbot
451
-
}
452
-
```
453
-
454
-
re-reading about `nodelay`, i should probably remove it -- nginx would then queue requests to upstream, but still service them at the configured limit. it's fine for my internet since the global limit isn't nodelay, but probably less "fair" to clients if there's contention around the global limit (earlier requests would get all of theirs serviced before later ones can get in the queue)
455
-
456
-
leaving it for now though.
457
-
458
-
459
-
### nginx logs to prom
460
-
461
-
```bash
462
-
curl -LO https://github.com/martin-helmich/prometheus-nginxlog-exporter/releases/download/v1.11.0/prometheus-nginxlog-exporter_1.11.0_linux_amd64.deb
463
-
apt install ./prometheus-nginxlog-exporter_1.11.0_linux_amd64.deb
464
-
systemctl enable prometheus-nginxlog-exporter.service
465
-
466
-
```
467
-
468
-
have it run as www-data (maybe not the best idea but...)
469
-
file `/usr/lib/systemd/system/prometheus-nginxlog-exporter.service`
470
-
set `User` under `[Service]` and comment out the `CapabilityBoundingSet` line
471
-
472
-
```systemd
473
-
User=www-data
474
-
#CapabilityBoundingSet=
475
-
```
476
-
477
-
in `nginx.conf` in `http`:
478
-
479
-
```nginx
480
-
log_format constellation_format "$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\"";
481
-
```
482
-
483
-
in `sites-available/constellation.microcosm.blue` in `server`:
484
-
485
-
```nginx
486
-
# log format must match prometheus-nginx-log-exporter
487
-
access_log /var/log/nginx/constellation-access.log constellation_format;
488
-
```
489
-
490
-
config at `/etc/prometheus-nginxlog-exporter.hcl`
491
-
492
-
493
-
494
-
```bash
495
-
systemctl start prometheus-nginxlog-exporter.service
496
-
```
+1
-1
jetstream/Cargo.toml
+1
-1
jetstream/Cargo.toml
···
10
10
11
11
[dependencies]
12
12
async-trait = "0.1.83"
13
-
atrium-api = { git = "https://github.com/uniphil/atrium", branch = "fix/nsid-allow-nonleading-name-digits", default-features = false, features = [
13
+
atrium-api = { version = "0.25.4", default-features = false, features = [
14
14
"namespace-appbsky",
15
15
] }
16
16
tokio = { version = "1.44.2", features = ["full", "sync", "time"] }
+496
legacy/cozy-setup (move to another repo).md
+496
legacy/cozy-setup (move to another repo).md
···
1
+
cozy-ucosm
2
+
3
+
4
+
## gateway
5
+
6
+
- tailscale (exit node enabled)
7
+
-> allow ipv4 and ipv6 forwarding
8
+
- caddy
9
+
10
+
```bash
11
+
apt install golang
12
+
go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest
13
+
go/bin/xcaddy build \
14
+
--with github.com/caddyserver/cache-handler \
15
+
--with github.com/darkweak/storages/badger/caddy \
16
+
--with github.com/mholt/caddy-ratelimit
17
+
# then https://caddyserver.com/docs/running#manual-installation
18
+
19
+
mkdir /var/cache/caddy-badger
20
+
chown -R caddy:caddy /var/cache/caddy-badger/
21
+
```
22
+
23
+
- `/etc/caddy/Caddyfile`
24
+
25
+
```
26
+
{
27
+
cache {
28
+
badger
29
+
api {
30
+
prometheus
31
+
}
32
+
}
33
+
}
34
+
35
+
links.bsky.bad-example.com {
36
+
reverse_proxy link-aggregator:6789
37
+
38
+
@browser `{header.User-Agent}.startsWith("Mozilla/5.0")`
39
+
rate_limit {
40
+
zone global_burst {
41
+
key {remote_host}
42
+
events 10
43
+
window 1s
44
+
}
45
+
zone global_general {
46
+
key {remote_host}
47
+
events 100
48
+
window 60s
49
+
log_key true
50
+
}
51
+
zone website_harsh_limit {
52
+
key {header.Origin}
53
+
match {
54
+
expression {header.User-Agent}.startsWith("Mozilla/5.0")
55
+
}
56
+
events 1000
57
+
window 30s
58
+
log_key true
59
+
}
60
+
}
61
+
respond /souin-api/metrics "denied" 403 # does not work
62
+
cache {
63
+
ttl 3s
64
+
stale 1h
65
+
default_cache_control public, s-maxage=3
66
+
badger {
67
+
path /var/cache/caddy-badger/links
68
+
}
69
+
}
70
+
}
71
+
72
+
gateway:80 {
73
+
metrics
74
+
cache
75
+
}
76
+
```
77
+
well... the gateway fell over IMMEDIATELY with like 2 req/sec from deletions, with that ^^ config. for now i removed everything except the reverse proxy config + normal caddy metrics and it's running fine on vanilla caddy. i did try reducing the rate-limiting configs to a single, fixed-key global limit but it still ate all the ram and died. maybe badger w/ the cache config was still a problem. maybe it would have been ok on a machine with more than 1GB mem.
78
+
79
+
80
+
alternative proxies:
81
+
82
+
- nginx. i should probably just use this. acme-client is a piece of cake to set up, and i know how to configure it.
83
+
- haproxy. also kind of familiar, it's old and stable. no idea how it handles low-mem (our 1gb) vs nginx.
84
+
- sozu. popular rust thing, fast. doesn't have rate-limiting or cache feature?
85
+
- rpxy. like caddy (auto-tls) but in rust and actually fast? its cache feature is marked "experimental" but looks good.
86
+
- rama. build-your-own proxy. not sure that it has both cache and limiter in its standard features?
87
+
- pingora. build-your-own cloudflare, so like, probably stable. has tools for cache and limiting. low-mem...?
88
+
- cache stuff in pingora seems a little... hit and miss (byeeeee). only a test impl for Storage for the main cache feature?
89
+
- but the rate-limiter has a guide: https://github.com/cloudflare/pingora/blob/main/docs/user_guide/rate_limiter.md
90
+
91
+
what i want is a low-resource reverse proxy with built-in rate-limiting and caching. but maybe the cache (and/or rate-limiting) could be external to the reverse proxy
92
+
- varnish is a dedicated cache. has https://github.com/varnish/varnish-modules/blob/master/src/vmod_vsthrottle.vcc
93
+
- apache traffic server has experimental rate-limiting plugins
94
+
95
+
96
+
- victoriametrics
97
+
98
+
```bash
99
+
curl -LO https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.109.1/victoria-metrics-linux-amd64-v1.109.1.tar.gz
100
+
tar xzf victoria-metrics-linux-amd64-v1.109.1.tar.gz
101
+
# and then https://docs.victoriametrics.com/quick-start/#starting-vm-single-from-a-binary
102
+
sudo mkdir /etc/victoria-metrics && sudo chown -R victoriametrics:victoriametrics /etc/victoria-metrics
103
+
104
+
```
105
+
106
+
- `/etc/victoria-metrics/prometheus.yml`
107
+
108
+
```yaml
109
+
global:
110
+
scrape_interval: '15s'
111
+
112
+
scrape_configs:
113
+
- job_name: 'link_aggregator'
114
+
static_configs:
115
+
- targets: ['link-aggregator:8765']
116
+
- job_name: 'gateway:caddy'
117
+
static_configs:
118
+
- targets: ['gateway:80']
metrics_path: '/metrics' # scrape targets are host:port only; the path goes in metrics_path
119
+
- job_name: 'gateway:cache'
120
+
static_configs:
121
+
- targets: ['gateway:80']
metrics_path: '/souin-api/metrics'
122
+
```
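quick sanity check that each scrape target actually answers before pointing victoriametrics at it (a sketch; assumes these hostnames resolve over the tailnet):

```bash
# hit each metrics endpoint by hand; a few metric lines each means
# victoriametrics should be able to scrape them too
for target in link-aggregator:8765/metrics gateway:80/metrics gateway:80/souin-api/metrics; do
  echo "== $target =="
  curl -sf "http://$target" | head -n 3 || echo "unreachable :("
done
```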
123
+
124
+
- `ExecStart` in `/etc/systemd/system/victoriametrics.service`:
125
+
126
+
```
127
+
ExecStart=/usr/local/bin/victoria-metrics-prod -storageDataPath=/var/lib/victoria-metrics -retentionPeriod=90d -selfScrapeInterval=1m -promscrape.config=/etc/victoria-metrics/prometheus.yml
128
+
```
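then the usual systemd dance for the unit above:

```bash
sudo systemctl daemon-reload
sudo systemctl enable --now victoriametrics.service
journalctl -u victoriametrics -f  # watch it come up and start scraping
```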
129
+
130
+
- grafana
131
+
132
+
followed `https://grafana.com/docs/grafana/latest/setup-grafana/installation/debian/#install-grafana-on-debian-or-ubuntu`
133
+
134
+
something something something then
135
+
136
+
```
137
+
sudo grafana-cli --pluginUrl https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.11.1/victoriametrics-datasource-v0.11.1.zip plugins install victoriametrics
138
+
```
139
+
140
+
- raspi node_exporter
141
+
142
+
```bash
143
+
curl -LO https://github.com/prometheus/node_exporter/releases/download/v1.8.2/node_exporter-1.8.2.linux-armv7.tar.gz
144
+
tar xzf node_exporter-1.8.2.linux-armv7.tar.gz
145
+
sudo cp node_exporter-1.8.2.linux-armv7/node_exporter /usr/local/bin/
146
+
sudo useradd --no-create-home --shell /bin/false node_exporter
147
+
sudo nano /etc/systemd/system/node_exporter.service
148
+
# [Unit]
149
+
# Description=Node Exporter
150
+
# Wants=network-online.target
151
+
# After=network-online.target
152
+
153
+
# [Service]
154
+
# User=node_exporter
155
+
# Group=node_exporter
156
+
# Type=simple
157
+
# ExecStart=/usr/local/bin/node_exporter
158
+
# Restart=always
159
+
# RestartSec=3
160
+
161
+
# [Install]
162
+
# WantedBy=multi-user.target
163
+
sudo systemctl daemon-reload
164
+
sudo systemctl enable node_exporter.service
165
+
sudo systemctl start node_exporter.service
166
+
```
167
+
168
+
todo: get raspi vcgencmd outputs into metrics
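sketch for that todo using node_exporter's textfile collector -- the script path, metric names, and textfile dir below are all made up, and node_exporter would need `--collector.textfile.directory` pointed at the same dir:

```bash
#!/bin/bash
# hypothetical /usr/local/bin/vcgencmd-metrics.sh, run from cron every minute:
# dumps SoC temperature + throttle flags in prometheus text format
OUT=/var/lib/node_exporter/textfile/vcgencmd.prom
TEMP=$(vcgencmd measure_temp | grep -oP '[0-9.]+' | head -n1)  # "temp=48.3'C" -> 48.3
THROTTLED=$(vcgencmd get_throttled | grep -oP '0x\K\w+')       # "throttled=0x0" -> 0
{
  echo "rpi_soc_temperature_celsius $TEMP"
  echo "rpi_throttled_flags $((16#$THROTTLED))"
} > "$OUT.tmp" && mv "$OUT.tmp" "$OUT"  # write-then-rename to avoid partial reads
```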
169
+
170
+
- nginx on gateway
171
+
172
+
```nginx
173
+
# in http
174
+
175
+
##
176
+
# cozy cache
177
+
##
178
+
proxy_cache_path /var/cache/nginx keys_zone=cozy_zone:10m;
179
+
180
+
##
181
+
# cozy limit
182
+
##
183
+
limit_req_zone $binary_remote_addr zone=cozy_ip_limit:10m rate=50r/s;
184
+
limit_req_zone $server_name zone=cozy_global_limit:10m rate=1000r/s;
185
+
186
+
# in sites-available/constellation.microcosm.blue
187
+
188
+
upstream cozy_link_aggregator {
189
+
server link-aggregator:6789;
190
+
keepalive 16;
191
+
}
192
+
193
+
server {
194
+
listen 8080;
195
+
listen [::]:8080;
196
+
197
+
server_name constellation.microcosm.blue;
198
+
199
+
proxy_cache cozy_zone;
200
+
proxy_cache_background_update on;
201
+
proxy_cache_key "$scheme$proxy_host$uri$is_args$args$http_accept";
202
+
proxy_cache_lock on; # make simultaneous requests for the same uri wait for it to appear in cache instead of hitting origin
203
+
proxy_cache_lock_age 1s;
204
+
proxy_cache_lock_timeout 2s;
205
+
proxy_cache_valid 10s; # default -- should be explicitly set in the response headers
206
+
proxy_cache_valid any 15s; # non-200s default
207
+
proxy_read_timeout 5s;
208
+
proxy_send_timeout 15s;
209
+
proxy_socket_keepalive on;
210
+
211
+
limit_req zone=cozy_ip_limit nodelay burst=100;
212
+
limit_req zone=cozy_global_limit;
213
+
limit_req_status 429;
214
+
215
+
location / {
216
+
proxy_pass http://cozy_link_aggregator;
217
+
include proxy_params;
218
+
proxy_http_version 1.1;
219
+
proxy_set_header Connection ""; # for keepalive
220
+
}
221
+
}
222
+
```
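rough smoke test for the limits from another machine (hostname/port assumed from the config above): with `rate=50r/s` and `burst=100`, a quick burst of 200 should come back as a mix of 200s and 429s:

```bash
# fire 200 requests as fast as curl will go and tally the status codes
for i in $(seq 1 200); do
  curl -s -o /dev/null -w '%{http_code}\n' 'http://constellation.microcosm.blue:8080/'
done | sort | uniq -c
```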
223
+
224
+
also `systemctl edit nginx` and paste
225
+
226
+
```
227
+
[Service]
228
+
Restart=always
229
+
```
230
+
231
+
—https://serverfault.com/a/1003373
232
+
233
+
now making browsers redirect to the microcosm.blue url:
234
+
235
+
```
236
+
[...]
237
+
server_name links.bsky.bad-example.com;
238
+
239
+
add_header Access-Control-Allow-Origin * always; # bit of hack to have it here but nginx doesn't like it in the `if`
240
+
if ($http_user_agent ~ ^Mozilla/) {
241
+
# for now send *browsers* to the new location, hopefully without impacting api requests
242
+
# (yeah we're doing a UA test here and content-negotiation in the app. whatever.)
243
+
return 301 https://constellation.microcosm.blue$request_uri;
244
+
}
245
+
[...]
246
+
```
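quick check that only browser-ish user agents get bounced:

```bash
# expect 301 for the browser UA, and a normal api response for the bot UA
curl -s -o /dev/null -w '%{http_code}\n' -A 'Mozilla/5.0 (X11; Linux)' 'https://links.bsky.bad-example.com/'
curl -s -o /dev/null -w '%{http_code}\n' -A 'my-project (@example.bsky.social)' 'https://links.bsky.bad-example.com/'
```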
247
+
248
+
- nginx metrics
249
+
250
+
- download nginx-prometheus-exporter
251
+
https://github.com/nginx/nginx-prometheus-exporter/releases/download/v1.4.1/nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
252
+
253
+
- err actually going to make mistakes and try with snap
254
+
`snap install nginx-prometheus-exporter`
255
+
- so it got a binary for me but no systemd task set up. boooo.
256
+
`snap remove nginx-prometheus-exporter`
257
+
258
+
- ```bash
259
+
curl -LO https://github.com/nginx/nginx-prometheus-exporter/releases/download/v1.4.1/nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
260
+
tar xzf nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
261
+
mv nginx-prometheus-exporter /usr/local/bin
262
+
useradd --no-create-home --shell /bin/false nginx-prometheus-exporter
263
+
nano /etc/systemd/system/nginx-prometheus-exporter.service
264
+
# [Unit]
265
+
# Description=NGINX Exporter
266
+
# Wants=network-online.target
267
+
# After=network-online.target
268
+
269
+
# [Service]
270
+
# User=nginx-prometheus-exporter
271
+
# Group=nginx-prometheus-exporter
272
+
# Type=simple
273
+
# ExecStart=/usr/local/bin/nginx-prometheus-exporter --nginx.scrape-uri=http://gateway:8080/stub_status --web.listen-address=gateway:9113
274
+
# Restart=always
275
+
# RestartSec=3
276
+
277
+
# [Install]
278
+
# WantedBy=multi-user.target
279
+
systemctl daemon-reload
280
+
systemctl start nginx-prometheus-exporter.service
281
+
systemctl enable nginx-prometheus-exporter.service
282
+
```
283
+
284
+
- nginx `/etc/nginx/sites-available/gateway-nginx-status`
285
+
286
+
```nginx
287
+
server {
288
+
listen 8080;
289
+
listen [::]:8080;
290
+
291
+
server_name gateway;
292
+
293
+
location /stub_status {
294
+
stub_status;
295
+
}
296
+
location / {
297
+
return 404;
298
+
}
299
+
}
300
+
```
301
+
302
+
```bash
303
+
ln -s /etc/nginx/sites-available/gateway-nginx-status /etc/nginx/sites-enabled/
304
+
```
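then validate, reload, and poke both endpoints (hostnames as configured above):

```bash
sudo nginx -t && sudo systemctl reload nginx
curl -s http://gateway:8080/stub_status             # raw nginx counters
curl -s http://gateway:9113/metrics | grep nginx_up # exporter can reach nginx (1 = yes)
```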
305
+
306
+
307
+
## bootes (pi5)
308
+
309
+
- mount sd card, touch `ssh` file, and `echo "pi:$(echo raspberry | openssl passwd -6 -stdin)" > userconf.txt`
310
+
- raspi-config: enable pcie 3, set hostname, enable ssh
311
+
- put ssh key into `.ssh/authorized_keys`
312
+
- put `PasswordAuthentication no` in `/etc/ssh/sshd_config`
313
+
- `sudo apt update && sudo apt upgrade`
314
+
- `sudo apt install xfsprogs`
315
+
- `sudo mkfs.xfs -L c11n-kv /dev/nvme0n1`
316
+
- `sudo mount /dev/nvme0n1 /mnt`
317
+
- set up tailscale
318
+
- `sudo tailscale up`
319
+
- `git clone https://github.com/atcosm/links.git`
320
+
- tailscale: disable bootes key expiry
321
+
- rustup `curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh`
322
+
- `cd links/constellation`
323
+
- `sudo apt install libssl-dev` needed
324
+
- `sudo apt install clang` needed for bindgen
325
+
- (in tmux) `cargo build --release`
326
+
- `mkdir ~/backup`
327
+
- `sudo mount.cifs "//truenas.local/folks data" /home/pi/backup -o user=phil,uid=pi`
328
+
- `sudo chown pi:pi /mnt/`
329
+
- `RUST_BACKTRACE=full cargo run --bin rocks-restore-from-backup --release -- --from-backup-dir "/home/pi/backup/constellation-index" --to-data-dir /mnt/constellation-index`
330
+
etc
331
+
- follow above `- raspi node_exporter`
332
+
- configure victoriametrics to scrape the new pi
333
+
- configure ulimit before starting! `ulimit -n 16384`
334
+
- `RUST_BACKTRACE=full cargo run --release -- --backend rocks --data /mnt/constellation-index/ --jetstream us-east-2 --backup /home/pi/backup/constellation-index --backup-interval 6 --max-old-backups 20`
335
+
- add server to nginx gateway upstream: ` server 100.123.79.12:6789; # bootes`
336
+
- stop backups from running on the older instance! `RUST_BACKTRACE=full cargo run --release -- --backend rocks --data /mnt/links-2.rocks/ --jetstream us-east-1`
337
+
- stop upstreaming requests to older instance in nginx
338
+
339
+
340
+
- systemd unit for running: `sudo nano /etc/systemd/system/constellation.service`
341
+
342
+
```ini
343
+
[Unit]
344
+
Description=Constellation backlinks index
345
+
After=network.target
346
+
347
+
[Service]
348
+
User=pi
349
+
WorkingDirectory=/home/pi/links/constellation
350
+
ExecStart=/home/pi/links/target/release/main --backend rocks --data /mnt/constellation-index/ --jetstream us-east-2 --backup /home/pi/backup/constellation-index --backup-interval 6 --max-old-backups 20
351
+
LimitNOFILE=16384
352
+
Restart=always
353
+
354
+
[Install]
355
+
WantedBy=multi-user.target
356
+
```
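and to turn it on:

```bash
sudo systemctl daemon-reload
sudo systemctl enable --now constellation.service
journalctl -u constellation -f
```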
357
+
358
+
359
+
- todo: overlayfs? would need to figure out builds/updates still, also i guess logs are currently written to sd? (oof)
360
+
- todo: cross-compile for raspi?
361
+
362
+
---
363
+
364
+
some todos
365
+
366
+
- [x] tailscale: exit node
367
+
- [!] link_aggregator: use exit node
368
+
-> worked, but reverted for now: tailscale on raspi was consuming ~50% cpu for the jetstream traffic. this might be near its max since it would have been catching up at the time (max jetstream throughput) but it feels a bit too much. we have to trust the jetstream server and link_aggregator doesn't (yet) make any other external connections, so for now the raspi connects directly from my home again.
369
+
- [x] caddy: reverse proxy
370
+
- [x] build with cache and rate-limit plugins
371
+
- [x] configure systemd to keep it alive
372
+
- [x] configure caddy cache
373
+
- [x] configure caddy rate-limit
374
+
- [ ] configure ~caddy~ nginx to use a health check (once it's added)
375
+
- [ ] ~configure caddy to only expose cache metrics to tailnet :/~
376
+
- [x] make some grafana dashboards
377
+
- [ ] raspi: mount /dev/sda on boot
378
+
- [ ] raspi: run link_aggregator via systemd so it starts on startup (and restarts?)
379
+
380
+
- [x] use nginx instead of caddy
381
+
- [x] nginx: enable cache
382
+
- [x] nginx: rate-limit
383
+
- [ ] nginx: get metrics
384
+
385
+
386
+
387
+
388
+
---
389
+
390
+
nginx cors for constellation + small burst bump
391
+
392
+
```nginx
393
+
upstream cozy_constellation {
394
+
server <tailnet ip>:6789; # bootes; ip so that we don't race on reboot with tailscale coming up, which nginx doesn't like
395
+
keepalive 16;
396
+
}
397
+
398
+
server {
399
+
server_name constellation.microcosm.blue;
400
+
401
+
proxy_cache cozy_zone;
402
+
proxy_cache_background_update on;
403
+
proxy_cache_key "$scheme$proxy_host$uri$is_args$args$http_accept";
404
+
proxy_cache_lock on; # make simultaneous requests for the same uri wait for it to appear in cache instead of hitting origin
405
+
proxy_cache_lock_age 1s;
406
+
proxy_cache_lock_timeout 2s;
407
+
proxy_cache_valid 10s; # default -- should be explicitly set in the response headers
408
+
proxy_cache_valid any 2s; # non-200s default
409
+
proxy_read_timeout 5s;
410
+
proxy_send_timeout 15s;
411
+
proxy_socket_keepalive on;
412
+
413
+
# take over cors responsibility from upstream. `always` applies it to error responses.
414
+
proxy_hide_header 'Access-Control-Allow-Origin';
415
+
proxy_hide_header 'Access-Control-Allow-Methods';
416
+
proxy_hide_header 'Access-Control-Allow-Headers';
417
+
add_header 'Access-Control-Allow-Origin' '*' always;
418
+
add_header 'Access-Control-Allow-Methods' 'GET' always;
419
+
add_header 'Access-Control-Allow-Headers' '*' always;
420
+
421
+
422
+
limit_req zone=cozy_ip_limit nodelay burst=150;
423
+
limit_req zone=cozy_global_limit burst=1800;
424
+
limit_req_status 429;
425
+
426
+
location / {
427
+
proxy_pass http://cozy_constellation;
428
+
include proxy_params;
429
+
proxy_http_version 1.1;
430
+
proxy_set_header Connection ""; # for keepalive
431
+
}
432
+
433
+
434
+
listen 443 ssl; # managed by Certbot
435
+
ssl_certificate /etc/letsencrypt/live/constellation.microcosm.blue/fullchain.pem; # managed by Certbot
436
+
ssl_certificate_key /etc/letsencrypt/live/constellation.microcosm.blue/privkey.pem; # managed by Certbot
437
+
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
438
+
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
439
+
440
+
}
441
+
442
+
server {
443
+
if ($host = constellation.microcosm.blue) {
444
+
return 301 https://$host$request_uri;
445
+
} # managed by Certbot
446
+
447
+
448
+
server_name constellation.microcosm.blue;
449
+
listen 80;
450
+
return 404; # managed by Certbot
451
+
}
452
+
```
453
+
454
+
re-reading about `nodelay`, i should probably remove it -- nginx would then queue requests to upstream, but still service them at the configured limit. it's fine for my internet since the global limit isn't nodelay, but probably less "fair" to clients if there's contention around the global limit (earlier requests would get all of theirs serviced before later ones can get in the queue)
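a client-side way to eyeball the difference (sketch): with `nodelay` everything inside the burst returns immediately; without it, queued requests get paced at the zone rate (~20ms apart at 50r/s):

```bash
# per-request total time for a quick burst of 10
for i in $(seq 1 10); do
  curl -s -o /dev/null -w '%{time_total}s\n' 'https://constellation.microcosm.blue/'
done
```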
455
+
456
+
leaving it for now though.
457
+
458
+
459
+
### nginx logs to prom
460
+
461
+
```bash
462
+
curl -LO https://github.com/martin-helmich/prometheus-nginxlog-exporter/releases/download/v1.11.0/prometheus-nginxlog-exporter_1.11.0_linux_amd64.deb
463
+
apt install ./prometheus-nginxlog-exporter_1.11.0_linux_amd64.deb
464
+
systemctl enable prometheus-nginxlog-exporter.service
465
+
466
+
```
467
+
468
+
have it run as www-data (maybe not the best idea but...)
469
+
file `/usr/lib/systemd/system/prometheus-nginxlog-exporter.service`
470
+
set `User` under `[Service]` and comment out the `CapabilityBoundingSet` line
471
+
472
+
```systemd
473
+
User=www-data
474
+
#CapabilityBoundingSet=
475
+
```
476
+
477
+
in `nginx.conf` in `http`:
478
+
479
+
```nginx
480
+
log_format constellation_format "$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\"";
481
+
```
482
+
483
+
in `sites-available/constellation.microcosm.blue` in `server`:
484
+
485
+
```nginx
486
+
# log format must match prometheus-nginx-log-exporter
487
+
access_log /var/log/nginx/constellation-access.log constellation_format;
488
+
```
489
+
490
+
config at `/etc/prometheus-nginxlog-exporter.hcl`
491
+
492
+
493
+
494
+
```bash
495
+
systemctl start prometheus-nginxlog-exporter.service
496
+
```
+35
legacy/old-readme-details.md
+35
legacy/old-readme-details.md
···
1
+
[Constellation](./constellation/)
2
+
--------------------------------------------
3
+
4
+
A global atproto backlink index ✨
5
+
6
+
- Self hostable: handles the full write throughput of the global atproto firehose on a raspberry pi 4b + single SSD
7
+
- Storage efficient: less than 2GB/day disk consumption indexing all references in all lexicons and all non-atproto URLs
8
+
- Handles record deletion, account de/re-activation, and account deletion, ensuring accurate link counts and respecting users' data choices
9
+
- Simple JSON API
10
+
11
+
All social interactions in atproto tend to be represented by links (or references) between PDS records. This index can answer questions like "how many likes does a bsky post have", "who follows an account", "what are all the comments on a [frontpage](https://frontpage.fyi/) post", and more.
12
+
13
+
- **status**: works! api is unstable and likely to change, and no known instances have a full network backfill yet.
14
+
- source: [./constellation/](./constellation/)
15
+
- public instance: [constellation.microcosm.blue](https://constellation.microcosm.blue/)
16
+
17
+
_note: the public instance currently runs on a little raspberry pi in my house, feel free to use it! it comes with only best-effort uptime, no commitment to not breaking the api for now, and possible rate-limiting. if you want to be nice you can put your project name and bsky username (or email) in your user-agent header for api requests._
18
+
19
+
20
+
App: Spacedust
21
+
--------------
22
+
23
+
A notification subscription service 💫
24
+
25
+
using the same "link source" concept as [constellation](./constellation/), offer webhook notifications for new references created to records
26
+
27
+
- **status**: in design
28
+
29
+
30
+
Library: [links](./links/)
31
+
------------------------------------
32
+
33
+
A rust crate (not published on crates.io yet) for optimistically parsing links out of arbitrary atproto PDS records, and potentially canonicalizing them
34
+
35
+
- **status**: unstable, might remain an internal lib for constellation (and spacedust, soon)
+123
legacy/original-notes.md
+123
legacy/original-notes.md
···
1
+
---
2
+
3
+
4
+
old notes follow, ignore
5
+
------------------------
6
+
7
+
8
+
as far as i can tell, atproto lexicons today don't follow much of a convention for referencing across documents: sometimes it's a StrongRef, sometimes it's a DID, sometimes it's a bare at-uri. lexicon authors choose any old link-sounding key name for the key in their document.
9
+
10
+
it's pretty messy so embrace the mess: atproto wants to be part of the web, so this library will also extract URLs and other URIs if you want it to. all the links.
11
+
12
+
13
+
why
14
+
---
15
+
16
+
the atproto firehose that bluesky sprays at you will contain raw _contents_ from peoples' pdses. these are isolated, decontextualized updates. it's very easy to build some kinds of interesting downstream apps off of this feed.
17
+
18
+
- bluesky posts (firesky, deletions, )
19
+
- bluesky post stats (emojis, )
20
+
- trending keywords ()
21
+
22
+
but bringing almost any kind of _context_ into your project requires a big step up in complexity and potentially cost: you're entering "appview" territory. _how many likes does a post have? who follows this account?_
23
+
24
+
you own your atproto data: it's kept in your personal data repository (PDS) and no one else can write to it. when someone likes your post, they create a "like" record in their _own_ pds, and that like belongs to _them_, not to you/your post.
25
+
26
+
in the firehose you'll see an `app.bsky.feed.post` record created, with no details about who has liked it. then you'll see separate `app.bsky.feed.like` records show up for each like that comes in on that post, with no context about the post except a random-looking reference to it. storing these so you can correlate them later is up to you!
27
+
28
+
**so, why**
29
+
30
+
everything is links, and they're a mess, but they all kinda work the same, so maybe some tooling can bring down that big step in complexity from firehose raw-content apps -> apps requiring any social context.
31
+
32
+
everything is links:
33
+
34
+
- likes
35
+
- follows
36
+
- blocks
37
+
- reposts
38
+
- quotes
39
+
40
+
some low-level things you could make from links:
41
+
42
+
- notification streams (part of ucosm)
43
+
- a global reverse index (part of ucosm)
44
+
45
+
i think that making these low-level services as easy to use as jetstream could open up pathways for building more atproto apps that operate at full scale with interesting features for reasonable effort at low cost to operate.
46
+
47
+
48
+
extracting links
49
+
---------------
50
+
51
+
52
+
- low-level: pass a &str of a field value and get a parsed link back
53
+
54
+
- med-level: pass a &str of record in json form and get a list of parsed links + json paths back. (todo: should also handle dag-cbor prob?)
55
+
56
+
- high-ish level: pass the json record and maybe apply some pre-loaded rules based on known lexicons to get the best result.
57
+
58
+
for now, a link is only considered if it matches for the entire value of the record's field -- links embedded in text content are not included. note that urls in bluesky posts _will_ still be extracted, since they are broken out into facets.
59
+
60
+
61
+
resolving / canonicalizing links
62
+
--------------------------------
63
+
64
+
65
+
### at-uris
66
+
67
+
every at-uri has at least two equivalent forms, one with a `DID`, and one with an account handle. the at-uri spec [illustrates this by example](https://atproto.com/specs/at-uri-scheme):
68
+
69
+
- `at://did:plc:44ybard66vv44zksje25o7dz/app.bsky.feed.post/3jwdwj2ctlk26`
70
+
- `at://bnewbold.bsky.team/app.bsky.feed.post/3jwdwj2ctlk26`
71
+
72
+
some applications, like a reverse link index, may wish to canonicalize at-uris to a single form. the `DID`-form is stable as an account changes its handle and probably the right choice to canonicalize to, but maybe some apps would actually prefer to canonicalize to handles?
73
+
74
+
hopefully atrium will make it easy to resolve at-uris.
75
+
76
+
77
+
### urls
78
+
79
+
canonicalizing URLs is more annoying but also a bit more established. lots of details.
80
+
81
+
- do we have to deal with punycode?
82
+
- follow redirects (todo: only permanent ones, or all?)
83
+
- check for rel=canonical http header and possibly follow it
84
+
- check link rel=canonical meta tag and possibly follow it
85
+
- do we need to check site maps??
86
+
- do we have to care at all about AMP?
87
+
- do we want anything to do with url shorteners??
88
+
- how do multilingual sites affect this?
89
+
- do we have to care about `script type="application/ld+json"` ???
90
+
91
+
ugh. is there a crate for this.
92
+
93
+
94
+
### relative uris?
95
+
96
+
links might be relative, in which case they might need to be made absolute before being useful. is that a concern for this library, or up to the user? (seems like we might not have the context here to determine the absolute form)
97
+
98
+
99
+
### canonicalizing
100
+
101
+
there should be a few async functions available to canonicalize already-parsed links.
102
+
103
+
- what happens if a link can't be resolved?
104
+
105
+
106
+
---
107
+
108
+
- using `tinyjson` because it's nice -- maybe should switch to serde_json to share deps with atrium?
109
+
110
+
- would use atrium for parsing at-uris, but it's not in there. there's a did-only version in the non-lib commands.rs. its identifier parser is strict to did + handle, which makes sense, but for our purposes we might want to allow unknown methods too?
111
+
112
+
- rsky-syntax has an aturi
113
+
- adenosine also
114
+
- might come back to these
115
+
116
+
117
+
-------
118
+
119
+
rocks
120
+
121
+
```bash
122
+
ROCKSDB_LIB_DIR=/nix/store/z2chn0hsik0clridr8mlprx1cngh1g3c-rocksdb-9.7.3/lib/ cargo build
123
+
```
+196
legacy/ufos ops (move to micro-ops).md
+196
legacy/ufos ops (move to micro-ops).md
···
1
+
ufos ops
2
+
3
+
btrfs snapshots: snapper
4
+
5
+
```bash
6
+
sudo apt install snapper
7
+
sudo snapper -c ufos-db create-config /mnt/ufos-db
8
+
9
+
# edit /etc/snapper/configs/ufos-db
10
+
# change
11
+
TIMELINE_MIN_AGE="1800"
12
+
TIMELINE_LIMIT_HOURLY="10"
13
+
TIMELINE_LIMIT_DAILY="10"
14
+
TIMELINE_LIMIT_WEEKLY="0"
15
+
TIMELINE_LIMIT_MONTHLY="10"
16
+
TIMELINE_LIMIT_YEARLY="10"
17
+
# to
18
+
TIMELINE_MIN_AGE="1800"
19
+
TIMELINE_LIMIT_HOURLY="22"
20
+
TIMELINE_LIMIT_DAILY="4"
21
+
TIMELINE_LIMIT_WEEKLY="0"
22
+
TIMELINE_LIMIT_MONTHLY="0"
23
+
TIMELINE_LIMIT_YEARLY="0"
24
+
```
25
+
26
+
this should be enough?
27
+
28
+
list snapshots:
29
+
30
+
```bash
31
+
sudo snapper -c ufos-db list
32
+
```
33
+
34
+
systemd
35
+
36
+
create file: `/etc/systemd/system/ufos.service`
37
+
38
+
```ini
39
+
[Unit]
40
+
Description=UFOs-API
41
+
After=network.target
42
+
43
+
[Service]
44
+
User=pi
45
+
WorkingDirectory=/home/pi/
46
+
ExecStart=/home/pi/ufos --jetstream us-west-2 --data /mnt/ufos-db/
47
+
Environment="RUST_LOG=info"
48
+
LimitNOFILE=16384
49
+
Restart=always
50
+
51
+
[Install]
52
+
WantedBy=multi-user.target
53
+
```
54
+
55
+
then
56
+
57
+
```bash
58
+
sudo systemctl daemon-reload
59
+
sudo systemctl enable ufos
60
+
sudo systemctl start ufos
61
+
```
62
+
63
+
monitor with
64
+
65
+
```bash
66
+
journalctl -u ufos -f
67
+
```
68
+
69
+
make sure a backup dir exists
70
+
71
+
```bash
72
+
mkdir /home/pi/backup
73
+
```
74
+
75
+
mount the NAS
76
+
77
+
```bash
78
+
sudo mount.cifs "//truenas.local/folks data" /home/pi/backup -o user=phil,uid=pi
79
+
```
80
+
81
+
manual rsync
82
+
83
+
```bash
84
+
sudo rsync -ahP --delete /mnt/ufos-db/.snapshots/1/snapshot/ backup/ufos/
85
+
```
86
+
87
+
backup script sketch
88
+
89
+
```bash
90
+
NUM=$(sudo snapper --csvout -c ufos-db list --type single --columns number | tail -n1)
91
+
sudo rsync -ahP --delete "/mnt/ufos-db/.snapshots/${NUM}/snapshot/" backup/ufos/
92
+
```
93
+
94
+
just crontab it?
95
+
96
+
`sudo crontab -e`
97
+
```bash
98
+
0 1/6 * * * rsync -ahP --delete "/mnt/ufos-db/.snapshots/$(snapper --csvout -c ufos-db list --type single --columns number | tail -n1)/snapshot/" /home/pi/backup/ufos/
99
+
```
100
+
101
+
^^ try once initial backup is done
102
+
103
+
104
+
--columns subvolume,number
105
+
106
+
subvolume
107
+
number
108
+
109
+
110
+
111
+
112
+
gateway: follow constellation for nginx->prom thing
113
+
114
+
config at `/etc/prometheus-nginxlog-exporter.hcl`
115
+
116
+
before: `/etc/prometheus-nginxlog-exporter.hcl`
117
+
118
+
```hcl
119
+
listen {
120
+
port = 4044
121
+
}
122
+
123
+
namespace "nginx" {
124
+
source = {
125
+
files = [
126
+
"/var/log/nginx/constellation-access.log"
127
+
]
128
+
}
129
+
130
+
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
131
+
132
+
labels {
133
+
app = "constellation"
134
+
}
135
+
136
+
relabel "cache_status" {
137
+
from = "upstream_cache_status"
138
+
}
139
+
}
140
+
```
141
+
142
+
after:
143
+
144
+
```hcl
145
+
listen {
146
+
port = 4044
147
+
}
148
+
149
+
namespace "constellation" {
150
+
source = {
151
+
files = [
152
+
"/var/log/nginx/constellation-access.log"
153
+
]
154
+
}
155
+
156
+
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
157
+
158
+
labels {
159
+
app = "constellation"
160
+
}
161
+
162
+
relabel "cache_status" {
163
+
from = "upstream_cache_status"
164
+
}
165
+
166
+
namespace_label = "vhost"
167
+
metrics_override = { prefix = "nginx" }
168
+
}
169
+
170
+
namespace "ufos" {
171
+
source = {
172
+
files = [
173
+
"/var/log/nginx/ufos-access.log"
174
+
]
175
+
}
176
+
177
+
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
178
+
179
+
labels {
180
+
app = "ufos"
181
+
}
182
+
183
+
relabel "cache_status" {
184
+
from = "upstream_cache_status"
185
+
}
186
+
187
+
namespace_label = "vhost"
188
+
metrics_override = { prefix = "nginx" }
189
+
}
190
+
```
191
+
192
+
193
+
```bash
194
+
systemctl start prometheus-nginxlog-exporter.service
195
+
```
196
+
+57
-129
readme.md
+57
-129
readme.md
···
1
-
microcosm: links
2
-
================
3
-
4
-
this repo contains libraries and apps for working with cross-record references in at-protocol.
5
-
1
+
microcosm HTTP APIs + rust crates
2
+
=================================
3
+
[](https://bsky.app/profile/microcosm.blue)
4
+
[](https://discord.gg/tcDfe4PGVB)
5
+
[](https://github.com/sponsors/uniphil/)
6
+
[](https://ko-fi.com/bad_example)
6
7
7
-
App: [Constellation](./constellation/)
8
-
--------------------------------------------
8
+
Welcome! Documentation is under active development. If you like reading API docs, you'll probably hit the ground running!
9
9
10
-
A global atproto backlink index ✨
10
+
Tutorials, how-to guides, and client SDK libraries are all in the works for gentler on-ramps, but are not quite ready yet. But don't let that stop you! Hop in the [microcosm discord](https://discord.gg/tcDfe4PGVB), or post questions and tag [@bad-example.com](https://bsky.app/profile/bad-example.com) on Bluesky if you get stuck anywhere.
11
11
12
-
- Self hostable: handles the full write throughput of the global atproto firehose on a raspberry pi 4b + single SSD
13
-
- Storage efficient: less than 2GB/day disk consumption indexing all references in all lexicons and all non-atproto URLs
14
-
- Handles record deletion, account de/re-activation, and account deletion, ensuring accurate link counts and respecting users' data choices
15
-
- Simple JSON API
12
+
> [!tip]
13
+
> This repository's primary home is moving to tangled: [@microcosm.blue/microcosm-rs](https://tangled.sh/@microcosm.blue/microcosm-rs). It will continue to be mirrored on [github](https://github.com/at-microcosm/microcosm-rs) for the foreseeable future, and it's fine to open issues or pulls in either place!
16
14
17
-
All social interactions in atproto tend to be represented by links (or references) between PDS records. This index can answer questions like "how many likes does a bsky post have", "who follows an account", "what are all the comments on a [frontpage](https://frontpage.fyi/) post", and more.
18
15
19
-
- **status**: works! api is unstable and likely to change, and no known instances have a full network backfill yet.
20
-
- source: [./constellation/](./constellation/)
21
-
- public instance: [constellation.microcosm.blue](https://constellation.microcosm.blue/)
22
-
23
-
_note: the public instance currently runs on a little raspberry pi in my house, feel free to use it! it comes with only best-effort uptime, no commitment to not breaking the api for now, and possible rate-limiting. if you want to be nice you can put your project name and bsky username (or email) in your user-agent header for api requests._
24
-
25
-
26
-
App: Spacedust
27
-
--------------
28
-
29
-
A notification subscription service 💫
30
-
31
-
using the same "link source" concept as [constellation](./constellation/), offer webhook notifications for new references created to records
32
-
33
-
- **status**: in design
34
-
35
-
36
-
Library: [links](./links/)
16
+
🌌 [Constellation](./constellation/)
37
17
------------------------------------
38
18
39
-
A rust crate (not published on crates.io yet) for optimistically parsing links out of arbitrary atproto PDS records, and potentially canonicalizing them
40
-
41
-
- **status**: unstable, might remain an internal lib for constellation (and spacedust, soon)
42
-
43
-
44
-
45
-
---
46
-
47
-
48
-
old notes follow, ignore
49
-
------------------------
50
-
51
-
52
-
as far as i can tell, atproto lexicons today don't follow much of a convention for referencing across documents: sometimes it's a StrongRef, sometimes it's a DID, sometimes it's a bare at-uri. lexicon authors choose any old link-sounding key name for the key in their document.
53
-
54
-
it's pretty messy so embrace the mess: atproto wants to be part of the web, so this library will also extract URLs and other URIs if you want it to. all the links.
55
-
56
-
57
-
why
58
-
---
59
-
60
-
the atproto firehose that bluesky sprays at you will contain raw _contents_ from peoples' pdses. these are isolated, decontextualized updates. it's very easy to build some kinds of interesting downstream apps off of this feed.
61
-
62
-
- bluesky posts (firesky, deletions, )
63
-
- bluesky post stats (emojis, )
64
-
- trending keywords ()
65
-
66
-
but bringing almost any kind of _context_ into your project requires a big step up in complexity and potentially cost: you're entering "appview" territory. _how many likes does a post have? who follows this account?_
67
-
68
-
you own your atproto data: it's kept in your personal data repository (PDS) and no one else can write to it. when someone likes your post, they create a "like" record in their _own_ pds, and that like belongs to _them_, not to you/your post.
69
-
70
-
in the firehose you'll see an `app.bsky.feed.post` record created, with no details about who has liked it. then you'll see separate `app.bsky.feed.like` records show up for each like that comes in on that post, with no context about the post except a random-looking reference to it. storing these so you can correlate them later is up to you!
71
-
72
-
**so, why**
73
-
74
-
everything is links, and they're a mess, but they all kinda work the same, so maybe some tooling can bring down that big step in complexity from firehose raw-content apps -> apps requiring any social context.
75
-
76
-
everything is links:
77
-
78
-
- likes
79
-
- follows
80
-
- blocks
81
-
- reposts
82
-
- quotes
83
-
84
-
some low-level things you could make from links:
85
-
86
-
- notification streams (part of ucosm)
87
-
- a global reverse index (part of ucosm)
88
-
89
-
i think that making these low-level services as easy to use as jetstream could open up pathways for building more atproto apps that operate at full scale with interesting features for reasonable effort at low cost to operate.
19
+
A global atproto interactions backlink index as a simple JSON API. Works with every lexicon, runs on a raspberry pi, consumes less than 2GiB of disk per day. Handles record deletion, account de/re-activation, and account deletion, ensuring accurate link counts while respecting users' data choices.
90
20
21
+
- Source: [./constellation/](./constellation/)
22
+
- [Public instance/API docs](https://constellation.microcosm.blue/)
23
+
- Status: used in production. APIs will change but backwards compatibility will be maintained as long as needed.
91
24
92
-
extracting links
93
-
---------------
94
25
26
+
🎇 [Spacedust](./spacedust/)
27
+
----------------------------
95
28
96
-
- low-level: pass a &str of a field value and get a parsed link back
29
+
A global atproto interactions firehose. Extracts all at-uris, DIDs, and URLs from every lexicon in the firehose, and exposes them over a websocket modelled after [jetstream](https://github.com/bluesky-social/jetstream).
97
30
98
-
- med-level: pass a &str of record in json form and get a list of parsed links + json paths back. (todo: should also handle dag-cbor prob?)
31
+
- Source: [./spacedust/](./spacedust/)
32
+
- [Public instance/API docs](https://spacedust.microcosm.blue/)
33
+
- Status: v0: the basics work and the APIs are in place! missing cursor replay, forward link storage, and delete event link hydration.
99
34
100
-
- high-ish level: pass the json record and maybe apply some pre-loaded rules based on known lexicons to get the best result.
35
+
### Demos:
101
36
102
-
for now, a link is only considered if it matches for the entire value of the record's field -- links embedded in text content are not included. note that urls in bluesky posts _will_ still be extracted, since they are broken out into facets.
37
+
- [Spacedust notifications](https://notifications.microcosm.blue/): web push notifications for _every_ atproto app
38
+
- [Zero-Bluesky real-time interaction-updating post embed](https://bsky.bad-example.com/zero-bluesky-realtime-embed/)
103
39
104
40
105
-
resolving / canonicalizing links
106
-
--------------------------------
41
+
🛰️ [Slingshot](./slingshot)
42
+
---------------------------
107
43
44
+
A fast, eager, production-grade edge cache for atproto records and identities. Pre-caches all records from the firehose and maintains a longer-term cache of requested records on disk.
108
45
109
-
### at-uris
46
+
- Source: [./slingshot/](./slingshot/)
47
+
- [Public instance/API docs](https://slingshot.microcosm.blue/)
48
+
- Status: v0: most XRPC APIs are working. cache storage is being reworked.
110
49
111
-
every at-uri has at least two equivalent forms, one with a `DID`, and one with an account handle. the at-uri spec [illustrates this by example](https://atproto.com/specs/at-uri-scheme):
112
50
113
-
- `at://did:plc:44ybard66vv44zksje25o7dz/app.bsky.feed.post/3jwdwj2ctlk26`
114
-
- `at://bnewbold.bsky.team/app.bsky.feed.post/3jwdwj2ctlk26`
51
+
🛸 [UFOs API](./ufos)
52
+
---------------------
115
53
116
-
some applications, like a reverse link index, may wish to canonicalize at-uris to a single form. the `DID`-form is stable as an account changes its handle and probably the right choice to canonicalize to, but maybe some apps would actually prefer to canonicalize to handles?
54
+
Timeseries stats and sample records for every [collection](https://atproto.com/guides/glossary#collection) ever seen in the atproto firehose. Unique users are counted in hyperloglog sketches enabling arbitrary cardinality aggregation across time buckets and/or NSIDs.
117
55
118
-
hopefully atrium will make it easy to resolve at-uris.
56
+
- Source: [./ufos/](./ufos/)
57
+
- [Public instance/API docs](https://ufos-api.microcosm.blue/)
58
+
- Status: Used in production. It has APIs and they work! Indexing needs improvement, and more indexes and more APIs over the data are planned.
119
59
60
+
> [!tip]
61
+
> See also: [UFOs atproto explorer](https://ufos.microcosm.blue/) built on UFOs API. ([source](https://github.com/at-microcosm/spacedust-utils))
120
62
121
-
### urls
122
63
123
-
canonicalizing URLs is more annoying but also a bit more established. lots of details.
64
+
💫 [Links](./links)
65
+
-------------------
124
66
125
-
- do we have to deal with punycode?
126
-
- follow redirects (todo: only permanent ones, or all?)
127
-
- check for rel=canonical http header and possibly follow it
128
-
- check link rel=canonical meta tag and possibly follow it
129
-
- do we need to check site maps??
130
-
- do we have to care at all about AMP?
131
-
- do we want anything to do with url shorteners??
132
-
- how do multilingual sites affect this?
133
-
- do we have to care about `script type="application/ld+json"` ???
67
+
Rust library for parsing and extracting links (at-uris, DIDs, and URLs) from atproto records.
134
68
135
-
ugh. is there a crate for this.
69
+
- Source: [./links/](./links/)
70
+
- Status: not yet published to crates.io; needs some rework
136
71
137
72
138
-
### relative uris?
139
-
140
-
links might be relative, in which case they might need to be made absolute before being useful. is that a concern for this library, or up to the user? (seems like we might not have the context here to determine the absolute form)
141
-
142
-
143
-
### canonicalizing
144
-
145
-
there should be a few async functions available to canonicalize already-parsed links.
146
-
147
-
- what happens if a link can't be resolved?
73
+
🛩️ [Jetstream](./jetstream)
74
+
---------------------------
148
75
76
+
A low-overhead jetstream client with cursor handling and automatic reconnect.
149
77
150
-
---
78
+
- Source: [./jetstream/](./jetstream/)
79
+
- Status: used in multiple apps in production, but not yet published to crates.io; some rework planned
151
80
152
-
- using `tinyjson` because it's nice -- maybe should switch to serde_json to share deps with atrium?
81
+
> [!tip]
82
+
> See also: [Rocketman](https://github.com/teal-fm/cadet/tree/main/rocketman), another excellent rust jetstream client which shares some lineage and _is_ published on crates.io.
153
83
154
-
- would use atrium for parsing at-uris, but it's not in there. there's a did-only version in the non-lib commands.rs. its identifier parser is strict to did + handle, which makes sense, but for our purposes we might want to allow unknown methods too?
155
84
156
-
- rsky-syntax has an aturi
157
-
- adenosine also
158
-
- might come back to these
159
85
86
+
🔭 Deprecated: [Who am I](./who-am-i)
87
+
-------------------------------------
160
88
161
-
-------
89
+
An identity bridge for microcosm demos that kinda worked. Fixing its problems is about equivalent to reinventing a lot of OIDC, so it's being retired.
162
90
163
-
rocks
91
+
- Source: [./who-am-i/](./who-am-i/)
92
+
- Status: ready for retirement.
164
93
165
-
```bash
166
-
ROCKSDB_LIB_DIR=/nix/store/z2chn0hsik0clridr8mlprx1cngh1g3c-rocksdb-9.7.3/lib/ cargo build
167
-
```
94
+
> [!warning]
95
+
> `who-am-i` is still in use for the Spacedust Notifications demo, but that will hopefully be migrated to use atproto OAuth directly instead.
+1
slingshot/.gitignore
+1
slingshot/.gitignore
···
1
+
foyer
+31
slingshot/Cargo.toml
+31
slingshot/Cargo.toml
···
1
+
[package]
2
+
name = "slingshot"
3
+
version = "0.1.0"
4
+
edition = "2024"
5
+
6
+
[dependencies]
7
+
atrium-api = { version = "0.25.4", default-features = false }
8
+
atrium-common = "0.1.2"
9
+
atrium-identity = "0.1.5"
10
+
atrium-oauth = "0.1.3"
11
+
clap = { version = "4.5.41", features = ["derive"] }
12
+
ctrlc = "3.4.7"
13
+
foyer = { version = "0.18.0", features = ["serde"] }
14
+
hickory-resolver = "0.25.2"
15
+
jetstream = { path = "../jetstream", features = ["metrics"] }
16
+
links = { path = "../links" }
17
+
log = "0.4.27"
18
+
metrics = "0.24.2"
19
+
metrics-exporter-prometheus = { version = "0.17.1", features = ["http-listener"] }
20
+
poem = { version = "3.1.12", features = ["acme", "static-files"] }
21
+
poem-openapi = { version = "5.1.16", features = ["scalar"] }
22
+
reqwest = { version = "0.12.22", features = ["json"] }
23
+
rustls = "0.23.31"
24
+
serde = { version = "1.0.219", features = ["derive"] }
25
+
serde_json = { version = "1.0.141", features = ["raw_value"] }
26
+
thiserror = "2.0.12"
27
+
time = { version = "0.3.41", features = ["serde"] }
28
+
tokio = { version = "1.47.0", features = ["full"] }
29
+
tokio-util = "0.7.15"
30
+
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
31
+
url = "2.5.4"
+93
slingshot/api-description.md
+93
slingshot/api-description.md
···
1
+
_A [gravitational slingshot](https://en.wikipedia.org/wiki/Gravity_assist) makes use of the gravity and relative movements of celestial bodies to accelerate a spacecraft and change its trajectory._
2
+
3
+
4
+
# Slingshot: edge record cache
5
+
6
+
Applications in [ATProtocol](https://atproto.com/) store data in users' own [PDSs](https://atproto.com/guides/self-hosting) (Personal Data Servers), which are distributed across thousands of independently-run servers all over the world. Trying to access this data poses challenges for client applications:
7
+
8
+
- A PDS might be far away with long network latency
9
+
- or may be on an unreliable connection
10
+
- or overloaded when you need it, or offline, or…
11
+
12
+
Large projects like [Bluesky](https://bsky.app/) control their performance and reliability by syncing all app-relevant data from PDSs into first-party databases. But for new apps, building out this additional data infrastructure adds significant effort and complexity up front.
13
+
14
+
**Slingshot is a fast, eager, production-grade cache of data in the [ATmosphere](https://atproto.com/)**, offering performance and reliability without custom infrastructure.
15
+
16
+
17
+
### Current status
18
+
19
+
> [!important]
20
+
> Slingshot is currently in a **v0, pre-release state**. There is one production instance and you can use it! Expect short downtimes for restarts as development progresses and lower cache hit-rates as the internal storage caches are adjusted and reset.
21
+
22
+
The core APIs will not change, since they are standard third-party `com.atproto` query APIs from ATProtocol.
23
+
24
+
25
+
## Eager caching
26
+
27
+
In many cases, Slingshot can cache the data you need *before* the first request!
28
+
29
+
Slingshot subscribes to the global [Firehose](https://atproto.com/specs/sync#firehose) of data updates. It keeps a short-term rolling indexed window of *all* data, and automatically promotes content likely to be requested to its longer-term main cache. _(automatic promotion is still a work in progress)_
30
+
31
+
When there is a cache miss, Slingshot can often still accelerate record fetching, since it keeps a large cache of resolved identities: it can usually request from the correct PDS without extra lookups.
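
A toy sketch of that miss path (illustrative only; the real resolver is in `slingshot/src/identity.rs` further down): with a warm DID-to-PDS map, a record-cache miss costs a single, well-aimed upstream request.

```rust
use std::collections::HashMap;

// Toy sketch only: because DID -> PDS resolutions are cached, a record-cache
// miss usually needs no PLC/DNS round-trips before the upstream getRecord.
fn upstream_get_record_url(
    pds_by_did: &HashMap<String, String>, // stand-in for the identity cache
    did: &str,
    collection: &str,
    rkey: &str,
) -> Option<String> {
    let pds = pds_by_did.get(did)?; // usually a hit
    Some(format!(
        "{pds}/xrpc/com.atproto.repo.getRecord?repo={did}&collection={collection}&rkey={rkey}"
    ))
}
```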
32
+
33
+
34
+
## Precise invalidation
35
+
36
+
The firehose includes **update** and **delete** events, which Slingshot uses to ensure stale and deleted data is removed within a very short window. Additionally, identity and account-level events can trigger rapid cleanup of data for deactivated and deleted accounts. _(some of this is still a work in progress)_
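
In miniature (illustrative only; the real logic is in `slingshot/src/consumer.rs` below), the invalidation rule is: deletes write a tombstone rather than evicting the key, so a later read can tell "known deleted" apart from "never cached".

```rust
use std::collections::HashMap;

// Toy sketch only: deletes insert a tombstone instead of removing the key.
enum Cached {
    Found(String), // record JSON
    Deleted,       // tombstone
}

fn apply_commit(cache: &mut HashMap<String, Cached>, at_uri: String, record: Option<String>) {
    match record {
        Some(json) => cache.insert(at_uri, Cached::Found(json)), // create/update
        None => cache.insert(at_uri, Cached::Deleted),           // delete event
    };
}
```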
37
+
38
+
39
+
## Low-trust
40
+
41
+
The "AT" in ATProtocol [stands for _Authenticated Transfer_](https://atproto.com/guides/glossary#at-protocol): all data is cryptographically signed, which makes it possible to broadcast data through third parties and trust that it's real _without_ having to directly contact the originating server.
42
+
43
+
Two core standard query APIs are supported to balance convenience and trust. They both fetch [records](https://atproto.com/guides/glossary#record):
44
+
45
+
### [`com.atproto.repo.getRecord`](#tag/comatproto-queries/get/xrpc/com.atproto.repo.getRecord)
46
+
47
+
- convenient `JSON` response format
48
+
- cannot be proven authentic
49
+
50
+
### [`com.atproto.sync.getRecord`](#tag/comatproto-queries/get/xrpc/com.atproto.sync.getRecord)
51
+
52
+
- [`DAG-CBOR`](https://atproto.com/specs/data-model)-encoded response requires extra libraries to decode, but
53
+
- includes a cryptographic proof of authenticity!
54
+
55
+
_(work on this endpoint is in progress)_
56
+
57
+
58
+
## Service proxying
59
+
60
+
Clients can proxy atproto queries through their own PDS with [Service Proxying](https://atproto.com/specs/xrpc#service-proxying), and this is supported by Slingshot. The Slingshot instance must be started with the `--domain` argument specified.
61
+
62
+
Service-proxied requests can specify a Slingshot instance via the `atproto-proxy` header:
63
+
64
+
```http
65
+
GET /xrpc/com.bad-example.identity.resolveMiniDoc?identifier=bad-example.com
66
+
Host: <your pds>
67
+
atproto-proxy: did:web:<slingshot domain>#slingshot
68
+
```
69
+
70
+
Where `<your pds>` is the user's own PDS host, and `<slingshot domain>` is the domain the Slingshot instance is deployed at (e.g. `slingshot.microcosm.blue`). See the [Service Proxying](https://atproto.com/specs/xrpc#service-proxying) docs for more.
71
+
72
+
> [!tip]
73
+
> Service proxying is supported but completely optional. All APIs are directly accessible over the public internet, and GeoDNS helps route users to the closest instance for the lowest possible latency. (_note: deploying multiple slingshot instances with GeoDNS is still TODO_)
74
+
75
+
76
+
## Ergonomic APIs
77
+
78
+
- Slingshot also offers variants of the `getRecord` endpoints that accept a full `at-uri` as a parameter, to save clients from needing to parse and validate all parts of a record location.
79
+
80
+
- Bi-directionally verifying identity endpoints, so you can directly exchange atproto [`handle`](https://atproto.com/guides/glossary#handle)s for [`DID`](https://atproto.com/guides/glossary#did-decentralized-id)s without extra steps, plus a convenient [Mini-Doc](#tag/slingshot-specific-queries/get/xrpc/com.bad-example.identity.resolveMiniDoc) verified identity summary.
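
For example, a minimal client-side sketch for the at-uri variant (assumptions: `reqwest`, `serde_json`, and `tokio` as client dependencies; the endpoint and `at_uri` parameter are as documented on this page):

```rust
use reqwest::Client;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // Fetch a record by full at-uri instead of separate repo/collection/rkey params.
    let record: serde_json::Value = Client::new()
        .get("https://slingshot.microcosm.blue/xrpc/com.bad-example.repo.getUriRecord")
        .query(&[(
            "at_uri",
            "at://did:plc:hdhoaan3xa3jiuq4fg4mefid/app.bsky.feed.like/3lv4ouczo2b2a",
        )])
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("{record:#}"); // pretty-print the JSON value
    Ok(())
}
```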
81
+
82
+
83
+
## Part of microcosm
84
+
85
+
[Microcosm](https://www.microcosm.blue/) is a collection of services and independent community-run infrastructure for ATProtocol.
86
+
87
+
Slingshot excels when combined with _shallow indexing_ services, which offer fast queries of global data relationships but return only references to the data records. Microcosm has a few!
88
+
89
+
- [🌌 Constellation](https://constellation.microcosm.blue/), a global backlink index (all social interactions in atproto are links!)
90
+
- [🎇 Spacedust](https://spacedust.microcosm.blue/), a firehose of all social interactions
91
+
92
+
> [!success]
93
+
> All microcosm projects are [open source](https://tangled.sh/@bad-example.com/microcosm-links). **You can help sustain Slingshot** and all of microcosm by becoming a [Github sponsor](https://github.com/sponsors/uniphil/) or a [Ko-fi supporter](https://ko-fi.com/bad_example)!
+7
slingshot/readme.md
+7
slingshot/readme.md
+80
slingshot/src/consumer.rs
+80
slingshot/src/consumer.rs
···
1
+
use crate::CachedRecord;
2
+
use crate::error::ConsumerError;
3
+
use foyer::HybridCache;
4
+
use jetstream::{
5
+
DefaultJetstreamEndpoints, JetstreamCompression, JetstreamConfig, JetstreamConnector,
6
+
events::{CommitOp, Cursor, EventKind},
7
+
};
8
+
use tokio_util::sync::CancellationToken;
9
+
10
+
pub async fn consume(
11
+
jetstream_endpoint: String,
12
+
cursor: Option<Cursor>,
13
+
no_zstd: bool,
14
+
shutdown: CancellationToken,
15
+
cache: HybridCache<String, CachedRecord>,
16
+
) -> Result<(), ConsumerError> {
17
+
let endpoint = DefaultJetstreamEndpoints::endpoint_or_shortcut(&jetstream_endpoint);
18
+
if endpoint == jetstream_endpoint {
19
+
log::info!("consumer: connecting jetstream at {endpoint}");
20
+
} else {
21
+
log::info!("consumer: connecting jetstream at {jetstream_endpoint} => {endpoint}");
22
+
}
23
+
let config: JetstreamConfig = JetstreamConfig {
24
+
endpoint,
25
+
compression: if no_zstd {
26
+
JetstreamCompression::None
27
+
} else {
28
+
JetstreamCompression::Zstd
29
+
},
30
+
replay_on_reconnect: true,
31
+
channel_size: 1024, // buffer up to ~1s of jetstream events
32
+
..Default::default()
33
+
};
34
+
let mut receiver = JetstreamConnector::new(config)?
35
+
.connect_cursor(cursor)
36
+
.await?;
37
+
38
+
log::info!("consumer: receiving messages..");
39
+
loop {
40
+
if shutdown.is_cancelled() {
41
+
log::info!("consumer: exiting for shutdown");
42
+
return Ok(());
43
+
}
44
+
let Some(mut event) = receiver.recv().await else {
45
+
log::error!("consumer: could not receive event, bailing");
46
+
break;
47
+
};
48
+
49
+
if event.kind != EventKind::Commit {
50
+
continue;
51
+
}
52
+
let Some(ref mut commit) = event.commit else {
53
+
log::warn!("consumer: commit event missing commit data, ignoring");
54
+
continue;
55
+
};
56
+
57
+
// TODO: something a bit more robust
58
+
let at_uri = format!(
59
+
"at://{}/{}/{}",
60
+
&*event.did, &*commit.collection, &*commit.rkey
61
+
);
62
+
63
+
if commit.operation == CommitOp::Delete {
64
+
cache.insert(at_uri, CachedRecord::Deleted);
65
+
} else {
66
+
let Some(record) = commit.record.take() else {
67
+
log::warn!("consumer: commit insert or update missing record, ignoring");
68
+
continue;
69
+
};
70
+
let Some(cid) = commit.cid.take() else {
71
+
log::warn!("consumer: commit insert or update missing CID, ignoring");
72
+
continue;
73
+
};
74
+
75
+
cache.insert(at_uri, CachedRecord::Found((cid, record).into()));
76
+
}
77
+
}
78
+
79
+
Err(ConsumerError::JetstreamEnded)
80
+
}
+93
slingshot/src/error.rs
+93
slingshot/src/error.rs
···
1
+
use crate::ErrorResponseObject;
2
+
use thiserror::Error;
3
+
4
+
#[derive(Debug, Error)]
5
+
pub enum ConsumerError {
6
+
#[error(transparent)]
7
+
JetstreamConnectionError(#[from] jetstream::error::ConnectionError),
8
+
#[error(transparent)]
9
+
JetstreamConfigValidationError(#[from] jetstream::error::ConfigValidationError),
10
+
#[error("jetstream ended")]
11
+
JetstreamEnded,
12
+
#[error("delay queue output dropped")]
13
+
DelayQueueOutputDropped,
14
+
}
15
+
16
+
#[derive(Debug, Error)]
17
+
pub enum ServerError {
18
+
#[error("server build error: {0}")]
19
+
AcmeBuildError(std::io::Error),
20
+
#[error("server exited: {0}")]
21
+
ServerExited(std::io::Error),
22
+
}
23
+
24
+
#[derive(Debug, Error)]
25
+
pub enum IdentityError {
26
+
#[error("whatever: {0}")]
27
+
WhateverError(String),
28
+
#[error("bad DID: {0}")]
29
+
BadDid(&'static str),
30
+
#[error("identity types got mixed up: {0}")]
31
+
IdentityValTypeMixup(String),
32
+
#[error("foyer error: {0}")]
33
+
FoyerError(#[from] foyer::Error),
34
+
35
+
#[error("failed to resolve: {0}")]
36
+
ResolutionFailed(#[from] atrium_identity::Error),
37
+
// #[error("identity resolved but no handle found for user")]
38
+
// NoHandle,
39
+
#[error("found handle {0:?} but it appears invalid: {1}")]
40
+
InvalidHandle(String, &'static str),
41
+
42
+
#[error("could not convert atrium did doc to partial mini doc: {0}")]
43
+
BadDidDoc(String),
44
+
45
+
#[error("wrong key for clearing refresh queue: {0}")]
46
+
RefreshQueueKeyError(&'static str),
47
+
}
48
+
49
+
#[derive(Debug, Error)]
50
+
pub enum HealthCheckError {
51
+
#[error("failed to send checkin: {0}")]
52
+
HealthCheckError(#[from] reqwest::Error),
53
+
}
54
+
55
+
#[derive(Debug, Error)]
56
+
pub enum MainTaskError {
57
+
#[error(transparent)]
58
+
ConsumerTaskError(#[from] ConsumerError),
59
+
#[error(transparent)]
60
+
ServerTaskError(#[from] ServerError),
61
+
#[error(transparent)]
62
+
IdentityTaskError(#[from] IdentityError),
63
+
#[error(transparent)]
64
+
HealthCheckError(#[from] HealthCheckError),
65
+
#[error("firehose cache failed to close: {0}")]
66
+
FirehoseCacheCloseError(foyer::Error),
67
+
}
68
+
69
+
#[derive(Debug, Error)]
70
+
pub enum RecordError {
71
+
#[error("identity error: {0}")]
72
+
IdentityError(#[from] IdentityError),
73
+
#[error("repo could not be validated as either a DID or an atproto handle")]
74
+
BadRepo,
75
+
#[error("could not get record: {0}")]
76
+
NotFound(&'static str),
77
+
#[error("could nto parse pds url: {0}")]
78
+
UrlParseError(#[from] url::ParseError),
79
+
#[error("reqwest send failed: {0}")]
80
+
SendError(reqwest::Error),
81
+
#[error("reqwest raised for status: {0}")]
82
+
StatusError(reqwest::Error),
83
+
#[error("reqwest failed to parse json: {0}")]
84
+
ParseJsonError(reqwest::Error),
85
+
#[error("upstream getRecord did not include a CID")]
86
+
MissingUpstreamCid,
87
+
#[error("upstream CID was not valid: {0}")]
88
+
BadUpstreamCid(String),
89
+
#[error("upstream atproto-looking bad request")]
90
+
UpstreamBadRequest(ErrorResponseObject),
91
+
#[error("upstream non-atproto bad request")]
92
+
UpstreamBadBadNotGoodRequest(reqwest::Error),
93
+
}
+22
slingshot/src/firehose_cache.rs
+22
slingshot/src/firehose_cache.rs
···
1
+
use crate::CachedRecord;
2
+
use foyer::{DirectFsDeviceOptions, Engine, HybridCache, HybridCacheBuilder};
3
+
use std::path::Path;
4
+
5
+
pub async fn firehose_cache(
6
+
cache_dir: impl AsRef<Path>,
7
+
) -> Result<HybridCache<String, CachedRecord>, String> {
8
+
let cache = HybridCacheBuilder::new()
9
+
.with_name("firehose")
10
+
.memory(64 * 2_usize.pow(20))
11
+
.with_weighter(|k: &String, v| k.len() + std::mem::size_of_val(v))
12
+
.storage(Engine::large())
13
+
.with_device_options(
14
+
DirectFsDeviceOptions::new(cache_dir)
15
+
.with_capacity(2_usize.pow(30)) // TODO: configurable (1GB to have something)
16
+
.with_file_size(16 * 2_usize.pow(20)), // note: this limits the max cached item size; beware jumbo records
17
+
)
18
+
.build()
19
+
.await
20
+
.map_err(|e| format!("foyer setup error: {e:?}"))?;
21
+
Ok(cache)
22
+
}
+32
slingshot/src/healthcheck.rs
+32
slingshot/src/healthcheck.rs
···
1
+
use crate::error::HealthCheckError;
2
+
use reqwest::Client;
3
+
use std::time::Duration;
4
+
use tokio::time::sleep;
5
+
use tokio_util::sync::CancellationToken;
6
+
7
+
pub async fn healthcheck(
8
+
endpoint: String,
9
+
shutdown: CancellationToken,
10
+
) -> Result<(), HealthCheckError> {
11
+
let client = Client::builder()
12
+
.user_agent(format!(
13
+
"microcosm slingshot v{} (dev: @bad-example.com)",
14
+
env!("CARGO_PKG_VERSION")
15
+
))
16
+
.no_proxy()
17
+
.timeout(Duration::from_secs(10))
18
+
.build()?;
19
+
20
+
loop {
21
+
tokio::select! {
22
+
res = client.get(&endpoint).send() => {
23
+
let _ = res
24
+
.and_then(|r| r.error_for_status())
25
+
.inspect_err(|e| log::error!("failed to send healthcheck: {e}"));
26
+
},
27
+
_ = shutdown.cancelled() => break,
28
+
}
29
+
sleep(Duration::from_secs(51)).await;
30
+
}
31
+
Ok(())
32
+
}
+524
slingshot/src/identity.rs
+524
slingshot/src/identity.rs
···
1
+
use hickory_resolver::{ResolveError, TokioResolver};
2
+
use std::collections::{HashSet, VecDeque};
3
+
use std::path::Path;
4
+
use std::sync::Arc;
5
+
/// for now we're gonna just keep doing more cache
6
+
///
7
+
/// plc.directory x foyer, ttl kept with data, refresh deferred to background on fetch
8
+
///
9
+
/// things we need:
10
+
///
11
+
/// 1. handle -> DID resolution: getRecord must accept a handle for `repo` param
12
+
/// 2. DID -> PDS resolution: so we know where to getRecord
13
+
/// 3. DID -> handle resolution: for bidirectional handle validation and in case we want to offer this
14
+
use std::time::Duration;
15
+
use tokio::sync::Mutex;
16
+
use tokio_util::sync::CancellationToken;
17
+
18
+
use crate::error::IdentityError;
19
+
use atrium_api::{
20
+
did_doc::DidDocument,
21
+
types::string::{Did, Handle},
22
+
};
23
+
use atrium_common::resolver::Resolver;
24
+
use atrium_identity::{
25
+
did::{CommonDidResolver, CommonDidResolverConfig, DEFAULT_PLC_DIRECTORY_URL},
26
+
handle::{AtprotoHandleResolver, AtprotoHandleResolverConfig, DnsTxtResolver},
27
+
};
28
+
use atrium_oauth::DefaultHttpClient; // it's probably not worth bringing all of atrium_oauth for this but
29
+
use foyer::{DirectFsDeviceOptions, Engine, HybridCache, HybridCacheBuilder};
30
+
use serde::{Deserialize, Serialize};
31
+
use time::UtcDateTime;
32
+
33
+
/// once we have something resolved, don't re-resolve until after this period
34
+
const MIN_TTL: Duration = Duration::from_secs(4 * 3600); // probably should have a max ttl
35
+
const MIN_NOT_FOUND_TTL: Duration = Duration::from_secs(60);
36
+
37
+
#[derive(Debug, Clone, Hash, PartialEq, Eq, Serialize, Deserialize)]
38
+
enum IdentityKey {
39
+
Handle(Handle),
40
+
Did(Did),
41
+
}
42
+
43
+
#[derive(Debug, Serialize, Deserialize)]
44
+
struct IdentityVal(UtcDateTime, IdentityData);
45
+
46
+
#[derive(Debug, Serialize, Deserialize)]
47
+
enum IdentityData {
48
+
NotFound,
49
+
Did(Did),
50
+
Doc(PartialMiniDoc),
51
+
}
52
+
53
+
/// partial representation of a com.bad-example.identity mini atproto doc
54
+
///
55
+
/// partial because the handle is not verified
56
+
#[derive(Debug, Clone, Serialize, Deserialize)]
57
+
pub struct PartialMiniDoc {
58
+
/// an atproto handle (**unverified**)
59
+
///
60
+
/// the first valid atproto handle from the did doc's aka
61
+
pub unverified_handle: Handle,
62
+
/// the did's atproto pds url (TODO: type this?)
63
+
///
64
+
/// note: atrium *does* actually parse it into a URI, it just doesn't return
65
+
/// that for some reason
66
+
pub pds: String,
67
+
/// for now we're just pulling this straight from the did doc
68
+
///
69
+
/// would be nice to type and validate it
70
+
///
71
+
/// this is the publicKeyMultibase from the did doc.
72
+
/// legacy key encoding not supported.
73
+
/// `id`, `type`, and `controller` must be checked, but aren't stored.
74
+
pub signing_key: String,
75
+
}
76
+
77
+
impl TryFrom<DidDocument> for PartialMiniDoc {
78
+
type Error = String;
79
+
fn try_from(did_doc: DidDocument) -> Result<Self, Self::Error> {
80
+
// must use the first valid handle
81
+
let mut unverified_handle = None;
82
+
let Some(ref doc_akas) = did_doc.also_known_as else {
83
+
return Err("did doc missing `also_known_as`".to_string());
84
+
};
85
+
for aka in doc_akas {
86
+
let Some(maybe_handle) = aka.strip_prefix("at://") else {
87
+
continue;
88
+
};
89
+
let Ok(valid_handle) = Handle::new(maybe_handle.to_string()) else {
90
+
continue;
91
+
};
92
+
unverified_handle = Some(valid_handle);
93
+
break;
94
+
}
95
+
let Some(unverified_handle) = unverified_handle else {
96
+
return Err("no valid atproto handles in `also_known_as`".to_string());
97
+
};
98
+
99
+
// atrium provides service endpoint getters
100
+
let Some(pds) = did_doc.get_pds_endpoint() else {
101
+
return Err("no valid pds service found".to_string());
102
+
};
103
+
104
+
// TODO can't use atrium's get_signing_key() because it fails to check type and controller
105
+
// so if we check those and reject it, we might miss a later valid key in the array
106
+
// (todo is to fix atrium)
107
+
// actually: atrium might be flexible for legacy reps. for now we're rejecting legacy rep.
108
+
109
+
// must use the first valid signing key
110
+
let mut signing_key = None;
111
+
let Some(verification_methods) = did_doc.verification_method else {
112
+
return Err("no verification methods found".to_string());
113
+
};
114
+
for method in verification_methods {
115
+
if method.id != format!("{}#atproto", did_doc.id) {
116
+
continue;
117
+
}
118
+
if method.r#type != "Multikey" {
119
+
continue;
120
+
}
121
+
if method.controller != did_doc.id {
122
+
continue;
123
+
}
124
+
let Some(key) = method.public_key_multibase else {
125
+
continue;
126
+
};
127
+
signing_key = Some(key);
128
+
break;
129
+
}
130
+
let Some(signing_key) = signing_key else {
131
+
return Err("no valid atproto signing key found in verification methods".to_string());
132
+
};
133
+
134
+
Ok(PartialMiniDoc {
135
+
unverified_handle,
136
+
pds,
137
+
signing_key,
138
+
})
139
+
}
140
+
}
141
+
142
+
/// multi-producer *single-consumer* queue structures (wrap in arc-mutex plz)
143
+
///
144
+
/// the hashset allows testing for presence of items in the queue.
145
+
/// this has absolutely no support for multiple queue consumers.
146
+
#[derive(Debug, Default)]
147
+
struct RefreshQueue {
148
+
queue: VecDeque<IdentityKey>,
149
+
items: HashSet<IdentityKey>,
150
+
}
151
+
152
+
#[derive(Clone)]
153
+
pub struct Identity {
154
+
handle_resolver: Arc<AtprotoHandleResolver<HickoryDnsTxtResolver, DefaultHttpClient>>,
155
+
did_resolver: Arc<CommonDidResolver<DefaultHttpClient>>,
156
+
cache: HybridCache<IdentityKey, IdentityVal>,
157
+
/// multi-producer *single consumer* queue
158
+
refresh_queue: Arc<Mutex<RefreshQueue>>,
159
+
/// just a lock to ensure only one refresher (queue consumer) is running (to be improved with a better refresher)
160
+
refresher: Arc<Mutex<()>>,
161
+
}
162
+
163
+
impl Identity {
164
+
pub async fn new(cache_dir: impl AsRef<Path>) -> Result<Self, IdentityError> {
165
+
let http_client = Arc::new(DefaultHttpClient::default());
166
+
let handle_resolver = AtprotoHandleResolver::new(AtprotoHandleResolverConfig {
167
+
dns_txt_resolver: HickoryDnsTxtResolver::new().unwrap(),
168
+
http_client: http_client.clone(),
169
+
});
170
+
let did_resolver = CommonDidResolver::new(CommonDidResolverConfig {
171
+
plc_directory_url: DEFAULT_PLC_DIRECTORY_URL.to_string(),
172
+
http_client: http_client.clone(),
173
+
});
174
+
175
+
let cache = HybridCacheBuilder::new()
176
+
.with_name("identity")
177
+
.memory(16 * 2_usize.pow(20))
178
+
.with_weighter(|k, v| std::mem::size_of_val(k) + std::mem::size_of_val(v))
179
+
.storage(Engine::small())
180
+
.with_device_options(
181
+
DirectFsDeviceOptions::new(cache_dir)
182
+
.with_capacity(2_usize.pow(30)) // TODO: configurable (1GB to have something)
183
+
.with_file_size(2_usize.pow(20)), // note: this limits the max cached item size; beware jumbo records
184
+
)
185
+
.build()
186
+
.await?;
187
+
188
+
Ok(Self {
189
+
handle_resolver: Arc::new(handle_resolver),
190
+
did_resolver: Arc::new(did_resolver),
191
+
cache,
192
+
refresh_queue: Default::default(),
193
+
refresher: Default::default(),
194
+
})
195
+
}
196
+
197
+
/// Resolve (and verify!) an atproto handle to a DID
198
+
///
199
+
/// The result can be stale
200
+
///
201
+
/// `None` if the handle can't be found or verification fails
202
+
pub async fn handle_to_did(&self, handle: Handle) -> Result<Option<Did>, IdentityError> {
203
+
let Some(did) = self.handle_to_unverified_did(&handle).await? else {
204
+
return Ok(None);
205
+
};
206
+
let Some(doc) = self.did_to_partial_mini_doc(&did).await? else {
207
+
return Ok(None);
208
+
};
209
+
if doc.unverified_handle != handle {
210
+
return Ok(None);
211
+
}
212
+
Ok(Some(did))
213
+
}
214
+
215
+
/// Resolve a DID to a pds url
216
+
///
217
+
/// This *also* incidentally resolves and verifies the handle, which might
218
+
/// make it slower than expected
219
+
pub async fn did_to_pds(&self, did: Did) -> Result<Option<String>, IdentityError> {
220
+
let Some(mini_doc) = self.did_to_partial_mini_doc(&did).await? else {
221
+
return Ok(None);
222
+
};
223
+
Ok(Some(mini_doc.pds))
224
+
}
225
+
226
+
/// Resolve (and cache but **not verify**) a handle to a DID
227
+
async fn handle_to_unverified_did(
228
+
&self,
229
+
handle: &Handle,
230
+
) -> Result<Option<Did>, IdentityError> {
231
+
let key = IdentityKey::Handle(handle.clone());
232
+
let entry = self
233
+
.cache
234
+
.fetch(key.clone(), {
235
+
let handle = handle.clone();
236
+
let resolver = self.handle_resolver.clone();
237
+
|| async move {
238
+
match resolver.resolve(&handle).await {
239
+
Ok(did) => Ok(IdentityVal(UtcDateTime::now(), IdentityData::Did(did))),
240
+
Err(atrium_identity::Error::NotFound) => {
241
+
Ok(IdentityVal(UtcDateTime::now(), IdentityData::NotFound))
242
+
}
243
+
Err(other) => Err(foyer::Error::Other(Box::new(
244
+
IdentityError::ResolutionFailed(other),
245
+
))),
246
+
}
247
+
}
248
+
})
249
+
.await?;
250
+
251
+
let now = UtcDateTime::now();
252
+
let IdentityVal(last_fetch, data) = entry.value();
253
+
match data {
254
+
IdentityData::Doc(_) => {
255
+
log::error!("identity value mixup: got a doc from a handle key (should be a did)");
256
+
Err(IdentityError::IdentityValTypeMixup(handle.to_string()))
257
+
}
258
+
IdentityData::NotFound => {
259
+
if (now - *last_fetch) >= MIN_NOT_FOUND_TTL {
260
+
self.queue_refresh(key).await;
261
+
}
262
+
Ok(None)
263
+
}
264
+
IdentityData::Did(did) => {
265
+
if (now - *last_fetch) >= MIN_TTL {
266
+
self.queue_refresh(key).await;
267
+
}
268
+
Ok(Some(did.clone()))
269
+
}
270
+
}
271
+
}
272
+
273
+
/// Fetch (and cache) a partial mini doc from a did
274
+
pub async fn did_to_partial_mini_doc(
275
+
&self,
276
+
did: &Did,
277
+
) -> Result<Option<PartialMiniDoc>, IdentityError> {
278
+
let key = IdentityKey::Did(did.clone());
279
+
let entry = self
280
+
.cache
281
+
.fetch(key.clone(), {
282
+
let did = did.clone();
283
+
let resolver = self.did_resolver.clone();
284
+
|| async move {
285
+
match resolver.resolve(&did).await {
286
+
Ok(did_doc) => {
287
+
// TODO: fix in atrium: should verify id is did
288
+
if did_doc.id != did.to_string() {
289
+
return Err(foyer::Error::other(Box::new(
290
+
IdentityError::BadDidDoc(
291
+
"did doc's id did not match did".to_string(),
292
+
),
293
+
)));
294
+
}
295
+
let mini_doc = did_doc.try_into().map_err(|e| {
296
+
foyer::Error::Other(Box::new(IdentityError::BadDidDoc(e)))
297
+
})?;
298
+
Ok(IdentityVal(UtcDateTime::now(), IdentityData::Doc(mini_doc)))
299
+
}
300
+
Err(atrium_identity::Error::NotFound) => {
301
+
Ok(IdentityVal(UtcDateTime::now(), IdentityData::NotFound))
302
+
}
303
+
Err(other) => Err(foyer::Error::Other(Box::new(
304
+
IdentityError::ResolutionFailed(other),
305
+
))),
306
+
}
307
+
}
308
+
})
309
+
.await?;
310
+
311
+
let now = UtcDateTime::now();
312
+
let IdentityVal(last_fetch, data) = entry.value();
313
+
match data {
314
+
IdentityData::Did(_) => {
315
+
log::error!("identity value mixup: got a did from a did key (should be a doc)");
316
+
Err(IdentityError::IdentityValTypeMixup(did.to_string()))
317
+
}
318
+
IdentityData::NotFound => {
319
+
if (now - *last_fetch) >= MIN_NOT_FOUND_TTL {
320
+
self.queue_refresh(key).await;
321
+
}
322
+
Ok(None)
323
+
}
324
+
IdentityData::Doc(mini_did) => {
325
+
if (now - *last_fetch) >= MIN_TTL {
326
+
self.queue_refresh(key).await;
327
+
}
328
+
Ok(Some(mini_did.clone()))
329
+
}
330
+
}
331
+
}
332
+
333
+
/// put a refresh task on the queue
334
+
///
335
+
/// this can be safely called from multiple concurrent tasks
336
+
async fn queue_refresh(&self, key: IdentityKey) {
337
+
// todo: max queue size
338
+
let mut q = self.refresh_queue.lock().await;
339
+
if !q.items.contains(&key) {
340
+
q.items.insert(key.clone());
341
+
q.queue.push_back(key);
342
+
}
343
+
}
344
+
345
+
/// find out what's next in the queue. concurrent consumers are not allowed.
346
+
///
347
+
/// intent is to leave the item in the queue while refreshing, so that a
348
+
/// producer will not re-add it if it's in progress. there's definitely
349
+
/// better ways to do this, but this is ~simple for as far as a single
350
+
/// consumer can take us.
351
+
///
352
+
/// we could take it from the queue but leave it in the set and remove from
353
+
/// set later, but splitting them apart feels more bug-prone.
354
+
async fn peek_refresh(&self) -> Option<IdentityKey> {
355
+
let q = self.refresh_queue.lock().await;
356
+
q.queue.front().cloned()
357
+
}
358
+
359
+
/// call to clear the latest key from the refresh queue. concurrent consumers not allowed.
360
+
///
361
+
/// must provide the last peeked refresh queue item as a small safety check
362
+
async fn complete_refresh(&self, key: &IdentityKey) -> Result<(), IdentityError> {
363
+
let mut q = self.refresh_queue.lock().await;
364
+
365
+
let Some(queue_key) = q.queue.pop_front() else {
366
+
// gone from queue + since we're in an error condition, make sure it's not stuck in items
367
+
// (not toctou because we have the lock)
368
+
// bolder here than below and removing from items because if the queue is *empty*, then we
369
+
// know it hasn't been re-added since losing sync.
370
+
if q.items.remove(key) {
371
+
log::error!("identity refresh: queue de-sync: not in ");
372
+
} else {
373
+
log::warn!(
374
+
"identity refresh: tried to complete with wrong key. are multiple queue consumers running?"
375
+
);
376
+
}
377
+
return Err(IdentityError::RefreshQueueKeyError("no key in queue"));
378
+
};
379
+
380
+
if queue_key != *key {
381
+
// extra weird case here, what's the most defensive behaviour?
382
+
// we have two keys: ours should have been first but isn't. this shouldn't happen, so let's
383
+
// just leave items alone for it. risks unbounded growth but we're in a bad place already.
384
+
// the other key is the one we just popped. we didn't want it, so maybe we should put it
385
+
// back, BUT if we somehow ended up with concurrent consumers, we have bigger problems. take
386
+
// responsibility for taking it instead: remove it from items as well, and just drop it.
387
+
//
388
+
// hope that whoever calls us takes this error seriously.
389
+
if q.items.remove(&queue_key) {
390
+
log::warn!(
391
+
"identity refresh: queue de-sync + dropping a bystander key without refreshing it!"
392
+
);
393
+
} else {
394
+
// you thought things couldn't get weirder? (i mean hopefully they can't)
395
+
log::error!("identity refresh: queue de-sync + bystander key also de-sync!?");
396
+
}
397
+
return Err(IdentityError::RefreshQueueKeyError(
398
+
"wrong key at front of queue",
399
+
));
400
+
}
401
+
402
+
if q.items.remove(key) {
403
+
Ok(())
404
+
} else {
405
+
log::error!("identity refresh: queue de-sync: key not in items");
406
+
Err(IdentityError::RefreshQueueKeyError("key not in items"))
407
+
}
408
+
}
409
+
410
+
/// run the refresh queue consumer
411
+
pub async fn run_refresher(&self, shutdown: CancellationToken) -> Result<(), IdentityError> {
412
+
let _guard = self
413
+
.refresher
414
+
.try_lock()
415
+
.expect("there to only be one refresher running");
416
+
loop {
417
+
if shutdown.is_cancelled() {
418
+
log::info!("identity refresher: exiting for shutdown: closing cache...");
419
+
if let Err(e) = self.cache.close().await {
420
+
log::error!("cache close errored: {e}");
421
+
} else {
422
+
log::info!("identity cache closed.")
423
+
}
424
+
return Ok(());
425
+
}
426
+
let Some(task_key) = self.peek_refresh().await else {
427
+
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
428
+
continue;
429
+
};
430
+
match task_key {
431
+
IdentityKey::Handle(ref handle) => {
432
+
log::trace!("refreshing handle {handle:?}");
433
+
match self.handle_resolver.resolve(handle).await {
434
+
Ok(did) => {
435
+
self.cache.insert(
436
+
task_key.clone(),
437
+
IdentityVal(UtcDateTime::now(), IdentityData::Did(did)),
438
+
);
439
+
}
440
+
Err(atrium_identity::Error::NotFound) => {
441
+
self.cache.insert(
442
+
task_key.clone(),
443
+
IdentityVal(UtcDateTime::now(), IdentityData::NotFound),
444
+
);
445
+
}
446
+
Err(err) => {
447
+
log::warn!(
448
+
"failed to refresh handle: {err:?}. leaving stale (should we eventually do something?)"
449
+
);
450
+
}
451
+
}
452
+
self.complete_refresh(&task_key).await?; // failures are bugs, so break loop
453
+
}
454
+
IdentityKey::Did(ref did) => {
455
+
log::trace!("refreshing did doc: {did:?}");
456
+
457
+
match self.did_resolver.resolve(did).await {
458
+
Ok(did_doc) => {
459
+
// TODO: fix in atrium: should verify id is did
460
+
if did_doc.id != did.to_string() {
461
+
log::warn!(
462
+
"refreshed did doc failed: wrong did doc id. dropping refresh."
463
+
);
464
+
continue;
465
+
}
466
+
let mini_doc = match did_doc.try_into() {
467
+
Ok(md) => md,
468
+
Err(e) => {
469
+
log::warn!(
470
+
"converting mini doc failed: {e:?}. dropping refresh."
471
+
);
472
+
continue;
473
+
}
474
+
};
475
+
self.cache.insert(
476
+
task_key.clone(),
477
+
IdentityVal(UtcDateTime::now(), IdentityData::Doc(mini_doc)),
478
+
);
479
+
}
480
+
Err(atrium_identity::Error::NotFound) => {
481
+
self.cache.insert(
482
+
task_key.clone(),
483
+
IdentityVal(UtcDateTime::now(), IdentityData::NotFound),
484
+
);
485
+
}
486
+
Err(err) => {
487
+
log::warn!(
488
+
"failed to refresh did doc: {err:?}. leaving stale (should we eventually do something?)"
489
+
);
490
+
}
491
+
}
492
+
493
+
self.complete_refresh(&task_key).await?; // failures are bugs, so break loop
494
+
}
495
+
}
496
+
}
497
+
}
498
+
}
499
+
500
+
pub struct HickoryDnsTxtResolver(TokioResolver);
501
+
502
+
impl HickoryDnsTxtResolver {
503
+
fn new() -> Result<Self, ResolveError> {
504
+
Ok(Self(TokioResolver::builder_tokio()?.build()))
505
+
}
506
+
}
507
+
508
+
impl DnsTxtResolver for HickoryDnsTxtResolver {
509
+
async fn resolve(
510
+
&self,
511
+
query: &str,
512
+
) -> core::result::Result<Vec<String>, Box<dyn std::error::Error + Send + Sync>> {
513
+
match self.0.txt_lookup(query).await {
514
+
Ok(r) => {
515
+
metrics::counter!("whoami_resolve_dns_txt", "success" => "true").increment(1);
516
+
Ok(r.iter().map(|r| r.to_string()).collect())
517
+
}
518
+
Err(e) => {
519
+
metrics::counter!("whoami_resolve_dns_txt", "success" => "false").increment(1);
520
+
Err(e.into())
521
+
}
522
+
}
523
+
}
524
+
}
+14
slingshot/src/lib.rs
+14
slingshot/src/lib.rs
···
1
+
mod consumer;
2
+
pub mod error;
3
+
mod firehose_cache;
4
+
mod healthcheck;
5
+
mod identity;
6
+
mod record;
7
+
mod server;
8
+
9
+
pub use consumer::consume;
10
+
pub use firehose_cache::firehose_cache;
11
+
pub use healthcheck::healthcheck;
12
+
pub use identity::Identity;
13
+
pub use record::{CachedRecord, ErrorResponseObject, Repo};
14
+
pub use server::serve;
+194
slingshot/src/main.rs
+194
slingshot/src/main.rs
···
1
+
// use foyer::HybridCache;
2
+
// use foyer::{Engine, DirectFsDeviceOptions, HybridCacheBuilder};
3
+
use metrics_exporter_prometheus::PrometheusBuilder;
4
+
use slingshot::{
5
+
Identity, Repo, consume, error::MainTaskError, firehose_cache, healthcheck, serve,
6
+
};
7
+
use std::path::PathBuf;
8
+
9
+
use clap::Parser;
10
+
use tokio_util::sync::CancellationToken;
11
+
12
+
/// Slingshot record edge cache
13
+
#[derive(Parser, Debug, Clone)]
14
+
#[command(version, about, long_about = None)]
15
+
struct Args {
16
+
/// Jetstream server to connect to (exclusive with --fixture). Provide either a wss:// URL, or a shorthand value:
17
+
/// 'us-east-1', 'us-east-2', 'us-west-1', or 'us-west-2'
18
+
#[arg(long)]
19
+
jetstream: String,
20
+
/// don't request zstd-compressed jetstream events
21
+
///
22
+
/// reduces CPU at the expense of more ingress bandwidth
23
+
#[arg(long, action)]
24
+
jetstream_no_zstd: bool,
25
+
/// where to keep disk caches
26
+
#[arg(long)]
27
+
cache_dir: PathBuf,
28
+
/// the domain pointing to this server
29
+
///
30
+
/// if present:
31
+
/// - a did:web document will be served at /.well-known/did.json
32
+
/// - HTTPS certs will be automatically configured with ACME/letsencrypt
33
+
/// - TODO: a rate-limiter will be installed
34
+
#[arg(long)]
35
+
domain: Option<String>,
36
+
/// email address for letsencrypt contact
37
+
///
38
+
/// recommended in production, i guess?
39
+
#[arg(long)]
40
+
acme_contact: Option<String>,
41
+
/// a location to cache acme https certs
42
+
///
43
+
/// only used if --domain is specified. omitting requires re-requesting certs
44
+
/// on every restart, and letsencrypt has rate limits that are easy to hit.
45
+
///
46
+
/// recommended in production, but mind the file permissions.
47
+
#[arg(long)]
48
+
certs: Option<PathBuf>,
49
+
/// a web address to send healthcheck pings to every ~51s or so
50
+
#[arg(long)]
51
+
healthcheck: Option<String>,
52
+
}
53
+
54
+
#[tokio::main]
55
+
async fn main() -> Result<(), String> {
56
+
tracing_subscriber::fmt::init();
57
+
58
+
let shutdown = CancellationToken::new();
59
+
60
+
let ctrlc_shutdown = shutdown.clone();
61
+
ctrlc::set_handler(move || ctrlc_shutdown.cancel()).expect("failed to set ctrl-c handler");
62
+
63
+
let args = Args::parse();
64
+
65
+
if let Err(e) = install_metrics_server() {
66
+
log::error!("failed to install metrics server: {e:?}");
67
+
} else {
68
+
log::info!("metrics listening at http://0.0.0.0:8765");
69
+
}
70
+
71
+
std::fs::create_dir_all(&args.cache_dir).map_err(|e| {
72
+
format!(
73
+
"failed to ensure cache parent dir: {e:?} (dir: {:?})",
74
+
args.cache_dir
75
+
)
76
+
})?;
77
+
let cache_dir = args.cache_dir.canonicalize().map_err(|e| {
78
+
format!(
79
+
"failed to canonicalize cache_dir: {e:?} (dir: {:?})",
80
+
args.cache_dir
81
+
)
82
+
})?;
83
+
log::info!("cache dir ready at at {cache_dir:?}.");
84
+
85
+
log::info!("setting up firehose cache...");
86
+
let cache = firehose_cache(cache_dir.join("./firehose")).await?;
87
+
log::info!("firehose cache ready.");
88
+
89
+
let mut tasks: tokio::task::JoinSet<Result<(), MainTaskError>> = tokio::task::JoinSet::new();
90
+
91
+
log::info!("starting identity service...");
92
+
let identity = Identity::new(cache_dir.join("./identity"))
93
+
.await
94
+
.map_err(|e| format!("identity setup failed: {e:?}"))?;
95
+
log::info!("identity service ready.");
96
+
let identity_refresher = identity.clone();
97
+
let identity_shutdown = shutdown.clone();
98
+
tasks.spawn(async move {
99
+
identity_refresher.run_refresher(identity_shutdown).await?;
100
+
Ok(())
101
+
});
102
+
103
+
let repo = Repo::new(identity.clone());
104
+
105
+
let server_shutdown = shutdown.clone();
106
+
let server_cache_handle = cache.clone();
107
+
tasks.spawn(async move {
108
+
serve(
109
+
server_cache_handle,
110
+
identity,
111
+
repo,
112
+
args.domain,
113
+
args.acme_contact,
114
+
args.certs,
115
+
server_shutdown,
116
+
)
117
+
.await?;
118
+
Ok(())
119
+
});
120
+
121
+
let consumer_shutdown = shutdown.clone();
122
+
let consumer_cache = cache.clone();
123
+
tasks.spawn(async move {
124
+
consume(
125
+
args.jetstream,
126
+
None,
127
+
args.jetstream_no_zstd,
128
+
consumer_shutdown,
129
+
consumer_cache,
130
+
)
131
+
.await?;
132
+
Ok(())
133
+
});
134
+
135
+
if let Some(hc) = args.healthcheck {
136
+
let healthcheck_shutdown = shutdown.clone();
137
+
tasks.spawn(async move {
138
+
healthcheck(hc, healthcheck_shutdown).await?;
139
+
Ok(())
140
+
});
141
+
}
142
+
143
+
tokio::select! {
144
+
_ = shutdown.cancelled() => log::warn!("shutdown requested"),
145
+
Some(r) = tasks.join_next() => {
146
+
log::warn!("a task exited, shutting down: {r:?}");
147
+
shutdown.cancel();
148
+
}
149
+
}
150
+
151
+
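    // shutdown: close the firehose cache as its own task, so foyer can flush to disk while the other tasks wind down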
tasks.spawn(async move {
152
+
cache
153
+
.close()
154
+
.await
155
+
.map_err(MainTaskError::FirehoseCacheCloseError)
156
+
});
157
+
158
+
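    // wait for all remaining tasks (including the cache close) to finish, aborting anything still running after 30s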
tokio::select! {
159
+
_ = async {
160
+
while let Some(completed) = tasks.join_next().await {
161
+
log::info!("shutdown: task completed: {completed:?}");
162
+
}
163
+
} => {},
164
+
_ = tokio::time::sleep(std::time::Duration::from_secs(30)) => {
165
+
log::info!("shutdown: not all tasks completed on time. aborting...");
166
+
tasks.shutdown().await;
167
+
},
168
+
}
169
+
170
+
log::info!("bye!");
171
+
172
+
Ok(())
173
+
}
174
+
175
+
fn install_metrics_server() -> Result<(), metrics_exporter_prometheus::BuildError> {
176
+
log::info!("installing metrics server...");
177
+
let host = [0, 0, 0, 0];
178
+
let port = 8765;
179
+
PrometheusBuilder::new()
180
+
.set_quantiles(&[0.5, 0.9, 0.99, 1.0])?
181
+
.set_bucket_duration(std::time::Duration::from_secs(300))?
182
+
.set_bucket_count(std::num::NonZero::new(12).unwrap()) // count * duration = 60 mins. stuff doesn't happen that fast here.
183
+
.set_enable_unit_suffix(false) // this seemed buggy for constellation (sometimes wouldn't engage)
184
+
.with_http_listener((host, port))
185
+
.install()?;
186
+
log::info!(
187
+
"metrics server installed! listening on http://{}.{}.{}.{}:{port}",
188
+
host[0],
189
+
host[1],
190
+
host[2],
191
+
host[3]
192
+
);
193
+
Ok(())
194
+
}
+155
slingshot/src/record.rs
+155
slingshot/src/record.rs
···
1
+
//! cached record storage
2
+
3
+
use crate::{Identity, error::RecordError};
4
+
use atrium_api::types::string::{Cid, Did, Nsid, RecordKey};
5
+
use reqwest::{Client, StatusCode};
6
+
use serde::{Deserialize, Serialize};
7
+
use serde_json::value::RawValue;
8
+
use std::str::FromStr;
9
+
use std::time::Duration;
10
+
use url::Url;
11
+
12
+
#[derive(Debug, Serialize, Deserialize)]
13
+
pub struct RawRecord {
14
+
cid: Cid,
15
+
record: String,
16
+
}
17
+
18
+
// TODO: should be able to do typed CID
19
+
impl From<(Cid, Box<RawValue>)> for RawRecord {
20
+
fn from((cid, rv): (Cid, Box<RawValue>)) -> Self {
21
+
Self {
22
+
cid,
23
+
record: rv.get().to_string(),
24
+
}
25
+
}
26
+
}
27
+
28
+
/// only for use with stored (validated) values, not general strings
29
+
impl From<&RawRecord> for (Cid, Box<RawValue>) {
30
+
fn from(RawRecord { cid, record }: &RawRecord) -> Self {
31
+
(
32
+
cid.clone(),
33
+
RawValue::from_string(record.to_string())
34
+
.expect("stored string from RawValue to be valid"),
35
+
)
36
+
}
37
+
}
38
+
39
+
#[derive(Debug, Serialize, Deserialize)]
40
+
pub enum CachedRecord {
41
+
Found(RawRecord),
42
+
Deleted,
43
+
}
44
+
45
+
//////// upstream record fetching
46
+
47
+
#[derive(Deserialize)]
48
+
struct RecordResponseObject {
49
+
#[allow(dead_code)] // expect it to be there but we ignore it
50
+
uri: String,
51
+
/// CID for this exact version of the record
52
+
///
53
+
/// this is optional in the spec and that's potentially TODO for slingshot
54
+
cid: Option<String>,
55
+
/// the record itself as JSON
56
+
value: Box<RawValue>,
57
+
}
58
+
59
+
#[derive(Debug, Deserialize)]
60
+
pub struct ErrorResponseObject {
61
+
pub error: String,
62
+
pub message: String,
63
+
}
64
+
65
+
#[derive(Clone)]
66
+
pub struct Repo {
67
+
identity: Identity,
68
+
client: Client,
69
+
}
70
+
71
+
impl Repo {
72
+
pub fn new(identity: Identity) -> Self {
73
+
let client = Client::builder()
74
+
.user_agent(format!(
75
+
"microcosm slingshot v{} (dev: @bad-example.com)",
76
+
env!("CARGO_PKG_VERSION")
77
+
))
78
+
.no_proxy()
79
+
.timeout(Duration::from_secs(10))
80
+
.build()
81
+
.unwrap();
82
+
Repo { identity, client }
83
+
}
84
+
85
+
pub async fn get_record(
86
+
&self,
87
+
did: &Did,
88
+
collection: &Nsid,
89
+
rkey: &RecordKey,
90
+
cid: &Option<Cid>,
91
+
) -> Result<CachedRecord, RecordError> {
92
+
let Some(pds) = self.identity.did_to_pds(did.clone()).await? else {
93
+
return Err(RecordError::NotFound("could not get pds for DID"));
94
+
};
95
+
96
+
// cid gets set to None for a retry, if it's Some and we got NotFound
97
+
let mut cid = cid;
98
+
99
+
let res = loop {
100
+
// TODO: throttle outgoing requests by host probably, generally guard against outgoing requests
101
+
let mut params = vec![
102
+
("repo", did.to_string()),
103
+
("collection", collection.to_string()),
104
+
("rkey", rkey.to_string()),
105
+
];
106
+
if let Some(cid) = cid {
107
+
params.push(("cid", cid.as_ref().to_string()));
108
+
}
109
+
let mut url = Url::parse_with_params(&pds, ¶ms)?;
110
+
url.set_path("/xrpc/com.atproto.repo.getRecord");
111
+
112
+
let res = self
113
+
.client
114
+
.get(url.clone())
115
+
.send()
116
+
.await
117
+
.map_err(RecordError::SendError)?;
118
+
119
+
if res.status() == StatusCode::BAD_REQUEST {
120
+
// 1. if we're not able to parse json, it's not something we can handle
121
+
let err = res
122
+
.json::<ErrorResponseObject>()
123
+
.await
124
+
.map_err(RecordError::UpstreamBadBadNotGoodRequest)?;
125
+
// 2. if we are, is it a NotFound? and if so, did we try with a CID?
126
+
// if so, retry with no CID (api handler will reject for mismatch but
127
+
// with a nice error + warm cache)
128
+
if err.error == "NotFound" && cid.is_some() {
129
+
cid = &None;
130
+
continue;
131
+
} else {
132
+
return Err(RecordError::UpstreamBadRequest(err));
133
+
}
134
+
}
135
+
break res;
136
+
};
137
+
138
+
let data = res
139
+
.error_for_status()
140
+
.map_err(RecordError::StatusError)? // TODO atproto error handling (think about handling not found)
141
+
.json::<RecordResponseObject>()
142
+
.await
143
+
.map_err(RecordError::ParseJsonError)?; // todo...
144
+
145
+
let Some(cid) = data.cid else {
146
+
return Err(RecordError::MissingUpstreamCid);
147
+
};
148
+
let cid = Cid::from_str(&cid).map_err(|e| RecordError::BadUpstreamCid(e.to_string()))?;
149
+
150
+
Ok(CachedRecord::Found(RawRecord {
151
+
cid,
152
+
record: data.value.to_string(),
153
+
}))
154
+
}
155
+
}
+769
slingshot/src/server.rs
+769
slingshot/src/server.rs
···
1
+
use crate::{
2
+
CachedRecord, ErrorResponseObject, Identity, Repo,
3
+
error::{RecordError, ServerError},
4
+
};
5
+
use atrium_api::types::string::{Cid, Did, Handle, Nsid, RecordKey};
6
+
use foyer::HybridCache;
7
+
use links::at_uri::parse_at_uri as normalize_at_uri;
8
+
use serde::Serialize;
9
+
use std::path::PathBuf;
10
+
use std::str::FromStr;
11
+
use std::sync::Arc;
12
+
use tokio_util::sync::CancellationToken;
13
+
14
+
use poem::{
15
+
Endpoint, EndpointExt, Route, Server,
16
+
endpoint::{StaticFileEndpoint, make_sync},
17
+
http::Method,
18
+
listener::{
19
+
Listener, TcpListener,
20
+
acme::{AutoCert, LETS_ENCRYPT_PRODUCTION},
21
+
},
22
+
middleware::{CatchPanic, Cors, Tracing},
23
+
};
24
+
use poem_openapi::{
25
+
ApiResponse, ContactObject, ExternalDocumentObject, Object, OpenApi, OpenApiService, Tags,
26
+
param::Query, payload::Json, types::Example,
27
+
};
28
+
29
+
fn example_handle() -> String {
30
+
"bad-example.com".to_string()
31
+
}
32
+
fn example_did() -> String {
33
+
"did:plc:hdhoaan3xa3jiuq4fg4mefid".to_string()
34
+
}
35
+
fn example_collection() -> String {
36
+
"app.bsky.feed.like".to_string()
37
+
}
38
+
fn example_rkey() -> String {
39
+
"3lv4ouczo2b2a".to_string()
40
+
}
41
+
fn example_uri() -> String {
42
+
format!(
43
+
"at://{}/{}/{}",
44
+
example_did(),
45
+
example_collection(),
46
+
example_rkey()
47
+
)
48
+
}
49
+
fn example_pds() -> String {
50
+
"https://porcini.us-east.host.bsky.network".to_string()
51
+
}
52
+
fn example_signing_key() -> String {
53
+
"zQ3shpq1g134o7HGDb86CtQFxnHqzx5pZWknrVX2Waum3fF6j".to_string()
54
+
}
55
+
56
+
#[derive(Object)]
57
+
#[oai(example = true)]
58
+
struct XrpcErrorResponseObject {
59
+
/// Should correspond to an error `name` in the lexicon errors array
60
+
error: String,
61
+
/// Human-readable description and possibly additional context
62
+
message: String,
63
+
}
64
+
impl Example for XrpcErrorResponseObject {
65
+
fn example() -> Self {
66
+
Self {
67
+
error: "RecordNotFound".to_string(),
68
+
message: "This record was deleted".to_string(),
69
+
}
70
+
}
71
+
}
72
+
type XrpcError = Json<XrpcErrorResponseObject>;
73
+
fn xrpc_error(error: impl AsRef<str>, message: impl AsRef<str>) -> XrpcError {
74
+
Json(XrpcErrorResponseObject {
75
+
error: error.as_ref().to_string(),
76
+
message: message.as_ref().to_string(),
77
+
})
78
+
}
79
+
80
+
fn bad_request_handler_get_record(err: poem::Error) -> GetRecordResponse {
81
+
GetRecordResponse::BadRequest(Json(XrpcErrorResponseObject {
82
+
error: "InvalidRequest".to_string(),
83
+
message: format!("Bad request, here's some info that maybe should not be exposed: {err}"),
84
+
}))
85
+
}
86
+
87
+
fn bad_request_handler_resolve_mini(err: poem::Error) -> ResolveMiniIDResponse {
88
+
ResolveMiniIDResponse::BadRequest(Json(XrpcErrorResponseObject {
89
+
error: "InvalidRequest".to_string(),
90
+
message: format!("Bad request, here's some info that maybe should not be exposed: {err}"),
91
+
}))
92
+
}
93
+
94
+
fn bad_request_handler_resolve_handle(err: poem::Error) -> JustDidResponse {
95
+
JustDidResponse::BadRequest(Json(XrpcErrorResponseObject {
96
+
error: "InvalidRequest".to_string(),
97
+
message: format!("Bad request, here's some info that maybe should not be exposed: {err}"),
98
+
}))
99
+
}
100
+
101
+
#[derive(Object)]
102
+
#[oai(example = true)]
103
+
struct FoundRecordResponseObject {
104
+
/// at-uri for this record
105
+
uri: String,
106
+
/// CID for this exact version of the record
107
+
///
108
+
/// Slingshot will always return the CID, despite it not being a required
109
+
/// response property in the official lexicon.
110
+
///
111
+
/// TODO: probably actually let it be optional, idk are some pds's weirdly
112
+
/// not returning it?
113
+
cid: Option<String>,
114
+
/// the record itself as JSON
115
+
value: serde_json::Value,
116
+
}
117
+
impl Example for FoundRecordResponseObject {
118
+
fn example() -> Self {
119
+
Self {
120
+
uri: example_uri(),
121
+
cid: Some("bafyreialv3mzvvxaoyrfrwoer3xmabbmdchvrbyhayd7bga47qjbycy74e".to_string()),
122
+
value: serde_json::json!({
123
+
"$type": "app.bsky.feed.like",
124
+
"createdAt": "2025-07-29T18:02:02.327Z",
125
+
"subject": {
126
+
"cid": "bafyreia2gy6eyk5qfetgahvshpq35vtbwy6negpy3gnuulcdi723mi7vxy",
127
+
"uri": "at://did:plc:vwzwgnygau7ed7b7wt5ux7y2/app.bsky.feed.post/3lv4lkb4vgs2k"
128
+
}
129
+
}),
130
+
}
131
+
}
132
+
}
133
+
134
+
#[derive(ApiResponse)]
135
+
#[oai(bad_request_handler = "bad_request_handler_get_record")]
136
+
enum GetRecordResponse {
137
+
/// Record found
138
+
#[oai(status = 200)]
139
+
Ok(Json<FoundRecordResponseObject>),
140
+
/// Bad request or no record to return
141
+
///
142
+
/// The only error name in the repo.getRecord lexicon is `RecordNotFound`,
143
+
/// but the [canonical api docs](https://docs.bsky.app/docs/api/com-atproto-repo-get-record)
144
+
/// also list `InvalidRequest`, `ExpiredToken`, and `InvalidToken`. Of
145
+
/// these, slingshot will only generate `RecordNotFound` or `InvalidRequest`,
146
+
/// but may return any proxied error code from the upstream repo.
147
+
#[oai(status = 400)]
148
+
BadRequest(XrpcError),
149
+
/// Server errors
150
+
#[oai(status = 500)]
151
+
ServerError(XrpcError),
152
+
}
153
+
154
+
#[derive(Object)]
155
+
#[oai(example = true)]
156
+
struct MiniDocResponseObject {
157
+
/// DID, bi-directionally verified if a handle was provided in the query.
158
+
did: String,
159
+
/// The validated handle of the account or `handle.invalid` if the handle
160
+
/// did not bi-directionally match the DID document.
161
+
handle: String,
162
+
/// The identity's PDS URL
163
+
pds: String,
164
+
/// The atproto signing key publicKeyMultibase
165
+
///
166
+
/// Legacy key encoding not supported. the key is returned directly; `id`,
167
+
/// `type`, and `controller` are omitted.
168
+
signing_key: String,
169
+
}
170
+
impl Example for MiniDocResponseObject {
171
+
fn example() -> Self {
172
+
Self {
173
+
did: example_did(),
174
+
handle: example_handle(),
175
+
pds: example_pds(),
176
+
signing_key: example_signing_key(),
177
+
}
178
+
}
179
+
}
180
+
181
+
#[derive(ApiResponse)]
182
+
#[oai(bad_request_handler = "bad_request_handler_resolve_mini")]
183
+
enum ResolveMiniIDResponse {
184
+
/// Identity resolved
185
+
#[oai(status = 200)]
186
+
Ok(Json<MiniDocResponseObject>),
187
+
/// Bad request or identity not resolved
188
+
#[oai(status = 400)]
189
+
BadRequest(XrpcError),
190
+
}
191
+
192
+
#[derive(Object)]
193
+
#[oai(example = true)]
194
+
struct FoundDidResponseObject {
195
+
/// the DID, bi-directionally verified if using Slingshot
196
+
did: String,
197
+
}
198
+
impl Example for FoundDidResponseObject {
199
+
fn example() -> Self {
200
+
Self { did: example_did() }
201
+
}
202
+
}
203
+
204
+
#[derive(ApiResponse)]
205
+
#[oai(bad_request_handler = "bad_request_handler_resolve_handle")]
206
+
enum JustDidResponse {
207
+
/// Resolution succeeded
208
+
#[oai(status = 200)]
209
+
Ok(Json<FoundDidResponseObject>),
210
+
/// Bad request, failed to resolve, or failed to verify
211
+
///
212
+
/// `error` will be one of `InvalidRequest`, `HandleNotFound`.
213
+
#[oai(status = 400)]
214
+
BadRequest(XrpcError),
215
+
/// Something went wrong trying to complete the request
216
+
#[oai(status = 500)]
217
+
ServerError(XrpcError),
218
+
}
219
+
220
+
struct Xrpc {
221
+
cache: HybridCache<String, CachedRecord>,
222
+
identity: Identity,
223
+
repo: Arc<Repo>,
224
+
}
225
+
226
+
#[derive(Tags)]
227
+
enum ApiTags {
228
+
/// Core ATProtocol-compatible APIs.
229
+
///
230
+
/// > [!tip]
231
+
/// > Upstream documentation is available at
232
+
/// > https://docs.bsky.app/docs/category/http-reference
233
+
///
234
+
/// These queries are usually executed directly against the PDS containing
235
+
/// the data being requested. Slingshot offers a caching view of the same
236
+
/// contents with better expected performance and reliability.
237
+
#[oai(rename = "com.atproto.* queries")]
238
+
ComAtproto,
239
+
/// Additional and improved APIs.
240
+
///
241
+
/// These APIs offer small tweaks to the core ATProtocol APIs, with
242
+
/// more convenient [request parameters](#tag/slingshot-specific-queries/GET/xrpc/com.bad-example.repo.getUriRecord)
243
+
/// or [response formats](#tag/slingshot-specific-queries/GET/xrpc/com.bad-example.identity.resolveMiniDoc).
244
+
///
245
+
/// > [!important]
246
+
/// > At the moment, these are namespaced under the `com.bad-example.*` NSID
247
+
/// > prefix, but as they stabilize they may be migrated to an org namespace
248
+
/// > like `blue.microcosm.*`. Support for aliasing to `com.bad-example.*`
249
+
/// > will be maintained as long as it's in use.
250
+
#[oai(rename = "slingshot-specific queries")]
251
+
Custom,
252
+
}
253
+
254
+
#[OpenApi]
255
+
impl Xrpc {
256
+
/// com.atproto.repo.getRecord
257
+
///
258
+
/// Get a single record from a repository. Does not require auth.
259
+
///
260
+
/// > [!tip]
261
+
/// > See also the [canonical `com.atproto` XRPC documentation](https://docs.bsky.app/docs/api/com-atproto-repo-get-record)
262
+
/// > that this endpoint aims to be compatible with.
263
+
#[oai(
264
+
path = "/com.atproto.repo.getRecord",
265
+
method = "get",
266
+
tag = "ApiTags::ComAtproto"
267
+
)]
268
+
async fn get_record(
269
+
&self,
270
+
/// The DID or handle of the repo
271
+
#[oai(example = "example_did")]
272
+
Query(repo): Query<String>,
273
+
/// The NSID of the record collection
274
+
#[oai(example = "example_collection")]
275
+
Query(collection): Query<String>,
276
+
/// The Record key
277
+
#[oai(example = "example_rkey")]
278
+
Query(rkey): Query<String>,
279
+
/// Optional: the CID of the version of the record.
280
+
///
281
+
/// If not specified, then return the most recent version.
282
+
///
283
+
/// If a stale `CID` is specified and a newer version of the record
284
+
/// exists, Slingshot returns a `NotFound` error. That is: Slingshot
285
+
/// only retains the most recent version of a record.
286
+
Query(cid): Query<Option<String>>,
287
+
) -> GetRecordResponse {
288
+
self.get_record_impl(repo, collection, rkey, cid).await
289
+
}
290
+
291
+
/// com.bad-example.repo.getUriRecord
292
+
///
293
+
/// Ergonomic complement to [`com.atproto.repo.getRecord`](https://docs.bsky.app/docs/api/com-atproto-repo-get-record)
294
+
/// which accepts an `at-uri` instead of individual repo/collection/rkey params
295
+
#[oai(
296
+
path = "/com.bad-example.repo.getUriRecord",
297
+
method = "get",
298
+
tag = "ApiTags::Custom"
299
+
)]
300
+
async fn get_uri_record(
301
+
&self,
302
+
/// The at-uri of the record
303
+
///
304
+
/// The identifier can be a DID or an atproto handle, and the collection
305
+
/// and rkey segments must be present.
306
+
#[oai(example = "example_uri")]
307
+
Query(at_uri): Query<String>,
308
+
/// Optional: the CID of the version of the record.
309
+
///
310
+
/// If not specified, then return the most recent version.
311
+
///
312
+
/// > [!tip]
313
+
/// > If specified and a newer version of the record exists, returns 404 not
314
+
/// > found. That is: slingshot only retains the most recent version of a
315
+
/// > record.
316
+
Query(cid): Query<Option<String>>,
317
+
) -> GetRecordResponse {
318
+
let bad_at_uri = || {
319
+
GetRecordResponse::BadRequest(xrpc_error(
320
+
"InvalidRequest",
321
+
"at-uri does not appear to be valid",
322
+
))
323
+
};
324
+
325
+
let Some(normalized) = normalize_at_uri(&at_uri) else {
326
+
return bad_at_uri();
327
+
};
328
+
329
+
// TODO: move this to links
330
+
let Some(rest) = normalized.strip_prefix("at://") else {
331
+
return bad_at_uri();
332
+
};
333
+
let Some((repo, rest)) = rest.split_once('/') else {
334
+
return bad_at_uri();
335
+
};
336
+
let Some((collection, rest)) = rest.split_once('/') else {
337
+
return bad_at_uri();
338
+
};
339
+
let rkey = if let Some((rkey, _rest)) = rest.split_once('?') {
340
+
rkey
341
+
} else {
342
+
rest
343
+
};
344
+
345
+
self.get_record_impl(
346
+
repo.to_string(),
347
+
collection.to_string(),
348
+
rkey.to_string(),
349
+
cid,
350
+
)
351
+
.await
352
+
}
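The manual decomposition above amounts to a small standalone helper; here is the same logic as a self-contained sketch, using only the std string ops the handler itself uses:

```rust
// Sketch of the at-uri decomposition in get_uri_record: strip the scheme,
// split off repo and collection, and drop any trailing query string.
fn split_at_uri(uri: &str) -> Option<(&str, &str, &str)> {
    let rest = uri.strip_prefix("at://")?;
    let (repo, rest) = rest.split_once('/')?;
    let (collection, rest) = rest.split_once('/')?;
    let rkey = rest.split_once('?').map_or(rest, |(rkey, _)| rkey);
    Some((repo, collection, rkey))
}

fn main() {
    assert_eq!(
        split_at_uri("at://did:plc:example123/app.bsky.feed.post/3jwdwj2ctlk26"),
        Some(("did:plc:example123", "app.bsky.feed.post", "3jwdwj2ctlk26"))
    );
}
```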
353
+
354
+
/// com.atproto.identity.resolveHandle
355
+
///
356
+
/// Resolves an atproto [`handle`](https://atproto.com/guides/glossary#handle)
357
+
/// (hostname) to a [`DID`](https://atproto.com/guides/glossary#did-decentralized-id).
358
+
///
359
+
/// > [!tip]
360
+
/// > Compatibility note: Slingshot will **always bi-directionally verify
361
+
/// > against the DID document**, which is optional according to the
362
+
/// > authoritative lexicon.
363
+
///
364
+
/// > [!tip]
365
+
/// > See the [canonical `com.atproto` XRPC documentation](https://docs.bsky.app/docs/api/com-atproto-identity-resolve-handle)
366
+
/// > that this endpoint aims to be compatible with.
367
+
#[oai(
368
+
path = "/com.atproto.identity.resolveHandle",
369
+
method = "get",
370
+
tag = "ApiTags::ComAtproto"
371
+
)]
372
+
async fn resolve_handle(
373
+
&self,
374
+
/// The handle to resolve.
375
+
#[oai(example = "example_handle")]
376
+
Query(handle): Query<String>,
377
+
) -> JustDidResponse {
378
+
let Ok(handle) = Handle::new(handle) else {
379
+
return JustDidResponse::BadRequest(xrpc_error("InvalidRequest", "not a valid handle"));
380
+
};
381
+
382
+
let Ok(alleged_did) = self.identity.handle_to_did(handle.clone()).await else {
383
+
return JustDidResponse::ServerError(xrpc_error("Failed", "Could not resolve handle"));
384
+
};
385
+
386
+
let Some(alleged_did) = alleged_did else {
387
+
return JustDidResponse::BadRequest(xrpc_error(
388
+
"HandleNotFound",
389
+
"Could not resolve handle to a DID",
390
+
));
391
+
};
392
+
393
+
let Ok(partial_doc) = self.identity.did_to_partial_mini_doc(&alleged_did).await else {
394
+
return JustDidResponse::ServerError(xrpc_error("Failed", "Could not fetch DID doc"));
395
+
};
396
+
397
+
let Some(partial_doc) = partial_doc else {
398
+
return JustDidResponse::BadRequest(xrpc_error(
399
+
"HandleNotFound",
400
+
"Resolved handle but could not find DID doc for the DID",
401
+
));
402
+
};
403
+
404
+
if partial_doc.unverified_handle != handle {
405
+
return JustDidResponse::BadRequest(xrpc_error(
406
+
"HandleNotFound",
407
+
"Resolved handle failed bi-directional validation",
408
+
));
409
+
}
410
+
411
+
JustDidResponse::Ok(Json(FoundDidResponseObject {
412
+
did: alleged_did.to_string(),
413
+
}))
414
+
}
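Distilled, the bi-directional check is: handle → DID, then DID document → handle, and the round trip must agree. A hypothetical synchronous sketch, with resolver closures standing in for `self.identity`:

```rust
// Hypothetical sketch of bi-directional handle verification: the handle
// must resolve to a DID whose DID document declares that same handle.
fn verify_bidirectional(
    handle: &str,
    handle_to_did: impl Fn(&str) -> Option<String>,
    doc_handle_for_did: impl Fn(&str) -> Option<String>,
) -> Option<String> {
    let did = handle_to_did(handle)?;
    let doc_handle = doc_handle_for_did(&did)?;
    (doc_handle == handle).then_some(did)
}
```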
415
+
416
+
/// com.bad-example.identity.resolveMiniDoc
417
+
///
418
+
/// Like [com.atproto.identity.resolveIdentity](https://docs.bsky.app/docs/api/com-atproto-identity-resolve-identity)
419
+
/// but instead of the full `didDoc` it returns an atproto-relevant subset.
420
+
#[oai(
421
+
path = "/com.bad-example.identity.resolveMiniDoc",
422
+
method = "get",
423
+
tag = "ApiTags::Custom"
424
+
)]
425
+
async fn resolve_mini_id(
426
+
&self,
427
+
/// Handle or DID to resolve
428
+
#[oai(example = "example_handle")]
429
+
Query(identifier): Query<String>,
430
+
) -> ResolveMiniIDResponse {
431
+
let invalid = |reason: &'static str| {
432
+
ResolveMiniIDResponse::BadRequest(xrpc_error("InvalidRequest", reason))
433
+
};
434
+
435
+
let mut unverified_handle = None;
436
+
let did = match Did::new(identifier.clone()) {
437
+
Ok(did) => did,
438
+
Err(_) => {
439
+
let Ok(alleged_handle) = Handle::new(identifier) else {
440
+
return invalid("identifier was not a valid DID or handle");
441
+
};
442
+
if let Ok(res) = self.identity.handle_to_did(alleged_handle.clone()).await {
443
+
if let Some(did) = res {
444
+
// we did it joe
445
+
unverified_handle = Some(alleged_handle);
446
+
did
447
+
} else {
448
+
return invalid("Could not resolve handle identifier to a DID");
449
+
}
450
+
} else {
451
+
// TODO: ServerError not BadRequest
452
+
return invalid("errored while trying to resolve handle to DID");
453
+
}
454
+
}
455
+
};
456
+
let Ok(partial_doc) = self.identity.did_to_partial_mini_doc(&did).await else {
457
+
return invalid("failed to get DID doc");
458
+
};
459
+
let Some(partial_doc) = partial_doc else {
460
+
return invalid("failed to find DID doc");
461
+
};
462
+
463
+
// ok so here's where we're at:
464
+
// ✅ we have a DID
465
+
// ✅ we have a partial doc
466
+
// 🔶 if we have a handle, it's from the `identifier` (user-input)
467
+
// -> then we just need to compare to the partial doc to confirm
468
+
// -> else we need to resolve the DID doc's to a handle and check
469
+
let handle = if let Some(h) = unverified_handle {
470
+
if h == partial_doc.unverified_handle {
471
+
h.to_string()
472
+
} else {
473
+
"handle.invalid".to_string()
474
+
}
475
+
} else {
476
+
let Ok(handle_did) = self
477
+
.identity
478
+
.handle_to_did(partial_doc.unverified_handle.clone())
479
+
.await
480
+
else {
481
+
return invalid("failed to get did doc's handle");
482
+
};
483
+
let Some(handle_did) = handle_did else {
484
+
return invalid("failed to resolve did doc's handle");
485
+
};
486
+
if handle_did == did {
487
+
partial_doc.unverified_handle.to_string()
488
+
} else {
489
+
"handle.invalid".to_string()
490
+
}
491
+
};
492
+
493
+
ResolveMiniIDResponse::Ok(Json(MiniDocResponseObject {
494
+
did: did.to_string(),
495
+
handle,
496
+
pds: partial_doc.pds,
497
+
signing_key: partial_doc.signing_key,
498
+
}))
499
+
}
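The handle-selection branch above condenses to: trust a caller-supplied handle only if it matches the DID doc's handle, otherwise reverse-resolve the doc's handle and require it to round-trip to the same DID; any failure degrades to the `handle.invalid` sentinel. A hypothetical sketch:

```rust
// Hypothetical condensed form of the handle-selection logic in
// resolve_mini_id; the closure stands in for `self.identity.handle_to_did`.
fn choose_handle(
    user_handle: Option<&str>,
    doc_handle: &str,
    did: &str,
    handle_to_did: impl Fn(&str) -> Option<String>,
) -> String {
    let verified = match user_handle {
        // caller supplied a handle: it only has to match the DID doc's
        Some(h) => h == doc_handle,
        // otherwise the doc's handle must resolve back to the same DID
        None => handle_to_did(doc_handle).as_deref() == Some(did),
    };
    if verified {
        doc_handle.to_string()
    } else {
        "handle.invalid".to_string()
    }
}
```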
500
+
501
+
async fn get_record_impl(
502
+
&self,
503
+
repo: String,
504
+
collection: String,
505
+
rkey: String,
506
+
cid: Option<String>,
507
+
) -> GetRecordResponse {
508
+
let did = match Did::new(repo.clone()) {
509
+
Ok(did) => did,
510
+
Err(_) => {
511
+
let Ok(handle) = Handle::new(repo) else {
512
+
return GetRecordResponse::BadRequest(xrpc_error(
513
+
"InvalidRequest",
514
+
"repo was not a valid DID or handle",
515
+
));
516
+
};
517
+
if let Ok(res) = self.identity.handle_to_did(handle).await {
518
+
if let Some(did) = res {
519
+
did
520
+
} else {
521
+
return GetRecordResponse::BadRequest(xrpc_error(
522
+
"InvalidRequest",
523
+
"Could not resolve handle repo to a DID",
524
+
));
525
+
}
526
+
} else {
527
+
return GetRecordResponse::ServerError(xrpc_error(
528
+
"ResolutionFailed",
529
+
"errored while trying to resolve handle to DID",
530
+
));
531
+
}
532
+
}
533
+
};
534
+
535
+
let Ok(collection) = Nsid::new(collection) else {
536
+
return GetRecordResponse::BadRequest(xrpc_error(
537
+
"InvalidRequest",
538
+
"invalid NSID for collection",
539
+
));
540
+
};
541
+
542
+
let Ok(rkey) = RecordKey::new(rkey) else {
543
+
return GetRecordResponse::BadRequest(xrpc_error("InvalidRequest", "invalid rkey"));
544
+
};
545
+
546
+
let cid: Option<Cid> = if let Some(cid) = cid {
547
+
let Ok(cid) = Cid::from_str(&cid) else {
548
+
return GetRecordResponse::BadRequest(xrpc_error("InvalidRequest", "invalid CID"));
549
+
};
550
+
Some(cid)
551
+
} else {
552
+
None
553
+
};
554
+
555
+
let at_uri = format!("at://{}/{}/{}", &*did, &*collection, &*rkey);
556
+
557
+
let fr = self
558
+
.cache
559
+
.fetch(at_uri.clone(), {
560
+
let cid = cid.clone();
561
+
let repo_api = self.repo.clone();
562
+
|| async move {
563
+
repo_api
564
+
.get_record(&did, &collection, &rkey, &cid)
565
+
.await
566
+
.map_err(|e| foyer::Error::Other(Box::new(e)))
567
+
}
568
+
})
569
+
.await;
570
+
571
+
let entry = match fr {
572
+
Ok(e) => e,
573
+
Err(foyer::Error::Other(e)) => {
574
+
let record_error = match e.downcast::<RecordError>() {
575
+
Ok(e) => e,
576
+
Err(e) => {
577
+
log::error!("error (foyer other) getting cache entry, {e:?}");
578
+
return GetRecordResponse::ServerError(xrpc_error(
579
+
"ServerError",
580
+
"sorry, something went wrong",
581
+
));
582
+
}
583
+
};
584
+
let RecordError::UpstreamBadRequest(ErrorResponseObject { error, message }) =
585
+
*record_error
586
+
else {
587
+
log::error!("RecordError getting cache entry, {record_error:?}");
588
+
return GetRecordResponse::ServerError(xrpc_error(
589
+
"ServerError",
590
+
"sorry, something went wrong",
591
+
));
592
+
};
593
+
594
+
// all of the noise around here is so that we can ultimately reach this:
595
+
// upstream BadRequest extracted from the foyer result which we can proxy back
596
+
return GetRecordResponse::BadRequest(xrpc_error(
597
+
error,
598
+
format!("Upstream bad request: {message}"),
599
+
));
600
+
}
601
+
Err(e) => {
602
+
log::error!("error (foyer) getting cache entry, {e:?}");
603
+
return GetRecordResponse::ServerError(xrpc_error(
604
+
"ServerError",
605
+
"sorry, something went wrong",
606
+
));
607
+
}
608
+
};
609
+
610
+
match *entry {
611
+
CachedRecord::Found(ref raw) => {
612
+
let (found_cid, raw_value) = raw.into();
613
+
if cid.clone().map(|c| c != found_cid).unwrap_or(false) {
614
+
return GetRecordResponse::BadRequest(Json(XrpcErrorResponseObject {
615
+
error: "RecordNotFound".to_string(),
616
+
message: "A record was found but its CID did not match that requested"
617
+
.to_string(),
618
+
}));
619
+
}
620
+
// TODO: thank u stellz: https://gist.github.com/stella3d/51e679e55b264adff89d00a1e58d0272
621
+
let value =
622
+
serde_json::from_str(raw_value.get()).expect("RawValue to be valid json");
623
+
GetRecordResponse::Ok(Json(FoundRecordResponseObject {
624
+
uri: at_uri,
625
+
cid: Some(found_cid.as_ref().to_string()),
626
+
value,
627
+
}))
628
+
}
629
+
CachedRecord::Deleted => GetRecordResponse::BadRequest(Json(XrpcErrorResponseObject {
630
+
error: "RecordNotFound".to_string(),
631
+
message: "This record was deleted".to_string(),
632
+
})),
633
+
}
634
+
}
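The cache interaction above follows foyer's fetch-or-populate shape: `fetch` takes a key plus an async closure that loads from upstream on a miss, and typed upstream errors are smuggled through `foyer::Error::Other` and downcast back out. A reduced, self-contained sketch of that round trip (types simplified; `UpstreamError` and `load_upstream` are hypothetical stand-ins for `RecordError` and the repo client):

```rust
use std::fmt;

// Hypothetical stand-in for the crate's RecordError.
#[derive(Debug)]
struct UpstreamError(String);
impl fmt::Display for UpstreamError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "upstream error: {}", self.0)
    }
}
impl std::error::Error for UpstreamError {}

// Hypothetical upstream loader invoked only on a cache miss.
async fn load_upstream(key: &str) -> Result<String, UpstreamError> {
    Ok(format!("value for {key}"))
}

async fn cached_get(
    cache: &foyer::HybridCache<String, String>,
    key: String,
) -> Result<String, String> {
    let fetched = cache
        .fetch(key.clone(), {
            let key = key.clone();
            || async move {
                load_upstream(&key)
                    .await
                    .map_err(|e| foyer::Error::Other(Box::new(e)))
            }
        })
        .await;
    match fetched {
        Ok(entry) => Ok((*entry).clone()),
        // recover the typed upstream error from the boxed Other variant
        Err(foyer::Error::Other(e)) => match e.downcast::<UpstreamError>() {
            Ok(upstream) => Err(upstream.to_string()),
            Err(other) => Err(format!("unexpected: {other:?}")),
        },
        Err(e) => Err(format!("cache: {e:?}")),
    }
}
```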
635
+
636
+
// TODO
637
+
// #[oai(path = "/com.atproto.identity.resolveHandle", method = "get")]
638
+
// #[oai(path = "/com.atproto.identity.resolveDid", method = "get")]
639
+
// but these are both not specified to do bidirectional validation, which is what we want to offer
640
+
// com.atproto.identity.resolveIdentity seems right, but requires returning the full did-doc
641
+
// would be nice if there were two queries:
642
+
// did -> verified handle + pds url
643
+
// handle -> verified did + pds url
644
+
//
645
+
// we could do horrible things and implement resolveIdentity with only a stripped-down fake did doc
646
+
// but this will *definitely* cause problems because eg. we're not currently storing pubkeys and
647
+
// those are a little bit important
648
+
}
649
+
650
+
#[derive(Debug, Clone, Serialize)]
651
+
#[serde(rename_all = "camelCase")]
652
+
struct AppViewService {
653
+
id: String,
654
+
r#type: String,
655
+
service_endpoint: String,
656
+
}
657
+
#[derive(Debug, Clone, Serialize)]
658
+
struct AppViewDoc {
659
+
id: String,
660
+
service: [AppViewService; 1],
661
+
}
662
+
/// Serve a did:web DID document so that this service can act as an XRPC appview
663
+
///
664
+
/// No slingshot endpoints currently require auth, so it's not necessary to do
665
+
/// service proxying; however, clients may still wish to:
666
+
///
667
+
/// - PDS proxying offers a level of client IP anonymity from slingshot
668
+
/// - slingshot *may* implement more generous per-user rate-limits for proxied requests in the future
669
+
fn get_did_doc(domain: &str) -> impl Endpoint + use<> {
670
+
let doc = poem::web::Json(AppViewDoc {
671
+
id: format!("did:web:{domain}"),
672
+
service: [AppViewService {
673
+
id: "#slingshot".to_string(),
674
+
r#type: "SlingshotRecordProxy".to_string(),
675
+
service_endpoint: format!("https://{domain}"),
676
+
}],
677
+
});
678
+
make_sync(move |_| doc.clone())
679
+
}
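For a hypothetical domain `slingshot.example`, the document this serves would look roughly like the following (the camelCase `serviceEndpoint` comes from the serde rename on `AppViewService`):

```rust
// Hypothetical shape of the served did:web document.
fn main() {
    let doc = serde_json::json!({
        "id": "did:web:slingshot.example",
        "service": [{
            "id": "#slingshot",
            "type": "SlingshotRecordProxy",
            "serviceEndpoint": "https://slingshot.example"
        }]
    });
    // pretty-print the document
    println!("{doc:#}");
}
```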
680
+
681
+
pub async fn serve(
682
+
cache: HybridCache<String, CachedRecord>,
683
+
identity: Identity,
684
+
repo: Repo,
685
+
domain: Option<String>,
686
+
acme_contact: Option<String>,
687
+
certs: Option<PathBuf>,
688
+
shutdown: CancellationToken,
689
+
) -> Result<(), ServerError> {
690
+
let repo = Arc::new(repo);
691
+
let api_service = OpenApiService::new(
692
+
Xrpc {
693
+
cache,
694
+
identity,
695
+
repo,
696
+
},
697
+
"Slingshot",
698
+
env!("CARGO_PKG_VERSION"),
699
+
)
700
+
.server(if let Some(ref h) = domain {
701
+
format!("https://{h}")
702
+
} else {
703
+
"http://localhost:3000".to_string()
704
+
})
705
+
.url_prefix("/xrpc")
706
+
.contact(
707
+
ContactObject::new()
708
+
.name("@microcosm.blue")
709
+
.url("https://bsky.app/profile/microcosm.blue"),
710
+
)
711
+
.description(include_str!("../api-description.md"))
712
+
.external_document(ExternalDocumentObject::new(
713
+
"https://microcosm.blue/slingshot",
714
+
));
715
+
716
+
let mut app = Route::new()
717
+
.at("/", StaticFileEndpoint::new("./static/index.html"))
718
+
.nest("/openapi", api_service.spec_endpoint())
719
+
.nest("/xrpc/", api_service);
720
+
721
+
if let Some(domain) = domain {
722
+
rustls::crypto::aws_lc_rs::default_provider()
723
+
.install_default()
724
+
.expect("alskfjalksdjf");
725
+
726
+
app = app.at("/.well-known/did.json", get_did_doc(&domain));
727
+
728
+
let mut auto_cert = AutoCert::builder()
729
+
.directory_url(LETS_ENCRYPT_PRODUCTION)
730
+
.domain(&domain);
731
+
if let Some(contact) = acme_contact {
732
+
auto_cert = auto_cert.contact(contact);
733
+
}
734
+
if let Some(certs) = certs {
735
+
auto_cert = auto_cert.cache_path(certs);
736
+
}
737
+
let auto_cert = auto_cert.build().map_err(ServerError::AcmeBuildError)?;
738
+
739
+
run(
740
+
TcpListener::bind("0.0.0.0:443").acme(auto_cert),
741
+
app,
742
+
shutdown,
743
+
)
744
+
.await
745
+
} else {
746
+
run(TcpListener::bind("127.0.0.1:3000"), app, shutdown).await
747
+
}
748
+
}
749
+
750
+
async fn run<L>(listener: L, app: Route, shutdown: CancellationToken) -> Result<(), ServerError>
751
+
where
752
+
L: Listener + 'static,
753
+
{
754
+
let app = app
755
+
.with(
756
+
Cors::new()
757
+
.allow_origin_regex(".*")
758
+
.allow_methods([Method::GET])
759
+
.allow_credentials(false),
760
+
)
761
+
.with(CatchPanic::new())
762
+
.with(Tracing);
763
+
Server::new(listener)
764
+
.name("slingshot")
765
+
.run_with_graceful_shutdown(app, shutdown.cancelled(), None)
766
+
.await
767
+
.map_err(ServerError::ServerExited)
768
+
.inspect(|()| log::info!("server ended. goodbye."))
769
+
}
slingshot/static/favicon.ico
slingshot/static/favicon.ico
This is a binary file and will not be displayed.
+67
slingshot/static/index.html
+67
slingshot/static/index.html
···
1
+
<!doctype html>
2
+
<html lang="en">
3
+
<head>
4
+
<meta charset="utf-8" />
5
+
<title>Slingshot: atproto edge record cache</title>
6
+
<meta name="viewport" content="width=device-width, initial-scale=1" />
7
+
<meta name="description" content="API Documentation for Slingshot, a firehose-listening atproto edge record and identity cache." />
8
+
<style>
9
+
:root {
10
+
--scalar-small: 13px;
11
+
}
12
+
.scalar-app .markdown .markdown-alert {
13
+
font-size: var(--scalar-small);
14
+
}
15
+
.sidebar-heading-link-title {
16
+
line-height: 1.2;
17
+
}
18
+
.custom-header {
19
+
height: 42px;
20
+
background-color: #221828;
21
+
box-shadow: inset 0 -1px 0 var(--scalar-border-color);
22
+
color: var(--scalar-color-1);
23
+
font-size: var(--scalar-font-size-3);
24
+
font-family: 'Iowan Old Style', 'Palatino Linotype', 'URW Palladio L', P052, serif;
25
+
padding: 0 18px;
26
+
justify-content: space-between;
27
+
}
28
+
.custom-header,
29
+
.custom-header nav {
30
+
display: flex;
31
+
align-items: center;
32
+
gap: 18px;
33
+
}
34
+
.custom-header a:hover {
35
+
color: var(--scalar-color-2);
36
+
}
37
+
38
+
.light-mode .custom-header {
39
+
background-color: thistle;
40
+
}
41
+
</style>
42
+
</head>
43
+
<body>
44
+
<header class="custom-header scalar-app">
45
+
<p>
46
+
TODO: thing
47
+
</p>
48
+
<nav>
49
+
<b>a <a href="https://microcosm.blue">microcosm</a> project</b>
50
+
<a href="https://bsky.app/profile/microcosm.blue">@microcosm.blue</a>
51
+
<a href="https://github.com/at-microcosm">github</a>
52
+
</nav>
53
+
</header>
54
+
55
+
<script id="api-reference" type="application/json" data-url="/openapi"></script>
56
+
57
+
<script>
58
+
var configuration = {
59
+
theme: 'purple',
60
+
hideModels: true,
61
+
}
62
+
document.getElementById('api-reference').dataset.configuration = JSON.stringify(configuration)
63
+
</script>
64
+
65
+
<script src="https://cdn.jsdelivr.net/npm/@scalar/api-reference"></script>
66
+
</body>
67
+
</html>
-196
ufos ops (move to micro-ops).md
-196
ufos ops (move to micro-ops).md
···
1
-
ufos ops
2
-
3
-
btrfs snapshots: snapper
4
-
5
-
```bash
6
-
sudo apt install snapper
7
-
sudo snapper -c ufos-db create-config /mnt/ufos-db
8
-
9
-
# edit /etc/snapper/configs/ufos-db
10
-
# change
11
-
TIMELINE_MIN_AGE="1800"
12
-
TIMELINE_LIMIT_HOURLY="10"
13
-
TIMELINE_LIMIT_DAILY="10"
14
-
TIMELINE_LIMIT_WEEKLY="0"
15
-
TIMELINE_LIMIT_MONTHLY="10"
16
-
TIMELINE_LIMIT_YEARLY="10"
17
-
# to
18
-
TIMELINE_MIN_AGE="1800"
19
-
TIMELINE_LIMIT_HOURLY="22"
20
-
TIMELINE_LIMIT_DAILY="4"
21
-
TIMELINE_LIMIT_WEEKLY="0"
22
-
TIMELINE_LIMIT_MONTHLY="0"
23
-
TIMELINE_LIMIT_YEARLY="0"
24
-
```
25
-
26
-
this should be enough?
27
-
28
-
list snapshots:
29
-
30
-
```bash
31
-
sudo snapper -c ufos-db list
32
-
```
33
-
34
-
systemd
35
-
36
-
create file: `/etc/systemd/system/ufos.service`
37
-
38
-
```ini
39
-
[Unit]
40
-
Description=UFOs-API
41
-
After=network.target
42
-
43
-
[Service]
44
-
User=pi
45
-
WorkingDirectory=/home/pi/
46
-
ExecStart=/home/pi/ufos --jetstream us-west-2 --data /mnt/ufos-db/
47
-
Environment="RUST_LOG=info"
48
-
LimitNOFILE=16384
49
-
Restart=always
50
-
51
-
[Install]
52
-
WantedBy=multi-user.target
53
-
```
54
-
55
-
then
56
-
57
-
```bash
58
-
sudo systemctl daemon-reload
59
-
sudo systemctl enable ufos
60
-
sudo systemctl start ufos
61
-
```
62
-
63
-
monitor with
64
-
65
-
```bash
66
-
journalctl -u ufos -f
67
-
```
68
-
69
-
make sure a backup dir exists
70
-
71
-
```bash
72
-
mkdir /home/pi/backup
73
-
```
74
-
75
-
mount the NAS
76
-
77
-
```bash
78
-
sudo mount.cifs "//truenas.local/folks data" /home/pi/backup -o user=phil,uid=pi
79
-
```
80
-
81
-
manual rsync
82
-
83
-
```bash
84
-
sudo rsync -ahP --delete /mnt/ufos-db/.snapshots/1/snapshot/ backup/ufos/
85
-
```
86
-
87
-
backup script sketch
88
-
89
-
```bash
90
-
NUM=$(sudo snapper --csvout -c ufos-db list --type single --columns number | tail -n1)
91
-
sudo rsync -ahP --delete "/mnt/ufos-db/.snapshots/${NUM}/snapshot/" backup/ufos/
92
-
```
93
-
94
-
just crontab it?
95
-
96
-
`sudo crontab -e`
97
-
```bash
98
-
0 1/6 * * * rsync -ahP --delete "/mnt/ufos-db/.snapshots/$(sudo snapper --csvout -c ufos-db list --columns number | tail -n1)/snapshot/" backup/ufos/
99
-
```
100
-
101
-
^^ try once initial backup is done
102
-
103
-
104
-
--columns subvolume,number
105
-
106
-
subvolume
107
-
number
108
-
109
-
110
-
111
-
112
-
gateway: follow constellation for nginx->prom thing
113
-
114
-
config at `/etc/prometheus-nginxlog-exporter.hcl`
115
-
116
-
before: `/etc/prometheus-nginxlog-exporter.hcl`
117
-
118
-
```hcl
119
-
listen {
120
-
port = 4044
121
-
}
122
-
123
-
namespace "nginx" {
124
-
source = {
125
-
files = [
126
-
"/var/log/nginx/constellation-access.log"
127
-
]
128
-
}
129
-
130
-
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
131
-
132
-
labels {
133
-
app = "constellation"
134
-
}
135
-
136
-
relabel "cache_status" {
137
-
from = "upstream_cache_status"
138
-
}
139
-
}
140
-
```
141
-
142
-
after:
143
-
144
-
```hcl
145
-
listen {
146
-
port = 4044
147
-
}
148
-
149
-
namespace "constellation" {
150
-
source = {
151
-
files = [
152
-
"/var/log/nginx/constellation-access.log"
153
-
]
154
-
}
155
-
156
-
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
157
-
158
-
labels {
159
-
app = "constellation"
160
-
}
161
-
162
-
relabel "cache_status" {
163
-
from = "upstream_cache_status"
164
-
}
165
-
166
-
namespace_label = "vhost"
167
-
metrics_override = { prefix = "nginx" }
168
-
}
169
-
170
-
namespace "ufos" {
171
-
source = {
172
-
files = [
173
-
"/var/log/nginx/ufos-access.log"
174
-
]
175
-
}
176
-
177
-
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
178
-
179
-
labels {
180
-
app = "ufos"
181
-
}
182
-
183
-
relabel "cache_status" {
184
-
from = "upstream_cache_status"
185
-
}
186
-
187
-
namespace_label = "vhost"
188
-
metrics_override = { prefix = "nginx" }
189
-
}
190
-
```
191
-
192
-
193
-
```bash
194
-
systemctl start prometheus-nginxlog-exporter.service
195
-
```
196
-
+48
-15
who-am-i/src/server.rs
+48
-15
who-am-i/src/server.rs
···
5
5
extract::{FromRef, Json as ExtractJson, Query, State},
6
6
http::{
7
7
StatusCode,
8
-
header::{CONTENT_SECURITY_POLICY, CONTENT_TYPE, HeaderMap, REFERER},
8
+
header::{CONTENT_SECURITY_POLICY, CONTENT_TYPE, HeaderMap, ORIGIN, REFERER},
9
9
},
10
10
response::{IntoResponse, Json, Redirect, Response},
11
11
routing::{get, post},
···
211
211
.into()
212
212
}
213
213
214
+
#[derive(Debug, Deserialize)]
215
+
struct PromptQuery {
216
+
// this must *ONLY* be used for the postmessage target origin
217
+
app: Option<String>,
218
+
}
214
219
async fn prompt(
215
220
State(AppState {
216
221
allowed_hosts,
···
221
226
tokens,
222
227
..
223
228
}): State<AppState>,
229
+
Query(params): Query<PromptQuery>,
224
230
jar: SignedCookieJar,
225
231
headers: HeaderMap,
226
232
) -> impl IntoResponse {
227
-
let err = |reason, check_frame| {
233
+
let err = |reason, check_frame, detail| {
228
234
metrics::counter!("whoami_auth_prompt", "ok" => "false", "reason" => reason).increment(1);
229
-
let info = json!({ "reason": reason, "check_frame": check_frame });
235
+
let info = json!({
236
+
"reason": reason,
237
+
"check_frame": check_frame,
238
+
"detail": detail,
239
+
});
230
240
let html = RenderHtml("prompt-error", engine.clone(), info);
231
241
(StatusCode::BAD_REQUEST, html).into_response()
232
242
};
233
243
234
-
let Some(referrer) = headers.get(REFERER) else {
235
-
return err("Missing referer", true);
244
+
let Some(parent) = headers.get(ORIGIN).or_else(|| {
245
+
eprintln!("referrer fallback");
246
+
// TODO: referer should only be used for localhost??
247
+
headers.get(REFERER)
248
+
}) else {
249
+
return err("Missing origin and no referrer for fallback", true, None);
236
250
};
237
-
let Ok(referrer) = referrer.to_str() else {
238
-
return err("Unreadable referer", true);
251
+
let Ok(parent) = parent.to_str() else {
252
+
return err("Unreadable origin or referrer", true, None);
239
253
};
240
-
let Ok(url) = Url::parse(referrer) else {
241
-
return err("Bad referer", true);
254
+
eprintln!(
255
+
"rolling with parent: {parent:?} (from origin? {})",
256
+
headers.get(ORIGIN).is_some()
257
+
);
258
+
let Ok(url) = Url::parse(parent) else {
259
+
return err("Bad origin or referrer", true, None);
242
260
};
243
261
let Some(parent_host) = url.host_str() else {
244
-
return err("Referer missing host", true);
262
+
return err("Origin or referrer missing host", true, None);
245
263
};
246
264
if !allowed_hosts.contains(parent_host) {
247
-
return err("Login is not allowed on this page", false);
265
+
return err(
266
+
"Login is not allowed on this page",
267
+
false,
268
+
Some(parent_host),
269
+
);
270
+
}
271
+
if let Some(ref app) = params.app {
272
+
if !allowed_hosts.contains(app) {
273
+
return err("Login is not allowed for this app", false, Some(app));
274
+
}
248
275
}
249
276
let parent_origin = url.origin().ascii_serialization();
250
277
if parent_origin == "null" {
251
-
return err("Referer origin is opaque", true);
278
+
return err("Origin or referrer header value is opaque", true, None);
252
279
}
253
280
254
-
let csp = format!("frame-ancestors {parent_origin}");
281
+
let all_allowed = allowed_hosts
282
+
.iter()
283
+
.map(|h| format!("https://{h}"))
284
+
.collect::<Vec<_>>()
285
+
.join(" ");
286
+
let csp = format!("frame-ancestors 'self' {parent_origin} {all_allowed}");
255
287
let frame_headers = [(CONTENT_SECURITY_POLICY, &csp)];
256
288
257
289
if let Some(did) = jar.get(DID_COOKIE_KEY) {
258
290
let Ok(did) = Did::new(did.value_trimmed().to_string()) else {
259
-
return err("Bad cookie", false);
291
+
return err("Bad cookie", false, None);
260
292
};
261
293
262
294
// push cookie expiry
···
266
298
Ok(t) => t,
267
299
Err(e) => {
268
300
eprintln!("failed to create JWT: {e:?}");
269
-
return err("failed to create JWT", false);
301
+
return err("failed to create JWT", false, None);
270
302
}
271
303
};
272
304
···
286
318
"fetch_key": fetch_key,
287
319
"parent_host": parent_host,
288
320
"parent_origin": parent_origin,
321
+
"parent_target": params.app.map(|h| format!("https://{h}")),
289
322
});
290
323
(frame_headers, jar, RenderHtml("prompt", engine, info)).into_response()
291
324
} else {
+1
-1
who-am-i/static/style.css
+1
-1
who-am-i/static/style.css
+1
who-am-i/templates/prompt-error.hbs
+1
who-am-i/templates/prompt-error.hbs
···
2
2
<div class="prompt-error">
3
3
<p class="went-wrong">Something went wrong :(</p>
4
4
<p class="reason">{{ reason }}</p>
5
+
<p class="reason detail">{{ detail }}</p>
5
6
<p id="maybe-not-in-iframe" class="hidden">
6
7
Possibly related: this prompt is meant to be shown in an iframe, but it seems like it's not.
7
8
</p>
+29
-7
who-am-i/templates/prompt.hbs
+29
-7
who-am-i/templates/prompt.hbs
···
6
6
<p id="error-message" class="hidden"></p>
7
7
8
8
<p id="prompt" class="detail">
9
-
<span class="parent-host">{{ parent_host }}</span> would like to confirm your handle
9
+
<span class="parent-host">{{ parent_host }}</span> wants to confirm your handle
10
10
</p>
11
11
12
12
<div id="loader" {{#unless did}}class="hidden"{{/unless}}>
···
29
29
30
30
<div id="need-storage" class="hidden">
31
31
<p class="problem">Sorry, your browser is blocking access.</p>
32
-
<p>Try <a href="/" target="_blank">connecting directly</a> first (but no promises).</p>
32
+
<p>
33
+
Try <a href="/" target="_blank">connecting directly</a> first (but no promises).
34
+
Clicking <button id="desperation">this button</button> might also help.
35
+
</p>
33
36
</div>
34
37
35
38
···
45
48
const allowEl = document.getElementById('handle-action'); // for known-did
46
49
const connectEl = document.getElementById('connect'); // for anon
47
50
const needStorageEl = document.getElementById('need-storage'); // for safari/frame isolation
51
+
const desperationEl = document.getElementById('desperation');
48
52
49
53
function err(e, msg) {
50
54
loaderEl.classList.add('hidden');
···
80
84
promptEl.classList.add('hidden');
81
85
infoEl.classList.add('hidden');
82
86
needStorageEl.classList.remove('hidden');
87
+
desperationEl.addEventListener('click', () => {
88
+
document.requestStorageAccess({
89
+
cookies: true,
90
+
localStorage: true,
91
+
}).then(
92
+
() => {
93
+
desperationEl.textContent = "(maybe helped?)";
94
+
setTimeout(() => location.reload(), 350);
95
+
},
96
+
() => desperationEl.textContent = "(doubtful)",
97
+
);
98
+
})
83
99
}
84
100
});
85
101
}
···
144
160
return info.handle;
145
161
}
146
162
163
+
const parentTarget = {{{json parent_target}}} ?? {{{json parent_origin}}};
164
+
147
165
const shareAllow = (handle, token) => {
148
-
top.postMessage(
149
-
{ action: "allow", handle, token },
150
-
{{{json parent_origin}}},
151
-
);
166
+
try {
167
+
top.postMessage(
168
+
{ action: "allow", handle, token },
169
+
parentTarget,
170
+
);
171
+
} catch (e) {
172
+
err(e, 'Identity verified but failed to connect with app');
173
+
};
152
174
promptEl.textContent = '✔️ shared';
153
175
}
154
176
155
177
const shareDeny = reason => {
156
178
top.postMessage(
157
179
{ action: "deny", reason },
158
-
{{{json parent_origin}}},
180
+
parentTarget,
159
181
);
160
182
}
161
183
</script>