.github/workflows/build.yml (new file, +21)

+name: Build
+
+on:
+  push:
+    tags:
+      - "reflector-v*.*.*"
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+
+    steps:
+      - uses: actions/checkout@v4
+      - name: build reflector
+        run: cargo build --bin reflector --release && mv target/release/reflector target/release/reflector_amd64
+      - name: release
+        uses: softprops/action-gh-release@v2
+        with:
+          files: target/release/reflector_amd64
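Note: this workflow fires only on pushes of tags matching reflector-v*.*.*, so cutting a release is just a tag push; the version number below is illustrative, not taken from the diff:

    git tag reflector-v0.1.0
    git push origin reflector-v0.1.0

softprops/action-gh-release@v2 then creates the GitHub release for that tag and attaches target/release/reflector_amd64, which is why the job requests "contents: write".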
.github/workflows/checks.yml (+1 −1)
···
       - name: get nightly toolchain for jetstream fmt
         run: rustup toolchain install nightly --allow-downgrade -c rustfmt
       - name: fmt
-        run: cargo fmt --package links --package constellation --package ufos --package spacedust --package who-am-i --package slingshot -- --check
+        run: cargo fmt --package links --package constellation --package ufos --package spacedust --package who-am-i --package slingshot --package pocket -- --check
       - name: fmt jetstream (nightly)
         run: cargo +nightly fmt --package jetstream -- --check
       - name: clippy
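The only change is adding --package pocket to the fmt check; it can be reproduced locally with the same command the workflow runs, narrowed to the new crate:

    cargo fmt --package pocket -- --check

Note that the Makefile fmt target below adds both pocket and reflector, while this CI check adds only pocket; reflector may be intentionally excluded here, but it is worth confirming in review.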
Cargo.lock (+581 −234)
···
  "proc-macro2",
  "quote",
  "serde",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "nom",
  "num-traits",
  "rusticata-macros",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "time",
 ]

···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
  "synstructure",
 ]

···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "46355d3245edc7b3160b2a45fe55d09a6963ebd3eee0252feb6b72fb0eb71463"
 dependencies = [
- "atrium-common",
- "atrium-xrpc",
+ "atrium-common 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-xrpc 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "chrono",
+ "http",
+ "ipld-core",
+ "langtag",
+ "regex",
+ "serde",
+ "serde_bytes",
+ "serde_json",
+ "thiserror 1.0.69",
+ "tokio",
+ "trait-variant",
+]
+
+[[package]]
+name = "atrium-api"
+version = "0.25.4"
+source = "git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace#80a355991ac9b48ba3f559d12aac74f071fc638c"
+dependencies = [
+ "atrium-common 0.1.2 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-xrpc 0.12.3 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
  "chrono",
  "http",
  "ipld-core",
···
 ]

 [[package]]
+name = "atrium-common"
+version = "0.1.2"
+source = "git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace#80a355991ac9b48ba3f559d12aac74f071fc638c"
+dependencies = [
+ "dashmap",
+ "lru",
+ "moka",
+ "thiserror 1.0.69",
+ "tokio",
+ "trait-variant",
+ "web-time",
+]
+
+[[package]]
+name = "atrium-crypto"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73a3da430c71dd9006d61072c20771f264e5c498420a49c32305ceab8bd71955"
+dependencies = [
+ "ecdsa",
+ "k256",
+ "multibase",
+ "p256",
+ "thiserror 1.0.69",
+]
+
+[[package]]
 name = "atrium-identity"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c9e2d42bb4dbea038f4f5f45e3af2a89d61a9894a75f06aa550b74a60d2be380"
 dependencies = [
- "atrium-api",
- "atrium-common",
- "atrium-xrpc",
+ "atrium-api 0.25.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-common 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-xrpc 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde",
+ "serde_html_form",
+ "serde_json",
+ "thiserror 1.0.69",
+ "trait-variant",
+]
+
+[[package]]
+name = "atrium-identity"
+version = "0.1.5"
+source = "git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace#80a355991ac9b48ba3f559d12aac74f071fc638c"
+dependencies = [
+ "atrium-api 0.25.4 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-common 0.1.2 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-xrpc 0.12.3 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
  "serde",
  "serde_html_form",
  "serde_json",
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ca22dc4eaf77fd9bf050b21192ac58cd654a437d28e000ec114ebd93a51d36f5"
 dependencies = [
- "atrium-api",
- "atrium-common",
- "atrium-identity",
- "atrium-xrpc",
+ "atrium-api 0.25.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-common 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-identity 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-xrpc 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "base64 0.22.1",
+ "chrono",
+ "dashmap",
+ "ecdsa",
+ "elliptic-curve",
+ "jose-jwa",
+ "jose-jwk",
+ "p256",
+ "rand 0.8.5",
+ "reqwest",
+ "serde",
+ "serde_html_form",
+ "serde_json",
+ "sha2",
+ "thiserror 1.0.69",
+ "tokio",
+ "trait-variant",
+]
+
+[[package]]
+name = "atrium-oauth"
+version = "0.1.3"
+source = "git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace#80a355991ac9b48ba3f559d12aac74f071fc638c"
+dependencies = [
+ "atrium-api 0.25.4 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-common 0.1.2 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-identity 0.1.5 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-xrpc 0.12.3 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
  "base64 0.22.1",
  "chrono",
  "dashmap",
···
 ]

 [[package]]
+name = "atrium-xrpc"
+version = "0.12.3"
+source = "git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace#80a355991ac9b48ba3f559d12aac74f071fc638c"
+dependencies = [
+ "http",
+ "serde",
+ "serde_html_form",
+ "serde_json",
+ "thiserror 1.0.69",
+ "trait-variant",
+]
+
+[[package]]
 name = "auto_enums"
 version = "0.8.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "derive_utils",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "axum-core",
  "bytes",
  "cookie",
+ "form_urlencoded",
  "futures-util",
  "headers",
  "http",
···
  "pin-project-lite",
  "rustversion",
  "serde",
+ "serde_html_form",
+ "serde_path_to_error",
  "tower",
  "tower-layer",
  "tower-service",
···
  "axum",
  "handlebars",
  "serde",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
 ]

 [[package]]
···
  "regex",
  "rustc-hash 1.1.0",
  "shlex",
- "syn 2.0.103",
+ "syn 2.0.106",
  "which",
 ]

···
  "regex",
  "rustc-hash 1.1.0",
  "shlex",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "regex",
  "rustc-hash 2.1.1",
  "shlex",
- "syn 2.0.103",
+ "syn 2.0.106",
+]
+
+[[package]]
+name = "bitcoin-io"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf"
+
+[[package]]
+name = "bitcoin_hashes"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16"
+dependencies = [
+ "bitcoin-io",
+ "hex-conservative",
 ]

 [[package]]
···
 
 [[package]]
 name = "camino"
-version = "1.1.9"
+version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3"
+checksum = "276a59bf2b2c967788139340c9f0c5b12d7fd6630315c15c217e559de85d2609"
 dependencies = [
- "serde",
+ "serde_core",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "ciborium"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
+dependencies = [
+ "ciborium-io",
+ "ciborium-ll",
+ "serde",
+]
+
+[[package]]
+name = "ciborium-io"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
+
+[[package]]
+name = "ciborium-ll"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
+dependencies = [
+ "ciborium-io",
+ "half",
+]
+
+[[package]]
 name = "cid"
 version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 
 [[package]]
 name = "clap"
-version = "4.5.41"
+version = "4.5.48"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be92d32e80243a54711e5d7ce823c35c41c9d929dc4ab58e1276f625841aadf9"
+checksum = "e2134bb3ea021b78629caa971416385309e0131b351b25e01dc16fb54e1b5fae"
 dependencies = [
  "clap_builder",
  "clap_derive",
···
 
 [[package]]
 name = "clap_builder"
-version = "4.5.41"
+version = "4.5.48"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "707eab41e9622f9139419d573eca0900137718000c517d47da73045f54331c3d"
+checksum = "c2ba64afa3c0a6df7fa517765e31314e983f51dda798ffba27b988194fb65dc9"
 dependencies = [
  "anstream",
  "anstyle",
···
 
 [[package]]
 name = "clap_derive"
-version = "4.5.41"
+version = "4.5.47"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491"
+checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c"
 dependencies = [
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"

 [[package]]
+name = "crunchy"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5"
+
+[[package]]
 name = "crypto-bigint"
 version = "0.5.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "proc-macro2",
  "quote",
  "strsim 0.11.1",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "darling_core 0.20.11",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 checksum = "18e4fdb82bd54a12e42fb58a800dcae6b9e13982238ce2296dc3570b92148e1f"
 dependencies = [
  "data-encoding",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "darling 0.20.11",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c"
 dependencies = [
  "derive_builder_core",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
  "unicode-xid",
 ]

···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 
 [[package]]
 name = "dropshot"
-version = "0.16.2"
+version = "0.16.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "50e8fed669e35e757646ad10f97c4d26dd22cce3da689b307954f7000d2719d0"
+checksum = "eedf902e40c1024b8ed9ca16378a54e9655cdf0e698245ba82d81a3778dcbc54"
 dependencies = [
  "async-stream",
  "async-trait",
···
  "http-body-util",
  "hyper",
  "hyper-util",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "multer",
  "openapiv3",
  "paste",
···
  "slog-bunyan",
  "slog-json",
  "slog-term",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tokio",
  "tokio-rustls 0.25.0",
- "toml",
+ "toml 0.9.7",
  "uuid",
  "version_check",
  "waitgroup",
···
 
 [[package]]
 name = "dropshot_endpoint"
-version = "0.16.2"
+version = "0.16.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "acebb687581abdeaa2c89fa448818a5f803b0e68e5d7e7a1cf585a8f3c5c57ac"
+checksum = "89d09440e73a9dcf8a0f7fbd6ab889a7751d59f0fe76e5082a0a6d5623ec6da3"
 dependencies = [
  "heck",
  "proc-macro2",
···
  "semver",
  "serde",
  "serde_tokenstream",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "once_cell",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "fallible-iterator"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649"
+
+[[package]]
+name = "fallible-streaming-iterator"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"
+
+[[package]]
 name = "fastrand"
 version = "2.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 
 [[package]]
 name = "fjall"
-version = "2.8.0"
+version = "2.11.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26b2ced3483989a62b3533c9f99054d73b527c6c0045cf22b00fe87956f1a46f"
+checksum = "0b25ad44cd4360a0448a9b5a0a6f1c7a621101cca4578706d43c9a821418aebc"
+dependencies = [
+ "byteorder",
+ "byteview",
+ "dashmap",
+ "log",
+ "lsm-tree",
+ "path-absolutize",
+ "std-semaphore",
+ "tempfile",
+ "xxhash-rust",
+]
+
+[[package]]
+name = "fjall"
+version = "2.11.2"
+source = "git+https://github.com/fjall-rs/fjall.git#42d811f7c8cc9004407d520d37d2a1d8d246c03d"
 dependencies = [
  "byteorder",
  "byteview",
···
  "mixtrics",
  "pin-project",
  "serde",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tokio",
  "tracing",
 ]
···
  "parking_lot",
  "pin-project",
  "serde",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tokio",
  "twox-hash",
 ]
···
  "parking_lot",
  "pin-project",
  "serde",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tokio",
  "tracing",
 ]
···
  "pin-project",
  "rand 0.9.1",
  "serde",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tokio",
  "tracing",
  "twox-hash",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "futures-core",
  "futures-sink",
  "http",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "slab",
  "tokio",
  "tokio-util",
  "tracing",
+]
+
+[[package]]
+name = "half"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9"
+dependencies = [
+ "cfg-if",
+ "crunchy",
 ]

 [[package]]
···
  "pest_derive",
  "serde",
  "serde_json",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "walkdir",
 ]

···
  "allocator-api2",
  "equivalent",
  "foldhash",
+]
+
+[[package]]
+name = "hashlink"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1"
+dependencies = [
+ "hashbrown 0.15.2",
 ]

 [[package]]
···
 checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"

 [[package]]
+name = "hex-conservative"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd"
+dependencies = [
+ "arrayvec",
+]
+
+[[package]]
 name = "hickory-proto"
 version = "0.25.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "once_cell",
  "rand 0.9.1",
  "ring",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tinyvec",
  "tokio",
  "tracing",
···
  "rand 0.9.1",
  "resolv-conf",
  "smallvec",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tokio",
  "tracing",
 ]
···
  "js-sys",
  "log",
  "wasm-bindgen",
- "windows-core 0.61.0",
+ "windows-core",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 
 [[package]]
 name = "indexmap"
-version = "2.9.0"
+version = "2.11.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e"
+checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5"
 dependencies = [
  "equivalent",
  "hashbrown 0.15.2",
  "serde",
+ "serde_core",
 ]

 [[package]]
···
 dependencies = [
  "anyhow",
  "async-trait",
- "atrium-api",
+ "atrium-api 0.25.4 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
  "chrono",
  "clap",
  "futures-util",
···
  "metrics",
  "serde",
  "serde_json",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tokio",
  "tokio-tungstenite 0.26.2",
  "url",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "jwt-compact"
+version = "0.9.0-beta.1"
+source = "git+https://github.com/fatfingers23/jwt-compact.git#aed088b8ff5ad44ef2785c453f6a4b7916728b1c"
+dependencies = [
+ "anyhow",
+ "base64ct",
+ "chrono",
+ "ciborium",
+ "hmac",
+ "lazy_static",
+ "rand_core 0.6.4",
+ "secp256k1",
+ "serde",
+ "serde_json",
+ "sha2",
+ "smallvec",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "k256"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b"
+dependencies = [
+ "cfg-if",
+ "ecdsa",
+ "elliptic-curve",
+ "sha2",
+]
+
+[[package]]
 name = "langtag"
 version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "libsqlite3-sys"
+version = "0.35.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f"
+dependencies = [
+ "pkg-config",
+ "vcpkg",
+]
+
+[[package]]
 name = "libz-sys"
 version = "1.1.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "anyhow",
  "fluent-uri",
  "nom",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tinyjson",
 ]

···
 
 [[package]]
 name = "log"
-version = "0.4.27"
+version = "0.4.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
+checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"

 [[package]]
 name = "loom"
···
 
 [[package]]
 name = "lsm-tree"
-version = "2.8.0"
+version = "2.10.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0a63a5e98a38b51765274137d8aedfbd848da5f4d016867e186b673fcc06a8c"
+checksum = "55b6d7475a8dd22e749186968daacf8e2a77932b061b1bd263157987bbfc0c6c"
 dependencies = [
  "byteorder",
  "crossbeam-skiplist",
···
  "spin",
  "tokio",
  "tokio-util",
- "toml",
+ "toml 0.8.23",
  "tracing",
  "tracing-subscriber",
 ]
···
 
 [[package]]
 name = "matchers"
-version = "0.1.0"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
+checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9"
 dependencies = [
- "regex-automata 0.1.10",
+ "regex-automata",
 ]

 [[package]]
···
  "http-body-util",
  "hyper",
  "hyper-util",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "ipnet",
  "metrics",
  "metrics-util 0.19.0",
···
  "hyper",
  "hyper-rustls",
  "hyper-util",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "ipnet",
  "metrics",
  "metrics-util 0.20.0",
  "quanta",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tokio",
  "tracing",
 ]
···
 
 [[package]]
 name = "nu-ansi-term"
-version = "0.46.0"
+version = "0.50.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
+checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399"
 dependencies = [
- "overload",
- "winapi",
+ "windows-sys 0.52.0",
 ]

 [[package]]
···
 
 [[package]]
 name = "openapiv3"
-version = "2.0.0"
+version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc02deea53ffe807708244e5914f6b099ad7015a207ee24317c22112e17d9c5c"
+checksum = "5c8d427828b22ae1fff2833a03d8486c2c881367f1c336349f307f321e7f4d05"
 dependencies = [
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "serde",
  "serde_json",
 ]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "hashbrown 0.13.2",
 ]
-
-[[package]]
-name = "overload"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"

 [[package]]
 name = "p256"
···
 checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323"
 dependencies = [
  "memchr",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "ucd-trie",
 ]

···
  "pest_meta",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"

 [[package]]
+name = "pocket"
+version = "0.1.0"
+dependencies = [
+ "atrium-crypto",
+ "clap",
+ "jwt-compact",
+ "log",
+ "poem",
+ "poem-openapi",
+ "reqwest",
+ "rusqlite",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.16",
+ "tokio",
+ "tracing-subscriber",
+]
+
+[[package]]
 name = "poem"
 version = "3.1.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "headers",
  "http",
  "http-body-util",
+ "httpdate",
  "hyper",
  "hyper-util",
  "mime",
+ "mime_guess",
  "multer",
  "nix",
  "parking_lot",
···
  "smallvec",
  "sync_wrapper",
  "tempfile",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tokio",
  "tokio-rustls 0.26.2",
  "tokio-stream",
···
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "bytes",
  "derive_more",
  "futures-util",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "itertools 0.14.0",
  "mime",
  "num-traits",
···
  "serde_json",
  "serde_urlencoded",
  "serde_yaml",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tokio",
 ]
···
 dependencies = [
  "darling 0.20.11",
  "http",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "mime",
  "proc-macro-crate",
  "proc-macro2",
  "quote",
  "regex",
- "syn 2.0.103",
- "thiserror 2.0.12",
+ "syn 2.0.106",
+ "thiserror 2.0.16",
 ]

 [[package]]
···
 checksum = "6837b9e10d61f45f987d50808f83d1ee3d206c66acf650c3e4ae2e1f6ddedf55"
 dependencies = [
  "proc-macro2",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "quasar"
+version = "0.1.0"
+dependencies = [
+ "clap",
+ "fjall 2.11.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "quick-xml"
 version = "0.36.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "rustc-hash 2.1.1",
  "rustls 0.23.31",
  "socket2 0.5.9",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tokio",
  "tracing",
  "web-time",
···
  "rustls 0.23.31",
  "rustls-pki-types",
  "slab",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tinyvec",
  "tracing",
  "web-time",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
+]
+
+[[package]]
+name = "reflector"
+version = "0.1.0"
+dependencies = [
+ "clap",
+ "log",
+ "poem",
+ "serde",
+ "tokio",
+ "tracing-subscriber",
 ]

 [[package]]
···
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-automata 0.4.9",
- "regex-syntax 0.8.5",
-]
-
-[[package]]
-name = "regex-automata"
-version = "0.1.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
-dependencies = [
- "regex-syntax 0.6.29",
+ "regex-automata",
+ "regex-syntax",
 ]

 [[package]]
···
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-syntax 0.8.5",
+ "regex-syntax",
 ]

 [[package]]
 name = "regex-syntax"
-version = "0.6.29"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
-
-[[package]]
-name = "regex-syntax"
 version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"

 [[package]]
 name = "reqwest"
-version = "0.12.22"
+version = "0.12.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531"
+checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb"
 dependencies = [
  "async-compression",
  "base64 0.22.1",
···
 ]

 [[package]]
+name = "rusqlite"
+version = "0.37.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f"
+dependencies = [
+ "bitflags",
+ "fallible-iterator",
+ "fallible-streaming-iterator",
+ "hashlink",
+ "libsqlite3-sys",
+ "smallvec",
+]
+
+[[package]]
 name = "rustc-demangle"
 version = "0.1.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "proc-macro2",
  "quote",
  "serde_derive_internals",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "secp256k1"
+version = "0.30.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b50c5943d326858130af85e049f2661ba3c78b26589b8ab98e65e80ae44a1252"
+dependencies = [
+ "bitcoin_hashes",
+ "rand 0.8.5",
+ "secp256k1-sys",
+]
+
+[[package]]
+name = "secp256k1-sys"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9"
+dependencies = [
+ "cc",
+]
+
+[[package]]
 name = "security-framework"
 version = "2.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 
 [[package]]
 name = "serde"
-version = "1.0.219"
+version = "1.0.228"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
+checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
 dependencies = [
+ "serde_core",
  "serde_derive",
 ]
···
 ]

 [[package]]
+name = "serde_core"
+version = "1.0.228"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
 name = "serde_derive"
-version = "1.0.219"
+version = "1.0.228"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
+checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4"
 dependencies = [
  "form_urlencoded",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "itoa",
  "ryu",
  "serde",
···
 
 [[package]]
 name = "serde_json"
-version = "1.0.141"
+version = "1.0.145"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3"
+checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
 dependencies = [
  "itoa",
  "memchr",
  "ryu",
  "serde",
+ "serde_core",
 ]

 [[package]]
···
  "percent-encoding",
  "ryu",
  "serde",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "serde_spanned"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5417783452c2be558477e104686f7de5dae53dba813c28435e0e70f82d9b04ee"
+dependencies = [
+ "serde_core",
+]
+
+[[package]]
 name = "serde_tokenstream"
 version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "proc-macro2",
  "quote",
  "serde",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "chrono",
  "hex",
  "indexmap 1.9.3",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "serde",
  "serde_derive",
  "serde_json",
···
  "darling 0.20.11",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
 dependencies = [
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "itoa",
  "ryu",
  "serde",
···
 dependencies = [
  "num-bigint",
  "num-traits",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "time",
 ]

···
 name = "slingshot"
 version = "0.1.0"
 dependencies = [
- "atrium-api",
- "atrium-common",
- "atrium-identity",
- "atrium-oauth",
+ "atrium-api 0.25.4 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-common 0.1.2 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-identity 0.1.5 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-oauth 0.1.3 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
  "clap",
  "ctrlc",
  "foyer",
  "hickory-resolver",
  "jetstream",
+ "links",
  "log",
  "metrics",
  "metrics-exporter-prometheus 0.17.2",
···
  "rustls 0.23.31",
  "serde",
  "serde_json",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "time",
  "tokio",
  "tokio-util",
···
  "serde",
  "serde_json",
  "serde_qs",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tinyjson",
  "tokio",
  "tokio-tungstenite 0.27.0",
···
 
 [[package]]
 name = "syn"
-version = "2.0.103"
+version = "2.0.106"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e4307e30089d6fd6aff212f2da3a1f9e32f3223b1f010fb09b7c95f90f3ca1e8"
+checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6"
 dependencies = [
  "proc-macro2",
  "quote",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 
 [[package]]
 name = "thiserror"
-version = "2.0.12"
+version = "2.0.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708"
+checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0"
 dependencies = [
- "thiserror-impl 2.0.12",
+ "thiserror-impl 2.0.16",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
 name = "thiserror-impl"
-version = "2.0.12"
+version = "2.0.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d"
+checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 
 [[package]]
 name = "tokio"
-version = "1.47.0"
+version = "1.47.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35"
+checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
 dependencies = [
  "backtrace",
  "bytes",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362"
 dependencies = [
  "serde",
- "serde_spanned",
- "toml_datetime",
+ "serde_spanned 0.6.9",
+ "toml_datetime 0.6.11",
  "toml_edit",
 ]

 [[package]]
+name = "toml"
+version = "0.9.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00e5e5d9bf2475ac9d4f0d9edab68cc573dc2fd644b0dba36b0c30a92dd9eaa0"
+dependencies = [
+ "indexmap 2.11.4",
+ "serde_core",
+ "serde_spanned 1.0.2",
+ "toml_datetime 0.7.2",
+ "toml_parser",
+ "toml_writer",
+ "winnow",
+]
+
+[[package]]
 name = "toml_datetime"
 version = "0.6.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "toml_datetime"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32f1085dec27c2b6632b04c80b3bb1b4300d6495d1e129693bdda7d91e72eec1"
+dependencies = [
+ "serde_core",
+]
+
+[[package]]
 name = "toml_edit"
 version = "0.22.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
 dependencies = [
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "serde",
- "serde_spanned",
- "toml_datetime",
+ "serde_spanned 0.6.9",
+ "toml_datetime 0.6.11",
  "toml_write",
  "winnow",
 ]

 [[package]]
+name = "toml_parser"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4cf893c33be71572e0e9aa6dd15e6677937abd686b066eac3f8cd3531688a627"
+dependencies = [
+ "winnow",
+]
+
+[[package]]
 name = "toml_write"
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801"
+
+[[package]]
+name = "toml_writer"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d163a63c116ce562a22cda521fcc4d79152e7aba014456fb5eb442f6d6a10109"

 [[package]]
 name = "tower"
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 
 [[package]]
 name = "tracing-subscriber"
-version = "0.3.19"
+version = "0.3.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008"
+checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5"
 dependencies = [
  "matchers",
  "nu-ansi-term",
  "once_cell",
- "regex",
+ "regex-automata",
  "sharded-slab",
  "smallvec",
  "thread_local",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "native-tls",
  "rand 0.9.1",
  "sha1",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "url",
  "utf-8",
 ]
···
  "log",
  "rand 0.9.1",
  "sha1",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "utf-8",
 ]

···
  "clap",
  "dropshot",
  "env_logger",
- "fjall",
+ "fjall 2.11.2 (git+https://github.com/fjall-rs/fjall.git)",
  "getrandom 0.3.3",
  "http",
  "jetstream",
···
  "serde_qs",
  "sha2",
  "tempfile",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tikv-jemallocator",
  "tokio",
  "tokio-util",
···
 
 [[package]]
 name = "uuid"
-version = "1.16.0"
+version = "1.18.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9"
+checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2"
 dependencies = [
  "getrandom 0.3.3",
+ "js-sys",
  "serde",
+ "wasm-bindgen",
 ]

···
 
 [[package]]
 name = "value-log"
-version = "1.8.0"
+version = "1.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd29b17c041f94e0885179637289815cd038f0c9fc19c4549d5a97017404fb7d"
+checksum = "62fc7c4ce161f049607ecea654dca3f2d727da5371ae85e2e4f14ce2b98ed67c"
 dependencies = [
  "byteorder",
  "byteview",
···
  "log",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
  "wasm-bindgen-shared",
 ]

···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
  "wasm-bindgen-backend",
  "wasm-bindgen-shared",
 ]
···
 name = "who-am-i"
 version = "0.1.0"
 dependencies = [
- "atrium-api",
- "atrium-common",
- "atrium-identity",
- "atrium-oauth",
+ "atrium-api 0.25.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-common 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-identity 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-oauth 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "axum",
  "axum-extra",
  "axum-template",
···
  "reqwest",
  "serde",
  "serde_json",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "tokio",
  "tokio-util",
  "url",
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6"
 dependencies = [
- "windows-core 0.58.0",
+ "windows-core",
  "windows-targets 0.52.6",
 ]

···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99"
 dependencies = [
- "windows-implement 0.58.0",
- "windows-interface 0.58.0",
+ "windows-implement",
+ "windows-interface",
  "windows-result 0.2.0",
  "windows-strings 0.1.0",
  "windows-targets 0.52.6",
 ]

 [[package]]
-name = "windows-core"
-version = "0.61.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980"
-dependencies = [
- "windows-implement 0.60.0",
- "windows-interface 0.59.1",
- "windows-link",
- "windows-result 0.3.4",
- "windows-strings 0.4.2",
-]
-
-[[package]]
 name = "windows-implement"
 version = "0.58.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
-]
-
-[[package]]
-name = "windows-implement"
-version = "0.60.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
-]
-
-[[package]]
-name = "windows-interface"
-version = "0.59.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 
 [[package]]
 name = "winnow"
-version = "0.7.11"
+version = "0.7.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd"
+checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf"
 dependencies = [
  "memchr",
 ]
···
  "nom",
  "oid-registry",
  "rusticata-macros",
- "thiserror 2.0.12",
+ "thiserror 2.0.16",
  "time",
 ]

···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
  "synstructure",
 ]

···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
  "synstructure",
 ]

···
 checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
 dependencies = [
  "serde",
+ "zeroize_derive",
+]
+
+[[package]]
+name = "zeroize_derive"
+version = "1.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
Cargo.toml (+3, hunk not expanded in the source view)
Makefile (+8 −1)
···
 	cargo test --all-features

 fmt:
-	cargo fmt --package links --package constellation --package ufos --package spacedust --package who-am-i --package slingshot
+	cargo fmt --package links \
+		--package constellation \
+		--package ufos \
+		--package spacedust \
+		--package who-am-i \
+		--package slingshot \
+		--package pocket \
+		--package reflector
 	cargo +nightly fmt --package jetstream

 clippy:
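Usage is unchanged; the reflowed target still runs as:

    make fmt

and now also formats the new pocket and reflector crates, while jetstream keeps its separate nightly rustfmt invocation.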
constellation/Cargo.toml (+1 −1)
···
 anyhow = "1.0.95"
 askama = { version = "0.12.1", features = ["serde-json"] }
 axum = "0.8.1"
-axum-extra = { version = "0.10.0", features = ["typed-header"] }
+axum-extra = { version = "0.10.0", features = ["query", "typed-header"] }
 axum-metrics = "0.2"
 bincode = "1.3.3"
 clap = { version = "4.5.26", features = ["derive"] }
constellation/LICENSE (new file, +661; first 133 lines shown)
···
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
134
+
135
+
The Corresponding Source need not include anything that users
136
+
can regenerate automatically from other parts of the Corresponding
137
+
Source.
138
+
139
+
The Corresponding Source for a work in source code form is that
140
+
same work.
141
+
142
+
2. Basic Permissions.
143
+
144
+
All rights granted under this License are granted for the term of
145
+
copyright on the Program, and are irrevocable provided the stated
146
+
conditions are met. This License explicitly affirms your unlimited
147
+
permission to run the unmodified Program. The output from running a
148
+
covered work is covered by this License only if the output, given its
149
+
content, constitutes a covered work. This License acknowledges your
150
+
rights of fair use or other equivalent, as provided by copyright law.
151
+
152
+
You may make, run and propagate covered works that you do not
153
+
convey, without conditions so long as your license otherwise remains
154
+
in force. You may convey covered works to others for the sole purpose
155
+
of having them make modifications exclusively for you, or provide you
156
+
with facilities for running those works, provided that you comply with
157
+
the terms of this License in conveying all material for which you do
158
+
not control copyright. Those thus making or running the covered works
159
+
for you must do so exclusively on your behalf, under your direction
160
+
and control, on terms that prohibit them from making any copies of
161
+
your copyrighted material outside their relationship with you.
162
+
163
+
Conveying under any other circumstances is permitted solely under
164
+
the conditions stated below. Sublicensing is not allowed; section 10
165
+
makes it unnecessary.
166
+
167
+
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
168
+
169
+
No covered work shall be deemed part of an effective technological
170
+
measure under any applicable law fulfilling obligations under article
171
+
11 of the WIPO copyright treaty adopted on 20 December 1996, or
172
+
similar laws prohibiting or restricting circumvention of such
173
+
measures.
174
+
175
+
When you convey a covered work, you waive any legal power to forbid
176
+
circumvention of technological measures to the extent such circumvention
177
+
is effected by exercising rights under this License with respect to
178
+
the covered work, and you disclaim any intention to limit operation or
179
+
modification of the work as a means of enforcing, against the work's
180
+
users, your or third parties' legal rights to forbid circumvention of
181
+
technological measures.
182
+
183
+
4. Conveying Verbatim Copies.
184
+
185
+
You may convey verbatim copies of the Program's source code as you
186
+
receive it, in any medium, provided that you conspicuously and
187
+
appropriately publish on each copy an appropriate copyright notice;
188
+
keep intact all notices stating that this License and any
189
+
non-permissive terms added in accord with section 7 apply to the code;
190
+
keep intact all notices of the absence of any warranty; and give all
191
+
recipients a copy of this License along with the Program.
192
+
193
+
You may charge any price or no price for each copy that you convey,
194
+
and you may offer support or warranty protection for a fee.
195
+
196
+
5. Conveying Modified Source Versions.
197
+
198
+
You may convey a work based on the Program, or the modifications to
199
+
produce it from the Program, in the form of source code under the
200
+
terms of section 4, provided that you also meet all of these conditions:
201
+
202
+
a) The work must carry prominent notices stating that you modified
203
+
it, and giving a relevant date.
204
+
205
+
b) The work must carry prominent notices stating that it is
206
+
released under this License and any conditions added under section
207
+
7. This requirement modifies the requirement in section 4 to
208
+
"keep intact all notices".
209
+
210
+
c) You must license the entire work, as a whole, under this
211
+
License to anyone who comes into possession of a copy. This
212
+
License will therefore apply, along with any applicable section 7
213
+
additional terms, to the whole of the work, and all its parts,
214
+
regardless of how they are packaged. This License gives no
215
+
permission to license the work in any other way, but it does not
216
+
invalidate such permission if you have separately received it.
217
+
218
+
d) If the work has interactive user interfaces, each must display
219
+
Appropriate Legal Notices; however, if the Program has interactive
220
+
interfaces that do not display Appropriate Legal Notices, your
221
+
work need not make them do so.
222
+
223
+
A compilation of a covered work with other separate and independent
224
+
works, which are not by their nature extensions of the covered work,
225
+
and which are not combined with it such as to form a larger program,
226
+
in or on a volume of a storage or distribution medium, is called an
227
+
"aggregate" if the compilation and its resulting copyright are not
228
+
used to limit the access or legal rights of the compilation's users
229
+
beyond what the individual works permit. Inclusion of a covered work
230
+
in an aggregate does not cause this License to apply to the other
231
+
parts of the aggregate.
232
+
233
+
6. Conveying Non-Source Forms.
234
+
235
+
You may convey a covered work in object code form under the terms
236
+
of sections 4 and 5, provided that you also convey the
237
+
machine-readable Corresponding Source under the terms of this License,
238
+
in one of these ways:
239
+
240
+
a) Convey the object code in, or embodied in, a physical product
241
+
(including a physical distribution medium), accompanied by the
242
+
Corresponding Source fixed on a durable physical medium
243
+
customarily used for software interchange.
244
+
245
+
b) Convey the object code in, or embodied in, a physical product
246
+
(including a physical distribution medium), accompanied by a
247
+
written offer, valid for at least three years and valid for as
248
+
long as you offer spare parts or customer support for that product
249
+
model, to give anyone who possesses the object code either (1) a
250
+
copy of the Corresponding Source for all the software in the
251
+
product that is covered by this License, on a durable physical
252
+
medium customarily used for software interchange, for a price no
253
+
more than your reasonable cost of physically performing this
254
+
conveying of source, or (2) access to copy the
255
+
Corresponding Source from a network server at no charge.
256
+
257
+
c) Convey individual copies of the object code with a copy of the
258
+
written offer to provide the Corresponding Source. This
259
+
alternative is allowed only occasionally and noncommercially, and
260
+
only if you received the object code with such an offer, in accord
261
+
with subsection 6b.
262
+
263
+
d) Convey the object code by offering access from a designated
264
+
place (gratis or for a charge), and offer equivalent access to the
265
+
Corresponding Source in the same way through the same place at no
266
+
further charge. You need not require recipients to copy the
267
+
Corresponding Source along with the object code. If the place to
268
+
copy the object code is a network server, the Corresponding Source
269
+
may be on a different server (operated by you or a third party)
270
+
that supports equivalent copying facilities, provided you maintain
271
+
clear directions next to the object code saying where to find the
272
+
Corresponding Source. Regardless of what server hosts the
273
+
Corresponding Source, you remain obligated to ensure that it is
274
+
available for as long as needed to satisfy these requirements.
275
+
276
+
e) Convey the object code using peer-to-peer transmission, provided
277
+
you inform other peers where the object code and Corresponding
278
+
Source of the work are being offered to the general public at no
279
+
charge under subsection 6d.
280
+
281
+
A separable portion of the object code, whose source code is excluded
282
+
from the Corresponding Source as a System Library, need not be
283
+
included in conveying the object code work.
284
+
285
+
A "User Product" is either (1) a "consumer product", which means any
286
+
tangible personal property which is normally used for personal, family,
287
+
or household purposes, or (2) anything designed or sold for incorporation
288
+
into a dwelling. In determining whether a product is a consumer product,
289
+
doubtful cases shall be resolved in favor of coverage. For a particular
290
+
product received by a particular user, "normally used" refers to a
291
+
typical or common use of that class of product, regardless of the status
292
+
of the particular user or of the way in which the particular user
293
+
actually uses, or expects or is expected to use, the product. A product
294
+
is a consumer product regardless of whether the product has substantial
295
+
commercial, industrial or non-consumer uses, unless such uses represent
296
+
the only significant mode of use of the product.
297
+
298
+
"Installation Information" for a User Product means any methods,
299
+
procedures, authorization keys, or other information required to install
300
+
and execute modified versions of a covered work in that User Product from
301
+
a modified version of its Corresponding Source. The information must
302
+
suffice to ensure that the continued functioning of the modified object
303
+
code is in no case prevented or interfered with solely because
304
+
modification has been made.
305
+
306
+
If you convey an object code work under this section in, or with, or
307
+
specifically for use in, a User Product, and the conveying occurs as
308
+
part of a transaction in which the right of possession and use of the
309
+
User Product is transferred to the recipient in perpetuity or for a
310
+
fixed term (regardless of how the transaction is characterized), the
311
+
Corresponding Source conveyed under this section must be accompanied
312
+
by the Installation Information. But this requirement does not apply
313
+
if neither you nor any third party retains the ability to install
314
+
modified object code on the User Product (for example, the work has
315
+
been installed in ROM).
316
+
317
+
The requirement to provide Installation Information does not include a
318
+
requirement to continue to provide support service, warranty, or updates
319
+
for a work that has been modified or installed by the recipient, or for
320
+
the User Product in which it has been modified or installed. Access to a
321
+
network may be denied when the modification itself materially and
322
+
adversely affects the operation of the network or violates the rules and
323
+
protocols for communication across the network.
324
+
325
+
Corresponding Source conveyed, and Installation Information provided,
326
+
in accord with this section must be in a format that is publicly
327
+
documented (and with an implementation available to the public in
328
+
source code form), and must require no special password or key for
329
+
unpacking, reading or copying.
330
+
331
+
7. Additional Terms.
332
+
333
+
"Additional permissions" are terms that supplement the terms of this
334
+
License by making exceptions from one or more of its conditions.
335
+
Additional permissions that are applicable to the entire Program shall
336
+
be treated as though they were included in this License, to the extent
337
+
that they are valid under applicable law. If additional permissions
338
+
apply only to part of the Program, that part may be used separately
339
+
under those permissions, but the entire Program remains governed by
340
+
this License without regard to the additional permissions.
341
+
342
+
When you convey a copy of a covered work, you may at your option
343
+
remove any additional permissions from that copy, or from any part of
344
+
it. (Additional permissions may be written to require their own
345
+
removal in certain cases when you modify the work.) You may place
346
+
additional permissions on material, added by you to a covered work,
347
+
for which you have or can give appropriate copyright permission.
348
+
349
+
Notwithstanding any other provision of this License, for material you
350
+
add to a covered work, you may (if authorized by the copyright holders of
351
+
that material) supplement the terms of this License with terms:
352
+
353
+
a) Disclaiming warranty or limiting liability differently from the
354
+
terms of sections 15 and 16 of this License; or
355
+
356
+
b) Requiring preservation of specified reasonable legal notices or
357
+
author attributions in that material or in the Appropriate Legal
358
+
Notices displayed by works containing it; or
359
+
360
+
c) Prohibiting misrepresentation of the origin of that material, or
361
+
requiring that modified versions of such material be marked in
362
+
reasonable ways as different from the original version; or
363
+
364
+
d) Limiting the use for publicity purposes of names of licensors or
365
+
authors of the material; or
366
+
367
+
e) Declining to grant rights under trademark law for use of some
368
+
trade names, trademarks, or service marks; or
369
+
370
+
f) Requiring indemnification of licensors and authors of that
371
+
material by anyone who conveys the material (or modified versions of
372
+
it) with contractual assumptions of liability to the recipient, for
373
+
any liability that these contractual assumptions directly impose on
374
+
those licensors and authors.
375
+
376
+
All other non-permissive additional terms are considered "further
377
+
restrictions" within the meaning of section 10. If the Program as you
378
+
received it, or any part of it, contains a notice stating that it is
379
+
governed by this License along with a term that is a further
380
+
restriction, you may remove that term. If a license document contains
381
+
a further restriction but permits relicensing or conveying under this
382
+
License, you may add to a covered work material governed by the terms
383
+
of that license document, provided that the further restriction does
384
+
not survive such relicensing or conveying.
385
+
386
+
If you add terms to a covered work in accord with this section, you
387
+
must place, in the relevant source files, a statement of the
388
+
additional terms that apply to those files, or a notice indicating
389
+
where to find the applicable terms.
390
+
391
+
Additional terms, permissive or non-permissive, may be stated in the
392
+
form of a separately written license, or stated as exceptions;
393
+
the above requirements apply either way.
394
+
395
+
8. Termination.
396
+
397
+
You may not propagate or modify a covered work except as expressly
398
+
provided under this License. Any attempt otherwise to propagate or
399
+
modify it is void, and will automatically terminate your rights under
400
+
this License (including any patent licenses granted under the third
401
+
paragraph of section 11).
402
+
403
+
However, if you cease all violation of this License, then your
404
+
license from a particular copyright holder is reinstated (a)
405
+
provisionally, unless and until the copyright holder explicitly and
406
+
finally terminates your license, and (b) permanently, if the copyright
407
+
holder fails to notify you of the violation by some reasonable means
408
+
prior to 60 days after the cessation.
409
+
410
+
Moreover, your license from a particular copyright holder is
411
+
reinstated permanently if the copyright holder notifies you of the
412
+
violation by some reasonable means, this is the first time you have
413
+
received notice of violation of this License (for any work) from that
414
+
copyright holder, and you cure the violation prior to 30 days after
415
+
your receipt of the notice.
416
+
417
+
Termination of your rights under this section does not terminate the
418
+
licenses of parties who have received copies or rights from you under
419
+
this License. If your rights have been terminated and not permanently
420
+
reinstated, you do not qualify to receive new licenses for the same
421
+
material under section 10.
422
+
423
+
9. Acceptance Not Required for Having Copies.
424
+
425
+
You are not required to accept this License in order to receive or
426
+
run a copy of the Program. Ancillary propagation of a covered work
427
+
occurring solely as a consequence of using peer-to-peer transmission
428
+
to receive a copy likewise does not require acceptance. However,
429
+
nothing other than this License grants you permission to propagate or
430
+
modify any covered work. These actions infringe copyright if you do
431
+
not accept this License. Therefore, by modifying or propagating a
432
+
covered work, you indicate your acceptance of this License to do so.
433
+
434
+
10. Automatic Licensing of Downstream Recipients.
435
+
436
+
Each time you convey a covered work, the recipient automatically
437
+
receives a license from the original licensors, to run, modify and
438
+
propagate that work, subject to this License. You are not responsible
439
+
for enforcing compliance by third parties with this License.
440
+
441
+
An "entity transaction" is a transaction transferring control of an
442
+
organization, or substantially all assets of one, or subdividing an
443
+
organization, or merging organizations. If propagation of a covered
444
+
work results from an entity transaction, each party to that
445
+
transaction who receives a copy of the work also receives whatever
446
+
licenses to the work the party's predecessor in interest had or could
447
+
give under the previous paragraph, plus a right to possession of the
448
+
Corresponding Source of the work from the predecessor in interest, if
449
+
the predecessor has it or can get it with reasonable efforts.
450
+
451
+
You may not impose any further restrictions on the exercise of the
452
+
rights granted or affirmed under this License. For example, you may
453
+
not impose a license fee, royalty, or other charge for exercise of
454
+
rights granted under this License, and you may not initiate litigation
455
+
(including a cross-claim or counterclaim in a lawsuit) alleging that
456
+
any patent claim is infringed by making, using, selling, offering for
457
+
sale, or importing the Program or any portion of it.
458
+
459
+
11. Patents.
460
+
461
+
A "contributor" is a copyright holder who authorizes use under this
462
+
License of the Program or a work on which the Program is based. The
463
+
work thus licensed is called the contributor's "contributor version".
464
+
465
+
A contributor's "essential patent claims" are all patent claims
466
+
owned or controlled by the contributor, whether already acquired or
467
+
hereafter acquired, that would be infringed by some manner, permitted
468
+
by this License, of making, using, or selling its contributor version,
469
+
but do not include claims that would be infringed only as a
470
+
consequence of further modification of the contributor version. For
471
+
purposes of this definition, "control" includes the right to grant
472
+
patent sublicenses in a manner consistent with the requirements of
473
+
this License.
474
+
475
+
Each contributor grants you a non-exclusive, worldwide, royalty-free
476
+
patent license under the contributor's essential patent claims, to
477
+
make, use, sell, offer for sale, import and otherwise run, modify and
478
+
propagate the contents of its contributor version.
479
+
480
+
In the following three paragraphs, a "patent license" is any express
481
+
agreement or commitment, however denominated, not to enforce a patent
482
+
(such as an express permission to practice a patent or covenant not to
483
+
sue for patent infringement). To "grant" such a patent license to a
484
+
party means to make such an agreement or commitment not to enforce a
485
+
patent against the party.
486
+
487
+
If you convey a covered work, knowingly relying on a patent license,
488
+
and the Corresponding Source of the work is not available for anyone
489
+
to copy, free of charge and under the terms of this License, through a
490
+
publicly available network server or other readily accessible means,
491
+
then you must either (1) cause the Corresponding Source to be so
492
+
available, or (2) arrange to deprive yourself of the benefit of the
493
+
patent license for this particular work, or (3) arrange, in a manner
494
+
consistent with the requirements of this License, to extend the patent
495
+
license to downstream recipients. "Knowingly relying" means you have
496
+
actual knowledge that, but for the patent license, your conveying the
497
+
covered work in a country, or your recipient's use of the covered work
498
+
in a country, would infringe one or more identifiable patents in that
499
+
country that you have reason to believe are valid.
500
+
501
+
If, pursuant to or in connection with a single transaction or
502
+
arrangement, you convey, or propagate by procuring conveyance of, a
503
+
covered work, and grant a patent license to some of the parties
504
+
receiving the covered work authorizing them to use, propagate, modify
505
+
or convey a specific copy of the covered work, then the patent license
506
+
you grant is automatically extended to all recipients of the covered
507
+
work and works based on it.
508
+
509
+
A patent license is "discriminatory" if it does not include within
510
+
the scope of its coverage, prohibits the exercise of, or is
511
+
conditioned on the non-exercise of one or more of the rights that are
512
+
specifically granted under this License. You may not convey a covered
513
+
work if you are a party to an arrangement with a third party that is
514
+
in the business of distributing software, under which you make payment
515
+
to the third party based on the extent of your activity of conveying
516
+
the work, and under which the third party grants, to any of the
517
+
parties who would receive the covered work from you, a discriminatory
518
+
patent license (a) in connection with copies of the covered work
519
+
conveyed by you (or copies made from those copies), or (b) primarily
520
+
for and in connection with specific products or compilations that
521
+
contain the covered work, unless you entered into that arrangement,
522
+
or that patent license was granted, prior to 28 March 2007.
523
+
524
+
Nothing in this License shall be construed as excluding or limiting
525
+
any implied license or other defenses to infringement that may
526
+
otherwise be available to you under applicable patent law.
527
+
528
+
12. No Surrender of Others' Freedom.
529
+
530
+
If conditions are imposed on you (whether by court order, agreement or
531
+
otherwise) that contradict the conditions of this License, they do not
532
+
excuse you from the conditions of this License. If you cannot convey a
533
+
covered work so as to satisfy simultaneously your obligations under this
534
+
License and any other pertinent obligations, then as a consequence you may
535
+
not convey it at all. For example, if you agree to terms that obligate you
536
+
to collect a royalty for further conveying from those to whom you convey
537
+
the Program, the only way you could satisfy both those terms and this
538
+
License would be to refrain entirely from conveying the Program.
539
+
540
+
13. Remote Network Interaction; Use with the GNU General Public License.
541
+
542
+
Notwithstanding any other provision of this License, if you modify the
543
+
Program, your modified version must prominently offer all users
544
+
interacting with it remotely through a computer network (if your version
545
+
supports such interaction) an opportunity to receive the Corresponding
546
+
Source of your version by providing access to the Corresponding Source
547
+
from a network server at no charge, through some standard or customary
548
+
means of facilitating copying of software. This Corresponding Source
549
+
shall include the Corresponding Source for any work covered by version 3
550
+
of the GNU General Public License that is incorporated pursuant to the
551
+
following paragraph.
552
+
553
+
Notwithstanding any other provision of this License, you have
554
+
permission to link or combine any covered work with a work licensed
555
+
under version 3 of the GNU General Public License into a single
556
+
combined work, and to convey the resulting work. The terms of this
557
+
License will continue to apply to the part which is the covered work,
558
+
but the work with which it is combined will remain governed by version
559
+
3 of the GNU General Public License.
560
+
561
+
14. Revised Versions of this License.
562
+
563
+
The Free Software Foundation may publish revised and/or new versions of
564
+
the GNU Affero General Public License from time to time. Such new versions
565
+
will be similar in spirit to the present version, but may differ in detail to
566
+
address new problems or concerns.
567
+
568
+
Each version is given a distinguishing version number. If the
569
+
Program specifies that a certain numbered version of the GNU Affero General
570
+
Public License "or any later version" applies to it, you have the
571
+
option of following the terms and conditions either of that numbered
572
+
version or of any later version published by the Free Software
573
+
Foundation. If the Program does not specify a version number of the
574
+
GNU Affero General Public License, you may choose any version ever published
575
+
by the Free Software Foundation.
576
+
577
+
If the Program specifies that a proxy can decide which future
578
+
versions of the GNU Affero General Public License can be used, that proxy's
579
+
public statement of acceptance of a version permanently authorizes you
580
+
to choose that version for the Program.
581
+
582
+
Later license versions may give you additional or different
583
+
permissions. However, no additional obligations are imposed on any
584
+
author or copyright holder as a result of your choosing to follow a
585
+
later version.
586
+
587
+
15. Disclaimer of Warranty.
588
+
589
+
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
590
+
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
591
+
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
592
+
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
593
+
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
594
+
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
595
+
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
596
+
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
597
+
598
+
16. Limitation of Liability.
599
+
600
+
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
601
+
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
602
+
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
603
+
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
604
+
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
605
+
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
606
+
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
607
+
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
608
+
SUCH DAMAGES.
609
+
610
+
17. Interpretation of Sections 15 and 16.
611
+
612
+
If the disclaimer of warranty and limitation of liability provided
613
+
above cannot be given local legal effect according to their terms,
614
+
reviewing courts shall apply local law that most closely approximates
615
+
an absolute waiver of all civil liability in connection with the
616
+
Program, unless a warranty or assumption of liability accompanies a
617
+
copy of the Program in return for a fee.
618
+
619
+
END OF TERMS AND CONDITIONS
620
+
621
+
How to Apply These Terms to Your New Programs
622
+
623
+
If you develop a new program, and you want it to be of the greatest
624
+
possible use to the public, the best way to achieve this is to make it
625
+
free software which everyone can redistribute and change under these terms.
626
+
627
+
To do so, attach the following notices to the program. It is safest
628
+
to attach them to the start of each source file to most effectively
629
+
state the exclusion of warranty; and each file should have at least
630
+
the "copyright" line and a pointer to where the full notice is found.
631
+
632
+
<one line to give the program's name and a brief idea of what it does.>
633
+
Copyright (C) <year> <name of author>
634
+
635
+
This program is free software: you can redistribute it and/or modify
636
+
it under the terms of the GNU Affero General Public License as published
637
+
by the Free Software Foundation, either version 3 of the License, or
638
+
(at your option) any later version.
639
+
640
+
This program is distributed in the hope that it will be useful,
641
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
642
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
643
+
GNU Affero General Public License for more details.
644
+
645
+
You should have received a copy of the GNU Affero General Public License
646
+
along with this program. If not, see <https://www.gnu.org/licenses/>.
647
+
648
+
Also add information on how to contact you by electronic and paper mail.
649
+
650
+
If your software can interact with users remotely through a computer
651
+
network, you should also make sure that it provides a way for users to
652
+
get its source. For example, if your program is a web application, its
653
+
interface could display a "Source" link that leads users to an archive
654
+
of the code. There are many ways you could offer source, and different
655
+
solutions will be better for different programs; see section 13 for the
656
+
specific requirements.
657
+
658
+
You should also get your employer (if you work as a programmer) or school,
659
+
if any, to sign a "copyright disclaimer" for the program, if necessary.
660
+
For more information on this, and how to apply and follow the GNU AGPL, see
661
+
<https://www.gnu.org/licenses/>.
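The "How to Apply These Terms" section above is the practical part of this new LICENSE file: each source file should carry at least the copyright line and a pointer to where the full notice is found. A minimal sketch of what that per-file header could look like in one of this repo's Rust sources — the description, year, and author are placeholders, not taken from the PR:

```rust
// constellation - aggregates links in the at-mosphere
// Copyright (C) <year> <name of author>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. There is NO WARRANTY; see
// <https://www.gnu.org/licenses/> for the full license text.
```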
+3
constellation/LICENSE.future
+9
constellation/readme.md
···
83
83
```
84
84
85
85
86
+
## Contributions
87
+
88
+
### Licensing
89
+
90
+
Constellation's source code is currently available exclusively under the AGPL license (see [LICENSE](./LICENSE)).
91
+
92
+
In the future, its code MAY become available under the MIT and/or Apache-2.0 licenses, at the sole discretion of the microcosm organization. Contributing implies acceptance of this possible future licensing change. The change has not happened yet and is not guaranteed.
93
+
94
+
86
95
some todos
87
96
88
97
- [x] find links and write them to rocksdb
+57
-14
constellation/src/bin/main.rs
···
1
1
use anyhow::{bail, Result};
2
2
use clap::{Parser, ValueEnum};
3
3
use metrics_exporter_prometheus::PrometheusBuilder;
4
+
use std::net::SocketAddr;
4
5
use std::num::NonZero;
5
6
use std::path::PathBuf;
6
7
use std::sync::{atomic::AtomicU32, Arc};
···
21
22
#[derive(Parser, Debug)]
22
23
#[command(version, about, long_about = None)]
23
24
struct Args {
24
-
#[arg(short, long)]
25
+
/// constellation server's listen address
26
+
#[arg(long)]
27
+
#[clap(default_value = "0.0.0.0:6789")]
28
+
bind: SocketAddr,
29
+
/// metrics server's listen address
30
+
#[arg(long)]
31
+
#[clap(default_value = "0.0.0.0:8765")]
32
+
bind_metrics: SocketAddr,
25
33
/// Jetstream server to connect to (exclusive with --fixture). Provide either a wss:// URL, or a shorthand value:
26
34
/// 'us-east-1', 'us-east-2', 'us-west-1', or 'us-west-2'
27
35
#[arg(short, long)]
···
46
54
/// Saved jsonl from jetstream to use instead of a live subscription
47
55
#[arg(short, long)]
48
56
fixture: Option<PathBuf>,
57
+
/// run a scan across the target id table and write all key -> ids to id -> keys
58
+
#[arg(long, action)]
59
+
repair_target_ids: bool,
49
60
}
50
61
51
62
#[derive(Debug, Clone, ValueEnum)]
···
78
89
let stream = jetstream_url(&args.jetstream);
79
90
println!("using jetstream server {stream:?}...",);
80
91
92
+
let bind = args.bind;
93
+
let metrics_bind = args.bind_metrics;
94
+
81
95
let stay_alive = CancellationToken::new();
82
96
83
97
match args.backend {
84
-
StorageBackend::Memory => run(MemStorage::new(), fixture, None, stream, stay_alive),
98
+
StorageBackend::Memory => run(
99
+
MemStorage::new(),
100
+
fixture,
101
+
None,
102
+
stream,
103
+
bind,
104
+
metrics_bind,
105
+
stay_alive,
106
+
),
85
107
#[cfg(feature = "rocks")]
86
108
StorageBackend::Rocks => {
87
109
let storage_dir = args.data.clone().unwrap_or("rocks.test".into());
···
96
118
rocks.start_backup(backup_dir, auto_backup, stay_alive.clone())?;
97
119
}
98
120
println!("rocks ready.");
99
-
run(rocks, fixture, args.data, stream, stay_alive)
121
+
std::thread::scope(|s| {
122
+
if args.repair_target_ids {
123
+
let rocks = rocks.clone();
124
+
let stay_alive = stay_alive.clone();
125
+
s.spawn(move || {
126
+
let rep = rocks.run_repair(time::Duration::from_millis(0), stay_alive);
127
+
eprintln!("repair finished: {rep:?}");
128
+
rep
129
+
});
130
+
}
131
+
s.spawn(|| {
132
+
let r = run(
133
+
rocks,
134
+
fixture,
135
+
args.data,
136
+
stream,
137
+
bind,
138
+
metrics_bind,
139
+
stay_alive,
140
+
);
141
+
eprintln!("run finished: {r:?}");
142
+
r
143
+
});
144
+
});
145
+
Ok(())
100
146
}
101
147
}
102
148
}
···
106
152
fixture: Option<PathBuf>,
107
153
data_dir: Option<PathBuf>,
108
154
stream: String,
155
+
bind: SocketAddr,
156
+
metrics_bind: SocketAddr,
109
157
stay_alive: CancellationToken,
110
158
) -> Result<()> {
111
159
ctrlc::set_handler({
···
150
198
.build()
151
199
.expect("axum startup")
152
200
.block_on(async {
153
-
install_metrics_server()?;
154
-
serve(readable, "0.0.0.0:6789", staying_alive).await
201
+
install_metrics_server(metrics_bind)?;
202
+
serve(readable, bind, staying_alive).await
155
203
})
156
204
.unwrap();
157
205
stay_alive.drop_guard();
···
184
232
185
233
'monitor: loop {
186
234
match readable.get_stats() {
187
-
Ok(StorageStats { dids, targetables, linking_records }) => {
235
+
Ok(StorageStats { dids, targetables, linking_records, .. }) => {
188
236
metrics::gauge!("storage.stats.dids").set(dids as f64);
189
237
metrics::gauge!("storage.stats.targetables").set(targetables as f64);
190
238
metrics::gauge!("storage.stats.linking_records").set(linking_records as f64);
···
218
266
Ok(())
219
267
}
220
268
221
-
fn install_metrics_server() -> Result<()> {
269
+
fn install_metrics_server(metrics_bind: SocketAddr) -> Result<()> {
222
270
println!("installing metrics server...");
223
-
let host = [0, 0, 0, 0];
224
-
let port = 8765;
225
271
PrometheusBuilder::new()
226
272
.set_quantiles(&[0.5, 0.9, 0.99, 1.0])?
227
273
.set_bucket_duration(time::Duration::from_secs(30))?
228
274
.set_bucket_count(NonZero::new(10).unwrap()) // count * duration = 5 mins. stuff doesn't happen that fast here.
229
275
.set_enable_unit_suffix(true)
230
-
.with_http_listener((host, port))
276
+
.with_http_listener(metrics_bind)
231
277
.install()?;
232
-
println!(
233
-
"metrics server installed! listening on http://{}.{}.{}.{}:{port}",
234
-
host[0], host[1], host[2], host[3]
235
-
);
278
+
println!("metrics server installed! listening at {metrics_bind:?}");
236
279
Ok(())
237
280
}
238
281
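The main.rs changes above replace the previously hard-coded listen addresses with `--bind` and `--bind-metrics` flags and thread the parsed `SocketAddr`s down into `serve` and `install_metrics_server`. A minimal sketch of just the flag-parsing half, assuming clap 4 (the combined `#[arg(long, default_value = ...)]` form is equivalent to the split `#[arg]`/`#[clap]` attributes in the diff); the demo `main` is illustrative:

```rust
use clap::Parser;
use std::net::SocketAddr;

#[derive(Parser, Debug)]
struct Args {
    /// constellation server's listen address
    #[arg(long, default_value = "0.0.0.0:6789")]
    bind: SocketAddr,
    /// metrics server's listen address
    #[arg(long, default_value = "0.0.0.0:8765")]
    bind_metrics: SocketAddr,
}

fn main() {
    // SocketAddr implements FromStr, so clap validates "host:port" at parse
    // time and an override like `--bind 127.0.0.1:9000` just works.
    let args = Args::parse();
    println!("serve on {}, metrics on {}", args.bind, args.bind_metrics);
}
```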
+239
-238
constellation/src/bin/rocks-link-stats.rs
···
1
-
use bincode::config::Options;
2
-
use clap::Parser;
3
-
use serde::Serialize;
4
-
use std::collections::HashMap;
5
-
use std::path::PathBuf;
1
+
// use bincode::config::Options;
2
+
// use clap::Parser;
3
+
// use serde::Serialize;
4
+
// use std::collections::HashMap;
5
+
// use std::path::PathBuf;
6
6
7
-
use tokio_util::sync::CancellationToken;
7
+
// use tokio_util::sync::CancellationToken;
8
8
9
-
use constellation::storage::rocks_store::{
10
-
Collection, DidId, RKey, RPath, Target, TargetKey, TargetLinkers, _bincode_opts,
11
-
};
12
-
use constellation::storage::RocksStorage;
13
-
use constellation::Did;
9
+
// use constellation::storage::rocks_store::{
10
+
// Collection, DidId, RKey, RPath, Target, TargetKey, TargetLinkers, _bincode_opts,
11
+
// };
12
+
// use constellation::storage::RocksStorage;
13
+
// use constellation::Did;
14
14
15
-
use links::parse_any_link;
16
-
use rocksdb::IteratorMode;
17
-
use std::time;
15
+
// use links::parse_any_link;
16
+
// use rocksdb::IteratorMode;
17
+
// use std::time;
18
18
19
-
/// Aggregate links in the at-mosphere
20
-
#[derive(Parser, Debug)]
21
-
#[command(version, about, long_about = None)]
22
-
struct Args {
23
-
/// where is rocksdb's data
24
-
#[arg(short, long)]
25
-
data: PathBuf,
26
-
/// slow down so we don't kill the firehose consumer, if running concurrently
27
-
#[arg(short, long)]
28
-
limit: Option<u64>,
29
-
}
19
+
// /// Aggregate links in the at-mosphere
20
+
// #[derive(Parser, Debug)]
21
+
// #[command(version, about, long_about = None)]
22
+
// struct Args {
23
+
// /// where is rocksdb's data
24
+
// #[arg(short, long)]
25
+
// data: PathBuf,
26
+
// /// slow down so we don't kill the firehose consumer, if running concurrently
27
+
// #[arg(short, long)]
28
+
// limit: Option<u64>,
29
+
// }
30
30
31
-
type LinkType = String;
31
+
// type LinkType = String;
32
32
33
-
#[derive(Debug, Eq, Hash, PartialEq, Serialize)]
34
-
struct SourceLink(Collection, RPath, LinkType, Option<Collection>); // last is target collection, if it's an at-uri link with a collection
33
+
// #[derive(Debug, Eq, Hash, PartialEq, Serialize)]
34
+
// struct SourceLink(Collection, RPath, LinkType, Option<Collection>); // last is target collection, if it's an at-uri link with a collection
35
35
36
-
#[derive(Debug, Serialize)]
37
-
struct SourceSample {
38
-
did: String,
39
-
rkey: String,
40
-
}
36
+
// #[derive(Debug, Serialize)]
37
+
// struct SourceSample {
38
+
// did: String,
39
+
// rkey: String,
40
+
// }
41
41
42
-
#[derive(Debug, Default, Serialize)]
43
-
struct Bucket {
44
-
count: u64,
45
-
sum: u64,
46
-
sample: Option<SourceSample>,
47
-
}
42
+
// #[derive(Debug, Default, Serialize)]
43
+
// struct Bucket {
44
+
// count: u64,
45
+
// sum: u64,
46
+
// sample: Option<SourceSample>,
47
+
// }
48
48
49
-
#[derive(Debug, Default, Serialize)]
50
-
struct Buckets([Bucket; 23]);
49
+
// #[derive(Debug, Default, Serialize)]
50
+
// struct Buckets([Bucket; 23]);
51
51
52
-
const BUCKETS: [u64; 23] = [
53
-
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512, 1024, 4096, 16_384, 65_535,
54
-
262_144, 1_048_576,
55
-
];
52
+
// const BUCKETS: [u64; 23] = [
53
+
// 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512, 1024, 4096, 16_384, 65_535,
54
+
// 262_144, 1_048_576,
55
+
// ];
56
56
57
-
// b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b12, b16, b32, b64, b128, b256, b512, b1024, b4096, b16384, b65535, b262144, bmax
57
+
// // b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b12, b16, b32, b64, b128, b256, b512, b1024, b4096, b16384, b65535, b262144, bmax
58
58
59
-
static DID_IDS_CF: &str = "did_ids";
60
-
static TARGET_IDS_CF: &str = "target_ids";
61
-
static TARGET_LINKERS_CF: &str = "target_links";
59
+
// static DID_IDS_CF: &str = "did_ids";
60
+
// static TARGET_IDS_CF: &str = "target_ids";
61
+
// static TARGET_LINKERS_CF: &str = "target_links";
62
62
63
-
const REPORT_INTERVAL: usize = 50_000;
63
+
// const REPORT_INTERVAL: usize = 50_000;
64
64
65
-
type Stats = HashMap<SourceLink, Buckets>;
65
+
// type Stats = HashMap<SourceLink, Buckets>;
66
66
67
-
#[derive(Debug, Serialize)]
68
-
struct Printable {
69
-
collection: String,
70
-
path: String,
71
-
link_type: String,
72
-
target_collection: Option<String>,
73
-
buckets: Buckets,
74
-
}
67
+
// #[derive(Debug, Serialize)]
68
+
// struct Printable {
69
+
// collection: String,
70
+
// path: String,
71
+
// link_type: String,
72
+
// target_collection: Option<String>,
73
+
// buckets: Buckets,
74
+
// }
75
75
76
-
#[derive(Debug, Default)]
77
-
struct ErrStats {
78
-
failed_to_get_sample: usize,
79
-
failed_to_read_target_id: usize,
80
-
failed_to_deserialize_target_key: usize,
81
-
failed_to_parse_target_as_link: usize,
82
-
failed_to_get_links: usize,
83
-
failed_to_deserialize_linkers: usize,
84
-
}
76
+
// #[derive(Debug, Default)]
77
+
// struct ErrStats {
78
+
// failed_to_get_sample: usize,
79
+
// failed_to_read_target_id: usize,
80
+
// failed_to_deserialize_target_key: usize,
81
+
// failed_to_parse_target_as_link: usize,
82
+
// failed_to_get_links: usize,
83
+
// failed_to_deserialize_linkers: usize,
84
+
// }
85
85
86
-
fn thousands(n: usize) -> String {
87
-
n.to_string()
88
-
.as_bytes()
89
-
.rchunks(3)
90
-
.rev()
91
-
.map(std::str::from_utf8)
92
-
.collect::<Result<Vec<&str>, _>>()
93
-
.unwrap()
94
-
.join(",")
95
-
}
86
+
// fn thousands(n: usize) -> String {
87
+
// n.to_string()
88
+
// .as_bytes()
89
+
// .rchunks(3)
90
+
// .rev()
91
+
// .map(std::str::from_utf8)
92
+
// .collect::<Result<Vec<&str>, _>>()
93
+
// .unwrap()
94
+
// .join(",")
95
+
// }
96
96
97
-
fn main() {
98
-
let args = Args::parse();
97
+
// fn main() {
98
+
// let args = Args::parse();
99
99
100
-
let limit = args.limit.map(|amount| {
101
-
ratelimit::Ratelimiter::builder(amount, time::Duration::from_secs(1))
102
-
.max_tokens(amount)
103
-
.initial_available(amount)
104
-
.build()
105
-
.unwrap()
106
-
});
100
+
// let limit = args.limit.map(|amount| {
101
+
// ratelimit::Ratelimiter::builder(amount, time::Duration::from_secs(1))
102
+
// .max_tokens(amount)
103
+
// .initial_available(amount)
104
+
// .build()
105
+
// .unwrap()
106
+
// });
107
107
108
-
eprintln!("starting rocksdb...");
109
-
let rocks = RocksStorage::open_readonly(args.data).unwrap();
110
-
eprintln!("rocks ready.");
108
+
// eprintln!("starting rocksdb...");
109
+
// let rocks = RocksStorage::open_readonly(args.data).unwrap();
110
+
// eprintln!("rocks ready.");
111
111
112
-
let RocksStorage { ref db, .. } = rocks;
112
+
// let RocksStorage { ref db, .. } = rocks;
113
113
114
-
let stay_alive = CancellationToken::new();
115
-
ctrlc::set_handler({
116
-
let mut desperation: u8 = 0;
117
-
let stay_alive = stay_alive.clone();
118
-
move || match desperation {
119
-
0 => {
120
-
eprintln!("ok, shutting down...");
121
-
stay_alive.cancel();
122
-
desperation += 1;
123
-
}
124
-
1.. => panic!("fine, panicking!"),
125
-
}
126
-
})
127
-
.unwrap();
114
+
// let stay_alive = CancellationToken::new();
115
+
// ctrlc::set_handler({
116
+
// let mut desperation: u8 = 0;
117
+
// let stay_alive = stay_alive.clone();
118
+
// move || match desperation {
119
+
// 0 => {
120
+
// eprintln!("ok, shutting down...");
121
+
// stay_alive.cancel();
122
+
// desperation += 1;
123
+
// }
124
+
// 1.. => panic!("fine, panicking!"),
125
+
// }
126
+
// })
127
+
// .unwrap();
128
128
129
-
let mut stats = Stats::new();
130
-
let mut err_stats: ErrStats = Default::default();
129
+
// let mut stats = Stats::new();
130
+
// let mut err_stats: ErrStats = Default::default();
131
131
132
-
let did_ids_cf = db.cf_handle(DID_IDS_CF).unwrap();
133
-
let target_id_cf = db.cf_handle(TARGET_IDS_CF).unwrap();
134
-
let target_links_cf = db.cf_handle(TARGET_LINKERS_CF).unwrap();
132
+
// let did_ids_cf = db.cf_handle(DID_IDS_CF).unwrap();
133
+
// let target_id_cf = db.cf_handle(TARGET_IDS_CF).unwrap();
134
+
// let target_links_cf = db.cf_handle(TARGET_LINKERS_CF).unwrap();
135
135
136
-
let t0 = time::Instant::now();
137
-
let mut t_prev = t0;
136
+
// let t0 = time::Instant::now();
137
+
// let mut t_prev = t0;
138
138
139
-
let mut i = 0;
140
-
for item in db.iterator_cf(&target_id_cf, IteratorMode::Start) {
141
-
if stay_alive.is_cancelled() {
142
-
break;
143
-
}
139
+
// let mut i = 0;
140
+
// for item in db.iterator_cf(&target_id_cf, IteratorMode::Start) {
141
+
// if stay_alive.is_cancelled() {
142
+
// break;
143
+
// }
144
144
145
-
if let Some(ref limiter) = limit {
146
-
if let Err(dur) = limiter.try_wait() {
147
-
std::thread::sleep(dur)
148
-
}
149
-
}
145
+
// if let Some(ref limiter) = limit {
146
+
// if let Err(dur) = limiter.try_wait() {
147
+
// std::thread::sleep(dur)
148
+
// }
149
+
// }
150
150
151
-
if i > 0 && i % REPORT_INTERVAL == 0 {
152
-
let now = time::Instant::now();
153
-
let rate = (REPORT_INTERVAL as f32) / (now.duration_since(t_prev).as_secs_f32());
154
-
eprintln!(
155
-
"{i}\t({}k)\t{:.2}\t{rate:.1}/s",
156
-
thousands(i / 1000),
157
-
t0.elapsed().as_secs_f32()
158
-
);
159
-
t_prev = now;
160
-
}
161
-
i += 1;
151
+
// if i > 0 && i % REPORT_INTERVAL == 0 {
152
+
// let now = time::Instant::now();
153
+
// let rate = (REPORT_INTERVAL as f32) / (now.duration_since(t_prev).as_secs_f32());
154
+
// eprintln!(
155
+
// "{i}\t({}k)\t{:.2}\t{rate:.1}/s",
156
+
// thousands(i / 1000),
157
+
// t0.elapsed().as_secs_f32()
158
+
// );
159
+
// t_prev = now;
160
+
// }
161
+
// i += 1;
162
162
163
-
let Ok((target_key, target_id)) = item else {
164
-
err_stats.failed_to_read_target_id += 1;
165
-
continue;
166
-
};
163
+
// let Ok((target_key, target_id)) = item else {
164
+
// err_stats.failed_to_read_target_id += 1;
165
+
// continue;
166
+
// };
167
167
168
-
let Ok(TargetKey(Target(target), collection, rpath)) =
169
-
_bincode_opts().deserialize(&target_key)
170
-
else {
171
-
err_stats.failed_to_deserialize_target_key += 1;
172
-
continue;
173
-
};
168
+
// let Ok(TargetKey(Target(target), collection, rpath)) =
169
+
// _bincode_opts().deserialize(&target_key)
170
+
// else {
171
+
// err_stats.failed_to_deserialize_target_key += 1;
172
+
// continue;
173
+
// };
174
174
175
-
let source = {
176
-
let Some(parsed) = parse_any_link(&target) else {
177
-
err_stats.failed_to_parse_target_as_link += 1;
178
-
continue;
179
-
};
180
-
SourceLink(
181
-
collection,
182
-
rpath,
183
-
parsed.name().into(),
184
-
parsed.at_uri_collection().map(Collection),
185
-
)
186
-
};
175
+
// let source = {
176
+
// let Some(parsed) = parse_any_link(&target) else {
177
+
// err_stats.failed_to_parse_target_as_link += 1;
178
+
// continue;
179
+
// };
180
+
// SourceLink(
181
+
// collection,
182
+
// rpath,
183
+
// parsed.name().into(),
184
+
// parsed.at_uri_collection().map(Collection),
185
+
// )
186
+
// };
187
187
188
-
let Ok(Some(links_raw)) = db.get_cf(&target_links_cf, &target_id) else {
189
-
err_stats.failed_to_get_links += 1;
190
-
continue;
191
-
};
192
-
let Ok(linkers) = _bincode_opts().deserialize::<TargetLinkers>(&links_raw) else {
193
-
err_stats.failed_to_deserialize_linkers += 1;
194
-
continue;
195
-
};
196
-
let (n, _) = linkers.count();
188
+
// let Ok(Some(links_raw)) = db.get_cf(&target_links_cf, &target_id) else {
189
+
// err_stats.failed_to_get_links += 1;
190
+
// continue;
191
+
// };
192
+
// let Ok(linkers) = _bincode_opts().deserialize::<TargetLinkers>(&links_raw) else {
193
+
// err_stats.failed_to_deserialize_linkers += 1;
194
+
// continue;
195
+
// };
196
+
// let (n, _) = linkers.count();
197
197
198
-
if n == 0 {
199
-
continue;
200
-
}
198
+
// if n == 0 {
199
+
// continue;
200
+
// }
201
201
202
-
let mut bucket = 0;
203
-
for edge in BUCKETS {
204
-
if n <= edge || bucket == 22 {
205
-
break;
206
-
}
207
-
bucket += 1;
208
-
}
202
+
// let mut bucket = 0;
203
+
// for edge in BUCKETS {
204
+
// if n <= edge || bucket == 22 {
205
+
// break;
206
+
// }
207
+
// bucket += 1;
208
+
// }
209
209
210
-
let b = &mut stats.entry(source).or_default().0[bucket];
211
-
b.count += 1;
212
-
b.sum += n;
213
-
if b.sample.is_none() {
214
-
let (DidId(did_id), RKey(k)) = &linkers.0[(n - 1) as usize];
215
-
if let Ok(Some(did_bytes)) = db.get_cf(&did_ids_cf, did_id.to_be_bytes()) {
216
-
if let Ok(Did(did)) = _bincode_opts().deserialize(&did_bytes) {
217
-
b.sample = Some(SourceSample {
218
-
did,
219
-
rkey: k.clone(),
220
-
});
221
-
} else {
222
-
err_stats.failed_to_get_sample += 1;
223
-
}
224
-
} else {
225
-
err_stats.failed_to_get_sample += 1;
226
-
}
227
-
}
210
+
// let b = &mut stats.entry(source).or_default().0[bucket];
211
+
// b.count += 1;
212
+
// b.sum += n;
213
+
// if b.sample.is_none() {
214
+
// let (DidId(did_id), RKey(k)) = &linkers.0[(n - 1) as usize];
215
+
// if let Ok(Some(did_bytes)) = db.get_cf(&did_ids_cf, did_id.to_be_bytes()) {
216
+
// if let Ok(Did(did)) = _bincode_opts().deserialize(&did_bytes) {
217
+
// b.sample = Some(SourceSample {
218
+
// did,
219
+
// rkey: k.clone(),
220
+
// });
221
+
// } else {
222
+
// err_stats.failed_to_get_sample += 1;
223
+
// }
224
+
// } else {
225
+
// err_stats.failed_to_get_sample += 1;
226
+
// }
227
+
// }
228
228
229
-
// if i >= 40_000 {
230
-
// break;
231
-
// }
232
-
}
229
+
// // if i >= 40_000 {
230
+
// // break;
231
+
// // }
232
+
// }
233
233
234
-
let dt = t0.elapsed();
234
+
// let dt = t0.elapsed();
235
235
236
-
eprintln!("gathering stats for output...");
236
+
// eprintln!("gathering stats for output...");
237
237
238
-
let itemified = stats
239
-
.into_iter()
240
-
.map(
241
-
|(
242
-
SourceLink(Collection(collection), RPath(path), link_type, target_collection),
243
-
buckets,
244
-
)| Printable {
245
-
collection,
246
-
path,
247
-
link_type,
248
-
target_collection: target_collection.map(|Collection(c)| c),
249
-
buckets,
250
-
},
251
-
)
252
-
.collect::<Vec<_>>();
238
+
// let itemified = stats
239
+
// .into_iter()
240
+
// .map(
241
+
// |(
242
+
// SourceLink(Collection(collection), RPath(path), link_type, target_collection),
243
+
// buckets,
244
+
// )| Printable {
245
+
// collection,
246
+
// path,
247
+
// link_type,
248
+
// target_collection: target_collection.map(|Collection(c)| c),
249
+
// buckets,
250
+
// },
251
+
// )
252
+
// .collect::<Vec<_>>();
253
253
254
-
match serde_json::to_string(&itemified) {
255
-
Ok(s) => println!("{s}"),
256
-
Err(e) => eprintln!("failed to serialize results: {e:?}"),
257
-
}
254
+
// match serde_json::to_string(&itemified) {
255
+
// Ok(s) => println!("{s}"),
256
+
// Err(e) => eprintln!("failed to serialize results: {e:?}"),
257
+
// }
258
258
259
-
eprintln!(
260
-
"{} summarizing {} link targets in {:.1}s",
261
-
if stay_alive.is_cancelled() {
262
-
"STOPPED"
263
-
} else {
264
-
"FINISHED"
265
-
},
266
-
thousands(i),
267
-
dt.as_secs_f32()
268
-
);
269
-
eprintln!("{err_stats:?}");
270
-
eprintln!("bye.");
271
-
}
259
+
// eprintln!(
260
+
// "{} summarizing {} link targets in {:.1}s",
261
+
// if stay_alive.is_cancelled() {
262
+
// "STOPPED"
263
+
// } else {
264
+
// "FINISHED"
265
+
// },
266
+
// thousands(i),
267
+
// dt.as_secs_f32()
268
+
// );
269
+
// eprintln!("{err_stats:?}");
270
+
// eprintln!("bye.");
271
+
// }
272
272
273
-
// scan plan
273
+
// // scan plan
274
274
275
-
// buckets (backlink count)
276
-
// 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512, 1024, 4096, 16384, 65535, 262144, 1048576+
277
-
// by
278
-
// - collection
279
-
// - json path
280
-
// - link type
281
-
// samples for each bucket for each variation
275
+
// // buckets (backlink count)
276
+
// // 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512, 1024, 4096, 16384, 65535, 262144, 1048576+
277
+
// // by
278
+
// // - collection
279
+
// // - json path
280
+
// // - link type
281
+
// // samples for each bucket for each variation
282
+
fn main() {}
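Even though the stats tool above is now commented out (leaving only the stub `fn main() {}` so the binary still compiles), the bucketing scheme it documents is worth keeping legible: a backlink count lands in the first bucket whose edge is >= n, clamped to a final overflow bucket. A small standalone sketch of that lookup using the same edges; `partition_point` replaces the manual loop from the commented-out code:

```rust
// Histogram edges from the commented-out tool above (last bucket is "1_048_576+").
const BUCKETS: [u64; 23] = [
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512, 1024, 4096,
    16_384, 65_535, 262_144, 1_048_576,
];

/// Index of the first edge >= n, saturating into the final overflow bucket.
fn bucket_for(n: u64) -> usize {
    BUCKETS.partition_point(|&edge| edge < n).min(BUCKETS.len() - 1)
}

fn main() {
    assert_eq!(bucket_for(1), 0); // first edge >= 1 is BUCKETS[0]
    assert_eq!(bucket_for(11), 10); // 11 skips past edge 10, lands on 12
    assert_eq!(bucket_for(2_000_000), 22); // clamped to the "+" bucket
}
```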
+2
constellation/src/bin/rocks-restore-from-backup.rs
···
3
3
use clap::Parser;
4
4
use std::path::PathBuf;
5
5
6
+
#[cfg(feature = "rocks")]
6
7
use rocksdb::backup::{BackupEngine, BackupEngineOptions, RestoreOptions};
7
8
8
9
use std::time;
···
19
20
to_data_dir: PathBuf,
20
21
}
21
22
23
+
#[cfg(feature = "rocks")]
22
24
fn main() -> Result<()> {
23
25
let args = Args::parse();
24
26
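The two `#[cfg(feature = "rocks")]` attributes added above compile the rocksdb import and `main` out of non-rocks builds. One thing the diff doesn't show is how the binary links when the feature is off: a cfg'd-away `main` needs either a `required-features = ["rocks"]` entry on the `[[bin]]` target in Cargo.toml or a fallback `main`. A hedged sketch of the fallback option (this stub is an assumption, not part of the PR):

```rust
// When the feature is enabled, the real restore entrypoint compiles as usual.
#[cfg(feature = "rocks")]
fn main() -> anyhow::Result<()> {
    // ... BackupEngine restore logic from the file above ...
    Ok(())
}

// Without this stub (or `required-features = ["rocks"]` on the [[bin]] target),
// building with the feature disabled fails with "main function not found".
#[cfg(not(feature = "rocks"))]
fn main() {
    eprintln!("rocks-restore-from-backup was built without the 'rocks' feature");
}
```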
+13
-6
constellation/src/consumer/jetstream.rs
···
226
226
println!("jetstream closed the websocket cleanly.");
227
227
break;
228
228
}
229
-
r => eprintln!("jetstream: close result after error: {r:?}"),
229
+
Err(_) => {
230
+
counter!("jetstream_read_fail", "url" => stream.clone(), "reason" => "dirty close").increment(1);
231
+
println!("jetstream failed to close the websocket cleanly.");
232
+
break;
233
+
}
234
+
Ok(r) => {
235
+
eprintln!("jetstream: close result after error: {r:?}");
236
+
counter!("jetstream_read_fail", "url" => stream.clone(), "reason" => "read error")
237
+
.increment(1);
238
+
// if we didn't immediately get ConnectionClosed, we should keep polling read
239
+
// until we get it.
240
+
continue;
241
+
}
230
242
}
231
-
counter!("jetstream_read_fail", "url" => stream.clone(), "reason" => "read error")
232
-
.increment(1);
233
-
// if we didn't immediately get ConnectionClosed, we should keep polling read
234
-
// until we get it.
235
-
continue;
236
243
}
237
244
};
238
245
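The rewritten close handling above splits the old catch-all arm in two: a failed close is counted as a dirty close and breaks out of the loop, while a successful close result that isn't `ConnectionClosed` yet keeps polling the read loop. A schematic, dependency-free sketch of that control flow; `poll_close` and its result type are hypothetical stand-ins for the websocket library's actual API:

```rust
// Hypothetical stand-ins for the websocket close-poll result.
enum Close {
    Done,                  // connection reports closed
    Pending(&'static str), // close handshake still in flight
}

fn poll_close(attempt: u32) -> Result<Close, &'static str> {
    // pretend the socket needs a couple of polls before reporting closed
    if attempt < 2 { Ok(Close::Pending("close in flight")) } else { Ok(Close::Done) }
}

fn main() {
    let mut attempt = 0;
    loop {
        match poll_close(attempt) {
            Ok(Close::Done) => {
                println!("closed cleanly"); // first arm in the diff: clean close, stop
                break;
            }
            Err(reason) => {
                println!("dirty close: {reason}"); // count a failure metric, then stop
                break;
            }
            Ok(Close::Pending(r)) => {
                println!("close result after error: {r}"); // keep polling until closed
                attempt += 1;
                continue;
            }
        }
    }
}
```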
+8
-6
constellation/src/server/filters.rs
···
5
5
Ok({
6
6
if let Some(link) = parse_any_link(s) {
7
7
match link {
8
-
Link::AtUri(at_uri) => at_uri.strip_prefix("at://").map(|noproto| {
9
-
format!("https://atproto-browser-plus-links.vercel.app/at/{noproto}")
10
-
}),
11
-
Link::Did(did) => Some(format!(
12
-
"https://atproto-browser-plus-links.vercel.app/at/{did}"
13
-
)),
8
+
Link::AtUri(at_uri) => at_uri
9
+
.strip_prefix("at://")
10
+
.map(|noproto| format!("https://pdsls.dev/at://{noproto}")),
11
+
Link::Did(did) => Some(format!("https://pdsls.dev/at://{did}")),
14
12
Link::Uri(uri) => Some(uri),
15
13
}
16
14
} else {
···
22
20
pub fn human_number(n: &u64) -> askama::Result<String> {
23
21
Ok(n.to_formatted_string(&Locale::en))
24
22
}
23
+
24
+
pub fn to_u64(n: usize) -> askama::Result<u64> {
25
+
Ok(n as u64)
26
+
}
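Both are askama filters used by the new templates below; for instance the many-to-many template pipes a `usize` length through both (`{{ counts_by_other_subject.len()|to_u64|human_number }}`), and `to_browseable` turns e.g. `at://did:plc:example/app.bsky.feed.post/3abc` (a hypothetical AT-URI) into `https://pdsls.dev/at://did:plc:example/app.bsky.feed.post/3abc`.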
+332
-19
constellation/src/server/mod.rs
+332
-19
constellation/src/server/mod.rs
···
11
11
use bincode::Options;
12
12
use serde::{Deserialize, Serialize};
13
13
use serde_with::serde_as;
14
-
use std::collections::HashMap;
14
+
use std::collections::{HashMap, HashSet};
15
15
use std::time::{Duration, UNIX_EPOCH};
16
16
use tokio::net::{TcpListener, ToSocketAddrs};
17
-
use tokio::task::block_in_place;
17
+
use tokio::task::spawn_blocking;
18
18
use tokio_util::sync::CancellationToken;
19
19
20
20
use crate::storage::{LinkReader, StorageStats};
···
28
28
const DEFAULT_CURSOR_LIMIT: u64 = 16;
29
29
const DEFAULT_CURSOR_LIMIT_MAX: u64 = 100;
30
30
31
-
const INDEX_BEGAN_AT_TS: u64 = 1738083600; // TODO: not this
31
+
fn get_default_cursor_limit() -> u64 {
32
+
DEFAULT_CURSOR_LIMIT
33
+
}
34
+
35
+
fn to500(e: tokio::task::JoinError) -> http::StatusCode {
36
+
eprintln!("handler error: {e}");
37
+
http::StatusCode::INTERNAL_SERVER_ERROR
38
+
}
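Every route below repeats the same shape; as a minimal sketch (with a hypothetical synchronous `read_store`), the pattern is:

// run the blocking store read off the async runtime's worker threads,
// mapping a JoinError (panic or cancellation) to a 500 via to500
async fn handler() -> Result<String, http::StatusCode> {
    tokio::task::spawn_blocking(read_store)
        .await            // Result<Result<String, StatusCode>, JoinError>
        .map_err(to500)?  // JoinError -> 500; `?` leaves the inner Result
}

fn read_store() -> Result<String, http::StatusCode> {
    Ok("...".to_string()) // stand-in for a synchronous store query
}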
32
39
33
40
pub async fn serve<S, A>(store: S, addr: A, stay_alive: CancellationToken) -> anyhow::Result<()>
34
41
where
···
41
48
"/",
42
49
get({
43
50
let store = store.clone();
44
-
move |accept| async { block_in_place(|| hello(accept, store)) }
51
+
move |accept| async {
52
+
spawn_blocking(|| hello(accept, store))
53
+
.await
54
+
.map_err(to500)?
55
+
}
56
+
}),
57
+
)
58
+
.route(
59
+
"/xrpc/blue.microcosm.links.getManyToManyCounts",
60
+
get({
61
+
let store = store.clone();
62
+
move |accept, query| async {
63
+
spawn_blocking(|| get_many_to_many_counts(accept, query, store))
64
+
.await
65
+
.map_err(to500)?
66
+
}
45
67
}),
46
68
)
47
69
.route(
48
70
"/links/count",
49
71
get({
50
72
let store = store.clone();
51
-
move |accept, query| async { block_in_place(|| count_links(accept, query, store)) }
73
+
move |accept, query| async {
74
+
spawn_blocking(|| count_links(accept, query, store))
75
+
.await
76
+
.map_err(to500)?
77
+
}
52
78
}),
53
79
)
54
80
.route(
···
56
82
get({
57
83
let store = store.clone();
58
84
move |accept, query| async {
59
-
block_in_place(|| count_distinct_dids(accept, query, store))
85
+
spawn_blocking(|| count_distinct_dids(accept, query, store))
86
+
.await
87
+
.map_err(to500)?
88
+
}
89
+
}),
90
+
)
91
+
.route(
92
+
"/xrpc/blue.microcosm.links.getBacklinks",
93
+
get({
94
+
let store = store.clone();
95
+
move |accept, query| async {
96
+
spawn_blocking(|| get_backlinks(accept, query, store))
97
+
.await
98
+
.map_err(to500)?
60
99
}
61
100
}),
62
101
)
···
64
103
"/links",
65
104
get({
66
105
let store = store.clone();
67
-
move |accept, query| async { block_in_place(|| get_links(accept, query, store)) }
106
+
move |accept, query| async {
107
+
spawn_blocking(|| get_links(accept, query, store))
108
+
.await
109
+
.map_err(to500)?
110
+
}
68
111
}),
69
112
)
70
113
.route(
···
72
115
get({
73
116
let store = store.clone();
74
117
move |accept, query| async {
75
-
block_in_place(|| get_distinct_dids(accept, query, store))
118
+
spawn_blocking(|| get_distinct_dids(accept, query, store))
119
+
.await
120
+
.map_err(to500)?
76
121
}
77
122
}),
78
123
)
···
82
127
get({
83
128
let store = store.clone();
84
129
move |accept, query| async {
85
-
block_in_place(|| count_all_links(accept, query, store))
130
+
spawn_blocking(|| count_all_links(accept, query, store))
131
+
.await
132
+
.map_err(to500)?
86
133
}
87
134
}),
88
135
)
···
91
138
get({
92
139
let store = store.clone();
93
140
move |accept, query| async {
94
-
block_in_place(|| explore_links(accept, query, store))
141
+
spawn_blocking(|| explore_links(accept, query, store))
142
+
.await
143
+
.map_err(to500)?
95
144
}
96
145
}),
97
146
)
···
150
199
#[template(path = "hello.html.j2")]
151
200
struct HelloReponse {
152
201
help: &'static str,
153
-
days_indexed: u64,
202
+
days_indexed: Option<u64>,
154
203
stats: StorageStats,
155
204
}
156
205
fn hello(
···
160
209
let stats = store
161
210
.get_stats()
162
211
.map_err(|_| http::StatusCode::INTERNAL_SERVER_ERROR)?;
163
-
let days_indexed = (UNIX_EPOCH + Duration::from_secs(INDEX_BEGAN_AT_TS))
164
-
.elapsed()
212
+
let days_indexed = stats
213
+
.started_at
214
+
.map(|c| (UNIX_EPOCH + Duration::from_micros(c)).elapsed())
215
+
.transpose()
165
216
.map_err(|_| http::StatusCode::INTERNAL_SERVER_ERROR)?
166
-
.as_secs()
167
-
/ 86400;
217
+
.map(|d| d.as_secs() / 86_400);
168
218
Ok(acceptable(accept, HelloReponse {
169
219
help: "open this URL in a web browser (or request with Accept: text/html) for information about this API.",
170
220
days_indexed,
···
173
223
}
174
224
175
225
#[derive(Clone, Deserialize)]
226
+
#[serde(rename_all = "camelCase")]
227
+
struct GetManyToManyCountsQuery {
228
+
subject: String,
229
+
source: String,
230
+
/// path to the secondary link in the linking record
231
+
path_to_other: String,
232
+
/// filter to linking records (join of the m2m) by these DIDs
233
+
#[serde(default)]
234
+
did: Vec<String>,
235
+
/// filter to specific secondary records
236
+
#[serde(default)]
237
+
other_subject: Vec<String>,
238
+
cursor: Option<OpaqueApiCursor>,
239
+
/// Set the max number of links to return per page of results
240
+
#[serde(default = "get_default_cursor_limit")]
241
+
limit: u64,
242
+
}
243
+
#[derive(Serialize)]
244
+
struct OtherSubjectCount {
245
+
subject: String,
246
+
total: u64,
247
+
distinct: u64,
248
+
}
249
+
#[derive(Template, Serialize)]
250
+
#[template(path = "get-many-to-many-counts.html.j2")]
251
+
struct GetManyToManyCountsResponse {
252
+
counts_by_other_subject: Vec<OtherSubjectCount>,
253
+
cursor: Option<OpaqueApiCursor>,
254
+
#[serde(skip_serializing)]
255
+
query: GetManyToManyCountsQuery,
256
+
}
257
+
fn get_many_to_many_counts(
258
+
accept: ExtractAccept,
259
+
query: axum_extra::extract::Query<GetManyToManyCountsQuery>,
260
+
store: impl LinkReader,
261
+
) -> Result<impl IntoResponse, http::StatusCode> {
262
+
let cursor_key = query
263
+
.cursor
264
+
.clone()
265
+
.map(|oc| ApiKeyedCursor::try_from(oc).map_err(|_| http::StatusCode::BAD_REQUEST))
266
+
.transpose()?
267
+
.map(|c| c.next);
268
+
269
+
let limit = query.limit;
270
+
if limit > DEFAULT_CURSOR_LIMIT_MAX {
271
+
return Err(http::StatusCode::BAD_REQUEST);
272
+
}
273
+
274
+
let filter_dids: HashSet<Did> = HashSet::from_iter(
275
+
query
276
+
.did
277
+
.iter()
278
+
.map(|d| d.trim())
279
+
.filter(|d| !d.is_empty())
280
+
.map(|d| Did(d.to_string())),
281
+
);
282
+
283
+
let filter_other_subjects: HashSet<String> = HashSet::from_iter(
284
+
query
285
+
.other_subject
286
+
.iter()
287
+
.map(|s| s.trim().to_string())
288
+
.filter(|s| !s.is_empty()),
289
+
);
290
+
291
+
let Some((collection, path)) = query.source.split_once(':') else {
292
+
return Err(http::StatusCode::BAD_REQUEST);
293
+
};
294
+
let path = format!(".{path}");
295
+
296
+
let path_to_other = format!(".{}", query.path_to_other);
297
+
298
+
let paged = store
299
+
.get_many_to_many_counts(
300
+
&query.subject,
301
+
collection,
302
+
&path,
303
+
&path_to_other,
304
+
limit,
305
+
cursor_key,
306
+
&filter_dids,
307
+
&filter_other_subjects,
308
+
)
309
+
.map_err(|_| http::StatusCode::INTERNAL_SERVER_ERROR)?;
310
+
311
+
let cursor = paged.next.map(|next| ApiKeyedCursor { next }.into());
312
+
313
+
let items = paged
314
+
.items
315
+
.into_iter()
316
+
.map(|(subject, total, distinct)| OtherSubjectCount {
317
+
subject,
318
+
total,
319
+
distinct,
320
+
})
321
+
.collect();
322
+
323
+
Ok(acceptable(
324
+
accept,
325
+
GetManyToManyCountsResponse {
326
+
counts_by_other_subject: items,
327
+
cursor,
328
+
query: (*query).clone(),
329
+
},
330
+
))
331
+
}
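A hedged example request (all values hypothetical; note the camelCase parameter names from the serde rename, and that `source` is `collection:path`, split on the first `:` with a `.` prefixed to the path before it reaches the store):

GET /xrpc/blue.microcosm.links.getManyToManyCounts
    ?subject=at://did:plc:example/com.example.list/3abc
    &source=com.example.listitem:subject.list
    &pathToOther=subject.item
    &limit=16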
332
+
333
+
#[derive(Clone, Deserialize)]
176
334
struct GetLinksCountQuery {
177
335
target: String,
178
336
collection: String,
···
233
391
}
234
392
235
393
#[derive(Clone, Deserialize)]
394
+
struct GetBacklinksQuery {
395
+
/// The link target
396
+
///
397
+
/// can be an AT-URI, plain DID, or regular URI
398
+
subject: String,
399
+
/// Filter links only from this link source
400
+
///
401
+
/// e.g. `app.bsky.feed.like:subject.uri`
402
+
source: String,
403
+
cursor: Option<OpaqueApiCursor>,
404
+
/// Filter links only from these DIDs
405
+
///
406
+
/// include multiple times to filter by multiple source DIDs
407
+
#[serde(default)]
408
+
did: Vec<String>,
409
+
/// Set the max number of links to return per page of results
410
+
#[serde(default = "get_default_cursor_limit")]
411
+
limit: u64,
412
+
// TODO: allow reverse (er, forward) order as well
413
+
}
414
+
#[derive(Template, Serialize)]
415
+
#[template(path = "get-backlinks.html.j2")]
416
+
struct GetBacklinksResponse {
417
+
total: u64,
418
+
records: Vec<RecordId>,
419
+
cursor: Option<OpaqueApiCursor>,
420
+
#[serde(skip_serializing)]
421
+
query: GetBacklinksQuery,
422
+
#[serde(skip_serializing)]
423
+
collection: String,
424
+
#[serde(skip_serializing)]
425
+
path: String,
426
+
}
427
+
fn get_backlinks(
428
+
accept: ExtractAccept,
429
+
query: axum_extra::extract::Query<GetBacklinksQuery>, // supports multiple param occurrences
430
+
store: impl LinkReader,
431
+
) -> Result<impl IntoResponse, http::StatusCode> {
432
+
let until = query
433
+
.cursor
434
+
.clone()
435
+
.map(|oc| ApiCursor::try_from(oc).map_err(|_| http::StatusCode::BAD_REQUEST))
436
+
.transpose()?
437
+
.map(|c| c.next);
438
+
439
+
let limit = query.limit;
440
+
if limit > DEFAULT_CURSOR_LIMIT_MAX {
441
+
return Err(http::StatusCode::BAD_REQUEST);
442
+
}
443
+
444
+
let filter_dids: HashSet<Did> = HashSet::from_iter(
445
+
query
446
+
.did
447
+
.iter()
448
+
.map(|d| d.trim())
449
+
.filter(|d| !d.is_empty())
450
+
.map(|d| Did(d.to_string())),
451
+
);
452
+
453
+
let Some((collection, path)) = query.source.split_once(':') else {
454
+
return Err(http::StatusCode::BAD_REQUEST);
455
+
};
456
+
let path = format!(".{path}");
457
+
458
+
let paged = store
459
+
.get_links(
460
+
&query.subject,
461
+
collection,
462
+
&path,
463
+
limit,
464
+
until,
465
+
&filter_dids,
466
+
)
467
+
.map_err(|_| http::StatusCode::INTERNAL_SERVER_ERROR)?;
468
+
469
+
let cursor = paged.next.map(|next| {
470
+
ApiCursor {
471
+
version: paged.version,
472
+
next,
473
+
}
474
+
.into()
475
+
});
476
+
477
+
Ok(acceptable(
478
+
accept,
479
+
GetBacklinksResponse {
480
+
total: paged.total,
481
+
records: paged.items,
482
+
cursor,
483
+
query: (*query).clone(),
484
+
collection: collection.to_string(),
485
+
path,
486
+
},
487
+
))
488
+
}
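Example request, reusing the sample values documented in hello.html.j2 below (subject shown unencoded for readability):

GET /xrpc/blue.microcosm.links.getBacklinks
    ?subject=at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r
    &source=app.bsky.feed.like:subject.uri

Internally `source` becomes collection `app.bsky.feed.like` and path `.subject.uri`.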
489
+
490
+
#[derive(Clone, Deserialize)]
236
491
struct GetLinkItemsQuery {
237
492
target: String,
238
493
collection: String,
239
494
path: String,
240
495
cursor: Option<OpaqueApiCursor>,
241
-
limit: Option<u64>,
496
+
/// Filter links only from these DIDs
497
+
///
498
+
/// include multiple times to filter by multiple source DIDs
499
+
#[serde(default)]
500
+
did: Vec<String>,
501
+
/// [deprecated] Filter links only from these DIDs
502
+
///
503
+
/// format: comma-separated sequence of DIDs
504
+
///
505
+
/// errors: if `did` parameter is also present
506
+
///
507
+
/// deprecated: use `did`, which can be repeated multiple times
508
+
from_dids: Option<String>, // comma separated: gross
509
+
#[serde(default = "get_default_cursor_limit")]
510
+
limit: u64,
242
511
// TODO: allow reverse (er, forward) order as well
243
512
}
244
513
#[derive(Template, Serialize)]
···
255
524
}
256
525
fn get_links(
257
526
accept: ExtractAccept,
258
-
query: Query<GetLinkItemsQuery>,
527
+
query: axum_extra::extract::Query<GetLinkItemsQuery>, // supports multiple param occurrences
259
528
store: impl LinkReader,
260
529
) -> Result<impl IntoResponse, http::StatusCode> {
261
530
let until = query
···
265
534
.transpose()?
266
535
.map(|c| c.next);
267
536
268
-
let limit = query.limit.unwrap_or(DEFAULT_CURSOR_LIMIT);
537
+
let limit = query.limit;
269
538
if limit > DEFAULT_CURSOR_LIMIT_MAX {
270
539
return Err(http::StatusCode::BAD_REQUEST);
271
540
}
272
541
542
+
let mut filter_dids: HashSet<Did> = HashSet::from_iter(
543
+
query
544
+
.did
545
+
.iter()
546
+
.map(|d| d.trim())
547
+
.filter(|d| !d.is_empty())
548
+
.map(|d| Did(d.to_string())),
549
+
);
550
+
551
+
if let Some(comma_joined) = &query.from_dids {
552
+
if !filter_dids.is_empty() {
553
+
return Err(http::StatusCode::BAD_REQUEST);
554
+
}
555
+
for did in comma_joined.split(',') {
556
+
filter_dids.insert(Did(did.to_string()));
557
+
}
558
+
}
559
+
273
560
let paged = store
274
-
.get_links(&query.target, &query.collection, &query.path, limit, until)
561
+
.get_links(
562
+
&query.target,
563
+
&query.collection,
564
+
&query.path,
565
+
limit,
566
+
until,
567
+
&filter_dids,
568
+
)
275
569
.map_err(|_| http::StatusCode::INTERNAL_SERVER_ERROR)?;
276
570
277
571
let cursor = paged.next.map(|next| {
···
433
727
OpaqueApiCursor(bincode::DefaultOptions::new().serialize(&item).unwrap())
434
728
}
435
729
}
730
+
731
+
#[derive(Serialize, Deserialize)] // for bincode
732
+
struct ApiKeyedCursor {
733
+
next: String, // the key
734
+
}
735
+
736
+
impl TryFrom<OpaqueApiCursor> for ApiKeyedCursor {
737
+
type Error = bincode::Error;
738
+
739
+
fn try_from(item: OpaqueApiCursor) -> Result<Self, Self::Error> {
740
+
bincode::DefaultOptions::new().deserialize(&item.0)
741
+
}
742
+
}
743
+
744
+
impl From<ApiKeyedCursor> for OpaqueApiCursor {
745
+
fn from(item: ApiKeyedCursor) -> Self {
746
+
OpaqueApiCursor(bincode::DefaultOptions::new().serialize(&item).unwrap())
747
+
}
748
+
}
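A quick round-trip sketch for the keyed cursor (illustrative, test-style snippet):

let opaque: OpaqueApiCursor = ApiKeyedCursor { next: "b.com".to_string() }.into();
let decoded = ApiKeyedCursor::try_from(opaque).expect("bincode bytes round-trip");
assert_eq!(decoded.next, "b.com");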
+93
-1
constellation/src/storage/mem_store.rs
+93
-1
constellation/src/storage/mem_store.rs
···
1
-
use super::{LinkReader, LinkStorage, PagedAppendingCollection, StorageStats};
1
+
use super::{
2
+
LinkReader, LinkStorage, PagedAppendingCollection, PagedOrderedCollection, StorageStats,
3
+
};
2
4
use crate::{ActionableEvent, CountsByCount, Did, RecordId};
3
5
use anyhow::Result;
4
6
use links::CollectedLink;
···
132
134
}
133
135
134
136
impl LinkReader for MemStorage {
137
+
fn get_many_to_many_counts(
138
+
&self,
139
+
target: &str,
140
+
collection: &str,
141
+
path: &str,
142
+
path_to_other: &str,
143
+
limit: u64,
144
+
after: Option<String>,
145
+
filter_dids: &HashSet<Did>,
146
+
filter_to_targets: &HashSet<String>,
147
+
) -> Result<PagedOrderedCollection<(String, u64, u64), String>> {
148
+
let data = self.0.lock().unwrap();
149
+
let Some(paths) = data.targets.get(&Target::new(target)) else {
150
+
return Ok(PagedOrderedCollection::default());
151
+
};
152
+
let Some(linkers) = paths.get(&Source::new(collection, path)) else {
153
+
return Ok(PagedOrderedCollection::default());
154
+
};
155
+
156
+
let path_to_other = RecordPath::new(path_to_other);
157
+
let filter_to_targets: HashSet<Target> =
158
+
HashSet::from_iter(filter_to_targets.iter().map(|s| Target::new(s)));
159
+
160
+
let mut grouped_counts: HashMap<Target, (u64, HashSet<Did>)> = HashMap::new();
161
+
for (did, rkey) in linkers.iter().flatten().cloned() {
162
+
if !filter_dids.is_empty() && !filter_dids.contains(&did) {
163
+
continue;
164
+
}
165
+
if let Some(fwd_target) = data
166
+
.links
167
+
.get(&did)
168
+
.unwrap_or(&HashMap::new())
169
+
.get(&RepoId {
170
+
collection: collection.to_string(),
171
+
rkey,
172
+
})
173
+
.unwrap_or(&Vec::new())
174
+
.iter()
175
+
.filter_map(|(path, target)| {
176
+
if *path == path_to_other
177
+
&& (filter_to_targets.is_empty() || filter_to_targets.contains(target))
178
+
{
179
+
Some(target)
180
+
} else {
181
+
None
182
+
}
183
+
})
184
+
.take(1)
185
+
.next()
186
+
{
187
+
let e = grouped_counts.entry(fwd_target.clone()).or_default();
188
+
e.0 += 1;
189
+
e.1.insert(did.clone());
190
+
}
191
+
}
192
+
let mut items: Vec<(String, u64, u64)> = grouped_counts
193
+
.iter()
194
+
.map(|(k, (n, u))| (k.0.clone(), *n, u.len() as u64))
195
+
.collect();
196
+
items.sort();
197
+
items = items
198
+
.into_iter()
199
+
.skip_while(|(t, _, _)| after.as_ref().map(|a| t <= a).unwrap_or(false))
200
+
.take(limit as usize)
201
+
.collect();
202
+
let next = if items.len() as u64 >= limit {
203
+
items.last().map(|(t, _, _)| t.clone())
204
+
} else {
205
+
None
206
+
};
207
+
Ok(PagedOrderedCollection { items, next })
208
+
}
209
+
135
210
fn get_count(&self, target: &str, collection: &str, path: &str) -> Result<u64> {
136
211
let data = self.0.lock().unwrap();
137
212
let Some(paths) = data.targets.get(&Target::new(target)) else {
···
166
241
path: &str,
167
242
limit: u64,
168
243
until: Option<u64>,
244
+
filter_dids: &HashSet<Did>,
169
245
) -> Result<PagedAppendingCollection<RecordId>> {
170
246
let data = self.0.lock().unwrap();
171
247
let Some(paths) = data.targets.get(&Target::new(target)) else {
···
183
259
next: None,
184
260
total: 0,
185
261
});
262
+
};
263
+
264
+
let did_rkeys: Vec<_> = if !filter_dids.is_empty() {
265
+
did_rkeys
266
+
.iter()
267
+
.filter(|m| {
268
+
Option::<(Did, RKey)>::clone(m)
269
+
.map(|(did, _)| filter_dids.contains(&did))
270
+
.unwrap_or(false)
271
+
})
272
+
.cloned()
273
+
.collect()
274
+
} else {
275
+
did_rkeys.to_vec()
186
276
};
187
277
188
278
let total = did_rkeys.len();
···
338
428
dids,
339
429
targetables,
340
430
linking_records,
431
+
started_at: None,
432
+
other_data: Default::default(),
341
433
})
342
434
}
343
435
}
+484
-14
constellation/src/storage/mod.rs
+484
-14
constellation/src/storage/mod.rs
···
1
1
use crate::{ActionableEvent, CountsByCount, Did, RecordId};
2
2
use anyhow::Result;
3
3
use serde::{Deserialize, Serialize};
4
-
use std::collections::HashMap;
4
+
use std::collections::{HashMap, HashSet};
5
5
6
6
pub mod mem_store;
7
7
pub use mem_store::MemStorage;
···
19
19
pub total: u64,
20
20
}
21
21
22
+
/// A paged collection whose keys are sorted instead of indexed
23
+
///
24
+
/// this has weaker guarantees than PagedAppendingCollection: it might not
25
+
/// return a totally consistent snapshot. but it should avoid duplicates
26
+
/// and each page should at least be internally consistent.
27
+
#[derive(Debug, PartialEq, Default)]
28
+
pub struct PagedOrderedCollection<T, K: Ord> {
29
+
pub items: Vec<T>,
30
+
pub next: Option<K>,
31
+
}
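A consumer pages by feeding `next` back in as the `after` key until it comes back `None`; a minimal sketch against the trait below (argument values are placeholders):

let mut after: Option<String> = None;
loop {
    let page = store.get_many_to_many_counts(
        "a.com", "app.t.c", ".abc.uri", ".def.uri",
        100, after.clone(), &HashSet::new(), &HashSet::new(),
    )?;
    for (subject, total, distinct) in &page.items {
        println!("{subject}: {total} joins from {distinct} dids");
    }
    match page.next {
        Some(k) => after = Some(k), // resume strictly after this key
        None => break,              // final (possibly short) page
    }
}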
32
+
22
33
#[derive(Debug, Deserialize, Serialize, PartialEq)]
23
34
pub struct StorageStats {
24
35
/// estimate of how many accounts we've seen create links. the _subjects_ of any links are not represented here.
···
33
44
/// records with multiple links are single-counted.
34
45
/// for LSM stores, deleted links don't decrement this, and updated records with any links will likely increment it.
35
46
pub linking_records: u64,
47
+
48
+
/// first jetstream cursor when this instance first started
49
+
pub started_at: Option<u64>,
50
+
51
+
/// anything else we want to throw in
52
+
pub other_data: HashMap<String, u64>,
36
53
}
37
54
38
55
pub trait LinkStorage: Send + Sync {
···
48
65
}
49
66
50
67
pub trait LinkReader: Clone + Send + Sync + 'static {
68
+
#[allow(clippy::too_many_arguments)]
69
+
fn get_many_to_many_counts(
70
+
&self,
71
+
target: &str,
72
+
collection: &str,
73
+
path: &str,
74
+
path_to_other: &str,
75
+
limit: u64,
76
+
after: Option<String>,
77
+
filter_dids: &HashSet<Did>,
78
+
filter_to_targets: &HashSet<String>,
79
+
) -> Result<PagedOrderedCollection<(String, u64, u64), String>>;
80
+
51
81
fn get_count(&self, target: &str, collection: &str, path: &str) -> Result<u64>;
52
82
53
83
fn get_distinct_did_count(&self, target: &str, collection: &str, path: &str) -> Result<u64>;
···
59
89
path: &str,
60
90
limit: u64,
61
91
until: Option<u64>,
92
+
filter_dids: &HashSet<Did>,
62
93
) -> Result<PagedAppendingCollection<RecordId>>;
63
94
64
95
fn get_distinct_dids(
···
145
176
);
146
177
assert_eq!(storage.get_distinct_did_count("", "", "")?, 0);
147
178
assert_eq!(
148
-
storage.get_links("a.com", "app.t.c", ".abc.uri", 100, None)?,
179
+
storage.get_links(
180
+
"a.com",
181
+
"app.t.c",
182
+
".abc.uri",
183
+
100,
184
+
None,
185
+
&HashSet::default()
186
+
)?,
149
187
PagedAppendingCollection {
150
188
version: (0, 0),
151
189
items: vec![],
···
641
679
0,
642
680
)?;
643
681
assert_eq!(
644
-
storage.get_links("a.com", "app.t.c", ".abc.uri", 100, None)?,
682
+
storage.get_links(
683
+
"a.com",
684
+
"app.t.c",
685
+
".abc.uri",
686
+
100,
687
+
None,
688
+
&HashSet::default()
689
+
)?,
645
690
PagedAppendingCollection {
646
691
version: (1, 0),
647
692
items: vec![RecordId {
···
682
727
0,
683
728
)?;
684
729
}
685
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None)?;
730
+
let links =
731
+
storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?;
686
732
let dids = storage.get_distinct_dids("a.com", "app.t.c", ".abc.uri", 2, None)?;
687
733
assert_eq!(
688
734
links,
···
713
759
total: 5,
714
760
}
715
761
);
716
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, links.next)?;
762
+
let links = storage.get_links(
763
+
"a.com",
764
+
"app.t.c",
765
+
".abc.uri",
766
+
2,
767
+
links.next,
768
+
&HashSet::default(),
769
+
)?;
717
770
let dids = storage.get_distinct_dids("a.com", "app.t.c", ".abc.uri", 2, dids.next)?;
718
771
assert_eq!(
719
772
links,
···
744
797
total: 5,
745
798
}
746
799
);
747
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, links.next)?;
800
+
let links = storage.get_links(
801
+
"a.com",
802
+
"app.t.c",
803
+
".abc.uri",
804
+
2,
805
+
links.next,
806
+
&HashSet::default(),
807
+
)?;
748
808
let dids = storage.get_distinct_dids("a.com", "app.t.c", ".abc.uri", 2, dids.next)?;
749
809
assert_eq!(
750
810
links,
···
771
831
assert_stats(storage.get_stats()?, 5..=5, 1..=1, 5..=5);
772
832
});
773
833
834
+
test_each_storage!(get_filtered_links, |storage| {
835
+
let links = storage.get_links(
836
+
"a.com",
837
+
"app.t.c",
838
+
".abc.uri",
839
+
2,
840
+
None,
841
+
&HashSet::from([Did("did:plc:linker".to_string())]),
842
+
)?;
843
+
assert_eq!(
844
+
links,
845
+
PagedAppendingCollection {
846
+
version: (0, 0),
847
+
items: vec![],
848
+
next: None,
849
+
total: 0,
850
+
}
851
+
);
852
+
853
+
storage.push(
854
+
&ActionableEvent::CreateLinks {
855
+
record_id: RecordId {
856
+
did: "did:plc:linker".into(),
857
+
collection: "app.t.c".into(),
858
+
rkey: "asdf".into(),
859
+
},
860
+
links: vec![CollectedLink {
861
+
target: Link::Uri("a.com".into()),
862
+
path: ".abc.uri".into(),
863
+
}],
864
+
},
865
+
0,
866
+
)?;
867
+
868
+
let links = storage.get_links(
869
+
"a.com",
870
+
"app.t.c",
871
+
".abc.uri",
872
+
2,
873
+
None,
874
+
&HashSet::from([Did("did:plc:linker".to_string())]),
875
+
)?;
876
+
assert_eq!(
877
+
links,
878
+
PagedAppendingCollection {
879
+
version: (1, 0),
880
+
items: vec![RecordId {
881
+
did: "did:plc:linker".into(),
882
+
collection: "app.t.c".into(),
883
+
rkey: "asdf".into(),
884
+
},],
885
+
next: None,
886
+
total: 1,
887
+
}
888
+
);
889
+
890
+
let links = storage.get_links(
891
+
"a.com",
892
+
"app.t.c",
893
+
".abc.uri",
894
+
2,
895
+
None,
896
+
&HashSet::from([Did("did:plc:someone-else".to_string())]),
897
+
)?;
898
+
assert_eq!(
899
+
links,
900
+
PagedAppendingCollection {
901
+
version: (0, 0),
902
+
items: vec![],
903
+
next: None,
904
+
total: 0,
905
+
}
906
+
);
907
+
908
+
storage.push(
909
+
&ActionableEvent::CreateLinks {
910
+
record_id: RecordId {
911
+
did: "did:plc:linker".into(),
912
+
collection: "app.t.c".into(),
913
+
rkey: "asdf-2".into(),
914
+
},
915
+
links: vec![CollectedLink {
916
+
target: Link::Uri("a.com".into()),
917
+
path: ".abc.uri".into(),
918
+
}],
919
+
},
920
+
0,
921
+
)?;
922
+
storage.push(
923
+
&ActionableEvent::CreateLinks {
924
+
record_id: RecordId {
925
+
did: "did:plc:someone-else".into(),
926
+
collection: "app.t.c".into(),
927
+
rkey: "asdf".into(),
928
+
},
929
+
links: vec![CollectedLink {
930
+
target: Link::Uri("a.com".into()),
931
+
path: ".abc.uri".into(),
932
+
}],
933
+
},
934
+
0,
935
+
)?;
936
+
937
+
let links = storage.get_links(
938
+
"a.com",
939
+
"app.t.c",
940
+
".abc.uri",
941
+
2,
942
+
None,
943
+
&HashSet::from([Did("did:plc:linker".to_string())]),
944
+
)?;
945
+
assert_eq!(
946
+
links,
947
+
PagedAppendingCollection {
948
+
version: (2, 0),
949
+
items: vec![
950
+
RecordId {
951
+
did: "did:plc:linker".into(),
952
+
collection: "app.t.c".into(),
953
+
rkey: "asdf-2".into(),
954
+
},
955
+
RecordId {
956
+
did: "did:plc:linker".into(),
957
+
collection: "app.t.c".into(),
958
+
rkey: "asdf".into(),
959
+
},
960
+
],
961
+
next: None,
962
+
total: 2,
963
+
}
964
+
);
965
+
966
+
let links = storage.get_links(
967
+
"a.com",
968
+
"app.t.c",
969
+
".abc.uri",
970
+
2,
971
+
None,
972
+
&HashSet::from([
973
+
Did("did:plc:linker".to_string()),
974
+
Did("did:plc:someone-else".to_string()),
975
+
]),
976
+
)?;
977
+
assert_eq!(
978
+
links,
979
+
PagedAppendingCollection {
980
+
version: (3, 0),
981
+
items: vec![
982
+
RecordId {
983
+
did: "did:plc:someone-else".into(),
984
+
collection: "app.t.c".into(),
985
+
rkey: "asdf".into(),
986
+
},
987
+
RecordId {
988
+
did: "did:plc:linker".into(),
989
+
collection: "app.t.c".into(),
990
+
rkey: "asdf-2".into(),
991
+
},
992
+
],
993
+
next: Some(1),
994
+
total: 3,
995
+
}
996
+
);
997
+
998
+
let links = storage.get_links(
999
+
"a.com",
1000
+
"app.t.c",
1001
+
".abc.uri",
1002
+
2,
1003
+
None,
1004
+
&HashSet::from([Did("did:plc:someone-unknown".to_string())]),
1005
+
)?;
1006
+
assert_eq!(
1007
+
links,
1008
+
PagedAppendingCollection {
1009
+
version: (0, 0),
1010
+
items: vec![],
1011
+
next: None,
1012
+
total: 0,
1013
+
}
1014
+
);
1015
+
});
1016
+
774
1017
test_each_storage!(get_links_exact_multiple, |storage| {
775
1018
for i in 1..=4 {
776
1019
storage.push(
···
788
1031
0,
789
1032
)?;
790
1033
}
791
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None)?;
1034
+
let links =
1035
+
storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?;
792
1036
assert_eq!(
793
1037
links,
794
1038
PagedAppendingCollection {
···
809
1053
total: 4,
810
1054
}
811
1055
);
812
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, links.next)?;
1056
+
let links = storage.get_links(
1057
+
"a.com",
1058
+
"app.t.c",
1059
+
".abc.uri",
1060
+
2,
1061
+
links.next,
1062
+
&HashSet::default(),
1063
+
)?;
813
1064
assert_eq!(
814
1065
links,
815
1066
PagedAppendingCollection {
···
850
1101
0,
851
1102
)?;
852
1103
}
853
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None)?;
1104
+
let links =
1105
+
storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?;
854
1106
assert_eq!(
855
1107
links,
856
1108
PagedAppendingCollection {
···
885
1137
},
886
1138
0,
887
1139
)?;
888
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, links.next)?;
1140
+
let links = storage.get_links(
1141
+
"a.com",
1142
+
"app.t.c",
1143
+
".abc.uri",
1144
+
2,
1145
+
links.next,
1146
+
&HashSet::default(),
1147
+
)?;
889
1148
assert_eq!(
890
1149
links,
891
1150
PagedAppendingCollection {
···
926
1185
0,
927
1186
)?;
928
1187
}
929
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None)?;
1188
+
let links =
1189
+
storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?;
930
1190
assert_eq!(
931
1191
links,
932
1192
PagedAppendingCollection {
···
955
1215
}),
956
1216
0,
957
1217
)?;
958
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, links.next)?;
1218
+
let links = storage.get_links(
1219
+
"a.com",
1220
+
"app.t.c",
1221
+
".abc.uri",
1222
+
2,
1223
+
links.next,
1224
+
&HashSet::default(),
1225
+
)?;
959
1226
assert_eq!(
960
1227
links,
961
1228
PagedAppendingCollection {
···
989
1256
0,
990
1257
)?;
991
1258
}
992
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None)?;
1259
+
let links =
1260
+
storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?;
993
1261
assert_eq!(
994
1262
links,
995
1263
PagedAppendingCollection {
···
1014
1282
&ActionableEvent::DeactivateAccount("did:plc:asdf-1".into()),
1015
1283
0,
1016
1284
)?;
1017
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, links.next)?;
1285
+
let links = storage.get_links(
1286
+
"a.com",
1287
+
"app.t.c",
1288
+
".abc.uri",
1289
+
2,
1290
+
links.next,
1291
+
&HashSet::default(),
1292
+
)?;
1018
1293
assert_eq!(
1019
1294
links,
1020
1295
PagedAppendingCollection {
···
1081
1356
counts
1082
1357
});
1083
1358
assert_stats(storage.get_stats()?, 1..=1, 2..=2, 1..=1);
1359
+
});
1360
+
1361
+
//////// many-to-many /////////
1362
+
1363
+
test_each_storage!(get_m2m_counts_empty, |storage| {
1364
+
assert_eq!(
1365
+
storage.get_many_to_many_counts(
1366
+
"a.com",
1367
+
"a.b.c",
1368
+
".d.e",
1369
+
".f.g",
1370
+
10,
1371
+
None,
1372
+
&HashSet::new(),
1373
+
&HashSet::new(),
1374
+
)?,
1375
+
PagedOrderedCollection {
1376
+
items: vec![],
1377
+
next: None,
1378
+
}
1379
+
);
1380
+
});
1381
+
1382
+
test_each_storage!(get_m2m_counts_single, |storage| {
1383
+
storage.push(
1384
+
&ActionableEvent::CreateLinks {
1385
+
record_id: RecordId {
1386
+
did: "did:plc:asdf".into(),
1387
+
collection: "app.t.c".into(),
1388
+
rkey: "asdf".into(),
1389
+
},
1390
+
links: vec![
1391
+
CollectedLink {
1392
+
target: Link::Uri("a.com".into()),
1393
+
path: ".abc.uri".into(),
1394
+
},
1395
+
CollectedLink {
1396
+
target: Link::Uri("b.com".into()),
1397
+
path: ".def.uri".into(),
1398
+
},
1399
+
CollectedLink {
1400
+
target: Link::Uri("b.com".into()),
1401
+
path: ".ghi.uri".into(),
1402
+
},
1403
+
],
1404
+
},
1405
+
0,
1406
+
)?;
1407
+
assert_eq!(
1408
+
storage.get_many_to_many_counts(
1409
+
"a.com",
1410
+
"app.t.c",
1411
+
".abc.uri",
1412
+
".def.uri",
1413
+
10,
1414
+
None,
1415
+
&HashSet::new(),
1416
+
&HashSet::new(),
1417
+
)?,
1418
+
PagedOrderedCollection {
1419
+
items: vec![("b.com".to_string(), 1, 1)],
1420
+
next: None,
1421
+
}
1422
+
);
1423
+
});
1424
+
1425
+
test_each_storage!(get_m2m_counts_filters, |storage| {
1426
+
storage.push(
1427
+
&ActionableEvent::CreateLinks {
1428
+
record_id: RecordId {
1429
+
did: "did:plc:asdf".into(),
1430
+
collection: "app.t.c".into(),
1431
+
rkey: "asdf".into(),
1432
+
},
1433
+
links: vec![
1434
+
CollectedLink {
1435
+
target: Link::Uri("a.com".into()),
1436
+
path: ".abc.uri".into(),
1437
+
},
1438
+
CollectedLink {
1439
+
target: Link::Uri("b.com".into()),
1440
+
path: ".def.uri".into(),
1441
+
},
1442
+
],
1443
+
},
1444
+
0,
1445
+
)?;
1446
+
storage.push(
1447
+
&ActionableEvent::CreateLinks {
1448
+
record_id: RecordId {
1449
+
did: "did:plc:asdfasdf".into(),
1450
+
collection: "app.t.c".into(),
1451
+
rkey: "asdf".into(),
1452
+
},
1453
+
links: vec![
1454
+
CollectedLink {
1455
+
target: Link::Uri("a.com".into()),
1456
+
path: ".abc.uri".into(),
1457
+
},
1458
+
CollectedLink {
1459
+
target: Link::Uri("b.com".into()),
1460
+
path: ".def.uri".into(),
1461
+
},
1462
+
],
1463
+
},
1464
+
1,
1465
+
)?;
1466
+
storage.push(
1467
+
&ActionableEvent::CreateLinks {
1468
+
record_id: RecordId {
1469
+
did: "did:plc:fdsa".into(),
1470
+
collection: "app.t.c".into(),
1471
+
rkey: "asdf".into(),
1472
+
},
1473
+
links: vec![
1474
+
CollectedLink {
1475
+
target: Link::Uri("a.com".into()),
1476
+
path: ".abc.uri".into(),
1477
+
},
1478
+
CollectedLink {
1479
+
target: Link::Uri("c.com".into()),
1480
+
path: ".def.uri".into(),
1481
+
},
1482
+
],
1483
+
},
1484
+
2,
1485
+
)?;
1486
+
storage.push(
1487
+
&ActionableEvent::CreateLinks {
1488
+
record_id: RecordId {
1489
+
did: "did:plc:fdsa".into(),
1490
+
collection: "app.t.c".into(),
1491
+
rkey: "asdf2".into(),
1492
+
},
1493
+
links: vec![
1494
+
CollectedLink {
1495
+
target: Link::Uri("a.com".into()),
1496
+
path: ".abc.uri".into(),
1497
+
},
1498
+
CollectedLink {
1499
+
target: Link::Uri("c.com".into()),
1500
+
path: ".def.uri".into(),
1501
+
},
1502
+
],
1503
+
},
1504
+
3,
1505
+
)?;
1506
+
assert_eq!(
1507
+
storage.get_many_to_many_counts(
1508
+
"a.com",
1509
+
"app.t.c",
1510
+
".abc.uri",
1511
+
".def.uri",
1512
+
10,
1513
+
None,
1514
+
&HashSet::new(),
1515
+
&HashSet::new(),
1516
+
)?,
1517
+
PagedOrderedCollection {
1518
+
items: vec![("b.com".to_string(), 2, 2), ("c.com".to_string(), 2, 1),],
1519
+
next: None,
1520
+
}
1521
+
);
1522
+
assert_eq!(
1523
+
storage.get_many_to_many_counts(
1524
+
"a.com",
1525
+
"app.t.c",
1526
+
".abc.uri",
1527
+
".def.uri",
1528
+
10,
1529
+
None,
1530
+
&HashSet::from_iter([Did("did:plc:fdsa".to_string())]),
1531
+
&HashSet::new(),
1532
+
)?,
1533
+
PagedOrderedCollection {
1534
+
items: vec![("c.com".to_string(), 2, 1),],
1535
+
next: None,
1536
+
}
1537
+
);
1538
+
assert_eq!(
1539
+
storage.get_many_to_many_counts(
1540
+
"a.com",
1541
+
"app.t.c",
1542
+
".abc.uri",
1543
+
".def.uri",
1544
+
10,
1545
+
None,
1546
+
&HashSet::new(),
1547
+
&HashSet::from_iter(["b.com".to_string()]),
1548
+
)?,
1549
+
PagedOrderedCollection {
1550
+
items: vec![("b.com".to_string(), 2, 2),],
1551
+
next: None,
1552
+
}
1553
+
);
1084
1554
});
1085
1555
}
+361
-41
constellation/src/storage/rocks_store.rs
+361
-41
constellation/src/storage/rocks_store.rs
···
1
-
use super::{ActionableEvent, LinkReader, LinkStorage, PagedAppendingCollection, StorageStats};
1
+
use super::{
2
+
ActionableEvent, LinkReader, LinkStorage, PagedAppendingCollection, PagedOrderedCollection,
3
+
StorageStats,
4
+
};
2
5
use crate::{CountsByCount, Did, RecordId};
3
6
use anyhow::{bail, Result};
4
7
use bincode::Options as BincodeOptions;
···
11
14
MultiThreaded, Options, PrefixRange, ReadOptions, WriteBatch,
12
15
};
13
16
use serde::{Deserialize, Serialize};
14
-
use std::collections::{HashMap, HashSet};
17
+
use std::collections::{BTreeMap, HashMap, HashSet};
15
18
use std::io::Read;
16
19
use std::marker::PhantomData;
17
20
use std::path::{Path, PathBuf};
···
20
23
Arc,
21
24
};
22
25
use std::thread;
23
-
use std::time::{Duration, Instant};
26
+
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
24
27
use tokio_util::sync::CancellationToken;
25
28
26
29
static DID_IDS_CF: &str = "did_ids";
···
29
32
static LINK_TARGETS_CF: &str = "link_targets";
30
33
31
34
static JETSTREAM_CURSOR_KEY: &str = "jetstream_cursor";
35
+
static STARTED_AT_KEY: &str = "jetstream_first_cursor";
36
+
// add reverse mappings for targets if this db was running before that was a thing
37
+
static TARGET_ID_REPAIR_STATE_KEY: &str = "target_id_table_repair_state";
38
+
39
+
static COZY_FIRST_CURSOR: u64 = 1_738_083_600_000_000; // constellation.microcosm.blue started
40
+
41
+
#[derive(Debug, Clone, Serialize, Deserialize)]
42
+
struct TargetIdRepairState {
43
+
/// start time for repair, microseconds timestamp
44
+
current_us_started_at: u64,
45
+
/// id table's latest id when repair started
46
+
id_when_started: u64,
47
+
/// iterator offset: how many id-table entries have been repaired so far
48
+
latest_repaired_i: u64,
49
+
}
50
+
impl AsRocksValue for TargetIdRepairState {}
51
+
impl ValueFromRocks for TargetIdRepairState {}
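For orientation: `run_repair` below backfills reverse mappings into the same `target_ids` column family, so it holds both directions at once: serialized `TargetKey` → `TargetId` going forward, and the id's raw 8-byte big-endian bytes → serialized `TargetKey` going back, which is why the repair loop skips 8-byte keys. A toy sketch of that dual-keyed layout (plain `HashMap`/`u64` stand-ins, not the real serialization):

use std::collections::HashMap;

// one keyspace, two directions: the sketch assumes serialized keys are
// never exactly 8 bytes, mirroring the `raw_key.len() == 8` check below
fn insert_both(kv: &mut HashMap<Vec<u8>, Vec<u8>>, target: &str, id: u64) {
    kv.insert(target.as_bytes().to_vec(), id.to_be_bytes().to_vec()); // forward
    kv.insert(id.to_be_bytes().to_vec(), target.as_bytes().to_vec()); // reverse
}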
32
52
33
53
// todo: actually understand and set these options probably better
34
54
fn rocks_opts_base() -> Options {
···
56
76
#[derive(Debug, Clone)]
57
77
pub struct RocksStorage {
58
78
pub db: Arc<DBWithThreadMode<MultiThreaded>>, // TODO: mov seqs here (concat merge op will be fun)
59
-
did_id_table: IdTable<Did, DidIdValue, true>,
60
-
target_id_table: IdTable<TargetKey, TargetId, false>,
79
+
did_id_table: IdTable<Did, DidIdValue>,
80
+
target_id_table: IdTable<TargetKey, TargetId>,
61
81
is_writer: bool,
62
82
backup_task: Arc<Option<thread::JoinHandle<Result<()>>>>,
63
83
}
···
85
105
fn cf_descriptor(&self) -> ColumnFamilyDescriptor {
86
106
ColumnFamilyDescriptor::new(&self.name, rocks_opts_base())
87
107
}
88
-
fn init<const WITH_REVERSE: bool>(
89
-
self,
90
-
db: &DBWithThreadMode<MultiThreaded>,
91
-
) -> Result<IdTable<Orig, IdVal, WITH_REVERSE>> {
108
+
fn init(self, db: &DBWithThreadMode<MultiThreaded>) -> Result<IdTable<Orig, IdVal>> {
92
109
if db.cf_handle(&self.name).is_none() {
93
110
bail!("failed to get cf handle from db -- was the db open with our .cf_descriptor()?");
94
111
}
···
119
136
}
120
137
}
121
138
#[derive(Debug, Clone)]
122
-
struct IdTable<Orig, IdVal: IdTableValue, const WITH_REVERSE: bool>
139
+
struct IdTable<Orig, IdVal: IdTableValue>
123
140
where
124
141
Orig: KeyFromRocks,
125
142
for<'a> &'a Orig: AsRocksKey,
···
127
144
base: IdTableBase<Orig, IdVal>,
128
145
priv_id_seq: u64,
129
146
}
130
-
impl<Orig: Clone, IdVal: IdTableValue, const WITH_REVERSE: bool> IdTable<Orig, IdVal, WITH_REVERSE>
147
+
impl<Orig: Clone, IdVal: IdTableValue> IdTable<Orig, IdVal>
131
148
where
132
149
Orig: KeyFromRocks,
133
150
for<'v> &'v IdVal: AsRocksValue,
···
139
156
_key_marker: PhantomData,
140
157
_val_marker: PhantomData,
141
158
name: name.into(),
142
-
id_seq: Arc::new(AtomicU64::new(0)), // zero is "uninint", first seq num will be 1
159
+
id_seq: Arc::new(AtomicU64::new(0)), // zero is "uninit", first seq num will be 1
143
160
}
144
161
}
145
162
fn get_id_val(
···
178
195
id_value
179
196
}))
180
197
}
198
+
181
199
fn estimate_count(&self) -> u64 {
182
200
self.base.id_seq.load(Ordering::SeqCst) - 1 // -1 because seq zero is reserved
183
201
}
184
-
}
185
-
impl<Orig: Clone, IdVal: IdTableValue> IdTable<Orig, IdVal, true>
186
-
where
187
-
Orig: KeyFromRocks,
188
-
for<'v> &'v IdVal: AsRocksValue,
189
-
for<'k> &'k Orig: AsRocksKey,
190
-
{
202
+
191
203
fn get_or_create_id_val(
192
204
&mut self,
193
205
db: &DBWithThreadMode<MultiThreaded>,
···
215
227
}
216
228
}
217
229
}
218
-
impl<Orig: Clone, IdVal: IdTableValue> IdTable<Orig, IdVal, false>
219
-
where
220
-
Orig: KeyFromRocks,
221
-
for<'v> &'v IdVal: AsRocksValue,
222
-
for<'k> &'k Orig: AsRocksKey,
223
-
{
224
-
fn get_or_create_id_val(
225
-
&mut self,
226
-
db: &DBWithThreadMode<MultiThreaded>,
227
-
batch: &mut WriteBatch,
228
-
orig: &Orig,
229
-
) -> Result<IdVal> {
230
-
let cf = db.cf_handle(&self.base.name).unwrap();
231
-
self.__get_or_create_id_val(&cf, db, batch, orig)
232
-
}
233
-
}
234
230
235
231
impl IdTableValue for DidIdValue {
236
232
fn new(v: u64) -> Self {
···
249
245
}
250
246
}
251
247
248
+
fn now() -> u64 {
249
+
SystemTime::now()
250
+
.duration_since(UNIX_EPOCH)
251
+
.unwrap()
252
+
.as_micros() as u64
253
+
}
254
+
252
255
impl RocksStorage {
253
256
pub fn new(path: impl AsRef<Path>) -> Result<Self> {
254
257
Self::describe_metrics();
255
-
RocksStorage::open_readmode(path, false)
258
+
let me = RocksStorage::open_readmode(path, false)?;
259
+
me.global_init()?;
260
+
Ok(me)
256
261
}
257
262
258
263
pub fn open_readonly(path: impl AsRef<Path>) -> Result<Self> {
···
260
265
}
261
266
262
267
fn open_readmode(path: impl AsRef<Path>, readonly: bool) -> Result<Self> {
263
-
let did_id_table = IdTable::<_, _, true>::setup(DID_IDS_CF);
264
-
let target_id_table = IdTable::<_, _, false>::setup(TARGET_IDS_CF);
268
+
let did_id_table = IdTable::setup(DID_IDS_CF);
269
+
let target_id_table = IdTable::setup(TARGET_IDS_CF);
265
270
271
+
// note: global stuff like jetstream cursor goes in the default cf
272
+
// these are bonus extra cfs
266
273
let cfs = vec![
267
274
// id reference tables
268
275
did_id_table.cf_descriptor(),
···
296
303
is_writer: !readonly,
297
304
backup_task: None.into(),
298
305
})
306
+
}
307
+
308
+
fn global_init(&self) -> Result<()> {
309
+
let first_run = self.db.get(JETSTREAM_CURSOR_KEY)?.is_none(); // no cursor yet: fresh db
310
+
if first_run {
311
+
self.db.put(STARTED_AT_KEY, _rv(now()))?;
312
+
313
+
// hack / temporary: if we're a new db, put in a completed repair
314
+
// state so we don't run repairs (repairs are for old-code dbs)
315
+
let completed = TargetIdRepairState {
316
+
id_when_started: 0,
317
+
current_us_started_at: 0,
318
+
latest_repaired_i: 0,
319
+
};
320
+
self.db.put(TARGET_ID_REPAIR_STATE_KEY, _rv(completed))?;
321
+
}
322
+
Ok(())
323
+
}
324
+
325
+
pub fn run_repair(&self, breather: Duration, stay_alive: CancellationToken) -> Result<bool> {
326
+
let mut state = match self
327
+
.db
328
+
.get(TARGET_ID_REPAIR_STATE_KEY)?
329
+
.map(|s| _vr(&s))
330
+
.transpose()?
331
+
{
332
+
Some(s) => s,
333
+
None => TargetIdRepairState {
334
+
id_when_started: self.did_id_table.priv_id_seq,
335
+
current_us_started_at: now(),
336
+
latest_repaired_i: 0,
337
+
},
338
+
};
339
+
340
+
eprintln!("initial repair state: {state:?}");
341
+
342
+
let cf = self.db.cf_handle(TARGET_IDS_CF).unwrap();
343
+
344
+
let mut iter = self.db.raw_iterator_cf(&cf);
345
+
iter.seek_to_first();
346
+
347
+
eprintln!("repair iterator sent to first key");
348
+
349
+
// skip ahead if we've already repaired some, or take a single first step
350
+
for _ in 0..state.latest_repaired_i {
351
+
iter.next();
352
+
}
353
+
354
+
eprintln!(
355
+
"repair iterator skipped to {}th key",
356
+
state.latest_repaired_i
357
+
);
358
+
359
+
let mut maybe_done = false;
360
+
361
+
let mut write_fast = rocksdb::WriteOptions::default();
362
+
write_fast.set_sync(false);
363
+
write_fast.disable_wal(true);
364
+
365
+
while !stay_alive.is_cancelled() && !maybe_done {
366
+
// let mut batch = WriteBatch::default();
367
+
368
+
let mut any_written = false;
369
+
370
+
for _ in 0..1000 {
371
+
if state.latest_repaired_i % 1_000_000 == 0 {
372
+
eprintln!("target iter at {}", state.latest_repaired_i);
373
+
}
374
+
state.latest_repaired_i += 1;
375
+
376
+
if !iter.valid() {
377
+
eprintln!("invalid iter, are we done repairing?");
378
+
maybe_done = true;
379
+
break;
380
+
};
381
+
382
+
// eprintln!("iterator seems to be valid! getting the key...");
383
+
let raw_key = iter.key().unwrap();
384
+
if raw_key.len() == 8 {
385
+
// eprintln!("found an 8-byte key, skipping it since it's probably an id...");
386
+
iter.next();
387
+
continue;
388
+
}
389
+
let target: TargetKey = _kr::<TargetKey>(raw_key)?;
390
+
let target_id: TargetId = _vr(iter.value().unwrap())?;
391
+
392
+
self.db
393
+
.put_cf_opt(&cf, target_id.id().to_be_bytes(), _rv(&target), &write_fast)?;
394
+
any_written = true;
395
+
iter.next();
396
+
}
397
+
398
+
if any_written {
399
+
self.db
400
+
.put(TARGET_ID_REPAIR_STATE_KEY, _rv(state.clone()))?;
401
+
std::thread::sleep(breather);
402
+
}
403
+
}
404
+
405
+
eprintln!("repair iterator done.");
406
+
407
+
Ok(false)
299
408
}
300
409
301
410
pub fn start_backup(
···
826
935
}
827
936
828
937
impl LinkReader for RocksStorage {
938
+
fn get_many_to_many_counts(
939
+
&self,
940
+
target: &str,
941
+
collection: &str,
942
+
path: &str,
943
+
path_to_other: &str,
944
+
limit: u64,
945
+
after: Option<String>,
946
+
filter_dids: &HashSet<Did>,
947
+
filter_to_targets: &HashSet<String>,
948
+
) -> Result<PagedOrderedCollection<(String, u64, u64), String>> {
949
+
let collection = Collection(collection.to_string());
950
+
let path = RPath(path.to_string());
951
+
952
+
let target_key = TargetKey(Target(target.to_string()), collection.clone(), path.clone());
953
+
954
+
// unfortunately the cursor is a, uh, stringified number.
955
+
// this was easier for the memstore (plain target, not target id), and
956
+
// making it generic is a bit awful.
957
+
// so... parse the number out of a string here :(
958
+
// TODO: this should bubble up to a BAD_REQUEST response
959
+
let after = after.map(|s| s.parse::<u64>().map(TargetId)).transpose()?;
960
+
961
+
let Some(target_id) = self.target_id_table.get_id_val(&self.db, &target_key)? else {
962
+
eprintln!("nothin doin for this target, {target_key:?}");
963
+
return Ok(Default::default());
964
+
};
965
+
966
+
let filter_did_ids: HashMap<DidId, bool> = filter_dids
967
+
.iter()
968
+
.filter_map(|did| self.did_id_table.get_id_val(&self.db, did).transpose())
969
+
.collect::<Result<Vec<DidIdValue>>>()?
970
+
.into_iter()
971
+
.map(|DidIdValue(id, active)| (id, active))
972
+
.collect();
973
+
974
+
// stored targets are keyed by triples of (target, collection, path).
975
+
// target filtering only considers the target itself, so we actually
976
+
// need to do a prefix iteration of all target ids for this target and
977
+
// keep them all.
978
+
// i *think* the number of keys at a target prefix should usually be
979
+
// pretty small, so this is hopefully fine. but if it turns out to be
980
+
// large, we can push this filtering back into the main links loop and
981
+
// do forward db queries per backlink to get the raw target back out.
982
+
let mut filter_to_target_ids: HashSet<TargetId> = HashSet::new();
983
+
for t in filter_to_targets {
984
+
for (_, target_id) in self.iter_targets_for_target(&Target(t.to_string())) {
985
+
filter_to_target_ids.insert(target_id);
986
+
}
987
+
}
988
+
989
+
let linkers = self.get_target_linkers(&target_id)?;
990
+
991
+
let mut grouped_counts: BTreeMap<TargetId, (u64, HashSet<DidId>)> = BTreeMap::new();
992
+
993
+
for (did_id, rkey) in linkers.0 {
994
+
if did_id.is_empty() {
995
+
continue;
996
+
}
997
+
998
+
if !filter_did_ids.is_empty() && filter_did_ids.get(&did_id) != Some(&true) {
999
+
continue;
1000
+
}
1001
+
1002
+
let record_link_key = RecordLinkKey(did_id, collection.clone(), rkey);
1003
+
let Some(targets) = self.get_record_link_targets(&record_link_key)? else {
1004
+
continue;
1005
+
};
1006
+
1007
+
let Some(fwd_target) = targets
1008
+
.0
1009
+
.into_iter()
1010
+
.filter_map(|RecordLinkTarget(rpath, target_id)| {
1011
+
if rpath.0 == path_to_other
1012
+
&& (filter_to_target_ids.is_empty()
1013
+
|| filter_to_target_ids.contains(&target_id))
1014
+
{
1015
+
Some(target_id)
1016
+
} else {
1017
+
None
1018
+
}
1019
+
})
1020
+
.take(1)
1021
+
.next()
1022
+
else {
1023
+
eprintln!("no forward match");
1024
+
continue;
1025
+
};
1026
+
1027
+
// small relief: we page over target ids, so we can bail early instead of
1028
+
// reprocessing previous pages here
1029
+
if after.as_ref().map(|a| fwd_target <= *a).unwrap_or(false) {
1030
+
continue;
1031
+
}
1032
+
1033
+
// and we can skip target ids that must be on future pages
1034
+
// (this check continues after the did-lookup, which we have to do)
1035
+
let page_is_full = grouped_counts.len() as u64 >= limit;
1036
+
if page_is_full {
1037
+
let current_max = grouped_counts.keys().next_back().unwrap(); // limit should be non-zero bleh
1038
+
if fwd_target > *current_max {
1039
+
continue;
1040
+
}
1041
+
}
1042
+
1043
+
// bit painful: 2-step lookup to make sure this did is active
1044
+
let Some(did) = self.did_id_table.get_val_from_id(&self.db, did_id.0)? else {
1045
+
eprintln!("failed to look up did from did_id {did_id:?}");
1046
+
continue;
1047
+
};
1048
+
let Some(DidIdValue(_, active)) = self.did_id_table.get_id_val(&self.db, &did)? else {
1049
+
eprintln!("failed to look up did_value from did_id {did_id:?}: {did:?}: data consistency bug?");
1050
+
continue;
1051
+
};
1052
+
if !active {
1053
+
continue;
1054
+
}
1055
+
1056
+
// page-management, continued
1057
+
// if we have a full page, and we're inserting a *new* key less than
1058
+
// the current max, then we can evict the current max
1059
+
let mut should_evict = false;
1060
+
let entry = grouped_counts.entry(fwd_target.clone()).or_insert_with(|| {
1061
+
// this is a *new* key, so kick the max if we're full
1062
+
should_evict = page_is_full;
1063
+
Default::default()
1064
+
});
1065
+
entry.0 += 1;
1066
+
entry.1.insert(did_id);
1067
+
1068
+
if should_evict {
1069
+
grouped_counts.pop_last();
1070
+
}
1071
+
}
1072
+
1073
+
let mut items: Vec<(String, u64, u64)> = Vec::with_capacity(grouped_counts.len());
1074
+
for (target_id, (n, dids)) in &grouped_counts {
1075
+
let Some(target) = self
1076
+
.target_id_table
1077
+
.get_val_from_id(&self.db, target_id.0)?
1078
+
else {
1079
+
eprintln!("failed to look up target from target_id {target_id:?}");
1080
+
continue;
1081
+
};
1082
+
items.push((target.0 .0, *n, dids.len() as u64));
1083
+
}
1084
+
1085
+
let next = if grouped_counts.len() as u64 >= limit {
1086
+
// yeah.... it's a number saved as a string......sorry
1087
+
grouped_counts
1088
+
.keys()
1089
+
.next_back()
1090
+
.map(|k| format!("{}", k.0))
1091
+
} else {
1092
+
None
1093
+
};
1094
+
1095
+
Ok(PagedOrderedCollection { items, next })
1096
+
}
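The page-window bookkeeping above (skip new keys past the current max when full, evict the max when a smaller new key lands) is a bounded smallest-k pattern over a BTreeMap; a standalone sketch:

use std::collections::BTreeMap;

// keep counts for only the `limit` smallest keys seen so far
// (assumes limit > 0, like the handler's validated `limit`)
fn bounded_counts(keys: impl IntoIterator<Item = u64>, limit: usize) -> BTreeMap<u64, u64> {
    let mut window = BTreeMap::new();
    for k in keys {
        let full = window.len() >= limit;
        let is_new = !window.contains_key(&k);
        // a brand-new key larger than the current max belongs on a later page
        if full && is_new && k > *window.keys().next_back().unwrap() {
            continue;
        }
        *window.entry(k).or_insert(0) += 1;
        if full && is_new {
            window.pop_last(); // evict the old max to stay within `limit`
        }
    }
    window
}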
1097
+
829
1098
fn get_count(&self, target: &str, collection: &str, path: &str) -> Result<u64> {
830
1099
let target_key = TargetKey(
831
1100
Target(target.to_string()),
···
860
1129
path: &str,
861
1130
limit: u64,
862
1131
until: Option<u64>,
1132
+
filter_dids: &HashSet<Did>,
863
1133
) -> Result<PagedAppendingCollection<RecordId>> {
864
1134
let target_key = TargetKey(
865
1135
Target(target.to_string()),
···
876
1146
});
877
1147
};
878
1148
879
-
let linkers = self.get_target_linkers(&target_id)?;
1149
+
let mut linkers = self.get_target_linkers(&target_id)?;
1150
+
if !filter_dids.is_empty() {
1151
+
let mut did_filter = HashSet::new();
1152
+
for did in filter_dids {
1153
+
let Some(DidIdValue(did_id, active)) =
1154
+
self.did_id_table.get_id_val(&self.db, did)?
1155
+
else {
1156
+
eprintln!("failed to find a did_id for {did:?}");
1157
+
continue;
1158
+
};
1159
+
if !active {
1160
+
eprintln!("excluding inactive did from filtered results");
1161
+
continue;
1162
+
}
1163
+
did_filter.insert(did_id);
1164
+
}
1165
+
linkers.0.retain(|linker| did_filter.contains(&linker.0));
1166
+
}
880
1167
881
1168
let (alive, gone) = linkers.count();
882
1169
let total = alive + gone;
···
1024
1311
.map(|s| s.parse::<u64>())
1025
1312
.transpose()?
1026
1313
.unwrap_or(0);
1314
+
let started_at = self
1315
+
.db
1316
+
.get(STARTED_AT_KEY)?
1317
+
.map(|c| _vr(&c))
1318
+
.transpose()?
1319
+
.unwrap_or(COZY_FIRST_CURSOR);
1320
+
1321
+
let other_data = self
1322
+
.db
1323
+
.get(TARGET_ID_REPAIR_STATE_KEY)?
1324
+
.map(|s| _vr(&s))
1325
+
.transpose()?
1326
+
.map(
1327
+
|TargetIdRepairState {
1328
+
current_us_started_at,
1329
+
id_when_started,
1330
+
latest_repaired_i,
1331
+
}| {
1332
+
HashMap::from([
1333
+
("current_us_started_at".to_string(), current_us_started_at),
1334
+
("id_when_started".to_string(), id_when_started),
1335
+
("latest_repaired_i".to_string(), latest_repaired_i),
1336
+
])
1337
+
},
1338
+
)
1339
+
.unwrap_or(HashMap::default());
1340
+
1027
1341
Ok(StorageStats {
1028
1342
dids,
1029
1343
targetables,
1030
1344
linking_records,
1345
+
started_at: Some(started_at),
1346
+
other_data,
1031
1347
})
1032
1348
}
1033
1349
}
···
1053
1369
impl AsRocksValue for &TargetId {}
1054
1370
impl KeyFromRocks for TargetKey {}
1055
1371
impl ValueFromRocks for TargetId {}
1372
+
1373
+
// temp?
1374
+
impl KeyFromRocks for TargetId {}
1375
+
impl AsRocksValue for &TargetKey {}
1056
1376
1057
1377
// target_links table
1058
1378
impl AsRocksKey for &TargetId {}
···
1124
1444
}
1125
1445
1126
1446
// target ids
1127
-
#[derive(Debug, Clone, Serialize, Deserialize)]
1447
+
#[derive(Debug, Clone, Serialize, Deserialize, PartialOrd, Ord, PartialEq, Eq, Hash)]
1128
1448
struct TargetId(u64); // key
1129
1449
1130
-
#[derive(Debug, Clone, Serialize, Deserialize)]
1450
+
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
1131
1451
pub struct Target(pub String); // the actual target/uri
1132
1452
1133
1453
// targets (uris, dids, etc.): the reverse index
+1
-1
constellation/templates/dids.html.j2
+1
-1
constellation/templates/dids.html.j2
···
27
27
{% for did in linking_dids %}
28
28
<pre style="display: block; margin: 1em 2em" class="code"><strong>DID</strong>: {{ did.0 }}
29
29
-> see <a href="/links/all?target={{ did.0|urlencode }}">links to this DID</a>
30
-
-> browse <a href="https://atproto-browser-plus-links.vercel.app/at/{{ did.0|urlencode }}">this DID record</a></pre>
30
+
-> browse <a href="https://pdsls.dev/at://{{ did.0|urlencode }}">this DID record</a></pre>
31
31
{% endfor %}
32
32
33
33
{% if let Some(c) = cursor %}
+54
constellation/templates/get-backlinks.html.j2
+54
constellation/templates/get-backlinks.html.j2
···
1
+
{% extends "base.html.j2" %}
2
+
{% import "try-it-macros.html.j2" as try_it %}
3
+
4
+
{% block title %}Backlinks{% endblock %}
5
+
{% block description %}All {{ query.source }} records with links to {{ query.subject }}{% endblock %}
6
+
7
+
{% block content %}
8
+
9
+
{% call try_it::get_backlinks(query.subject, query.source, query.did, query.limit) %}
10
+
11
+
<h2>
12
+
Links to <code>{{ query.subject }}</code>
13
+
{% if let Some(browseable_uri) = query.subject|to_browseable %}
14
+
<small style="font-weight: normal; font-size: 1rem"><a href="{{ browseable_uri }}">browse record</a></small>
15
+
{% endif %}
16
+
</h2>
17
+
18
+
<p><strong>{{ total|human_number }} links</strong> from <code>{{ query.source }}</code>.</p>
19
+
20
+
<ul>
21
+
<li>See distinct linking DIDs at <code>/links/distinct-dids</code>: <a href="/links/distinct-dids?target={{ query.subject|urlencode }}&collection={{ collection|urlencode }}&path={{ path|urlencode }}">/links/distinct-dids?target={{ query.subject }}&collection={{ collection }}&path={{ path }}</a></li>
22
+
<li>See all links to this target at <code>/links/all</code>: <a href="/links/all?target={{ query.subject|urlencode }}">/links/all?target={{ query.subject }}</a></li>
23
+
</ul>
24
+
25
+
<h3>Links, most recent first:</h3>
26
+
27
+
{% for record in records %}
28
+
<pre style="display: block; margin: 1em 2em" class="code"><strong>DID</strong>: {{ record.did().0 }} (<a href="/links/all?target={{ record.did().0|urlencode }}">DID links</a>)
29
+
<strong>Collection</strong>: {{ record.collection }}
30
+
<strong>RKey</strong>: {{ record.rkey }}
31
+
-> <a href="https://pdsls.dev/at://{{ record.did().0 }}/{{ record.collection }}/{{ record.rkey }}">browse record</a></pre>
32
+
{% endfor %}
33
+
34
+
{% if let Some(c) = cursor %}
35
+
<form method="get" action="/xrpc/blue.microcosm.links.getBacklinks">
36
+
<input type="hidden" name="subject" value="{{ query.subject }}" />
37
+
<input type="hidden" name="source" value="{{ query.source }}" />
38
+
<input type="hidden" name="limit" value="{{ query.limit }}" />
39
+
{% for did in query.did %}
40
+
<input type="hidden" name="did" value="{{ did }}" />
41
+
{% endfor %}
42
+
<input type="hidden" name="cursor" value={{ c|json|safe }} />
43
+
<button type="submit">next page…</button>
44
+
</form>
45
+
{% else %}
46
+
<button disabled><em>end of results</em></button>
47
+
{% endif %}
48
+
49
+
<details>
50
+
<summary>Raw JSON response</summary>
51
+
<pre class="code">{{ self|tojson }}</pre>
52
+
</details>
53
+
54
+
{% endblock %}
+67
constellation/templates/get-many-to-many-counts.html.j2
+67
constellation/templates/get-many-to-many-counts.html.j2
···
1
+
{% extends "base.html.j2" %}
2
+
{% import "try-it-macros.html.j2" as try_it %}
3
+
4
+
{% block title %}Many to Many counts{% endblock %}
5
+
{% block description %}Counts of many-to-many {{ query.source }} join records with links to {{ query.subject }} and a secondary target at {{ query.path_to_other }}{% endblock %}
6
+
7
+
{% block content %}
8
+
9
+
{% call try_it::get_many_to_many_counts(
10
+
query.subject,
11
+
query.source,
12
+
query.path_to_other,
13
+
query.did,
14
+
query.other_subject,
15
+
query.limit,
16
+
) %}
17
+
18
+
<h2>
19
+
Many-to-many links to <code>{{ query.subject }}</code> joining through <code>{{ query.path_to_other }}</code>
20
+
{% if let Some(browseable_uri) = query.subject|to_browseable %}
21
+
<small style="font-weight: normal; font-size: 1rem"><a href="{{ browseable_uri }}">browse record</a></small>
22
+
{% endif %}
23
+
</h2>
24
+
25
+
<p><strong>{% if cursor.is_some() || query.cursor.is_some() %}more than {% endif %}{{ counts_by_other_subject.len()|to_u64|human_number }} joins</strong> <code>{{ query.source }}→{{ query.path_to_other }}</code></p>
26
+
27
+
<ul>
28
+
<li>See direct backlinks at <code>/xrpc/blue.microcosm.links.getBacklinks</code>: <a href="/xrpc/blue.microcosm.links.getBacklinks?subject={{ query.subject|urlencode }}&source={{ query.source|urlencode }}">/xrpc/blue.microcosm.links.getBacklinks?subject={{ query.subject }}&source={{ query.source }}</a></li>
29
+
<li>See all links to this target at <code>/links/all</code>: <a href="/links/all?target={{ query.subject|urlencode }}">/links/all?target={{ query.subject }}</a></li>
30
+
</ul>
31
+
32
+
<h3>Counts by other subject:</h3>
33
+
34
+
{% for counts in counts_by_other_subject %}
35
+
<pre style="display: block; margin: 1em 2em" class="code"><strong>Joined subject</strong>: {{ counts.subject }}
36
+
<strong>Joining records</strong>: {{ counts.total }}
37
+
<strong>Unique joiner ids</strong>: {{ counts.distinct }}
38
+
-> {% if let Some(browseable_uri) = counts.subject|to_browseable -%}
39
+
<a href="{{ browseable_uri }}">browse record</a>
40
+
{%- endif %}</pre>
41
+
{% endfor %}
42
+
43
+
{% if let Some(c) = cursor %}
44
+
<form method="get" action="/xrpc/blue.microcosm.links.getManyToManyCounts">
45
+
<input type="hidden" name="subject" value="{{ query.subject }}" />
46
+
<input type="hidden" name="source" value="{{ query.source }}" />
47
+
<input type="hidden" name="pathToOther" value="{{ query.path_to_other }}" />
48
+
{% for did in query.did %}
49
+
<input type="hidden" name="did" value="{{ did }}" />
50
+
{% endfor %}
51
+
{% for otherSubject in query.other_subject %}
52
+
<input type="hidden" name="otherSubject" value="{{ otherSubject }}" />
53
+
{% endfor %}
54
+
<input type="hidden" name="limit" value="{{ query.limit }}" />
55
+
<input type="hidden" name="cursor" value={{ c|json|safe }} />
56
+
<button type="submit">next page…</button>
57
+
</form>
58
+
{% else %}
59
+
<button disabled><em>end of results</em></button>
60
+
{% endif %}
61
+
62
+
<details>
63
+
<summary>Raw JSON response</summary>
64
+
<pre class="code">{{ self|tojson }}</pre>
65
+
</details>
66
+
67
+
{% endblock %}
+65
-7
constellation/templates/hello.html.j2
+65
-7
constellation/templates/hello.html.j2
···
19
19
<p>It works by recursively walking <em>all</em> records coming through the firehose, searching for anything that looks like a link. Links are indexed by the target they point at, the collection the record came from, and the JSON path to the link in that record.</p>
20
20
21
21
<p>
22
-
This server has indexed <span class="stat">{{ stats.linking_records|human_number }}</span> links between <span class="stat">{{ stats.targetables|human_number }}</span> targets and sources from <span class="stat">{{ stats.dids|human_number }}</span> identities over <span class="stat">{{ days_indexed|human_number }}</span> days.<br/>
23
-
<small>(indexing new records in real time, backfill still TODO)</small>
22
+
This server has indexed <span class="stat">{{ stats.linking_records|human_number }}</span> links between <span class="stat">{{ stats.targetables|human_number }}</span> targets and sources from <span class="stat">{{ stats.dids|human_number }}</span> identities over <span class="stat">
23
+
{%- if let Some(days) = days_indexed %}
24
+
{{ days|human_number }}
25
+
{% else %}
26
+
???
27
+
{% endif -%}
28
+
</span> days.<br/>
29
+
<small>(indexing new records in real time, backfill coming soon!)</small>
24
30
</p>
25
31
26
-
<p>The API is currently <strong>unstable</strong>. But feel free to use it! If you want to be nice, put your project name and bsky username (or email) in your user-agent header for api requests.</p>
32
+
{# {% for k, v in stats.other_data.iter() %}
33
+
<p><strong>{{ k }}</strong>: {{ v }}</p>
34
+
{% endfor %} #}
35
+
36
+
<p>You're welcome to use this public instance! Please do not build the torment nexus. If you want to be nice, put your project name and bsky username (or email) in your user-agent header for api requests.</p>
27
37
28
38
29
39
<h2>API Endpoints</h2>
30
40
41
+
<h3 class="route"><code>GET /xrpc/blue.microcosm.links.getBacklinks</code></h3>
42
+
43
+
<p>A list of records linking to any record, identity, or uri.</p>
44
+
45
+
<h4>Query parameters:</h4>
46
+
47
+
<ul>
48
+
<li><p><code>subject</code>: required, must url-encode. Example: <code>at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r</code></p></li>
49
+
<li><p><code>source</code>: required. Example: <code>app.bsky.feed.like:subject.uri</code></p></li>
50
+
<li><p><code>did</code>: optional, filter links to those from specific users. Include multiple times to filter by multiple users. Example: <code>did=did:plc:vc7f4oafdgxsihk4cry2xpze&did=did:plc:vc7f4oafdgxsihk4cry2xpze</code></p></li>
51
+
<li><p><code>limit</code>: optional. Default: <code>16</code>. Maximum: <code>100</code></p></li>
52
+
</ul>
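<p>From the command line, a curl sketch using the documented example values (any HTTP client works; the user-agent values are placeholders):</p>

<pre class="code">curl -sG 'https://constellation.microcosm.blue/xrpc/blue.microcosm.links.getBacklinks' \
  --data-urlencode 'subject=at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r' \
  --data-urlencode 'source=app.bsky.feed.like:subject.uri' \
  --data-urlencode 'limit=16' \
  -H 'User-Agent: my-project (@me.example.com)'</pre>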
53
+
54
+
<p style="margin-bottom: 0"><strong>Try it:</strong></p>
55
+
{% call try_it::get_backlinks("at://did:plc:a4pqq234yw7fqbddawjo7y35/app.bsky.feed.post/3m237ilwc372e", "app.bsky.feed.like:subject.uri", [""], 16) %}
56
+
57
+
58
+
<h3 class="route"><code>GET /xrpc/blue.microcosm.links.getManyToManyCounts</code></h3>
59
+
60
+
<p>Counts of many-to-many "join" records linking to a subject, grouped by the secondary target found at <code>pathToOther</code> in each record.</p>
61
+
62
+
<h4>Query parameters:</h4>
63
+
64
+
<ul>
65
+
<li><p><code>subject</code>: required, must url-encode. Example: <code>at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r</code></p></li>
66
+
<li><p><code>source</code>: required. Example: <code>app.bsky.feed.like:subject.uri</code></p></li>
67
+
<li><p><code>pathToOther</code>: required. Path to the secondary link in the many-to-many record. Example: <code>otherThing.uri</code></p></li>
68
+
<li><p><code>did</code>: optional, filter links to those from specific users. Include multiple times to filter by multiple users. Example: <code>did=did:plc:vc7f4oafdgxsihk4cry2xpze&did=did:plc:vc7f4oafdgxsihk4cry2xpze</code></p></li>
69
+
<li><p><code>otherSubject</code>: optional, filter secondary links to specific subjects. Include multiple times to filter by multiple subjects. Example: <code>at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r</code></p></li>
70
+
<li><p><code>limit</code>: optional. Default: <code>16</code>. Maximum: <code>100</code></p></li>
71
+
</ul>
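<p>The equivalent curl sketch (values borrowed from the try-it example below):</p>

<pre class="code">curl -sG 'https://constellation.microcosm.blue/xrpc/blue.microcosm.links.getManyToManyCounts' \
  --data-urlencode 'subject=at://did:plc:wshs7t2adsemcrrd4snkeqli/sh.tangled.label.definition/good-first-issue' \
  --data-urlencode 'source=sh.tangled.label.op:add[].key' \
  --data-urlencode 'pathToOther=subject' \
  --data-urlencode 'limit=25'</pre>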
72
+
73
+
<p style="margin-bottom: 0"><strong>Try it:</strong></p>
74
+
{% call try_it::get_many_to_many_counts(
75
+
"at://did:plc:wshs7t2adsemcrrd4snkeqli/sh.tangled.label.definition/good-first-issue",
76
+
"sh.tangled.label.op:add[].key",
77
+
"subject",
78
+
[""],
79
+
[""],
80
+
25,
81
+
) %}
82
+
83
+
31
84
<h3 class="route"><code>GET /links</code></h3>
32
85
33
86
<p>A list of records linking to a target.</p>
34
87
88
+
<p>[DEPRECATED]: use <code>GET /xrpc/blue.microcosm.links.getBacklinks</code> instead. New apps should avoid this endpoint, but it <strong>will</strong> remain supported for the foreseeable future.</p>
89
+
35
90
<h4>Query parameters:</h4>
36
91
37
92
<ul>
38
-
<li><code>target</code>: required, must url-encode. Example: <code>at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r</code></li>
39
-
<li><code>collection</code>: required. Example: <code>app.bsky.feed.like</code></li>
40
-
<li><code>path</code>: required, must url-encode. Example: <code>.subject.uri</code></li>
93
+
<li><p><code>target</code>: required, must url-encode. Example: <code>at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r</code></p></li>
94
+
<li><p><code>collection</code>: required. Example: <code>app.bsky.feed.like</code></p></li>
95
+
<li><p><code>path</code>: required, must url-encode. Example: <code>.subject.uri</code></p></li>
96
+
<li><p><code>did</code>: optional, filter links to those from specific users. Include multiple times to filter by multiple users. Example: <code>did=did:plc:vc7f4oafdgxsihk4cry2xpze&did=did:plc:vc7f4oafdgxsihk4cry2xpze</code></p></li>
97
+
<li><p><code>from_dids</code> [deprecated]: optional. Use <code>did</code> instead. Example: <code>from_dids=did:plc:vc7f4oafdgxsihk4cry2xpze,did:plc:vc7f4oafdgxsihk4cry2xpze</code></p></li>
98
+
<li><p><code>limit</code>: optional. Default: <code>16</code>. Maximum: <code>100</code></p></li>
41
99
</ul>
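<p>For reference, a curl sketch with the documented example values (prefer the XRPC endpoint above for new code):</p>

<pre class="code">curl -sG 'https://constellation.microcosm.blue/links' \
  --data-urlencode 'target=at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r' \
  --data-urlencode 'collection=app.bsky.feed.like' \
  --data-urlencode 'path=.subject.uri'</pre>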
42
100
43
101
<p style="margin-bottom: 0"><strong>Try it:</strong></p>
44
-
{% call try_it::links("at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r", "app.bsky.feed.like", ".subject.uri") %}
102
+
{% call try_it::links("at://did:plc:a4pqq234yw7fqbddawjo7y35/app.bsky.feed.post/3m237ilwc372e", "app.bsky.feed.like", ".subject.uri", [""], 16) %}
45
103
46
104
47
105
<h3 class="route"><code>GET /links/distinct-dids</code></h3>
+2
-2
constellation/templates/links.html.j2
+2
-2
constellation/templates/links.html.j2
···
6
6
7
7
{% block content %}
8
8
9
-
{% call try_it::links(query.target, query.collection, query.path) %}
9
+
{% call try_it::links(query.target, query.collection, query.path, query.did, query.limit) %}
10
10
11
11
<h2>
12
12
Links to <code>{{ query.target }}</code>
···
28
28
<pre style="display: block; margin: 1em 2em" class="code"><strong>DID</strong>: {{ record.did().0 }} (<a href="/links/all?target={{ record.did().0|urlencode }}">DID links</a>)
29
29
<strong>Collection</strong>: {{ record.collection }}
30
30
<strong>RKey</strong>: {{ record.rkey }}
31
-
-> <a href="https://atproto-browser-plus-links.vercel.app/at/{{ record.did().0|urlencode }}/{{ record.collection }}/{{ record.rkey }}">browse record</a></pre>
31
+
-> <a href="https://pdsls.dev/at://{{ record.did().0 }}/{{ record.collection }}/{{ record.rkey }}">browse record</a></pre>
32
32
{% endfor %}
33
33
34
34
{% if let Some(c) = cursor %}
+88
-3
constellation/templates/try-it-macros.html.j2
+88
-3
constellation/templates/try-it-macros.html.j2
···
1
-
{% macro links(target, collection, path) %}
1
+
{% macro get_backlinks(subject, source, dids, limit) %}
2
+
<form method="get" action="/xrpc/blue.microcosm.links.getBacklinks">
3
+
<pre class="code"><strong>GET</strong> /xrpc/blue.microcosm.links.getBacklinks
4
+
?subject= <input type="text" name="subject" value="{{ subject }}" placeholder="at-uri, did, uri..." />
5
+
&source= <input type="text" name="source" value="{{ source }}" placeholder="app.bsky.feed.like:subject.uri" />
6
+
{%- for did in dids %}{% if !did.is_empty() %}
7
+
&did= <input type="text" name="did" value="{{ did }}" placeholder="did:plc:..." />{% endif %}{% endfor %}
8
+
<span id="did-placeholder"></span> <button id="add-did">+ did filter</button>
9
+
&limit= <input type="number" name="limit" value="{{ limit }}" max="100" placeholder="100" /> <button type="submit">get links</button></pre>
10
+
</form>
11
+
<script>
12
+
const addDidButton = document.getElementById('add-did');
13
+
const didPlaceholder = document.getElementById('did-placeholder');
14
+
addDidButton.addEventListener('click', e => {
15
+
e.preventDefault();
16
+
const i = document.createElement('input');
17
+
i.placeholder = 'did:plc:...';
18
+
i.name = "did"
19
+
const p = addDidButton.parentNode;
20
+
p.insertBefore(document.createTextNode('&did= '), didPlaceholder);
21
+
p.insertBefore(i, didPlaceholder);
22
+
p.insertBefore(document.createTextNode('\n '), didPlaceholder);
23
+
});
24
+
</script>
25
+
{% endmacro %}
26
+
27
+
{% macro get_many_to_many_counts(subject, source, pathToOther, dids, otherSubjects, limit) %}
28
+
<form method="get" action="/xrpc/blue.microcosm.links.getManyToManyCounts">
29
+
<pre class="code"><strong>GET</strong> /xrpc/blue.microcosm.links.getManyToManyCounts
30
+
?subject= <input type="text" name="subject" value="{{ subject }}" placeholder="at-uri, did, uri..." />
31
+
&source= <input type="text" name="source" value="{{ source }}" placeholder="app.bsky.feed.like:subject.uri" />
32
+
&pathToOther= <input type="text" name="pathToOther" value="{{ pathToOther }}" placeholder="otherThing.uri" />
33
+
{%- for did in dids %}{% if !did.is_empty() %}
34
+
&did= <input type="text" name="did" value="{{ did }}" placeholder="did:plc:..." />{% endif %}{% endfor %}
35
+
<span id="m2m-subject-placeholder"></span> <button id="m2m-add-subject">+ other subject filter</button>
36
+
{%- for otherSubject in otherSubjects %}{% if !otherSubject.is_empty() %}
37
+
&otherSubject= <input type="text" name="did" value="{{ otherSubject }}" placeholder="at-uri, did, uri..." />{% endif %}{% endfor %}
38
+
<span id="m2m-did-placeholder"></span> <button id="m2m-add-did">+ did filter</button>
39
+
&limit= <input type="number" name="limit" value="{{ limit }}" max="100" placeholder="100" /> <button type="submit">get links</button></pre>
40
+
</form>
41
+
<script>
42
+
const m2mAddDidButton = document.getElementById('m2m-add-did');
43
+
const m2mDidPlaceholder = document.getElementById('m2m-did-placeholder');
44
+
m2mAddDidButton.addEventListener('click', e => {
45
+
e.preventDefault();
46
+
const i = document.createElement('input');
47
+
i.placeholder = 'did:plc:...';
48
+
i.name = "did"
49
+
const p = m2mAddDidButton.parentNode;
50
+
p.insertBefore(document.createTextNode('&did= '), m2mDidPlaceholder);
51
+
p.insertBefore(i, m2mDidPlaceholder);
52
+
p.insertBefore(document.createTextNode('\n '), m2mDidPlaceholder);
53
+
});
54
+
const m2mAddSubjectButton = document.getElementById('m2m-add-subject');
55
+
const m2mSubjectPlaceholder = document.getElementById('m2m-subject-placeholder');
56
+
m2mAddSubjectButton.addEventListener('click', e => {
57
+
e.preventDefault();
58
+
const i = document.createElement('input');
59
+
i.placeholder = 'at-uri, did, uri...';
60
+
i.name = "otherSubject"
61
+
const p = m2mAddSubjectButton.parentNode;
62
+
p.insertBefore(document.createTextNode('&otherSubject= '), m2mSubjectPlaceholder);
63
+
p.insertBefore(i, m2mSubjectPlaceholder);
64
+
p.insertBefore(document.createTextNode('\n '), m2mSubjectPlaceholder);
65
+
});
66
+
</script>
67
+
{% endmacro %}
68
+
69
+
{% macro links(target, collection, path, dids, limit) %}
2
70
<form method="get" action="/links">
3
71
<pre class="code"><strong>GET</strong> /links
4
72
?target= <input type="text" name="target" value="{{ target }}" placeholder="target" />
5
73
&collection= <input type="text" name="collection" value="{{ collection }}" placeholder="collection" />
6
-
&path= <input type="text" name="path" value="{{ path }}" placeholder="path" /> <button type="submit">get links</button></pre>
74
+
&path= <input type="text" name="path" value="{{ path }}" placeholder="path" />
75
+
{%- for did in dids %}{% if !did.is_empty() %}
76
+
&did= <input type="text" name="did" value="{{ did }}" placeholder="did:plc:..." />{% endif %}{% endfor %}
77
+
<span id="did-placeholder"></span> <button id="add-did">+ did filter</button>
78
+
&limit= <input type="number" name="limit" value="{{ limit }}" max="100" placeholder="100" /> <button type="submit">get links</button></pre>
7
79
</form>
80
+
<script>
81
+
// ids and consts namespaced so this doesn't clash with the get_backlinks macro when both render on one page
const linksAddDidButton = document.getElementById('links-add-did');
82
+
const linksDidPlaceholder = document.getElementById('links-did-placeholder');
83
+
linksAddDidButton.addEventListener('click', e => {
84
+
e.preventDefault();
85
+
const i = document.createElement('input');
86
+
i.placeholder = 'did:plc:...';
87
+
i.name = "did"
88
+
const p = linksAddDidButton.parentNode;
89
+
p.insertBefore(document.createTextNode('&did= '), linksDidPlaceholder);
90
+
p.insertBefore(i, linksDidPlaceholder);
91
+
p.insertBefore(document.createTextNode('\n          '), linksDidPlaceholder);
92
+
});
93
+
</script>
8
94
{% endmacro %}
9
-
10
95
11
96
{% macro dids(target, collection, path) %}
12
97
<form method="get" action="/links/distinct-dids">
-496
cozy-setup (move to another repo).md
-496
cozy-setup (move to another repo).md
···
+1
-1
jetstream/Cargo.toml
+1
-1
jetstream/Cargo.toml
···
10
10
11
11
[dependencies]
12
12
async-trait = "0.1.83"
13
-
atrium-api = { version = "0.25.4", default-features = false, features = [
13
+
atrium-api = { git = "https://github.com/uniphil/atrium.git", branch = "fix/resolve-handle-https-accept-whitespace", default-features = false, features = [
14
14
"namespace-appbsky",
15
15
] }
16
16
tokio = { version = "1.44.2", features = ["full", "sync", "time"] }
+496
legacy/cozy-setup (move to another repo).md
+496
legacy/cozy-setup (move to another repo).md
···
1
+
cozy-ucosm
2
+
3
+
4
+
## gateway
5
+
6
+
- tailscale (exit node enabled)
7
+
-> allow ipv4 and ipv6 forwarding
8
+
- caddy
9
+
10
+
```bash
11
+
apt install golang
12
+
go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest
13
+
go/bin/xcaddy build \
14
+
--with github.com/caddyserver/cache-handler \
15
+
--with github.com/darkweak/storages/badger/caddy \
16
+
--with github.com/mholt/caddy-ratelimit
17
+
# then https://caddyserver.com/docs/running#manual-installation
18
+
19
+
mkdir /var/cache/caddy-badger
20
+
chown -R caddy:caddy /var/cache/caddy-badger/
21
+
```
22
+
23
+
- `/etc/caddy/Caddyfile`
24
+
25
+
```
26
+
{
27
+
cache {
28
+
badger
29
+
api {
30
+
prometheus
31
+
}
32
+
}
33
+
}
34
+
35
+
links.bsky.bad-example.com {
36
+
reverse_proxy link-aggregator:6789
37
+
38
+
@browser `{header.Origin.startsWith("Mozilla/5.0")`
39
+
rate_limit {
40
+
zone global_burst {
41
+
key {remote_host}
42
+
events 10
43
+
window 1s
44
+
}
45
+
zone global_general {
46
+
key {remote_host}
47
+
events 100
48
+
window 60s
49
+
log_key true
50
+
}
51
+
zone website_harsh_limit {
52
+
key {header.Origin}
53
+
match {
54
+
expression {header.User-Agent}.startsWith("Mozilla/5.0")
55
+
}
56
+
events 1000
57
+
window 30s
58
+
log_key true
59
+
}
60
+
}
61
+
respond /souin-api/metrics "denied" 403 # does not work
62
+
cache {
63
+
ttl 3s
64
+
stale 1h
65
+
default_cache_control public, s-maxage=3
66
+
badger {
67
+
path /var/cache/caddy-badger/links
68
+
}
69
+
}
70
+
}
71
+
72
+
gateway:80 {
73
+
metrics
74
+
cache
75
+
}
76
+
```
77
+
well... the gateway fell over IMMEDIATELY with like 2 req/sec from deletions, with that ^^ config. for now i removed everything except the reverse proxy config + normal caddy metrics and it's running fine on vanilla caddy. i did try reducing the rate-limiting configs to a single, fixed-key global limit but it still ate all the ram and died. maybe badger w/ the cache config was still a problem. maybe it would have been ok on a machine with more than 1GB mem.
78
+
79
+
80
+
alternative proxies:
81
+
82
+
- nginx. i should probably just use this. acme-client is a piece of cake to set up, and i know how to configure it.
83
+
- haproxy. also kind of familiar, it's old and stable. no idea how it handles low-mem (our 1gb) vs nginx.
84
+
- sozu. popular rust thing, fast. doesn't have rate-limiting or cache feature?
85
+
- rpxy. like caddy (auto-tls) but in rust and actually fast? has an "experimental" cache feature. but the cache feature looks good.
86
+
- rama. build-your-own proxy. not sure it has both cache and limiter in its standard features?
87
+
- pingora. build-your-own cloudflare, so like, probably stable. has tools for cache and limiting. low-mem...?
88
+
- cache stuff in pingora seems a little... hit and miss (byeeeee). only a test impl for Storage for the main cache feature?
89
+
- but the rate-limiter has a guide: https://github.com/cloudflare/pingora/blob/main/docs/user_guide/rate_limiter.md
90
+
91
+
what i want is low-resource reverse proxy with built-in rate-limiting and caching. but maybe cache (and/or ratelimiting) could be external to the reverse proxy
92
+
- varnish is a dedicated cache. has https://github.com/varnish/varnish-modules/blob/master/src/vmod_vsthrottle.vcc
93
+
- apache traffic control has experimental rate-limiting plugins
94
+
95
+
96
+
- victoriametrics
97
+
98
+
```bash
99
+
curl -LO https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.109.1/victoria-metrics-linux-amd64-v1.109.1.tar.gz
100
+
tar xzf victoria-metrics-linux-amd64-v1.109.1.tar.gz
101
+
# and then https://docs.victoriametrics.com/quick-start/#starting-vm-single-from-a-binary
102
+
sudo mkdir /etc/victoria-metrics && sudo chown -R victoriametrics:victoriametrics /etc/victoria-metrics
103
+
104
+
```
105
+
106
+
- `/etc/victoria-metrics/prometheus.yml`
107
+
108
+
```yaml
109
+
global:
110
+
scrape_interval: '15s'
111
+
112
+
scrape_configs:
113
+
- job_name: 'link_aggregator'
114
+
static_configs:
115
+
- targets: ['link-aggregator:8765']
116
+
- job_name: 'gateway:caddy'
117
+
static_configs:
118
+
- targets: ['gateway:80/metrics']
119
+
- job_name: 'gateway:cache'
120
+
static_configs:
121
+
- targets: ['gateway:80/souin-api/metrics']
122
+
```
123
+
124
+
- `ExecStart` in `/etc/systemd/system/victoriametrics.service`:
125
+
126
+
```
127
+
ExecStart=/usr/local/bin/victoria-metrics-prod -storageDataPath=/var/lib/victoria-metrics -retentionPeriod=90d -selfScrapeInterval=1m -promscrape.config=/etc/victoria-metrics/prometheus.yml
128
+
```
129
+
130
+
- grafana
131
+
132
+
followed `https://grafana.com/docs/grafana/latest/setup-grafana/installation/debian/#install-grafana-on-debian-or-ubuntu`
133
+
134
+
something something something then
135
+
136
+
```
137
+
sudo grafana-cli --pluginUrl https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.11.1/victoriametrics-datasource-v0.11.1.zip plugins install victoriametrics
138
+
```
139
+
140
+
- raspi node_exporter
141
+
142
+
```bash
143
+
curl -LO https://github.com/prometheus/node_exporter/releases/download/v1.8.2/node_exporter-1.8.2.linux-armv7.tar.gz
144
+
tar xzf node_exporter-1.8.2.linux-armv7.tar.gz
145
+
sudo cp node_exporter-1.8.2.linux-armv7/node_exporter /usr/local/bin/
146
+
sudo useradd --no-create-home --shell /bin/false node_exporter
147
+
sudo nano /etc/systemd/system/node_exporter.service
148
+
# [Unit]
149
+
# Description=Node Exporter
150
+
# Wants=network-online.target
151
+
# After=network-online.target
152
+
153
+
# [Service]
154
+
# User=node_exporter
155
+
# Group=node_exporter
156
+
# Type=simple
157
+
# ExecStart=/usr/local/bin/node_exporter
158
+
# Restart=always
159
+
# RestartSec=3
160
+
161
+
# [Install]
162
+
# WantedBy=multi-user.target
163
+
sudo systemctl daemon-reload
164
+
sudo systemctl enable node_exporter.service
165
+
sudo systemctl start node_exporter.service
166
+
```
167
+
168
+
todo: get raspi vcgencmd outputs into metrics
169
+
170
+
- nginx on gateway
171
+
172
+
```nginx
173
+
# in http
174
+
175
+
##
176
+
# cozy cache
177
+
##
178
+
proxy_cache_path /var/cache/nginx keys_zone=cozy_zone:10m;
179
+
180
+
##
181
+
# cozy limit
182
+
##
183
+
limit_req_zone $binary_remote_addr zone=cozy_ip_limit:10m rate=50r/s;
184
+
limit_req_zone $server_name zone=cozy_global_limit:10m rate=1000r/s;
185
+
186
+
# in sites-available/constellation.microcosm.blue
187
+
188
+
upstream cozy_link_aggregator {
189
+
server link-aggregator:6789;
190
+
keepalive 16;
191
+
}
192
+
193
+
server {
194
+
listen 8080;
195
+
listen [::]:8080;
196
+
197
+
server_name constellation.microcosm.blue;
198
+
199
+
proxy_cache cozy_zone;
200
+
proxy_cache_background_update on;
201
+
proxy_cache_key "$scheme$proxy_host$uri$is_args$args$http_accept";
202
+
proxy_cache_lock on; # make simultaneous requests for the same uri wait for it to appear in cache instead of hitting origin
203
+
proxy_cache_lock_age 1s;
204
+
proxy_cache_lock_timeout 2s;
205
+
proxy_cache_valid 10s; # default -- should be explicitly set in the response headers
206
+
proxy_cache_valid any 15s; # non-200s default
207
+
proxy_read_timeout 5s;
208
+
proxy_send_timeout 15s;
209
+
proxy_socket_keepalive on;
210
+
211
+
limit_req zone=cozy_ip_limit nodelay burst=100;
212
+
limit_req zone=cozy_global_limit;
213
+
limit_req_status 429;
214
+
215
+
location / {
216
+
proxy_pass http://cozy_link_aggregator;
217
+
include proxy_params;
218
+
proxy_http_version 1.1;
219
+
proxy_set_header Connection ""; # for keepalive
220
+
}
221
+
}
222
+
```
223
+
224
+
also `systemctl edit nginx` and paste
225
+
226
+
```
227
+
[Service]
228
+
Restart=always
229
+
```
230
+
231
+
→ https://serverfault.com/a/1003373
232
+
233
+
now making browsers redirect to the microcosm.blue url:
234
+
235
+
```
236
+
[...]
237
+
server_name links.bsky.bad-example.com;
238
+
239
+
add_header Access-Control-Allow-Origin * always; # bit of hack to have it here but nginx doesn't like it in the `if`
240
+
if ($http_user_agent ~ ^Mozilla/) {
241
+
# for now send *browsers* to the new location, hopefully without impacting api requests
242
+
# (yeah we're doing a UA test here and content-negotiation in the app. whatever.)
243
+
return 301 https://constellation.microcosm.blue$request_uri;
244
+
}
245
+
[...]
246
+
```
247
+
248
+
- nginx metrics
249
+
250
+
- download nginx-prometheus-exporter
251
+
https://github.com/nginx/nginx-prometheus-exporter/releases/download/v1.4.1/nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
252
+
253
+
- err actually going to make mistakes and try with snap
254
+
`snap install nginx-prometheus-exporter`
255
+
- so it got a binary for me but no systemd task set up. boooo.
256
+
`snap remove nginx-prometheus-exporter`
257
+
258
+
- ```bash
259
+
curl -LO https://github.com/nginx/nginx-prometheus-exporter/releases/download/v1.4.1/nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
260
+
tar xzf nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
261
+
mv nginx-prometheus-exporter /usr/local/bin
262
+
useradd --no-create-home --shell /bin/false nginx-prometheus-exporter
263
+
nano /etc/systemd/system/nginx-prometheus-exporter.service
264
+
# [Unit]
265
+
# Description=NGINX Exporter
266
+
# Wants=network-online.target
267
+
# After=network-online.target
268
+
269
+
# [Service]
270
+
# User=nginx-prometheus-exporter
271
+
# Group=nginx-prometheus-exporter
272
+
# Type=simple
273
+
# ExecStart=/usr/local/bin/nginx-prometheus-exporter --nginx.scrape-uri=http://gateway:8080/stub_status --web.listen-address=gateway:9113
274
+
# Restart=always
275
+
# RestartSec=3
276
+
277
+
# [Install]
278
+
# WantedBy=multi-user.target
279
+
systemctl daemon-reload
280
+
systemctl start nginx-prometheus-exporter.service
281
+
systemctl enable nginx-prometheus-exporter.service
282
+
```
283
+
284
+
- nginx `/etc/nginx/sites-available/gateway-nginx-status`
285
+
286
+
```nginx
287
+
server {
288
+
listen 8080;
289
+
listen [::]:8080;
290
+
291
+
server_name gateway;
292
+
293
+
location /stub_status {
294
+
stub_status;
295
+
}
296
+
location / {
297
+
return 404;
298
+
}
299
+
}
300
+
```
301
+
302
+
```bash
303
+
ln -s /etc/nginx/sites-available/gateway-nginx-status /etc/nginx/sites-enabled/
304
+
```
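a quick sanity check after wiring this up (assuming the `gateway` hostname resolves, as in the config above):

```bash
sudo nginx -t && sudo systemctl reload nginx   # validate config, then reload
curl -s http://gateway:8080/stub_status        # should print nginx connection counters
```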
305
+
306
+
307
+
## bootes (pi5)
308
+
309
+
- mount sd card, touch the `ssh` file, then `echo "pi:$(echo raspberry | openssl passwd -6 -stdin)" > userconf.txt`
310
+
- raspi-config: enable pcie 3, set hostname, enable ssh
311
+
- put ssh key into `.ssh/authorized_keys`
312
+
- put `PasswordAuthentication no` in `/etc/ssh/sshd_config`
313
+
- `sudo apt update && sudo apt upgrade`
314
+
- `sudo apt install xfsprogs`
315
+
- `sudo mkfs.xfs -L c11n-kv /dev/nvme0n1`
316
+
- `sudo mount /dev/nvme0n1 /mnt`
317
+
- set up tailscale
318
+
- `sudo tailscale up`
319
+
- `git clone https://github.com/atcosm/links.git`
320
+
- tailscale: disable bootes key expiry
321
+
- rustup `curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh`
322
+
- `cd links/constellation`
323
+
- `sudo apt install libssl-dev` needed
324
+
- `sudo apt install clang` needed for bindgen
325
+
- (in tmux) `cargo build --release`
326
+
- `mkdir ~/backup`
327
+
- `sudo mount.cifs "//truenas.local/folks data" /home/pi/backup -o user=phil,uid=pi`
328
+
- `sudo chown pi:pi /mnt/`
329
+
- `RUST_BACKTRACE=full cargo run --bin rocks-restore-from-backup --release -- --from-backup-dir "/home/pi/backup/constellation-index" --to-data-dir /mnt/constellation-index`
330
+
etc
331
+
- follow above `- raspi node_exporter`
332
+
- configure victoriametrics to scrape the new pi
333
+
- configure ulimit before starting! `ulimit -n 16384`
334
+
- `RUST_BACKTRACE=full cargo run --release -- --backend rocks --data /mnt/constellation-index/ --jetstream us-east-2 --backup /home/pi/backup/constellation-index --backup-interval 6 --max-old-backups 20`
335
+
- add server to nginx gateway upstream: ` server 100.123.79.12:6789; # bootes`
336
+
- stop backups from running on the older instance! `RUST_BACKTRACE=full cargo run --release -- --backend rocks --data /mnt/links-2.rocks/ --jetstream us-east-1`
337
+
- stop upstreaming requests to older instance in nginx
338
+
339
+
340
+
- systemd unit for running: `sudo nano /etc/systemd/system/constellation.service`
341
+
342
+
```ini
343
+
[Unit]
344
+
Description=Constellation backlinks index
345
+
After=network.target
346
+
347
+
[Service]
348
+
User=pi
349
+
WorkingDirectory=/home/pi/links/constellation
350
+
ExecStart=/home/pi/links/target/release/main --backend rocks --data /mnt/constellation-index/ --jetstream us-east-2 --backup /home/pi/backup/constellation-index --backup-interval 6 --max-old-backups 20
351
+
LimitNOFILE=16384
352
+
Restart=always
353
+
354
+
[Install]
355
+
WantedBy=multi-user.target
356
+
```
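then the usual systemd steps to pick it up (standard commands, assuming the unit file above):

```bash
sudo systemctl daemon-reload
sudo systemctl enable --now constellation.service
journalctl -u constellation.service -f   # tail logs to confirm it came up
```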
357
+
358
+
359
+
- todo: overlayfs? would need to figure out builds/updates still, also i guess logs are currently written to sd? (oof)
360
+
- todo: cross-compile for raspi?
361
+
362
+
---
363
+
364
+
some todos
365
+
366
+
- [x] tailscale: exit node
367
+
- [!] link_aggregator: use exit node
368
+
-> worked, but reverted for now: tailscale on raspi was consuming ~50% cpu for the jetstream traffic. this might be near its max since it would have been catching up at the time (max jetstream throughput) but it feels a bit too much. we have to trust the jetstream server and link_aggregator doesn't (yet) make any other external connections, so for now the raspi connects directly from my home again.
369
+
- [x] caddy: reverse proxy
370
+
- [x] build with cache and rate-limit plugins
371
+
- [x] configure systemd to keep it alive
372
+
- [x] configure caddy cache
373
+
- [x] configure caddy rate-limit
374
+
- [ ] configure ~caddy~ nginx to use a health check (once it's added)
375
+
- [ ] ~configure caddy to only expose cache metrics to tailnet :/~
376
+
- [x] make some grafana dashboards
377
+
- [ ] raspi: mount /dev/sda on boot
378
+
- [ ] raspi: run link_aggregator via systemd so it starts on startup (and restarts?)
379
+
380
+
- [x] use nginx instead of caddy
381
+
- [x] nginx: enable cache
382
+
- [x] nginx: rate-limit
383
+
- [ ] nginx: get metrics
384
+
385
+
386
+
387
+
388
+
---
389
+
390
+
nginx cors for constellation + small burst bump
391
+
392
+
```nginx
393
+
upstream cozy_constellation {
394
+
server <tailnet ip>:6789; # bootes; ip so that we don't race on reboot with tailscale coming up, which nginx doesn't like
395
+
keepalive 16;
396
+
}
397
+
398
+
server {
399
+
server_name constellation.microcosm.blue;
400
+
401
+
proxy_cache cozy_zone;
402
+
proxy_cache_background_update on;
403
+
proxy_cache_key "$scheme$proxy_host$uri$is_args$args$http_accept";
404
+
proxy_cache_lock on; # make simultaneous requests for the same uri wait for it to appear in cache instead of hitting origin
405
+
proxy_cache_lock_age 1s;
406
+
proxy_cache_lock_timeout 2s;
407
+
proxy_cache_valid 10s; # default -- should be explicitly set in the response headers
408
+
proxy_cache_valid any 2s; # non-200s default
409
+
proxy_read_timeout 5s;
410
+
proxy_send_timeout 15s;
411
+
proxy_socket_keepalive on;
412
+
413
+
# take over cors responsibility from upstream. `always` applies it to error responses.
414
+
proxy_hide_header 'Access-Control-Allow-Origin';
415
+
proxy_hide_header 'Access-Control-Allow-Methods';
416
+
proxy_hide_header 'Access-Control-Allow-Headers';
417
+
add_header 'Access-Control-Allow-Origin' '*' always;
418
+
add_header 'Access-Control-Allow-Methods' 'GET' always;
419
+
add_header 'Access-Control-Allow-Headers' '*' always;
420
+
421
+
422
+
limit_req zone=cozy_ip_limit nodelay burst=150;
423
+
limit_req zone=cozy_global_limit burst=1800;
424
+
limit_req_status 429;
425
+
426
+
location / {
427
+
proxy_pass http://cozy_constellation;
428
+
include proxy_params;
429
+
proxy_http_version 1.1;
430
+
proxy_set_header Connection ""; # for keepalive
431
+
}
432
+
433
+
434
+
listen 443 ssl; # managed by Certbot
435
+
ssl_certificate /etc/letsencrypt/live/constellation.microcosm.blue/fullchain.pem; # managed by Certbot
436
+
ssl_certificate_key /etc/letsencrypt/live/constellation.microcosm.blue/privkey.pem; # managed by Certbot
437
+
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
438
+
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
439
+
440
+
}
441
+
442
+
server {
443
+
if ($host = constellation.microcosm.blue) {
444
+
return 301 https://$host$request_uri;
445
+
} # managed by Certbot
446
+
447
+
448
+
server_name constellation.microcosm.blue;
449
+
listen 80;
450
+
return 404; # managed by Certbot
451
+
}
452
+
```
453
+
454
+
re-reading about `nodelay`, i should probably remove it -- nginx would then queue requests to upstream, but still service them at the configured limit. it's fine for my internet since the global limit isn't nodelay, but probably less "fair" to clients if there's contention around the global limit (earlier requests would get all of theirs serviced before later ones can get in the queue)
455
+
456
+
leaving it for now though.
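a rough way to observe the limiter from outside (this hammers the public host, so be polite; the request count is arbitrary, and depending on your latency a sequential loop may or may not trip the per-ip limit):

```bash
# fire 200 quick requests and tally status codes; 429s indicate the limit kicked in
for i in $(seq 1 200); do
  curl -s -o /dev/null -w '%{http_code}\n' 'https://constellation.microcosm.blue/'
done | sort | uniq -c
```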
457
+
458
+
459
+
### nginx logs to prom
460
+
461
+
```bash
462
+
curl -LO https://github.com/martin-helmich/prometheus-nginxlog-exporter/releases/download/v1.11.0/prometheus-nginxlog-exporter_1.11.0_linux_amd64.deb
463
+
apt install ./prometheus-nginxlog-exporter_1.11.0_linux_amd64.deb
464
+
systemctl enable prometheus-nginxlog-exporter.service
465
+
466
+
```
467
+
468
+
have it run as www-data (maybe not the best idea but...)
469
+
file `/usr/lib/systemd/system/prometheus-nginxlog-exporter.service`
470
+
set `User` under `[Service]` and remove the capability bounding set
471
+
472
+
```systemd
473
+
User=www-data
474
+
#CapabilityBoundingSet=
475
+
```
476
+
477
+
in `nginx.conf` in `http`:
478
+
479
+
```nginx
480
+
log_format constellation_format "$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\"";
481
+
```
482
+
483
+
in `sites-available/constellation.microcosm.blue` in `server`:
484
+
485
+
```nginx
486
+
# log format must match prometheus-nginx-log-exporter
487
+
access_log /var/log/nginx/constellation-access.log constellation_format;
488
+
```
489
+
490
+
config at `/etc/prometheus-nginxlog-exporter.hcl`
491
+
492
+
493
+
494
+
```bash
495
+
systemctl start prometheus-nginxlog-exporter.service
496
+
```
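to confirm the exporter is actually serving metrics (it listens on :4040 by default, if memory serves):

```bash
curl -s http://localhost:4040/metrics | grep -m5 nginx   # should show nginx_* counters
```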
+35
legacy/old-readme-details.md
+35
legacy/old-readme-details.md
···
1
+
[Constellation](./constellation/)
2
+
--------------------------------------------
3
+
4
+
A global atproto backlink index ✨
5
+
6
+
- Self hostable: handles the full write throughput of the global atproto firehose on a raspberry pi 4b + single SSD
7
+
- Storage efficient: less than 2GB/day disk consumption indexing all references in all lexicons and all non-atproto URLs
8
+
- Handles record deletion, account de/re-activation, and account deletion, ensuring accurate link counts and respecting users' data choices
9
+
- Simple JSON API
10
+
11
+
All social interactions in atproto tend to be represented by links (or references) between PDS records. This index can answer questions like "how many likes does a bsky post have", "who follows an account", "what are all the comments on a [frontpage](https://frontpage.fyi/) post", and more.
12
+
13
+
- **status**: works! api is unstable and likely to change, and no known instances have a full network backfill yet.
14
+
- source: [./constellation/](./constellation/)
15
+
- public instance: [constellation.microcosm.blue](https://constellation.microcosm.blue/)
16
+
17
+
_note: the public instance currently runs on a little raspberry pi in my house, feel free to use it! it comes with only best-effort uptime, no commitment to not breaking the api for now, and possible rate-limiting. if you want to be nice you can put your project name and bsky username (or email) in your user-agent header for api requests._
18
+
19
+
20
+
App: Spacedust
21
+
--------------
22
+
23
+
A notification subscription service 💫
24
+
25
+
using the same "link source" concept as [constellation](./constellation/), offer webhook notifications for new references created to records
26
+
27
+
- **status**: in design
28
+
29
+
30
+
Library: [links](./links/)
31
+
------------------------------------
32
+
33
+
A rust crate (not published on crates.io yet) for optimistically parsing links out of arbitrary atproto PDS records, and potentially canonicalizing them
34
+
35
+
- **status**: unstable, might remain an internal lib for constellation (and spacedust, soon)
+123
legacy/original-notes.md
+123
legacy/original-notes.md
···
1
+
---
2
+
3
+
4
+
old notes follow, ignore
5
+
------------------------
6
+
7
+
8
+
as far as i can tell, atproto lexicons today don't follow much of a convention for referencing across documents: sometimes it's a StrongRef, sometimes it's a DID, sometimes it's a bare at-uri. lexicon authors choose any old link-sounding name for the key in their document.
9
+
10
+
it's pretty messy so embrace the mess: atproto wants to be part of the web, so this library will also extract URLs and other URIs if you want it to. all the links.
11
+
12
+
13
+
why
14
+
---
15
+
16
+
the atproto firehose that bluesky sprays at you will contain raw _contents_ from people's pdses. these are isolated, decontextualized updates. it's very easy to build some kinds of interesting downstream apps off of this feed.
17
+
18
+
- bluesky posts (firesky, deletions, )
19
+
- bluesky post stats (emojis, )
20
+
- trending keywords ()
21
+
22
+
but bringing almost any kind of _context_ into your project requires a big step up in complexity and potentially cost: you're entering "appview" territory. _how many likes does a post have? who follows this account?_
23
+
24
+
you own your atproto data: it's kept in your personal data repository (PDS) and no one else can write to it. when someone likes your post, they create a "like" record in their _own_ pds, and that like belongs to _them_, not to you/your post.
25
+
26
+
in the firehose you'll see an `app.bsky.feed.post` record created, with no details about who has liked it. then you'll see separate `app.bsky.feed.like` records show up for each like that comes in on that post, with no context about the post except a random-looking reference to it. storing these records so you can join them back together is up to you!
27
+
28
+
**so, why**
29
+
30
+
everything is links, and they're a mess, but they all kinda work the same, so maybe some tooling can bring down that big step in complexity from firehose raw-content apps -> apps requiring any social context.
31
+
32
+
everything is links:
33
+
34
+
- likes
35
+
- follows
36
+
- blocks
37
+
- reposts
38
+
- quotes
39
+
40
+
some low-level things you could make from links:
41
+
42
+
- notification streams (part of ucosm)
43
+
- a global reverse index (part of ucosm)
44
+
45
+
i think that making these low-level services as easy to use as jetstream could open up pathways for building more atproto apps that operate at full scale with interesting features for reasonable effort at low cost to operate.
46
+
47
+
48
+
extracting links
49
+
---------------
50
+
51
+
52
+
- low-level: pass a &str of a field value and get a parsed link back
53
+
54
+
- med-level: pass a &str of a record in json form and get a list of parsed links + json paths back. (todo: should also handle dag-cbor prob?)
55
+
56
+
- high-ish level: pass the json record and maybe apply some pre-loaded rules based on known lexicons to get the best result.
57
+
58
+
for now, a link is only considered if it matches the entire value of the record's field -- links embedded in text content are not included. note that urls in bluesky posts _will_ still be extracted, since they are broken out into facets.
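
to make the med-level shape concrete, here's a hypothetical sketch (the function and types are made up for illustration, not the crate's real interface) that walks a json record and collects full-field link values with their json paths:

```rust
use serde_json::Value;

// hypothetical med-level extraction: only *entire* string field values are
// considered links, matching the note above
fn extract_links(value: &Value, path: String, out: &mut Vec<(String, String)>) {
    match value {
        Value::String(s)
            if s.starts_with("at://") || s.starts_with("did:") || s.starts_with("https://") =>
        {
            out.push((path, s.clone()));
        }
        Value::Object(map) => {
            for (k, v) in map {
                extract_links(v, format!("{path}.{k}"), out);
            }
        }
        Value::Array(items) => {
            for (i, v) in items.iter().enumerate() {
                extract_links(v, format!("{path}[{i}]"), out);
            }
        }
        _ => {}
    }
}

fn main() {
    let record: Value = serde_json::from_str(
        r#"{"subject": {"uri": "at://did:plc:44ybard66vv44zksje25o7dz/app.bsky.feed.post/3jwdwj2ctlk26"}}"#,
    )
    .unwrap();
    let mut links = Vec::new();
    extract_links(&record, "$".into(), &mut links);
    println!("{links:?}"); // [("$.subject.uri", "at://did:plc:…")]
}
```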
59
+
60
+
61
+
resolving / canonicalizing links
62
+
--------------------------------
63
+
64
+
65
+
### at-uris
66
+
67
+
every at-uri has at least two equivalent forms, one with a `DID`, and one with an account handle. the at-uri spec [illustrates this by example](https://atproto.com/specs/at-uri-scheme):
68
+
69
+
- `at://did:plc:44ybard66vv44zksje25o7dz/app.bsky.feed.post/3jwdwj2ctlk26`
70
+
- `at://bnewbold.bsky.team/app.bsky.feed.post/3jwdwj2ctlk26`
71
+
72
+
some applications, like a reverse link index, may wish to canonicalize at-uris to a single form. the `DID`-form is stable as an account changes its handle and probably the right choice to canonicalize to, but maybe some apps would actually prefer to canonicalize to handles?
73
+
74
+
hopefully atrium will make it easy to resolve at-uris.
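
a rough sketch of what did-form canonicalization could look like -- `resolve_handle` below is a made-up stand-in for whatever identity resolution (atrium or otherwise) ends up being used:

```rust
// stub standing in for real handle resolution (DNS TXT and/or
// /.well-known/atproto-did); always "fails" here
async fn resolve_handle(_handle: &str) -> Option<String> {
    None
}

// rewrite an at-uri to its stable DID form, resolving the handle if needed
async fn canonicalize_at_uri(uri: &str) -> Option<String> {
    let rest = uri.strip_prefix("at://")?;
    let (authority, tail) = match rest.split_once('/') {
        Some((a, t)) => (a, Some(t)),
        None => (rest, None),
    };
    if authority.starts_with("did:") {
        return Some(uri.to_string()); // already canonical
    }
    let did = resolve_handle(authority).await?;
    Some(match tail {
        Some(t) => format!("at://{did}/{t}"),
        None => format!("at://{did}"),
    })
}
```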
75
+
76
+
77
+
### urls
78
+
79
+
canonicalizing URLs is more annoying but also a bit more established. lots of details.
80
+
81
+
- do we have to deal with punycode?
82
+
- follow redirects (todo: only permanent ones, or all?)
83
+
- check for rel=canonical http header and possibly follow it
84
+
- check link rel=canonical meta tag and possibly follow it
85
+
- do we need to check site maps??
86
+
- do we have to care at all about AMP?
87
+
- do we want anything to do with url shorteners??
88
+
- how do multilingual sites affect this?
89
+
- do we have to care about `script type="application/ld+json"` ???
90
+
91
+
ugh. is there a crate for this.
92
+
93
+
94
+
### relative uris?
95
+
96
+
links might be relative, in which case they might need to be made absolute before being useful. is that a concern for this library, or up to the user? (seems like we might not have the context here to determine the absolute form)
97
+
98
+
99
+
### canonicalizing
100
+
101
+
there should be a few async functions available to canonicalize already-parsed links.
102
+
103
+
- what happens if a link can't be resolved?
104
+
105
+
106
+
---
107
+
108
+
- using `tinyjson` because it's nice -- maybe should switch to serde_json to share deps with atrium?
109
+
110
+
- would use atrium for parsing at-uris, but it's not in there. there's a did-only version in the non-lib commands.rs. its identifier parser is strict to did + handle, which makes sense, but for our purposes we might want to allow unknown methods too?
111
+
112
+
- rsky-syntax has an aturi
113
+
- adenosyne also
114
+
- might come back to these
115
+
116
+
117
+
-------
118
+
119
+
rocks
120
+
121
+
```bash
122
+
ROCKSDB_LIB_DIR=/nix/store/z2chn0hsik0clridr8mlprx1cngh1g3c-rocksdb-9.7.3/lib/ cargo build
123
+
```
+196
legacy/ufos ops (move to micro-ops).md
···
1
+
ufos ops
2
+
3
+
btrfs snapshots: snapper
4
+
5
+
```bash
6
+
sudo apt install snapper
7
+
sudo snapper -c ufos-db create-config /mnt/ufos-db
8
+
9
+
# edit /etc/snapper/configs/ufos-db
10
+
# change
11
+
TIMELINE_MIN_AGE="1800"
12
+
TIMELINE_LIMIT_HOURLY="10"
13
+
TIMELINE_LIMIT_DAILY="10"
14
+
TIMELINE_LIMIT_WEEKLY="0"
15
+
TIMELINE_LIMIT_MONTHLY="10"
16
+
TIMELINE_LIMIT_YEARLY="10"
17
+
# to
18
+
TIMELINE_MIN_AGE="1800"
19
+
TIMELINE_LIMIT_HOURLY="22"
20
+
TIMELINE_LIMIT_DAILY="4"
21
+
TIMELINE_LIMIT_WEEKLY="0"
22
+
TIMELINE_LIMIT_MONTHLY="0"
23
+
TIMELINE_LIMIT_YEARLY="0"
24
+
```
25
+
26
+
this should be enough?
27
+
28
+
list snapshots:
29
+
30
+
```bash
31
+
sudo snapper -c ufos-db list
32
+
```
33
+
34
+
systemd
35
+
36
+
create file: `/etc/systemd/system/ufos.service`
37
+
38
+
```ini
39
+
[Unit]
40
+
Description=UFOs-API
41
+
After=network.target
42
+
43
+
[Service]
44
+
User=pi
45
+
WorkingDirectory=/home/pi/
46
+
ExecStart=/home/pi/ufos --jetstream us-west-2 --data /mnt/ufos-db/
47
+
Environment="RUST_LOG=info"
48
+
LimitNOFILE=16384
49
+
Restart=always
50
+
51
+
[Install]
52
+
WantedBy=multi-user.target
53
+
```
54
+
55
+
then
56
+
57
+
```bash
58
+
sudo systemctl daemon-reload
59
+
sudo systemctl enable ufos
60
+
sudo systemctl start ufos
61
+
```
62
+
63
+
monitor with
64
+
65
+
```bash
66
+
journalctl -u ufos -f
67
+
```
68
+
69
+
make sure a backup dir exists
70
+
71
+
```bash
72
+
mkdir /home/pi/backup
73
+
```
74
+
75
+
mount the NAS
76
+
77
+
```bash
78
+
sudo mount.cifs "//truenas.local/folks data" /home/pi/backup -o user=phil,uid=pi
79
+
```
80
+
81
+
manual rsync
82
+
83
+
```bash
84
+
sudo rsync -ahP --delete /mnt/ufos-db/.snapshots/1/snapshot/ backup/ufos/
85
+
```
86
+
87
+
backup script sketch
88
+
89
+
```bash
90
+
NUM=$(sudo snapper --csvout -c ufos-db list --type single --columns number | tail -n1)
91
+
sudo rsync -ahP --delete "/mnt/ufos-db/.snapshots/${NUM}/snapshot/" backup/ufos/
92
+
```
93
+
94
+
just crontab it?
95
+
96
+
`sudo crontab -e`
97
+
```bash
98
+
0 1/6 * * * rsync -ahP --delete "/mnt/ufos-db/.snapshots/$(sudo snapper --csvout -c ufos-db list --columns number | tail -n1)/snapshot/" backup/ufos/
99
+
```
100
+
101
+
^^ try once initial backup is done
102
+
103
+
104
+
--columns subvolume,number
105
+
106
+
subvolume
107
+
number
108
+
109
+
110
+
111
+
112
+
gateway: follow constellation for nginx->prom thing
113
+
114
+
config at `/etc/prometheus-nginxlog-exporter.hcl`
115
+
116
+
before: `/etc/prometheus-nginxlog-exporter.hcl`
117
+
118
+
```hcl
119
+
listen {
120
+
port = 4044
121
+
}
122
+
123
+
namespace "nginx" {
124
+
source = {
125
+
files = [
126
+
"/var/log/nginx/constellation-access.log"
127
+
]
128
+
}
129
+
130
+
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
131
+
132
+
labels {
133
+
app = "constellation"
134
+
}
135
+
136
+
relabel "cache_status" {
137
+
from = "upstream_cache_status"
138
+
}
139
+
}
140
+
```
141
+
142
+
after:
143
+
144
+
```hcl
145
+
listen {
146
+
port = 4044
147
+
}
148
+
149
+
namespace "constellation" {
150
+
source = {
151
+
files = [
152
+
"/var/log/nginx/constellation-access.log"
153
+
]
154
+
}
155
+
156
+
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
157
+
158
+
labels {
159
+
app = "constellation"
160
+
}
161
+
162
+
relabel "cache_status" {
163
+
from = "upstream_cache_status"
164
+
}
165
+
166
+
namespace_label = "vhost"
167
+
metrics_override = { prefix = "nginx" }
168
+
}
169
+
170
+
namespace "ufos" {
171
+
source = {
172
+
files = [
173
+
"/var/log/nginx/ufos-access.log"
174
+
]
175
+
}
176
+
177
+
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
178
+
179
+
labels {
180
+
app = "ufos"
181
+
}
182
+
183
+
relabel "cache_status" {
184
+
from = "upstream_cache_status"
185
+
}
186
+
187
+
namespace_label = "vhost"
188
+
metrics_override = { prefix = "nginx" }
189
+
}
190
+
```
191
+
192
+
193
+
```bash
194
+
systemctl start prometheus-nginxlog-exporter.service
195
+
```
196
+
+1
pocket/.gitignore
···
1
+
prefs.sqlite3*
+19
pocket/Cargo.toml
···
1
+
[package]
2
+
name = "pocket"
3
+
version = "0.1.0"
4
+
edition = "2024"
5
+
6
+
[dependencies]
7
+
atrium-crypto = "0.1.2"
8
+
clap = { version = "4.5.41", features = ["derive"] }
9
+
jwt-compact = { git = "https://github.com/fatfingers23/jwt-compact.git", features = ["es256k"] }
10
+
log = "0.4.27"
11
+
poem = { version = "3.1.12", features = ["acme", "static-files"] }
12
+
poem-openapi = { version = "5.1.16", features = ["scalar"] }
13
+
reqwest = { version = "0.12.22", features = ["json"] }
14
+
rusqlite = "0.37.0"
15
+
serde = { version = "1.0.219", features = ["derive"] }
16
+
serde_json = { version = "1.0.141" }
17
+
thiserror = "2.0.16"
18
+
tokio = { version = "1.47.0", features = ["full"] }
19
+
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
+17
pocket/api-description.md
···
1
+
_A pocket dimension to stash a bit of non-public user data._
2
+
3
+
4
+
# Pocket: user preference storage
5
+
6
+
This API leverages atproto service proxying to offer a bit of per-user per-app non-public data storage.
7
+
Perfect for things like application preferences that might be better left out of the public PDS data.
8
+
9
+
The intent is to use oauth scopes to isolate storage on a per-application basis, and to allow easy data migration from a community hosted instance to your own if you end up needing that.
10
+
11
+
12
+
### Current status
13
+
14
+
> [!important]
15
+
> Pocket is currently in a **v0, pre-release state**. There is one production instance and you can use it! Expect short downtimes for restarts as development progresses and occasional data loss until it's stable.
16
+
17
+
ATProto might end up adding a similar feature to [PDSs](https://atproto.com/guides/glossary#pds-personal-data-server). If/when that happens, you should use it instead of this!
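
As a concrete sketch of the client side (the PDS host, access token, and pocket domain below are all placeholders): the request goes to the user's *own* PDS with their session token, and the `atproto-proxy` header routes it to a Pocket instance. `#pocket_prefs` matches the service id in Pocket's did doc.

```rust
use reqwest::Client;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    const PDS_HOST: &str = "https://pds.example.com"; // the user's own PDS
    const ACCESS_JWT: &str = "<atproto session access token>";

    let prefs = Client::new()
        .get(format!("{PDS_HOST}/xrpc/com.bad-example.pocket.getPreferences"))
        .bearer_auth(ACCESS_JWT)
        // ask the PDS to proxy to a pocket instance's #pocket_prefs service
        .header("atproto-proxy", "did:web:pocket.example.com#pocket_prefs")
        .send()
        .await?
        .error_for_status()?
        .text()
        .await?;

    println!("{prefs}");
    Ok(())
}
```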
+7
pocket/src/lib.rs
+34
pocket/src/main.rs
···
1
+
use clap::Parser;
2
+
use pocket::{Storage, serve};
3
+
use std::path::PathBuf;
4
+
5
+
/// Pocket: per-user per-app preference storage
6
+
#[derive(Parser, Debug, Clone)]
7
+
#[command(version, about, long_about = None)]
8
+
struct Args {
9
+
/// path to the sqlite db file
10
+
#[arg(long)]
11
+
db: Option<PathBuf>,
12
+
/// just initialize the db and exit
13
+
#[arg(long, action)]
14
+
init_db: bool,
15
+
/// the domain for serving a did doc (unused if running behind reflector)
16
+
#[arg(long)]
17
+
domain: Option<String>,
18
+
}
19
+
20
+
#[tokio::main]
21
+
async fn main() {
22
+
tracing_subscriber::fmt::init();
23
+
log::info!("๐ hi");
24
+
let args = Args::parse();
25
+
let domain = args.domain.unwrap_or("bad-example.com".into());
26
+
let db_path = args.db.unwrap_or("prefs.sqlite3".into());
27
+
if args.init_db {
28
+
Storage::init(&db_path).unwrap();
29
+
log::info!("๐ initialized db at {db_path:?}. bye")
30
+
} else {
31
+
let storage = Storage::connect(db_path).unwrap();
32
+
serve(&domain, storage).await
33
+
}
34
+
}
+265
pocket/src/server.rs
···
1
+
use crate::{Storage, TokenVerifier};
2
+
use poem::{
3
+
Endpoint, EndpointExt, Route, Server,
4
+
endpoint::{StaticFileEndpoint, make_sync},
5
+
http::Method,
6
+
listener::TcpListener,
7
+
middleware::{CatchPanic, Cors, Tracing},
8
+
};
9
+
use poem_openapi::{
10
+
ApiResponse, ContactObject, ExternalDocumentObject, Object, OpenApi, OpenApiService,
11
+
SecurityScheme, Tags,
12
+
auth::Bearer,
13
+
payload::{Json, PlainText},
14
+
types::Example,
15
+
};
16
+
use serde::Serialize;
17
+
use serde_json::{Value, json};
18
+
use std::sync::{Arc, Mutex};
19
+
20
+
#[derive(Debug, SecurityScheme)]
21
+
#[oai(ty = "bearer")]
22
+
struct XrpcAuth(Bearer);
23
+
24
+
#[derive(Tags)]
25
+
enum ApiTags {
26
+
/// Custom pocket APIs
27
+
#[oai(rename = "Pocket APIs")]
28
+
Pocket,
29
+
}
30
+
31
+
#[derive(Object)]
32
+
#[oai(example = true)]
33
+
struct XrpcErrorResponseObject {
34
+
/// Should correspond to an error `name` in the lexicon errors array
35
+
error: String,
36
+
/// Human-readable description and possibly additional context
37
+
message: String,
38
+
}
39
+
impl Example for XrpcErrorResponseObject {
40
+
fn example() -> Self {
41
+
Self {
42
+
error: "PreferencesNotFound".to_string(),
43
+
message: "No preferences were found for this user".to_string(),
44
+
}
45
+
}
46
+
}
47
+
type XrpcError = Json<XrpcErrorResponseObject>;
48
+
fn xrpc_error(error: impl AsRef<str>, message: impl AsRef<str>) -> XrpcError {
49
+
Json(XrpcErrorResponseObject {
50
+
error: error.as_ref().to_string(),
51
+
message: message.as_ref().to_string(),
52
+
})
53
+
}
54
+
55
+
#[derive(Debug, Object)]
56
+
#[oai(example = true)]
57
+
struct BskyPrefsObject {
58
+
/// arbitrary preferences JSON for this user + app
59
+
preferences: Value,
60
+
}
61
+
impl Example for BskyPrefsObject {
62
+
fn example() -> Self {
63
+
Self {
64
+
preferences: json!({
65
+
"hello": "world",
66
+
}),
67
+
}
68
+
}
69
+
}
70
+
71
+
#[derive(ApiResponse)]
72
+
enum GetBskyPrefsResponse {
73
+
/// Record found
74
+
#[oai(status = 200)]
75
+
Ok(Json<BskyPrefsObject>),
76
+
/// Bad request or no preferences to return
77
+
#[oai(status = 400)]
78
+
BadRequest(XrpcError),
79
+
}
80
+
81
+
#[derive(ApiResponse)]
82
+
enum PutBskyPrefsResponse {
83
+
/// Preferences saved
84
+
#[oai(status = 200)]
85
+
Ok(PlainText<String>),
86
+
/// Bad request or failure to save
87
+
#[oai(status = 400)]
88
+
BadRequest(XrpcError),
89
+
// /// Server errors
90
+
// #[oai(status = 500)]
91
+
// ServerError(XrpcError),
92
+
}
93
+
94
+
struct Xrpc {
95
+
verifier: TokenVerifier,
96
+
storage: Arc<Mutex<Storage>>,
97
+
}
98
+
99
+
#[OpenApi]
100
+
impl Xrpc {
101
+
/// com.bad-example.pocket.getPreferences
102
+
///
103
+
/// get stored preferences
104
+
#[oai(
105
+
path = "/com.bad-example.pocket.getPreferences",
106
+
method = "get",
107
+
tag = "ApiTags::Pocket"
108
+
)]
109
+
async fn pocket_get_prefs(&self, XrpcAuth(auth): XrpcAuth) -> GetBskyPrefsResponse {
110
+
let (did, aud) = match self
111
+
.verifier
112
+
.verify("com.bad-example.pocket.getPreferences", &auth.token)
113
+
.await
114
+
{
115
+
Ok(d) => d,
116
+
Err(e) => return GetBskyPrefsResponse::BadRequest(xrpc_error("boooo", e.to_string())),
117
+
};
118
+
log::info!("verified did: {did}/{aud}");
119
+
120
+
let storage = self.storage.clone();
121
+
122
+
let Ok(Ok(res)) = tokio::task::spawn_blocking(move || {
123
+
storage
124
+
.lock()
125
+
.unwrap()
126
+
.get(&did, &aud)
127
+
.inspect_err(|e| log::error!("failed to get prefs: {e}"))
128
+
})
129
+
.await
130
+
else {
131
+
return GetBskyPrefsResponse::BadRequest(xrpc_error("boooo", "failed to get from db"));
132
+
};
133
+
134
+
let Some(serialized) = res else {
135
+
return GetBskyPrefsResponse::BadRequest(xrpc_error(
136
+
"NotFound",
137
+
"could not find prefs for u",
138
+
));
139
+
};
140
+
141
+
let preferences = match serde_json::from_str(&serialized) {
142
+
Ok(v) => v,
143
+
Err(e) => {
144
+
log::error!("failed to deserialize prefs: {e}");
145
+
return GetBskyPrefsResponse::BadRequest(xrpc_error(
146
+
"boooo",
147
+
"failed to deserialize prefs",
148
+
));
149
+
}
150
+
};
151
+
152
+
GetBskyPrefsResponse::Ok(Json(BskyPrefsObject { preferences }))
153
+
}
154
+
155
+
/// com.bad-example.pocket.putPreferences
156
+
///
157
+
/// store bluesky prefs
158
+
#[oai(
159
+
path = "/com.bad-example.pocket.putPreferences",
160
+
method = "post",
161
+
tag = "ApiTags::Pocket"
162
+
)]
163
+
async fn pocket_put_prefs(
164
+
&self,
165
+
XrpcAuth(auth): XrpcAuth,
166
+
Json(prefs): Json<BskyPrefsObject>,
167
+
) -> PutBskyPrefsResponse {
168
+
let (did, aud) = match self
169
+
.verifier
170
+
.verify("com.bad-example.pocket.putPreferences", &auth.token)
171
+
.await
172
+
{
173
+
Ok(d) => d,
174
+
Err(e) => return PutBskyPrefsResponse::BadRequest(xrpc_error("boooo", e.to_string())),
175
+
};
176
+
log::info!("verified did: {did}/{aud}");
177
+
log::warn!("received prefs: {prefs:?}");
178
+
179
+
let storage = self.storage.clone();
180
+
let serialized = prefs.preferences.to_string();
181
+
182
+
let Ok(Ok(())) = tokio::task::spawn_blocking(move || {
183
+
storage
184
+
.lock()
185
+
.unwrap()
186
+
.put(&did, &aud, &serialized)
187
+
.inspect_err(|e| log::error!("failed to insert prefs: {e}"))
188
+
})
189
+
.await
190
+
else {
191
+
return PutBskyPrefsResponse::BadRequest(xrpc_error("boooo", "failed to put to db"));
192
+
};
193
+
194
+
PutBskyPrefsResponse::Ok(PlainText("saved.".to_string()))
195
+
}
196
+
}
197
+
198
+
#[derive(Debug, Clone, Serialize)]
199
+
#[serde(rename_all = "camelCase")]
200
+
struct AppViewService {
201
+
id: String,
202
+
r#type: String,
203
+
service_endpoint: String,
204
+
}
205
+
#[derive(Debug, Clone, Serialize)]
206
+
struct AppViewDoc {
207
+
id: String,
208
+
service: [AppViewService; 2],
209
+
}
210
+
/// Serve a did document for did:web for this to be an xrpc appview
211
+
fn get_did_doc(domain: &str) -> impl Endpoint + use<> {
212
+
let doc = poem::web::Json(AppViewDoc {
213
+
id: format!("did:web:{domain}"),
214
+
service: [
215
+
AppViewService {
216
+
id: "#pocket_prefs".to_string(),
217
+
r#type: "PocketPreferences".to_string(),
218
+
service_endpoint: format!("https://{domain}"),
219
+
},
220
+
AppViewService {
221
+
id: "#bsky_appview".to_string(),
222
+
r#type: "BlueskyAppview".to_string(),
223
+
service_endpoint: format!("https://{domain}"),
224
+
},
225
+
],
226
+
});
227
+
make_sync(move |_| doc.clone())
228
+
}
229
+
230
+
pub async fn serve(domain: &str, storage: Storage) -> () {
231
+
let verifier = TokenVerifier::default();
232
+
let api_service = OpenApiService::new(
233
+
Xrpc {
234
+
verifier,
235
+
storage: Arc::new(Mutex::new(storage)),
236
+
},
237
+
"Pocket",
238
+
env!("CARGO_PKG_VERSION"),
239
+
)
240
+
.server(domain)
241
+
.url_prefix("/xrpc")
242
+
.contact(
243
+
ContactObject::new()
244
+
.name("@microcosm.blue")
245
+
.url("https://bsky.app/profile/microcosm.blue"),
246
+
)
247
+
.description(include_str!("../api-description.md"))
248
+
.external_document(ExternalDocumentObject::new("https://microcosm.blue/pocket"));
249
+
250
+
let app = Route::new()
251
+
.nest("/openapi", api_service.spec_endpoint())
252
+
.nest("/xrpc/", api_service)
253
+
.at("/.well-known/did.json", get_did_doc(domain))
254
+
.at("/", StaticFileEndpoint::new("./static/index.html"))
255
+
.with(
256
+
Cors::new()
257
+
.allow_method(Method::GET)
258
+
.allow_method(Method::POST),
259
+
)
260
+
.with(CatchPanic::new())
261
+
.with(Tracing);
262
+
263
+
let listener = TcpListener::bind("127.0.0.1:3000");
264
+
Server::new(listener).name("pocket").run(app).await.unwrap();
265
+
}
+50
pocket/src/storage.rs
···
1
+
use rusqlite::{Connection, OptionalExtension, Result};
2
+
use std::path::Path;
3
+
4
+
pub struct Storage {
5
+
con: Connection,
6
+
}
7
+
8
+
impl Storage {
9
+
pub fn connect(path: impl AsRef<Path>) -> Result<Self> {
10
+
let con = Connection::open(path)?;
11
+
con.pragma_update(None, "journal_mode", "WAL")?;
12
+
con.pragma_update(None, "synchronous", "NORMAL")?;
13
+
con.pragma_update(None, "busy_timeout", "100")?;
14
+
con.pragma_update(None, "foreign_keys", "ON")?;
15
+
Ok(Self { con })
16
+
}
17
+
pub fn init(path: impl AsRef<Path>) -> Result<Self> {
18
+
let me = Self::connect(path)?;
19
+
me.con.execute(
20
+
r#"
21
+
create table prefs (
22
+
actor text not null,
23
+
aud text not null,
24
+
pref text not null,
25
+
primary key (actor, aud)
26
+
) strict"#,
27
+
(),
28
+
)?;
29
+
Ok(me)
30
+
}
31
+
pub fn put(&self, actor: &str, aud: &str, pref: &str) -> Result<()> {
32
+
self.con.execute(
33
+
r#"insert into prefs (actor, aud, pref)
34
+
values (?1, ?2, ?3)
35
+
on conflict do update set pref = excluded.pref"#,
36
+
[actor, aud, pref],
37
+
)?;
38
+
Ok(())
39
+
}
40
+
pub fn get(&self, actor: &str, aud: &str) -> Result<Option<String>> {
41
+
self.con
42
+
.query_one(
43
+
r#"select pref from prefs
44
+
where actor = ?1 and aud = ?2"#,
45
+
[actor, aud],
46
+
|row| row.get(0),
47
+
)
48
+
.optional()
49
+
}
50
+
}
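
A minimal round-trip with this API (assuming `Storage` is in scope; the did, aud, and pref values are just placeholders):

```rust
fn main() -> rusqlite::Result<()> {
    // create the db and table, then upsert + read back one pref blob
    let storage = Storage::init("prefs.sqlite3")?;
    storage.put(
        "did:plc:example",
        "did:web:pocket.example.com",
        r#"{"hello":"world"}"#,
    )?;
    let got = storage.get("did:plc:example", "did:web:pocket.example.com")?;
    assert_eq!(got.as_deref(), Some(r#"{"hello":"world"}"#));
    Ok(())
}
```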
+143
pocket/src/token.rs
···
1
+
use atrium_crypto::did::parse_multikey;
2
+
use atrium_crypto::verify::Verifier;
3
+
use jwt_compact::UntrustedToken;
4
+
use serde::Deserialize;
5
+
use std::collections::HashMap;
6
+
use std::time::Duration;
7
+
use thiserror::Error;
8
+
9
+
#[derive(Debug, Deserialize)]
10
+
struct MiniDoc {
11
+
signing_key: String,
12
+
did: String,
13
+
}
14
+
15
+
#[derive(Error, Debug)]
16
+
pub enum VerifyError {
17
+
#[error("The cross-service authorization token failed verification: {0}")]
18
+
VerificationFailed(&'static str),
19
+
#[error("Error trying to resolve the DID to a signing key, retry in a moment: {0}")]
20
+
ResolutionFailed(&'static str),
21
+
}
22
+
23
+
pub struct TokenVerifier {
24
+
client: reqwest::Client,
25
+
}
26
+
27
+
impl TokenVerifier {
28
+
pub fn new() -> Self {
29
+
let client = reqwest::Client::builder()
30
+
.user_agent(format!(
31
+
"microcosm pocket v{} (dev: @bad-example.com)",
32
+
env!("CARGO_PKG_VERSION")
33
+
))
34
+
.no_proxy()
35
+
.timeout(Duration::from_secs(12)) // slingshot timeout is 10s
36
+
.build()
37
+
.unwrap();
38
+
Self { client }
39
+
}
40
+
41
+
pub async fn verify(
42
+
&self,
43
+
expected_lxm: &str,
44
+
token: &str,
45
+
) -> Result<(String, String), VerifyError> {
46
+
let Ok(untrusted) = UntrustedToken::new(token) else {
    return Err(VerifyError::VerificationFailed("could not parse jwt"));
};
47
+
48
+
// danger! unfortunately we need to decode the DID from the jwt body before we have a public key to verify the jwt with
49
+
let Ok(untrusted_claims) =
50
+
untrusted.deserialize_claims_unchecked::<HashMap<String, String>>()
51
+
else {
52
+
return Err(VerifyError::VerificationFailed(
53
+
"could not deserialize jtw claims",
54
+
));
55
+
};
56
+
57
+
// get the (untrusted!) claimed DID
58
+
let Some(untrusted_did) = untrusted_claims.custom.get("iss") else {
59
+
return Err(VerifyError::VerificationFailed(
60
+
"jwt must include the user's did in `iss`",
61
+
));
62
+
};
63
+
64
+
// bail if it's not even a user-ish did
65
+
if !untrusted_did.starts_with("did:") {
66
+
return Err(VerifyError::VerificationFailed("iss should be a did"));
67
+
}
68
+
if untrusted_did.contains("#") {
69
+
return Err(VerifyError::VerificationFailed(
70
+
"iss should be a user did without a service identifier",
71
+
));
72
+
}
73
+
74
+
let endpoint =
75
+
"https://slingshot.microcosm.blue/xrpc/com.bad-example.identity.resolveMiniDoc";
76
+
let doc: MiniDoc = self
77
+
.client
78
+
.get(format!("{endpoint}?identifier={untrusted_did}"))
79
+
.send()
80
+
.await
81
+
.map_err(|_| VerifyError::ResolutionFailed("failed to fetch minidoc"))?
82
+
.error_for_status()
83
+
.map_err(|_| VerifyError::ResolutionFailed("non-ok response for minidoc"))?
84
+
.json()
85
+
.await
86
+
.map_err(|_| VerifyError::ResolutionFailed("failed to parse json to minidoc"))?;
87
+
88
+
// sanity check before we go ahead with this signing key
89
+
if doc.did != *untrusted_did {
90
+
return Err(VerifyError::VerificationFailed(
91
+
"wtf, resolveMiniDoc returned a doc for a different DID, slingshot bug",
92
+
));
93
+
}
94
+
95
+
let Ok((alg, public_key)) = parse_multikey(&doc.signing_key) else {
96
+
return Err(VerifyError::VerificationFailed(
97
+
"could not parse signing key form minidoc",
98
+
));
99
+
};
100
+
101
+
// i _guess_ we've successfully bootstrapped the verification of the jwt unless this fails
102
+
if let Err(e) = Verifier::default().verify(
103
+
alg,
104
+
&public_key,
105
+
&untrusted.signed_data,
106
+
untrusted.signature_bytes(),
107
+
) {
108
+
log::warn!("jwt verification failed: {e}");
109
+
return Err(VerifyError::VerificationFailed(
110
+
"jwt signature verification failed",
111
+
));
112
+
}
113
+
114
+
// past this point we should have established trust. crossing t's and dotting i's.
115
+
let did = &untrusted_did;
116
+
let claims = &untrusted_claims;
117
+
118
+
let Some(aud) = claims.custom.get("aud") else {
119
+
return Err(VerifyError::VerificationFailed("missing aud"));
120
+
};
121
+
let Some(mut aud) = aud.strip_prefix("did:web:") else {
122
+
return Err(VerifyError::VerificationFailed("expected a did:web aud"));
123
+
};
124
+
if let Some((aud_without_hash, _)) = aud.split_once("#") {
125
+
log::warn!("aud claim is missing service id fragment: {aud:?}");
126
+
aud = aud_without_hash;
127
+
}
128
+
let Some(lxm) = claims.custom.get("lxm") else {
129
+
return Err(VerifyError::VerificationFailed("missing lxm"));
130
+
};
131
+
if lxm != expected_lxm {
132
+
return Err(VerifyError::VerificationFailed("wrong lxm"));
133
+
}
134
+
135
+
Ok((did.to_string(), aud.to_string()))
136
+
}
137
+
}
138
+
139
+
impl Default for TokenVerifier {
140
+
fn default() -> Self {
141
+
Self::new()
142
+
}
143
+
}
+67
pocket/static/index.html
···
1
+
<!doctype html>
2
+
<html lang="en">
3
+
<head>
4
+
<meta charset="utf-8" />
5
+
<title>Pocket: atproto user preference storage</title>
6
+
<meta name="viewport" content="width=device-width, initial-scale=1" />
7
+
<meta name="description" content="API Documentation for Pocket, a simple user-preference storage system for atproto" />
8
+
<style>
9
+
:root {
10
+
--scalar-small: 13px;
11
+
}
12
+
.scalar-app .markdown .markdown-alert {
13
+
font-size: var(--scalar-small);
14
+
}
15
+
.sidebar-heading-link-title {
16
+
line-height: 1.2;
17
+
}
18
+
.custom-header {
19
+
height: 42px;
20
+
background-color: #221828;
21
+
box-shadow: inset 0 -1px 0 var(--scalar-border-color);
22
+
color: var(--scalar-color-1);
23
+
font-size: var(--scalar-font-size-3);
24
+
font-family: 'Iowan Old Style', 'Palatino Linotype', 'URW Palladio L', P052, serif;
25
+
padding: 0 18px;
26
+
justify-content: space-between;
27
+
}
28
+
.custom-header,
29
+
.custom-header nav {
30
+
display: flex;
31
+
align-items: center;
32
+
gap: 18px;
33
+
}
34
+
.custom-header a:hover {
35
+
color: var(--scalar-color-2);
36
+
}
37
+
38
+
.light-mode .custom-header {
39
+
background-color: thistle;
40
+
}
41
+
</style>
42
+
</head>
43
+
<body>
44
+
<header class="custom-header scalar-app">
45
+
<p>
46
+
TODO: thing
47
+
</p>
48
+
<nav>
49
+
<b>a <a href="https://microcosm.blue">microcosm</a> project</b>
50
+
<a href="https://bsky.app/profile/microcosm.blue">@microcosm.blue</a>
51
+
<a href="https://github.com/at-microcosm">github</a>
52
+
</nav>
53
+
</header>
54
+
55
+
<script id="api-reference" type="application/json" data-url="/openapi"></script>
56
+
57
+
<script>
58
+
var configuration = {
59
+
theme: 'purple',
60
+
hideModels: true,
61
+
}
62
+
document.getElementById('api-reference').dataset.configuration = JSON.stringify(configuration)
63
+
</script>
64
+
65
+
<script src="https://cdn.jsdelivr.net/npm/@scalar/api-reference"></script>
66
+
</body>
67
+
</html>
+8
quasar/Cargo.toml
+3
quasar/readme.md
+57
-129
readme.md
···
1
-
microcosm: links
2
-
================
3
-
4
-
this repo contains libraries and apps for working with cross-record references in at-protocol.
5
-
1
+
microcosm HTTP APIs + rust crates
2
+
=================================
3
+
[](https://bsky.app/profile/microcosm.blue)
4
+
[](https://discord.gg/tcDfe4PGVB)
5
+
[](https://github.com/sponsors/uniphil/)
6
+
[](https://ko-fi.com/bad_example)
6
7
7
-
App: [Constellation](./constellation/)
8
-
--------------------------------------------
8
+
Welcome! Documentation is under active development. If you like reading API docs, you'll probably hit the ground running!
9
9
10
-
A global atproto backlink index ✨
10
+
Tutorials, how-to guides, and client SDK libraries are all in the works for gentler on-ramps, but they're not quite ready yet. Don't let that stop you! Hop in the [microcosm discord](https://discord.gg/tcDfe4PGVB), or post questions and tag [@bad-example.com](https://bsky.app/profile/bad-example.com) on Bluesky if you get stuck anywhere.
11
11
12
-
- Self hostable: handles the full write throughput of the global atproto firehose on a raspberry pi 4b + single SSD
13
-
- Storage efficient: less than 2GB/day disk consumption indexing all references in all lexicons and all non-atproto URLs
14
-
- Handles record deletion, account de/re-activation, and account deletion, ensuring accurate link counts and respecting users data choices
15
-
- Simple JSON API
12
+
> [!tip]
13
+
> This repository's primary home is moving to tangled: [@microcosm.blue/microcosm-rs](https://tangled.sh/@microcosm.blue/microcosm-rs). It will continue to be mirrored on [github](https://github.com/at-microcosm/microcosm-rs) for the foreseeable future, and it's fine to open issues or pulls in either place!
16
14
17
-
All social interactions in atproto tend to be represented by links (or references) between PDS records. This index can answer questions like "how many likes does a bsky post have", "who follows an account", "what are all the comments on a [frontpage](https://frontpage.fyi/) post", and more.
18
15
19
-
- **status**: works! api is unstable and likely to change, and no known instances have a full network backfill yet.
20
-
- source: [./constellation/](./constellation/)
21
-
- public instance: [constellation.microcosm.blue](https://constellation.microcosm.blue/)
22
-
23
-
_note: the public instance currently runs on a little raspberry pi in my house, feel free to use it! it comes with only with best-effort uptime, no commitment to not breaking the api for now, and possible rate-limiting. if you want to be nice you can put your project name and bsky username (or email) in your user-agent header for api requests._
24
-
25
-
26
-
App: Spacedust
27
-
--------------
28
-
29
-
A notification subscription service 💫
30
-
31
-
using the same "link source" concept as [constellation](./constellation/), offer webhook notifications for new references created to records
32
-
33
-
- **status**: in design
34
-
35
-
36
-
Library: [links](./links/)
16
+
🌌 [Constellation](./constellation/)
37
17
------------------------------------
38
18
39
-
A rust crate (not published on crates.io yet) for optimistically parsing links out of arbitrary atproto PDS records, and potentially canonicalizing them
40
-
41
-
- **status**: unstable, might remain an internal lib for constellation (and spacedust, soon)
42
-
43
-
44
-
45
-
---
46
-
47
-
48
-
old notes follow, ignore
49
-
------------------------
50
-
51
-
52
-
as far as i can tell, atproto lexicons today don't follow much of a convention for referencing across documents: sometimes it's a StrongRef, sometimes it's a DID, sometimes it's a bare at-uri. lexicon authors choose any old link-sounding key name for the key in their document.
53
-
54
-
it's pretty messy so embrace the mess: atproto wants to be part of the web, so this library will also extract URLs and other URIs if you want it to. all the links.
55
-
56
-
57
-
why
58
-
---
59
-
60
-
the atproto firehose that bluesky sprays at you will contain raw _contents_ from peoples' pdses. these are isolated, decontextualized updates. it's very easy to build some kinds of interesting downstream apps off of this feed.
61
-
62
-
- bluesky posts (firesky, deletions, )
63
-
- blueksy post stats (emojis, )
64
-
- trending keywords ()
65
-
66
-
but bringing almost kind of _context_ into your project requires a big step up in complexity and potentially cost: you're entering "appview" territory. _how many likes does a post have? who follows this account?_
67
-
68
-
you own your atproto data: it's kept in your personal data repository (PDS) and noone else can write to it. when someone likes your post, they create a "like" record in their _own_ pds, and that like belongs to _them_, not to you/your post.
69
-
70
-
in the firehose you'll see a `app.bsky.feed.post` record created, with no details about who has liked it. then you'll see separate `app.bsky.feed.like` records show up for each like that comes in on that post, with no context about the post except a random-looking reference to it. storing these in order to do so is up to you!
71
-
72
-
**so, why**
73
-
74
-
everything is links, and they're a mess, but they all kinda work the same, so maybe some tooling can bring down that big step in complexity from firehose raw-content apps -> apps requiring any social context.
75
-
76
-
everything is links:
77
-
78
-
- likes
79
-
- follows
80
-
- blocks
81
-
- reposts
82
-
- quotes
83
-
84
-
some low-level things you could make from links:
85
-
86
-
- notification streams (part of ucosm)
87
-
- a global reverse index (part of ucosm)
88
-
89
-
i think that making these low-level services as easy to use as jetstream could open up pathways for building more atproto apps that operate at full scale with interesting features for reasonable effort at low cost to operate.
19
+
A global atproto interactions backlink index as a simple JSON API. Works with every lexicon, runs on a raspberry pi, consumes less than 2GiB of disk per day. Handles record deletion, account de/re-activation, and account deletion, ensuring accurate link counts while respecting users' data choices.
90
20
21
+
- Source: [./constellation/](./constellation/)
22
+
- [Public instance/API docs](https://constellation.microcosm.blue/)
23
+
- Status: used in production. APIs will change but backwards compatibility will be maintained as long as needed.
91
24
92
-
extracting links
93
-
---------------
94
25
26
+
💫 [Spacedust](./spacedust/)
27
+
----------------------------
95
28
96
-
- low-level: pass a &str of a field value and get a parsed link back
29
+
A global atproto interactions firehose. Extracts all at-uris, DIDs, and URLs from every lexicon in the firehose, and exposes them over a websocket modelled after [jetstream](https://github.com/bluesky-social/jetstream).
97
30
98
-
- med-level: pass a &str of record in json form and get a list of parsed links + json paths back. (todo: should also handle dag-cbor prob?)
31
+
- Source: [./spacedust/](./spacedust/)
32
+
- [Public instance/API docs](https://spacedust.microcosm.blue/)
33
+
- Status: v0: the basics work and the APIs are in place! missing cursor replay, forward link storage, and delete event link hydration.
99
34
100
-
- high-ish level: pass the json record and maybe apply some pre-loaded rules based on known lexicons to get the best result.
35
+
### Demos:
101
36
102
-
for now, a link is only considered if it matches for the entire value of the record's field -- links embedded in text content are not included. note that urls in bluesky posts _will_ still be extracted, since they are broken out into facets.
37
+
- [Spacedust notifications](https://notifications.microcosm.blue/): web push notifications for _every_ atproto app
38
+
- [Zero-Bluesky real-time interaction-updating post embed](https://bsky.bad-example.com/zero-bluesky-realtime-embed/)
103
39
104
40
105
-
resolving / canonicalizing links
106
-
--------------------------------
41
+
🛰️ [Slingshot](./slingshot)
42
+
---------------------------
107
43
44
+
A fast, eager, production-grade edge cache for atproto records and identities. Pre-caches all records from the firehose and maintains a longer-term cache of requested records on disk.
108
45
109
-
### at-uris
46
+
- Source: [./slingshot/](./slingshot/)
47
+
- [Public instance/API docs](https://slingshot.microcosm.blue/)
48
+
- Status: v0: most XRPC APIs are working. cache storage is being reworked.
110
49
111
-
every at-uri has at least two equivalent forms, one with a `DID`, and one with an account handle. the at-uri spec [illustrates this by example](https://atproto.com/specs/at-uri-scheme):
112
50
113
-
- `at://did:plc:44ybard66vv44zksje25o7dz/app.bsky.feed.post/3jwdwj2ctlk26`
114
-
- `at://bnewbold.bsky.team/app.bsky.feed.post/3jwdwj2ctlk26`
51
+
🛸 [UFOs API](./ufos)
52
+
---------------------
115
53
116
-
some applications, like a reverse link index, may wish to canonicalize at-uris to a single form. the `DID`-form is stable as an account changes its handle and probably the right choice to canonicalize to, but maybe some apps would actually perfer to canonicalise to handles?
54
+
Timeseries stats and sample records for every [collection](https://atproto.com/guides/glossary#collection) ever seen in the atproto firehose. Unique users are counted in hyperloglog sketches enabling arbitrary cardinality aggregation across time buckets and/or NSIDs.
117
55
118
-
hopefully atrium will make it easy to resolve at-uris.
56
+
- Source: [./ufos/](./ufos/)
57
+
- [Public instance/API docs](https://ufos-api.microcosm.blue/)
58
+
- Status: Used in production. It has APIs and they work! Indexing needs improvement, and more indexes plus more APIs over the data are planned.
119
59
60
+
> [!tip]
61
+
> See also: [UFOs atproto explorer](https://ufos.microcosm.blue/) built on UFOs API. ([source](https://github.com/at-microcosm/spacedust-utils))
120
62
121
-
### urls
122
63
123
-
canonicalizing URLs is more annoying but also a bit more established. lots of details.
64
+
๐ซ [Links](./links)
65
+
-------------------
124
66
125
-
- do we have to deal with punycode?
126
-
- follow redirects (todo: only permanent ones, or all?)
127
-
- check for rel=canonical http header and possibly follow it
128
-
- check link rel=canonical meta tag and possibly follow it
129
-
- do we need to check site maps??
130
-
- do we have to care at all about AMP?
131
-
- do we want anything to do with url shorteners??
132
-
- how do multilingual sites affect this?
133
-
- do we have to care about `script type="application/ld+json"` ???
67
+
Rust library for parsing and extracting links (at-uris, DIDs, and URLs) from atproto records.
134
68
135
-
ugh. is there a crate for this.
69
+
- Source: [./links/](./links/)
70
+
- Status: not yet published to crates.io; needs some rework
136
71
137
72
138
-
### relative uris?
139
-
140
-
links might be relative, in which case they might need to be made absolute before being useful. is that a concern for this library, or up to the user? (seems like we might not have context here to determine its absolute)
141
-
142
-
143
-
### canonicalizing
144
-
145
-
there should be a few async functions available to canonicalize already-parsed links.
146
-
147
-
- what happens if a link can't be resolved?
73
+
🛩️ [Jetstream](./jetstream)
74
+
---------------------------
148
75
76
+
A low-overhead jetstream client with cursor handling and automatic reconnect.
149
77
150
-
---
78
+
- Source: [./jetstream/](./jetstream/)
79
+
- Status: used in multiple apps in production, but not yet published to crates.io; some rework planned
151
80
152
-
- using `tinyjson` because it's nice -- maybe should switch to serde_json to share deps with atrium?
81
+
> [!tip]
82
+
> See also: [Rocketman](https://github.com/teal-fm/cadet/tree/main/rocketman), another excellent rust jetstream client which shares some lineage and _is_ published on crates.io.
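
To give a feel for what a consumer looks like, here's a generic sketch using `tokio-tungstenite` directly against Bluesky's public jetstream endpoint (not this crate's API, which may differ):

```rust
use futures_util::StreamExt;
use tokio_tungstenite::connect_async;

#[tokio::main]
async fn main() {
    let url =
        "wss://jetstream2.us-east.bsky.network/subscribe?wantedCollections=app.bsky.feed.like";
    let (mut ws, _) = connect_async(url).await.expect("websocket connect");
    while let Some(Ok(msg)) = ws.next().await {
        if let Ok(text) = msg.to_text() {
            // each frame is a JSON event carrying `time_us`, which a real
            // client should persist as a cursor for reconnects
            println!("{text}");
        }
    }
}
```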
153
83
154
-
- would use atrium for parsing at-uris, but it's not in there. there's a did-only version in the non-lib commands.rs. its identifier parser is strict to did + handle, which makes sense, but for our purposes we might want to allow unknown methods too?
155
84
156
-
- rsky-syntax has an aturi
157
-
- adenosyne also
158
-
- might come back to these
159
85
86
+
🎭 Deprecated: [Who am I](./who-am-i)
87
+
-------------------------------------
160
88
161
-
-------
89
+
An identity bridge for microcosm demos that kinda worked. Fixing its problems is about equivalent to reinventing a lot of OIDC, so it's being retired.
162
90
163
-
rocks
91
+
- Source: [./who-am-i/](./who-am-i/)
92
+
- Status: ready for retirement.
164
93
165
-
```bash
166
-
ROCKSDB_LIB_DIR=/nix/store/z2chn0hsik0clridr8mlprx1cngh1g3c-rocksdb-9.7.3/lib/ cargo build
167
-
```
94
+
> [!warning]
95
+
> `who-am-i` is still in use for the Spacedust Notifications demo, but that will hopefully be migrated to use atproto oauth directly instead.
+12
reflector/Cargo.toml
···
1
+
[package]
2
+
name = "reflector"
3
+
version = "0.1.0"
4
+
edition = "2024"
5
+
6
+
[dependencies]
7
+
clap = { version = "4.5.47", features = ["derive"] }
8
+
log = "0.4.28"
9
+
poem = "3.1.12"
10
+
serde = { version = "1.0.219", features = ["derive"] }
11
+
tokio = "1.47.1"
12
+
tracing-subscriber = { version = "0.3.20", features = ["env-filter"] }
+9
reflector/readme.md
···
1
+
# reflector
2
+
3
+
a tiny did:web service server that maps subdomains to a single service endpoint
4
+
5
+
receiving requests from multiple subdomains is left as a problem for the reverse proxy to solve, since acme wildcard certificates (ie. letsencrypt) require the most complicated and involved challenge type (DNS).
6
+
7
+
caddy [has good support for](https://caddyserver.com/docs/caddyfile/patterns#wildcard-certificates) configuring the wildcard DNS challenge with various DNS providers, and also supports [on-demand](https://caddyserver.com/docs/automatic-https#using-on-demand-tls) provisioning via the simpler methods.
8
+
9
+
if you only need a small fixed number of subdomains, you can also use certbot or otherwise individually configure them in your reverse proxy.
+112
reflector/src/main.rs
···
1
+
use clap::Parser;
2
+
use poem::{
3
+
EndpointExt, Response, Route, Server, get, handler,
4
+
http::StatusCode,
5
+
listener::TcpListener,
6
+
middleware::{AddData, Tracing},
7
+
web::{Data, Json, Query, TypedHeader, headers::Host},
8
+
};
9
+
use serde::{Deserialize, Serialize};
10
+
11
+
#[handler]
12
+
fn hello() -> String {
13
+
"ษนoสษวส
โ
วษน".to_string()
14
+
}
15
+
16
+
#[derive(Debug, Serialize)]
17
+
struct DidDoc {
18
+
id: String,
19
+
service: [DidService; 1],
20
+
}
21
+
22
+
#[derive(Debug, Clone, Serialize)]
23
+
#[serde(rename_all = "camelCase")]
24
+
struct DidService {
25
+
id: String,
26
+
r#type: String,
27
+
service_endpoint: String,
28
+
}
29
+
30
+
#[handler]
31
+
fn did_doc(TypedHeader(host): TypedHeader<Host>, service: Data<&DidService>) -> Json<DidDoc> {
32
+
Json(DidDoc {
33
+
id: format!("did:web:{}", host.hostname()),
34
+
service: [service.clone()],
35
+
})
36
+
}
37
+
38
+
#[derive(Deserialize)]
39
+
struct AskQuery {
40
+
domain: String,
41
+
}
42
+
#[handler]
43
+
fn ask_caddy(
44
+
Data(parent): Data<&Option<String>>,
45
+
Query(AskQuery { domain }): Query<AskQuery>,
46
+
) -> Response {
47
+
if let Some(parent) = parent
48
+
&& let Some(prefix) = domain.strip_suffix(&format!(".{parent}"))
49
+
&& !prefix.contains('.')
50
+
{
51
+
// no sub-sub-domains allowed
52
+
return Response::builder().body("ok");
53
+
};
54
+
Response::builder()
55
+
.status(StatusCode::FORBIDDEN)
56
+
.body("nope")
57
+
}
58
+
59
+
/// Reflector: a tiny did:web service server
60
+
#[derive(Parser, Debug, Clone)]
61
+
#[command(version, about, long_about = None)]
62
+
struct Args {
63
+
/// The DID document service ID to serve
64
+
///
65
+
/// must start with a '#', like `#bsky_appview`
66
+
#[arg(long)]
67
+
id: String,
68
+
/// Service type
69
+
///
70
+
/// Not sure exactly what its requirements are. 'BlueskyAppview' for example
71
+
#[arg(long)]
72
+
r#type: String,
73
+
/// The HTTPS endpoint for the service
74
+
#[arg(long)]
75
+
service_endpoint: String,
76
+
/// The parent domain; requests should come from subdomains of this
77
+
#[arg(long)]
78
+
domain: Option<String>,
79
+
}
80
+
81
+
impl From<Args> for DidService {
82
+
fn from(a: Args) -> Self {
83
+
Self {
84
+
id: a.id,
85
+
r#type: a.r#type,
86
+
service_endpoint: a.service_endpoint,
87
+
}
88
+
}
89
+
}
90
+
91
+
#[tokio::main(flavor = "current_thread")]
92
+
async fn main() {
93
+
tracing_subscriber::fmt::init();
94
+
log::info!("ษนoสษวส
โ
วษน");
95
+
96
+
let args = Args::parse();
97
+
let domain = args.domain.clone();
98
+
let service: DidService = args.into();
99
+
100
+
Server::new(TcpListener::bind("0.0.0.0:3001"))
101
+
.run(
102
+
Route::new()
103
+
.at("/", get(hello))
104
+
.at("/.well-known/did.json", get(did_doc))
105
+
.at("/ask", get(ask_caddy))
106
+
.with(AddData::new(service))
107
+
.with(AddData::new(domain))
108
+
.with(Tracing),
109
+
)
110
+
.await
111
+
.unwrap()
112
+
}
+6
-5
slingshot/Cargo.toml
···
4
4
edition = "2024"
5
5
6
6
[dependencies]
7
-
atrium-api = { version = "0.25.4", default-features = false }
8
-
atrium-common = "0.1.2"
9
-
atrium-identity = "0.1.5"
10
-
atrium-oauth = "0.1.3"
7
+
atrium-api = { git = "https://github.com/uniphil/atrium.git", branch = "fix/resolve-handle-https-accept-whitespace", default-features = false }
8
+
atrium-common = { git = "https://github.com/uniphil/atrium.git", branch = "fix/resolve-handle-https-accept-whitespace" }
9
+
atrium-identity = { git = "https://github.com/uniphil/atrium.git", branch = "fix/resolve-handle-https-accept-whitespace" }
10
+
atrium-oauth = { git = "https://github.com/uniphil/atrium.git", branch = "fix/resolve-handle-https-accept-whitespace" }
11
11
clap = { version = "4.5.41", features = ["derive"] }
12
12
ctrlc = "3.4.7"
13
13
foyer = { version = "0.18.0", features = ["serde"] }
14
14
hickory-resolver = "0.25.2"
15
15
jetstream = { path = "../jetstream", features = ["metrics"] }
16
+
links = { path = "../links" }
16
17
log = "0.4.27"
17
18
metrics = "0.24.2"
18
19
metrics-exporter-prometheus = { version = "0.17.1", features = ["http-listener"] }
19
-
poem = { version = "3.1.12", features = ["acme"] }
20
+
poem = { version = "3.1.12", features = ["acme", "static-files"] }
20
21
poem-openapi = { version = "5.1.16", features = ["scalar"] }
21
22
reqwest = { version = "0.12.22", features = ["json"] }
22
23
rustls = "0.23.31"
+93
slingshot/api-description.md
···
1
+
_A [gravitational slingshot](https://en.wikipedia.org/wiki/Gravity_assist) makes use of the gravity and relative movements of celestial bodies to accelerate a spacecraft and change its trajectory._
2
+
3
+
4
+
# Slingshot: edge record cache
5
+
6
+
Applications in [ATProtocol](https://atproto.com/) store data in users' own [PDS](https://atproto.com/guides/self-hosting) (Personal Data Server), which are distributed across thousands of independently-run servers all over the world. Trying to access this data poses challenges for client applications:
7
+
8
+
- A PDS might be far away with long network latency
9
+
- or may be on an unreliable connection
10
+
- or overloaded when you need it, or offline, orโฆ
11
+
12
+
Large projects like [Bluesky](https://bsky.app/) control their performance and reliability by syncing all app-relevant data from PDSs into first-party databases. But for new apps, building out this additional data infrastructure adds significant effort and complexity up front.
13
+
14
+
**Slingshot is a fast, eager, production-grade cache of data in the [ATmosphere](https://atproto.com/)**, offering performance and reliability without custom infrastructure.
15
+
16
+
17
+
### Current status
18
+
19
+
> [!important]
20
+
> Slingshot is currently in a **v0, pre-release state**. There is one production instance and you can use it! Expect short downtimes for restarts as development progresses and lower cache hit-rates as the internal storage caches are adjusted and reset.
21
+
22
+
The core APIs will not change, since they are standard third-party `com.atproto` query APIs from ATProtocol.
23
+
24
+
25
+
## Eager caching
26
+
27
+
In many cases, Slingshot can cache the data you need *before* the first request!
28
+
29
+
Slingshot subscribes to the global [Firehose](https://atproto.com/specs/sync#firehose) of data updates. It keeps a short-term rolling indexed window of *all* data, and automatically promotes content likely to be requested to its longer-term main cache. _(automatic promotion is still a work in progress)_
30
+
31
+
When there is a cache miss, Slingshot can often still accelerate record fetching, since it keeps a large cache of resolved identities: it can usually request from the correct PDS without extra lookups.
32
+
33
+
34
+
## Precise invalidation
35
+
36
+
The firehose includes **update** and **delete** events, which Slingshot uses to ensure stale and deleted data is removed within a very short window. Additionally, identity and account-level events can trigger rapid cleanup of data for deactivated and deleted accounts. _(some of this is still a work in progress)_
37
+
38
+
39
+
## Low-trust
40
+
41
+
The "AT" in ATProtocol [stands for _Authenticated Transfer_](https://atproto.com/guides/glossary#at-protocol): all data is cryptographically signed, which makes it possible to broadcast data through third parties and trust that it's real _without_ having to directly contact the originating server.
42
+
43
+
Two core standard query APIs are supported to balance convenience and trust. They both fetch [records](https://atproto.com/guides/glossary#record):
44
+
45
+
### [`com.atproto.repo.getRecord`](#tag/comatproto-queries/get/xrpc/com.atproto.repo.getRecord)
46
+
47
+
- convenient `JSON` response format
48
+
- cannot be proven authentic
49
+
50
+
### [`com.atproto.sync.getRecord`](#tag/comatproto-queries/get/xrpc/com.atproto.sync.getRecord)
51
+
52
+
- [`DAG-CBOR`](https://atproto.com/specs/data-model)-encoded response requires extra libraries to decode, but
53
+
- includes a cryptographic proof of authenticity!
54
+
55
+
_(work on this endpoint is in progress)_
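
A minimal sketch of calling the JSON variant on the public instance with `reqwest` (the record coordinates below are just an example):

```rust
use reqwest::Client;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let record: serde_json::Value = Client::new()
        .get("https://slingshot.microcosm.blue/xrpc/com.atproto.repo.getRecord")
        .query(&[
            ("repo", "did:plc:44ybard66vv44zksje25o7dz"),
            ("collection", "app.bsky.feed.post"),
            ("rkey", "3jwdwj2ctlk26"),
        ])
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("{record:#}"); // pretty-printed record JSON
    Ok(())
}
```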
56
+
57
+
58
+
## Service proxying
59
+
60
+
Clients can proxy atproto queries through their own PDS with [Service Proxying](https://atproto.com/specs/xrpc#service-proxying), and this is supported by Slingshot. The Slingshot instance must be started with the `--domain` argument specified.
61
+
62
+
Service-proxied requests can specify a Slingshot instance via the `atproto-proxy` header:
63
+
64
+
```http
65
+
GET /xrpc/com.bad-example.identity.resolveMiniDoc?identifier=bad-example.com
66
+
Host: <your pds>
67
+
atproto-proxy: did:web:<slingshot domain>#slingshot
68
+
```
69
+
70
+
Where `<your pds>` is the user's own PDS host, and `<slingshot domain>` is the domain that the slingshot instance is deployed at (eg. `slingshot.microcosm.blue`). See the [Service Proxying](https://atproto.com/specs/xrpc#service-proxying) docs for more.
71
+
72
+
> [!tip]
73
+
> Service proxying is supported but completely optional. All APIs are directly accessible over the public internet, and GeoDNS helps route users to the closest instance to them for the lowest possible latency. (_note: deploying multiple slingshot instances with GeoDNS is still TODO_)
74
+
75
+
76
+
## Ergonomic APIs
77
+
78
+
- Slingshot also offers variants of the `getRecord` endpoints that accept a full `at-uri` as a parameter, to save clients from needing to parse and validate all parts of a record location.
79
+
80
+
- Bi-directionally verifying identity endpoints, so you can directly exchange atproto [`handle`](https://atproto.com/guides/glossary#handle)s for [`DID`](https://atproto.com/guides/glossary#did-decentralized-id)s without extra steps, plus a convenient [Mini-Doc](#tag/slingshot-specific-queries/get/xrpc/com.bad-example.identity.resolveMiniDoc) verified identity summary.
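
A quick sketch of the Mini-Doc lookup against the public instance (the same endpoint used in the service proxying example above):

```rust
use reqwest::Client;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let doc: serde_json::Value = Client::new()
        .get("https://slingshot.microcosm.blue/xrpc/com.bad-example.identity.resolveMiniDoc")
        .query(&[("identifier", "bad-example.com")])
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    // expect a verified identity summary: the did, pds url, and signing key
    println!("{doc:#}");
    Ok(())
}
```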
81
+
82
+
83
+
## Part of microcosm
84
+
85
+
[Microcosm](https://www.microcosm.blue/) is a collection of services and independent community-run infrastructure for ATProtocol.
86
+
87
+
Slingshot excels when combined with _shallow indexing_ services, which offer fast queries of global data relationships but with only references to the data records. Microcosm has a few!
88
+
89
+
- [🌌 Constellation](https://constellation.microcosm.blue/), a global backlink index (all social interactions in atproto are links!)
90
+
- [💫 Spacedust](https://spacedust.microcosm.blue/), a firehose of all social interactions
91
+
92
+
> [!success]
93
+
> All microcosm projects are [open source](https://tangled.sh/@bad-example.com/microcosm-links). **You can help sustain Slingshot** and all of microcosm by becoming a [Github sponsor](https://github.com/sponsors/uniphil/) or a [Ko-fi supporter](https://ko-fi.com/bad_example)!
+15
slingshot/src/error.rs
···
1
+
use crate::ErrorResponseObject;
1
2
use thiserror::Error;
2
3
3
4
#[derive(Debug, Error)]
···
46
47
}
47
48
48
49
#[derive(Debug, Error)]
50
+
pub enum HealthCheckError {
51
+
#[error("failed to send checkin: {0}")]
52
+
HealthCheckError(#[from] reqwest::Error),
53
+
}
54
+
55
+
#[derive(Debug, Error)]
49
56
pub enum MainTaskError {
50
57
#[error(transparent)]
51
58
ConsumerTaskError(#[from] ConsumerError),
···
53
60
ServerTaskError(#[from] ServerError),
54
61
#[error(transparent)]
55
62
IdentityTaskError(#[from] IdentityError),
63
+
#[error(transparent)]
64
+
HealthCheckError(#[from] HealthCheckError),
65
+
#[error("firehose cache failed to close: {0}")]
66
+
FirehoseCacheCloseError(foyer::Error),
56
67
}
57
68
58
69
#[derive(Debug, Error)]
···
75
86
MissingUpstreamCid,
76
87
#[error("upstream CID was not valid: {0}")]
77
88
BadUpstreamCid(String),
89
+
#[error("upstream atproto-looking bad request")]
90
+
UpstreamBadRequest(ErrorResponseObject),
91
+
#[error("upstream non-atproto bad request")]
92
+
UpstreamBadBadNotGoodRequest(reqwest::Error),
78
93
}
+8
-2
slingshot/src/firehose_cache.rs
···
4
4
5
5
pub async fn firehose_cache(
6
6
cache_dir: impl AsRef<Path>,
7
+
memory_mb: usize,
8
+
disk_gb: usize,
7
9
) -> Result<HybridCache<String, CachedRecord>, String> {
8
10
let cache = HybridCacheBuilder::new()
9
11
.with_name("firehose")
10
-
.memory(64 * 2_usize.pow(20))
12
+
.memory(memory_mb * 2_usize.pow(20))
11
13
.with_weighter(|k: &String, v| k.len() + std::mem::size_of_val(v))
12
14
.storage(Engine::large())
13
-
.with_device_options(DirectFsDeviceOptions::new(cache_dir))
15
+
.with_device_options(
16
+
DirectFsDeviceOptions::new(cache_dir)
17
+
.with_capacity(disk_gb * 2_usize.pow(30))
18
+
.with_file_size(16 * 2_usize.pow(20)), // note: this does limit the max cached item size, warning jumbo records
19
+
)
14
20
.build()
15
21
.await
16
22
.map_err(|e| format!("foyer setup error: {e:?}"))?;
+32
slingshot/src/healthcheck.rs
···
1
+
use crate::error::HealthCheckError;
2
+
use reqwest::Client;
3
+
use std::time::Duration;
4
+
use tokio::time::sleep;
5
+
use tokio_util::sync::CancellationToken;
6
+
7
+
pub async fn healthcheck(
8
+
endpoint: String,
9
+
shutdown: CancellationToken,
10
+
) -> Result<(), HealthCheckError> {
11
+
let client = Client::builder()
12
+
.user_agent(format!(
13
+
"microcosm slingshot v{} (dev: @bad-example.com)",
14
+
env!("CARGO_PKG_VERSION")
15
+
))
16
+
.no_proxy()
17
+
.timeout(Duration::from_secs(10))
18
+
.build()?;
19
+
20
+
loop {
21
+
tokio::select! {
22
+
res = client.get(&endpoint).send() => {
23
+
let _ = res
24
+
.and_then(|r| r.error_for_status())
25
+
.inspect_err(|e| log::error!("failed to send healthcheck: {e}"));
26
+
},
27
+
_ = shutdown.cancelled() => break,
28
+
}
29
+
sleep(Duration::from_secs(51)).await;
30
+
}
31
+
Ok(())
32
+
}
+27
-12
slingshot/src/identity.rs
···
13
13
/// 3. DID -> handle resolution: for bidirectional handle validation and in case we want to offer this
14
14
use std::time::Duration;
15
15
use tokio::sync::Mutex;
16
+
use tokio_util::sync::CancellationToken;
16
17
17
18
use crate::error::IdentityError;
18
19
use atrium_api::{
···
53
54
///
54
55
/// partial because the handle is not verified
55
56
#[derive(Debug, Clone, Serialize, Deserialize)]
56
-
struct PartialMiniDoc {
57
+
pub struct PartialMiniDoc {
57
58
/// an atproto handle (**unverified**)
58
59
///
59
60
/// the first valid atproto handle from the did doc's aka
60
-
unverified_handle: Handle,
61
+
pub unverified_handle: Handle,
61
62
/// the did's atproto pds url (TODO: type this?)
62
63
///
63
64
/// note: atrium *does* actually parse it into a URI, it just doesn't return
64
65
/// that for some reason
65
-
pds: String,
66
+
pub pds: String,
66
67
/// for now we're just pulling this straight from the did doc
67
68
///
68
69
/// would be nice to type and validate it
···
70
71
/// this is the publicKeyMultibase from the did doc.
71
72
/// legacy key encoding not supported.
72
73
/// `id`, `type`, and `controller` must be checked, but aren't stored.
73
-
signing_key: String,
74
+
pub signing_key: String,
74
75
}
75
76
76
77
impl TryFrom<DidDocument> for PartialMiniDoc {
···
175
176
.with_name("identity")
176
177
.memory(16 * 2_usize.pow(20))
177
178
.with_weighter(|k, v| std::mem::size_of_val(k) + std::mem::size_of_val(v))
178
-
.storage(Engine::large())
179
-
.with_device_options(DirectFsDeviceOptions::new(cache_dir))
179
+
.storage(Engine::small())
180
+
.with_device_options(
181
+
DirectFsDeviceOptions::new(cache_dir)
182
+
.with_capacity(2_usize.pow(30)) // TODO: configurable (1GB to have something)
183
+
.with_file_size(2_usize.pow(20)), // note: this does limit the max cached item size, warning jumbo records
184
+
)
180
185
.build()
181
186
.await?;
182
187
···
207
212
Ok(Some(did))
208
213
}
209
214
210
-
/// Resolve (and verify!) a DID to a pds url
215
+
/// Resolve a DID to a pds url
211
216
///
212
217
/// This *also* incidentally resolves and verifies the handle, which might
213
218
/// make it slower than expected
···
235
240
Err(atrium_identity::Error::NotFound) => {
236
241
Ok(IdentityVal(UtcDateTime::now(), IdentityData::NotFound))
237
242
}
238
-
Err(other) => Err(foyer::Error::Other(Box::new(
239
-
IdentityError::ResolutionFailed(other),
240
-
))),
243
+
Err(other) => Err(foyer::Error::Other(Box::new({
244
+
log::debug!("other error resolving handle: {other:?}");
245
+
IdentityError::ResolutionFailed(other)
246
+
}))),
241
247
}
242
248
}
243
249
})
···
266
272
}
267
273
268
274
/// Fetch (and cache) a partial mini doc from a did
269
-
async fn did_to_partial_mini_doc(
275
+
pub async fn did_to_partial_mini_doc(
270
276
&self,
271
277
did: &Did,
272
278
) -> Result<Option<PartialMiniDoc>, IdentityError> {
···
403
409
}
404
410
405
411
/// run the refresh queue consumer
406
-
pub async fn run_refresher(&self) -> Result<(), IdentityError> {
412
+
pub async fn run_refresher(&self, shutdown: CancellationToken) -> Result<(), IdentityError> {
407
413
let _guard = self
408
414
.refresher
409
415
.try_lock()
410
416
.expect("there to only be one refresher running");
411
417
loop {
418
+
if shutdown.is_cancelled() {
419
+
log::info!("identity refresher: exiting for shutdown: closing cache...");
420
+
if let Err(e) = self.cache.close().await {
421
+
log::error!("cache close errored: {e}");
422
+
} else {
423
+
log::info!("identity cache closed.")
424
+
}
425
+
return Ok(());
426
+
}
412
427
let Some(task_key) = self.peek_refresh().await else {
413
428
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
414
429
continue;
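
For reference, the cache construction above distills to the following shape (a sketch mirroring the foyer calls visible in this diff; the `String`/`Vec<u8>` types are placeholders, and the 1 GiB / 1 MiB numbers are the hard-coded values flagged in the TODO comments):

```rust
use foyer::{DirectFsDeviceOptions, Engine, HybridCache, HybridCacheBuilder};
use std::path::PathBuf;

async fn build_identity_cache(
    cache_dir: PathBuf,
) -> Result<HybridCache<String, Vec<u8>>, foyer::Error> {
    HybridCacheBuilder::new()
        .with_name("identity")
        .memory(16 * 2_usize.pow(20)) // 16 MiB in-memory tier
        .with_weighter(|k, v| std::mem::size_of_val(k) + std::mem::size_of_val(v))
        .storage(Engine::small()) // small-object engine, switched from Engine::large()
        .with_device_options(
            DirectFsDeviceOptions::new(cache_dir)
                .with_capacity(2_usize.pow(30)) // 1 GiB disk budget
                .with_file_size(2_usize.pow(20)), // 1 MiB files bound max item size
        )
        .build()
        .await
}
```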
+3
-1
slingshot/src/lib.rs
+3
-1
slingshot/src/lib.rs
···
1
1
mod consumer;
2
2
pub mod error;
3
3
mod firehose_cache;
4
+
mod healthcheck;
4
5
mod identity;
5
6
mod record;
6
7
mod server;
7
8
8
9
pub use consumer::consume;
9
10
pub use firehose_cache::firehose_cache;
11
+
pub use healthcheck::healthcheck;
10
12
pub use identity::Identity;
11
-
pub use record::{CachedRecord, Repo};
13
+
pub use record::{CachedRecord, ErrorResponseObject, Repo};
12
14
pub use server::serve;
+68
-10
slingshot/src/main.rs
+68
-10
slingshot/src/main.rs
···
1
1
// use foyer::HybridCache;
2
2
// use foyer::{Engine, DirectFsDeviceOptions, HybridCacheBuilder};
3
3
use metrics_exporter_prometheus::PrometheusBuilder;
4
-
use slingshot::{Identity, Repo, consume, error::MainTaskError, firehose_cache, serve};
4
+
use slingshot::{
5
+
Identity, Repo, consume, error::MainTaskError, firehose_cache, healthcheck, serve,
6
+
};
5
7
use std::path::PathBuf;
6
8
7
9
use clap::Parser;
···
23
25
/// where to keep disk caches
24
26
#[arg(long)]
25
27
cache_dir: PathBuf,
28
+
/// memory cache size in MB
29
+
#[arg(long, default_value_t = 64)]
30
+
cache_memory_mb: usize,
31
+
/// disk cache size in GB
32
+
#[arg(long, default_value_t = 1)]
33
+
cache_disk_gb: usize,
34
+
/// host for HTTP server (when not using --domain)
35
+
#[arg(long, default_value = "127.0.0.1")]
36
+
host: String,
37
+
/// port for HTTP server (when not using --domain)
38
+
#[arg(long, default_value_t = 3000)]
39
+
port: u16,
40
+
/// port for metrics/prometheus server
41
+
#[arg(long, default_value_t = 8765)]
42
+
metrics_port: u16,
26
43
/// the domain pointing to this server
27
44
///
28
45
/// if present:
···
30
47
/// - an HTTPS cert will be automatically configured with Acme/letsencrypt
31
48
/// - TODO: a rate-limiter will be installed
32
49
#[arg(long)]
33
-
host: Option<String>,
50
+
domain: Option<String>,
51
+
/// email address for letsencrypt contact
52
+
///
53
+
/// recommended in production, i guess?
54
+
#[arg(long)]
55
+
acme_contact: Option<String>,
56
+
/// a location to cache acme https certs
57
+
///
58
+
/// only used if --domain is specified. omitting requires re-requesting certs
59
+
/// on every restart, and letsencrypt has rate limits that are easy to hit.
60
+
///
61
+
/// recommended in production, but mind the file permissions.
62
+
#[arg(long)]
63
+
certs: Option<PathBuf>,
64
+
/// a web address to send healthcheck pings to every ~51s or so
65
+
#[arg(long)]
66
+
healthcheck: Option<String>,
34
67
}
35
68
36
69
#[tokio::main]
···
44
77
45
78
let args = Args::parse();
46
79
47
-
if let Err(e) = install_metrics_server() {
80
+
if let Err(e) = install_metrics_server(args.metrics_port) {
48
81
log::error!("failed to install metrics server: {e:?}");
49
82
} else {
50
-
log::info!("metrics listening at http://0.0.0.0:8765");
83
+
log::info!("metrics listening at http://0.0.0.0:{}", args.metrics_port);
51
84
}
52
85
53
86
std::fs::create_dir_all(&args.cache_dir).map_err(|e| {
···
65
98
log::info!("cache dir ready at at {cache_dir:?}.");
66
99
67
100
log::info!("setting up firehose cache...");
68
-
let cache = firehose_cache(cache_dir.join("./firehose")).await?;
101
+
let cache = firehose_cache(
102
+
cache_dir.join("./firehose"),
103
+
args.cache_memory_mb,
104
+
args.cache_disk_gb,
105
+
)
106
+
.await?;
69
107
log::info!("firehose cache ready.");
70
108
71
109
let mut tasks: tokio::task::JoinSet<Result<(), MainTaskError>> = tokio::task::JoinSet::new();
···
76
114
.map_err(|e| format!("identity setup failed: {e:?}"))?;
77
115
log::info!("identity service ready.");
78
116
let identity_refresher = identity.clone();
117
+
let identity_shutdown = shutdown.clone();
79
118
tasks.spawn(async move {
80
-
identity_refresher.run_refresher().await?;
119
+
identity_refresher.run_refresher(identity_shutdown).await?;
81
120
Ok(())
82
121
});
83
122
···
90
129
server_cache_handle,
91
130
identity,
92
131
repo,
132
+
args.domain,
133
+
args.acme_contact,
134
+
args.certs,
93
135
args.host,
136
+
args.port,
94
137
server_shutdown,
95
138
)
96
139
.await?;
···
98
141
});
99
142
100
143
let consumer_shutdown = shutdown.clone();
144
+
let consumer_cache = cache.clone();
101
145
tasks.spawn(async move {
102
146
consume(
103
147
args.jetstream,
104
148
None,
105
149
args.jetstream_no_zstd,
106
150
consumer_shutdown,
107
-
cache,
151
+
consumer_cache,
108
152
)
109
153
.await?;
110
154
Ok(())
111
155
});
112
156
157
+
if let Some(hc) = args.healthcheck {
158
+
let healthcheck_shutdown = shutdown.clone();
159
+
tasks.spawn(async move {
160
+
healthcheck(hc, healthcheck_shutdown).await?;
161
+
Ok(())
162
+
});
163
+
}
164
+
113
165
tokio::select! {
114
166
_ = shutdown.cancelled() => log::warn!("shutdown requested"),
115
167
Some(r) = tasks.join_next() => {
···
118
170
}
119
171
}
120
172
173
+
tasks.spawn(async move {
174
+
cache
175
+
.close()
176
+
.await
177
+
.map_err(MainTaskError::FirehoseCacheCloseError)
178
+
});
179
+
121
180
tokio::select! {
122
181
_ = async {
123
182
while let Some(completed) = tasks.join_next().await {
124
183
log::info!("shutdown: task completed: {completed:?}");
125
184
}
126
185
} => {},
127
-
_ = tokio::time::sleep(std::time::Duration::from_secs(3)) => {
186
+
_ = tokio::time::sleep(std::time::Duration::from_secs(30)) => {
128
187
log::info!("shutdown: not all tasks completed on time. aborting...");
129
188
tasks.shutdown().await;
130
189
},
···
135
194
Ok(())
136
195
}
137
196
138
-
fn install_metrics_server() -> Result<(), metrics_exporter_prometheus::BuildError> {
197
+
fn install_metrics_server(port: u16) -> Result<(), metrics_exporter_prometheus::BuildError> {
139
198
log::info!("installing metrics server...");
140
199
let host = [0, 0, 0, 0];
141
-
let port = 8765;
142
200
PrometheusBuilder::new()
143
201
.set_quantiles(&[0.5, 0.9, 0.99, 1.0])?
144
202
.set_bucket_duration(std::time::Duration::from_secs(300))?
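
The hunk ends before the listener is bound; assuming `PrometheusBuilder::with_http_listener` (a real method in `metrics_exporter_prometheus`), the now-configurable port plugs in roughly like this sketch:

```rust
use metrics_exporter_prometheus::PrometheusBuilder;

fn install_metrics_server(port: u16) -> Result<(), metrics_exporter_prometheus::BuildError> {
    PrometheusBuilder::new()
        // quantile/bucket configuration from the diff omitted for brevity
        .with_http_listener(([0, 0, 0, 0], port)) // was hard-coded to 8765
        .install()
}
```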
+50
-20
slingshot/src/record.rs
+50
-20
slingshot/src/record.rs
···
2
2
3
3
use crate::{Identity, error::RecordError};
4
4
use atrium_api::types::string::{Cid, Did, Nsid, RecordKey};
5
-
use reqwest::Client;
5
+
use reqwest::{Client, StatusCode};
6
6
use serde::{Deserialize, Serialize};
7
7
use serde_json::value::RawValue;
8
8
use std::str::FromStr;
···
56
56
value: Box<RawValue>,
57
57
}
58
58
59
+
#[derive(Debug, Deserialize)]
60
+
pub struct ErrorResponseObject {
61
+
pub error: String,
62
+
pub message: String,
63
+
}
64
+
59
65
#[derive(Clone)]
60
66
pub struct Repo {
61
67
identity: Identity,
···
87
93
return Err(RecordError::NotFound("could not get pds for DID"));
88
94
};
89
95
90
-
// TODO: throttle by host probably, generally guard against outgoing requests
96
+
// cid gets set to None for a retry, if it's Some and we got NotFound
97
+
let mut cid = cid;
91
98
92
-
let mut params = vec![
93
-
("repo", did.to_string()),
94
-
("collection", collection.to_string()),
95
-
("rkey", rkey.to_string()),
96
-
];
97
-
if let Some(cid) = cid {
98
-
params.push(("cid", cid.as_ref().to_string()));
99
-
}
100
-
let mut url = Url::parse_with_params(&pds, ¶ms)?;
101
-
url.set_path("/xrpc/com.atproto.repo.getRecord");
99
+
let res = loop {
100
+
// TODO: throttle outgoing requests by host probably, generally guard against outgoing requests
101
+
let mut params = vec![
102
+
("repo", did.to_string()),
103
+
("collection", collection.to_string()),
104
+
("rkey", rkey.to_string()),
105
+
];
106
+
if let Some(cid) = cid {
107
+
params.push(("cid", cid.as_ref().to_string()));
108
+
}
109
+
let mut url = Url::parse_with_params(&pds, ¶ms)?;
110
+
url.set_path("/xrpc/com.atproto.repo.getRecord");
102
111
103
-
let res = self
104
-
.client
105
-
.get(url)
106
-
.send()
107
-
.await
108
-
.map_err(RecordError::SendError)?
112
+
let res = self
113
+
.client
114
+
.get(url.clone())
115
+
.send()
116
+
.await
117
+
.map_err(RecordError::SendError)?;
118
+
119
+
if res.status() == StatusCode::BAD_REQUEST {
120
+
// 1. if we're not able to parse json, it's not something we can handle
121
+
let err = res
122
+
.json::<ErrorResponseObject>()
123
+
.await
124
+
.map_err(RecordError::UpstreamBadBadNotGoodRequest)?;
125
+
// 2. if we are, is it a NotFound? and if so, did we try with a CID?
126
+
// if so, retry with no CID (api handler will reject for mismatch but
127
+
// with a nice error + warm cache)
128
+
if err.error == "NotFound" && cid.is_some() {
129
+
cid = &None;
130
+
continue;
131
+
} else {
132
+
return Err(RecordError::UpstreamBadRequest(err));
133
+
}
134
+
}
135
+
break res;
136
+
};
137
+
138
+
let data = res
109
139
.error_for_status()
110
140
.map_err(RecordError::StatusError)? // TODO atproto error handling (think about handling not found)
111
141
.json::<RecordResponseObject>()
112
142
.await
113
143
.map_err(RecordError::ParseJsonError)?; // todo...
114
144
115
-
let Some(cid) = res.cid else {
145
+
let Some(cid) = data.cid else {
116
146
return Err(RecordError::MissingUpstreamCid);
117
147
};
118
148
let cid = Cid::from_str(&cid).map_err(|e| RecordError::BadUpstreamCid(e.to_string()))?;
119
149
120
150
Ok(CachedRecord::Found(RawRecord {
121
151
cid,
122
-
record: res.value.to_string(),
152
+
record: data.value.to_string(),
123
153
}))
124
154
}
125
155
}
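
The new loop reduces to a small retry pattern: request with the pinned CID, and on an upstream 400 whose body says `NotFound`, retry exactly once with the CID dropped, so the API handler can answer with a friendlier mismatch error and a warm cache. A simplified sketch (the real code parses `ErrorResponseObject` and routes failures through `RecordError`):

```rust
use reqwest::{Client, StatusCode};

async fn get_with_cid_fallback(
    client: &Client,
    url: &str,
    mut cid: Option<String>,
) -> Result<reqwest::Response, reqwest::Error> {
    loop {
        let mut req = client.get(url);
        if let Some(ref c) = cid {
            req = req.query(&[("cid", c.as_str())]);
        }
        let res = req.send().await?;
        // simplification: the real code also checks the body's error == "NotFound"
        if res.status() == StatusCode::BAD_REQUEST && cid.take().is_some() {
            continue; // one retry, without the CID
        }
        return Ok(res);
    }
}
```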
+493
-60
slingshot/src/server.rs
+493
-60
slingshot/src/server.rs
···
1
-
use crate::{CachedRecord, Identity, Repo, error::ServerError};
1
+
use crate::{
2
+
CachedRecord, ErrorResponseObject, Identity, Repo,
3
+
error::{RecordError, ServerError},
4
+
};
2
5
use atrium_api::types::string::{Cid, Did, Handle, Nsid, RecordKey};
3
6
use foyer::HybridCache;
7
+
use links::at_uri::parse_at_uri as normalize_at_uri;
4
8
use serde::Serialize;
9
+
use std::path::PathBuf;
5
10
use std::str::FromStr;
6
11
use std::sync::Arc;
7
12
use tokio_util::sync::CancellationToken;
8
13
9
14
use poem::{
10
15
Endpoint, EndpointExt, Route, Server,
11
-
endpoint::make_sync,
16
+
endpoint::{StaticFileEndpoint, make_sync},
12
17
http::Method,
13
18
listener::{
14
19
Listener, TcpListener,
15
20
acme::{AutoCert, LETS_ENCRYPT_PRODUCTION},
16
21
},
17
-
middleware::{Cors, Tracing},
22
+
middleware::{CatchPanic, Cors, Tracing},
18
23
};
19
24
use poem_openapi::{
20
-
ApiResponse, Object, OpenApi, OpenApiService, param::Query, payload::Json, types::Example,
25
+
ApiResponse, ContactObject, ExternalDocumentObject, Object, OpenApi, OpenApiService, Tags,
26
+
param::Query, payload::Json, types::Example,
21
27
};
22
28
29
+
fn example_handle() -> String {
30
+
"bad-example.com".to_string()
31
+
}
23
32
fn example_did() -> String {
24
33
"did:plc:hdhoaan3xa3jiuq4fg4mefid".to_string()
25
34
}
···
29
38
fn example_rkey() -> String {
30
39
"3lv4ouczo2b2a".to_string()
31
40
}
41
+
fn example_uri() -> String {
42
+
format!(
43
+
"at://{}/{}/{}",
44
+
example_did(),
45
+
example_collection(),
46
+
example_rkey()
47
+
)
48
+
}
49
+
fn example_pds() -> String {
50
+
"https://porcini.us-east.host.bsky.network".to_string()
51
+
}
52
+
fn example_signing_key() -> String {
53
+
"zQ3shpq1g134o7HGDb86CtQFxnHqzx5pZWknrVX2Waum3fF6j".to_string()
54
+
}
32
55
33
56
#[derive(Object)]
34
57
#[oai(example = true)]
···
54
77
})
55
78
}
56
79
57
-
fn bad_request_handler(err: poem::Error) -> GetRecordResponse {
80
+
fn bad_request_handler_get_record(err: poem::Error) -> GetRecordResponse {
58
81
GetRecordResponse::BadRequest(Json(XrpcErrorResponseObject {
59
82
error: "InvalidRequest".to_string(),
60
83
message: format!("Bad request, here's some info that maybe should not be exposed: {err}"),
61
84
}))
62
85
}
63
86
87
+
fn bad_request_handler_resolve_mini(err: poem::Error) -> ResolveMiniIDResponse {
88
+
ResolveMiniIDResponse::BadRequest(Json(XrpcErrorResponseObject {
89
+
error: "InvalidRequest".to_string(),
90
+
message: format!("Bad request, here's some info that maybe should not be exposed: {err}"),
91
+
}))
92
+
}
93
+
94
+
fn bad_request_handler_resolve_handle(err: poem::Error) -> JustDidResponse {
95
+
JustDidResponse::BadRequest(Json(XrpcErrorResponseObject {
96
+
error: "InvalidRequest".to_string(),
97
+
message: format!("Bad request, here's some info that maybe should not be exposed: {err}"),
98
+
}))
99
+
}
100
+
64
101
#[derive(Object)]
65
102
#[oai(example = true)]
66
103
struct FoundRecordResponseObject {
···
70
107
///
71
108
/// Slingshot will always return the CID, despite it not being a required
72
109
/// response property in the official lexicon.
110
+
///
111
+
/// TODO: probably actually let it be optional, idk are some pds's weirdly
112
+
/// not returning it?
73
113
cid: Option<String>,
74
114
/// the record itself as JSON
75
115
value: serde_json::Value,
···
77
117
impl Example for FoundRecordResponseObject {
78
118
fn example() -> Self {
79
119
Self {
80
-
uri: format!(
81
-
"at://{}/{}/{}",
82
-
example_did(),
83
-
example_collection(),
84
-
example_rkey()
85
-
),
120
+
uri: example_uri(),
86
121
cid: Some("bafyreialv3mzvvxaoyrfrwoer3xmabbmdchvrbyhayd7bga47qjbycy74e".to_string()),
87
122
value: serde_json::json!({
88
123
"$type": "app.bsky.feed.like",
···
97
132
}
98
133
99
134
#[derive(ApiResponse)]
100
-
#[oai(bad_request_handler = "bad_request_handler")]
135
+
#[oai(bad_request_handler = "bad_request_handler_get_record")]
101
136
enum GetRecordResponse {
102
137
/// Record found
103
138
#[oai(status = 200)]
···
107
142
/// The only error name in the repo.getRecord lexicon is `RecordNotFound`,
108
143
/// but the [canonical api docs](https://docs.bsky.app/docs/api/com-atproto-repo-get-record)
109
144
/// also list `InvalidRequest`, `ExpiredToken`, and `InvalidToken`. Of
110
-
/// these, slingshot will only return `RecordNotFound` or `InvalidRequest`.
145
+
/// these, slingshot will only generate `RecordNotFound` or `InvalidRequest`,
146
+
/// but may return any proxied error code from the upstream repo.
147
+
#[oai(status = 400)]
148
+
BadRequest(XrpcError),
149
+
/// Server errors
150
+
#[oai(status = 500)]
151
+
ServerError(XrpcError),
152
+
}
153
+
154
+
#[derive(Object)]
155
+
#[oai(example = true)]
156
+
struct MiniDocResponseObject {
157
+
/// DID, bi-directionally verified if a handle was provided in the query.
158
+
did: String,
159
+
/// The validated handle of the account or `handle.invalid` if the handle
160
+
/// did not bi-directionally match the DID document.
161
+
handle: String,
162
+
/// The identity's PDS URL
163
+
pds: String,
164
+
/// The atproto signing key publicKeyMultibase
165
+
///
166
+
/// Legacy key encoding not supported. the key is returned directly; `id`,
167
+
/// `type`, and `controller` are omitted.
168
+
signing_key: String,
169
+
}
170
+
impl Example for MiniDocResponseObject {
171
+
fn example() -> Self {
172
+
Self {
173
+
did: example_did(),
174
+
handle: example_handle(),
175
+
pds: example_pds(),
176
+
signing_key: example_signing_key(),
177
+
}
178
+
}
179
+
}
180
+
181
+
#[derive(ApiResponse)]
182
+
#[oai(bad_request_handler = "bad_request_handler_resolve_mini")]
183
+
enum ResolveMiniIDResponse {
184
+
/// Identity resolved
185
+
#[oai(status = 200)]
186
+
Ok(Json<MiniDocResponseObject>),
187
+
/// Bad request or identity not resolved
188
+
#[oai(status = 400)]
189
+
BadRequest(XrpcError),
190
+
}
191
+
192
+
#[derive(Object)]
193
+
#[oai(example = true)]
194
+
struct FoundDidResponseObject {
195
+
/// the DID, bi-directionally verified if using Slingshot
196
+
did: String,
197
+
}
198
+
impl Example for FoundDidResponseObject {
199
+
fn example() -> Self {
200
+
Self { did: example_did() }
201
+
}
202
+
}
203
+
204
+
#[derive(ApiResponse)]
205
+
#[oai(bad_request_handler = "bad_request_handler_resolve_handle")]
206
+
enum JustDidResponse {
207
+
/// Resolution succeeded
208
+
#[oai(status = 200)]
209
+
Ok(Json<FoundDidResponseObject>),
210
+
/// Bad request, failed to resolve, or failed to verify
211
+
///
212
+
/// `error` will be one of `InvalidRequest`, `HandleNotFound`.
111
213
#[oai(status = 400)]
112
214
BadRequest(XrpcError),
113
-
/// Just using 500 for potentially upstream errors for now
215
+
/// Something went wrong trying to complete the request
114
216
#[oai(status = 500)]
115
217
ServerError(XrpcError),
116
218
}
···
121
223
repo: Arc<Repo>,
122
224
}
123
225
226
+
#[derive(Tags)]
227
+
enum ApiTags {
228
+
/// Core ATProtocol-compatible APIs.
229
+
///
230
+
/// > [!tip]
231
+
/// > Upstream documentation is available at
232
+
/// > https://docs.bsky.app/docs/category/http-reference
233
+
///
234
+
/// These queries are usually executed directly against the PDS containing
235
+
/// the data being requested. Slingshot offers a caching view of the same
236
+
/// contents with better expected performance and reliability.
237
+
#[oai(rename = "com.atproto.* queries")]
238
+
ComAtproto,
239
+
/// Additional and improved APIs.
240
+
///
241
+
/// These APIs offer small tweaks to the core ATProtocol APIs, with more
242
+
/// convenient [request parameters](#tag/slingshot-specific-queries/GET/xrpc/com.bad-example.repo.getUriRecord)
243
+
/// or [response formats](#tag/slingshot-specific-queries/GET/xrpc/com.bad-example.identity.resolveMiniDoc).
244
+
///
245
+
/// > [!important]
246
+
/// > At the moment, these are namespaced under the `com.bad-example.*` NSID
247
+
/// > prefix, but as they stabilize they may be migrated to an org namespace
248
+
/// > like `blue.microcosm.*`. Support for aliasing to `com.bad-example.*`
249
+
/// > will be maintained as long as it's in use.
250
+
#[oai(rename = "slingshot-specific queries")]
251
+
Custom,
252
+
}
253
+
124
254
#[OpenApi]
125
255
impl Xrpc {
126
256
/// com.atproto.repo.getRecord
127
257
///
128
258
/// Get a single record from a repository. Does not require auth.
129
259
///
130
-
/// See https://docs.bsky.app/docs/api/com-atproto-repo-get-record for the
131
-
/// canonical XRPC documentation that this endpoint aims to be compatible
132
-
/// with.
133
-
#[oai(path = "/com.atproto.repo.getRecord", method = "get")]
260
+
/// > [!tip]
261
+
/// > See also the [canonical `com.atproto` XRPC documentation](https://docs.bsky.app/docs/api/com-atproto-repo-get-record)
262
+
/// > that this endpoint aims to be compatible with.
263
+
#[oai(
264
+
path = "/com.atproto.repo.getRecord",
265
+
method = "get",
266
+
tag = "ApiTags::ComAtproto"
267
+
)]
134
268
async fn get_record(
135
269
&self,
136
270
/// The DID or handle of the repo
···
146
280
///
147
281
/// If not specified, then return the most recent version.
148
282
///
149
-
/// If specified and a newer version of the record exists, returns 404 not
150
-
/// found. That is: slingshot only retains the most recent version of a
151
-
/// record. (TODO: verify bsky behaviour for mismatched/old CID)
283
+
/// If a stale `CID` is specified and a newer version of the record
284
+
/// exists, Slingshot returns a `NotFound` error. That is: Slingshot
285
+
/// only retains the most recent version of a record.
286
+
Query(cid): Query<Option<String>>,
287
+
) -> GetRecordResponse {
288
+
self.get_record_impl(repo, collection, rkey, cid).await
289
+
}
290
+
291
+
/// com.bad-example.repo.getUriRecord
292
+
///
293
+
/// Ergonomic complement to [`com.atproto.repo.getRecord`](https://docs.bsky.app/docs/api/com-atproto-repo-get-record)
294
+
/// which accepts an `at-uri` instead of individual repo/collection/rkey params
295
+
#[oai(
296
+
path = "/com.bad-example.repo.getUriRecord",
297
+
method = "get",
298
+
tag = "ApiTags::Custom"
299
+
)]
300
+
async fn get_uri_record(
301
+
&self,
302
+
/// The at-uri of the record
303
+
///
304
+
/// The identifier can be a DID or an atproto handle, and the collection
305
+
/// and rkey segments must be present.
306
+
#[oai(example = "example_uri")]
307
+
Query(at_uri): Query<String>,
308
+
/// Optional: the CID of the version of the record.
309
+
///
310
+
/// If not specified, then return the most recent version.
311
+
///
312
+
/// > [!tip]
313
+
/// > If specified and a newer version of the record exists, returns 404 not
314
+
/// > found. That is: slingshot only retains the most recent version of a
315
+
/// > record.
152
316
Query(cid): Query<Option<String>>,
153
317
) -> GetRecordResponse {
318
+
let bad_at_uri = || {
319
+
GetRecordResponse::BadRequest(xrpc_error(
320
+
"InvalidRequest",
321
+
"at-uri does not appear to be valid",
322
+
))
323
+
};
324
+
325
+
let Some(normalized) = normalize_at_uri(&at_uri) else {
326
+
return bad_at_uri();
327
+
};
328
+
329
+
// TODO: move this to links
330
+
let Some(rest) = normalized.strip_prefix("at://") else {
331
+
return bad_at_uri();
332
+
};
333
+
let Some((repo, rest)) = rest.split_once('/') else {
334
+
return bad_at_uri();
335
+
};
336
+
let Some((collection, rest)) = rest.split_once('/') else {
337
+
return bad_at_uri();
338
+
};
339
+
let rkey = if let Some((rkey, _rest)) = rest.split_once('?') {
340
+
rkey
341
+
} else {
342
+
rest
343
+
};
344
+
345
+
self.get_record_impl(
346
+
repo.to_string(),
347
+
collection.to_string(),
348
+
rkey.to_string(),
349
+
cid,
350
+
)
351
+
.await
352
+
}
353
+
354
+
/// com.atproto.identity.resolveHandle
355
+
///
356
+
/// Resolves an atproto [`handle`](https://atproto.com/guides/glossary#handle)
357
+
/// (hostname) to a [`DID`](https://atproto.com/guides/glossary#did-decentralized-id).
358
+
///
359
+
/// > [!tip]
360
+
/// > Compatibility note: Slingshot will **always bi-directionally verify
361
+
/// > against the DID document**, which is optional according to the
362
+
/// > authoritative lexicon.
363
+
///
364
+
/// > [!tip]
365
+
/// > See the [canonical `com.atproto` XRPC documentation](https://docs.bsky.app/docs/api/com-atproto-identity-resolve-handle)
366
+
/// > that this endpoint aims to be compatible with.
367
+
#[oai(
368
+
path = "/com.atproto.identity.resolveHandle",
369
+
method = "get",
370
+
tag = "ApiTags::ComAtproto"
371
+
)]
372
+
async fn resolve_handle(
373
+
&self,
374
+
/// The handle to resolve.
375
+
#[oai(example = "example_handle")]
376
+
Query(handle): Query<String>,
377
+
) -> JustDidResponse {
378
+
let Ok(handle) = Handle::new(handle) else {
379
+
return JustDidResponse::BadRequest(xrpc_error("InvalidRequest", "not a valid handle"));
380
+
};
381
+
382
+
let Ok(alleged_did) = self.identity.handle_to_did(handle.clone()).await else {
383
+
return JustDidResponse::ServerError(xrpc_error("Failed", "Could not resolve handle"));
384
+
};
385
+
386
+
let Some(alleged_did) = alleged_did else {
387
+
return JustDidResponse::BadRequest(xrpc_error(
388
+
"HandleNotFound",
389
+
"Could not resolve handle to a DID",
390
+
));
391
+
};
392
+
393
+
let Ok(partial_doc) = self.identity.did_to_partial_mini_doc(&alleged_did).await else {
394
+
return JustDidResponse::ServerError(xrpc_error("Failed", "Could not fetch DID doc"));
395
+
};
396
+
397
+
let Some(partial_doc) = partial_doc else {
398
+
return JustDidResponse::BadRequest(xrpc_error(
399
+
"HandleNotFound",
400
+
"Resolved handle but could not find DID doc for the DID",
401
+
));
402
+
};
403
+
404
+
if partial_doc.unverified_handle != handle {
405
+
return JustDidResponse::BadRequest(xrpc_error(
406
+
"HandleNotFound",
407
+
"Resolved handle failed bi-directional validation",
408
+
));
409
+
}
410
+
411
+
JustDidResponse::Ok(Json(FoundDidResponseObject {
412
+
did: alleged_did.to_string(),
413
+
}))
414
+
}
415
+
416
+
/// com.bad-example.identity.resolveMiniDoc
417
+
///
418
+
/// Like [com.atproto.identity.resolveIdentity](https://docs.bsky.app/docs/api/com-atproto-identity-resolve-identity)
419
+
/// but instead of the full `didDoc` it returns an atproto-relevant subset.
420
+
#[oai(
421
+
path = "/com.bad-example.identity.resolveMiniDoc",
422
+
method = "get",
423
+
tag = "ApiTags::Custom"
424
+
)]
425
+
async fn resolve_mini_id(
426
+
&self,
427
+
/// Handle or DID to resolve
428
+
#[oai(example = "example_handle")]
429
+
Query(identifier): Query<String>,
430
+
) -> ResolveMiniIDResponse {
431
+
let invalid = |reason: &'static str| {
432
+
ResolveMiniIDResponse::BadRequest(xrpc_error("InvalidRequest", reason))
433
+
};
434
+
435
+
let mut unverified_handle = None;
436
+
let did = match Did::new(identifier.clone()) {
437
+
Ok(did) => did,
438
+
Err(_) => {
439
+
let Ok(alleged_handle) = Handle::new(identifier) else {
440
+
return invalid("Identifier was not a valid DID or handle");
441
+
};
442
+
443
+
match self.identity.handle_to_did(alleged_handle.clone()).await {
444
+
Ok(res) => {
445
+
if let Some(did) = res {
446
+
// we did it joe
447
+
unverified_handle = Some(alleged_handle);
448
+
did
449
+
} else {
450
+
return invalid("Could not resolve handle identifier to a DID");
451
+
}
452
+
}
453
+
Err(e) => {
454
+
log::debug!("failed to resolve handle: {e}");
455
+
// TODO: ServerError not BadRequest
456
+
return invalid("Errored while trying to resolve handle to DID");
457
+
}
458
+
}
459
+
}
460
+
};
461
+
let Ok(partial_doc) = self.identity.did_to_partial_mini_doc(&did).await else {
462
+
return invalid("Failed to get DID doc");
463
+
};
464
+
let Some(partial_doc) = partial_doc else {
465
+
return invalid("Failed to find DID doc");
466
+
};
467
+
468
+
// ok so here's where we're at:
469
+
// ✅ we have a DID
470
+
// ✅ we have a partial doc
471
+
// 🔶 if we have a handle, it's from the `identifier` (user-input)
472
+
// -> then we just need to compare to the partial doc to confirm
473
+
// -> else we need to resolve the DID doc's to a handle and check
474
+
let handle = if let Some(h) = unverified_handle {
475
+
if h == partial_doc.unverified_handle {
476
+
h.to_string()
477
+
} else {
478
+
"handle.invalid".to_string()
479
+
}
480
+
} else {
481
+
let Ok(handle_did) = self
482
+
.identity
483
+
.handle_to_did(partial_doc.unverified_handle.clone())
484
+
.await
485
+
else {
486
+
return invalid("Failed to get DID doc's handle");
487
+
};
488
+
let Some(handle_did) = handle_did else {
489
+
return invalid("Failed to resolve DID doc's handle");
490
+
};
491
+
if handle_did == did {
492
+
partial_doc.unverified_handle.to_string()
493
+
} else {
494
+
"handle.invalid".to_string()
495
+
}
496
+
};
497
+
498
+
ResolveMiniIDResponse::Ok(Json(MiniDocResponseObject {
499
+
did: did.to_string(),
500
+
handle,
501
+
pds: partial_doc.pds,
502
+
signing_key: partial_doc.signing_key,
503
+
}))
504
+
}
505
+
506
+
async fn get_record_impl(
507
+
&self,
508
+
repo: String,
509
+
collection: String,
510
+
rkey: String,
511
+
cid: Option<String>,
512
+
) -> GetRecordResponse {
154
513
let did = match Did::new(repo.clone()) {
155
514
Ok(did) => did,
156
515
Err(_) => {
157
516
let Ok(handle) = Handle::new(repo) else {
158
517
return GetRecordResponse::BadRequest(xrpc_error(
159
518
"InvalidRequest",
160
-
"repo was not a valid DID or handle",
519
+
"Repo was not a valid DID or handle",
161
520
));
162
521
};
163
-
if let Ok(res) = self.identity.handle_to_did(handle).await {
164
-
if let Some(did) = res {
165
-
did
166
-
} else {
167
-
return GetRecordResponse::BadRequest(xrpc_error(
168
-
"InvalidRequest",
169
-
"Could not resolve handle repo to a DID",
522
+
match self.identity.handle_to_did(handle).await {
523
+
Ok(res) => {
524
+
if let Some(did) = res {
525
+
did
526
+
} else {
527
+
return GetRecordResponse::BadRequest(xrpc_error(
528
+
"InvalidRequest",
529
+
"Could not resolve handle repo to a DID",
530
+
));
531
+
}
532
+
}
533
+
Err(e) => {
534
+
log::debug!("handle resolution failed: {e}");
535
+
return GetRecordResponse::ServerError(xrpc_error(
536
+
"ResolutionFailed",
537
+
"Errored while trying to resolve handle to DID",
170
538
));
171
539
}
172
-
} else {
173
-
return GetRecordResponse::ServerError(xrpc_error(
174
-
"ResolutionFailed",
175
-
"errored while trying to resolve handle to DID",
176
-
));
177
540
}
178
541
}
179
542
};
···
181
544
let Ok(collection) = Nsid::new(collection) else {
182
545
return GetRecordResponse::BadRequest(xrpc_error(
183
546
"InvalidRequest",
184
-
"invalid NSID for collection",
547
+
"Invalid NSID for collection",
185
548
));
186
549
};
187
550
188
551
let Ok(rkey) = RecordKey::new(rkey) else {
189
-
return GetRecordResponse::BadRequest(xrpc_error("InvalidRequest", "invalid rkey"));
552
+
return GetRecordResponse::BadRequest(xrpc_error("InvalidRequest", "Invalid rkey"));
190
553
};
191
554
192
555
let cid: Option<Cid> = if let Some(cid) = cid {
193
556
let Ok(cid) = Cid::from_str(&cid) else {
194
-
return GetRecordResponse::BadRequest(xrpc_error("InvalidRequest", "invalid CID"));
557
+
return GetRecordResponse::BadRequest(xrpc_error("InvalidRequest", "Invalid CID"));
195
558
};
196
559
Some(cid)
197
560
} else {
···
200
563
201
564
let at_uri = format!("at://{}/{}/{}", &*did, &*collection, &*rkey);
202
565
203
-
let entry = self
566
+
let fr = self
204
567
.cache
205
568
.fetch(at_uri.clone(), {
206
569
let cid = cid.clone();
···
212
575
.map_err(|e| foyer::Error::Other(Box::new(e)))
213
576
}
214
577
})
215
-
.await
216
-
.unwrap(); // todo
578
+
.await;
579
+
580
+
let entry = match fr {
581
+
Ok(e) => e,
582
+
Err(foyer::Error::Other(e)) => {
583
+
let record_error = match e.downcast::<RecordError>() {
584
+
Ok(e) => e,
585
+
Err(e) => {
586
+
log::error!("error (foyer other) getting cache entry, {e:?}");
587
+
return GetRecordResponse::ServerError(xrpc_error(
588
+
"ServerError",
589
+
"sorry, something went wrong",
590
+
));
591
+
}
592
+
};
593
+
let RecordError::UpstreamBadRequest(ErrorResponseObject { error, message }) =
594
+
*record_error
595
+
else {
596
+
log::error!("RecordError getting cache entry, {record_error:?}");
597
+
return GetRecordResponse::ServerError(xrpc_error(
598
+
"ServerError",
599
+
"sorry, something went wrong",
600
+
));
601
+
};
217
602
218
-
// TODO: actual 404
603
+
// all of the noise around here is so that we can ultimately reach this:
604
+
// upstream BadRequest extracted from the foyer result which we can proxy back
605
+
return GetRecordResponse::BadRequest(xrpc_error(
606
+
error,
607
+
format!("Upstream bad request: {message}"),
608
+
));
609
+
}
610
+
Err(e) => {
611
+
log::error!("error (foyer) getting cache entry, {e:?}");
612
+
return GetRecordResponse::ServerError(xrpc_error(
613
+
"ServerError",
614
+
"sorry, something went wrong",
615
+
));
616
+
}
617
+
};
219
618
220
619
match *entry {
221
620
CachedRecord::Found(ref raw) => {
···
276
675
///
277
676
/// - PDS proxying offers a level of client IP anonymity from slingshot
278
677
/// - slingshot *may* implement more generous per-user rate-limits for proxied requests in the future
279
-
fn get_did_doc(host: &str) -> impl Endpoint + use<> {
678
+
fn get_did_doc(domain: &str) -> impl Endpoint + use<> {
280
679
let doc = poem::web::Json(AppViewDoc {
281
-
id: format!("did:web:{host}"),
680
+
id: format!("did:web:{domain}"),
282
681
service: [AppViewService {
283
682
id: "#slingshot".to_string(),
284
683
r#type: "SlingshotRecordProxy".to_string(),
285
-
service_endpoint: format!("https://{host}"),
684
+
service_endpoint: format!("https://{domain}"),
286
685
}],
287
686
});
288
687
make_sync(move |_| doc.clone())
···
292
691
cache: HybridCache<String, CachedRecord>,
293
692
identity: Identity,
294
693
repo: Repo,
295
-
host: Option<String>,
296
-
_shutdown: CancellationToken,
694
+
domain: Option<String>,
695
+
acme_contact: Option<String>,
696
+
certs: Option<PathBuf>,
697
+
host: String,
698
+
port: u16,
699
+
shutdown: CancellationToken,
297
700
) -> Result<(), ServerError> {
298
701
let repo = Arc::new(repo);
299
702
let api_service = OpenApiService::new(
···
305
708
"Slingshot",
306
709
env!("CARGO_PKG_VERSION"),
307
710
)
308
-
.server("http://localhost:3000")
309
-
.url_prefix("/xrpc");
711
+
.server(if let Some(ref h) = domain {
712
+
format!("https://{h}")
713
+
} else {
714
+
"http://localhost:3000".to_string()
715
+
})
716
+
.url_prefix("/xrpc")
717
+
.contact(
718
+
ContactObject::new()
719
+
.name("@microcosm.blue")
720
+
.url("https://bsky.app/profile/microcosm.blue"),
721
+
)
722
+
.description(include_str!("../api-description.md"))
723
+
.external_document(ExternalDocumentObject::new(
724
+
"https://microcosm.blue/slingshot",
725
+
));
310
726
311
727
let mut app = Route::new()
312
-
.nest("/", api_service.scalar())
313
-
.nest("/openapi.json", api_service.spec_endpoint())
728
+
.at("/", StaticFileEndpoint::new("./static/index.html"))
729
+
.nest("/openapi", api_service.spec_endpoint())
314
730
.nest("/xrpc/", api_service);
315
731
316
-
if let Some(host) = host {
732
+
if let Some(domain) = domain {
317
733
rustls::crypto::aws_lc_rs::default_provider()
318
734
.install_default()
319
735
.expect("alskfjalksdjf");
320
736
321
-
app = app.at("/.well-known/did.json", get_did_doc(&host));
737
+
app = app.at("/.well-known/did.json", get_did_doc(&domain));
322
738
323
-
let auto_cert = AutoCert::builder()
739
+
let mut auto_cert = AutoCert::builder()
324
740
.directory_url(LETS_ENCRYPT_PRODUCTION)
325
-
.domain(&host)
326
-
.build()
327
-
.map_err(ServerError::AcmeBuildError)?;
741
+
.domain(&domain);
742
+
if let Some(contact) = acme_contact {
743
+
auto_cert = auto_cert.contact(contact);
744
+
}
745
+
if let Some(certs) = certs {
746
+
auto_cert = auto_cert.cache_path(certs);
747
+
}
748
+
let auto_cert = auto_cert.build().map_err(ServerError::AcmeBuildError)?;
328
749
329
-
run(TcpListener::bind("0.0.0.0:443").acme(auto_cert), app).await
750
+
run(
751
+
TcpListener::bind("0.0.0.0:443").acme(auto_cert),
752
+
app,
753
+
shutdown,
754
+
)
755
+
.await
330
756
} else {
331
-
run(TcpListener::bind("127.0.0.1:3000"), app).await
757
+
run(
758
+
TcpListener::bind(format!("{host}:{port}")),
759
+
app,
760
+
shutdown,
761
+
)
762
+
.await
332
763
}
333
764
}
334
765
335
-
async fn run<L>(listener: L, app: Route) -> Result<(), ServerError>
766
+
async fn run<L>(listener: L, app: Route, shutdown: CancellationToken) -> Result<(), ServerError>
336
767
where
337
768
L: Listener + 'static,
338
769
{
···
343
774
.allow_methods([Method::GET])
344
775
.allow_credentials(false),
345
776
)
777
+
.with(CatchPanic::new())
346
778
.with(Tracing);
347
779
Server::new(listener)
348
780
.name("slingshot")
349
-
.run(app)
781
+
.run_with_graceful_shutdown(app, shutdown.cancelled(), None)
350
782
.await
351
783
.map_err(ServerError::ServerExited)
784
+
.inspect(|()| log::info!("server ended. goodbye."))
352
785
}
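
A hypothetical client call against the new mini-doc endpoint (the host and identifier are placeholders; the fields come back per `MiniDocResponseObject` above):

```rust
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let doc: serde_json::Value = reqwest::Client::new()
        .get("http://localhost:3000/xrpc/com.bad-example.identity.resolveMiniDoc")
        .query(&[("identifier", "bad-example.com")])
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("{doc:#}"); // did, handle (or handle.invalid), pds, signing key
    Ok(())
}
```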
+67
slingshot/static/index.html
+67
slingshot/static/index.html
···
1
+
<!doctype html>
2
+
<html lang="en">
3
+
<head>
4
+
<meta charset="utf-8" />
5
+
<title>Slingshot: atproto edge record cache</title>
6
+
<meta name="viewport" content="width=device-width, initial-scale=1" />
7
+
<meta name="description" content="API Documentation for Slingshot, a firehose-listening atproto edge record and identity cache." />
8
+
<style>
9
+
:root {
10
+
--scalar-small: 13px;
11
+
}
12
+
.scalar-app .markdown .markdown-alert {
13
+
font-size: var(--scalar-small);
14
+
}
15
+
.sidebar-heading-link-title {
16
+
line-height: 1.2;
17
+
}
18
+
.custom-header {
19
+
height: 42px;
20
+
background-color: #221828;
21
+
box-shadow: inset 0 -1px 0 var(--scalar-border-color);
22
+
color: var(--scalar-color-1);
23
+
font-size: var(--scalar-font-size-3);
24
+
font-family: 'Iowan Old Style', 'Palatino Linotype', 'URW Palladio L', P052, serif;
25
+
padding: 0 18px;
26
+
justify-content: space-between;
27
+
}
28
+
.custom-header,
29
+
.custom-header nav {
30
+
display: flex;
31
+
align-items: center;
32
+
gap: 18px;
33
+
}
34
+
.custom-header a:hover {
35
+
color: var(--scalar-color-2);
36
+
}
37
+
38
+
.light-mode .custom-header {
39
+
background-color: thistle;
40
+
}
41
+
</style>
42
+
</head>
43
+
<body>
44
+
<header class="custom-header scalar-app">
45
+
<p>
46
+
TODO: thing
47
+
</p>
48
+
<nav>
49
+
<b>a <a href="https://microcosm.blue">microcosm</a> project</b>
50
+
<a href="https://bsky.app/profile/microcosm.blue">@microcosm.blue</a>
51
+
<a href="https://github.com/at-microcosm">github</a>
52
+
</nav>
53
+
</header>
54
+
55
+
<script id="api-reference" type="application/json" data-url="/openapi"></script>
56
+
57
+
<script>
58
+
var configuration = {
59
+
theme: 'purple',
60
+
hideModels: true,
61
+
}
62
+
document.getElementById('api-reference').dataset.configuration = JSON.stringify(configuration)
63
+
</script>
64
+
65
+
<script src="https://cdn.jsdelivr.net/npm/@scalar/api-reference"></script>
66
+
</body>
67
+
</html>
+5
-5
spacedust/src/subscriber.rs
+5
-5
spacedust/src/subscriber.rs
···
42
42
loop {
43
43
tokio::select! {
44
44
l = receiver.recv() => match l {
45
-
Ok(link) => if self.filter(&link.properties) {
46
-
if let Err(e) = ws_sender.send(link.message.clone()).await {
47
-
log::warn!("failed to send link, dropping subscriber: {e:?}");
48
-
break;
49
-
}
45
+
Ok(link) => if self.filter(&link.properties)
46
+
&& let Err(e) = ws_sender.send(link.message.clone()).await
47
+
{
48
+
log::warn!("failed to send link, dropping subscriber: {e:?}");
49
+
break;
50
50
},
51
51
Err(RecvError::Closed) => self.shutdown.cancel(),
52
52
Err(RecvError::Lagged(n)) => {
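
The collapsed condition above is a let-chain (stable as of the Rust 2024 edition): a `let` pattern can sit directly in an `if` conjunction, flattening the old nested `if let`. In miniature:

```rust
fn demo(maybe: Option<i32>, flag: bool) {
    // edition 2024: `&&` can chain a boolean with a `let` binding
    if flag && let Some(n) = maybe {
        println!("got {n}");
    }
}
```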
+1
-1
ufos/Cargo.toml
+1
-1
ufos/Cargo.toml
···
13
13
clap = { version = "4.5.31", features = ["derive"] }
14
14
dropshot = "0.16.0"
15
15
env_logger = "0.11.7"
16
-
fjall = { version = "2.8.0", features = ["lz4"] }
16
+
fjall = { git = "https://github.com/fjall-rs/fjall.git", features = ["lz4"] }
17
17
getrandom = "0.3.3"
18
18
http = "1.3.1"
19
19
jetstream = { path = "../jetstream", features = ["metrics"] }
+42
-10
ufos/src/main.rs
+42
-10
ufos/src/main.rs
···
4
4
use metrics_exporter_prometheus::PrometheusBuilder;
5
5
use std::path::PathBuf;
6
6
use std::time::{Duration, SystemTime};
7
+
use tokio::task::JoinSet;
7
8
use ufos::consumer;
8
9
use ufos::file_consumer;
9
10
use ufos::server;
···
72
73
Ok(())
73
74
}
74
75
75
-
async fn go<B: StoreBackground>(
76
+
async fn go<B: StoreBackground + 'static>(
76
77
args: Args,
77
78
read_store: impl StoreReader + 'static + Clone,
78
79
mut write_store: impl StoreWriter<B> + 'static,
79
80
cursor: Option<Cursor>,
80
81
sketch_secret: SketchSecretPrefix,
81
82
) -> anyhow::Result<()> {
83
+
let mut whatever_tasks: JoinSet<anyhow::Result<()>> = JoinSet::new();
84
+
let mut consumer_tasks: JoinSet<anyhow::Result<()>> = JoinSet::new();
85
+
82
86
println!("starting server with storage...");
83
87
let serving = server::serve(read_store.clone());
88
+
whatever_tasks.spawn(async move {
89
+
serving.await.map_err(|e| {
90
+
log::warn!("server ended: {e}");
91
+
anyhow::anyhow!(e)
92
+
})
93
+
});
84
94
85
95
if args.pause_writer {
86
96
log::info!("not starting jetstream or the write loop.");
87
-
serving.await.map_err(|e| anyhow::anyhow!(e))?;
97
+
for t in whatever_tasks.join_all().await {
98
+
if let Err(e) = t {
99
+
return Err(anyhow::anyhow!(e));
100
+
}
101
+
}
88
102
return Ok(());
89
103
}
90
104
···
102
116
let rolling = write_store
103
117
.background_tasks(args.reroll)?
104
118
.run(args.backfill);
105
-
let consuming = write_store.receive_batches(batches);
119
+
whatever_tasks.spawn(async move {
120
+
rolling
121
+
.await
122
+
.inspect_err(|e| log::warn!("rollup ended: {e}"))?;
123
+
Ok(())
124
+
});
106
125
107
-
let stating = do_update_stuff(read_store);
126
+
consumer_tasks.spawn(async move {
127
+
write_store
128
+
.receive_batches(batches)
129
+
.await
130
+
.inspect_err(|e| log::warn!("consumer ended: {e}"))?;
131
+
Ok(())
132
+
});
133
+
134
+
whatever_tasks.spawn(async move {
135
+
do_update_stuff(read_store).await;
136
+
log::warn!("status task ended");
137
+
Ok(())
138
+
});
108
139
109
140
install_metrics_server()?;
110
141
111
-
tokio::select! {
112
-
z = serving => log::warn!("serve task ended: {z:?}"),
113
-
z = rolling => log::warn!("rollup task ended: {z:?}"),
114
-
z = consuming => log::warn!("consuming task ended: {z:?}"),
115
-
z = stating => log::warn!("status task ended: {z:?}"),
116
-
};
142
+
for (i, t) in consumer_tasks.join_all().await.iter().enumerate() {
143
+
log::warn!("task {i} done: {t:?}");
144
+
}
145
+
146
+
println!("consumer tasks all completed, killing the others");
147
+
whatever_tasks.shutdown().await;
117
148
118
149
println!("bye!");
119
150
···
162
193
interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
163
194
loop {
164
195
interval.tick().await;
196
+
read_store.update_metrics();
165
197
match read_store.get_consumer_info().await {
166
198
Err(e) => log::warn!("failed to get jetstream consumer info: {e:?}"),
167
199
Ok(ConsumerInfo::Jetstream {
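
The restructured shutdown is a two-tier `JoinSet` pattern: drain the consumer set, then abort whatever else is still running. A self-contained sketch (the task bodies are placeholders):

```rust
use tokio::task::JoinSet;

#[tokio::main]
async fn main() {
    let mut consumer_tasks: JoinSet<Result<(), String>> = JoinSet::new();
    let mut whatever_tasks: JoinSet<Result<(), String>> = JoinSet::new();

    consumer_tasks.spawn(async { Ok(()) }); // finishes on its own
    whatever_tasks.spawn(async {
        std::future::pending::<()>().await; // runs until aborted
        Ok(())
    });

    for (i, t) in consumer_tasks.join_all().await.iter().enumerate() {
        println!("consumer task {i} done: {t:?}");
    }
    whatever_tasks.shutdown().await; // abort and await the rest
}
```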
+11
-1
ufos/src/storage.rs
+11
-1
ufos/src/storage.rs
···
41
41
Unit::Microseconds,
42
42
"batches that took more than 3s to insert"
43
43
);
44
+
describe_histogram!(
45
+
"storage_batch_insert_time",
46
+
Unit::Microseconds,
47
+
"total time to insert one commit batch"
48
+
);
44
49
while let Some(event_batch) = batches.recv().await {
45
50
let token = CancellationToken::new();
46
51
let cancelled = token.clone();
···
69
74
let mut me = self.clone();
70
75
move || {
71
76
let _guard = token.drop_guard();
72
-
me.insert_batch(event_batch)
77
+
let t0 = Instant::now();
78
+
let r = me.insert_batch(event_batch);
79
+
histogram!("storage_batch_insert_time").record(t0.elapsed().as_micros() as f64);
80
+
r
73
81
}
74
82
})
75
83
.await??;
···
103
111
#[async_trait]
104
112
pub trait StoreReader: Send + Sync {
105
113
fn name(&self) -> String;
114
+
115
+
fn update_metrics(&self) {}
106
116
107
117
async fn get_storage_stats(&self) -> StorageResult<serde_json::Value>;
108
118
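
The timing added around `insert_batch` is the usual measure-around-a-closure shape; extracted as a sketch (metric name as registered in the diff):

```rust
use metrics::histogram;
use std::time::Instant;

fn timed<T>(f: impl FnOnce() -> T) -> T {
    let t0 = Instant::now();
    let r = f();
    histogram!("storage_batch_insert_time").record(t0.elapsed().as_micros() as f64);
    r
}
```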
+104
-20
ufos/src/storage_fjall.rs
+104
-20
ufos/src/storage_fjall.rs
···
23
23
Batch as FjallBatch, Config, Keyspace, PartitionCreateOptions, PartitionHandle, Snapshot,
24
24
};
25
25
use jetstream::events::Cursor;
26
-
use metrics::{counter, describe_counter, describe_histogram, histogram, Unit};
26
+
use lsm_tree::AbstractTree;
27
+
use metrics::{
28
+
counter, describe_counter, describe_gauge, describe_histogram, gauge, histogram, Unit,
29
+
};
27
30
use std::collections::{HashMap, HashSet};
28
31
use std::iter::Peekable;
29
32
use std::ops::Bound;
···
227
230
feeds: feeds.clone(),
228
231
records: records.clone(),
229
232
rollups: rollups.clone(),
233
+
queues: queues.clone(),
230
234
};
235
+
reader.describe_metrics();
231
236
let writer = FjallWriter {
232
237
bg_taken: Arc::new(AtomicBool::new(false)),
233
238
keyspace,
···
237
242
rollups,
238
243
queues,
239
244
};
245
+
writer.describe_metrics();
240
246
Ok((reader, writer, js_cursor, sketch_secret))
241
247
}
242
248
}
···
250
256
feeds: PartitionHandle,
251
257
records: PartitionHandle,
252
258
rollups: PartitionHandle,
259
+
queues: PartitionHandle,
253
260
}
254
261
255
262
/// An iterator that knows how to skip over deleted/invalidated records
···
381
388
type CollectionSerieses = HashMap<Nsid, Vec<CountsValue>>;
382
389
383
390
impl FjallReader {
391
+
fn describe_metrics(&self) {
392
+
describe_gauge!(
393
+
"storage_fjall_l0_run_count",
394
+
Unit::Count,
395
+
"number of L0 runs in a partition"
396
+
);
397
+
describe_gauge!(
398
+
"storage_fjall_keyspace_disk_space",
399
+
Unit::Bytes,
400
+
"total storage used according to fjall"
401
+
);
402
+
describe_gauge!(
403
+
"storage_fjall_journal_count",
404
+
Unit::Count,
405
+
"total keyspace journals according to fjall"
406
+
);
407
+
describe_gauge!(
408
+
"storage_fjall_keyspace_sequence",
409
+
Unit::Count,
410
+
"fjall keyspace sequence"
411
+
);
412
+
}
413
+
384
414
fn get_storage_stats(&self) -> StorageResult<serde_json::Value> {
385
415
let rollup_cursor =
386
416
get_static_neu::<NewRollupCursorKey, NewRollupCursorValue>(&self.global)?
···
1000
1030
fn name(&self) -> String {
1001
1031
"fjall storage v2".into()
1002
1032
}
1033
+
fn update_metrics(&self) {
1034
+
gauge!("storage_fjall_l0_run_count", "partition" => "global")
1035
+
.set(self.global.tree.l0_run_count() as f64);
1036
+
gauge!("storage_fjall_l0_run_count", "partition" => "feeds")
1037
+
.set(self.feeds.tree.l0_run_count() as f64);
1038
+
gauge!("storage_fjall_l0_run_count", "partition" => "records")
1039
+
.set(self.records.tree.l0_run_count() as f64);
1040
+
gauge!("storage_fjall_l0_run_count", "partition" => "rollups")
1041
+
.set(self.rollups.tree.l0_run_count() as f64);
1042
+
gauge!("storage_fjall_l0_run_count", "partition" => "queues")
1043
+
.set(self.queues.tree.l0_run_count() as f64);
1044
+
gauge!("storage_fjall_keyspace_disk_space").set(self.keyspace.disk_space() as f64);
1045
+
gauge!("storage_fjall_journal_count").set(self.keyspace.journal_count() as f64);
1046
+
gauge!("storage_fjall_keyspace_sequence").set(self.keyspace.instant() as f64);
1047
+
}
1003
1048
async fn get_storage_stats(&self) -> StorageResult<serde_json::Value> {
1004
1049
let s = self.clone();
1005
1050
tokio::task::spawn_blocking(move || FjallReader::get_storage_stats(&s)).await?
···
1091
1136
}
1092
1137
1093
1138
impl FjallWriter {
1139
+
fn describe_metrics(&self) {
1140
+
describe_histogram!(
1141
+
"storage_insert_batch_db_batch_items",
1142
+
Unit::Count,
1143
+
"how many items are in the fjall batch for batched inserts"
1144
+
);
1145
+
describe_histogram!(
1146
+
"storage_rollup_counts_db_batch_items",
1147
+
Unit::Count,
1148
+
"how many items are in the fjall batch for a timlies rollup"
1149
+
);
1150
+
describe_counter!(
1151
+
"storage_delete_account_partial_commits",
1152
+
Unit::Count,
1153
+
"fjall checkpoint commits for cleaning up accounts with too many records"
1154
+
);
1155
+
describe_counter!(
1156
+
"storage_delete_account_completions",
1157
+
Unit::Count,
1158
+
"total count of account deletes handled"
1159
+
);
1160
+
describe_counter!(
1161
+
"storage_delete_account_records_deleted",
1162
+
Unit::Count,
1163
+
"total records deleted when handling account deletes"
1164
+
);
1165
+
describe_histogram!(
1166
+
"storage_trim_dirty_nsids",
1167
+
Unit::Count,
1168
+
"number of NSIDs trimmed"
1169
+
);
1170
+
describe_histogram!(
1171
+
"storage_trim_duration",
1172
+
Unit::Microseconds,
1173
+
"how long it took to trim the dirty NSIDs"
1174
+
);
1175
+
describe_counter!(
1176
+
"storage_trim_removed",
1177
+
Unit::Count,
1178
+
"how many records were removed during trim"
1179
+
);
1180
+
}
1094
1181
fn rollup_delete_account(
1095
1182
&mut self,
1096
1183
cursor: Cursor,
···
1222
1309
AllTimeRecordsKey::new(new_creates_count.into(), &nsid).to_db_bytes()?,
1223
1310
),
1224
1311
};
1225
-
batch.remove(&self.rollups, &old_k); // TODO: when fjall gets weak delete, this will hopefully work way better
1312
+
// remove_weak is allowed here because the secondary ranking index only ever inserts once at a key
1313
+
batch.remove_weak(&self.rollups, &old_k);
1226
1314
batch.insert(&self.rollups, &new_k, "");
1227
1315
}
1228
1316
···
1246
1334
AllTimeDidsKey::new(new_dids_estimate.into(), &nsid).to_db_bytes()?,
1247
1335
),
1248
1336
};
1249
-
batch.remove(&self.rollups, &old_k); // TODO: when fjall gets weak delete, this will hopefully work way better
1337
+
// remove_weak is allowed here because the secondary ranking index only ever inserts once at a key
1338
+
batch.remove_weak(&self.rollups, &old_k);
1250
1339
batch.insert(&self.rollups, &new_k, "");
1251
1340
}
1252
1341
···
1256
1345
1257
1346
insert_batch_static_neu::<NewRollupCursorKey>(&mut batch, &self.global, last_cursor)?;
1258
1347
1348
+
histogram!("storage_rollup_counts_db_batch_items").record(batch.len() as f64);
1259
1349
batch.commit()?;
1260
1350
Ok((cursors_advanced, dirty_nsids))
1261
1351
}
···
1266
1356
if self.bg_taken.swap(true, Ordering::SeqCst) {
1267
1357
return Err(StorageError::BackgroundAlreadyStarted);
1268
1358
}
1269
-
describe_histogram!(
1270
-
"storage_trim_dirty_nsids",
1271
-
Unit::Count,
1272
-
"number of NSIDs trimmed"
1273
-
);
1274
-
describe_histogram!(
1275
-
"storage_trim_duration",
1276
-
Unit::Microseconds,
1277
-
"how long it took to trim the dirty NSIDs"
1278
-
);
1279
-
describe_counter!(
1280
-
"storage_trim_removed",
1281
-
Unit::Count,
1282
-
"how many records were removed during trim"
1283
-
);
1284
1359
if reroll {
1285
1360
log::info!("reroll: resetting rollup cursor...");
1286
1361
insert_static_neu::<NewRollupCursorKey>(&self.global, Cursor::from_start())?;
···
1375
1450
latest.to_db_bytes()?,
1376
1451
);
1377
1452
1453
+
histogram!("storage_insert_batch_db_batch_items").record(batch.len() as f64);
1378
1454
batch.commit()?;
1379
1455
Ok(())
1380
1456
}
···
1529
1605
candidate_new_feed_lower_cursor = Some(feed_key.cursor());
1530
1606
}
1531
1607
1532
-
self.feeds.remove(&location_key_bytes)?;
1608
+
self.records.remove(&location_key_bytes)?;
1533
1609
self.feeds.remove(key_bytes)?;
1534
1610
records_deleted += 1;
1535
1611
}
···
1556
1632
batch.remove(&self.records, key_bytes);
1557
1633
records_deleted += 1;
1558
1634
if batch.len() >= MAX_BATCHED_ACCOUNT_DELETE_RECORDS {
1635
+
counter!("storage_delete_account_partial_commits").increment(1);
1559
1636
batch.commit()?;
1560
1637
batch = self.keyspace.batch();
1561
1638
}
1562
1639
}
1640
+
counter!("storage_delete_account_completions").increment(1);
1641
+
counter!("storage_delete_account_records_deleted").increment(records_deleted as u64);
1563
1642
batch.commit()?;
1564
1643
Ok(records_deleted)
1565
1644
}
···
1619
1698
histogram!("storage_trim_dirty_nsids").record(completed.len() as f64);
1620
1699
histogram!("storage_trim_duration").record(dt.as_micros() as f64);
1621
1700
counter!("storage_trim_removed", "dangling" => "true").increment(total_danglers as u64);
1622
-
counter!("storage_trim_removed", "dangling" => "false").increment((total_deleted - total_danglers) as u64);
1701
+
if total_deleted >= total_danglers {
1702
+
counter!("storage_trim_removed", "dangling" => "false").increment((total_deleted - total_danglers) as u64);
1703
+
} else {
1704
+
// TODO: probably think through what's happening here
1705
+
log::warn!("weird trim case: more danglers than deleted? metric will be missing for dangling=false. deleted={total_deleted} danglers={total_danglers}");
1706
+
}
1623
1707
for c in completed {
1624
1708
dirty_nsids.remove(&c);
1625
1709
}
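
One design note on `update_metrics` above: a single gauge name with a `partition` label keeps all five partitions queryable together (e.g. grouping by `partition` in PromQL). The shape, reduced (values are placeholders):

```rust
use metrics::gauge;

fn report_l0(global_runs: u64, feeds_runs: u64) {
    gauge!("storage_fjall_l0_run_count", "partition" => "global").set(global_runs as f64);
    gauge!("storage_fjall_l0_run_count", "partition" => "feeds").set(feeds_runs as f64);
}
```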
-196
ufos ops (move to micro-ops).md
-196
ufos ops (move to micro-ops).md
···
1
-
ufos ops
2
-
3
-
btrfs snapshots: snapper
4
-
5
-
```bash
6
-
sudo apt install snapper
7
-
sudo snapper -c ufos-db create-config /mnt/ufos-db
8
-
9
-
# edit /etc/snapper/configs/ufos-db
10
-
# change
11
-
TIMELINE_MIN_AGE="1800"
12
-
TIMELINE_LIMIT_HOURLY="10"
13
-
TIMELINE_LIMIT_DAILY="10"
14
-
TIMELINE_LIMIT_WEEKLY="0"
15
-
TIMELINE_LIMIT_MONTHLY="10"
16
-
TIMELINE_LIMIT_YEARLY="10"
17
-
# to
18
-
TIMELINE_MIN_AGE="1800"
19
-
TIMELINE_LIMIT_HOURLY="22"
20
-
TIMELINE_LIMIT_DAILY="4"
21
-
TIMELINE_LIMIT_WEEKLY="0"
22
-
TIMELINE_LIMIT_MONTHLY="0"
23
-
TIMELINE_LIMIT_YEARLY="0"
24
-
```
25
-
26
-
this should be enough?
27
-
28
-
list snapshots:
29
-
30
-
```bash
31
-
sudo snapper -c ufos-db list
32
-
```
33
-
34
-
systemd
35
-
36
-
create file: `/etc/systemd/system/ufos.service`
37
-
38
-
```ini
39
-
[Unit]
40
-
Description=UFOs-API
41
-
After=network.target
42
-
43
-
[Service]
44
-
User=pi
45
-
WorkingDirectory=/home/pi/
46
-
ExecStart=/home/pi/ufos --jetstream us-west-2 --data /mnt/ufos-db/
47
-
Environment="RUST_LOG=info"
48
-
LimitNOFILE=16384
49
-
Restart=always
50
-
51
-
[Install]
52
-
WantedBy=multi-user.target
53
-
```
54
-
55
-
then
56
-
57
-
```bash
58
-
sudo systemctl daemon-reload
59
-
sudo systemctl enable ufos
60
-
sudo systemctl start ufos
61
-
```
62
-
63
-
monitor with
64
-
65
-
```bash
66
-
journalctl -u ufos -f
67
-
```
68
-
69
-
make sure a backup dir exists
70
-
71
-
```bash
72
-
mkdir /home/pi/backup
73
-
```
74
-
75
-
mount the NAS
76
-
77
-
```bash
78
-
sudo mount.cifs "//truenas.local/folks data" /home/pi/backup -o user=phil,uid=pi
79
-
```
80
-
81
-
manual rsync
82
-
83
-
```bash
84
-
sudo rsync -ahP --delete /mnt/ufos-db/.snapshots/1/snapshot/ backup/ufos/
85
-
```
86
-
87
-
backup script sketch
88
-
89
-
```bash
90
-
NUM=$(sudo snapper --csvout -c ufos-db list --type single --columns number | tail -n1)
91
-
sudo rsync -ahP --delete "/mnt/ufos-db/.snapshots/${NUM}/snapshot/" backup/ufos/
92
-
```
93
-
94
-
just crontab it?
95
-
96
-
`sudo crontab -e`
97
-
```bash
98
-
0 1/6 * * * rsync -ahP --delete "/mnt/ufos-db/.snapshots/$(sudo snapper --csvout -c ufos-db list --columns number | tail -n1)/snapshot/" backup/ufos/
99
-
```
100
-
101
-
^^ try once initial backup is done
102
-
103
-
104
-
--columns subvolume,number
105
-
106
-
subvolume
107
-
number
108
-
109
-
110
-
111
-
112
-
gateway: follow constellation for nginx->prom thing
113
-
114
-
config at `/etc/prometheus-nginxlog-exporter.hcl`
115
-
116
-
before: `/etc/prometheus-nginxlog-exporter.hcl`
117
-
118
-
```hcl
119
-
listen {
120
-
port = 4044
121
-
}
122
-
123
-
namespace "nginx" {
124
-
source = {
125
-
files = [
126
-
"/var/log/nginx/constellation-access.log"
127
-
]
128
-
}
129
-
130
-
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
131
-
132
-
labels {
133
-
app = "constellation"
134
-
}
135
-
136
-
relabel "cache_status" {
137
-
from = "upstream_cache_status"
138
-
}
139
-
}
140
-
```
141
-
142
-
after:
143
-
144
-
```hcl
145
-
listen {
146
-
port = 4044
147
-
}
148
-
149
-
namespace "constellation" {
150
-
source = {
151
-
files = [
152
-
"/var/log/nginx/constellation-access.log"
153
-
]
154
-
}
155
-
156
-
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
157
-
158
-
labels {
159
-
app = "constellation"
160
-
}
161
-
162
-
relabel "cache_status" {
163
-
from = "upstream_cache_status"
164
-
}
165
-
166
-
namespace_label = "vhost"
167
-
metrics_override = { prefix = "nginx" }
168
-
}
169
-
170
-
namespace "ufos" {
171
-
source = {
172
-
files = [
173
-
"/var/log/nginx/ufos-access.log"
174
-
]
175
-
}
176
-
177
-
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
178
-
179
-
labels {
180
-
app = "ufos"
181
-
}
182
-
183
-
relabel "cache_status" {
184
-
from = "upstream_cache_status"
185
-
}
186
-
187
-
namespace_label = "vhost"
188
-
metrics_override = { prefix = "nginx" }
189
-
}
190
-
```
191
-
192
-
193
-
```bash
194
-
systemctl start prometheus-nginxlog-exporter.service
195
-
```
196
-
+4
-4
who-am-i/src/server.rs
+4
-4
who-am-i/src/server.rs
···
268
268
Some(parent_host),
269
269
);
270
270
}
271
-
if let Some(ref app) = params.app {
272
-
if !allowed_hosts.contains(app) {
273
-
return err("Login is not allowed for this app", false, Some(app));
274
-
}
271
+
if let Some(ref app) = params.app
272
+
&& !allowed_hosts.contains(app)
273
+
{
274
+
return err("Login is not allowed for this app", false, Some(app));
275
275
}
276
276
let parent_origin = url.origin().ascii_serialization();
277
277
if parent_origin == "null" {