.github/workflows/build.yml (+21)
···
+name: Build
+
+on:
+  push:
+    tags:
+      - "reflector-v*.*.*"
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+
+    steps:
+      - uses: actions/checkout@v4
+      - name: build reflector
+        run: cargo build --bin reflector --release && mv target/release/reflector target/release/reflector_amd64
+      - name: release
+        uses: softprops/action-gh-release@v2
+        with:
+          files: target/release/reflector_amd64
.github/workflows/checks.yml (+1 -1)
···
       - name: get nightly toolchain for jetstream fmt
         run: rustup toolchain install nightly --allow-downgrade -c rustfmt
       - name: fmt
-        run: cargo fmt --package links --package constellation --package ufos --package spacedust --package who-am-i --package slingshot -- --check
+        run: cargo fmt --package links --package constellation --package ufos --package spacedust --package who-am-i --package slingshot --package pocket -- --check
       - name: fmt jetstream (nightly)
         run: cargo +nightly fmt --package jetstream -- --check
       - name: clippy
Cargo.lock (+832 -259)
···

 [[package]]
 name = "anyhow"
-version = "1.0.97"
+version = "1.0.100"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f"
+checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61"

 [[package]]
 name = "arbitrary"
···
 checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"

 [[package]]
+name = "arrayref"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb"
+
+[[package]]
 name = "arrayvec"
 version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "proc-macro2",
  "quote",
  "serde",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "nom",
  "num-traits",
  "rusticata-macros",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "time",
 ]

···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
  "synstructure",
 ]

···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "46355d3245edc7b3160b2a45fe55d09a6963ebd3eee0252feb6b72fb0eb71463"
 dependencies = [
- "atrium-common",
- "atrium-xrpc",
+ "atrium-common 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-xrpc 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "chrono",
+ "http",
+ "ipld-core",
+ "langtag",
+ "regex",
+ "serde",
+ "serde_bytes",
+ "serde_json",
+ "thiserror 1.0.69",
+ "tokio",
+ "trait-variant",
+]
+
+[[package]]
+name = "atrium-api"
+version = "0.25.4"
+source = "git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace#80a355991ac9b48ba3f559d12aac74f071fc638c"
+dependencies = [
+ "atrium-common 0.1.2 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-xrpc 0.12.3 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
  "chrono",
  "http",
  "ipld-core",
···
 ]

 [[package]]
+name = "atrium-common"
+version = "0.1.2"
+source = "git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace#80a355991ac9b48ba3f559d12aac74f071fc638c"
+dependencies = [
+ "dashmap",
+ "lru",
+ "moka",
+ "thiserror 1.0.69",
+ "tokio",
+ "trait-variant",
+ "web-time",
+]
+
+[[package]]
+name = "atrium-crypto"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73a3da430c71dd9006d61072c20771f264e5c498420a49c32305ceab8bd71955"
+dependencies = [
+ "ecdsa",
+ "k256",
+ "multibase",
+ "p256",
+ "thiserror 1.0.69",
+]
+
+[[package]]
 name = "atrium-identity"
 version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c9e2d42bb4dbea038f4f5f45e3af2a89d61a9894a75f06aa550b74a60d2be380"
 dependencies = [
- "atrium-api",
- "atrium-common",
- "atrium-xrpc",
+ "atrium-api 0.25.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-common 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-xrpc 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde",
+ "serde_html_form",
+ "serde_json",
+ "thiserror 1.0.69",
+ "trait-variant",
+]
+
+[[package]]
+name = "atrium-identity"
+version = "0.1.5"
+source = "git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace#80a355991ac9b48ba3f559d12aac74f071fc638c"
+dependencies = [
+ "atrium-api 0.25.4 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-common 0.1.2 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-xrpc 0.12.3 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
  "serde",
  "serde_html_form",
  "serde_json",
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ca22dc4eaf77fd9bf050b21192ac58cd654a437d28e000ec114ebd93a51d36f5"
 dependencies = [
- "atrium-api",
- "atrium-common",
- "atrium-identity",
- "atrium-xrpc",
+ "atrium-api 0.25.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-common 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-identity 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atrium-xrpc 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "base64 0.22.1",
+ "chrono",
+ "dashmap",
+ "ecdsa",
+ "elliptic-curve",
+ "jose-jwa",
+ "jose-jwk",
+ "p256",
+ "rand 0.8.5",
+ "reqwest",
+ "serde",
+ "serde_html_form",
+ "serde_json",
+ "sha2",
+ "thiserror 1.0.69",
+ "tokio",
+ "trait-variant",
+]
+
+[[package]]
+name = "atrium-oauth"
+version = "0.1.3"
+source = "git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace#80a355991ac9b48ba3f559d12aac74f071fc638c"
+dependencies = [
+ "atrium-api 0.25.4 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-common 0.1.2 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-identity 0.1.5 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-xrpc 0.12.3 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
  "base64 0.22.1",
  "chrono",
  "dashmap",
···
 ]

 [[package]]
+name = "atrium-xrpc"
+version = "0.12.3"
+source = "git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace#80a355991ac9b48ba3f559d12aac74f071fc638c"
+dependencies = [
+ "http",
+ "serde",
+ "serde_html_form",
+ "serde_json",
+ "thiserror 1.0.69",
+ "trait-variant",
+]
+
+[[package]]
 name = "auto_enums"
 version = "0.8.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "derive_utils",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "axum-core",
  "bytes",
  "cookie",
+ "form_urlencoded",
  "futures-util",
  "headers",
  "http",
···
  "pin-project-lite",
  "rustversion",
  "serde",
+ "serde_html_form",
+ "serde_path_to_error",
  "tower",
  "tower-layer",
  "tower-service",
···
  "axum",
  "handlebars",
  "serde",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
 ]

 [[package]]
···
 checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf"

 [[package]]
+name = "base256emoji"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5e9430d9a245a77c92176e649af6e275f20839a48389859d1661e9a128d077c"
+dependencies = [
+ "const-str",
+ "match-lookup",
+]
+
+[[package]]
 name = "base64"
 version = "0.21.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "regex",
  "rustc-hash 1.1.0",
  "shlex",
- "syn 2.0.103",
+ "syn 2.0.106",
  "which",
 ]

···
  "regex",
  "rustc-hash 1.1.0",
  "shlex",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "regex",
  "rustc-hash 2.1.1",
  "shlex",
- "syn 2.0.103",
+ "syn 2.0.106",
+]
+
+[[package]]
+name = "bitcoin-io"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf"
+
+[[package]]
+name = "bitcoin_hashes"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16"
+dependencies = [
+ "bitcoin-io",
+ "hex-conservative",
 ]

 [[package]]
···
 checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd"

 [[package]]
+name = "blake3"
+version = "1.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0"
+dependencies = [
+ "arrayref",
+ "arrayvec",
+ "cc",
+ "cfg-if",
+ "constant_time_eq",
+]
+
+[[package]]
 name = "block-buffer"
 version = "0.10.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"

 [[package]]
+name = "byteorder-lite"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495"
+
+[[package]]
 name = "bytes"
 version = "1.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 checksum = "6236364b88b9b6d0bc181ba374cf1ab55ba3ef97a1cb6f8cddad48a273767fb5"

 [[package]]
+name = "byteview"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e6b0e42e210b794e14b152c6fe1a55831e30ef4a0f5dc39d73d714fb5f1906c"
+
+[[package]]
 name = "bzip2-sys"
 version = "0.1.13+1.0.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···

 [[package]]
 name = "camino"
-version = "1.1.9"
+version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3"
+checksum = "276a59bf2b2c967788139340c9f0c5b12d7fd6630315c15c217e559de85d2609"
 dependencies = [
- "serde",
+ "serde_core",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "cbor4ii"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b544cf8c89359205f4f990d0e6f3828db42df85b5dac95d09157a250eb0749c4"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "cbor4ii"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b28d2802395e3bccd95cc4ae984bff7444b6c1f5981da46a41360c42a2c7e2d9"
+
+[[package]]
 name = "cc"
 version = "1.2.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "ciborium"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
+dependencies = [
+ "ciborium-io",
+ "ciborium-ll",
+ "serde",
+]
+
+[[package]]
+name = "ciborium-io"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
+
+[[package]]
+name = "ciborium-ll"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
+dependencies = [
+ "ciborium-io",
+ "half",
+]
+
+[[package]]
 name = "cid"
 version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "multihash",
  "serde",
  "serde_bytes",
- "unsigned-varint",
+ "unsigned-varint 0.8.0",
 ]

 [[package]]
···

 [[package]]
 name = "clap"
-version = "4.5.41"
+version = "4.5.48"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be92d32e80243a54711e5d7ce823c35c41c9d929dc4ab58e1276f625841aadf9"
+checksum = "e2134bb3ea021b78629caa971416385309e0131b351b25e01dc16fb54e1b5fae"
 dependencies = [
  "clap_builder",
  "clap_derive",
···

 [[package]]
 name = "clap_builder"
-version = "4.5.41"
+version = "4.5.48"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "707eab41e9622f9139419d573eca0900137718000c517d47da73045f54331c3d"
+checksum = "c2ba64afa3c0a6df7fa517765e31314e983f51dda798ffba27b988194fb65dc9"
 dependencies = [
  "anstream",
  "anstyle",
···

 [[package]]
 name = "clap_derive"
-version = "4.5.41"
+version = "4.5.47"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491"
+checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c"
 dependencies = [
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"

 [[package]]
+name = "const-str"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2f421161cb492475f1661ddc9815a745a1c894592070661180fdec3d4872e9c3"
+
+[[package]]
+name = "constant_time_eq"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6"
+
+[[package]]
 name = "constellation"
 version = "0.1.0"
 dependencies = [
···
 checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"

 [[package]]
+name = "crunchy"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5"
+
+[[package]]
 name = "crypto-bigint"
 version = "0.5.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "proc-macro2",
  "quote",
  "strsim 0.11.1",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "darling_core 0.20.11",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "dasl"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b59666035a4386b0fd272bd78da4cbc3ccb558941e97579ab00f0eb4639f2a49"
+dependencies = [
+ "blake3",
+ "cbor4ii 1.2.0",
+ "data-encoding",
+ "data-encoding-macro",
+ "scopeguard",
+ "serde",
+ "serde_bytes",
+ "sha2",
+ "thiserror 2.0.17",
+]
+
+[[package]]
 name = "data-encoding"
-version = "2.8.0"
+version = "2.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "575f75dfd25738df5b91b8e43e14d44bda14637a58fae779fd2b064f8bf3e010"
+checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476"

 [[package]]
 name = "data-encoding-macro"
-version = "0.1.17"
+version = "0.1.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f9724adfcf41f45bf652b3995837669d73c4d49a1b5ac1ff82905ac7d9b5558"
+checksum = "47ce6c96ea0102f01122a185683611bd5ac8d99e62bc59dd12e6bda344ee673d"
 dependencies = [
  "data-encoding",
  "data-encoding-macro-internal",
···

 [[package]]
 name = "data-encoding-macro-internal"
-version = "0.1.15"
+version = "0.1.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18e4fdb82bd54a12e42fb58a800dcae6b9e13982238ce2296dc3570b92148e1f"
+checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976"
 dependencies = [
  "data-encoding",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "darling 0.20.11",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c"
 dependencies = [
  "derive_builder_core",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
  "unicode-xid",
 ]

···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···

 [[package]]
 name = "dropshot"
-version = "0.16.2"
+version = "0.16.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "50e8fed669e35e757646ad10f97c4d26dd22cce3da689b307954f7000d2719d0"
+checksum = "eedf902e40c1024b8ed9ca16378a54e9655cdf0e698245ba82d81a3778dcbc54"
 dependencies = [
  "async-stream",
  "async-trait",
···
  "http-body-util",
  "hyper",
  "hyper-util",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "multer",
  "openapiv3",
  "paste",
···
  "slog-bunyan",
  "slog-json",
  "slog-term",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tokio",
  "tokio-rustls 0.25.0",
- "toml",
+ "toml 0.9.7",
  "uuid",
  "version_check",
  "waitgroup",
···

 [[package]]
 name = "dropshot_endpoint"
-version = "0.16.2"
+version = "0.16.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "acebb687581abdeaa2c89fa448818a5f803b0e68e5d7e7a1cf585a8f3c5c57ac"
+checksum = "89d09440e73a9dcf8a0f7fbd6ab889a7751d59f0fe76e5082a0a6d5623ec6da3"
 dependencies = [
  "heck",
  "proc-macro2",
···
  "semver",
  "serde",
  "serde_tokenstream",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "heck",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "once_cell",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "fallible-iterator"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649"
+
+[[package]]
+name = "fallible-streaming-iterator"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"
+
+[[package]]
 name = "fastrand"
 version = "2.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···

 [[package]]
 name = "fjall"
-version = "2.8.0"
+version = "2.11.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26b2ced3483989a62b3533c9f99054d73b527c6c0045cf22b00fe87956f1a46f"
+checksum = "0b25ad44cd4360a0448a9b5a0a6f1c7a621101cca4578706d43c9a821418aebc"
 dependencies = [
  "byteorder",
- "byteview",
+ "byteview 0.6.1",
  "dashmap",
  "log",
- "lsm-tree",
+ "lsm-tree 2.10.4",
  "path-absolutize",
  "std-semaphore",
  "tempfile",
···
 ]

 [[package]]
+name = "fjall"
+version = "2.11.2"
+source = "git+https://github.com/fjall-rs/fjall.git#42d811f7c8cc9004407d520d37d2a1d8d246c03d"
+dependencies = [
+ "byteorder",
+ "byteview 0.6.1",
+ "dashmap",
+ "log",
+ "lsm-tree 2.10.4",
+ "path-absolutize",
+ "std-semaphore",
+ "tempfile",
+ "xxhash-rust",
+]
+
+[[package]]
+name = "fjall"
+version = "3.0.0-pre.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "467588c1f15d1cfa9e43f02a45cf55d82fa1f12a6ae961b848c520458525600c"
+dependencies = [
+ "byteorder-lite",
+ "byteview 0.8.0",
+ "dashmap",
+ "log",
+ "lsm-tree 3.0.0-pre.0",
+ "std-semaphore",
+ "tempfile",
+ "xxhash-rust",
+]
+
+[[package]]
 name = "flate2"
 version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "mixtrics",
  "pin-project",
  "serde",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tokio",
  "tracing",
 ]
···
  "parking_lot",
  "pin-project",
  "serde",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tokio",
  "twox-hash",
 ]
···
  "parking_lot",
  "pin-project",
  "serde",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tokio",
  "tracing",
 ]
···
  "pin-project",
  "rand 0.9.1",
  "serde",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tokio",
  "tracing",
  "twox-hash",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "futures-core",
  "futures-sink",
  "http",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "slab",
  "tokio",
  "tokio-util",
···
 ]

 [[package]]
+name = "half"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9"
+dependencies = [
+ "cfg-if",
+ "crunchy",
+]
+
+[[package]]
 name = "handlebars"
 version = "6.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "pest_derive",
  "serde",
  "serde_json",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "walkdir",
 ]
···
 ]

 [[package]]
+name = "hashlink"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1"
+dependencies = [
+ "hashbrown 0.15.2",
+]
+
+[[package]]
 name = "headers"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"

 [[package]]
+name = "hex-conservative"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd"
+dependencies = [
+ "arrayvec",
+]
+
+[[package]]
 name = "hickory-proto"
 version = "0.25.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "once_cell",
  "rand 0.9.1",
  "ring",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tinyvec",
  "tokio",
  "tracing",
···
  "rand 0.9.1",
  "resolv-conf",
  "smallvec",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tokio",
  "tracing",
 ]
···
  "js-sys",
  "log",
  "wasm-bindgen",
- "windows-core 0.61.0",
+ "windows-core",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···

 [[package]]
 name = "indexmap"
-version = "2.9.0"
+version = "2.11.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e"
+checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5"
 dependencies = [
  "equivalent",
  "hashbrown 0.15.2",
  "serde",
+ "serde_core",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "iroh-car"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb7f8cd4cb9aa083fba8b52e921764252d0b4dcb1cd6d120b809dbfe1106e81a"
+dependencies = [
+ "anyhow",
+ "cid",
+ "futures",
+ "serde",
+ "serde_ipld_dagcbor",
+ "thiserror 1.0.69",
+ "tokio",
+ "unsigned-varint 0.7.2",
+]
+
+[[package]]
 name = "is-terminal"
 version = "0.4.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 dependencies = [
  "anyhow",
  "async-trait",
- "atrium-api",
+ "atrium-api 0.25.4 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
  "chrono",
  "clap",
  "futures-util",
···
  "metrics",
  "serde",
  "serde_json",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tokio",
  "tokio-tungstenite 0.26.2",
  "url",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "jwt-compact"
+version = "0.9.0-beta.1"
+source = "git+https://github.com/fatfingers23/jwt-compact.git#aed088b8ff5ad44ef2785c453f6a4b7916728b1c"
+dependencies = [
+ "anyhow",
+ "base64ct",
+ "chrono",
+ "ciborium",
+ "hmac",
+ "lazy_static",
+ "rand_core 0.6.4",
+ "secp256k1",
+ "serde",
+ "serde_json",
+ "sha2",
+ "smallvec",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "k256"
+version = "0.13.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b"
+dependencies = [
+ "cfg-if",
+ "ecdsa",
+ "elliptic-curve",
+ "sha2",
+]
+
+[[package]]
 name = "langtag"
 version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "libsqlite3-sys"
+version = "0.35.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f"
+dependencies = [
+ "pkg-config",
+ "vcpkg",
+]
+
+[[package]]
 name = "libz-sys"
 version = "1.1.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 version = "0.1.0"
 dependencies = [
  "anyhow",
+ "dasl",
  "fluent-uri",
  "nom",
- "thiserror 2.0.12",
+ "serde",
+ "thiserror 2.0.17",
  "tinyjson",
 ]

···

 [[package]]
 name = "log"
-version = "0.4.27"
+version = "0.4.28"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94"
+checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"

 [[package]]
 name = "loom"
···

 [[package]]
 name = "lsm-tree"
-version = "2.8.0"
+version = "2.10.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0a63a5e98a38b51765274137d8aedfbd848da5f4d016867e186b673fcc06a8c"
+checksum = "799399117a2bfb37660e08be33f470958babb98386b04185288d829df362ea15"
 dependencies = [
  "byteorder",
  "crossbeam-skiplist",
···
 ]

 [[package]]
+name = "lsm-tree"
+version = "3.0.0-pre.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be375d45e348328e78582dffbda4f1709dd52fca27c1a81c7bf6ca134e6335f7"
+dependencies = [
+ "byteorder-lite",
+ "byteview 0.8.0",
+ "crossbeam-skiplist",
+ "enum_dispatch",
+ "interval-heap",
+ "log",
+ "lz4_flex",
+ "quick_cache",
+ "rustc-hash 2.1.1",
+ "self_cell",
+ "sfa",
+ "tempfile",
+ "varint-rs",
+ "xxhash-rust",
+]
+
+[[package]]
 name = "lz4"
 version = "1.28.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···

 [[package]]
 name = "lz4_flex"
-version = "0.11.3"
+version = "0.11.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5"
+checksum = "08ab2867e3eeeca90e844d1940eab391c9dc5228783db2ed999acbc0a9ed375a"

 [[package]]
 name = "mach2"
···
  "spin",
  "tokio",
  "tokio-util",
- "toml",
+ "toml 0.8.23",
  "tracing",
  "tracing-subscriber",
 ]
···
 ]

 [[package]]
+name = "match-lookup"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1265724d8cb29dbbc2b0f06fffb8bf1a8c0cf73a78eede9ba73a4a66c52a981e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
 name = "match_cfg"
 version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···

 [[package]]
 name = "matchers"
-version = "0.1.0"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
+checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9"
 dependencies = [
- "regex-automata 0.1.10",
+ "regex-automata",
 ]

 [[package]]
···
  "http-body-util",
  "hyper",
  "hyper-util",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "ipnet",
  "metrics",
  "metrics-util 0.19.0",
···
  "hyper",
  "hyper-rustls",
  "hyper-util",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "ipnet",
  "metrics",
  "metrics-util 0.20.0",
  "quanta",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tokio",
  "tracing",
 ]
···

 [[package]]
 name = "multibase"
-version = "0.9.1"
+version = "0.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404"
+checksum = "8694bb4835f452b0e3bb06dbebb1d6fc5385b6ca1caf2e55fd165c042390ec77"
 dependencies = [
  "base-x",
+ "base256emoji",
  "data-encoding",
  "data-encoding-macro",
 ]
···
 dependencies = [
  "core2",
  "serde",
- "unsigned-varint",
+ "unsigned-varint 0.8.0",
 ]

 [[package]]
···

 [[package]]
 name = "nu-ansi-term"
-version = "0.46.0"
+version = "0.50.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
+checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399"
 dependencies = [
- "overload",
- "winapi",
+ "windows-sys 0.52.0",
 ]

 [[package]]
···

 [[package]]
 name = "openapiv3"
-version = "2.0.0"
+version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc02deea53ffe807708244e5914f6b099ad7015a207ee24317c22112e17d9c5c"
+checksum = "5c8d427828b22ae1fff2833a03d8486c2c881367f1c336349f307f321e7f4d05"
 dependencies = [
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "serde",
  "serde_json",
 ]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 ]

 [[package]]
-name = "overload"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
-
-[[package]]
 name = "p256"
 version = "0.13.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323"
 dependencies = [
  "memchr",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "ucd-trie",
 ]

···
  "pest_meta",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"

 [[package]]
+name = "pocket"
+version = "0.1.0"
+dependencies = [
+ "atrium-crypto",
+ "clap",
+ "jwt-compact",
+ "log",
+ "poem",
+ "poem-openapi",
+ "reqwest",
+ "rusqlite",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.17",
+ "tokio",
+ "tracing-subscriber",
+]
+
+[[package]]
 name = "poem"
 version = "3.1.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "headers",
  "http",
  "http-body-util",
+ "httpdate",
  "hyper",
  "hyper-util",
  "mime",
+ "mime_guess",
  "multer",
  "nix",
  "parking_lot",
···
  "smallvec",
  "sync_wrapper",
  "tempfile",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tokio",
  "tokio-rustls 0.26.2",
  "tokio-stream",
···
  "proc-macro-crate",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "bytes",
  "derive_more",
  "futures-util",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "itertools 0.14.0",
  "mime",
  "num-traits",
···
  "serde_json",
  "serde_urlencoded",
  "serde_yaml",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tokio",
 ]
···
 dependencies = [
  "darling 0.20.11",
  "http",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "mime",
  "proc-macro-crate",
  "proc-macro2",
  "quote",
  "regex",
- "syn 2.0.103",
- "thiserror 2.0.12",
+ "syn 2.0.106",
+ "thiserror 2.0.17",
 ]

 [[package]]
···
 checksum = "6837b9e10d61f45f987d50808f83d1ee3d206c66acf650c3e4ae2e1f6ddedf55"
 dependencies = [
  "proc-macro2",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "quasar"
+version = "0.1.0"
+dependencies = [
+ "clap",
+ "fjall 2.11.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
 name = "quick-xml"
 version = "0.36.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···

 [[package]]
 name = "quick_cache"
-version = "0.6.12"
+version = "0.6.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f8ed0655cbaf18a26966142ad23b95d8ab47221c50c4f73a1db7d0d2d6e3da8"
+checksum = "9ad6644cb07b7f3488b9f3d2fde3b4c0a7fa367cafefb39dff93a659f76eb786"
 dependencies = [
  "equivalent",
  "hashbrown 0.15.2",
···
  "rustc-hash 2.1.1",
  "rustls 0.23.31",
  "socket2 0.5.9",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tokio",
  "tracing",
  "web-time",
···
  "rustls 0.23.31",
  "rustls-pki-types",
  "slab",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tinyvec",
  "tracing",
  "web-time",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
+]
+
+[[package]]
+name = "reflector"
+version = "0.1.0"
+dependencies = [
+ "clap",
+ "log",
+ "poem",
+ "serde",
+ "tokio",
+ "tracing-subscriber",
 ]

 [[package]]
···
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-automata 0.4.9",
- "regex-syntax 0.8.5",
-]
-
-[[package]]
-name = "regex-automata"
-version = "0.1.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
-dependencies = [
- "regex-syntax 0.6.29",
+ "regex-automata",
+ "regex-syntax",
 ]

 [[package]]
···
 dependencies = [
  "aho-corasick",
  "memchr",
- "regex-syntax 0.8.5",
+ "regex-syntax",
 ]

 [[package]]
 name = "regex-syntax"
-version = "0.6.29"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"

 [[package]]
-name = "regex-syntax"
-version = "0.8.5"
+name = "repo-stream"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
+checksum = "093b48e604c138949bf3d4a1a9bc1165feb1db28a73af0101c84eb703d279f43"
+dependencies = [
+ "bincode 2.0.1",
+ "futures",
+ "futures-core",
+ "ipld-core",
+ "iroh-car",
+ "log",
+ "multibase",
+ "rusqlite",
+ "serde",
+ "serde_bytes",
+ "serde_ipld_dagcbor",
+ "sha2",
+ "thiserror 2.0.17",
+ "tokio",
+]

 [[package]]
 name = "reqwest"
-version = "0.12.22"
+version = "0.12.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531"
+checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f"
 dependencies = [
  "async-compression",
  "base64 0.22.1",
···
  "url",
  "wasm-bindgen",
  "wasm-bindgen-futures",
+ "wasm-streams",
  "web-sys",
 ]
···
  "spki",
  "subtle",
  "zeroize",
+]
+
+[[package]]
+name = "rusqlite"
+version = "0.37.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f"
+dependencies = [
+ "bitflags",
+ "fallible-iterator",
+ "fallible-streaming-iterator",
+ "hashlink",
+ "libsqlite3-sys",
+ "smallvec",
 ]

 [[package]]
···
  "proc-macro2",
  "quote",
  "serde_derive_internals",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "secp256k1"
+version = "0.30.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b50c5943d326858130af85e049f2661ba3c78b26589b8ab98e65e80ae44a1252"
+dependencies = [
+ "bitcoin_hashes",
+ "rand 0.8.5",
+ "secp256k1-sys",
+]
+
+[[package]]
+name = "secp256k1-sys"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9"
+dependencies = [
+ "cc",
+]
+
+[[package]]
 name = "security-framework"
 version = "2.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···

 [[package]]
 name = "self_cell"
-version = "1.1.0"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2fdfc24bc566f839a2da4c4295b82db7d25a24253867d5c64355abb5799bdbe"
+checksum = "0f7d95a54511e0c7be3f51e8867aa8cf35148d7b9445d44de2f943e2b206e749"

 [[package]]
 name = "semver"
···

 [[package]]
 name = "serde"
-version = "1.0.219"
+version = "1.0.228"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
+checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
 dependencies = [
+ "serde_core",
  "serde_derive",
 ]

 [[package]]
 name = "serde_bytes"
-version = "0.11.17"
+version = "0.11.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96"
+checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8"
 dependencies = [
  "serde",
+ "serde_core",
+]
+
+[[package]]
+name = "serde_core"
+version = "1.0.228"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"
+dependencies = [
+ "serde_derive",
 ]

 [[package]]
 name = "serde_derive"
-version = "1.0.219"
+version = "1.0.228"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
+checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4"
 dependencies = [
  "form_urlencoded",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "itoa",
  "ryu",
  "serde",
 ]

 [[package]]
+name = "serde_ipld_dagcbor"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "46182f4f08349a02b45c998ba3215d3f9de826246ba02bb9dddfe9a2a2100778"
+dependencies = [
+ "cbor4ii 0.2.14",
+ "ipld-core",
+ "scopeguard",
+ "serde",
+]
+
+[[package]]
 name = "serde_json"
-version = "1.0.141"
+version = "1.0.145"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3"
+checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
 dependencies = [
  "itoa",
  "memchr",
  "ryu",
  "serde",
+ "serde_core",
 ]

 [[package]]
···
  "percent-encoding",
  "ryu",
  "serde",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "serde_spanned"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5417783452c2be558477e104686f7de5dae53dba813c28435e0e70f82d9b04ee"
+dependencies = [
+ "serde_core",
+]
+
+[[package]]
 name = "serde_tokenstream"
 version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "proc-macro2",
  "quote",
  "serde",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "chrono",
  "hex",
  "indexmap 1.9.3",
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "serde",
  "serde_derive",
  "serde_json",
···
  "darling 0.20.11",
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
 dependencies = [
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "itoa",
  "ryu",
  "serde",
  "unsafe-libyaml",
+]
+
+[[package]]
+name = "sfa"
+version = "0.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5f5f9dc21f55409f15103d5a7e7601b804935923c7fe4746dc806c3a422a038"
+dependencies = [
+ "byteorder-lite",
+ "log",
+ "xxhash-rust",
 ]

 [[package]]
···
 dependencies = [
  "num-bigint",
  "num-traits",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "time",
 ]
···
 name = "slingshot"
 version = "0.1.0"
 dependencies = [
- "atrium-api",
- "atrium-common",
- "atrium-identity",
- "atrium-oauth",
+ "atrium-api 0.25.4 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-common 0.1.2 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-identity 0.1.5 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
+ "atrium-oauth 0.1.3 (git+https://github.com/uniphil/atrium.git?branch=fix%2Fresolve-handle-https-accept-whitespace)",
  "clap",
  "ctrlc",
  "foyer",
···
  "rustls 0.23.31",
  "serde",
  "serde_json",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "time",
  "tokio",
  "tokio-util",
···
 name = "spacedust"
 version = "0.1.0"
 dependencies = [
+ "anyhow",
+ "async-channel",
  "async-trait",
  "clap",
  "ctrlc",
+ "dasl",
  "dropshot",
  "env_logger",
+ "fjall 3.0.0-pre.0",
  "futures",
  "http",
+ "ipld-core",
  "jetstream",
  "links",
  "log",
  "metrics",
  "metrics-exporter-prometheus 0.17.2",
  "rand 0.9.1",
+ "repo-stream",
+ "reqwest",
  "schemars",
  "semver",
  "serde",
+ "serde_ipld_dagcbor",
  "serde_json",
  "serde_qs",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tinyjson",
  "tokio",
  "tokio-tungstenite 0.27.0",
···

 [[package]]
 name = "syn"
-version = "2.0.103"
+version = "2.0.106"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e4307e30089d6fd6aff212f2da3a1f9e32f3223b1f010fb09b7c95f90f3ca1e8"
+checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6"
 dependencies = [
  "proc-macro2",
  "quote",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···

 [[package]]
 name = "tempfile"
-version = "3.19.1"
+version = "3.23.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf"
+checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16"
 dependencies = [
  "fastrand",
  "getrandom 0.3.3",
···

 [[package]]
 name = "thiserror"
-version = "2.0.12"
+version = "2.0.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708"
+checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8"
 dependencies = [
- "thiserror-impl 2.0.12",
+ "thiserror-impl 2.0.17",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
 name = "thiserror-impl"
-version = "2.0.12"
+version = "2.0.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d"
+checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···

 [[package]]
 name = "tokio"
-version = "1.47.0"
+version = "1.47.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35"
+checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
 dependencies = [
  "backtrace",
  "bytes",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
 checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362"
 dependencies = [
  "serde",
- "serde_spanned",
- "toml_datetime",
+ "serde_spanned 0.6.9",
+ "toml_datetime 0.6.11",
  "toml_edit",
 ]

 [[package]]
+name = "toml"
+version = "0.9.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00e5e5d9bf2475ac9d4f0d9edab68cc573dc2fd644b0dba36b0c30a92dd9eaa0"
+dependencies = [
+ "indexmap 2.11.4",
+ "serde_core",
+ "serde_spanned 1.0.2",
+ "toml_datetime 0.7.2",
+ "toml_parser",
+ "toml_writer",
+ "winnow",
+]
+
+[[package]]
 name = "toml_datetime"
 version = "0.6.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
+name = "toml_datetime"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32f1085dec27c2b6632b04c80b3bb1b4300d6495d1e129693bdda7d91e72eec1"
+dependencies = [
+ "serde_core",
+]
+
+[[package]]
 name = "toml_edit"
 version = "0.22.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
 dependencies = [
- "indexmap 2.9.0",
+ "indexmap 2.11.4",
  "serde",
- "serde_spanned",
- "toml_datetime",
+ "serde_spanned 0.6.9",
+ "toml_datetime 0.6.11",
  "toml_write",
  "winnow",
 ]

 [[package]]
+name = "toml_parser"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4cf893c33be71572e0e9aa6dd15e6677937abd686b066eac3f8cd3531688a627"
+dependencies = [
+ "winnow",
+]
+
+[[package]]
 name = "toml_write"
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801"
+
+[[package]]
+name = "toml_writer"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d163a63c116ce562a22cda521fcc4d79152e7aba014456fb5eb442f6d6a10109"

 [[package]]
 name = "tower"
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···

 [[package]]
 name = "tracing-subscriber"
-version = "0.3.19"
+version = "0.3.20"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008"
+checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5"
 dependencies = [
  "matchers",
  "nu-ansi-term",
  "once_cell",
- "regex",
+ "regex-automata",
  "sharded-slab",
  "smallvec",
  "thread_local",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.103",
+ "syn 2.0.106",
 ]

 [[package]]
···
  "native-tls",
  "rand 0.9.1",
  "sha1",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "url",
  "utf-8",
 ]
···
  "log",
  "rand 0.9.1",
  "sha1",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "utf-8",
 ]

···
  "clap",
  "dropshot",
  "env_logger",
- "fjall",
+ "fjall 2.11.2 (git+https://github.com/fjall-rs/fjall.git)",
  "getrandom 0.3.3",
  "http",
  "jetstream",
  "log",
- "lsm-tree",
+ "lsm-tree 2.10.4",
  "metrics",
  "metrics-exporter-prometheus 0.17.2",
  "schemars",
···
  "serde_qs",
  "sha2",
  "tempfile",
- "thiserror 2.0.12",
+ "thiserror 2.0.17",
  "tikv-jemallocator",
  "tokio",
  "tokio-util",
···

 [[package]]
 name = "unsigned-varint"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105"
+
+[[package]]
+name = "unsigned-varint"
 version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06"
···

 [[package]]
 name = "uuid"
-version = "1.16.0"
+version = "1.18.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
5808
-
checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9"
6389
+
checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2"
5809
6390
dependencies = [
5810
6391
"getrandom 0.3.3",
6392
+
"js-sys",
5811
6393
"serde",
6394
+
"wasm-bindgen",
5812
6395
]
5813
6396
5814
6397
[[package]]
···
5819
6402
5820
6403
[[package]]
5821
6404
name = "value-log"
5822
-
version = "1.8.0"
6405
+
version = "1.9.0"
5823
6406
source = "registry+https://github.com/rust-lang/crates.io-index"
5824
-
checksum = "fd29b17c041f94e0885179637289815cd038f0c9fc19c4549d5a97017404fb7d"
6407
+
checksum = "62fc7c4ce161f049607ecea654dca3f2d727da5371ae85e2e4f14ce2b98ed67c"
5825
6408
dependencies = [
5826
6409
"byteorder",
5827
-
"byteview",
6410
+
"byteview 0.6.1",
5828
6411
"interval-heap",
5829
6412
"log",
5830
6413
"path-absolutize",
···
5923
6506
"log",
5924
6507
"proc-macro2",
5925
6508
"quote",
5926
-
"syn 2.0.103",
6509
+
"syn 2.0.106",
5927
6510
"wasm-bindgen-shared",
5928
6511
]
5929
6512
···
5958
6541
dependencies = [
5959
6542
"proc-macro2",
5960
6543
"quote",
5961
-
"syn 2.0.103",
6544
+
"syn 2.0.106",
5962
6545
"wasm-bindgen-backend",
5963
6546
"wasm-bindgen-shared",
5964
6547
]
···
5973
6556
]
5974
6557
5975
6558
[[package]]
6559
+
name = "wasm-streams"
6560
+
version = "0.4.2"
6561
+
source = "registry+https://github.com/rust-lang/crates.io-index"
6562
+
checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65"
6563
+
dependencies = [
6564
+
"futures-util",
6565
+
"js-sys",
6566
+
"wasm-bindgen",
6567
+
"wasm-bindgen-futures",
6568
+
"web-sys",
6569
+
]
6570
+
6571
+
[[package]]
5976
6572
name = "web-sys"
5977
6573
version = "0.3.77"
5978
6574
source = "registry+https://github.com/rust-lang/crates.io-index"
···
6008
6604
name = "who-am-i"
6009
6605
version = "0.1.0"
6010
6606
dependencies = [
6011
-
"atrium-api",
6012
-
"atrium-common",
6013
-
"atrium-identity",
6014
-
"atrium-oauth",
6607
+
"atrium-api 0.25.4 (registry+https://github.com/rust-lang/crates.io-index)",
6608
+
"atrium-common 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
6609
+
"atrium-identity 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
6610
+
"atrium-oauth 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
6015
6611
"axum",
6016
6612
"axum-extra",
6017
6613
"axum-template",
···
6031
6627
"reqwest",
6032
6628
"serde",
6033
6629
"serde_json",
6034
-
"thiserror 2.0.12",
6630
+
"thiserror 2.0.17",
6035
6631
"tokio",
6036
6632
"tokio-util",
6037
6633
"url",
···
6086
6682
source = "registry+https://github.com/rust-lang/crates.io-index"
6087
6683
checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6"
6088
6684
dependencies = [
6089
-
"windows-core 0.58.0",
6685
+
"windows-core",
6090
6686
"windows-targets 0.52.6",
6091
6687
]
6092
6688
···
6096
6692
source = "registry+https://github.com/rust-lang/crates.io-index"
6097
6693
checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99"
6098
6694
dependencies = [
6099
-
"windows-implement 0.58.0",
6100
-
"windows-interface 0.58.0",
6695
+
"windows-implement",
6696
+
"windows-interface",
6101
6697
"windows-result 0.2.0",
6102
6698
"windows-strings 0.1.0",
6103
6699
"windows-targets 0.52.6",
6104
6700
]
6105
6701
6106
6702
[[package]]
6107
-
name = "windows-core"
6108
-
version = "0.61.0"
6109
-
source = "registry+https://github.com/rust-lang/crates.io-index"
6110
-
checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980"
6111
-
dependencies = [
6112
-
"windows-implement 0.60.0",
6113
-
"windows-interface 0.59.1",
6114
-
"windows-link",
6115
-
"windows-result 0.3.4",
6116
-
"windows-strings 0.4.2",
6117
-
]
6118
-
6119
-
[[package]]
6120
6703
name = "windows-implement"
6121
6704
version = "0.58.0"
6122
6705
source = "registry+https://github.com/rust-lang/crates.io-index"
···
6124
6707
dependencies = [
6125
6708
"proc-macro2",
6126
6709
"quote",
6127
-
"syn 2.0.103",
6128
-
]
6129
-
6130
-
[[package]]
6131
-
name = "windows-implement"
6132
-
version = "0.60.0"
6133
-
source = "registry+https://github.com/rust-lang/crates.io-index"
6134
-
checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
6135
-
dependencies = [
6136
-
"proc-macro2",
6137
-
"quote",
6138
-
"syn 2.0.103",
6710
+
"syn 2.0.106",
6139
6711
]
6140
6712
6141
6713
[[package]]
···
6146
6718
dependencies = [
6147
6719
"proc-macro2",
6148
6720
"quote",
6149
-
"syn 2.0.103",
6150
-
]
6151
-
6152
-
[[package]]
6153
-
name = "windows-interface"
6154
-
version = "0.59.1"
6155
-
source = "registry+https://github.com/rust-lang/crates.io-index"
6156
-
checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
6157
-
dependencies = [
6158
-
"proc-macro2",
6159
-
"quote",
6160
-
"syn 2.0.103",
6721
+
"syn 2.0.106",
6161
6722
]
6162
6723
6163
6724
[[package]]
···
6364
6925
6365
6926
[[package]]
6366
6927
name = "winnow"
6367
-
version = "0.7.11"
6928
+
version = "0.7.13"
6368
6929
source = "registry+https://github.com/rust-lang/crates.io-index"
6369
-
checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd"
6930
+
checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf"
6370
6931
dependencies = [
6371
6932
"memchr",
6372
6933
]
···
6424
6985
"nom",
6425
6986
"oid-registry",
6426
6987
"rusticata-macros",
6427
-
"thiserror 2.0.12",
6988
+
"thiserror 2.0.17",
6428
6989
"time",
6429
6990
]
6430
6991
···
6463
7024
dependencies = [
6464
7025
"proc-macro2",
6465
7026
"quote",
6466
-
"syn 2.0.103",
7027
+
"syn 2.0.106",
6467
7028
"synstructure",
6468
7029
]
6469
7030
···
6493
7054
dependencies = [
6494
7055
"proc-macro2",
6495
7056
"quote",
6496
-
"syn 2.0.103",
7057
+
"syn 2.0.106",
6497
7058
]
6498
7059
6499
7060
[[package]]
···
6504
7065
dependencies = [
6505
7066
"proc-macro2",
6506
7067
"quote",
6507
-
"syn 2.0.103",
7068
+
"syn 2.0.106",
6508
7069
]
6509
7070
6510
7071
[[package]]
···
6524
7085
dependencies = [
6525
7086
"proc-macro2",
6526
7087
"quote",
6527
-
"syn 2.0.103",
7088
+
"syn 2.0.106",
6528
7089
"synstructure",
6529
7090
]
6530
7091
···
6535
7096
checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
6536
7097
dependencies = [
6537
7098
"serde",
7099
+
"zeroize_derive",
7100
+
]
7101
+
7102
+
[[package]]
7103
+
name = "zeroize_derive"
7104
+
version = "1.4.2"
7105
+
source = "registry+https://github.com/rust-lang/crates.io-index"
7106
+
checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
7107
+
dependencies = [
7108
+
"proc-macro2",
7109
+
"quote",
7110
+
"syn 2.0.106",
6538
7111
]
6539
7112
6540
7113
[[package]]
···
6556
7129
dependencies = [
6557
7130
"proc-macro2",
6558
7131
"quote",
6559
-
"syn 2.0.103",
7132
+
"syn 2.0.106",
6560
7133
]
6561
7134
6562
7135
[[package]]

+3 Cargo.toml

+8 -1 Makefile
···
 	cargo test --all-features
 
 fmt:
-	cargo fmt --package links --package constellation --package ufos --package spacedust --package who-am-i --package slingshot
+	cargo fmt --package links \
+		--package constellation \
+		--package ufos \
+		--package spacedust \
+		--package who-am-i \
+		--package slingshot \
+		--package pocket \
+		--package reflector
 	cargo +nightly fmt --package jetstream
 
 clippy:

+1 -1 constellation/Cargo.toml
···
 anyhow = "1.0.95"
 askama = { version = "0.12.1", features = ["serde-json"] }
 axum = "0.8.1"
-axum-extra = { version = "0.10.0", features = ["typed-header"] }
+axum-extra = { version = "0.10.0", features = ["query", "typed-header"] }
 axum-metrics = "0.2"
 bincode = "1.3.3"
 clap = { version = "4.5.26", features = ["derive"] }
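
The only functional change above is enabling axum-extra's `query` feature. For context, here is a minimal sketch of what that feature gates; the `Params` type and `/links` route are hypothetical illustrations, not code from this repo. Unlike axum's built-in extractor, `axum_extra::extract::Query` can deserialize repeated query keys such as `?did=a&did=b` into a `Vec`.

```rust
// Hypothetical illustration of axum-extra's "query" feature (not from this
// repository). axum_extra::extract::Query supports repeated keys, which
// axum::extract::Query rejects.
use axum::{routing::get, Router};
use axum_extra::extract::Query;
use serde::Deserialize;

#[derive(Deserialize)]
struct Params {
    // serde collects every `did` occurrence in the query string into the Vec
    #[serde(default)]
    did: Vec<String>,
}

async fn list_links(Query(params): Query<Params>) -> String {
    format!("{} dids requested", params.did.len())
}

fn router() -> Router {
    Router::new().route("/links", get(list_links))
}
```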

+661 constellation/LICENSE
(new file: the complete, verbatim text of the GNU Affero General Public License, Version 3, 19 November 2007, as published by the Free Software Foundation at <https://www.gnu.org/licenses/>, including the standard "How to Apply These Terms to Your New Programs" appendix)

+3 constellation/LICENSE.future

+9 constellation/readme.md
···
 ```
 
 
+## Contributions
+
+### Licensing
+
+Constellation's source code is currently available exclusively under the AGPL license (see [LICENSE](./LICENSE)).
+
+In the future, its code MAY become available under the MIT and/or Apache2.0 licenses, at the sole discretion of the microcosm organization. Contributing implies acceptance of this possible future licensing change. The change has not happened yet and is not guaranteed.
+
+
 some todos
 
 - [x] find links and write them to rocksdb

+57 -14 constellation/src/bin/main.rs
···
 use anyhow::{bail, Result};
 use clap::{Parser, ValueEnum};
 use metrics_exporter_prometheus::PrometheusBuilder;
+use std::net::SocketAddr;
 use std::num::NonZero;
 use std::path::PathBuf;
 use std::sync::{atomic::AtomicU32, Arc};
···
 #[derive(Parser, Debug)]
 #[command(version, about, long_about = None)]
 struct Args {
-    #[arg(short, long)]
+    /// constellation server's listen address
+    #[arg(long)]
+    #[clap(default_value = "0.0.0.0:6789")]
+    bind: SocketAddr,
+    /// metrics server's listen address
+    #[arg(long)]
+    #[clap(default_value = "0.0.0.0:8765")]
+    bind_metrics: SocketAddr,
     /// Jetstream server to connect to (exclusive with --fixture). Provide either a wss:// URL, or a shorthand value:
     /// 'us-east-1', 'us-east-2', 'us-west-1', or 'us-west-2'
     #[arg(short, long)]
···
     /// Saved jsonl from jetstream to use instead of a live subscription
     #[arg(short, long)]
     fixture: Option<PathBuf>,
+    /// run a scan across the target id table and write all key -> ids to id -> keys
+    #[arg(long, action)]
+    repair_target_ids: bool,
 }
 
 #[derive(Debug, Clone, ValueEnum)]
···
     let stream = jetstream_url(&args.jetstream);
     println!("using jetstream server {stream:?}...",);
 
+    let bind = args.bind;
+    let metrics_bind = args.bind_metrics;
+
     let stay_alive = CancellationToken::new();
 
     match args.backend {
-        StorageBackend::Memory => run(MemStorage::new(), fixture, None, stream, stay_alive),
+        StorageBackend::Memory => run(
+            MemStorage::new(),
+            fixture,
+            None,
+            stream,
+            bind,
+            metrics_bind,
+            stay_alive,
+        ),
         #[cfg(feature = "rocks")]
         StorageBackend::Rocks => {
             let storage_dir = args.data.clone().unwrap_or("rocks.test".into());
···
                 rocks.start_backup(backup_dir, auto_backup, stay_alive.clone())?;
             }
             println!("rocks ready.");
-            run(rocks, fixture, args.data, stream, stay_alive)
+            std::thread::scope(|s| {
+                if args.repair_target_ids {
+                    let rocks = rocks.clone();
+                    let stay_alive = stay_alive.clone();
+                    s.spawn(move || {
+                        let rep = rocks.run_repair(time::Duration::from_millis(0), stay_alive);
+                        eprintln!("repair finished: {rep:?}");
+                        rep
+                    });
+                }
+                s.spawn(|| {
+                    let r = run(
+                        rocks,
+                        fixture,
+                        args.data,
+                        stream,
+                        bind,
+                        metrics_bind,
+                        stay_alive,
+                    );
+                    eprintln!("run finished: {r:?}");
+                    r
+                });
+            });
+            Ok(())
         }
     }
 }
···
     fixture: Option<PathBuf>,
     data_dir: Option<PathBuf>,
     stream: String,
+    bind: SocketAddr,
+    metrics_bind: SocketAddr,
     stay_alive: CancellationToken,
 ) -> Result<()> {
     ctrlc::set_handler({
···
         .build()
         .expect("axum startup")
         .block_on(async {
-            install_metrics_server()?;
-            serve(readable, "0.0.0.0:6789", staying_alive).await
+            install_metrics_server(metrics_bind)?;
+            serve(readable, bind, staying_alive).await
         })
         .unwrap();
     stay_alive.drop_guard();
···
 
     'monitor: loop {
         match readable.get_stats() {
-            Ok(StorageStats { dids, targetables, linking_records }) => {
+            Ok(StorageStats { dids, targetables, linking_records, .. }) => {
                 metrics::gauge!("storage.stats.dids").set(dids as f64);
                 metrics::gauge!("storage.stats.targetables").set(targetables as f64);
                 metrics::gauge!("storage.stats.linking_records").set(linking_records as f64);
···
     Ok(())
 }
 
-fn install_metrics_server() -> Result<()> {
+fn install_metrics_server(metrics_bind: SocketAddr) -> Result<()> {
     println!("installing metrics server...");
-    let host = [0, 0, 0, 0];
-    let port = 8765;
     PrometheusBuilder::new()
         .set_quantiles(&[0.5, 0.9, 0.99, 1.0])?
         .set_bucket_duration(time::Duration::from_secs(30))?
         .set_bucket_count(NonZero::new(10).unwrap()) // count * duration = 5 mins. stuff doesn't happen that fast here.
         .set_enable_unit_suffix(true)
-        .with_http_listener((host, port))
+        .with_http_listener(metrics_bind)
         .install()?;
-    println!(
-        "metrics server installed! listening on http://{}.{}.{}.{}:{port}",
-        host[0], host[1], host[2], host[3]
-    );
+    println!("metrics server installed! listening at {metrics_bind:?}");
     Ok(())
 }
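
The most interesting piece of the main.rs change above is running the optional repair pass and the main `run(...)` call under `std::thread::scope`. Scoped threads may borrow from the enclosing stack frame, and `scope` returns only after every spawned thread has been joined, so both tasks are guaranteed to finish before `Ok(())` is reached. A minimal, self-contained sketch of that pattern follows; the closures are stand-ins, nothing here is from this codebase.

```rust
// Sketch of the std::thread::scope pattern (assumed stand-in tasks only).
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

fn main() {
    // Borrowed directly by both threads: scoped spawns need no 'static
    // bound, so no Arc plumbing is required.
    let done = AtomicBool::new(false);

    thread::scope(|s| {
        // stand-in for the optional --repair-target-ids task
        s.spawn(|| {
            done.store(true, Ordering::SeqCst);
        });
        // stand-in for the main run(...) call
        s.spawn(|| {
            while !done.load(Ordering::SeqCst) {
                thread::yield_now();
            }
        });
    }); // scope() joins both threads before returning

    println!("both tasks finished");
}
```

In the diff itself, `rocks` and `stay_alive` are cheaply cloned into the repair thread, while `scope` guarantees the process does not fall through to `Ok(())` until both the repair scan and the server loop have exited.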

+239 -238 constellation/src/bin/rocks-link-stats.rs
···
-use bincode::config::Options;
-use clap::Parser;
-use serde::Serialize;
-use std::collections::HashMap;
-use std::path::PathBuf;
+// use bincode::config::Options;
+// use clap::Parser;
+// use serde::Serialize;
+// use std::collections::HashMap;
+// use std::path::PathBuf;
 
-use tokio_util::sync::CancellationToken;
+// use tokio_util::sync::CancellationToken;
 
-use constellation::storage::rocks_store::{
-    Collection, DidId, RKey, RPath, Target, TargetKey, TargetLinkers, _bincode_opts,
-};
-use constellation::storage::RocksStorage;
-use constellation::Did;
+// use constellation::storage::rocks_store::{
+//     Collection, DidId, RKey, RPath, Target, TargetKey, TargetLinkers, _bincode_opts,
+// };
+// use constellation::storage::RocksStorage;
+// use constellation::Did;
 
-use links::parse_any_link;
-use rocksdb::IteratorMode;
-use std::time;
+// use links::parse_any_link;
+// use rocksdb::IteratorMode;
+// use std::time;
 
-/// Aggregate links in the at-mosphere
-#[derive(Parser, Debug)]
-#[command(version, about, long_about = None)]
-struct Args {
-    /// where is rocksdb's data
-    #[arg(short, long)]
-    data: PathBuf,
-    /// slow down so we don't kill the firehose consumer, if running concurrently
-    #[arg(short, long)]
-    limit: Option<u64>,
-}
+// xxxx/// Aggregate links in the at-mosphere
+// #[derive(Parser, Debug)]
+// #[command(version, about, long_about = None)]
+// struct Args {
+//     /// where is rocksdb's data
+//     #[arg(short, long)]
+//     data: PathBuf,
+//     /// slow down so we don't kill the firehose consumer, if running concurrently
+//     #[arg(short, long)]
+//     limit: Option<u64>,
+// }
 
-type LinkType = String;
+// type LinkType = String;
 
-#[derive(Debug, Eq, Hash, PartialEq, Serialize)]
-struct SourceLink(Collection, RPath, LinkType, Option<Collection>); // last is target collection, if it's an at-uri link with a collection
+// #[derive(Debug, Eq, Hash, PartialEq, Serialize)]
+// struct SourceLink(Collection, RPath, LinkType, Option<Collection>); // last is target collection, if it's an at-uri link with a collection
 
-#[derive(Debug, Serialize)]
-struct SourceSample {
-    did: String,
-    rkey: String,
-}
+// #[derive(Debug, Serialize)]
+// struct SourceSample {
+//     did: String,
+//     rkey: String,
+// }
 
-#[derive(Debug, Default, Serialize)]
-struct Bucket {
-    count: u64,
-    sum: u64,
-    sample: Option<SourceSample>,
-}
+// #[derive(Debug, Default, Serialize)]
+// struct Bucket {
+//     count: u64,
+//     sum: u64,
+//     sample: Option<SourceSample>,
+// }
 
-#[derive(Debug, Default, Serialize)]
-struct Buckets([Bucket; 23]);
+// #[derive(Debug, Default, Serialize)]
+// struct Buckets([Bucket; 23]);
 
-const BUCKETS: [u64; 23] = [
-    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512, 1024, 4096, 16_384, 65_535,
-    262_144, 1_048_576,
-];
+// const BUCKETS: [u64; 23] = [
+//     1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512, 1024, 4096, 16_384, 65_535,
+//     262_144, 1_048_576,
+// ];
 
-// b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b12, b16, b32, b64, b128, b256, b512, b1024, b4096, b16384, b65535, b262144, bmax
+// xxx// b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b12, b16, b32, b64, b128, b256, b512, b1024, b4096, b16384, b65535, b262144, bmax
 
-static DID_IDS_CF: &str = "did_ids";
-static TARGET_IDS_CF: &str = "target_ids";
-static TARGET_LINKERS_CF: &str = "target_links";
+// static DID_IDS_CF: &str = "did_ids";
+// static TARGET_IDS_CF: &str = "target_ids";
+// static TARGET_LINKERS_CF: &str = "target_links";
 
-const REPORT_INTERVAL: usize = 50_000;
+// const REPORT_INTERVAL: usize = 50_000;
 
-type Stats = HashMap<SourceLink, Buckets>;
+// type Stats = HashMap<SourceLink, Buckets>;
 
-#[derive(Debug, Serialize)]
-struct Printable {
-    collection: String,
-    path: String,
-    link_type: String,
-    target_collection: Option<String>,
-    buckets: Buckets,
-}
+// #[derive(Debug, Serialize)]
+// struct Printable {
+//     collection: String,
+//     path: String,
+//     link_type: String,
+//     target_collection: Option<String>,
+//     buckets: Buckets,
+// }
 
-#[derive(Debug, Default)]
-struct ErrStats {
-    failed_to_get_sample: usize,
-    failed_to_read_target_id: usize,
-    failed_to_deserialize_target_key: usize,
-    failed_to_parse_target_as_link: usize,
-    failed_to_get_links: usize,
-    failed_to_deserialize_linkers: usize,
-}
+// #[derive(Debug, Default)]
+// struct ErrStats {
+//     failed_to_get_sample: usize,
+//     failed_to_read_target_id: usize,
+//     failed_to_deserialize_target_key: usize,
+//     failed_to_parse_target_as_link: usize,
+//     failed_to_get_links: usize,
+//     failed_to_deserialize_linkers: usize,
+// }
 
-fn thousands(n: usize) -> String {
-    n.to_string()
-        .as_bytes()
-        .rchunks(3)
-        .rev()
-        .map(std::str::from_utf8)
-        .collect::<Result<Vec<&str>, _>>()
-        .unwrap()
-        .join(",")
-}
+// fn thousands(n: usize) -> String {
+//     n.to_string()
+//         .as_bytes()
+//         .rchunks(3)
+//         .rev()
+//         .map(std::str::from_utf8)
+//         .collect::<Result<Vec<&str>, _>>()
+//         .unwrap()
+//         .join(",")
+// }
 
-fn main() {
-    let args = Args::parse();
+// fn main() {
+//     let args = Args::parse();
 
-    let limit = args.limit.map(|amount| {
-        ratelimit::Ratelimiter::builder(amount, time::Duration::from_secs(1))
-            .max_tokens(amount)
-            .initial_available(amount)
-            .build()
-            .unwrap()
-    });
+//     let limit = args.limit.map(|amount| {
+//         ratelimit::Ratelimiter::builder(amount, time::Duration::from_secs(1))
102
+
// .max_tokens(amount)
103
+
// .initial_available(amount)
104
+
// .build()
105
+
// .unwrap()
106
+
// });
107
107
108
-
eprintln!("starting rocksdb...");
109
-
let rocks = RocksStorage::open_readonly(args.data).unwrap();
110
-
eprintln!("rocks ready.");
108
+
// eprintln!("starting rocksdb...");
109
+
// let rocks = RocksStorage::open_readonly(args.data).unwrap();
110
+
// eprintln!("rocks ready.");
111
111
112
-
let RocksStorage { ref db, .. } = rocks;
112
+
// let RocksStorage { ref db, .. } = rocks;
113
113
114
-
let stay_alive = CancellationToken::new();
115
-
ctrlc::set_handler({
116
-
let mut desperation: u8 = 0;
117
-
let stay_alive = stay_alive.clone();
118
-
move || match desperation {
119
-
0 => {
120
-
eprintln!("ok, shutting down...");
121
-
stay_alive.cancel();
122
-
desperation += 1;
123
-
}
124
-
1.. => panic!("fine, panicking!"),
125
-
}
126
-
})
127
-
.unwrap();
114
+
// let stay_alive = CancellationToken::new();
115
+
// ctrlc::set_handler({
116
+
// let mut desperation: u8 = 0;
117
+
// let stay_alive = stay_alive.clone();
118
+
// move || match desperation {
119
+
// 0 => {
120
+
// eprintln!("ok, shutting down...");
121
+
// stay_alive.cancel();
122
+
// desperation += 1;
123
+
// }
124
+
// 1.. => panic!("fine, panicking!"),
125
+
// }
126
+
// })
127
+
// .unwrap();
128
128
129
-
let mut stats = Stats::new();
130
-
let mut err_stats: ErrStats = Default::default();
129
+
// let mut stats = Stats::new();
130
+
// let mut err_stats: ErrStats = Default::default();
131
131
132
-
let did_ids_cf = db.cf_handle(DID_IDS_CF).unwrap();
133
-
let target_id_cf = db.cf_handle(TARGET_IDS_CF).unwrap();
134
-
let target_links_cf = db.cf_handle(TARGET_LINKERS_CF).unwrap();
132
+
// let did_ids_cf = db.cf_handle(DID_IDS_CF).unwrap();
133
+
// let target_id_cf = db.cf_handle(TARGET_IDS_CF).unwrap();
134
+
// let target_links_cf = db.cf_handle(TARGET_LINKERS_CF).unwrap();
135
135
136
-
let t0 = time::Instant::now();
137
-
let mut t_prev = t0;
136
+
// let t0 = time::Instant::now();
137
+
// let mut t_prev = t0;
138
138
139
-
let mut i = 0;
140
-
for item in db.iterator_cf(&target_id_cf, IteratorMode::Start) {
141
-
if stay_alive.is_cancelled() {
142
-
break;
143
-
}
139
+
// let mut i = 0;
140
+
// for item in db.iterator_cf(&target_id_cf, IteratorMode::Start) {
141
+
// if stay_alive.is_cancelled() {
142
+
// break;
143
+
// }
144
144
145
-
if let Some(ref limiter) = limit {
146
-
if let Err(dur) = limiter.try_wait() {
147
-
std::thread::sleep(dur)
148
-
}
149
-
}
145
+
// if let Some(ref limiter) = limit {
146
+
// if let Err(dur) = limiter.try_wait() {
147
+
// std::thread::sleep(dur)
148
+
// }
149
+
// }
150
150
151
-
if i > 0 && i % REPORT_INTERVAL == 0 {
152
-
let now = time::Instant::now();
153
-
let rate = (REPORT_INTERVAL as f32) / (now.duration_since(t_prev).as_secs_f32());
154
-
eprintln!(
155
-
"{i}\t({}k)\t{:.2}\t{rate:.1}/s",
156
-
thousands(i / 1000),
157
-
t0.elapsed().as_secs_f32()
158
-
);
159
-
t_prev = now;
160
-
}
161
-
i += 1;
151
+
// if i > 0 && i % REPORT_INTERVAL == 0 {
152
+
// let now = time::Instant::now();
153
+
// let rate = (REPORT_INTERVAL as f32) / (now.duration_since(t_prev).as_secs_f32());
154
+
// eprintln!(
155
+
// "{i}\t({}k)\t{:.2}\t{rate:.1}/s",
156
+
// thousands(i / 1000),
157
+
// t0.elapsed().as_secs_f32()
158
+
// );
159
+
// t_prev = now;
160
+
// }
161
+
// i += 1;
162
162
163
-
let Ok((target_key, target_id)) = item else {
164
-
err_stats.failed_to_read_target_id += 1;
165
-
continue;
166
-
};
163
+
// let Ok((target_key, target_id)) = item else {
164
+
// err_stats.failed_to_read_target_id += 1;
165
+
// continue;
166
+
// };
167
167
168
-
let Ok(TargetKey(Target(target), collection, rpath)) =
169
-
_bincode_opts().deserialize(&target_key)
170
-
else {
171
-
err_stats.failed_to_deserialize_target_key += 1;
172
-
continue;
173
-
};
168
+
// let Ok(TargetKey(Target(target), collection, rpath)) =
169
+
// _bincode_opts().deserialize(&target_key)
170
+
// else {
171
+
// err_stats.failed_to_deserialize_target_key += 1;
172
+
// continue;
173
+
// };
174
174
175
-
let source = {
176
-
let Some(parsed) = parse_any_link(&target) else {
177
-
err_stats.failed_to_parse_target_as_link += 1;
178
-
continue;
179
-
};
180
-
SourceLink(
181
-
collection,
182
-
rpath,
183
-
parsed.name().into(),
184
-
parsed.at_uri_collection().map(Collection),
185
-
)
186
-
};
175
+
// let source = {
176
+
// let Some(parsed) = parse_any_link(&target) else {
177
+
// err_stats.failed_to_parse_target_as_link += 1;
178
+
// continue;
179
+
// };
180
+
// SourceLink(
181
+
// collection,
182
+
// rpath,
183
+
// parsed.name().into(),
184
+
// parsed.at_uri_collection().map(Collection),
185
+
// )
186
+
// };
187
187
188
-
let Ok(Some(links_raw)) = db.get_cf(&target_links_cf, &target_id) else {
189
-
err_stats.failed_to_get_links += 1;
190
-
continue;
191
-
};
192
-
let Ok(linkers) = _bincode_opts().deserialize::<TargetLinkers>(&links_raw) else {
193
-
err_stats.failed_to_deserialize_linkers += 1;
194
-
continue;
195
-
};
196
-
let (n, _) = linkers.count();
188
+
// let Ok(Some(links_raw)) = db.get_cf(&target_links_cf, &target_id) else {
189
+
// err_stats.failed_to_get_links += 1;
190
+
// continue;
191
+
// };
192
+
// let Ok(linkers) = _bincode_opts().deserialize::<TargetLinkers>(&links_raw) else {
193
+
// err_stats.failed_to_deserialize_linkers += 1;
194
+
// continue;
195
+
// };
196
+
// let (n, _) = linkers.count();
197
197
198
-
if n == 0 {
199
-
continue;
200
-
}
198
+
// if n == 0 {
199
+
// continue;
200
+
// }
201
201
202
-
let mut bucket = 0;
203
-
for edge in BUCKETS {
204
-
if n <= edge || bucket == 22 {
205
-
break;
206
-
}
207
-
bucket += 1;
208
-
}
202
+
// let mut bucket = 0;
203
+
// for edge in BUCKETS {
204
+
// if n <= edge || bucket == 22 {
205
+
// break;
206
+
// }
207
+
// bucket += 1;
208
+
// }
209
209
210
-
let b = &mut stats.entry(source).or_default().0[bucket];
211
-
b.count += 1;
212
-
b.sum += n;
213
-
if b.sample.is_none() {
214
-
let (DidId(did_id), RKey(k)) = &linkers.0[(n - 1) as usize];
215
-
if let Ok(Some(did_bytes)) = db.get_cf(&did_ids_cf, did_id.to_be_bytes()) {
216
-
if let Ok(Did(did)) = _bincode_opts().deserialize(&did_bytes) {
217
-
b.sample = Some(SourceSample {
218
-
did,
219
-
rkey: k.clone(),
220
-
});
221
-
} else {
222
-
err_stats.failed_to_get_sample += 1;
223
-
}
224
-
} else {
225
-
err_stats.failed_to_get_sample += 1;
226
-
}
227
-
}
210
+
// let b = &mut stats.entry(source).or_default().0[bucket];
211
+
// b.count += 1;
212
+
// b.sum += n;
213
+
// if b.sample.is_none() {
214
+
// let (DidId(did_id), RKey(k)) = &linkers.0[(n - 1) as usize];
215
+
// if let Ok(Some(did_bytes)) = db.get_cf(&did_ids_cf, did_id.to_be_bytes()) {
216
+
// if let Ok(Did(did)) = _bincode_opts().deserialize(&did_bytes) {
217
+
// b.sample = Some(SourceSample {
218
+
// did,
219
+
// rkey: k.clone(),
220
+
// });
221
+
// } else {
222
+
// err_stats.failed_to_get_sample += 1;
223
+
// }
224
+
// } else {
225
+
// err_stats.failed_to_get_sample += 1;
226
+
// }
227
+
// }
228
228
229
-
// if i >= 40_000 {
230
-
// break;
231
-
// }
232
-
}
229
+
// // if i >= 40_000 {
230
+
// // break;
231
+
// // }
232
+
// }
233
233
234
-
let dt = t0.elapsed();
234
+
// let dt = t0.elapsed();
235
235
236
-
eprintln!("gathering stats for output...");
236
+
// eprintln!("gathering stats for output...");
237
237
238
-
let itemified = stats
239
-
.into_iter()
240
-
.map(
241
-
|(
242
-
SourceLink(Collection(collection), RPath(path), link_type, target_collection),
243
-
buckets,
244
-
)| Printable {
245
-
collection,
246
-
path,
247
-
link_type,
248
-
target_collection: target_collection.map(|Collection(c)| c),
249
-
buckets,
250
-
},
251
-
)
252
-
.collect::<Vec<_>>();
238
+
// let itemified = stats
239
+
// .into_iter()
240
+
// .map(
241
+
// |(
242
+
// SourceLink(Collection(collection), RPath(path), link_type, target_collection),
243
+
// buckets,
244
+
// )| Printable {
245
+
// collection,
246
+
// path,
247
+
// link_type,
248
+
// target_collection: target_collection.map(|Collection(c)| c),
249
+
// buckets,
250
+
// },
251
+
// )
252
+
// .collect::<Vec<_>>();
253
253
254
-
match serde_json::to_string(&itemified) {
255
-
Ok(s) => println!("{s}"),
256
-
Err(e) => eprintln!("failed to serialize results: {e:?}"),
257
-
}
254
+
// match serde_json::to_string(&itemified) {
255
+
// Ok(s) => println!("{s}"),
256
+
// Err(e) => eprintln!("failed to serialize results: {e:?}"),
257
+
// }
258
258
259
-
eprintln!(
260
-
"{} summarizing {} link targets in {:.1}s",
261
-
if stay_alive.is_cancelled() {
262
-
"STOPPED"
263
-
} else {
264
-
"FINISHED"
265
-
},
266
-
thousands(i),
267
-
dt.as_secs_f32()
268
-
);
269
-
eprintln!("{err_stats:?}");
270
-
eprintln!("bye.");
271
-
}
259
+
// eprintln!(
260
+
// "{} summarizing {} link targets in {:.1}s",
261
+
// if stay_alive.is_cancelled() {
262
+
// "STOPPED"
263
+
// } else {
264
+
// "FINISHED"
265
+
// },
266
+
// thousands(i),
267
+
// dt.as_secs_f32()
268
+
// );
269
+
// eprintln!("{err_stats:?}");
270
+
// eprintln!("bye.");
271
+
// }
272
272
273
-
// scan plan
273
+
// xxx// scan plan
274
274
275
-
// buckets (backlink count)
276
-
// 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512, 1024, 4096, 16384, 65535, 262144, 1048576+
277
-
// by
278
-
// - collection
279
-
// - json path
280
-
// - link type
281
-
// samples for each bucket for each variation
275
+
// xxx// buckets (backlink count)
276
+
// xxx// 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512, 1024, 4096, 16384, 65535, 262144, 1048576+
277
+
// xxx// by
278
+
// xxx// - collection
279
+
// xxx// - json path
280
+
// xxx// - link type
281
+
// xxx// samples for each bucket for each variation
282
+
fn main() {}
+2
constellation/src/bin/rocks-restore-from-backup.rs
+2
constellation/src/bin/rocks-restore-from-backup.rs
···
3
3
use clap::Parser;
4
4
use std::path::PathBuf;
5
5
6
+
#[cfg(feature = "rocks")]
6
7
use rocksdb::backup::{BackupEngine, BackupEngineOptions, RestoreOptions};
7
8
8
9
use std::time;
···
19
20
to_data_dir: PathBuf,
20
21
}
21
22
23
+
#[cfg(feature = "rocks")]
22
24
fn main() -> Result<()> {
23
25
let args = Args::parse();
24
26
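Gating `main` behind the `rocks` feature means this bin target won't compile with the feature off unless a fallback entry point exists. A hypothetical stub (not shown in this diff) would look like:

```rust
// fallback for builds without the feature; otherwise rustc reports
// "main function not found" for this bin target
#[cfg(not(feature = "rocks"))]
fn main() {
    eprintln!("rocks-restore-from-backup was built without the `rocks` feature; nothing to do");
}
```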
+8
-6
constellation/src/server/filters.rs
+8
-6
constellation/src/server/filters.rs
···
5
5
Ok({
6
6
if let Some(link) = parse_any_link(s) {
7
7
match link {
8
-
Link::AtUri(at_uri) => at_uri.strip_prefix("at://").map(|noproto| {
9
-
format!("https://atproto-browser-plus-links.vercel.app/at/{noproto}")
10
-
}),
11
-
Link::Did(did) => Some(format!(
12
-
"https://atproto-browser-plus-links.vercel.app/at/{did}"
13
-
)),
8
+
Link::AtUri(at_uri) => at_uri
9
+
.strip_prefix("at://")
10
+
.map(|noproto| format!("https://pdsls.dev/at://{noproto}")),
11
+
Link::Did(did) => Some(format!("https://pdsls.dev/at://{did}")),
14
12
Link::Uri(uri) => Some(uri),
15
13
}
16
14
} else {
···
22
20
pub fn human_number(n: &u64) -> askama::Result<String> {
23
21
Ok(n.to_formatted_string(&Locale::en))
24
22
}
23
+
24
+
pub fn to_u64(n: usize) -> askama::Result<u64> {
25
+
Ok(n as u64)
26
+
}
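`to_u64` exists so `usize` values like `Vec::len` can feed `human_number`, which takes `&u64`. A minimal sketch of the chain called directly (in a template it would read `{{ records.len() | to_u64 | human_number }}`; the variable name is illustrative):

```rust
fn demo() -> askama::Result<String> {
    let records = vec![(), (), ()];
    // same composition the template filters perform: usize -> u64 -> "3"
    human_number(&to_u64(records.len())?)
}
```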
+332
-19
constellation/src/server/mod.rs
+332
-19
constellation/src/server/mod.rs
···
11
11
use bincode::Options;
12
12
use serde::{Deserialize, Serialize};
13
13
use serde_with::serde_as;
14
-
use std::collections::HashMap;
14
+
use std::collections::{HashMap, HashSet};
15
15
use std::time::{Duration, UNIX_EPOCH};
16
16
use tokio::net::{TcpListener, ToSocketAddrs};
17
-
use tokio::task::block_in_place;
17
+
use tokio::task::spawn_blocking;
18
18
use tokio_util::sync::CancellationToken;
19
19
20
20
use crate::storage::{LinkReader, StorageStats};
···
28
28
const DEFAULT_CURSOR_LIMIT: u64 = 16;
29
29
const DEFAULT_CURSOR_LIMIT_MAX: u64 = 100;
30
30
31
-
const INDEX_BEGAN_AT_TS: u64 = 1738083600; // TODO: not this
31
+
fn get_default_cursor_limit() -> u64 {
32
+
DEFAULT_CURSOR_LIMIT
33
+
}
34
+
35
+
fn to500(e: tokio::task::JoinError) -> http::StatusCode {
36
+
eprintln!("handler error: {e}");
37
+
http::StatusCode::INTERNAL_SERVER_ERROR
38
+
}
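Every route below repeats the same move: `block_in_place` becomes `spawn_blocking`, with `to500` absorbing a panicked or cancelled task. The pattern in isolation, as a sketch with placeholder names:

```rust
// `route_adapter` and `blocking_handler` are placeholders, not from the diff
async fn route_adapter<S: Send + 'static>(
    store: S,
) -> Result<impl axum::response::IntoResponse, http::StatusCode> {
    tokio::task::spawn_blocking(move || blocking_handler(store))
        .await
        .map_err(to500)? // JoinError -> 500; the handler's own Result passes through
}

fn blocking_handler<S>(_store: S) -> Result<String, http::StatusCode> {
    Ok("ok".to_string())
}
```

Unlike `block_in_place`, this keeps the current runtime worker free while the store query runs on the blocking pool.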
32
39
33
40
pub async fn serve<S, A>(store: S, addr: A, stay_alive: CancellationToken) -> anyhow::Result<()>
34
41
where
···
41
48
"/",
42
49
get({
43
50
let store = store.clone();
44
-
move |accept| async { block_in_place(|| hello(accept, store)) }
51
+
move |accept| async {
52
+
spawn_blocking(|| hello(accept, store))
53
+
.await
54
+
.map_err(to500)?
55
+
}
56
+
}),
57
+
)
58
+
.route(
59
+
"/xrpc/blue.microcosm.links.getManyToManyCounts",
60
+
get({
61
+
let store = store.clone();
62
+
move |accept, query| async {
63
+
spawn_blocking(|| get_many_to_many_counts(accept, query, store))
64
+
.await
65
+
.map_err(to500)?
66
+
}
45
67
}),
46
68
)
47
69
.route(
48
70
"/links/count",
49
71
get({
50
72
let store = store.clone();
51
-
move |accept, query| async { block_in_place(|| count_links(accept, query, store)) }
73
+
move |accept, query| async {
74
+
spawn_blocking(|| count_links(accept, query, store))
75
+
.await
76
+
.map_err(to500)?
77
+
}
52
78
}),
53
79
)
54
80
.route(
···
56
82
get({
57
83
let store = store.clone();
58
84
move |accept, query| async {
59
-
block_in_place(|| count_distinct_dids(accept, query, store))
85
+
spawn_blocking(|| count_distinct_dids(accept, query, store))
86
+
.await
87
+
.map_err(to500)?
88
+
}
89
+
}),
90
+
)
91
+
.route(
92
+
"/xrpc/blue.microcosm.links.getBacklinks",
93
+
get({
94
+
let store = store.clone();
95
+
move |accept, query| async {
96
+
spawn_blocking(|| get_backlinks(accept, query, store))
97
+
.await
98
+
.map_err(to500)?
60
99
}
61
100
}),
62
101
)
···
64
103
"/links",
65
104
get({
66
105
let store = store.clone();
67
-
move |accept, query| async { block_in_place(|| get_links(accept, query, store)) }
106
+
move |accept, query| async {
107
+
spawn_blocking(|| get_links(accept, query, store))
108
+
.await
109
+
.map_err(to500)?
110
+
}
68
111
}),
69
112
)
70
113
.route(
···
72
115
get({
73
116
let store = store.clone();
74
117
move |accept, query| async {
75
-
block_in_place(|| get_distinct_dids(accept, query, store))
118
+
spawn_blocking(|| get_distinct_dids(accept, query, store))
119
+
.await
120
+
.map_err(to500)?
76
121
}
77
122
}),
78
123
)
···
82
127
get({
83
128
let store = store.clone();
84
129
move |accept, query| async {
85
-
block_in_place(|| count_all_links(accept, query, store))
130
+
spawn_blocking(|| count_all_links(accept, query, store))
131
+
.await
132
+
.map_err(to500)?
86
133
}
87
134
}),
88
135
)
···
91
138
get({
92
139
let store = store.clone();
93
140
move |accept, query| async {
94
-
block_in_place(|| explore_links(accept, query, store))
141
+
spawn_blocking(|| explore_links(accept, query, store))
142
+
.await
143
+
.map_err(to500)?
95
144
}
96
145
}),
97
146
)
···
150
199
#[template(path = "hello.html.j2")]
151
200
struct HelloReponse {
152
201
help: &'static str,
153
-
days_indexed: u64,
202
+
days_indexed: Option<u64>,
154
203
stats: StorageStats,
155
204
}
156
205
fn hello(
···
160
209
let stats = store
161
210
.get_stats()
162
211
.map_err(|_| http::StatusCode::INTERNAL_SERVER_ERROR)?;
163
-
let days_indexed = (UNIX_EPOCH + Duration::from_secs(INDEX_BEGAN_AT_TS))
164
-
.elapsed()
212
+
let days_indexed = stats
213
+
.started_at
214
+
.map(|c| (UNIX_EPOCH + Duration::from_micros(c)).elapsed())
215
+
.transpose()
165
216
.map_err(|_| http::StatusCode::INTERNAL_SERVER_ERROR)?
166
-
.as_secs()
167
-
/ 86400;
217
+
.map(|d| d.as_secs() / 86_400);
168
218
Ok(acceptable(accept, HelloReponse {
169
219
help: "open this URL in a web browser (or request with Accept: text/html) for information about this API.",
170
220
days_indexed,
···
173
223
}
174
224
175
225
#[derive(Clone, Deserialize)]
226
+
#[serde(rename_all = "camelCase")]
227
+
struct GetManyToManyCountsQuery {
228
+
subject: String,
229
+
source: String,
230
+
/// path to the secondary link in the linking record
231
+
path_to_other: String,
232
+
/// filter to linking records (join of the m2m) by these DIDs
233
+
#[serde(default)]
234
+
did: Vec<String>,
235
+
/// filter to specific secondary records
236
+
#[serde(default)]
237
+
other_subject: Vec<String>,
238
+
cursor: Option<OpaqueApiCursor>,
239
+
/// Set the max number of links to return per page of results
240
+
#[serde(default = "get_default_cursor_limit")]
241
+
limit: u64,
242
+
}
243
+
#[derive(Serialize)]
244
+
struct OtherSubjectCount {
245
+
subject: String,
246
+
total: u64,
247
+
distinct: u64,
248
+
}
249
+
#[derive(Template, Serialize)]
250
+
#[template(path = "get-many-to-many-counts.html.j2")]
251
+
struct GetManyToManyCountsResponse {
252
+
counts_by_other_subject: Vec<OtherSubjectCount>,
253
+
cursor: Option<OpaqueApiCursor>,
254
+
#[serde(skip_serializing)]
255
+
query: GetManyToManyCountsQuery,
256
+
}
257
+
fn get_many_to_many_counts(
258
+
accept: ExtractAccept,
259
+
query: axum_extra::extract::Query<GetManyToManyCountsQuery>,
260
+
store: impl LinkReader,
261
+
) -> Result<impl IntoResponse, http::StatusCode> {
262
+
let cursor_key = query
263
+
.cursor
264
+
.clone()
265
+
.map(|oc| ApiKeyedCursor::try_from(oc).map_err(|_| http::StatusCode::BAD_REQUEST))
266
+
.transpose()?
267
+
.map(|c| c.next);
268
+
269
+
let limit = query.limit;
270
+
if limit > DEFAULT_CURSOR_LIMIT_MAX {
271
+
return Err(http::StatusCode::BAD_REQUEST);
272
+
}
273
+
274
+
let filter_dids: HashSet<Did> = HashSet::from_iter(
275
+
query
276
+
.did
277
+
.iter()
278
+
.map(|d| d.trim())
279
+
.filter(|d| !d.is_empty())
280
+
.map(|d| Did(d.to_string())),
281
+
);
282
+
283
+
let filter_other_subjects: HashSet<String> = HashSet::from_iter(
284
+
query
285
+
.other_subject
286
+
.iter()
287
+
.map(|s| s.trim().to_string())
288
+
.filter(|s| !s.is_empty()),
289
+
);
290
+
291
+
let Some((collection, path)) = query.source.split_once(':') else {
292
+
return Err(http::StatusCode::BAD_REQUEST);
293
+
};
294
+
let path = format!(".{path}");
295
+
296
+
let path_to_other = format!(".{}", query.path_to_other);
297
+
298
+
let paged = store
299
+
.get_many_to_many_counts(
300
+
&query.subject,
301
+
collection,
302
+
&path,
303
+
&path_to_other,
304
+
limit,
305
+
cursor_key,
306
+
&filter_dids,
307
+
&filter_other_subjects,
308
+
)
309
+
.map_err(|_| http::StatusCode::INTERNAL_SERVER_ERROR)?;
310
+
311
+
let cursor = paged.next.map(|next| ApiKeyedCursor { next }.into());
312
+
313
+
let items = paged
314
+
.items
315
+
.into_iter()
316
+
.map(|(subject, total, distinct)| OtherSubjectCount {
317
+
subject,
318
+
total,
319
+
distinct,
320
+
})
321
+
.collect();
322
+
323
+
Ok(acceptable(
324
+
accept,
325
+
GetManyToManyCountsResponse {
326
+
counts_by_other_subject: items,
327
+
cursor,
328
+
query: (*query).clone(),
329
+
},
330
+
))
331
+
}
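Given the `#[serde(rename_all = "camelCase")]` above, a request to the new endpoint looks like (host and record values illustrative): `GET /xrpc/blue.microcosm.links.getManyToManyCounts?subject=at://did:plc:example/app.t.c/rkey1&source=app.t.c:abc.uri&pathToOther=def.uri&limit=10`. Note that `source` packs collection and path as `collection:path`, and both paths are sent without the leading dot, which the handler prepends.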
332
+
333
+
#[derive(Clone, Deserialize)]
176
334
struct GetLinksCountQuery {
177
335
target: String,
178
336
collection: String,
···
233
391
}
234
392
235
393
#[derive(Clone, Deserialize)]
394
+
struct GetBacklinksQuery {
395
+
/// The link target
396
+
///
397
+
/// can be an AT-URI, plain DID, or regular URI
398
+
subject: String,
399
+
/// Filter links only from this link source
400
+
///
401
+
/// eg.: `app.bsky.feed.like:subject.uri`
402
+
source: String,
403
+
cursor: Option<OpaqueApiCursor>,
404
+
/// Filter links only from these DIDs
405
+
///
406
+
/// include multiple times to filter by multiple source DIDs
407
+
#[serde(default)]
408
+
did: Vec<String>,
409
+
/// Set the max number of links to return per page of results
410
+
#[serde(default = "get_default_cursor_limit")]
411
+
limit: u64,
412
+
// TODO: allow reverse (er, forward) order as well
413
+
}
414
+
#[derive(Template, Serialize)]
415
+
#[template(path = "get-backlinks.html.j2")]
416
+
struct GetBacklinksResponse {
417
+
total: u64,
418
+
records: Vec<RecordId>,
419
+
cursor: Option<OpaqueApiCursor>,
420
+
#[serde(skip_serializing)]
421
+
query: GetBacklinksQuery,
422
+
#[serde(skip_serializing)]
423
+
collection: String,
424
+
#[serde(skip_serializing)]
425
+
path: String,
426
+
}
427
+
fn get_backlinks(
428
+
accept: ExtractAccept,
429
+
query: axum_extra::extract::Query<GetBacklinksQuery>, // supports multiple param occurrences
430
+
store: impl LinkReader,
431
+
) -> Result<impl IntoResponse, http::StatusCode> {
432
+
let until = query
433
+
.cursor
434
+
.clone()
435
+
.map(|oc| ApiCursor::try_from(oc).map_err(|_| http::StatusCode::BAD_REQUEST))
436
+
.transpose()?
437
+
.map(|c| c.next);
438
+
439
+
let limit = query.limit;
440
+
if limit > DEFAULT_CURSOR_LIMIT_MAX {
441
+
return Err(http::StatusCode::BAD_REQUEST);
442
+
}
443
+
444
+
let filter_dids: HashSet<Did> = HashSet::from_iter(
445
+
query
446
+
.did
447
+
.iter()
448
+
.map(|d| d.trim())
449
+
.filter(|d| !d.is_empty())
450
+
.map(|d| Did(d.to_string())),
451
+
);
452
+
453
+
let Some((collection, path)) = query.source.split_once(':') else {
454
+
return Err(http::StatusCode::BAD_REQUEST);
455
+
};
456
+
let path = format!(".{path}");
457
+
458
+
let paged = store
459
+
.get_links(
460
+
&query.subject,
461
+
collection,
462
+
&path,
463
+
limit,
464
+
until,
465
+
&filter_dids,
466
+
)
467
+
.map_err(|_| http::StatusCode::INTERNAL_SERVER_ERROR)?;
468
+
469
+
let cursor = paged.next.map(|next| {
470
+
ApiCursor {
471
+
version: paged.version,
472
+
next,
473
+
}
474
+
.into()
475
+
});
476
+
477
+
Ok(acceptable(
478
+
accept,
479
+
GetBacklinksResponse {
480
+
total: paged.total,
481
+
records: paged.items,
482
+
cursor,
483
+
query: (*query).clone(),
484
+
collection: collection.to_string(),
485
+
path,
486
+
},
487
+
))
488
+
}
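A request to this endpoint (values illustrative): `GET /xrpc/blue.microcosm.links.getBacklinks?subject=at://did:plc:example/app.bsky.feed.post/rkey1&source=app.bsky.feed.like:subject.uri&did=did:plc:a&did=did:plc:b&limit=16`. Repeating `did` works because the handler takes `axum_extra::extract::Query`, which collects multiple occurrences into the `Vec`.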
489
+
490
+
#[derive(Clone, Deserialize)]
236
491
struct GetLinkItemsQuery {
237
492
target: String,
238
493
collection: String,
239
494
path: String,
240
495
cursor: Option<OpaqueApiCursor>,
241
-
limit: Option<u64>,
496
+
/// Filter links only from these DIDs
497
+
///
498
+
/// include multiple times to filter by multiple source DIDs
499
+
#[serde(default)]
500
+
did: Vec<String>,
501
+
/// [deprecated] Filter links only from these DIDs
502
+
///
503
+
/// format: comma-separated sequence of DIDs
504
+
///
505
+
/// errors: if `did` parameter is also present
506
+
///
507
+
/// deprecated: use `did`, which can be repeated multiple times
508
+
from_dids: Option<String>, // comma separated: gross
509
+
#[serde(default = "get_default_cursor_limit")]
510
+
limit: u64,
242
511
// TODO: allow reverse (er, forward) order as well
243
512
}
244
513
#[derive(Template, Serialize)]
···
255
524
}
256
525
fn get_links(
257
526
accept: ExtractAccept,
258
-
query: Query<GetLinkItemsQuery>,
527
+
query: axum_extra::extract::Query<GetLinkItemsQuery>, // supports multiple param occurrences
259
528
store: impl LinkReader,
260
529
) -> Result<impl IntoResponse, http::StatusCode> {
261
530
let until = query
···
265
534
.transpose()?
266
535
.map(|c| c.next);
267
536
268
-
let limit = query.limit.unwrap_or(DEFAULT_CURSOR_LIMIT);
537
+
let limit = query.limit;
269
538
if limit > DEFAULT_CURSOR_LIMIT_MAX {
270
539
return Err(http::StatusCode::BAD_REQUEST);
271
540
}
272
541
542
+
let mut filter_dids: HashSet<Did> = HashSet::from_iter(
543
+
query
544
+
.did
545
+
.iter()
546
+
.map(|d| d.trim())
547
+
.filter(|d| !d.is_empty())
548
+
.map(|d| Did(d.to_string())),
549
+
);
550
+
551
+
if let Some(comma_joined) = &query.from_dids {
552
+
if !filter_dids.is_empty() {
553
+
return Err(http::StatusCode::BAD_REQUEST);
554
+
}
555
+
for did in comma_joined.split(',') {
556
+
filter_dids.insert(Did(did.to_string()));
557
+
}
558
+
}
559
+
273
560
let paged = store
274
-
.get_links(&query.target, &query.collection, &query.path, limit, until)
561
+
.get_links(
562
+
&query.target,
563
+
&query.collection,
564
+
&query.path,
565
+
limit,
566
+
until,
567
+
&filter_dids,
568
+
)
275
569
.map_err(|_| http::StatusCode::INTERNAL_SERVER_ERROR)?;
276
570
277
571
let cursor = paged.next.map(|next| {
···
433
727
OpaqueApiCursor(bincode::DefaultOptions::new().serialize(&item).unwrap())
434
728
}
435
729
}
730
+
731
+
#[derive(Serialize, Deserialize)] // for bincode
732
+
struct ApiKeyedCursor {
733
+
next: String, // the key
734
+
}
735
+
736
+
impl TryFrom<OpaqueApiCursor> for ApiKeyedCursor {
737
+
type Error = bincode::Error;
738
+
739
+
fn try_from(item: OpaqueApiCursor) -> Result<Self, Self::Error> {
740
+
bincode::DefaultOptions::new().deserialize(&item.0)
741
+
}
742
+
}
743
+
744
+
impl From<ApiKeyedCursor> for OpaqueApiCursor {
745
+
fn from(item: ApiKeyedCursor) -> Self {
746
+
OpaqueApiCursor(bincode::DefaultOptions::new().serialize(&item).unwrap())
747
+
}
748
+
}
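A minimal sketch of the opaque-cursor round trip these impls define: the page key is bincode-encoded on the way out and recovered from the echoed cursor on the next request.

```rust
#[test]
fn keyed_cursor_roundtrip() {
    let out: OpaqueApiCursor = ApiKeyedCursor { next: "b.com".to_string() }.into();
    // `out` is what the response serializes and the client echoes back as ?cursor=
    let back = ApiKeyedCursor::try_from(out).expect("valid cursor bytes");
    assert_eq!(back.next, "b.com");
}
```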
+93
-1
constellation/src/storage/mem_store.rs
+93
-1
constellation/src/storage/mem_store.rs
···
1
-
use super::{LinkReader, LinkStorage, PagedAppendingCollection, StorageStats};
1
+
use super::{
2
+
LinkReader, LinkStorage, PagedAppendingCollection, PagedOrderedCollection, StorageStats,
3
+
};
2
4
use crate::{ActionableEvent, CountsByCount, Did, RecordId};
3
5
use anyhow::Result;
4
6
use links::CollectedLink;
···
132
134
}
133
135
134
136
impl LinkReader for MemStorage {
137
+
fn get_many_to_many_counts(
138
+
&self,
139
+
target: &str,
140
+
collection: &str,
141
+
path: &str,
142
+
path_to_other: &str,
143
+
limit: u64,
144
+
after: Option<String>,
145
+
filter_dids: &HashSet<Did>,
146
+
filter_to_targets: &HashSet<String>,
147
+
) -> Result<PagedOrderedCollection<(String, u64, u64), String>> {
148
+
let data = self.0.lock().unwrap();
149
+
let Some(paths) = data.targets.get(&Target::new(target)) else {
150
+
return Ok(PagedOrderedCollection::default());
151
+
};
152
+
let Some(linkers) = paths.get(&Source::new(collection, path)) else {
153
+
return Ok(PagedOrderedCollection::default());
154
+
};
155
+
156
+
let path_to_other = RecordPath::new(path_to_other);
157
+
let filter_to_targets: HashSet<Target> =
158
+
HashSet::from_iter(filter_to_targets.iter().map(|s| Target::new(s)));
159
+
160
+
let mut grouped_counts: HashMap<Target, (u64, HashSet<Did>)> = HashMap::new();
161
+
for (did, rkey) in linkers.iter().flatten().cloned() {
162
+
if !filter_dids.is_empty() && !filter_dids.contains(&did) {
163
+
continue;
164
+
}
165
+
if let Some(fwd_target) = data
166
+
.links
167
+
.get(&did)
168
+
.unwrap_or(&HashMap::new())
169
+
.get(&RepoId {
170
+
collection: collection.to_string(),
171
+
rkey,
172
+
})
173
+
.unwrap_or(&Vec::new())
174
+
.iter()
175
+
.filter_map(|(path, target)| {
176
+
if *path == path_to_other
177
+
&& (filter_to_targets.is_empty() || filter_to_targets.contains(target))
178
+
{
179
+
Some(target)
180
+
} else {
181
+
None
182
+
}
183
+
})
184
+
.take(1)
185
+
.next()
186
+
{
187
+
let e = grouped_counts.entry(fwd_target.clone()).or_default();
188
+
e.0 += 1;
189
+
e.1.insert(did.clone());
190
+
}
191
+
}
192
+
let mut items: Vec<(String, u64, u64)> = grouped_counts
193
+
.iter()
194
+
.map(|(k, (n, u))| (k.0.clone(), *n, u.len() as u64))
195
+
.collect();
196
+
items.sort();
197
+
items = items
198
+
.into_iter()
199
+
.skip_while(|(t, _, _)| after.as_ref().map(|a| t <= a).unwrap_or(false))
200
+
.take(limit as usize)
201
+
.collect();
202
+
let next = if items.len() as u64 >= limit {
203
+
items.last().map(|(t, _, _)| t.clone())
204
+
} else {
205
+
None
206
+
};
207
+
Ok(PagedOrderedCollection { items, next })
208
+
}
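The grouping step above counts every matching link but only counts each DID once for the distinct column. The core of it, as a self-contained sketch:

```rust
use std::collections::{HashMap, HashSet};

// for each target: (total links, distinct linking DIDs)
fn group_counts(pairs: &[(&str, &str)]) -> HashMap<String, (u64, u64)> {
    let mut acc: HashMap<String, (u64, HashSet<String>)> = HashMap::new();
    for (target, did) in pairs {
        let e = acc.entry(target.to_string()).or_default();
        e.0 += 1;                     // total
        e.1.insert(did.to_string());  // set deduplicates per-DID
    }
    acc.into_iter()
        .map(|(t, (total, dids))| (t, (total, dids.len() as u64)))
        .collect()
}
```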
209
+
135
210
fn get_count(&self, target: &str, collection: &str, path: &str) -> Result<u64> {
136
211
let data = self.0.lock().unwrap();
137
212
let Some(paths) = data.targets.get(&Target::new(target)) else {
···
166
241
path: &str,
167
242
limit: u64,
168
243
until: Option<u64>,
244
+
filter_dids: &HashSet<Did>,
169
245
) -> Result<PagedAppendingCollection<RecordId>> {
170
246
let data = self.0.lock().unwrap();
171
247
let Some(paths) = data.targets.get(&Target::new(target)) else {
···
183
259
next: None,
184
260
total: 0,
185
261
});
262
+
};
263
+
264
+
let did_rkeys: Vec<_> = if !filter_dids.is_empty() {
265
+
did_rkeys
266
+
.iter()
267
+
.filter(|m| {
268
+
Option::<(Did, RKey)>::clone(m)
269
+
.map(|(did, _)| filter_dids.contains(&did))
270
+
.unwrap_or(false)
271
+
})
272
+
.cloned()
273
+
.collect()
274
+
} else {
275
+
did_rkeys.to_vec()
186
276
};
187
277
188
278
let total = did_rkeys.len();
···
338
428
dids,
339
429
targetables,
340
430
linking_records,
431
+
started_at: None,
432
+
other_data: Default::default(),
341
433
})
342
434
}
343
435
}
+484
-14
constellation/src/storage/mod.rs
+484
-14
constellation/src/storage/mod.rs
···
1
1
use crate::{ActionableEvent, CountsByCount, Did, RecordId};
2
2
use anyhow::Result;
3
3
use serde::{Deserialize, Serialize};
4
-
use std::collections::HashMap;
4
+
use std::collections::{HashMap, HashSet};
5
5
6
6
pub mod mem_store;
7
7
pub use mem_store::MemStorage;
···
19
19
pub total: u64,
20
20
}
21
21
22
+
/// A paged collection whose keys are sorted instead of indexed
23
+
///
24
+
/// this has weaker guarantees than PagedAppendingCollection: it might
25
+
/// not return a totally consistent snapshot. but it should avoid duplicates
26
+
/// and each page should at least be internally consistent.
27
+
#[derive(Debug, PartialEq, Default)]
28
+
pub struct PagedOrderedCollection<T, K: Ord> {
29
+
pub items: Vec<T>,
30
+
pub next: Option<K>,
31
+
}
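A sketch of how a caller pages through one of these, feeding `next` back in as the `after` key until it comes back `None` (the loop is assumed caller code, not from this diff):

```rust
fn drain_m2m(store: &impl LinkReader) -> anyhow::Result<()> {
    let mut after: Option<String> = None;
    loop {
        let page = store.get_many_to_many_counts(
            "a.com", "app.t.c", ".abc.uri", ".def.uri",
            100, after, &Default::default(), &Default::default(),
        )?;
        for (subject, total, distinct) in &page.items {
            println!("{subject}: {total} links from {distinct} DIDs");
        }
        match page.next {
            Some(key) => after = Some(key), // resume strictly after this key
            None => break,
        }
    }
    Ok(())
}
```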
32
+
22
33
#[derive(Debug, Deserialize, Serialize, PartialEq)]
23
34
pub struct StorageStats {
24
35
/// estimate of how many accounts we've seen create links. the _subjects_ of any links are not represented here.
···
33
44
/// records with multiple links are single-counted.
34
45
/// for LSM stores, deleted links don't decrement this, and updated records with any links will likely increment it.
35
46
pub linking_records: u64,
47
+
48
+
/// first jetstream cursor when this instance first started
49
+
pub started_at: Option<u64>,
50
+
51
+
/// anything else we want to throw in
52
+
pub other_data: HashMap<String, u64>,
36
53
}
37
54
38
55
pub trait LinkStorage: Send + Sync {
···
48
65
}
49
66
50
67
pub trait LinkReader: Clone + Send + Sync + 'static {
68
+
#[allow(clippy::too_many_arguments)]
69
+
fn get_many_to_many_counts(
70
+
&self,
71
+
target: &str,
72
+
collection: &str,
73
+
path: &str,
74
+
path_to_other: &str,
75
+
limit: u64,
76
+
after: Option<String>,
77
+
filter_dids: &HashSet<Did>,
78
+
filter_to_targets: &HashSet<String>,
79
+
) -> Result<PagedOrderedCollection<(String, u64, u64), String>>;
80
+
51
81
fn get_count(&self, target: &str, collection: &str, path: &str) -> Result<u64>;
52
82
53
83
fn get_distinct_did_count(&self, target: &str, collection: &str, path: &str) -> Result<u64>;
···
59
89
path: &str,
60
90
limit: u64,
61
91
until: Option<u64>,
92
+
filter_dids: &HashSet<Did>,
62
93
) -> Result<PagedAppendingCollection<RecordId>>;
63
94
64
95
fn get_distinct_dids(
···
145
176
);
146
177
assert_eq!(storage.get_distinct_did_count("", "", "")?, 0);
147
178
assert_eq!(
148
-
storage.get_links("a.com", "app.t.c", ".abc.uri", 100, None)?,
179
+
storage.get_links(
180
+
"a.com",
181
+
"app.t.c",
182
+
".abc.uri",
183
+
100,
184
+
None,
185
+
&HashSet::default()
186
+
)?,
149
187
PagedAppendingCollection {
150
188
version: (0, 0),
151
189
items: vec![],
···
641
679
0,
642
680
)?;
643
681
assert_eq!(
644
-
storage.get_links("a.com", "app.t.c", ".abc.uri", 100, None)?,
682
+
storage.get_links(
683
+
"a.com",
684
+
"app.t.c",
685
+
".abc.uri",
686
+
100,
687
+
None,
688
+
&HashSet::default()
689
+
)?,
645
690
PagedAppendingCollection {
646
691
version: (1, 0),
647
692
items: vec![RecordId {
···
682
727
0,
683
728
)?;
684
729
}
685
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None)?;
730
+
let links =
731
+
storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?;
686
732
let dids = storage.get_distinct_dids("a.com", "app.t.c", ".abc.uri", 2, None)?;
687
733
assert_eq!(
688
734
links,
···
713
759
total: 5,
714
760
}
715
761
);
716
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, links.next)?;
762
+
let links = storage.get_links(
763
+
"a.com",
764
+
"app.t.c",
765
+
".abc.uri",
766
+
2,
767
+
links.next,
768
+
&HashSet::default(),
769
+
)?;
717
770
let dids = storage.get_distinct_dids("a.com", "app.t.c", ".abc.uri", 2, dids.next)?;
718
771
assert_eq!(
719
772
links,
···
744
797
total: 5,
745
798
}
746
799
);
747
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, links.next)?;
800
+
let links = storage.get_links(
801
+
"a.com",
802
+
"app.t.c",
803
+
".abc.uri",
804
+
2,
805
+
links.next,
806
+
&HashSet::default(),
807
+
)?;
748
808
let dids = storage.get_distinct_dids("a.com", "app.t.c", ".abc.uri", 2, dids.next)?;
749
809
assert_eq!(
750
810
links,
···
771
831
assert_stats(storage.get_stats()?, 5..=5, 1..=1, 5..=5);
772
832
});
773
833
834
+
test_each_storage!(get_filtered_links, |storage| {
835
+
let links = storage.get_links(
836
+
"a.com",
837
+
"app.t.c",
838
+
".abc.uri",
839
+
2,
840
+
None,
841
+
&HashSet::from([Did("did:plc:linker".to_string())]),
842
+
)?;
843
+
assert_eq!(
844
+
links,
845
+
PagedAppendingCollection {
846
+
version: (0, 0),
847
+
items: vec![],
848
+
next: None,
849
+
total: 0,
850
+
}
851
+
);
852
+
853
+
storage.push(
854
+
&ActionableEvent::CreateLinks {
855
+
record_id: RecordId {
856
+
did: "did:plc:linker".into(),
857
+
collection: "app.t.c".into(),
858
+
rkey: "asdf".into(),
859
+
},
860
+
links: vec![CollectedLink {
861
+
target: Link::Uri("a.com".into()),
862
+
path: ".abc.uri".into(),
863
+
}],
864
+
},
865
+
0,
866
+
)?;
867
+
868
+
let links = storage.get_links(
869
+
"a.com",
870
+
"app.t.c",
871
+
".abc.uri",
872
+
2,
873
+
None,
874
+
&HashSet::from([Did("did:plc:linker".to_string())]),
875
+
)?;
876
+
assert_eq!(
877
+
links,
878
+
PagedAppendingCollection {
879
+
version: (1, 0),
880
+
items: vec![RecordId {
881
+
did: "did:plc:linker".into(),
882
+
collection: "app.t.c".into(),
883
+
rkey: "asdf".into(),
884
+
},],
885
+
next: None,
886
+
total: 1,
887
+
}
888
+
);
889
+
890
+
let links = storage.get_links(
891
+
"a.com",
892
+
"app.t.c",
893
+
".abc.uri",
894
+
2,
895
+
None,
896
+
&HashSet::from([Did("did:plc:someone-else".to_string())]),
897
+
)?;
898
+
assert_eq!(
899
+
links,
900
+
PagedAppendingCollection {
901
+
version: (0, 0),
902
+
items: vec![],
903
+
next: None,
904
+
total: 0,
905
+
}
906
+
);
907
+
908
+
storage.push(
909
+
&ActionableEvent::CreateLinks {
910
+
record_id: RecordId {
911
+
did: "did:plc:linker".into(),
912
+
collection: "app.t.c".into(),
913
+
rkey: "asdf-2".into(),
914
+
},
915
+
links: vec![CollectedLink {
916
+
target: Link::Uri("a.com".into()),
917
+
path: ".abc.uri".into(),
918
+
}],
919
+
},
920
+
0,
921
+
)?;
922
+
storage.push(
923
+
&ActionableEvent::CreateLinks {
924
+
record_id: RecordId {
925
+
did: "did:plc:someone-else".into(),
926
+
collection: "app.t.c".into(),
927
+
rkey: "asdf".into(),
928
+
},
929
+
links: vec![CollectedLink {
930
+
target: Link::Uri("a.com".into()),
931
+
path: ".abc.uri".into(),
932
+
}],
933
+
},
934
+
0,
935
+
)?;
936
+
937
+
let links = storage.get_links(
938
+
"a.com",
939
+
"app.t.c",
940
+
".abc.uri",
941
+
2,
942
+
None,
943
+
&HashSet::from([Did("did:plc:linker".to_string())]),
944
+
)?;
945
+
assert_eq!(
946
+
links,
947
+
PagedAppendingCollection {
948
+
version: (2, 0),
949
+
items: vec![
950
+
RecordId {
951
+
did: "did:plc:linker".into(),
952
+
collection: "app.t.c".into(),
953
+
rkey: "asdf-2".into(),
954
+
},
955
+
RecordId {
956
+
did: "did:plc:linker".into(),
957
+
collection: "app.t.c".into(),
958
+
rkey: "asdf".into(),
959
+
},
960
+
],
961
+
next: None,
962
+
total: 2,
963
+
}
964
+
);
965
+
966
+
let links = storage.get_links(
967
+
"a.com",
968
+
"app.t.c",
969
+
".abc.uri",
970
+
2,
971
+
None,
972
+
&HashSet::from([
973
+
Did("did:plc:linker".to_string()),
974
+
Did("did:plc:someone-else".to_string()),
975
+
]),
976
+
)?;
977
+
assert_eq!(
978
+
links,
979
+
PagedAppendingCollection {
980
+
version: (3, 0),
981
+
items: vec![
982
+
RecordId {
983
+
did: "did:plc:someone-else".into(),
984
+
collection: "app.t.c".into(),
985
+
rkey: "asdf".into(),
986
+
},
987
+
RecordId {
988
+
did: "did:plc:linker".into(),
989
+
collection: "app.t.c".into(),
990
+
rkey: "asdf-2".into(),
991
+
},
992
+
],
993
+
next: Some(1),
994
+
total: 3,
995
+
}
996
+
);
997
+
998
+
let links = storage.get_links(
999
+
"a.com",
1000
+
"app.t.c",
1001
+
".abc.uri",
1002
+
2,
1003
+
None,
1004
+
&HashSet::from([Did("did:plc:someone-unknown".to_string())]),
1005
+
)?;
1006
+
assert_eq!(
1007
+
links,
1008
+
PagedAppendingCollection {
1009
+
version: (0, 0),
1010
+
items: vec![],
1011
+
next: None,
1012
+
total: 0,
1013
+
}
1014
+
);
1015
+
});
1016
+
774
1017
test_each_storage!(get_links_exact_multiple, |storage| {
775
1018
for i in 1..=4 {
776
1019
storage.push(
···
788
1031
0,
789
1032
)?;
790
1033
}
791
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None)?;
1034
+
let links =
1035
+
storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?;
792
1036
assert_eq!(
793
1037
links,
794
1038
PagedAppendingCollection {
···
809
1053
total: 4,
810
1054
}
811
1055
);
812
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, links.next)?;
1056
+
let links = storage.get_links(
1057
+
"a.com",
1058
+
"app.t.c",
1059
+
".abc.uri",
1060
+
2,
1061
+
links.next,
1062
+
&HashSet::default(),
1063
+
)?;
813
1064
assert_eq!(
814
1065
links,
815
1066
PagedAppendingCollection {
···
850
1101
0,
851
1102
)?;
852
1103
}
853
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None)?;
1104
+
let links =
1105
+
storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?;
854
1106
assert_eq!(
855
1107
links,
856
1108
PagedAppendingCollection {
···
885
1137
},
886
1138
0,
887
1139
)?;
888
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, links.next)?;
1140
+
let links = storage.get_links(
1141
+
"a.com",
1142
+
"app.t.c",
1143
+
".abc.uri",
1144
+
2,
1145
+
links.next,
1146
+
&HashSet::default(),
1147
+
)?;
889
1148
assert_eq!(
890
1149
links,
891
1150
PagedAppendingCollection {
···
926
1185
0,
927
1186
)?;
928
1187
}
929
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None)?;
1188
+
let links =
1189
+
storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?;
930
1190
assert_eq!(
931
1191
links,
932
1192
PagedAppendingCollection {
···
955
1215
}),
956
1216
0,
957
1217
)?;
958
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, links.next)?;
1218
+
let links = storage.get_links(
1219
+
"a.com",
1220
+
"app.t.c",
1221
+
".abc.uri",
1222
+
2,
1223
+
links.next,
1224
+
&HashSet::default(),
1225
+
)?;
959
1226
assert_eq!(
960
1227
links,
961
1228
PagedAppendingCollection {
···
989
1256
0,
990
1257
)?;
991
1258
}
992
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None)?;
1259
+
let links =
1260
+
storage.get_links("a.com", "app.t.c", ".abc.uri", 2, None, &HashSet::default())?;
993
1261
assert_eq!(
994
1262
links,
995
1263
PagedAppendingCollection {
···
1014
1282
&ActionableEvent::DeactivateAccount("did:plc:asdf-1".into()),
1015
1283
0,
1016
1284
)?;
1017
-
let links = storage.get_links("a.com", "app.t.c", ".abc.uri", 2, links.next)?;
1285
+
let links = storage.get_links(
1286
+
"a.com",
1287
+
"app.t.c",
1288
+
".abc.uri",
1289
+
2,
1290
+
links.next,
1291
+
&HashSet::default(),
1292
+
)?;
1018
1293
assert_eq!(
1019
1294
links,
1020
1295
PagedAppendingCollection {
···
1081
1356
counts
1082
1357
});
1083
1358
assert_stats(storage.get_stats()?, 1..=1, 2..=2, 1..=1);
1359
+
});
1360
+
1361
+
//////// many-to-many /////////
1362
+
1363
+
test_each_storage!(get_m2m_counts_empty, |storage| {
1364
+
assert_eq!(
1365
+
storage.get_many_to_many_counts(
1366
+
"a.com",
1367
+
"a.b.c",
1368
+
".d.e",
1369
+
".f.g",
1370
+
10,
1371
+
None,
1372
+
&HashSet::new(),
1373
+
&HashSet::new(),
1374
+
)?,
1375
+
PagedOrderedCollection {
1376
+
items: vec![],
1377
+
next: None,
1378
+
}
1379
+
);
1380
+
});
1381
+
1382
+
test_each_storage!(get_m2m_counts_single, |storage| {
1383
+
storage.push(
1384
+
&ActionableEvent::CreateLinks {
1385
+
record_id: RecordId {
1386
+
did: "did:plc:asdf".into(),
1387
+
collection: "app.t.c".into(),
1388
+
rkey: "asdf".into(),
1389
+
},
1390
+
links: vec![
1391
+
CollectedLink {
1392
+
target: Link::Uri("a.com".into()),
1393
+
path: ".abc.uri".into(),
1394
+
},
1395
+
CollectedLink {
1396
+
target: Link::Uri("b.com".into()),
1397
+
path: ".def.uri".into(),
1398
+
},
1399
+
CollectedLink {
1400
+
target: Link::Uri("b.com".into()),
1401
+
path: ".ghi.uri".into(),
1402
+
},
1403
+
],
1404
+
},
1405
+
0,
1406
+
)?;
1407
+
assert_eq!(
1408
+
storage.get_many_to_many_counts(
1409
+
"a.com",
1410
+
"app.t.c",
1411
+
".abc.uri",
1412
+
".def.uri",
1413
+
10,
1414
+
None,
1415
+
&HashSet::new(),
1416
+
&HashSet::new(),
1417
+
)?,
1418
+
PagedOrderedCollection {
1419
+
items: vec![("b.com".to_string(), 1, 1)],
1420
+
next: None,
1421
+
}
1422
+
);
1423
+
});
1424
+
1425
+
test_each_storage!(get_m2m_counts_filters, |storage| {
1426
+
storage.push(
1427
+
&ActionableEvent::CreateLinks {
1428
+
record_id: RecordId {
1429
+
did: "did:plc:asdf".into(),
1430
+
collection: "app.t.c".into(),
1431
+
rkey: "asdf".into(),
1432
+
},
1433
+
links: vec![
1434
+
CollectedLink {
1435
+
target: Link::Uri("a.com".into()),
1436
+
path: ".abc.uri".into(),
1437
+
},
1438
+
CollectedLink {
1439
+
target: Link::Uri("b.com".into()),
1440
+
path: ".def.uri".into(),
1441
+
},
1442
+
],
1443
+
},
1444
+
0,
1445
+
)?;
1446
+
storage.push(
1447
+
&ActionableEvent::CreateLinks {
1448
+
record_id: RecordId {
1449
+
did: "did:plc:asdfasdf".into(),
1450
+
collection: "app.t.c".into(),
1451
+
rkey: "asdf".into(),
1452
+
},
1453
+
links: vec![
1454
+
CollectedLink {
1455
+
target: Link::Uri("a.com".into()),
1456
+
path: ".abc.uri".into(),
1457
+
},
1458
+
CollectedLink {
1459
+
target: Link::Uri("b.com".into()),
1460
+
path: ".def.uri".into(),
1461
+
},
1462
+
],
1463
+
},
1464
+
1,
1465
+
)?;
1466
+
storage.push(
1467
+
&ActionableEvent::CreateLinks {
1468
+
record_id: RecordId {
1469
+
did: "did:plc:fdsa".into(),
1470
+
collection: "app.t.c".into(),
1471
+
rkey: "asdf".into(),
1472
+
},
1473
+
links: vec![
1474
+
CollectedLink {
1475
+
target: Link::Uri("a.com".into()),
1476
+
path: ".abc.uri".into(),
1477
+
},
1478
+
CollectedLink {
1479
+
target: Link::Uri("c.com".into()),
1480
+
path: ".def.uri".into(),
1481
+
},
1482
+
],
1483
+
},
1484
+
2,
1485
+
)?;
1486
+
storage.push(
1487
+
&ActionableEvent::CreateLinks {
1488
+
record_id: RecordId {
1489
+
did: "did:plc:fdsa".into(),
1490
+
collection: "app.t.c".into(),
1491
+
rkey: "asdf2".into(),
1492
+
},
1493
+
links: vec![
1494
+
CollectedLink {
1495
+
target: Link::Uri("a.com".into()),
1496
+
path: ".abc.uri".into(),
1497
+
},
1498
+
CollectedLink {
1499
+
target: Link::Uri("c.com".into()),
1500
+
path: ".def.uri".into(),
1501
+
},
1502
+
],
1503
+
},
1504
+
3,
1505
+
)?;
1506
+
assert_eq!(
1507
+
storage.get_many_to_many_counts(
1508
+
"a.com",
1509
+
"app.t.c",
1510
+
".abc.uri",
1511
+
".def.uri",
1512
+
10,
1513
+
None,
1514
+
&HashSet::new(),
1515
+
&HashSet::new(),
1516
+
)?,
1517
+
PagedOrderedCollection {
1518
+
items: vec![("b.com".to_string(), 2, 2), ("c.com".to_string(), 2, 1),],
1519
+
next: None,
1520
+
}
1521
+
);
1522
+
assert_eq!(
1523
+
storage.get_many_to_many_counts(
1524
+
"a.com",
1525
+
"app.t.c",
1526
+
".abc.uri",
1527
+
".def.uri",
1528
+
10,
1529
+
None,
1530
+
&HashSet::from_iter([Did("did:plc:fdsa".to_string())]),
1531
+
&HashSet::new(),
1532
+
)?,
1533
+
PagedOrderedCollection {
1534
+
items: vec![("c.com".to_string(), 2, 1),],
1535
+
next: None,
1536
+
}
1537
+
);
1538
+
assert_eq!(
1539
+
storage.get_many_to_many_counts(
1540
+
"a.com",
1541
+
"app.t.c",
1542
+
".abc.uri",
1543
+
".def.uri",
1544
+
10,
1545
+
None,
1546
+
&HashSet::new(),
1547
+
&HashSet::from_iter(["b.com".to_string()]),
1548
+
)?,
1549
+
PagedOrderedCollection {
1550
+
items: vec![("b.com".to_string(), 2, 2),],
1551
+
next: None,
1552
+
}
1553
+
);
1084
1554
});
1085
1555
}
+361
-41
constellation/src/storage/rocks_store.rs
+361
-41
constellation/src/storage/rocks_store.rs
···
1
-
use super::{ActionableEvent, LinkReader, LinkStorage, PagedAppendingCollection, StorageStats};
1
+
use super::{
2
+
ActionableEvent, LinkReader, LinkStorage, PagedAppendingCollection, PagedOrderedCollection,
3
+
StorageStats,
4
+
};
2
5
use crate::{CountsByCount, Did, RecordId};
3
6
use anyhow::{bail, Result};
4
7
use bincode::Options as BincodeOptions;
···
11
14
MultiThreaded, Options, PrefixRange, ReadOptions, WriteBatch,
12
15
};
13
16
use serde::{Deserialize, Serialize};
14
-
use std::collections::{HashMap, HashSet};
17
+
use std::collections::{BTreeMap, HashMap, HashSet};
15
18
use std::io::Read;
16
19
use std::marker::PhantomData;
17
20
use std::path::{Path, PathBuf};
···
20
23
Arc,
21
24
};
22
25
use std::thread;
23
-
use std::time::{Duration, Instant};
26
+
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
24
27
use tokio_util::sync::CancellationToken;
25
28
26
29
static DID_IDS_CF: &str = "did_ids";
···
29
32
static LINK_TARGETS_CF: &str = "link_targets";
30
33
31
34
static JETSTREAM_CURSOR_KEY: &str = "jetstream_cursor";
35
+
static STARTED_AT_KEY: &str = "jetstream_first_cursor";
36
+
// add reverse mappings for targets if this db was running before that was a thing
37
+
static TARGET_ID_REPAIR_STATE_KEY: &str = "target_id_table_repair_state";
38
+
39
+
static COZY_FIRST_CURSOR: u64 = 1_738_083_600_000_000; // constellation.microcosm.blue started
40
+
41
+
#[derive(Debug, Clone, Serialize, Deserialize)]
42
+
struct TargetIdRepairState {
43
+
/// start time for repair, microseconds timestamp
44
+
current_us_started_at: u64,
45
+
/// id table's latest id when repair started
46
+
id_when_started: u64,
47
+
/// id table id
48
+
latest_repaired_i: u64,
49
+
}
50
+
impl AsRocksValue for TargetIdRepairState {}
51
+
impl ValueFromRocks for TargetIdRepairState {}
32
52
33
53
// todo: actually understand and set these options probably better
34
54
fn rocks_opts_base() -> Options {
···
56
76
#[derive(Debug, Clone)]
57
77
pub struct RocksStorage {
58
78
pub db: Arc<DBWithThreadMode<MultiThreaded>>, // TODO: mov seqs here (concat merge op will be fun)
59
-
did_id_table: IdTable<Did, DidIdValue, true>,
60
-
target_id_table: IdTable<TargetKey, TargetId, false>,
79
+
did_id_table: IdTable<Did, DidIdValue>,
80
+
target_id_table: IdTable<TargetKey, TargetId>,
61
81
is_writer: bool,
62
82
backup_task: Arc<Option<thread::JoinHandle<Result<()>>>>,
63
83
}
···
85
105
fn cf_descriptor(&self) -> ColumnFamilyDescriptor {
86
106
ColumnFamilyDescriptor::new(&self.name, rocks_opts_base())
87
107
}
88
-
fn init<const WITH_REVERSE: bool>(
89
-
self,
90
-
db: &DBWithThreadMode<MultiThreaded>,
91
-
) -> Result<IdTable<Orig, IdVal, WITH_REVERSE>> {
108
+
fn init(self, db: &DBWithThreadMode<MultiThreaded>) -> Result<IdTable<Orig, IdVal>> {
92
109
if db.cf_handle(&self.name).is_none() {
93
110
bail!("failed to get cf handle from db -- was the db open with our .cf_descriptor()?");
94
111
}
···
119
136
}
120
137
}
121
138
#[derive(Debug, Clone)]
122
-
struct IdTable<Orig, IdVal: IdTableValue, const WITH_REVERSE: bool>
139
+
struct IdTable<Orig, IdVal: IdTableValue>
123
140
where
124
141
Orig: KeyFromRocks,
125
142
for<'a> &'a Orig: AsRocksKey,
···
127
144
base: IdTableBase<Orig, IdVal>,
128
145
priv_id_seq: u64,
129
146
}
130
-
impl<Orig: Clone, IdVal: IdTableValue, const WITH_REVERSE: bool> IdTable<Orig, IdVal, WITH_REVERSE>
147
+
impl<Orig: Clone, IdVal: IdTableValue> IdTable<Orig, IdVal>
131
148
where
132
149
Orig: KeyFromRocks,
133
150
for<'v> &'v IdVal: AsRocksValue,
···
139
156
_key_marker: PhantomData,
140
157
_val_marker: PhantomData,
141
158
name: name.into(),
142
-
id_seq: Arc::new(AtomicU64::new(0)), // zero is "uninint", first seq num will be 1
159
+
id_seq: Arc::new(AtomicU64::new(0)), // zero is "uninit", first seq num will be 1
143
160
}
144
161
}
145
162
fn get_id_val(
···
178
195
id_value
179
196
}))
180
197
}
198
+
181
199
fn estimate_count(&self) -> u64 {
182
200
self.base.id_seq.load(Ordering::SeqCst) - 1 // -1 because seq zero is reserved
183
201
}
184
-
}
185
-
impl<Orig: Clone, IdVal: IdTableValue> IdTable<Orig, IdVal, true>
186
-
where
187
-
Orig: KeyFromRocks,
188
-
for<'v> &'v IdVal: AsRocksValue,
189
-
for<'k> &'k Orig: AsRocksKey,
190
-
{
202
+
191
203
fn get_or_create_id_val(
192
204
&mut self,
193
205
db: &DBWithThreadMode<MultiThreaded>,
···
215
227
}
216
228
}
217
229
}
218
-
impl<Orig: Clone, IdVal: IdTableValue> IdTable<Orig, IdVal, false>
219
-
where
220
-
Orig: KeyFromRocks,
221
-
for<'v> &'v IdVal: AsRocksValue,
222
-
for<'k> &'k Orig: AsRocksKey,
223
-
{
224
-
fn get_or_create_id_val(
225
-
&mut self,
226
-
db: &DBWithThreadMode<MultiThreaded>,
227
-
batch: &mut WriteBatch,
228
-
orig: &Orig,
229
-
) -> Result<IdVal> {
230
-
let cf = db.cf_handle(&self.base.name).unwrap();
231
-
self.__get_or_create_id_val(&cf, db, batch, orig)
232
-
}
233
-
}
234
230
235
231
impl IdTableValue for DidIdValue {
236
232
fn new(v: u64) -> Self {
···
249
245
}
250
246
}
251
247
248
+
fn now() -> u64 {
249
+
SystemTime::now()
250
+
.duration_since(UNIX_EPOCH)
251
+
.unwrap()
252
+
.as_micros() as u64
253
+
}
254
+
252
255
impl RocksStorage {
253
256
pub fn new(path: impl AsRef<Path>) -> Result<Self> {
254
257
Self::describe_metrics();
255
-
RocksStorage::open_readmode(path, false)
258
+
let me = RocksStorage::open_readmode(path, false)?;
259
+
me.global_init()?;
260
+
Ok(me)
256
261
}
257
262
258
263
pub fn open_readonly(path: impl AsRef<Path>) -> Result<Self> {
···
260
265
}
261
266
262
267
fn open_readmode(path: impl AsRef<Path>, readonly: bool) -> Result<Self> {
263
-
let did_id_table = IdTable::<_, _, true>::setup(DID_IDS_CF);
264
-
let target_id_table = IdTable::<_, _, false>::setup(TARGET_IDS_CF);
268
+
let did_id_table = IdTable::setup(DID_IDS_CF);
269
+
let target_id_table = IdTable::setup(TARGET_IDS_CF);
265
270
271
+
// note: global stuff like jetstream cursor goes in the default cf
272
+
// these are bonus extra cfs
266
273
let cfs = vec![
267
274
// id reference tables
268
275
did_id_table.cf_descriptor(),
···
296
303
is_writer: !readonly,
297
304
backup_task: None.into(),
298
305
})
306
+
}
307
+
308
+
fn global_init(&self) -> Result<()> {
309
+
let first_run = self.db.get(JETSTREAM_CURSOR_KEY)?.is_some();
310
+
if first_run {
311
+
self.db.put(STARTED_AT_KEY, _rv(now()))?;
312
+
313
+
// hack / temporary: if we're a new db, put in a completed repair
314
+
// state so we don't run repairs (repairs are for old-code dbs)
315
+
let completed = TargetIdRepairState {
316
+
id_when_started: 0,
317
+
current_us_started_at: 0,
318
+
latest_repaired_i: 0,
319
+
};
320
+
self.db.put(TARGET_ID_REPAIR_STATE_KEY, _rv(completed))?;
321
+
}
322
+
Ok(())
323
+
}
324
+
325
+
pub fn run_repair(&self, breather: Duration, stay_alive: CancellationToken) -> Result<bool> {
326
+
let mut state = match self
327
+
.db
328
+
.get(TARGET_ID_REPAIR_STATE_KEY)?
329
+
.map(|s| _vr(&s))
330
+
.transpose()?
331
+
{
332
+
Some(s) => s,
333
+
None => TargetIdRepairState {
334
+
id_when_started: self.did_id_table.priv_id_seq,
335
+
current_us_started_at: now(),
336
+
latest_repaired_i: 0,
337
+
},
338
+
};
339
+
340
+
eprintln!("initial repair state: {state:?}");
341
+
342
+
let cf = self.db.cf_handle(TARGET_IDS_CF).unwrap();
343
+
344
+
let mut iter = self.db.raw_iterator_cf(&cf);
345
+
iter.seek_to_first();
346
+
347
+
eprintln!("repair iterator sent to first key");
348
+
349
+
// skip ahead past keys we've already repaired
350
+
for _ in 0..state.latest_repaired_i {
351
+
iter.next();
352
+
}
353
+
354
+
eprintln!(
355
+
"repair iterator skipped to {}th key",
356
+
state.latest_repaired_i
357
+
);
358
+
359
+
let mut maybe_done = false;
360
+
361
+
let mut write_fast = rocksdb::WriteOptions::default();
362
+
write_fast.set_sync(false);
363
+
write_fast.disable_wal(true);
364
+
365
+
while !stay_alive.is_cancelled() && !maybe_done {
366
+
// let mut batch = WriteBatch::default();
367
+
368
+
let mut any_written = false;
369
+
370
+
for _ in 0..1000 {
371
+
if state.latest_repaired_i % 1_000_000 == 0 {
372
+
eprintln!("target iter at {}", state.latest_repaired_i);
373
+
}
374
+
state.latest_repaired_i += 1;
375
+
376
+
if !iter.valid() {
377
+
eprintln!("invalid iter, are we done repairing?");
378
+
maybe_done = true;
379
+
break;
380
+
};
381
+
382
+
// eprintln!("iterator seems to be valid! getting the key...");
383
+
let raw_key = iter.key().unwrap();
384
+
if raw_key.len() == 8 {
385
+
// eprintln!("found an 8-byte key, skipping it since it's probably an id...");
386
+
iter.next();
387
+
continue;
388
+
}
389
+
let target: TargetKey = _kr::<TargetKey>(raw_key)?;
390
+
let target_id: TargetId = _vr(iter.value().unwrap())?;
391
+
392
+
self.db
393
+
.put_cf_opt(&cf, target_id.id().to_be_bytes(), _rv(&target), &write_fast)?;
394
+
any_written = true;
395
+
iter.next();
396
+
}
397
+
398
+
if any_written {
399
+
self.db
400
+
.put(TARGET_ID_REPAIR_STATE_KEY, _rv(state.clone()))?;
401
+
std::thread::sleep(breather);
402
+
}
403
+
}
404
+
405
+
eprintln!("repair iterator done.");
406
+
407
+
Ok(maybe_done) // true once the iterator ran off the end (repair complete)
299
408
}
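After the backfill, the `target_ids` column family holds both directions, told apart by key shape: forward keys are bincoded `TargetKey`s, reverse keys are exactly 8 big-endian bytes. An illustrative reverse lookup under that layout (this helper is not in the diff, and it assumes `TargetKey` has a value-deserialize impl for `_vr` to use):

```rust
fn resolve_target(storage: &RocksStorage, target_id: u64) -> Result<Option<TargetKey>> {
    let cf = storage.db.cf_handle(TARGET_IDS_CF).unwrap();
    storage
        .db
        .get_cf(&cf, target_id.to_be_bytes())? // 8-byte reverse key
        .map(|raw| _vr(&raw))
        .transpose()
}
```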
300
409
301
410
pub fn start_backup(
···
826
935
}
827
936
828
937
impl LinkReader for RocksStorage {
938
+
fn get_many_to_many_counts(
939
+
&self,
940
+
target: &str,
941
+
collection: &str,
942
+
path: &str,
943
+
path_to_other: &str,
944
+
limit: u64,
945
+
after: Option<String>,
946
+
filter_dids: &HashSet<Did>,
947
+
filter_to_targets: &HashSet<String>,
948
+
) -> Result<PagedOrderedCollection<(String, u64, u64), String>> {
949
+
let collection = Collection(collection.to_string());
950
+
let path = RPath(path.to_string());
951
+
952
+
let target_key = TargetKey(Target(target.to_string()), collection.clone(), path.clone());
953
+
954
+
// unfortunately the cursor is a, uh, stringified number.
955
+
// this was easier for the memstore (plain target, not target id), and
956
+
// making it generic is a bit awful.
957
+
// so... parse the number out of a string here :(
958
+
// TODO: this should bubble up to a BAD_REQUEST response
959
+
let after = after.map(|s| s.parse::<u64>().map(TargetId)).transpose()?;
960
+
961
+
let Some(target_id) = self.target_id_table.get_id_val(&self.db, &target_key)? else {
962
+
eprintln!("nothin doin for this target, {target_key:?}");
963
+
return Ok(Default::default());
964
+
};
965
+
966
+
let filter_did_ids: HashMap<DidId, bool> = filter_dids
967
+
.iter()
968
+
.filter_map(|did| self.did_id_table.get_id_val(&self.db, did).transpose())
969
+
.collect::<Result<Vec<DidIdValue>>>()?
970
+
.into_iter()
971
+
.map(|DidIdValue(id, active)| (id, active))
972
+
.collect();
973
+
974
+
// stored targets are keyed by triples of (target, collection, path).
975
+
// target filtering only considers the target itself, so we actually
976
+
// need to do a prefix iteration of all target ids for this target and
977
+
// keep them all.
978
+
// i *think* the number of keys at a target prefix should usually be
979
+
// pretty small, so this is hopefully fine. but if it turns out to be
980
+
// large, we can push this filtering back into the main links loop and
981
+
// do forward db queries per backlink to get the raw target back out.
982
+
let mut filter_to_target_ids: HashSet<TargetId> = HashSet::new();
983
+
for t in filter_to_targets {
984
+
for (_, target_id) in self.iter_targets_for_target(&Target(t.to_string())) {
985
+
filter_to_target_ids.insert(target_id);
986
+
}
987
+
}
988
+
989
+
let linkers = self.get_target_linkers(&target_id)?;
990
+
991
+
let mut grouped_counts: BTreeMap<TargetId, (u64, HashSet<DidId>)> = BTreeMap::new();
992
+
993
+
for (did_id, rkey) in linkers.0 {
994
+
if did_id.is_empty() {
995
+
continue;
996
+
}
997
+
998
+
if !filter_did_ids.is_empty() && filter_did_ids.get(&did_id) != Some(&true) {
999
+
continue;
1000
+
}
1001
+
1002
+
let record_link_key = RecordLinkKey(did_id, collection.clone(), rkey);
1003
+
let Some(targets) = self.get_record_link_targets(&record_link_key)? else {
1004
+
continue;
1005
+
};
1006
+
1007
+
let Some(fwd_target) = targets
1008
+
.0
1009
+
.into_iter()
1010
+
.filter_map(|RecordLinkTarget(rpath, target_id)| {
1011
+
if rpath.0 == path_to_other
1012
+
&& (filter_to_target_ids.is_empty()
1013
+
|| filter_to_target_ids.contains(&target_id))
1014
+
{
1015
+
Some(target_id)
1016
+
} else {
1017
+
None
1018
+
}
1019
+
})
1020
+
.take(1)
1021
+
.next()
1022
+
else {
1023
+
eprintln!("no forward match");
1024
+
continue;
1025
+
};
1026
+
1027
+
// small relief: we page over target ids, so we can bail early instead of
1028
+
// reprocessing previous pages here
1029
+
if after.as_ref().map(|a| fwd_target <= *a).unwrap_or(false) {
1030
+
continue;
1031
+
}
1032
+
1033
+
// and we can skip target ids that must be on future pages
1034
+
// (this check continues after the did-lookup, which we have to do)
1035
+
let page_is_full = grouped_counts.len() as u64 >= limit;
1036
+
if page_is_full {
1037
+
let current_max = grouped_counts.keys().next_back().unwrap(); // limit should be non-zero bleh
1038
+
if fwd_target > *current_max {
1039
+
continue;
1040
+
}
1041
+
}
1042
+
1043
+
// bit painful: 2-step lookup to make sure this did is active
1044
+
let Some(did) = self.did_id_table.get_val_from_id(&self.db, did_id.0)? else {
1045
+
eprintln!("failed to look up did from did_id {did_id:?}");
1046
+
continue;
1047
+
};
1048
+
let Some(DidIdValue(_, active)) = self.did_id_table.get_id_val(&self.db, &did)? else {
1049
+
eprintln!("failed to look up did_value from did_id {did_id:?}: {did:?}: data consistency bug?");
1050
+
continue;
1051
+
};
1052
+
if !active {
1053
+
continue;
1054
+
}
1055
+
1056
+
// page-management, continued
1057
+
// if we have a full page, and we're inserting a *new* key less than
1058
+
// the current max, then we can evict the current max
1059
+
let mut should_evict = false;
1060
+
let entry = grouped_counts.entry(fwd_target.clone()).or_insert_with(|| {
1061
+
// this is a *new* key, so kick the max if we're full
1062
+
should_evict = page_is_full;
1063
+
Default::default()
1064
+
});
1065
+
entry.0 += 1;
1066
+
entry.1.insert(did_id);
1067
+
1068
+
if should_evict {
1069
+
grouped_counts.pop_last();
1070
+
}
1071
+
}
1072
+
1073
+
let mut items: Vec<(String, u64, u64)> = Vec::with_capacity(grouped_counts.len());
1074
+
for (target_id, (n, dids)) in &grouped_counts {
1075
+
let Some(target) = self
1076
+
.target_id_table
1077
+
.get_val_from_id(&self.db, target_id.0)?
1078
+
else {
1079
+
eprintln!("failed to look up target from target_id {target_id:?}");
1080
+
continue;
1081
+
};
1082
+
items.push((target.0 .0, *n, dids.len() as u64));
1083
+
}
1084
+
1085
+
let next = if grouped_counts.len() as u64 >= limit {
1086
+
// yeah.... it's a number saved as a string......sorry
1087
+
grouped_counts
1088
+
.keys()
1089
+
.next_back()
1090
+
.map(|k| format!("{}", k.0))
1091
+
} else {
1092
+
None
1093
+
};
1094
+
1095
+
Ok(PagedOrderedCollection { items, next })
1096
+
}
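The paging above is a bounded top-K: once `limit` keys are resident, anything above the current maximum gets skipped, and inserting a smaller new key evicts the maximum with `pop_last`. A standalone sketch of that pattern over plain integers (needs Rust 1.66+ for `BTreeMap::pop_last`):

```rust
use std::collections::BTreeMap;

// accumulate counts for the `limit` smallest keys seen in `stream`
fn smallest_page(stream: impl Iterator<Item = u64>, limit: usize) -> BTreeMap<u64, u64> {
    let mut page: BTreeMap<u64, u64> = BTreeMap::new();
    for key in stream {
        let full = page.len() >= limit;
        // a full page never admits keys beyond its current maximum
        if full
            && !page.contains_key(&key)
            && page.keys().next_back().map_or(false, |max| key > *max)
        {
            continue;
        }
        let is_new = !page.contains_key(&key);
        *page.entry(key).or_default() += 1;
        if full && is_new {
            page.pop_last(); // evict the old maximum to stay within the page size
        }
    }
    page
}

fn main() {
    let keys = [9, 3, 7, 3, 1, 8, 1, 2].into_iter();
    let page = smallest_page(keys, 3);
    assert_eq!(page.keys().copied().collect::<Vec<_>>(), vec![1, 2, 3]);
    assert_eq!(page[&1], 2); // 1 appeared twice
}
```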
1097
+
829
1098
fn get_count(&self, target: &str, collection: &str, path: &str) -> Result<u64> {
830
1099
let target_key = TargetKey(
831
1100
Target(target.to_string()),
···
860
1129
path: &str,
861
1130
limit: u64,
862
1131
until: Option<u64>,
1132
+
filter_dids: &HashSet<Did>,
863
1133
) -> Result<PagedAppendingCollection<RecordId>> {
864
1134
let target_key = TargetKey(
865
1135
Target(target.to_string()),
···
876
1146
});
877
1147
};
878
1148
879
-
let linkers = self.get_target_linkers(&target_id)?;
1149
+
let mut linkers = self.get_target_linkers(&target_id)?;
1150
+
if !filter_dids.is_empty() {
1151
+
let mut did_filter = HashSet::new();
1152
+
for did in filter_dids {
1153
+
let Some(DidIdValue(did_id, active)) =
1154
+
self.did_id_table.get_id_val(&self.db, did)?
1155
+
else {
1156
+
eprintln!("failed to find a did_id for {did:?}");
1157
+
continue;
1158
+
};
1159
+
if !active {
1160
+
eprintln!("excluding inactive did from filtered results");
1161
+
continue;
1162
+
}
1163
+
did_filter.insert(did_id);
1164
+
}
1165
+
linkers.0.retain(|linker| did_filter.contains(&linker.0));
1166
+
}
880
1167
881
1168
let (alive, gone) = linkers.count();
882
1169
let total = alive + gone;
···
1024
1311
.map(|s| s.parse::<u64>())
1025
1312
.transpose()?
1026
1313
.unwrap_or(0);
1314
+
let started_at = self
1315
+
.db
1316
+
.get(STARTED_AT_KEY)?
1317
+
.map(|c| _vr(&c))
1318
+
.transpose()?
1319
+
.unwrap_or(COZY_FIRST_CURSOR);
1320
+
1321
+
let other_data = self
1322
+
.db
1323
+
.get(TARGET_ID_REPAIR_STATE_KEY)?
1324
+
.map(|s| _vr(&s))
1325
+
.transpose()?
1326
+
.map(
1327
+
|TargetIdRepairState {
1328
+
current_us_started_at,
1329
+
id_when_started,
1330
+
latest_repaired_i,
1331
+
}| {
1332
+
HashMap::from([
1333
+
("current_us_started_at".to_string(), current_us_started_at),
1334
+
("id_when_started".to_string(), id_when_started),
1335
+
("latest_repaired_i".to_string(), latest_repaired_i),
1336
+
])
1337
+
},
1338
+
)
1339
+
.unwrap_or(HashMap::default());
1340
+
1027
1341
Ok(StorageStats {
1028
1342
dids,
1029
1343
targetables,
1030
1344
linking_records,
1345
+
started_at: Some(started_at),
1346
+
other_data,
1031
1347
})
1032
1348
}
1033
1349
}
···
1053
1369
impl AsRocksValue for &TargetId {}
1054
1370
impl KeyFromRocks for TargetKey {}
1055
1371
impl ValueFromRocks for TargetId {}
1372
+
1373
+
// temp?
1374
+
impl KeyFromRocks for TargetId {}
1375
+
impl AsRocksValue for &TargetKey {}
1056
1376
1057
1377
// target_links table
1058
1378
impl AsRocksKey for &TargetId {}
···
1124
1444
}
1125
1445
1126
1446
// target ids
1127
-
#[derive(Debug, Clone, Serialize, Deserialize)]
1447
+
#[derive(Debug, Clone, Serialize, Deserialize, PartialOrd, Ord, PartialEq, Eq, Hash)]
1128
1448
struct TargetId(u64); // key
1129
1449
1130
-
#[derive(Debug, Clone, Serialize, Deserialize)]
1450
+
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
1131
1451
pub struct Target(pub String); // the actual target/uri
1132
1452
1133
1453
// targets (uris, dids, etc.): the reverse index
+1
-1
constellation/templates/dids.html.j2
+1
-1
constellation/templates/dids.html.j2
···
27
27
{% for did in linking_dids %}
28
28
<pre style="display: block; margin: 1em 2em" class="code"><strong>DID</strong>: {{ did.0 }}
29
29
-> see <a href="/links/all?target={{ did.0|urlencode }}">links to this DID</a>
30
-
-> browse <a href="https://atproto-browser-plus-links.vercel.app/at/{{ did.0|urlencode }}">this DID record</a></pre>
30
+
-> browse <a href="https://pdsls.dev/at://{{ did.0|urlencode }}">this DID record</a></pre>
31
31
{% endfor %}
32
32
33
33
{% if let Some(c) = cursor %}
+54
constellation/templates/get-backlinks.html.j2
+54
constellation/templates/get-backlinks.html.j2
···
1
+
{% extends "base.html.j2" %}
2
+
{% import "try-it-macros.html.j2" as try_it %}
3
+
4
+
{% block title %}Backlinks{% endblock %}
5
+
{% block description %}All {{ query.source }} records with links to {{ query.subject }}{% endblock %}
6
+
7
+
{% block content %}
8
+
9
+
{% call try_it::get_backlinks(query.subject, query.source, query.did, query.limit) %}
10
+
11
+
<h2>
12
+
Links to <code>{{ query.subject }}</code>
13
+
{% if let Some(browseable_uri) = query.subject|to_browseable %}
14
+
<small style="font-weight: normal; font-size: 1rem"><a href="{{ browseable_uri }}">browse record</a></small>
15
+
{% endif %}
16
+
</h2>
17
+
18
+
<p><strong>{{ total|human_number }} links</strong> from <code>{{ query.source }}</code>.</p>
19
+
20
+
<ul>
21
+
<li>See distinct linking DIDs at <code>/links/distinct-dids</code>: <a href="/links/distinct-dids?target={{ query.subject|urlencode }}&collection={{ collection|urlencode }}&path={{ path|urlencode }}">/links/distinct-dids?target={{ query.subject }}&collection={{ collection }}&path={{ path }}</a></li>
22
+
<li>See all links to this target at <code>/links/all</code>: <a href="/links/all?target={{ query.subject|urlencode }}">/links/all?target={{ query.subject }}</a></li>
23
+
</ul>
24
+
25
+
<h3>Links, most recent first:</h3>
26
+
27
+
{% for record in records %}
28
+
<pre style="display: block; margin: 1em 2em" class="code"><strong>DID</strong>: {{ record.did().0 }} (<a href="/links/all?target={{ record.did().0|urlencode }}">DID links</a>)
29
+
<strong>Collection</strong>: {{ record.collection }}
30
+
<strong>RKey</strong>: {{ record.rkey }}
31
+
-> <a href="https://pdsls.dev/at://{{ record.did().0 }}/{{ record.collection }}/{{ record.rkey }}">browse record</a></pre>
32
+
{% endfor %}
33
+
34
+
{% if let Some(c) = cursor %}
35
+
<form method="get" action="/xrpc/blue.microcosm.links.getBacklinks">
36
+
<input type="hidden" name="subject" value="{{ query.subject }}" />
37
+
<input type="hidden" name="source" value="{{ query.source }}" />
38
+
<input type="hidden" name="limit" value="{{ query.limit }}" />
39
+
{% for did in query.did %}
40
+
<input type="hidden" name="did" value="{{ did }}" />
41
+
{% endfor %}
42
+
<input type="hidden" name="cursor" value={{ c|json|safe }} />
43
+
<button type="submit">next page…</button>
44
+
</form>
45
+
{% else %}
46
+
<button disabled><em>end of results</em></button>
47
+
{% endif %}
48
+
49
+
<details>
50
+
<summary>Raw JSON response</summary>
51
+
<pre class="code">{{ self|tojson }}</pre>
52
+
</details>
53
+
54
+
{% endblock %}
+67
constellation/templates/get-many-to-many-counts.html.j2
+67
constellation/templates/get-many-to-many-counts.html.j2
···
1
+
{% extends "base.html.j2" %}
2
+
{% import "try-it-macros.html.j2" as try_it %}
3
+
4
+
{% block title %}Many to Many counts{% endblock %}
5
+
{% block description %}Counts of many-to-many {{ query.source }} join records with links to {{ query.subject }} and a secondary target at {{ query.path_to_other }}{% endblock %}
6
+
7
+
{% block content %}
8
+
9
+
{% call try_it::get_many_to_many_counts(
10
+
query.subject,
11
+
query.source,
12
+
query.path_to_other,
13
+
query.did,
14
+
query.other_subject,
15
+
query.limit,
16
+
) %}
17
+
18
+
<h2>
19
+
Many-to-many links to <code>{{ query.subject }}</code> joining through <code>{{ query.path_to_other }}</code>
20
+
{% if let Some(browseable_uri) = query.subject|to_browseable %}
21
+
<small style="font-weight: normal; font-size: 1rem"><a href="{{ browseable_uri }}">browse record</a></small>
22
+
{% endif %}
23
+
</h2>
24
+
25
+
<p><strong>{% if cursor.is_some() || query.cursor.is_some() %}more than {% endif %}{{ counts_by_other_subject.len()|to_u64|human_number }} joins</strong> <code>{{ query.source }}→{{ query.path_to_other }}</code></p>
26
+
27
+
<ul>
28
+
<li>See direct backlinks at <code>/xrpc/blue.microcosm.links.getBacklinks</code>: <a href="/xrpc/blue.microcosm.links.getBacklinks?subject={{ query.subject|urlencode }}&source={{ query.source|urlencode }}">/xrpc/blue.microcosm.links.getBacklinks?subject={{ query.subject }}&source={{ query.source }}</a></li>
29
+
<li>See all links to this target at <code>/links/all</code>: <a href="/links/all?target={{ query.subject|urlencode }}">/links/all?target={{ query.subject }}</a></li>
30
+
</ul>
31
+
32
+
<h3>Counts by other subject:</h3>
33
+
34
+
{% for counts in counts_by_other_subject %}
35
+
<pre style="display: block; margin: 1em 2em" class="code"><strong>Joined subject</strong>: {{ counts.subject }}
36
+
<strong>Joining records</strong>: {{ counts.total }}
37
+
<strong>Unique joiner ids</strong>: {{ counts.distinct }}
38
+
-> {% if let Some(browseable_uri) = counts.subject|to_browseable -%}
39
+
<a href="{{ browseable_uri }}">browse record</a>
40
+
{%- endif %}</pre>
41
+
{% endfor %}
42
+
43
+
{% if let Some(c) = cursor %}
44
+
<form method="get" action="/xrpc/blue.microcosm.links.getManyToManyCounts">
45
+
<input type="hidden" name="subject" value="{{ query.subject }}" />
46
+
<input type="hidden" name="source" value="{{ query.source }}" />
47
+
<input type="hidden" name="pathToOther" value="{{ query.path_to_other }}" />
48
+
{% for did in query.did %}
49
+
<input type="hidden" name="did" value="{{ did }}" />
50
+
{% endfor %}
51
+
{% for otherSubject in query.other_subject %}
52
+
<input type="hidden" name="otherSubject" value="{{ otherSubject }}" />
53
+
{% endfor %}
54
+
<input type="hidden" name="limit" value="{{ query.limit }}" />
55
+
<input type="hidden" name="cursor" value={{ c|json|safe }} />
56
+
<button type="submit">next page…</button>
57
+
</form>
58
+
{% else %}
59
+
<button disabled><em>end of results</em></button>
60
+
{% endif %}
61
+
62
+
<details>
63
+
<summary>Raw JSON response</summary>
64
+
<pre class="code">{{ self|tojson }}</pre>
65
+
</details>
66
+
67
+
{% endblock %}
+65
-7
constellation/templates/hello.html.j2
+65
-7
constellation/templates/hello.html.j2
···
19
19
<p>It works by recursively walking <em>all</em> records coming through the firehose, searching for anything that looks like a link. Links are indexed by the target they point at, the collection the record came from, and the JSON path to the link in that record.</p>
20
20
21
21
<p>
22
-
This server has indexed <span class="stat">{{ stats.linking_records|human_number }}</span> links between <span class="stat">{{ stats.targetables|human_number }}</span> targets and sources from <span class="stat">{{ stats.dids|human_number }}</span> identities over <span class="stat">{{ days_indexed|human_number }}</span> days.<br/>
23
-
<small>(indexing new records in real time, backfill still TODO)</small>
22
+
This server has indexed <span class="stat">{{ stats.linking_records|human_number }}</span> links between <span class="stat">{{ stats.targetables|human_number }}</span> targets and sources from <span class="stat">{{ stats.dids|human_number }}</span> identities over <span class="stat">
23
+
{%- if let Some(days) = days_indexed %}
24
+
{{ days|human_number }}
25
+
{% else %}
26
+
???
27
+
{% endif -%}
28
+
</span> days.<br/>
29
+
<small>(indexing new records in real time, backfill coming soon!)</small>
24
30
</p>
25
31
26
-
<p>The API is currently <strong>unstable</strong>. But feel free to use it! If you want to be nice, put your project name and bsky username (or email) in your user-agent header for api requests.</p>
32
+
{# {% for k, v in stats.other_data.iter() %}
33
+
<p><strong>{{ k }}</strong>: {{ v }}</p>
34
+
{% endfor %} #}
35
+
36
+
<p>You're welcome to use this public instance! Please do not build the torment nexus. If you want to be nice, put your project name and bsky username (or email) in your user-agent header for api requests.</p>
27
37
28
38
29
39
<h2>API Endpoints</h2>
30
40
41
+
<h3 class="route"><code>GET /xrpc/blue.microcosm.links.getBacklinks</code></h3>
42
+
43
+
<p>A list of records linking to any record, identity, or uri.</p>
44
+
45
+
<h4>Query parameters:</h4>
46
+
47
+
<ul>
48
+
<li><p><code>subject</code>: required, must url-encode. Example: <code>at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r</code></p></li>
49
+
<li><p><code>source</code>: required. Example: <code>app.bsky.feed.like:subject.uri</code></p></li>
50
+
<li><p><code>did</code>: optional, filter links to those from specific users. Include multiple times to filter by multiple users. Example: <code>did=did:plc:vc7f4oafdgxsihk4cry2xpze&did=did:plc:vc7f4oafdgxsihk4cry2xpze</code></p></li>
51
+
<li><p><code>limit</code>: optional. Default: <code>16</code>. Maximum: <code>100</code></p></li>
52
+
</ul>
53
+
54
+
<p style="margin-bottom: 0"><strong>Try it:</strong></p>
55
+
{% call try_it::get_backlinks("at://did:plc:a4pqq234yw7fqbddawjo7y35/app.bsky.feed.post/3m237ilwc372e", "app.bsky.feed.like:subject.uri", [""], 16) %}
56
+
57
+
58
+
<h3 class="route"><code>GET /xrpc/blue.microcosm.links.getManyToManyCounts</code></h3>
59
+
60
+
<p>Counts of many-to-many join records linking to a subject, grouped by the secondary subject found at <code>pathToOther</code> in each record.</p>
61
+
62
+
<h4>Query parameters:</h4>
63
+
64
+
<ul>
65
+
<li><p><code>subject</code>: required, must url-encode. Example: <code>at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r</code></p></li>
66
+
<li><p><code>source</code>: required. Example: <code>app.bsky.feed.like:subject.uri</code></p></li>
67
+
<li><p><code>pathToOther</code>: required. Path to the secondary link in the many-to-many record. Example: <code>otherThing.uri</code></p></li>
68
+
<li><p><code>did</code>: optional, filter links to those from specific users. Include multiple times to filter by multiple users. Example: <code>did=did:plc:vc7f4oafdgxsihk4cry2xpze&did=did:plc:vc7f4oafdgxsihk4cry2xpze</code></p></li>
69
+
<li><p><code>otherSubject</code>: optional, filter secondary links to specific subjects. Include multiple times to filter by multiple subjects. Example: <code>at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r</code></p></li>
70
+
<li><p><code>limit</code>: optional. Default: <code>16</code>. Maximum: <code>100</code></p></li>
71
+
</ul>
72
+
73
+
<p style="margin-bottom: 0"><strong>Try it:</strong></p>
74
+
{% call try_it::get_many_to_many_counts(
75
+
"at://did:plc:wshs7t2adsemcrrd4snkeqli/sh.tangled.label.definition/good-first-issue",
76
+
"sh.tangled.label.op:add[].key",
77
+
"subject",
78
+
[""],
79
+
[""],
80
+
25,
81
+
) %}
82
+
83
+
31
84
<h3 class="route"><code>GET /links</code></h3>
32
85
33
86
<p>A list of records linking to a target.</p>
34
87
88
+
<p>[DEPRECATED]: use <code>GET /xrpc/blue.microcosm.links.getBacklinks</code>. New apps should avoid this endpoint, but it <strong>will</strong> remain supported for the foreseeable future.</p>
89
+
35
90
<h4>Query parameters:</h4>
36
91
37
92
<ul>
38
-
<li><code>target</code>: required, must url-encode. Example: <code>at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r</code></li>
39
-
<li><code>collection</code>: required. Example: <code>app.bsky.feed.like</code></li>
40
-
<li><code>path</code>: required, must url-encode. Example: <code>.subject.uri</code></li>
93
+
<li><p><code>target</code>: required, must url-encode. Example: <code>at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r</code></p></li>
94
+
<li><p><code>collection</code>: required. Example: <code>app.bsky.feed.like</code></p></li>
95
+
<li><p><code>path</code>: required, must url-encode. Example: <code>.subject.uri</code></p></li>
96
+
<li><p><code>did</code>: optional, filter links to those from specific users. Include multiple times to filter by multiple users. Example: <code>did=did:plc:vc7f4oafdgxsihk4cry2xpze&did=did:plc:vc7f4oafdgxsihk4cry2xpze</code></p></li>
97
+
<li><p><code>from_dids</code> [deprecated]: optional. Use <code>did</code> instead. Example: <code>from_dids=did:plc:vc7f4oafdgxsihk4cry2xpze,did:plc:vc7f4oafdgxsihk4cry2xpze</code></p></li>
98
+
<li><p><code>limit</code>: optional. Default: <code>16</code>. Maximum: <code>100</code></p></li>
41
99
</ul>
42
100
43
101
<p style="margin-bottom: 0"><strong>Try it:</strong></p>
44
-
{% call try_it::links("at://did:plc:vc7f4oafdgxsihk4cry2xpze/app.bsky.feed.post/3lgwdn7vd722r", "app.bsky.feed.like", ".subject.uri") %}
102
+
{% call try_it::links("at://did:plc:a4pqq234yw7fqbddawjo7y35/app.bsky.feed.post/3m237ilwc372e", "app.bsky.feed.like", ".subject.uri", [""], 16) %}
45
103
46
104
47
105
<h3 class="route"><code>GET /links/distinct-dids</code></h3>
+2
-2
constellation/templates/links.html.j2
+2
-2
constellation/templates/links.html.j2
···
6
6
7
7
{% block content %}
8
8
9
-
{% call try_it::links(query.target, query.collection, query.path) %}
9
+
{% call try_it::links(query.target, query.collection, query.path, query.did, query.limit) %}
10
10
11
11
<h2>
12
12
Links to <code>{{ query.target }}</code>
···
28
28
<pre style="display: block; margin: 1em 2em" class="code"><strong>DID</strong>: {{ record.did().0 }} (<a href="/links/all?target={{ record.did().0|urlencode }}">DID links</a>)
29
29
<strong>Collection</strong>: {{ record.collection }}
30
30
<strong>RKey</strong>: {{ record.rkey }}
31
-
-> <a href="https://atproto-browser-plus-links.vercel.app/at/{{ record.did().0|urlencode }}/{{ record.collection }}/{{ record.rkey }}">browse record</a></pre>
31
+
-> <a href="https://pdsls.dev/at://{{ record.did().0 }}/{{ record.collection }}/{{ record.rkey }}">browse record</a></pre>
32
32
{% endfor %}
33
33
34
34
{% if let Some(c) = cursor %}
+88
-3
constellation/templates/try-it-macros.html.j2
+88
-3
constellation/templates/try-it-macros.html.j2
···
1
-
{% macro links(target, collection, path) %}
1
+
{% macro get_backlinks(subject, source, dids, limit) %}
2
+
<form method="get" action="/xrpc/blue.microcosm.links.getBacklinks">
3
+
<pre class="code"><strong>GET</strong> /xrpc/blue.microcosm.links.getBacklinks
4
+
?subject= <input type="text" name="subject" value="{{ subject }}" placeholder="at-uri, did, uri..." />
5
+
&source= <input type="text" name="source" value="{{ source }}" placeholder="app.bsky.feed.like:subject.uri" />
6
+
{%- for did in dids %}{% if !did.is_empty() %}
7
+
&did= <input type="text" name="did" value="{{ did }}" placeholder="did:plc:..." />{% endif %}{% endfor %}
8
+
<span id="did-placeholder"></span> <button id="add-did">+ did filter</button>
9
+
&limit= <input type="number" name="limit" value="{{ limit }}" max="100" placeholder="100" /> <button type="submit">get links</button></pre>
10
+
</form>
11
+
<script>
12
+
const addDidButton = document.getElementById('add-did');
13
+
const didPlaceholder = document.getElementById('did-placeholder');
14
+
addDidButton.addEventListener('click', e => {
15
+
e.preventDefault();
16
+
const i = document.createElement('input');
17
+
i.placeholder = 'did:plc:...';
18
+
i.name = "did"
19
+
const p = addDidButton.parentNode;
20
+
p.insertBefore(document.createTextNode('&did= '), didPlaceholder);
21
+
p.insertBefore(i, didPlaceholder);
22
+
p.insertBefore(document.createTextNode('\n '), didPlaceholder);
23
+
});
24
+
</script>
25
+
{% endmacro %}
26
+
27
+
{% macro get_many_to_many_counts(subject, source, pathToOther, dids, otherSubjects, limit) %}
28
+
<form method="get" action="/xrpc/blue.microcosm.links.getManyToManyCounts">
29
+
<pre class="code"><strong>GET</strong> /xrpc/blue.microcosm.links.getManyToManyCounts
30
+
?subject= <input type="text" name="subject" value="{{ subject }}" placeholder="at-uri, did, uri..." />
31
+
&source= <input type="text" name="source" value="{{ source }}" placeholder="app.bsky.feed.like:subject.uri" />
32
+
&pathToOther= <input type="text" name="pathToOther" value="{{ pathToOther }}" placeholder="otherThing.uri" />
33
+
{%- for did in dids %}{% if !did.is_empty() %}
34
+
&did= <input type="text" name="did" value="{{ did }}" placeholder="did:plc:..." />{% endif %}{% endfor %}
35
+
<span id="m2m-subject-placeholder"></span> <button id="m2m-add-subject">+ other subject filter</button>
36
+
{%- for otherSubject in otherSubjects %}{% if !otherSubject.is_empty() %}
37
+
&otherSubject= <input type="text" name="otherSubject" value="{{ otherSubject }}" placeholder="at-uri, did, uri..." />{% endif %}{% endfor %}
38
+
<span id="m2m-did-placeholder"></span> <button id="m2m-add-did">+ did filter</button>
39
+
&limit= <input type="number" name="limit" value="{{ limit }}" max="100" placeholder="100" /> <button type="submit">get links</button></pre>
40
+
</form>
41
+
<script>
42
+
const m2mAddDidButton = document.getElementById('m2m-add-did');
43
+
const m2mDidPlaceholder = document.getElementById('m2m-did-placeholder');
44
+
m2mAddDidButton.addEventListener('click', e => {
45
+
e.preventDefault();
46
+
const i = document.createElement('input');
47
+
i.placeholder = 'did:plc:...';
48
+
i.name = "did"
49
+
const p = m2mAddDidButton.parentNode;
50
+
p.insertBefore(document.createTextNode('&did= '), m2mDidPlaceholder);
51
+
p.insertBefore(i, m2mDidPlaceholder);
52
+
p.insertBefore(document.createTextNode('\n '), m2mDidPlaceholder);
53
+
});
54
+
const m2mAddSubjectButton = document.getElementById('m2m-add-subject');
55
+
const m2mSubjectPlaceholder = document.getElementById('m2m-subject-placeholder');
56
+
m2mAddSubjectButton.addEventListener('click', e => {
57
+
e.preventDefault();
58
+
const i = document.createElement('input');
59
+
i.placeholder = 'at-uri, did, uri...';
60
+
i.name = "otherSubject"
61
+
const p = m2mAddSubjectButton.parentNode;
62
+
p.insertBefore(document.createTextNode('&otherSubject= '), m2mSubjectPlaceholder);
63
+
p.insertBefore(i, m2mSubjectPlaceholder);
64
+
p.insertBefore(document.createTextNode('\n '), m2mSubjectPlaceholder);
65
+
});
66
+
</script>
67
+
{% endmacro %}
68
+
69
+
{% macro links(target, collection, path, dids, limit) %}
2
70
<form method="get" action="/links">
3
71
<pre class="code"><strong>GET</strong> /links
4
72
?target= <input type="text" name="target" value="{{ target }}" placeholder="target" />
5
73
&collection= <input type="text" name="collection" value="{{ collection }}" placeholder="collection" />
6
-
&path= <input type="text" name="path" value="{{ path }}" placeholder="path" /> <button type="submit">get links</button></pre>
74
+
&path= <input type="text" name="path" value="{{ path }}" placeholder="path" />
75
+
{%- for did in dids %}{% if !did.is_empty() %}
76
+
&did= <input type="text" name="did" value="{{ did }}" placeholder="did:plc:..." />{% endif %}{% endfor %}
77
+
<span id="did-placeholder"></span> <button id="add-did">+ did filter</button>
78
+
&limit= <input type="number" name="limit" value="{{ limit }}" max="100" placeholder="100" /> <button type="submit">get links</button></pre>
7
79
</form>
80
+
<script>
81
+
const addDidButton = document.getElementById('add-did');
82
+
const didPlaceholder = document.getElementById('did-placeholder');
83
+
addDidButton.addEventListener('click', e => {
84
+
e.preventDefault();
85
+
const i = document.createElement('input');
86
+
i.placeholder = 'did:plc:...';
87
+
i.name = "did"
88
+
const p = addDidButton.parentNode;
89
+
p.insertBefore(document.createTextNode('&did= '), didPlaceholder);
90
+
p.insertBefore(i, didPlaceholder);
91
+
p.insertBefore(document.createTextNode('\n '), didPlaceholder);
92
+
});
93
+
</script>
8
94
{% endmacro %}
9
-
10
95
11
96
{% macro dids(target, collection, path) %}
12
97
<form method="get" action="/links/distinct-dids">
-496
cozy-setup (move to another repo).md
-496
cozy-setup (move to another repo).md
···
1
-
cozy-ucosm
2
-
3
-
4
-
## gateway
5
-
6
-
- tailscale (exit node enabled)
7
-
-> allow ipv4 and ipv6 forwarding
8
-
- caddy
9
-
10
-
```bash
11
-
apt install golang
12
-
go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest
13
-
go/bin/xcaddy build \
14
-
--with github.com/caddyserver/cache-handler \
15
-
--with github.com/darkweak/storages/badger/caddy \
16
-
--with github.com/mholt/caddy-ratelimit
17
-
# then https://caddyserver.com/docs/running#manual-installation
18
-
19
-
mkdir /var/cache/caddy-badger
20
-
chown -R caddy:caddy /var/cache/caddy-badger/
21
-
```
22
-
23
-
- `/etc/caddy/Caddyfile`
24
-
25
-
```
26
-
{
27
-
cache {
28
-
badger
29
-
api {
30
-
prometheus
31
-
}
32
-
}
33
-
}
34
-
35
-
links.bsky.bad-example.com {
36
-
reverse_proxy link-aggregator:6789
37
-
38
-
@browser `{header.Origin.startsWith("Mozilla/5.0")`
39
-
rate_limit {
40
-
zone global_burst {
41
-
key {remote_host}
42
-
events 10
43
-
window 1s
44
-
}
45
-
zone global_general {
46
-
key {remote_host}
47
-
events 100
48
-
window 60s
49
-
log_key true
50
-
}
51
-
zone website_harsh_limit {
52
-
key {header.Origin}
53
-
match {
54
-
expression {header.User-Agent}.startsWith("Mozilla/5.0")
55
-
}
56
-
events 1000
57
-
window 30s
58
-
log_key true
59
-
}
60
-
}
61
-
respond /souin-api/metrics "denied" 403 # does not work
62
-
cache {
63
-
ttl 3s
64
-
stale 1h
65
-
default_cache_control public, s-maxage=3
66
-
badger {
67
-
path /var/cache/caddy-badger/links
68
-
}
69
-
}
70
-
}
71
-
72
-
gateway:80 {
73
-
metrics
74
-
cache
75
-
}
76
-
```
77
-
well... the gateway fell over IMMEDIATELY with like 2 req/sec from deletions, with that ^^ config. for now i removed everything except the reverse proxy config + normal caddy metrics and it's running fine on vanilla caddy. i did try reducing the rate-limiting configs to a single, fixed-key global limit but it still ate all the ram and died. maybe badger w/ the cache config was still a problem. maybe it would have been ok on a machine with more than 1GB mem.
78
-
79
-
80
-
alternative proxies:
81
-
82
-
- nginx. i should probably just use this. acme-client is a piece of cake to set up, and i know how to configure it.
83
-
- haproxy. also kind of familiar, it's old and stable. no idea how it handles low-mem (our 1gb) vs nginx.
84
-
- sozu. popular rust thing, fast. doesn't have rate-limiting or cache feature?
85
-
- rpxy. like caddy (auto-tls) but in rust and actually fast? has an "experimental" cache feature. but the cache feature looks good.
86
-
- rama. build-your-own proxy. not sure that it has both cache and limiter in their standard features?
87
-
- pingora. build-your-own cloudflare, so like, probably stable. has tools for cache and limiting. low-mem...?
88
-
- cache stuff in pingora seems a little... hit and miss (byeeeee). only a test impl for Storage for the main cache feature?
89
-
- but the rate-limiter has a guide: https://github.com/cloudflare/pingora/blob/main/docs/user_guide/rate_limiter.md
90
-
91
-
what i want is a low-resource reverse proxy with built-in rate-limiting and caching. but maybe cache (and/or ratelimiting) could be external to the reverse proxy
92
-
- varnish is a dedicated cache. has https://github.com/varnish/varnish-modules/blob/master/src/vmod_vsthrottle.vcc
93
-
- apache traffic control has experimental rate-limiting plugins
94
-
95
-
96
-
- victoriametrics
97
-
98
-
```bash
99
-
curl -LO https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.109.1/victoria-metrics-linux-amd64-v1.109.1.tar.gz
100
-
tar xzf victoria-metrics-linux-amd64-v1.109.1.tar.gz
101
-
# and then https://docs.victoriametrics.com/quick-start/#starting-vm-single-from-a-binary
102
-
sudo mkdir /etc/victoria-metrics && sudo chown -R victoriametrics:victoriametrics /etc/victoria-metrics
103
-
104
-
```
105
-
106
-
- `/etc/victoria-metrics/prometheus.yml`
107
-
108
-
```yaml
109
-
global:
110
-
scrape_interval: '15s'
111
-
112
-
scrape_configs:
113
-
- job_name: 'link_aggregator'
114
-
static_configs:
115
-
- targets: ['link-aggregator:8765']
116
-
- job_name: 'gateway:caddy'
117
-
static_configs:
118
-
- targets: ['gateway:80/metrics']
119
-
- job_name: 'gateway:cache'
120
-
static_configs:
121
-
- targets: ['gateway:80/souin-api/metrics']
122
-
```
123
-
124
-
- `ExecStart` in `/etc/systemd/system/victoriametrics.service`:
125
-
126
-
```
127
-
ExecStart=/usr/local/bin/victoria-metrics-prod -storageDataPath=/var/lib/victoria-metrics -retentionPeriod=90d -selfScrapeInterval=1m -promscrape.config=/etc/victoria-metrics/prometheus.yml
128
-
```
129
-
130
-
- grafana
131
-
132
-
followed `https://grafana.com/docs/grafana/latest/setup-grafana/installation/debian/#install-grafana-on-debian-or-ubuntu`
133
-
134
-
something something something then
135
-
136
-
```
137
-
sudo grafana-cli --pluginUrl https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.11.1/victoriametrics-datasource-v0.11.1.zip plugins install victoriametrics
138
-
```
139
-
140
-
- raspi node_exporter
141
-
142
-
```bash
143
-
curl -LO https://github.com/prometheus/node_exporter/releases/download/v1.8.2/node_exporter-1.8.2.linux-armv7.tar.gz
144
-
tar xzf node_exporter-1.8.2.linux-armv7.tar.gz
145
-
sudo cp node_exporter-1.8.2.linux-armv7/node_exporter /usr/local/bin/
146
-
sudo useradd --no-create-home --shell /bin/false node_exporter
147
-
sudo nano /etc/systemd/system/node_exporter.service
148
-
# [Unit]
149
-
# Description=Node Exporter
150
-
# Wants=network-online.target
151
-
# After=network-online.target
152
-
153
-
# [Service]
154
-
# User=node_exporter
155
-
# Group=node_exporter
156
-
# Type=simple
157
-
# ExecStart=/usr/local/bin/node_exporter
158
-
# Restart=always
159
-
# RestartSec=3
160
-
161
-
# [Install]
162
-
# WantedBy=multi-user.target
163
-
sudo systemctl daemon-reload
164
-
sudo systemctl enable node_exporter.service
165
-
sudo systemctl start node_exporter.service
166
-
```
167
-
168
-
todo: get raspi vcgencmd outputs into metrics
169
-
170
-
- nginx on gateway
171
-
172
-
```nginx
173
-
# in http
174
-
175
-
##
176
-
# cozy cache
177
-
##
178
-
proxy_cache_path /var/cache/nginx keys_zone=cozy_zone:10m;
179
-
180
-
##
181
-
# cozy limit
182
-
##
183
-
limit_req_zone $binary_remote_addr zone=cozy_ip_limit:10m rate=50r/s;
184
-
limit_req_zone $server_name zone=cozy_global_limit:10m rate=1000r/s;
185
-
186
-
# in sites-available/constellation.microcosm.blue
187
-
188
-
upstream cozy_link_aggregator {
189
-
server link-aggregator:6789;
190
-
keepalive 16;
191
-
}
192
-
193
-
server {
194
-
listen 8080;
195
-
listen [::]:8080;
196
-
197
-
server_name constellation.microcosm.blue;
198
-
199
-
proxy_cache cozy_zone;
200
-
proxy_cache_background_update on;
201
-
proxy_cache_key "$scheme$proxy_host$uri$is_args$args$http_accept";
202
-
proxy_cache_lock on; # make simultaneous requests for the same uri wait for it to appear in cache instead of hitting origin
203
-
proxy_cache_lock_age 1s;
204
-
proxy_cache_lock_timeout 2s;
205
-
proxy_cache_valid 10s; # default -- should be explicitly set in the response headers
206
-
proxy_cache_valid any 15s; # non-200s default
207
-
proxy_read_timeout 5s;
208
-
proxy_send_timeout 15s;
209
-
proxy_socket_keepalive on;
210
-
211
-
limit_req zone=cozy_ip_limit nodelay burst=100;
212
-
limit_req zone=cozy_global_limit;
213
-
limit_req_status 429;
214
-
215
-
location / {
216
-
proxy_pass http://cozy_link_aggregator;
217
-
include proxy_params;
218
-
proxy_http_version 1.1;
219
-
proxy_set_header Connection ""; # for keepalive
220
-
}
221
-
}
222
-
```
223
-
224
-
also `systemctl edit nginx` and paste
225
-
226
-
```
227
-
[Service]
228
-
Restart=always
229
-
```
230
-
231
-
→ https://serverfault.com/a/1003373
232
-
233
-
now making browsers redirect to the microcosm.blue url:
234
-
235
-
```
236
-
[...]
237
-
server_name links.bsky.bad-example.com;
238
-
239
-
add_header Access-Control-Allow-Origin * always; # bit of hack to have it here but nginx doesn't like it in the `if`
240
-
if ($http_user_agent ~ ^Mozilla/) {
241
-
# for now send *browsers* to the new location, hopefully without impacting api requests
242
-
# (yeah we're doing UA test here and content-negotiation in the app. whatever.)
243
-
return 301 https://constellation.microcosm.blue$request_uri;
244
-
}
245
-
[...]
246
-
```
247
-
248
-
- nginx metrics
249
-
250
-
- download nginx-prometheus-exporter
251
-
https://github.com/nginx/nginx-prometheus-exporter/releases/download/v1.4.1/nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
252
-
253
-
- err actually going to make mistakes and try with snap
254
-
`snap install nginx-prometheus-exporter`
255
-
- so it got a binary for me but no systemd task set up. boooo.
256
-
`snap remove nginx-prometheus-exporter`
257
-
258
-
- ```bash
259
-
curl -LO https://github.com/nginx/nginx-prometheus-exporter/releases/download/v1.4.1/nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
260
-
tar xzf nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
261
-
mv nginx-prometheus-exporter /usr/local/bin
262
-
useradd --no-create-home --shell /bin/false nginx-prometheus-exporter
263
-
nano /etc/systemd/system/nginx-prometheus-exporter.service
264
-
# [Unit]
265
-
# Description=NGINX Exporter
266
-
# Wants=network-online.target
267
-
# After=network-online.target
268
-
269
-
# [Service]
270
-
# User=nginx-prometheus-exporter
271
-
# Group=nginx-prometheus-exporter
272
-
# Type=simple
273
-
# ExecStart=/usr/local/bin/nginx-prometheus-exporter --nginx.scrape-uri=http://gateway:8080/stub_status --web.listen-address=gateway:9113
274
-
# Restart=always
275
-
# RestartSec=3
276
-
277
-
# [Install]
278
-
# WantedBy=multi-user.target
279
-
systemctl daemon-reload
280
-
systemctl start nginx-prometheus-exporter.service
281
-
systemctl enable nginx-prometheus-exporter.service
282
-
```
283
-
284
-
- nginx `/etc/nginx/sites-available/gateway-nginx-status`
285
-
286
-
```nginx
287
-
server {
288
-
listen 8080;
289
-
listen [::]:8080;
290
-
291
-
server_name gateway;
292
-
293
-
location /stub_status {
294
-
stub_status;
295
-
}
296
-
location / {
297
-
return 404;
298
-
}
299
-
}
300
-
```
301
-
302
-
```bash
303
-
ln -s /etc/nginx/sites-available/gateway-nginx-status /etc/nginx/sites-enabled/
304
-
```
305
-
306
-
307
-
## bootes (pi5)
308
-
309
-
- mount sd card, touch `ssh` file echo `echo "pi:$(echo raspberry | openssl passwd -6 -stdin)" > userconf.txt`
310
-
- raspi-config: enable pcie 3, set hostname, enable ssh
311
-
- put ssh key into `.ssh/authorized_keys`
312
-
- put `PasswordAuthentication no` in `/etc/ssh/sshd_config`
313
-
- `sudo apt update && sudo apt upgrade`
314
-
- `sudo apt install xfsprogs`
315
-
- `sudo mkfs.xfs -L c11n-kv /dev/nvme0n1`
316
-
- `sudo mount /dev/nvme0n1 /mnt`
317
-
- set up tailscale
318
-
- `sudo tailscale up`
319
-
- `git clone https://github.com/atcosm/links.git`
320
-
- tailscale: disable bootes key expiry
321
-
- rustup `curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh`
322
-
- `cd links/constellation`
323
-
- `sudo apt install libssl-dev` needed
324
-
- `sudo apt install clang` needed for bindgen
325
-
- (in tmux) `cargo build --release`
326
-
- `mkdir ~/backup`
327
-
- `sudo mount.cifs "//truenas.local/folks data" /home/pi/backup -o user=phil,uid=pi`
328
-
- `sudo chown pi:pi /mnt/`
329
-
- `RUST_BACKTRACE=full cargo run --bin rocks-restore-from-backup --release -- --from-backup-dir "/home/pi/backup/constellation-index" --to-data-dir /mnt/constellation-index`
330
-
etc
331
-
- follow above `- raspi node_exporter`
332
-
- configure victoriametrics to scrape the new pi
333
-
- configure ulimit before starting! `ulimit -n 16384`
334
-
- `RUST_BACKTRACE=full cargo run --release -- --backend rocks --data /mnt/constellation-index/ --jetstream us-east-2 --backup /home/pi/backup/constellation-index --backup-interval 6 --max-old-backups 20`
335
-
- add server to nginx gateway upstream: ` server 100.123.79.12:6789; # bootes`
336
-
- stop backups from running on the older instance! `RUST_BACKTRACE=full cargo run --release -- --backend rocks --data /mnt/links-2.rocks/ --jetstream us-east-1`
337
-
- stop upstreaming requests to older instance in nginx
338
-
339
-
340
-
- systemd unit for running: `sudo nano /etc/systemd/system/constellation.service`
341
-
342
-
```ini
343
-
[Unit]
344
-
Description=Constellation backlinks index
345
-
After=network.target
346
-
347
-
[Service]
348
-
User=pi
349
-
WorkingDirectory=/home/pi/links/constellation
350
-
ExecStart=/home/pi/links/target/release/main --backend rocks --data /mnt/constellation-index/ --jetstream us-east-2 --backup /home/pi/backup/constellation-index --backup-interval 6 --max-old-backups 20
351
-
LimitNOFILE=16384
352
-
Restart=always
353
-
354
-
[Install]
355
-
WantedBy=multi-user.target
356
-
```
357
-
358
-
359
-
- todo: overlayfs? would need to figure out builds/updates still, also i guess logs are currently written to sd? (oof)
360
-
- todo: cross-compile for raspi?
361
-
362
-
---
363
-
364
-
some todos
365
-
366
-
- [x] tailscale: exit node
367
-
- [!] link_aggregator: use exit node
368
-
-> worked, but reverted for now: tailscale on raspi was consuming ~50% cpu for the jetstream traffic. this might be near its max since it would have been catching up at the time (max jetstream throughput) but it feels a bit too much. we have to trust the jetstream server and link_aggregator doesn't (yet) make any other external connections, so for now the raspi connects directly from my home again.
369
-
- [x] caddy: reverse proxy
370
-
- [x] build with cache and rate-limit plugins
371
-
- [x] configure systemd to keep it alive
372
-
- [x] configure caddy cache
373
-
- [x] configure caddy rate-limit
374
-
- [ ] configure ~caddy~ nginx to use a health check (once it's added)
375
-
- [ ] ~configure caddy to only expose cache metrics to tailnet :/~
376
-
- [x] make some grafana dashboards
377
-
- [ ] raspi: mount /dev/sda on boot
378
-
- [ ] raspi: run link_aggregator via systemd so it starts on startup (and restarts?)
379
-
380
-
- [x] use nginx instead of caddy
381
-
- [x] nginx: enable cache
382
-
- [x] nginx: rate-limit
383
-
- [ ] nginx: get metrics
384
-
385
-
386
-
387
-
388
-
---
389
-
390
-
nginx cors for constellation + small burst bump
391
-
392
-
```nginx
393
-
upstream cozy_constellation {
394
-
server <tailnet ip>:6789; # bootes; ip so that we don't race on reboot with tailscale coming up, which nginx doesn't like
395
-
keepalive 16;
396
-
}
397
-
398
-
server {
399
-
server_name constellation.microcosm.blue;
400
-
401
-
proxy_cache cozy_zone;
402
-
proxy_cache_background_update on;
403
-
proxy_cache_key "$scheme$proxy_host$uri$is_args$args$http_accept";
404
-
proxy_cache_lock on; # make simultaneous requests for the same uri wait for it to appear in cache instead of hitting origin
405
-
proxy_cache_lock_age 1s;
406
-
proxy_cache_lock_timeout 2s;
407
-
proxy_cache_valid 10s; # default -- should be explicitly set in the response headers
408
-
proxy_cache_valid any 2s; # non-200s default
409
-
proxy_read_timeout 5s;
410
-
proxy_send_timeout 15s;
411
-
proxy_socket_keepalive on;
412
-
413
-
# take over cors responsibility from upstream. `always` applies it to error responses.
414
-
proxy_hide_header 'Access-Control-Allow-Origin';
415
-
proxy_hide_header 'Access-Control-Allow-Methods';
416
-
proxy_hide_header 'Access-Control-Allow-Headers';
417
-
add_header 'Access-Control-Allow-Origin' '*' always;
418
-
add_header 'Access-Control-Allow-Methods' 'GET' always;
419
-
add_header 'Access-Control-Allow-Headers' '*' always;
420
-
421
-
422
-
limit_req zone=cozy_ip_limit nodelay burst=150;
423
-
limit_req zone=cozy_global_limit burst=1800;
424
-
limit_req_status 429;
425
-
426
-
location / {
427
-
proxy_pass http://cozy_constellation;
428
-
include proxy_params;
429
-
proxy_http_version 1.1;
430
-
proxy_set_header Connection ""; # for keepalive
431
-
}
432
-
433
-
434
-
listen 443 ssl; # managed by Certbot
435
-
ssl_certificate /etc/letsencrypt/live/constellation.microcosm.blue/fullchain.pem; # managed by Certbot
436
-
ssl_certificate_key /etc/letsencrypt/live/constellation.microcosm.blue/privkey.pem; # managed by Certbot
437
-
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
438
-
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
439
-
440
-
}
441
-
442
-
server {
443
-
if ($host = constellation.microcosm.blue) {
444
-
return 301 https://$host$request_uri;
445
-
} # managed by Certbot
446
-
447
-
448
-
server_name constellation.microcosm.blue;
449
-
listen 80;
450
-
return 404; # managed by Certbot
451
-
}
452
-
```
453
-
454
-
re-reading about `nodelay`, i should probably remove it -- nginx would then queue requests to upstream, but still service them at the configured limit. it's fine for my internet since the global limit isn't nodelay, but probably less "fair" to clients if there's contention around the global limit (earlier requests would get all of theirs serviced before later ones can get in the queue)
455
-
456
-
leaving it for now though.
457
-
458
-
459
-
### nginx logs to prom
460
-
461
-
```bash
462
-
curl -LO https://github.com/martin-helmich/prometheus-nginxlog-exporter/releases/download/v1.11.0/prometheus-nginxlog-exporter_1.11.0_linux_amd64.deb
463
-
apt install ./prometheus-nginxlog-exporter_1.11.0_linux_amd64.deb
464
-
systemctl enable prometheus-nginxlog-exporter.service
465
-
466
-
```
467
-
468
-
have it run as www-data (maybe not the best idea but...)
469
-
file `/usr/lib/systemd/system/prometheus-nginxlog-exporter.service`
470
-
set User under service and remove capabilities bounding
471
-
472
-
```systemd
473
-
User=www-data
474
-
#CapabilityBoundingSet=
475
-
```
476
-
477
-
in `nginx.conf` in `http`:
478
-
479
-
```nginx
480
-
log_format constellation_format "$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\"";
481
-
```
482
-
483
-
in `sites-available/constellation.microcosm.blue` in `server`:
484
-
485
-
```nginx
486
-
# log format must match prometheus-nginx-log-exporter
487
-
access_log /var/log/nginx/constellation-access.log constellation_format;
488
-
```
489
-
490
-
config at `/etc/prometheus-nginxlog-exporter.hcl`
491
-
492
-
493
-
494
-
```bash
495
-
systemctl start prometheus-nginxlog-exporter.service
496
-
```
+1
-1
jetstream/Cargo.toml
+1
-1
jetstream/Cargo.toml
···
10
10
11
11
[dependencies]
12
12
async-trait = "0.1.83"
13
-
atrium-api = { version = "0.25.4", default-features = false, features = [
13
+
atrium-api = { git = "https://github.com/uniphil/atrium.git", branch = "fix/resolve-handle-https-accept-whitespace", default-features = false, features = [
14
14
"namespace-appbsky",
15
15
] }
16
16
tokio = { version = "1.44.2", features = ["full", "sync", "time"] }
+496
legacy/cozy-setup (move to another repo).md
+496
legacy/cozy-setup (move to another repo).md
···
1
+
cozy-ucosm
2
+
3
+
4
+
## gateway
5
+
6
+
- tailscale (exit node enabled)
7
+
-> allow ipv4 and ipv6 forwarding
8
+
- caddy
9
+
10
+
```bash
11
+
apt install golang
12
+
go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest
13
+
go/bin/xcaddy build \
14
+
--with github.com/caddyserver/cache-handler \
15
+
--with github.com/darkweak/storages/badger/caddy \
16
+
--with github.com/mholt/caddy-ratelimit
17
+
# then https://caddyserver.com/docs/running#manual-installation
18
+
19
+
mkdir /var/cache/caddy-badger
20
+
chown -R caddy:caddy /var/cache/caddy-badger/
21
+
```
22
+
23
+
- `/etc/caddy/Caddyfile`
24
+
25
+
```
26
+
{
27
+
cache {
28
+
badger
29
+
api {
30
+
prometheus
31
+
}
32
+
}
33
+
}
34
+
35
+
links.bsky.bad-example.com {
36
+
reverse_proxy link-aggregator:6789
37
+
38
+
@browser `{header.Origin.startsWith("Mozilla/5.0")`
39
+
rate_limit {
40
+
zone global_burst {
41
+
key {remote_host}
42
+
events 10
43
+
window 1s
44
+
}
45
+
zone global_general {
46
+
key {remote_host}
47
+
events 100
48
+
window 60s
49
+
log_key true
50
+
}
51
+
zone website_harsh_limit {
52
+
key {header.Origin}
53
+
match {
54
+
expression {header.User-Agent}.startsWith("Mozilla/5.0")
55
+
}
56
+
events 1000
57
+
window 30s
58
+
log_key true
59
+
}
60
+
}
61
+
respond /souin-api/metrics "denied" 403 # does not work
62
+
cache {
63
+
ttl 3s
64
+
stale 1h
65
+
default_cache_control public, s-maxage=3
66
+
badger {
67
+
path /var/cache/caddy-badger/links
68
+
}
69
+
}
70
+
}
71
+
72
+
gateway:80 {
73
+
metrics
74
+
cache
75
+
}
76
+
```
77
+
well... the gateway fell over IMMEDIATELY with like 2 req/sec from deletions, with that ^^ config. for now i removed everything except the reverse proxy config + normal caddy metrics and it's running fine on vanilla caddy. i did try reducing the rate-limiting configs to a single, fixed-key global limit but it still ate all the ram and died. maybe badger w/ the cache config was still a problem. maybe it would have been ok on a machine with more than 1GB mem.
78
+
79
+
80
+
alternative proxies:
81
+
82
+
- nginx. i should probably just use this. acme-client is a piece of cake to set up, and i know how to configure it.
83
+
- haproxy. also kind of familiar, it's old and stable. no idea how it handles low-mem (our 1gb) vs nginx.
84
+
- sozu. popular rust thing, fast. doesn't have rate-limiting or cache feature?
85
+
- rpxy. like caddy (auto-tls) but in rust and actually fast? has an "experimental" cache feature. but the cache feature looks good.
86
+
- rama. build-your-own proxy. not sure that it has both cache and limiter in their standard features?
87
+
- pingora. build-your-own cloudflare, so like, probably stable. has tools for cache and limiting. low-mem...?
88
+
- cache stuff in pingora seems a little... hit and miss (byeeeee). only a test impl for Storage for the main cache feature?
89
+
- but the rate-limiter has a guide: https://github.com/cloudflare/pingora/blob/main/docs/user_guide/rate_limiter.md
90
+
91
+
what i want is a low-resource reverse proxy with built-in rate-limiting and caching. but maybe cache (and/or ratelimiting) could be external to the reverse proxy
92
+
- varnish is a dedicated cache. has https://github.com/varnish/varnish-modules/blob/master/src/vmod_vsthrottle.vcc
93
+
- apache traffic control has experimental rate-limiting plugins
94
+
95
+
96
+
- victoriametrics
97
+
98
+
```bash
99
+
curl -LO https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.109.1/victoria-metrics-linux-amd64-v1.109.1.tar.gz
100
+
tar xzf victoria-metrics-linux-amd64-v1.109.1.tar.gz
101
+
# and then https://docs.victoriametrics.com/quick-start/#starting-vm-single-from-a-binary
102
+
sudo mkdir /etc/victoria-metrics && sudo chown -R victoriametrics:victoriametrics /etc/victoria-metrics
103
+
104
+
```
105
+
106
+
- `/etc/victoria-metrics/prometheus.yml`
107
+
108
+
```yaml
109
+
global:
110
+
scrape_interval: '15s'
111
+
112
+
scrape_configs:
113
+
- job_name: 'link_aggregator'
114
+
static_configs:
115
+
- targets: ['link-aggregator:8765']
116
+
- job_name: 'gateway:caddy'
117
+
static_configs:
118
+
- targets: ['gateway:80/metrics']
119
+
- job_name: 'gateway:cache'
120
+
static_configs:
121
+
- targets: ['gateway:80/souin-api/metrics']
122
+
```
123
+
124
+
- `ExecStart` in `/etc/systemd/system/victoriametrics.service`:
125
+
126
+
```
127
+
ExecStart=/usr/local/bin/victoria-metrics-prod -storageDataPath=/var/lib/victoria-metrics -retentionPeriod=90d -selfScrapeInterval=1m -promscrape.config=/etc/victoria-metrics/prometheus.yml
128
+
```
129
+
130
+
- grafana
131
+
132
+
followed `https://grafana.com/docs/grafana/latest/setup-grafana/installation/debian/#install-grafana-on-debian-or-ubuntu`
133
+
134
+
something something something then
135
+
136
+
```
137
+
sudo grafana-cli --pluginUrl https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.11.1/victoriametrics-datasource-v0.11.1.zip plugins install victoriametrics
138
+
```
139
+
140
+
- raspi node_exporter
141
+
142
+
```bash
143
+
curl -LO https://github.com/prometheus/node_exporter/releases/download/v1.8.2/node_exporter-1.8.2.linux-armv7.tar.gz
144
+
tar xzf node_exporter-1.8.2.linux-armv7.tar.gz
145
+
sudo cp node_exporter-1.8.2.linux-armv7/node_exporter /usr/local/bin/
146
+
sudo useradd --no-create-home --shell /bin/false node_exporter
147
+
sudo nano /etc/systemd/system/node_exporter.service
148
+
# [Unit]
149
+
# Description=Node Exporter
150
+
# Wants=network-online.target
151
+
# After=network-online.target
152
+
153
+
# [Service]
154
+
# User=node_exporter
155
+
# Group=node_exporter
156
+
# Type=simple
157
+
# ExecStart=/usr/local/bin/node_exporter
158
+
# Restart=always
159
+
# RestartSec=3
160
+
161
+
# [Install]
162
+
# WantedBy=multi-user.target
163
+
sudo systemctl daemon-reload
164
+
sudo systemctl enable node_exporter.service
165
+
sudo systemctl start node_exporter.service
166
+
```
167
+
168
+
todo: get raspi vcgencmd outputs into metrics
169
+
170
+
- nginx on gateway
171
+
172
+
```nginx
173
+
# in http
174
+
175
+
##
176
+
# cozy cache
177
+
##
178
+
proxy_cache_path /var/cache/nginx keys_zone=cozy_zone:10m;
179
+
180
+
##
181
+
# cozy limit
182
+
##
183
+
limit_req_zone $binary_remote_addr zone=cozy_ip_limit:10m rate=50r/s;
184
+
limit_req_zone $server_name zone=cozy_global_limit:10m rate=1000r/s;
185
+
186
+
# in sites-available/constellation.microcosm.blue
187
+
188
+
upstream cozy_link_aggregator {
189
+
server link-aggregator:6789;
190
+
keepalive 16;
191
+
}
192
+
193
+
server {
194
+
listen 8080;
195
+
listen [::]:8080;
196
+
197
+
server_name constellation.microcosm.blue;
198
+
199
+
proxy_cache cozy_zone;
200
+
proxy_cache_background_update on;
201
+
proxy_cache_key "$scheme$proxy_host$uri$is_args$args$http_accept";
202
+
proxy_cache_lock on; # make simultaneous requests for the same uri wait for it to appear in cache instead of hitting origin
203
+
proxy_cache_lock_age 1s;
204
+
proxy_cache_lock_timeout 2s;
205
+
proxy_cache_valid 10s; # default -- should be explicitly set in the response headers
206
+
proxy_cache_valid any 15s; # non-200s default
207
+
proxy_read_timeout 5s;
208
+
proxy_send_timeout 15s;
209
+
proxy_socket_keepalive on;
210
+
211
+
limit_req zone=cozy_ip_limit nodelay burst=100;
212
+
limit_req zone=cozy_global_limit;
213
+
limit_req_status 429;
214
+
215
+
location / {
216
+
proxy_pass http://cozy_link_aggregator;
217
+
include proxy_params;
218
+
proxy_http_version 1.1;
219
+
proxy_set_header Connection ""; # for keepalive
220
+
}
221
+
}
222
+
```
223
+
224
+
also `systemctl edit nginx` and paste
225
+
226
+
```
227
+
[Service]
228
+
Restart=always
229
+
```
230
+
231
+
→ https://serverfault.com/a/1003373
232
+
233
+
now making browsers redirect to the microcosm.blue url:
234
+
235
+
```
236
+
[...]
237
+
server_name links.bsky.bad-example.com;
238
+
239
+
add_header Access-Control-Allow-Origin * always; # bit of hack to have it here but nginx doesn't like it in the `if`
240
+
if ($http_user_agent ~ ^Mozilla/) {
241
+
# for now send *browsers* to the new location, hopefully without impacting api requests
242
+
# (yeah we're doing UA test here and content-negotiation in the app. whatever.)
243
+
return 301 https://constellation.microcosm.blue$request_uri;
244
+
}
245
+
[...]
246
+
```
247
+
248
+
- nginx metrics
249
+
250
+
- download nginx-prometheus-exporter
251
+
https://github.com/nginx/nginx-prometheus-exporter/releases/download/v1.4.1/nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
252
+
253
+
- err actually going to make mistakes and try with snap
254
+
`snap install nginx-prometheus-exporter`
255
+
- so it got a binary for me but no systemd task set up. boooo.
256
+
`snap remove nginx-prometheus-exporter`
257
+
258
+
- ```bash
259
+
curl -LO https://github.com/nginx/nginx-prometheus-exporter/releases/download/v1.4.1/nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
260
+
tar xzf nginx-prometheus-exporter_1.4.1_linux_amd64.tar.gz
261
+
mv nginx-prometheus-exporter /usr/local/bin
262
+
useradd --no-create-home --shell /bin/false nginx-prometheus-exporter
263
+
nano /etc/systemd/system/nginx-prometheus-exporter.service
264
+
# [Unit]
265
+
# Description=NGINX Exporter
266
+
# Wants=network-online.target
267
+
# After=network-online.target
268
+
269
+
# [Service]
270
+
# User=nginx-prometheus-exporter
271
+
# Group=nginx-prometheus-exporter
272
+
# Type=simple
273
+
# ExecStart=/usr/local/bin/nginx-prometheus-exporter --nginx.scrape-uri=http://gateway:8080/stub_status --web.listen-address=gateway:9113
274
+
# Restart=always
275
+
# RestartSec=3
276
+
277
+
# [Install]
278
+
# WantedBy=multi-user.target
279
+
systemctl daemon-reload
280
+
systemctl start nginx-prometheus-exporter.service
281
+
systemctl enable nginx-prometheus-exporter.service
282
+
```
283
+
284
+
- nginx `/etc/nginx/sites-available/gateway-nginx-status`
285
+
286
+
```nginx
287
+
server {
288
+
listen 8080;
289
+
listen [::]:8080;
290
+
291
+
server_name gateway;
292
+
293
+
location /stub_status {
294
+
stub_status;
295
+
}
296
+
location / {
297
+
return 404;
298
+
}
299
+
}
300
+
```
301
+
302
+
```bash
303
+
ln -s /etc/nginx/sites-available/gateway-nginx-status /etc/nginx/sites-enabled/
304
+
```
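verify both ends of the pipe (hostnames per the config above):

```bash
# nginx's built-in counters
curl -s http://gateway:8080/stub_status
# and the exporter's translation of them
curl -s http://gateway:9113/metrics | grep '^nginx' | head
```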
305
+
306
+
307
+
## bootes (pi5)
308
+
309
+
- mount sd card, touch `ssh` file, and set up the default user: `echo "pi:$(echo raspberry | openssl passwd -6 -stdin)" > userconf.txt`
310
+
- raspi-config: enable pcie 3, set hostname, enable ssh
311
+
- put ssh key into `.ssh/authorized_keys`
312
+
- put `PasswordAuthentication no` in `/etc/ssh/sshd_config`
313
+
- `sudo apt update && sudo apt upgrade`
314
+
- `sudo apt install xfsprogs`
315
+
- `sudo mkfs.xfs -L c11n-kv /dev/nvme0n1`
316
+
- `sudo mount /dev/nvme0n1 /mnt` (fstab sketch for mounting on boot at the end of this section)
317
+
- set up tailscale
318
+
- `sudo tailscale up`
319
+
- `git clone https://github.com/atcosm/links.git`
320
+
- tailscale: disable bootes key expiry
321
+
- rustup `curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh`
322
+
- `cd links/constellation`
323
+
- `sudo apt install libssl-dev` needed
324
+
- `sudo apt install clang` needed for bindgen
325
+
- (in tmux) `cargo build --release`
326
+
- `mkdir ~/backup`
327
+
- `sudo mount.cifs "//truenas.local/folks data" /home/pi/backup -o user=phil,uid=pi`
328
+
- `sudo chown pi:pi /mnt/`
329
+
- `RUST_BACKTRACE=full cargo run --bin rocks-restore-from-backup --release -- --from-backup-dir "/home/pi/backup/constellation-index" --to-data-dir /mnt/constellation-index`
330
+
etc
331
+
- follow above `- raspi node_exporter`
332
+
- configure victoriametrics to scrape the new pi
333
+
- configure ulimit before starting! `ulimit -n 16384`
334
+
- `RUST_BACKTRACE=full cargo run --release -- --backend rocks --data /mnt/constellation-index/ --jetstream us-east-2 --backup /home/pi/backup/constellation-index --backup-interval 6 --max-old-backups 20`
335
+
- add server to nginx gateway upstream: ` server 100.123.79.12:6789; # bootes`
336
+
- stop backups from running on the older instance! `RUST_BACKTRACE=full cargo run --release -- --backend rocks --data /mnt/links-2.rocks/ --jetstream us-east-1`
337
+
- stop upstreaming requests to older instance in nginx
338
+
339
+
340
+
- systemd unit for running: `sudo nano /etc/systemd/system/constellation.service`
341
+
342
+
```ini
343
+
[Unit]
344
+
Description=Constellation backlinks index
345
+
After=network.target
346
+
347
+
[Service]
348
+
User=pi
349
+
WorkingDirectory=/home/pi/links/constellation
350
+
ExecStart=/home/pi/links/target/release/main --backend rocks --data /mnt/constellation-index/ --jetstream us-east-2 --backup /home/pi/backup/constellation-index --backup-interval 6 --max-old-backups 20
351
+
LimitNOFILE=16384
352
+
Restart=always
353
+
354
+
[Install]
355
+
WantedBy=multi-user.target
356
+
```
357
+
358
+
359
+
- todo: overlayfs? would need to figure out builds/updates still, also i guess logs are currently written to sd? (oof)
360
+
- todo: cross-compile for raspi?
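
untested sketch for the fstab entry mentioned in the mount step above (`nofail` so a missing drive doesn't block boot):

```bash
# label was set by mkfs.xfs -L c11n-kv earlier
echo 'LABEL=c11n-kv /mnt xfs defaults,noatime,nofail 0 0' | sudo tee -a /etc/fstab
sudo mount -a   # check it mounts cleanly before trusting a reboot to it
```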
361
+
362
+
---
363
+
364
+
some todos
365
+
366
+
- [x] tailscale: exit node
367
+
- [!] link_aggregator: use exit node
368
+
-> worked, but reverted for now: tailscale on raspi was consuming ~50% cpu for the jetstream traffic. this might be near its max since it would have been catching up at the time (max jetstream throughput) but it feels a bit too much. we have to trust the jetstream server and link_aggregator doesn't (yet) make any other external connections, so for now the raspi connects directly from my home again.
369
+
- [x] caddy: reverse proxy
370
+
- [x] build with cache and rate-limit plugins
371
+
- [x] configure systemd to keep it alive
372
+
- [x] configure caddy cache
373
+
- [x] configure caddy rate-limit
374
+
- [ ] configure ~caddy~ nginx to use a health check (once it's added)
375
+
- [ ] ~configure caddy to only expose cache metrics to tailnet :/~
376
+
- [x] make some grafana dashboards
377
+
- [ ] raspi: mount /dev/sda on boot
378
+
- [ ] raspi: run link_aggregator via systemd so it starts on startup (and restarts?)
379
+
380
+
- [x] use nginx instead of caddy
381
+
- [x] nginx: enable cache
382
+
- [x] nginx: rate-limit
383
+
- [ ] nginx: get metrics
384
+
385
+
386
+
387
+
388
+
---
389
+
390
+
nginx cors for constellation + small burst bump
391
+
392
+
```nginx
393
+
upstream cozy_constellation {
394
+
server <tailnet ip>:6789; # bootes; ip so that we don't race on reboot with tailscale coming up, which nginx doesn't like
395
+
keepalive 16;
396
+
}
397
+
398
+
server {
399
+
server_name constellation.microcosm.blue;
400
+
401
+
proxy_cache cozy_zone;
402
+
proxy_cache_background_update on;
403
+
proxy_cache_key "$scheme$proxy_host$uri$is_args$args$http_accept";
404
+
proxy_cache_lock on; # make simultaneous requests for the same uri wait for it to appear in cache instead of hitting origin
405
+
proxy_cache_lock_age 1s;
406
+
proxy_cache_lock_timeout 2s;
407
+
proxy_cache_valid 10s; # default -- should be explicitly set in the response headers
408
+
proxy_cache_valid any 2s; # non-200s default
409
+
proxy_read_timeout 5s;
410
+
proxy_send_timeout 15s;
411
+
proxy_socket_keepalive on;
412
+
413
+
# take over cors responsibility from upstream. `always` applies it to error responses.
414
+
proxy_hide_header 'Access-Control-Allow-Origin';
415
+
proxy_hide_header 'Access-Control-Allow-Methods';
416
+
proxy_hide_header 'Access-Control-Allow-Headers';
417
+
add_header 'Access-Control-Allow-Origin' '*' always;
418
+
add_header 'Access-Control-Allow-Methods' 'GET' always;
419
+
add_header 'Access-Control-Allow-Headers' '*' always;
420
+
421
+
422
+
limit_req zone=cozy_ip_limit nodelay burst=150;
423
+
limit_req zone=cozy_global_limit burst=1800;
424
+
limit_req_status 429;
425
+
426
+
location / {
427
+
proxy_pass http://cozy_constellation;
428
+
include proxy_params;
429
+
proxy_http_version 1.1;
430
+
proxy_set_header Connection ""; # for keepalive
431
+
}
432
+
433
+
434
+
listen 443 ssl; # managed by Certbot
435
+
ssl_certificate /etc/letsencrypt/live/constellation.microcosm.blue/fullchain.pem; # managed by Certbot
436
+
ssl_certificate_key /etc/letsencrypt/live/constellation.microcosm.blue/privkey.pem; # managed by Certbot
437
+
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
438
+
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
439
+
440
+
}
441
+
442
+
server {
443
+
if ($host = constellation.microcosm.blue) {
444
+
return 301 https://$host$request_uri;
445
+
} # managed by Certbot
446
+
447
+
448
+
server_name constellation.microcosm.blue;
449
+
listen 80;
450
+
return 404; # managed by Certbot
451
+
}
452
+
```
453
+
454
+
re-reading about `nodelay`, i should probably remove it -- nginx would then queue requests to upstream, but still service them at the configured limit. it's fine for my internet since the global limit isn't nodelay, but probably less "fair" to clients if there's contention around the global limit (earlier requests would get all of theirs serviced before later ones can get in the queue)
455
+
456
+
leaving it for now though.
457
+
458
+
459
+
### nginx logs to prom
460
+
461
+
```bash
462
+
curl -LO https://github.com/martin-helmich/prometheus-nginxlog-exporter/releases/download/v1.11.0/prometheus-nginxlog-exporter_1.11.0_linux_amd64.deb
463
+
apt install ./prometheus-nginxlog-exporter_1.11.0_linux_amd64.deb
464
+
systemctl enable prometheus-nginxlog-exporter.service
465
+
466
+
```
467
+
468
+
have it run as www-data (maybe not the best idea but...)
469
+
file `/usr/lib/systemd/system/prometheus-nginxlog-exporter.service`
470
+
set `User` under `[Service]` and comment out the capability bounding set:
471
+
472
+
```systemd
473
+
User=www-data
474
+
#CapabilityBoundingSet=
475
+
```
476
+
477
+
in `nginx.conf` in `http`:
478
+
479
+
```nginx
480
+
log_format constellation_format "$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\"";
481
+
```
482
+
483
+
in `sites-available/constellation.microcosm.blue` in `server`:
484
+
485
+
```nginx
486
+
# log format must match prometheus-nginx-log-exporter
487
+
access_log /var/log/nginx/constellation-access.log constellation_format;
488
+
```
489
+
490
+
config at `/etc/prometheus-nginxlog-exporter.hcl`
491
+
492
+
493
+
494
+
```bash
495
+
systemctl start prometheus-nginxlog-exporter.service
496
+
```
+35
legacy/old-readme-details.md
···
1
+
[Constellation](./constellation/)
2
+
--------------------------------------------
3
+
4
+
A global atproto backlink index ✨
5
+
6
+
- Self hostable: handles the full write throughput of the global atproto firehose on a raspberry pi 4b + single SSD
7
+
- Storage efficient: less than 2GB/day disk consumption indexing all references in all lexicons and all non-atproto URLs
8
+
- Handles record deletion, account de/re-activation, and account deletion, ensuring accurate link counts and respecting users' data choices
9
+
- Simple JSON API
10
+
11
+
All social interactions in atproto tend to be represented by links (or references) between PDS records. This index can answer questions like "how many likes does a bsky post have", "who follows an account", "what are all the comments on a [frontpage](https://frontpage.fyi/) post", and more.
12
+
13
+
- **status**: works! api is unstable and likely to change, and no known instances have a full network backfill yet.
14
+
- source: [./constellation/](./constellation/)
15
+
- public instance: [constellation.microcosm.blue](https://constellation.microcosm.blue/)
16
+
17
+
_note: the public instance currently runs on a little raspberry pi in my house, feel free to use it! it comes with only best-effort uptime, no commitment to not breaking the api for now, and possible rate-limiting. if you want to be nice you can put your project name and bsky username (or email) in your user-agent header for api requests._
18
+
19
+
20
+
App: Spacedust
21
+
--------------
22
+
23
+
A notification subscription service 💫
24
+
25
+
using the same "link source" concept as [constellation](./constellation/), offer webhook notifications for new references created to records
26
+
27
+
- **status**: in design
28
+
29
+
30
+
Library: [links](./links/)
31
+
------------------------------------
32
+
33
+
A rust crate (not published on crates.io yet) for optimistically parsing links out of arbitrary atproto PDS records, and potentially canonicalizing them
34
+
35
+
- **status**: unstable, might remain an internal lib for constellation (and spacedust, soon)
+123
legacy/original-notes.md
···
1
+
---
2
+
3
+
4
+
old notes follow, ignore
5
+
------------------------
6
+
7
+
8
+
as far as i can tell, atproto lexicons today don't follow much of a convention for referencing across documents: sometimes it's a StrongRef, sometimes it's a DID, sometimes it's a bare at-uri. lexicon authors choose any old link-sounding key name for the key in their document.
9
+
10
+
it's pretty messy so embrace the mess: atproto wants to be part of the web, so this library will also extract URLs and other URIs if you want it to. all the links.
11
+
12
+
13
+
why
14
+
---
15
+
16
+
the atproto firehose that bluesky sprays at you will contain raw _contents_ from peoples' pdses. these are isolated, decontextualized updates. it's very easy to build some kinds of interesting downstream apps off of this feed.
17
+
18
+
- bluesky posts (firesky, deletions, )
19
+
- bluesky post stats (emojis, )
20
+
- trending keywords ()
21
+
22
+
but bringing almost any kind of _context_ into your project requires a big step up in complexity and potentially cost: you're entering "appview" territory. _how many likes does a post have? who follows this account?_
23
+
24
+
you own your atproto data: it's kept in your personal data repository (PDS) and no one else can write to it. when someone likes your post, they create a "like" record in their _own_ pds, and that like belongs to _them_, not to you/your post.
25
+
26
+
in the firehose you'll see a `app.bsky.feed.post` record created, with no details about who has liked it. then you'll see separate `app.bsky.feed.like` records show up for each like that comes in on that post, with no context about the post except a random-looking reference to it. storing these in order to do so is up to you!
27
+
28
+
**so, why**
29
+
30
+
everything is links, and they're a mess, but they all kinda work the same, so maybe some tooling can bring down that big step in complexity from firehose raw-content apps -> apps requiring any social context.
31
+
32
+
everything is links:
33
+
34
+
- likes
35
+
- follows
36
+
- blocks
37
+
- reposts
38
+
- quotes
39
+
40
+
some low-level things you could make from links:
41
+
42
+
- notification streams (part of ucosm)
43
+
- a global reverse index (part of ucosm)
44
+
45
+
i think that making these low-level services as easy to use as jetstream could open up pathways for building more atproto apps that operate at full scale with interesting features for reasonable effort at low cost to operate.
46
+
47
+
48
+
extracting links
49
+
---------------
50
+
51
+
52
+
- low-level: pass a &str of a field value and get a parsed link back
53
+
54
+
- med-level: pass a &str of record in json form and get a list of parsed links + json paths back. (todo: should also handle dag-cbor prob?)
55
+
56
+
- high-ish level: pass the json record and maybe apply some pre-loaded rules based on known lexicons to get the best result.
57
+
58
+
for now, a link is only considered if it matches for the entire value of the record's field -- links embedded in text content are not included. note that urls in bluesky posts _will_ still be extracted, since they are broken out into facets.
59
+
60
+
61
+
resolving / canonicalizing links
62
+
--------------------------------
63
+
64
+
65
+
### at-uris
66
+
67
+
every at-uri has at least two equivalent forms, one with a `DID`, and one with an account handle. the at-uri spec [illustrates this by example](https://atproto.com/specs/at-uri-scheme):
68
+
69
+
- `at://did:plc:44ybard66vv44zksje25o7dz/app.bsky.feed.post/3jwdwj2ctlk26`
70
+
- `at://bnewbold.bsky.team/app.bsky.feed.post/3jwdwj2ctlk26`
71
+
72
+
some applications, like a reverse link index, may wish to canonicalize at-uris to a single form. the `DID`-form is stable as an account changes its handle and probably the right choice to canonicalize to, but maybe some apps would actually prefer to canonicalise to handles?
73
+
74
+
hopefully atrium will make it easy to resolve at-uris.
75
+
76
+
77
+
### urls
78
+
79
+
canonicalizing URLs is more annoying but also a bit more established. lots of details.
80
+
81
+
- do we have to deal with punycode?
82
+
- follow redirects (todo: only permanent ones, or all?)
83
+
- check for rel=canonical http header and possibly follow it
84
+
- check link rel=canonical meta tag and possibly follow it
85
+
- do we need to check site maps??
86
+
- do we have to care at all about AMP?
87
+
- do we want anything to do with url shorteners??
88
+
- how do multilingual sites affect this?
89
+
- do we have to care about `script type="application/ld+json"` ???
90
+
91
+
ugh. is there a crate for this.
92
+
93
+
94
+
### relative uris?
95
+
96
+
links might be relative, in which case they might need to be made absolute before being useful. is that a concern for this library, or up to the user? (seems like we might not have context here to determine its absolute)
97
+
98
+
99
+
### canonicalizing
100
+
101
+
there should be a few async functions available to canonicalize already-parsed links.
102
+
103
+
- what happens if a link can't be resolved?
104
+
105
+
106
+
---
107
+
108
+
- using `tinyjson` because it's nice -- maybe should switch to serde_json to share deps with atrium?
109
+
110
+
- would use atrium for parsing at-uris, but it's not in there. there's a did-only version in the non-lib commands.rs. its identifier parser is strict to did + handle, which makes sense, but for our purposes we might want to allow unknown methods too?
111
+
112
+
- rsky-syntax has an aturi
113
+
- adenosyne also
114
+
- might come back to these
115
+
116
+
117
+
-------
118
+
119
+
rocks
120
+
121
+
```bash
122
+
ROCKSDB_LIB_DIR=/nix/store/z2chn0hsik0clridr8mlprx1cngh1g3c-rocksdb-9.7.3/lib/ cargo build
123
+
```
+196
legacy/ufos ops (move to micro-ops).md
···
1
+
ufos ops
2
+
3
+
btrfs snapshots: snapper
4
+
5
+
```bash
6
+
sudo apt install snapper
7
+
sudo snapper -c ufos-db create-config /mnt/ufos-db
8
+
9
+
# edit /etc/snapper/configs/ufos-db
10
+
# change
11
+
TIMELINE_MIN_AGE="1800"
12
+
TIMELINE_LIMIT_HOURLY="10"
13
+
TIMELINE_LIMIT_DAILY="10"
14
+
TIMELINE_LIMIT_WEEKLY="0"
15
+
TIMELINE_LIMIT_MONTHLY="10"
16
+
TIMELINE_LIMIT_YEARLY="10"
17
+
# to
18
+
TIMELINE_MIN_AGE="1800"
19
+
TIMELINE_LIMIT_HOURLY="22"
20
+
TIMELINE_LIMIT_DAILY="4"
21
+
TIMELINE_LIMIT_WEEKLY="0"
22
+
TIMELINE_LIMIT_MONTHLY="0"
23
+
TIMELINE_LIMIT_YEARLY="0"
24
+
```
25
+
26
+
this should be enough?
27
+
28
+
list snapshots:
29
+
30
+
```bash
31
+
sudo snapper -c ufos-db list
32
+
```
33
+
34
+
systemd
35
+
36
+
create file: `/etc/systemd/system/ufos.service`
37
+
38
+
```ini
39
+
[Unit]
40
+
Description=UFOs-API
41
+
After=network.target
42
+
43
+
[Service]
44
+
User=pi
45
+
WorkingDirectory=/home/pi/
46
+
ExecStart=/home/pi/ufos --jetstream us-west-2 --data /mnt/ufos-db/
47
+
Environment="RUST_LOG=info"
48
+
LimitNOFILE=16384
49
+
Restart=always
50
+
51
+
[Install]
52
+
WantedBy=multi-user.target
53
+
```
54
+
55
+
then
56
+
57
+
```bash
58
+
sudo systemctl daemon-reload
59
+
sudo systemctl enable ufos
60
+
sudo systemctl start ufos
61
+
```
62
+
63
+
monitor with
64
+
65
+
```bash
66
+
journalctl -u ufos -f
67
+
```
68
+
69
+
make sure a backup dir exists
70
+
71
+
```bash
72
+
mkdir /home/pi/backup
73
+
```
74
+
75
+
mount the NAS
76
+
77
+
```bash
78
+
sudo mount.cifs "//truenas.local/folks data" /home/pi/backup -o user=phil,uid=pi
79
+
```
80
+
81
+
manual rsync
82
+
83
+
```bash
84
+
sudo rsync -ahP --delete /mnt/ufos-db/.snapshots/1/snapshot/ backup/ufos/
85
+
```
86
+
87
+
backup script sketch
88
+
89
+
```bash
90
+
NUM=$(sudo snapper --csvout -c ufos-db list --type single --columns number | tail -n1)
91
+
sudo rsync -ahP --delete "/mnt/ufos-db/.snapshots/${NUM}/snapshot/" backup/ufos/
92
+
```
93
+
94
+
just crontab it?
95
+
96
+
`sudo crontab -e`
97
+
```bash
98
+
0 1-23/6 * * * rsync -ahP --delete "/mnt/ufos-db/.snapshots/$(sudo snapper --csvout -c ufos-db list --columns number | tail -n1)/snapshot/" backup/ufos/
99
+
```
100
+
101
+
^^ try once initial backup is done
102
+
103
+
104
+
--columns subvolume,number
105
+
106
+
subvolume
107
+
number
108
+
109
+
110
+
111
+
112
+
gateway: follow constellation for nginx->prom thing
113
+
114
+
config at `/etc/prometheus-nginxlog-exporter.hcl`
115
+
116
+
before: `/etc/prometheus-nginxlog-exporter.hcl`
117
+
118
+
```hcl
119
+
listen {
120
+
port = 4044
121
+
}
122
+
123
+
namespace "nginx" {
124
+
source = {
125
+
files = [
126
+
"/var/log/nginx/constellation-access.log"
127
+
]
128
+
}
129
+
130
+
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
131
+
132
+
labels {
133
+
app = "constellation"
134
+
}
135
+
136
+
relabel "cache_status" {
137
+
from = "upstream_cache_status"
138
+
}
139
+
}
140
+
```
141
+
142
+
after:
143
+
144
+
```hcl
145
+
listen {
146
+
port = 4044
147
+
}
148
+
149
+
namespace "constellation" {
150
+
source = {
151
+
files = [
152
+
"/var/log/nginx/constellation-access.log"
153
+
]
154
+
}
155
+
156
+
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
157
+
158
+
labels {
159
+
app = "constellation"
160
+
}
161
+
162
+
relabel "cache_status" {
163
+
from = "upstream_cache_status"
164
+
}
165
+
166
+
namespace_label = "vhost"
167
+
metrics_override = { prefix = "nginx" }
168
+
}
169
+
170
+
namespace "ufos" {
171
+
source = {
172
+
files = [
173
+
"/var/log/nginx/ufos-access.log"
174
+
]
175
+
}
176
+
177
+
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
178
+
179
+
labels {
180
+
app = "ufos"
181
+
}
182
+
183
+
relabel "cache_status" {
184
+
from = "upstream_cache_status"
185
+
}
186
+
187
+
namespace_label = "vhost"
188
+
metrics_override = { prefix = "nginx" }
189
+
}
190
+
```
191
+
192
+
193
+
```bash
194
+
systemctl start prometheus-nginxlog-exporter.service
195
+
```
196
+
+2
links/Cargo.toml
+3
-2
links/src/lib.rs
···
1
1
use fluent_uri::Uri;
2
+
use serde::{Deserialize, Serialize};
2
3
3
4
pub mod at_uri;
4
5
pub mod did;
···
6
7
7
8
pub use record::collect_links;
8
9
9
-
#[derive(Debug, Clone, Ord, Eq, PartialOrd, PartialEq)]
10
+
#[derive(Debug, Clone, Ord, Eq, PartialOrd, PartialEq, Serialize, Deserialize)]
10
11
pub enum Link {
11
12
AtUri(String),
12
13
Uri(String),
···
59
60
}
60
61
}
61
62
62
-
#[derive(Debug, PartialEq)]
63
+
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
63
64
pub struct CollectedLink {
64
65
pub path: String,
65
66
pub target: Link,
+41
links/src/record.rs
···
1
+
use dasl::drisl::Value as DrislValue;
1
2
use tinyjson::JsonValue;
2
3
3
4
use crate::{parse_any_link, CollectedLink};
···
36
37
}
37
38
}
38
39
40
+
pub fn walk_drisl(path: &str, v: &DrislValue, found: &mut Vec<CollectedLink>) {
41
+
match v {
42
+
DrislValue::Map(o) => {
43
+
for (key, child) in o {
44
+
walk_drisl(&format!("{path}.{key}"), child, found)
45
+
}
46
+
}
47
+
DrislValue::Array(a) => {
48
+
for child in a {
49
+
let child_p = match child {
50
+
DrislValue::Map(o) => {
51
+
if let Some(DrislValue::Text(t)) = o.get("$type") {
52
+
format!("{path}[{t}]")
53
+
} else {
54
+
format!("{path}[]")
55
+
}
56
+
}
57
+
_ => format!("{path}[]"),
58
+
};
59
+
walk_drisl(&child_p, child, found)
60
+
}
61
+
}
62
+
DrislValue::Text(s) => {
63
+
if let Some(link) = parse_any_link(s) {
64
+
found.push(CollectedLink {
65
+
path: path.to_string(),
66
+
target: link,
67
+
});
68
+
}
69
+
}
70
+
_ => {}
71
+
}
72
+
}
73
+
39
74
pub fn collect_links(v: &JsonValue) -> Vec<CollectedLink> {
40
75
let mut found = vec![];
41
76
walk_record("", v, &mut found);
77
+
found
78
+
}
79
+
80
+
pub fn collect_links_drisl(v: &DrislValue) -> Vec<CollectedLink> {
81
+
let mut found = vec![];
82
+
walk_drisl("", v, &mut found);
42
83
found
43
84
}
44
85
+1
pocket/.gitignore
···
1
+
prefs.sqlite3*
+19
pocket/Cargo.toml
···
1
+
[package]
2
+
name = "pocket"
3
+
version = "0.1.0"
4
+
edition = "2024"
5
+
6
+
[dependencies]
7
+
atrium-crypto = "0.1.2"
8
+
clap = { version = "4.5.41", features = ["derive"] }
9
+
jwt-compact = { git = "https://github.com/fatfingers23/jwt-compact.git", features = ["es256k"] }
10
+
log = "0.4.27"
11
+
poem = { version = "3.1.12", features = ["acme", "static-files"] }
12
+
poem-openapi = { version = "5.1.16", features = ["scalar"] }
13
+
reqwest = { version = "0.12.22", features = ["json"] }
14
+
rusqlite = "0.37.0"
15
+
serde = { version = "1.0.219", features = ["derive"] }
16
+
serde_json = { version = "1.0.141" }
17
+
thiserror = "2.0.16"
18
+
tokio = { version = "1.47.0", features = ["full"] }
19
+
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
+17
pocket/api-description.md
···
1
+
_A pocket dimension to stash a bit of non-public user data._
2
+
3
+
4
+
# Pocket: user preference storage
5
+
6
+
This API leverages atproto service proxying to offer a bit of per-user per-app non-public data storage.
7
+
Perfect for things like application preferences that might be better left out of the public PDS data.
8
+
9
+
The intent is to use oauth scopes to isolate storage on a per-application basis, and to allow easy data migration from a community hosted instance to your own if you end up needing that.
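
A sketch of the intended call flow (hostnames here are placeholders, and using `com.atproto.server.getServiceAuth` as the token source is an assumption about your client setup):

```bash
PDS=https://pds.example.com          # your pds
POCKET=https://pocket.example.com    # a pocket instance

# 1. mint a service-auth token from your pds, scoped to one pocket method
TOKEN=$(curl -s "$PDS/xrpc/com.atproto.server.getServiceAuth?aud=did:web:pocket.example.com&lxm=com.bad-example.pocket.getPreferences" \
  -H "Authorization: Bearer $PDS_ACCESS_JWT" | jq -r .token)

# 2. pocket verifies the token's signature against your did doc, then looks
#    up prefs keyed by (your did, the aud the token was minted for)
curl -s "$POCKET/xrpc/com.bad-example.pocket.getPreferences" \
  -H "Authorization: Bearer $TOKEN"
```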
10
+
11
+
12
+
### Current status
13
+
14
+
> [!important]
15
+
> Pocket is currently in a **v0, pre-release state**. There is one production instance and you can use it! Expect short downtimes for restarts as development progresses and occasional data loss until it's stable.
16
+
17
+
ATProto might end up adding a similar feature to [PDSs](https://atproto.com/guides/glossary#pds-personal-data-server). If/when that happens, you should use it instead of this!
+7
pocket/src/lib.rs
+34
pocket/src/main.rs
···
1
+
use clap::Parser;
2
+
use pocket::{Storage, serve};
3
+
use std::path::PathBuf;
4
+
5
+
/// Pocket: a pocket dimension to stash a bit of non-public user data
6
+
#[derive(Parser, Debug, Clone)]
7
+
#[command(version, about, long_about = None)]
8
+
struct Args {
9
+
/// path to the sqlite db file
10
+
#[arg(long)]
11
+
db: Option<PathBuf>,
12
+
/// just initialize the db and exit
13
+
#[arg(long, action)]
14
+
init_db: bool,
15
+
/// the domain for serving a did doc (unused if running behind reflector)
16
+
#[arg(long)]
17
+
domain: Option<String>,
18
+
}
19
+
20
+
#[tokio::main]
21
+
async fn main() {
22
+
tracing_subscriber::fmt::init();
23
+
log::info!("👋 hi");
24
+
let args = Args::parse();
25
+
let domain = args.domain.unwrap_or("bad-example.com".into());
26
+
let db_path = args.db.unwrap_or("prefs.sqlite3".into());
27
+
if args.init_db {
28
+
Storage::init(&db_path).unwrap();
29
+
log::info!("👋 initialized db at {db_path:?}. bye")
30
+
} else {
31
+
let storage = Storage::connect(db_path).unwrap();
32
+
serve(&domain, storage).await
33
+
}
34
+
}
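assumed invocation based on the flags above (the listen address comes from `server.rs`):

```bash
# one-time setup
cargo run --bin pocket -- --init-db --db prefs.sqlite3
# then serve (listens on 127.0.0.1:3000)
cargo run --bin pocket -- --db prefs.sqlite3 --domain pocket.example.com
```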
+265
pocket/src/server.rs
···
1
+
use crate::{Storage, TokenVerifier};
2
+
use poem::{
3
+
Endpoint, EndpointExt, Route, Server,
4
+
endpoint::{StaticFileEndpoint, make_sync},
5
+
http::Method,
6
+
listener::TcpListener,
7
+
middleware::{CatchPanic, Cors, Tracing},
8
+
};
9
+
use poem_openapi::{
10
+
ApiResponse, ContactObject, ExternalDocumentObject, Object, OpenApi, OpenApiService,
11
+
SecurityScheme, Tags,
12
+
auth::Bearer,
13
+
payload::{Json, PlainText},
14
+
types::Example,
15
+
};
16
+
use serde::Serialize;
17
+
use serde_json::{Value, json};
18
+
use std::sync::{Arc, Mutex};
19
+
20
+
#[derive(Debug, SecurityScheme)]
21
+
#[oai(ty = "bearer")]
22
+
struct XrpcAuth(Bearer);
23
+
24
+
#[derive(Tags)]
25
+
enum ApiTags {
26
+
/// Custom pocket APIs
27
+
#[oai(rename = "Pocket APIs")]
28
+
Pocket,
29
+
}
30
+
31
+
#[derive(Object)]
32
+
#[oai(example = true)]
33
+
struct XrpcErrorResponseObject {
34
+
/// Should correspond to an error `name` in the lexicon errors array
35
+
error: String,
36
+
/// Human-readable description and possibly additional context
37
+
message: String,
38
+
}
39
+
impl Example for XrpcErrorResponseObject {
40
+
fn example() -> Self {
41
+
Self {
42
+
error: "PreferencesNotFound".to_string(),
43
+
message: "No preferences were found for this user".to_string(),
44
+
}
45
+
}
46
+
}
47
+
type XrpcError = Json<XrpcErrorResponseObject>;
48
+
fn xrpc_error(error: impl AsRef<str>, message: impl AsRef<str>) -> XrpcError {
49
+
Json(XrpcErrorResponseObject {
50
+
error: error.as_ref().to_string(),
51
+
message: message.as_ref().to_string(),
52
+
})
53
+
}
54
+
55
+
#[derive(Debug, Object)]
56
+
#[oai(example = true)]
57
+
struct BskyPrefsObject {
58
+
/// arbitrary JSON preferences blob
59
+
preferences: Value,
60
+
}
61
+
impl Example for BskyPrefsObject {
62
+
fn example() -> Self {
63
+
Self {
64
+
preferences: json!({
65
+
"hello": "world",
66
+
}),
67
+
}
68
+
}
69
+
}
70
+
71
+
#[derive(ApiResponse)]
72
+
enum GetBskyPrefsResponse {
73
+
/// Record found
74
+
#[oai(status = 200)]
75
+
Ok(Json<BskyPrefsObject>),
76
+
/// Bad request or no preferences to return
77
+
#[oai(status = 400)]
78
+
BadRequest(XrpcError),
79
+
}
80
+
81
+
#[derive(ApiResponse)]
82
+
enum PutBskyPrefsResponse {
83
+
/// Preferences saved
84
+
#[oai(status = 200)]
85
+
Ok(PlainText<String>),
86
+
/// Bad request
87
+
#[oai(status = 400)]
88
+
BadRequest(XrpcError),
89
+
// /// Server errors
90
+
// #[oai(status = 500)]
91
+
// ServerError(XrpcError),
92
+
}
93
+
94
+
struct Xrpc {
95
+
verifier: TokenVerifier,
96
+
storage: Arc<Mutex<Storage>>,
97
+
}
98
+
99
+
#[OpenApi]
100
+
impl Xrpc {
101
+
/// com.bad-example.pocket.getPreferences
102
+
///
103
+
/// get stored preferences
104
+
#[oai(
105
+
path = "/com.bad-example.pocket.getPreferences",
106
+
method = "get",
107
+
tag = "ApiTags::Pocket"
108
+
)]
109
+
async fn pocket_get_prefs(&self, XrpcAuth(auth): XrpcAuth) -> GetBskyPrefsResponse {
110
+
let (did, aud) = match self
111
+
.verifier
112
+
.verify("com.bad-example.pocket.getPreferences", &auth.token)
113
+
.await
114
+
{
115
+
Ok(d) => d,
116
+
Err(e) => return GetBskyPrefsResponse::BadRequest(xrpc_error("boooo", e.to_string())),
117
+
};
118
+
log::info!("verified did: {did}/{aud}");
119
+
120
+
let storage = self.storage.clone();
121
+
122
+
let Ok(Ok(res)) = tokio::task::spawn_blocking(move || {
123
+
storage
124
+
.lock()
125
+
.unwrap()
126
+
.get(&did, &aud)
127
+
.inspect_err(|e| log::error!("failed to get prefs: {e}"))
128
+
})
129
+
.await
130
+
else {
131
+
return GetBskyPrefsResponse::BadRequest(xrpc_error("boooo", "failed to get from db"));
132
+
};
133
+
134
+
let Some(serialized) = res else {
135
+
return GetBskyPrefsResponse::BadRequest(xrpc_error(
136
+
"NotFound",
137
+
"could not find prefs for u",
138
+
));
139
+
};
140
+
141
+
let preferences = match serde_json::from_str(&serialized) {
142
+
Ok(v) => v,
143
+
Err(e) => {
144
+
log::error!("failed to deserialize prefs: {e}");
145
+
return GetBskyPrefsResponse::BadRequest(xrpc_error(
146
+
"boooo",
147
+
"failed to deserialize prefs",
148
+
));
149
+
}
150
+
};
151
+
152
+
GetBskyPrefsResponse::Ok(Json(BskyPrefsObject { preferences }))
153
+
}
154
+
155
+
/// com.bad-example.pocket.putPreferences
156
+
///
157
+
/// store bluesky prefs
158
+
#[oai(
159
+
path = "/com.bad-example.pocket.putPreferences",
160
+
method = "post",
161
+
tag = "ApiTags::Pocket"
162
+
)]
163
+
async fn pocket_put_prefs(
164
+
&self,
165
+
XrpcAuth(auth): XrpcAuth,
166
+
Json(prefs): Json<BskyPrefsObject>,
167
+
) -> PutBskyPrefsResponse {
168
+
let (did, aud) = match self
169
+
.verifier
170
+
.verify("com.bad-example.pocket.putPreferences", &auth.token)
171
+
.await
172
+
{
173
+
Ok(d) => d,
174
+
Err(e) => return PutBskyPrefsResponse::BadRequest(xrpc_error("boooo", e.to_string())),
175
+
};
176
+
log::info!("verified did: {did}/{aud}");
177
+
log::warn!("received prefs: {prefs:?}");
178
+
179
+
let storage = self.storage.clone();
180
+
let serialized = prefs.preferences.to_string();
181
+
182
+
let Ok(Ok(())) = tokio::task::spawn_blocking(move || {
183
+
storage
184
+
.lock()
185
+
.unwrap()
186
+
.put(&did, &aud, &serialized)
187
+
.inspect_err(|e| log::error!("failed to insert prefs: {e}"))
188
+
})
189
+
.await
190
+
else {
191
+
return PutBskyPrefsResponse::BadRequest(xrpc_error("boooo", "failed to put to db"));
192
+
};
193
+
194
+
PutBskyPrefsResponse::Ok(PlainText("saved.".to_string()))
195
+
}
196
+
}
197
+
198
+
#[derive(Debug, Clone, Serialize)]
199
+
#[serde(rename_all = "camelCase")]
200
+
struct AppViewService {
201
+
id: String,
202
+
r#type: String,
203
+
service_endpoint: String,
204
+
}
205
+
#[derive(Debug, Clone, Serialize)]
206
+
struct AppViewDoc {
207
+
id: String,
208
+
service: [AppViewService; 2],
209
+
}
210
+
/// Serve a did:web DID document so this can act as an xrpc appview
211
+
fn get_did_doc(domain: &str) -> impl Endpoint + use<> {
212
+
let doc = poem::web::Json(AppViewDoc {
213
+
id: format!("did:web:{domain}"),
214
+
service: [
215
+
AppViewService {
216
+
id: "#pocket_prefs".to_string(),
217
+
r#type: "PocketPreferences".to_string(),
218
+
service_endpoint: format!("https://{domain}"),
219
+
},
220
+
AppViewService {
221
+
id: "#bsky_appview".to_string(),
222
+
r#type: "BlueskyAppview".to_string(),
223
+
service_endpoint: format!("https://{domain}"),
224
+
},
225
+
],
226
+
});
227
+
make_sync(move |_| doc.clone())
228
+
}
229
+
230
+
pub async fn serve(domain: &str, storage: Storage) -> () {
231
+
let verifier = TokenVerifier::default();
232
+
let api_service = OpenApiService::new(
233
+
Xrpc {
234
+
verifier,
235
+
storage: Arc::new(Mutex::new(storage)),
236
+
},
237
+
"Pocket",
238
+
env!("CARGO_PKG_VERSION"),
239
+
)
240
+
.server(domain)
241
+
.url_prefix("/xrpc")
242
+
.contact(
243
+
ContactObject::new()
244
+
.name("@microcosm.blue")
245
+
.url("https://bsky.app/profile/microcosm.blue"),
246
+
)
247
+
.description(include_str!("../api-description.md"))
248
+
.external_document(ExternalDocumentObject::new("https://microcosm.blue/pocket"));
249
+
250
+
let app = Route::new()
251
+
.nest("/openapi", api_service.spec_endpoint())
252
+
.nest("/xrpc/", api_service)
253
+
.at("/.well-known/did.json", get_did_doc(domain))
254
+
.at("/", StaticFileEndpoint::new("./static/index.html"))
255
+
.with(
256
+
Cors::new()
257
+
.allow_method(Method::GET)
258
+
.allow_method(Method::POST),
259
+
)
260
+
.with(CatchPanic::new())
261
+
.with(Tracing);
262
+
263
+
let listener = TcpListener::bind("127.0.0.1:3000");
264
+
Server::new(listener).name("pocket").run(app).await.unwrap();
265
+
}
+50
pocket/src/storage.rs
···
1
+
use rusqlite::{Connection, OptionalExtension, Result};
2
+
use std::path::Path;
3
+
4
+
pub struct Storage {
5
+
con: Connection,
6
+
}
7
+
8
+
impl Storage {
9
+
pub fn connect(path: impl AsRef<Path>) -> Result<Self> {
10
+
let con = Connection::open(path)?;
11
+
con.pragma_update(None, "journal_mode", "WAL")?;
12
+
con.pragma_update(None, "synchronous", "NORMAL")?;
13
+
con.pragma_update(None, "busy_timeout", "100")?;
14
+
con.pragma_update(None, "foreign_keys", "ON")?;
15
+
Ok(Self { con })
16
+
}
17
+
pub fn init(path: impl AsRef<Path>) -> Result<Self> {
18
+
let me = Self::connect(path)?;
19
+
me.con.execute(
20
+
r#"
21
+
create table prefs (
22
+
actor text not null,
23
+
aud text not null,
24
+
pref text not null,
25
+
primary key (actor, aud)
26
+
) strict"#,
27
+
(),
28
+
)?;
29
+
Ok(me)
30
+
}
31
+
pub fn put(&self, actor: &str, aud: &str, pref: &str) -> Result<()> {
32
+
self.con.execute(
33
+
r#"insert into prefs (actor, aud, pref)
34
+
values (?1, ?2, ?3)
35
+
on conflict do update set pref = excluded.pref"#,
36
+
[actor, aud, pref],
37
+
)?;
38
+
Ok(())
39
+
}
40
+
pub fn get(&self, actor: &str, aud: &str) -> Result<Option<String>> {
41
+
self.con
42
+
.query_one(
43
+
r#"select pref from prefs
44
+
where actor = ?1 and aud = ?2"#,
45
+
[actor, aud],
46
+
|row| row.get(0),
47
+
)
48
+
.optional()
49
+
}
50
+
}
+143
pocket/src/token.rs
···
1
+
use atrium_crypto::did::parse_multikey;
2
+
use atrium_crypto::verify::Verifier;
3
+
use jwt_compact::UntrustedToken;
4
+
use serde::Deserialize;
5
+
use std::collections::HashMap;
6
+
use std::time::Duration;
7
+
use thiserror::Error;
8
+
9
+
#[derive(Debug, Deserialize)]
10
+
struct MiniDoc {
11
+
signing_key: String,
12
+
did: String,
13
+
}
14
+
15
+
#[derive(Error, Debug)]
16
+
pub enum VerifyError {
17
+
#[error("The cross-service authorization token failed verification: {0}")]
18
+
VerificationFailed(&'static str),
19
+
#[error("Error trying to resolve the DID to a signing key, retry in a moment: {0}")]
20
+
ResolutionFailed(&'static str),
21
+
}
22
+
23
+
pub struct TokenVerifier {
24
+
client: reqwest::Client,
25
+
}
26
+
27
+
impl TokenVerifier {
28
+
pub fn new() -> Self {
29
+
let client = reqwest::Client::builder()
30
+
.user_agent(format!(
31
+
"microcosm pocket v{} (dev: @bad-example.com)",
32
+
env!("CARGO_PKG_VERSION")
33
+
))
34
+
.no_proxy()
35
+
.timeout(Duration::from_secs(12)) // slingshot timeout is 10s
36
+
.build()
37
+
.unwrap();
38
+
Self { client }
39
+
}
40
+
41
+
pub async fn verify(
42
+
&self,
43
+
expected_lxm: &str,
44
+
token: &str,
45
+
) -> Result<(String, String), VerifyError> {
46
+
let Ok(untrusted) = UntrustedToken::new(token) else {
            return Err(VerifyError::VerificationFailed("could not parse jwt"));
        };
47
+
48
+
// danger! unfortunately we need to decode the DID from the jwt body before we have a public key to verify the jwt with
49
+
let Ok(untrusted_claims) =
50
+
untrusted.deserialize_claims_unchecked::<HashMap<String, String>>()
51
+
else {
52
+
return Err(VerifyError::VerificationFailed(
53
+
"could not deserialize jtw claims",
54
+
));
55
+
};
56
+
57
+
// get the (untrusted!) claimed DID
58
+
let Some(untrusted_did) = untrusted_claims.custom.get("iss") else {
59
+
return Err(VerifyError::VerificationFailed(
60
+
"jwt must include the user's did in `iss`",
61
+
));
62
+
};
63
+
64
+
// bail if it's not even a user-ish did
65
+
if !untrusted_did.starts_with("did:") {
66
+
return Err(VerifyError::VerificationFailed("iss should be a did"));
67
+
}
68
+
if untrusted_did.contains("#") {
69
+
return Err(VerifyError::VerificationFailed(
70
+
"iss should be a user did without a service identifier",
71
+
));
72
+
}
73
+
74
+
let endpoint =
75
+
"https://slingshot.microcosm.blue/xrpc/com.bad-example.identity.resolveMiniDoc";
76
+
let doc: MiniDoc = self
77
+
.client
78
+
.get(format!("{endpoint}?identifier={untrusted_did}"))
79
+
.send()
80
+
.await
81
+
.map_err(|_| VerifyError::ResolutionFailed("failed to fetch minidoc"))?
82
+
.error_for_status()
83
+
.map_err(|_| VerifyError::ResolutionFailed("non-ok response for minidoc"))?
84
+
.json()
85
+
.await
86
+
.map_err(|_| VerifyError::ResolutionFailed("failed to parse json to minidoc"))?;
87
+
88
+
// sanity check before we go ahead with this signing key
89
+
if doc.did != *untrusted_did {
90
+
return Err(VerifyError::VerificationFailed(
91
+
"wtf, resolveMiniDoc returned a doc for a different DID, slingshot bug",
92
+
));
93
+
}
94
+
95
+
let Ok((alg, public_key)) = parse_multikey(&doc.signing_key) else {
96
+
return Err(VerifyError::VerificationFailed(
97
+
"could not parse signing key form minidoc",
98
+
));
99
+
};
100
+
101
+
// i _guess_ we've successfully bootstrapped the verification of the jwt unless this fails
102
+
if let Err(e) = Verifier::default().verify(
103
+
alg,
104
+
&public_key,
105
+
&untrusted.signed_data,
106
+
untrusted.signature_bytes(),
107
+
) {
108
+
log::warn!("jwt verification failed: {e}");
109
+
return Err(VerifyError::VerificationFailed(
110
+
"jwt signature verification failed",
111
+
));
112
+
}
113
+
114
+
// past this point we should have established trust. crossing t's and dotting i's.
115
+
let did = &untrusted_did;
116
+
let claims = &untrusted_claims;
117
+
118
+
let Some(aud) = claims.custom.get("aud") else {
119
+
return Err(VerifyError::VerificationFailed("missing aud"));
120
+
};
121
+
let Some(mut aud) = aud.strip_prefix("did:web:") else {
122
+
return Err(VerifyError::VerificationFailed("expected a did:web aud"));
123
+
};
124
+
if let Some((aud_without_hash, _)) = aud.split_once("#") {
125
+
log::warn!("aud claim is missing service id fragment: {aud:?}");
126
+
aud = aud_without_hash;
127
+
}
128
+
let Some(lxm) = claims.custom.get("lxm") else {
129
+
return Err(VerifyError::VerificationFailed("missing lxm"));
130
+
};
131
+
if lxm != expected_lxm {
132
+
return Err(VerifyError::VerificationFailed("wrong lxm"));
133
+
}
134
+
135
+
Ok((did.to_string(), aud.to_string()))
136
+
}
137
+
}
138
+
139
+
impl Default for TokenVerifier {
140
+
fn default() -> Self {
141
+
Self::new()
142
+
}
143
+
}
+67
pocket/static/index.html
···
1
+
<!doctype html>
2
+
<html lang="en">
3
+
<head>
4
+
<meta charset="utf-8" />
5
+
<title>Pocket: atproto user preference storage</title>
6
+
<meta name="viewport" content="width=device-width, initial-scale=1" />
7
+
<meta name="description" content="API Documentation for Pocket, a simple user-preference storage system for atproto" />
8
+
<style>
9
+
:root {
10
+
--scalar-small: 13px;
11
+
}
12
+
.scalar-app .markdown .markdown-alert {
13
+
font-size: var(--scalar-small);
14
+
}
15
+
.sidebar-heading-link-title {
16
+
line-height: 1.2;
17
+
}
18
+
.custom-header {
19
+
height: 42px;
20
+
background-color: #221828;
21
+
box-shadow: inset 0 -1px 0 var(--scalar-border-color);
22
+
color: var(--scalar-color-1);
23
+
font-size: var(--scalar-font-size-3);
24
+
font-family: 'Iowan Old Style', 'Palatino Linotype', 'URW Palladio L', P052, serif;
25
+
padding: 0 18px;
26
+
justify-content: space-between;
27
+
}
28
+
.custom-header,
29
+
.custom-header nav {
30
+
display: flex;
31
+
align-items: center;
32
+
gap: 18px;
33
+
}
34
+
.custom-header a:hover {
35
+
color: var(--scalar-color-2);
36
+
}
37
+
38
+
.light-mode .custom-header {
39
+
background-color: thistle;
40
+
}
41
+
</style>
42
+
</head>
43
+
<body>
44
+
<header class="custom-header scalar-app">
45
+
<p>
46
+
TODO: thing
47
+
</p>
48
+
<nav>
49
+
<b>a <a href="https://microcosm.blue">microcosm</a> project</b>
50
+
<a href="https://bsky.app/profile/microcosm.blue">@microcosm.blue</a>
51
+
<a href="https://github.com/at-microcosm">github</a>
52
+
</nav>
53
+
</header>
54
+
55
+
<script id="api-reference" type="application/json" data-url="/openapi"></script>
56
+
57
+
<script>
58
+
var configuration = {
59
+
theme: 'purple',
60
+
hideModels: true,
61
+
}
62
+
document.getElementById('api-reference').dataset.configuration = JSON.stringify(configuration)
63
+
</script>
64
+
65
+
<script src="https://cdn.jsdelivr.net/npm/@scalar/api-reference"></script>
66
+
</body>
67
+
</html>
+8
quasar/Cargo.toml
+3
quasar/readme.md
+57
-129
readme.md
···
1
-
microcosm: links
2
-
================
3
-
4
-
this repo contains libraries and apps for working with cross-record references in at-protocol.
5
-
1
+
microcosm HTTP APIs + rust crates
2
+
=================================
3
+
[](https://bsky.app/profile/microcosm.blue)
4
+
[](https://discord.gg/tcDfe4PGVB)
5
+
[](https://github.com/sponsors/uniphil/)
6
+
[](https://ko-fi.com/bad_example)
6
7
7
-
App: [Constellation](./constellation/)
8
-
--------------------------------------------
8
+
Welcome! Documentation is under active development. If you like reading API docs, you'll probably hit the ground running!
9
9
10
-
A global atproto backlink index โจ
10
+
Tutorials, how-to guides, and client SDK libraries are all in the works for gentler on-ramps, but are not quite ready yet. But don't let that stop you! Hop in the [microcosm discord](https://discord.gg/tcDfe4PGVB), or post questions and tag [@bad-example.com](https://bsky.app/profile/bad-example.com) on Bluesky if you get stuck anywhere.
11
11
12
-
- Self hostable: handles the full write throughput of the global atproto firehose on a raspberry pi 4b + single SSD
13
-
- Storage efficient: less than 2GB/day disk consumption indexing all references in all lexicons and all non-atproto URLs
14
-
- Handles record deletion, account de/re-activation, and account deletion, ensuring accurate link counts and respecting users data choices
15
-
- Simple JSON API
12
+
> [!tip]
13
+
> This repository's primary home is moving to tangled: [@microcosm.blue/microcosm-rs](https://tangled.sh/@microcosm.blue/microcosm-rs). It will continue to be mirrored on [github](https://github.com/at-microcosm/microcosm-rs) for the foreseeable future, and it's fine to open issues or pulls in either place!
16
14
17
-
All social interactions in atproto tend to be represented by links (or references) between PDS records. This index can answer questions like "how many likes does a bsky post have", "who follows an account", "what are all the comments on a [frontpage](https://frontpage.fyi/) post", and more.
18
15
19
-
- **status**: works! api is unstable and likely to change, and no known instances have a full network backfill yet.
20
-
- source: [./constellation/](./constellation/)
21
-
- public instance: [constellation.microcosm.blue](https://constellation.microcosm.blue/)
22
-
23
-
_note: the public instance currently runs on a little raspberry pi in my house, feel free to use it! it comes with only with best-effort uptime, no commitment to not breaking the api for now, and possible rate-limiting. if you want to be nice you can put your project name and bsky username (or email) in your user-agent header for api requests._
24
-
25
-
26
-
App: Spacedust
27
-
--------------
28
-
29
-
A notification subscription service ๐ซ
30
-
31
-
using the same "link source" concept as [constellation](./constellation/), offer webhook notifications for new references created to records
32
-
33
-
- **status**: in design
34
-
35
-
36
-
Library: [links](./links/)
16
+
🌌 [Constellation](./constellation/)
37
17
------------------------------------
38
18
39
-
A rust crate (not published on crates.io yet) for optimistically parsing links out of arbitrary atproto PDS records, and potentially canonicalizing them
40
-
41
-
- **status**: unstable, might remain an internal lib for constellation (and spacedust, soon)
42
-
43
-
44
-
45
-
---
46
-
47
-
48
-
old notes follow, ignore
49
-
------------------------
50
-
51
-
52
-
as far as i can tell, atproto lexicons today don't follow much of a convention for referencing across documents: sometimes it's a StrongRef, sometimes it's a DID, sometimes it's a bare at-uri. lexicon authors choose any old link-sounding key name for the key in their document.
53
-
54
-
it's pretty messy so embrace the mess: atproto wants to be part of the web, so this library will also extract URLs and other URIs if you want it to. all the links.
55
-
56
-
57
-
why
58
-
---
59
-
60
-
the atproto firehose that bluesky sprays at you will contain raw _contents_ from peoples' pdses. these are isolated, decontextualized updates. it's very easy to build some kinds of interesting downstream apps off of this feed.
61
-
62
-
- bluesky posts (firesky, deletions, )
63
-
- blueksy post stats (emojis, )
64
-
- trending keywords ()
65
-
66
-
but bringing almost kind of _context_ into your project requires a big step up in complexity and potentially cost: you're entering "appview" territory. _how many likes does a post have? who follows this account?_
67
-
68
-
you own your atproto data: it's kept in your personal data repository (PDS) and noone else can write to it. when someone likes your post, they create a "like" record in their _own_ pds, and that like belongs to _them_, not to you/your post.
69
-
70
-
in the firehose you'll see a `app.bsky.feed.post` record created, with no details about who has liked it. then you'll see separate `app.bsky.feed.like` records show up for each like that comes in on that post, with no context about the post except a random-looking reference to it. storing these in order to do so is up to you!
71
-
72
-
**so, why**
73
-
74
-
everything is links, and they're a mess, but they all kinda work the same, so maybe some tooling can bring down that big step in complexity from firehose raw-content apps -> apps requiring any social context.
75
-
76
-
everything is links:
77
-
78
-
- likes
79
-
- follows
80
-
- blocks
81
-
- reposts
82
-
- quotes
83
-
84
-
some low-level things you could make from links:
85
-
86
-
- notification streams (part of ucosm)
87
-
- a global reverse index (part of ucosm)
88
-
89
-
i think that making these low-level services as easy to use as jetstream could open up pathways for building more atproto apps that operate at full scale with interesting features for reasonable effort at low cost to operate.
19
+
A global atproto interactions backlink index as a simple JSON API. Works with every lexicon, runs on a raspberry pi, consumes less than 2GiB of disk per day. Handles record deletion, account de/re-activation, and account deletion, ensuring accurate link counts while respecting users' data choices.
90
20
21
+
- Source: [./constellation/](./constellation/)
22
+
- [Public instance/API docs](https://constellation.microcosm.blue/)
23
+
- Status: used in production. APIs will change but backwards compatibility will be maintained as long as needed.
91
24
92
-
extracting links
93
-
---------------
94
25
26
+
🌠 [Spacedust](./spacedust/)
27
+
----------------------------
95
28
96
-
- low-level: pass a &str of a field value and get a parsed link back
29
+
A global atproto interactions firehose. Extracts all at-uris, DIDs, and URLs from every lexicon in the firehose, and exposes them over a websocket modelled after [jetstream](https://github.com/bluesky-social/jetstream).
97
30
98
-
- med-level: pass a &str of record in json form and get a list of parsed links + json paths back. (todo: should also handle dag-cbor prob?)
31
+
- Source: [./spacedust/](./spacedust/)
32
+
- [Public instance/API docs](https://spacedust.microcosm.blue/)
33
+
- Status: v0: the basics work and the APIs are in place! missing cursor replay, forward link storage, and delete event link hydration.
99
34
100
-
- high-ish level: pass the json record and maybe apply some pre-loaded rules based on known lexicons to get the best result.
35
+
### Demos:
101
36
102
-
for now, a link is only considered if it matches for the entire value of the record's field -- links embedded in text content are not included. note that urls in bluesky posts _will_ still be extracted, since they are broken out into facets.
37
+
- [Spacedust notifications](https://notifications.microcosm.blue/): web push notifications for _every_ atproto app
38
+
- [Zero-Bluesky real-time interaction-updating post embed](https://bsky.bad-example.com/zero-bluesky-realtime-embed/)
103
39
104
40
105
-
resolving / canonicalizing links
106
-
--------------------------------
41
+
🛰️ [Slingshot](./slingshot)
42
+
---------------------------
107
43
44
+
A fast, eager, production-grade edge cache for atproto records and identities. Pre-caches all records from the firehose and maintains a longer-term cache of requested records on disk.
108
45
109
-
### at-uris
46
+
- Source: [./slingshot/](./slingshot/)
47
+
- [Public instance/API docs](https://slingshot.microcosm.blue/)
48
+
- Status: v0: most XRPC APIs are working. cache storage is being reworked.
110
49
111
-
every at-uri has at least two equivalent forms, one with a `DID`, and one with an account handle. the at-uri spec [illustrates this by example](https://atproto.com/specs/at-uri-scheme):
112
50
113
-
- `at://did:plc:44ybard66vv44zksje25o7dz/app.bsky.feed.post/3jwdwj2ctlk26`
114
-
- `at://bnewbold.bsky.team/app.bsky.feed.post/3jwdwj2ctlk26`
51
+
🛸 [UFOs API](./ufos)
52
+
---------------------
115
53
116
-
some applications, like a reverse link index, may wish to canonicalize at-uris to a single form. the `DID`-form is stable as an account changes its handle and probably the right choice to canonicalize to, but maybe some apps would actually perfer to canonicalise to handles?
54
+
Timeseries stats and sample records for every [collection](https://atproto.com/guides/glossary#collection) ever seen in the atproto firehose. Unique users are counted in hyperloglog sketches enabling arbitrary cardinality aggregation across time buckets and/or NSIDs.
117
55
118
-
hopefully atrium will make it easy to resolve at-uris.
56
+
- Source: [./ufos/](./ufos/)
57
+
- [Public instance/API docs](https://ufos-api.microcosm.blue/)
58
+
- Status: Used in production. It has APIs and they work! Needs improvement on indexing; needs more indexes and some more APIs to the data exposed.
119
59
60
+
> [!tip]
61
+
> See also: [UFOs atproto explorer](https://ufos.microcosm.blue/) built on UFOs API. ([source](https://github.com/at-microcosm/spacedust-utils))
120
62
121
-
### urls
122
63
123
-
canonicalizing URLs is more annoying but also a bit more established. lots of details.
64
+
๐ซ [Links](./links)
65
+
-------------------
124
66
125
-
- do we have to deal with punycode?
126
-
- follow redirects (todo: only permanent ones, or all?)
127
-
- check for rel=canonical http header and possibly follow it
128
-
- check link rel=canonical meta tag and possibly follow it
129
-
- do we need to check site maps??
130
-
- do we have to care at all about AMP?
131
-
- do we want anything to do with url shorteners??
132
-
- how do multilingual sites affect this?
133
-
- do we have to care about `script type="application/ld+json"` ???
67
+
Rust library for parsing and extracting links (at-uris, DIDs, and URLs) from atproto records.
134
68
135
-
ugh. is there a crate for this.
69
+
- Source: [./links/](./links/)
70
+
- Status: not yet published to crates.io; needs some rework
136
71
137
72
138
-
### relative uris?
139
-
140
-
links might be relative, in which case they might need to be made absolute before being useful. is that a concern for this library, or up to the user? (seems like we might not have context here to determine its absolute)
141
-
142
-
143
-
### canonicalizing
144
-
145
-
there should be a few async functions available to canonicalize already-parsed links.
146
-
147
-
- what happens if a link can't be resolved?
73
+
🛩️ [Jetstream](./jetstream)
74
+
---------------------------
148
75
76
+
A low-overhead jetstream client with cursor handling and automatic reconnect.
149
77
150
-
---
78
+
- Source: [./jetstream/](./jetstream/)
79
+
- Status: used in multiple apps in production, but not yet published to crates.io; some rework planned
151
80
152
-
- using `tinyjson` because it's nice -- maybe should switch to serde_json to share deps with atrium?
81
+
> [!tip]
82
+
> See also: [Rocketman](https://github.com/teal-fm/cadet/tree/main/rocketman), another excellent rust jetstream client which shares some lineage and _is_ published on crates.io.
153
83
154
-
- would use atrium for parsing at-uris, but it's not in there. there's a did-only version in the non-lib commands.rs. its identifier parser is strict to did + handle, which makes sense, but for our purposes we might want to allow unknown methods too?
155
84
156
-
- rsky-syntax has an aturi
157
-
- adenosyne also
158
-
- might come back to these
159
85
86
+
🎭 Deprecated: [Who am I](./who-am-i)
87
+
-------------------------------------
160
88
161
-
-------
89
+
An identity bridge for microcosm demos, that kinda worked. Fixing its problems is about equivalent to reinventing a lot of OIDC, so it's being retired.
162
90
163
-
rocks
91
+
- Source: [./who-am-i/](./who-am-i/)
92
+
- Status: ready for retirement.
164
93
165
-
```bash
166
-
ROCKSDB_LIB_DIR=/nix/store/z2chn0hsik0clridr8mlprx1cngh1g3c-rocksdb-9.7.3/lib/ cargo build
167
-
```
94
+
> [!warning]
95
+
> `who-am-i` is still in use for the Spacedust Notifications demo, but that will hopefully be migrated to use atproto oauth directly instead.
+12
reflector/Cargo.toml
···
1
+
[package]
2
+
name = "reflector"
3
+
version = "0.1.0"
4
+
edition = "2024"
5
+
6
+
[dependencies]
7
+
clap = { version = "4.5.47", features = ["derive"] }
8
+
log = "0.4.28"
9
+
poem = "3.1.12"
10
+
serde = { version = "1.0.219", features = ["derive"] }
11
+
tokio = "1.47.1"
12
+
tracing-subscriber = { version = "0.3.20", features = ["env-filter"] }
+9
reflector/readme.md
···
1
+
# reflector
2
+
3
+
a tiny did:web service server that maps subdomains to a single service endpoint
4
+
5
+
receiving requests from multiple subdomains is left as a problem for the reverse proxy to solve, since acme wildcard certificates (e.g. letsencrypt) require the most complicated and involved challenge type (DNS).
6
+
7
+
caddy [has good support for](https://caddyserver.com/docs/caddyfile/patterns#wildcard-certificates) configuring the wildcard DNS challenge with various DNS providers, and also supports [on-demand](https://caddyserver.com/docs/automatic-https#using-on-demand-tls) provisioning via the simpler methods.
8
+
9
+
if you only need a small fixed number of subdomains, you can also use certbot or otherwise individually configure them in your reverse proxy.
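
what that looks like from the outside (hostnames illustrative; the `/ask` endpoint and port come from `src/main.rs` below):

```bash
# any mapped subdomain serves a did:web document pointing at the one service endpoint
curl -s https://myservice.example.com/.well-known/did.json

# caddy's on-demand TLS "ask" check: 200 for direct subdomains of --domain,
# 403 for everything else (including sub-subdomains)
curl -s -o /dev/null -w '%{http_code}\n' 'http://127.0.0.1:3001/ask?domain=myservice.example.com'
```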
+112
reflector/src/main.rs
···
1
+
use clap::Parser;
2
+
use poem::{
3
+
EndpointExt, Response, Route, Server, get, handler,
4
+
http::StatusCode,
5
+
listener::TcpListener,
6
+
middleware::{AddData, Tracing},
7
+
web::{Data, Json, Query, TypedHeader, headers::Host},
8
+
};
9
+
use serde::{Deserialize, Serialize};
10
+
11
+
#[handler]
12
+
fn hello() -> String {
13
+
"ษนoสษวส
โ
วษน".to_string()
14
+
}
15
+
16
+
#[derive(Debug, Serialize)]
17
+
struct DidDoc {
18
+
id: String,
19
+
service: [DidService; 1],
20
+
}
21
+
22
+
#[derive(Debug, Clone, Serialize)]
23
+
#[serde(rename_all = "camelCase")]
24
+
struct DidService {
25
+
id: String,
26
+
r#type: String,
27
+
service_endpoint: String,
28
+
}
29
+
30
+
#[handler]
31
+
fn did_doc(TypedHeader(host): TypedHeader<Host>, service: Data<&DidService>) -> Json<DidDoc> {
32
+
Json(DidDoc {
33
+
id: format!("did:web:{}", host.hostname()),
34
+
service: [service.clone()],
35
+
})
36
+
}
37
+
38
+
#[derive(Deserialize)]
39
+
struct AskQuery {
40
+
domain: String,
41
+
}
42
+
#[handler]
43
+
fn ask_caddy(
44
+
Data(parent): Data<&Option<String>>,
45
+
Query(AskQuery { domain }): Query<AskQuery>,
46
+
) -> Response {
47
+
if let Some(parent) = parent
48
+
&& let Some(prefix) = domain.strip_suffix(&format!(".{parent}"))
49
+
&& !prefix.contains('.')
50
+
{
51
+
// no sub-sub-domains allowed
52
+
return Response::builder().body("ok");
53
+
};
54
+
Response::builder()
55
+
.status(StatusCode::FORBIDDEN)
56
+
.body("nope")
57
+
}
58
+
59
+
/// A tiny did:web service server that maps subdomains to a single service endpoint
60
+
#[derive(Parser, Debug, Clone)]
61
+
#[command(version, about, long_about = None)]
62
+
struct Args {
63
+
/// The DID document service ID to serve
64
+
///
65
+
/// must start with a '#', like `#bsky_appview`
66
+
#[arg(long)]
67
+
id: String,
68
+
/// Service type
69
+
///
70
+
/// Not sure exactly what its requirements are. 'BlueskyAppview' for example
71
+
#[arg(long)]
72
+
r#type: String,
73
+
/// The HTTPS endpoint for the service
74
+
#[arg(long)]
75
+
service_endpoint: String,
76
+
/// The parent domain; requests should come from subdomains of this
77
+
#[arg(long)]
78
+
domain: Option<String>,
79
+
}
80
+
81
+
impl From<Args> for DidService {
82
+
fn from(a: Args) -> Self {
83
+
Self {
84
+
id: a.id,
85
+
r#type: a.r#type,
86
+
service_endpoint: a.service_endpoint,
87
+
}
88
+
}
89
+
}
90
+
91
+
#[tokio::main(flavor = "current_thread")]
92
+
async fn main() {
93
+
tracing_subscriber::fmt::init();
94
+
    log::info!("ɹoʇɔǝʅⅎǝɹ");
95
+
96
+
let args = Args::parse();
97
+
let domain = args.domain.clone();
98
+
let service: DidService = args.into();
99
+
100
+
Server::new(TcpListener::bind("0.0.0.0:3001"))
101
+
.run(
102
+
Route::new()
103
+
.at("/", get(hello))
104
+
.at("/.well-known/did.json", get(did_doc))
105
+
.at("/ask", get(ask_caddy))
106
+
.with(AddData::new(service))
107
+
.with(AddData::new(domain))
108
+
.with(Tracing),
109
+
)
110
+
.await
111
+
.unwrap()
112
+
}
+5
-5
slingshot/Cargo.toml
+5
-5
slingshot/Cargo.toml
···
4
4
edition = "2024"
5
5
6
6
[dependencies]
7
-
atrium-api = { version = "0.25.4", default-features = false }
8
-
atrium-common = "0.1.2"
9
-
atrium-identity = "0.1.5"
10
-
atrium-oauth = "0.1.3"
7
+
atrium-api = { git = "https://github.com/uniphil/atrium.git", branch = "fix/resolve-handle-https-accept-whitespace", default-features = false }
8
+
atrium-common = { git = "https://github.com/uniphil/atrium.git", branch = "fix/resolve-handle-https-accept-whitespace" }
9
+
atrium-identity = { git = "https://github.com/uniphil/atrium.git", branch = "fix/resolve-handle-https-accept-whitespace" }
10
+
atrium-oauth = { git = "https://github.com/uniphil/atrium.git", branch = "fix/resolve-handle-https-accept-whitespace" }
11
11
clap = { version = "4.5.41", features = ["derive"] }
12
12
ctrlc = "3.4.7"
13
13
foyer = { version = "0.18.0", features = ["serde"] }
···
17
17
log = "0.4.27"
18
18
metrics = "0.24.2"
19
19
metrics-exporter-prometheus = { version = "0.17.1", features = ["http-listener"] }
20
-
poem = { version = "3.1.12", features = ["acme"] }
20
+
poem = { version = "3.1.12", features = ["acme", "static-files"] }
21
21
poem-openapi = { version = "5.1.16", features = ["scalar"] }
22
22
reqwest = { version = "0.12.22", features = ["json"] }
23
23
rustls = "0.23.31"
+25
-5
slingshot/api-description.md
+25
-5
slingshot/api-description.md
···
16
16
17
17
### Current status
18
18
19
-
Slingshot is currently in a **v0, pre-release state**. There is one production instance and you can use it! Expect short downtimes for restarts as development progresses and lower cache hit-rates as the internal storage caches are adjusted and reset.
19
+
> [!important]
20
+
> Slingshot is currently in a **v0, pre-release state**. There is one production instance and you can use it! Expect short downtimes for restarts as development progresses and lower cache hit-rates as the internal storage caches are adjusted and reset.
20
21
21
22
The core APIs will not change, since they are standard third-party `com.atproto` query APIs from ATProtocol.
22
23
···
41
42
42
43
Two core standard query APIs are supported to balance convenience and trust. They both fetch [records](https://atproto.com/guides/glossary#record):
43
44
44
-
### [`com.atproto.repo.getRecord`](#tag/comatproto-queries/GET/xrpc/com.atproto.repo.getRecord)
45
+
### [`com.atproto.repo.getRecord`](#tag/comatproto-queries/get/xrpc/com.atproto.repo.getRecord)
45
46
46
47
- convenient `JSON` response format
47
48
- cannot be proven authentic
48
49
49
-
### [`com.atproto.sync.getRecord`](#tag/comatproto-queries/GET/xrpc/com.atproto.sync.getRecord)
50
+
### [`com.atproto.sync.getRecord`](#tag/comatproto-queries/get/xrpc/com.atproto.sync.getRecord)
50
51
51
52
- [`DAG-CBOR`](https://atproto.com/specs/data-model)-encoded response requires extra libraries to decode, but
52
53
- includes a cryptographic proof of authenticity!
···
54
55
_(work on this endpoint is in progress)_
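
To make the convenience/trust trade concrete, here's a minimal sketch of calling the JSON variant with `reqwest` (sketch only — the record coordinates are just an example, and `tokio`/`serde_json` are assumed dependencies; any repo/collection/rkey works the same way):

```rust
// Sketch: fetch one record as JSON from a Slingshot instance.
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let record: serde_json::Value = reqwest::Client::new()
        .get("https://slingshot.microcosm.blue/xrpc/com.atproto.repo.getRecord")
        .query(&[
            ("repo", "bad-example.com"), // a handle or a DID
            ("collection", "app.bsky.actor.profile"),
            ("rkey", "self"),
        ])
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("{record:#}");
    Ok(())
}
```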
55
56
56
57
58
+
## Service proxying
59
+
60
+
Clients can proxy atproto queries through their own PDS with [Service Proxying](https://atproto.com/specs/xrpc#service-proxying), and this is supported by Slingshot. The Slingshot instance must be started with the `--domain` argument specified.
61
+
62
+
Service-proxied requests can specify a Slingshot instance via the `atproto-proxy` header:
63
+
64
+
```http
65
+
GET /xrpc/com.bad-example.identity.resolveMiniDoc?identifier=bad-example.com
66
+
Host: <your pds>
67
+
atproto-proxy: did:web:<slingshot domain>#slingshot
68
+
```
69
+
70
+
Where `<your pds>` is the user's own PDS host, and `<slingshot domain>` is the domain that the slingshot instance is deployed at (e.g. `slingshot.microcosm.blue`). See the [Service Proxying](https://atproto.com/specs/xrpc#service-proxying) docs for more.
71
+
72
+
> [!tip]
73
+
> Service proxying is supported but completely optional. All APIs are directly accessible over the public internet, and GeoDNS helps route users to the closest instance to them for the lowest possible latency. (_note: deploying multiple slingshot instances with GeoDNS is still TODO_)
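
For clients that do use proxying, it's an ordinary authenticated PDS request plus the header. A hedged sketch with `reqwest` (and `serde_json`), where the PDS base URL and access token are placeholders:

```rust
use reqwest::Client;

// Sketch: a service-proxied Slingshot query, sent to the user's own PDS.
// `pds` and `token` are placeholders; the PDS resolves the did:web in the
// `atproto-proxy` header and forwards the query to its `#slingshot` service.
async fn proxied_mini_doc(pds: &str, token: &str) -> Result<serde_json::Value, reqwest::Error> {
    Client::new()
        .get(format!("{pds}/xrpc/com.bad-example.identity.resolveMiniDoc"))
        .query(&[("identifier", "bad-example.com")])
        .header("atproto-proxy", "did:web:slingshot.microcosm.blue#slingshot")
        .bearer_auth(token)
        .send()
        .await?
        .error_for_status()?
        .json()
        .await
}
```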
74
+
75
+
57
76
## Ergonomic APIs
58
77
59
78
- Slingshot also offers variants of the `getRecord` endpoints that accept a full `at-uri` as a parameter, to save clients from needing to parse and validate all parts of a record location (see the sketch after this list).
60
79
61
-
- Bi-directionally verifying identity endpoints, so you can directly exchange atproto [`handle`](https://atproto.com/guides/glossary#handle)s for [`DID`](https://atproto.com/guides/glossary#did-decentralized-id)s without extra steps, plus a convenient [Mini-Doc](#tag/slingshot-specific-queries/GET/xrpc/com.bad-example.identity.resolveMiniDoc) verified identity summary.
80
+
- Bi-directionally verifying identity endpoints, so you can directly exchange atproto [`handle`](https://atproto.com/guides/glossary#handle)s for [`DID`](https://atproto.com/guides/glossary#did-decentralized-id)s without extra steps, plus a convenient [Mini-Doc](#tag/slingshot-specific-queries/get/xrpc/com.bad-example.identity.resolveMiniDoc) verified identity summary.
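
For instance, a hedged sketch of the at-uri variant (note: the `uri` parameter name here is an assumption; the [endpoint reference](#tag/slingshot-specific-queries/GET/xrpc/com.bad-example.repo.getUriRecord) is authoritative):

```rust
// Sketch: fetch a record by full at-uri in one call. The `uri` parameter
// name is an assumption; check the endpoint reference for the real shape.
async fn get_uri_record(at_uri: &str) -> Result<serde_json::Value, reqwest::Error> {
    reqwest::Client::new()
        .get("https://slingshot.microcosm.blue/xrpc/com.bad-example.repo.getUriRecord")
        .query(&[("uri", at_uri)])
        .send()
        .await?
        .error_for_status()?
        .json()
        .await
}
```

Usage would look like `get_uri_record("at://bad-example.com/app.bsky.actor.profile/self").await`, with no client-side parsing of the record location.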
62
81
63
82
64
83
## Part of microcosm
···
70
89
- [🌌 Constellation](https://constellation.microcosm.blue/), a global backlink index (all social interactions in atproto are links!)
71
90
- [🌠 Spacedust](https://spacedust.microcosm.blue/), a firehose of all social interactions
72
91
73
-
All microcosm projects are [open source](https://tangled.sh/@bad-example.com/microcosm-links). **You can help sustain Slingshot** and all of microcosm by becoming a [Github sponsor](https://github.com/sponsors/uniphil/) or a [Ko-fi supporter](https://ko-fi.com/bad_example)!
92
+
> [!success]
93
+
> All microcosm projects are [open source](https://tangled.sh/@bad-example.com/microcosm-links). **You can help sustain Slingshot** and all of microcosm by becoming a [Github sponsor](https://github.com/sponsors/uniphil/) or a [Ko-fi supporter](https://ko-fi.com/bad_example)!
+8
slingshot/src/error.rs
+8
slingshot/src/error.rs
···
47
47
}
48
48
49
49
#[derive(Debug, Error)]
50
+
pub enum HealthCheckError {
51
+
#[error("failed to send checkin: {0}")]
52
+
HealthCheckError(#[from] reqwest::Error),
53
+
}
54
+
55
+
#[derive(Debug, Error)]
50
56
pub enum MainTaskError {
51
57
#[error(transparent)]
52
58
ConsumerTaskError(#[from] ConsumerError),
···
54
60
ServerTaskError(#[from] ServerError),
55
61
#[error(transparent)]
56
62
IdentityTaskError(#[from] IdentityError),
63
+
#[error(transparent)]
64
+
HealthCheckError(#[from] HealthCheckError),
57
65
#[error("firehose cache failed to close: {0}")]
58
66
FirehoseCacheCloseError(foyer::Error),
59
67
}
+32
slingshot/src/healthcheck.rs
+32
slingshot/src/healthcheck.rs
···
1
+
use crate::error::HealthCheckError;
2
+
use reqwest::Client;
3
+
use std::time::Duration;
4
+
use tokio::time::sleep;
5
+
use tokio_util::sync::CancellationToken;
6
+
7
+
pub async fn healthcheck(
8
+
endpoint: String,
9
+
shutdown: CancellationToken,
10
+
) -> Result<(), HealthCheckError> {
11
+
let client = Client::builder()
12
+
.user_agent(format!(
13
+
"microcosm slingshot v{} (dev: @bad-example.com)",
14
+
env!("CARGO_PKG_VERSION")
15
+
))
16
+
.no_proxy()
17
+
.timeout(Duration::from_secs(10))
18
+
.build()?;
19
+
20
+
loop {
21
+
tokio::select! {
22
+
res = client.get(&endpoint).send() => {
23
+
let _ = res
24
+
.and_then(|r| r.error_for_status())
25
+
.inspect_err(|e| log::error!("failed to send healthcheck: {e}"));
26
+
},
27
+
_ = shutdown.cancelled() => break,
28
+
}
29
+
sleep(Duration::from_secs(51)).await;
30
+
}
31
+
Ok(())
32
+
}
+4
-3
slingshot/src/identity.rs
+4
-3
slingshot/src/identity.rs
···
240
240
Err(atrium_identity::Error::NotFound) => {
241
241
Ok(IdentityVal(UtcDateTime::now(), IdentityData::NotFound))
242
242
}
243
-
Err(other) => Err(foyer::Error::Other(Box::new(
244
-
IdentityError::ResolutionFailed(other),
245
-
))),
243
+
Err(other) => Err(foyer::Error::Other(Box::new({
244
+
log::debug!("other error resolving handle: {other:?}");
245
+
IdentityError::ResolutionFailed(other)
246
+
}))),
246
247
}
247
248
}
248
249
})
+2
slingshot/src/lib.rs
+2
slingshot/src/lib.rs
···
1
1
mod consumer;
2
2
pub mod error;
3
3
mod firehose_cache;
4
+
mod healthcheck;
4
5
mod identity;
5
6
mod record;
6
7
mod server;
7
8
8
9
pub use consumer::consume;
9
10
pub use firehose_cache::firehose_cache;
11
+
pub use healthcheck::healthcheck;
10
12
pub use identity::Identity;
11
13
pub use record::{CachedRecord, ErrorResponseObject, Repo};
12
14
pub use server::serve;
+16
-3
slingshot/src/main.rs
+16
-3
slingshot/src/main.rs
···
1
1
// use foyer::HybridCache;
2
2
// use foyer::{Engine, DirectFsDeviceOptions, HybridCacheBuilder};
3
3
use metrics_exporter_prometheus::PrometheusBuilder;
4
-
use slingshot::{Identity, Repo, consume, error::MainTaskError, firehose_cache, serve};
4
+
use slingshot::{
5
+
Identity, Repo, consume, error::MainTaskError, firehose_cache, healthcheck, serve,
6
+
};
5
7
use std::path::PathBuf;
6
8
7
9
use clap::Parser;
···
30
32
/// - HTTPS certs will be automatically configured with Acme/letsencrypt
31
33
/// - TODO: a rate-limiter will be installed
32
34
#[arg(long)]
33
-
host: Option<String>,
35
+
domain: Option<String>,
34
36
/// email address for letsencrypt contact
35
37
///
36
38
/// recommended in production, i guess?
···
44
46
/// recommended in production, but mind the file permissions.
45
47
#[arg(long)]
46
48
certs: Option<PathBuf>,
49
+
/// a web address to send healthcheck pings to every ~51s or so
50
+
#[arg(long)]
51
+
healthcheck: Option<String>,
47
52
}
48
53
49
54
#[tokio::main]
···
104
109
server_cache_handle,
105
110
identity,
106
111
repo,
107
-
args.host,
112
+
args.domain,
108
113
args.acme_contact,
109
114
args.certs,
110
115
server_shutdown,
···
126
131
.await?;
127
132
Ok(())
128
133
});
134
+
135
+
if let Some(hc) = args.healthcheck {
136
+
let healthcheck_shutdown = shutdown.clone();
137
+
tasks.spawn(async move {
138
+
healthcheck(hc, healthcheck_shutdown).await?;
139
+
Ok(())
140
+
});
141
+
}
129
142
130
143
tokio::select! {
131
144
_ = shutdown.cancelled() => log::warn!("shutdown requested"),
+159
-50
slingshot/src/server.rs
+159
-50
slingshot/src/server.rs
···
13
13
14
14
use poem::{
15
15
Endpoint, EndpointExt, Route, Server,
16
-
endpoint::make_sync,
16
+
endpoint::{StaticFileEndpoint, make_sync},
17
17
http::Method,
18
18
listener::{
19
19
Listener, TcpListener,
20
20
acme::{AutoCert, LETS_ENCRYPT_PRODUCTION},
21
21
},
22
-
middleware::{Cors, Tracing},
22
+
middleware::{CatchPanic, Cors, Tracing},
23
23
};
24
24
use poem_openapi::{
25
25
ApiResponse, ContactObject, ExternalDocumentObject, Object, OpenApi, OpenApiService, Tags,
···
86
86
87
87
fn bad_request_handler_resolve_mini(err: poem::Error) -> ResolveMiniIDResponse {
88
88
ResolveMiniIDResponse::BadRequest(Json(XrpcErrorResponseObject {
89
+
error: "InvalidRequest".to_string(),
90
+
message: format!("Bad request, here's some info that maybe should not be exposed: {err}"),
91
+
}))
92
+
}
93
+
94
+
fn bad_request_handler_resolve_handle(err: poem::Error) -> JustDidResponse {
95
+
JustDidResponse::BadRequest(Json(XrpcErrorResponseObject {
89
96
error: "InvalidRequest".to_string(),
90
97
message: format!("Bad request, here's some info that maybe should not be exposed: {err}"),
91
98
}))
···
182
189
BadRequest(XrpcError),
183
190
}
184
191
192
+
#[derive(Object)]
193
+
#[oai(example = true)]
194
+
struct FoundDidResponseObject {
195
+
/// the DID, bi-directionally verified if using Slingshot
196
+
did: String,
197
+
}
198
+
impl Example for FoundDidResponseObject {
199
+
fn example() -> Self {
200
+
Self { did: example_did() }
201
+
}
202
+
}
203
+
204
+
#[derive(ApiResponse)]
205
+
#[oai(bad_request_handler = "bad_request_handler_resolve_handle")]
206
+
enum JustDidResponse {
207
+
/// Resolution succeeded
208
+
#[oai(status = 200)]
209
+
Ok(Json<FoundDidResponseObject>),
210
+
/// Bad request, failed to resolve, or failed to verify
211
+
///
212
+
/// `error` will be one of `InvalidRequest`, `HandleNotFound`.
213
+
#[oai(status = 400)]
214
+
BadRequest(XrpcError),
215
+
/// Something went wrong trying to complete the request
216
+
#[oai(status = 500)]
217
+
ServerError(XrpcError),
218
+
}
219
+
185
220
struct Xrpc {
186
221
cache: HybridCache<String, CachedRecord>,
187
222
identity: Identity,
···
192
227
enum ApiTags {
193
228
/// Core ATProtocol-compatible APIs.
194
229
///
195
-
/// Upstream documentation is available at
196
-
/// https://docs.bsky.app/docs/category/http-reference
230
+
/// > [!tip]
231
+
/// > Upstream documentation is available at
232
+
/// > https://docs.bsky.app/docs/category/http-reference
197
233
///
198
234
/// These queries are usually executed directly against the PDS containing
199
235
/// the data being requested. Slingshot offers a caching view of the same
···
206
242
/// more convenient [request parameters](#tag/slingshot-specific-queries/GET/xrpc/com.bad-example.repo.getUriRecord)
207
243
/// or [response formats](#tag/slingshot-specific-queries/GET/xrpc/com.bad-example.identity.resolveMiniDoc).
208
244
///
209
-
/// At the moment, these are namespaced under the `com.bad-example.*` NSID
210
-
/// prefix, but as they stabilize they will likely be moved to either
211
-
/// `blue.microcosm.*` or a slingshot-instance-specific lexicon under its
212
-
/// `did:web` (ie., `blue.microcosm.slingshot.*`). Maybe one day they can
213
-
/// be promoted to the [Lexicon Community](https://discourse.lexicon.community/)
214
-
/// namespace.
245
+
/// > [!important]
246
+
/// > At the moment, these are namespaced under the `com.bad-example.*` NSID
247
+
/// > prefix, but as they stabilize they may be migrated to an org namespace
248
+
/// > like `blue.microcosm.*`. Support for asliasing to `com.bad-example.*`
249
+
/// > will be maintained as long as it's in use.
215
250
#[oai(rename = "slingshot-specific queries")]
216
251
Custom,
217
252
}
···
222
257
///
223
258
/// Get a single record from a repository. Does not require auth.
224
259
///
225
-
/// See also the [canonical `com.atproto` XRPC documentation](https://docs.bsky.app/docs/api/com-atproto-repo-get-record)
226
-
/// that this endpoint aims to be compatible with.
260
+
/// > [!tip]
261
+
/// > See also the [canonical `com.atproto` XRPC documentation](https://docs.bsky.app/docs/api/com-atproto-repo-get-record)
262
+
/// > that this endpoint aims to be compatible with.
227
263
#[oai(
228
264
path = "/com.atproto.repo.getRecord",
229
265
method = "get",
···
244
280
///
245
281
/// If not specified, then return the most recent version.
246
282
///
247
-
/// If specified and a newer version of the record exists, returns 404 not
248
-
/// found. That is: slingshot only retains the most recent version of a
249
-
/// record. (TODO: verify bsky behaviour for mismatched/old CID)
283
+
/// If a stale `CID` is specified and a newer version of the record
284
+
/// exists, Slingshot returns a `NotFound` error. That is: Slingshot
285
+
/// only retains the most recent version of a record.
250
286
Query(cid): Query<Option<String>>,
251
287
) -> GetRecordResponse {
252
288
self.get_record_impl(repo, collection, rkey, cid).await
···
273
309
///
274
310
/// If not specified, then return the most recent version.
275
311
///
276
-
/// If specified and a newer version of the record exists, returns 404 not
277
-
/// found. That is: slingshot only retains the most recent version of a
278
-
/// record.
312
+
/// > [!tip]
313
+
/// > If specified and a newer version of the record exists, returns 404 not
314
+
/// > found. That is: slingshot only retains the most recent version of a
315
+
/// > record.
279
316
Query(cid): Query<Option<String>>,
280
317
) -> GetRecordResponse {
281
318
let bad_at_uri = || {
···
314
351
.await
315
352
}
316
353
354
+
/// com.atproto.identity.resolveHandle
355
+
///
356
+
/// Resolves an atproto [`handle`](https://atproto.com/guides/glossary#handle)
357
+
/// (hostname) to a [`DID`](https://atproto.com/guides/glossary#did-decentralized-id).
358
+
///
359
+
/// > [!tip]
360
+
/// > Compatibility note: Slingshot will **always bi-directionally verify
361
+
/// > against the DID document**, which is optional according to the
362
+
/// > authoritative lexicon.
363
+
///
364
+
/// > [!tip]
365
+
/// > See the [canonical `com.atproto` XRPC documentation](https://docs.bsky.app/docs/api/com-atproto-identity-resolve-handle)
366
+
/// > that this endpoint aims to be compatible with.
367
+
#[oai(
368
+
path = "/com.atproto.identity.resolveHandle",
369
+
method = "get",
370
+
tag = "ApiTags::ComAtproto"
371
+
)]
372
+
async fn resolve_handle(
373
+
&self,
374
+
/// The handle to resolve.
375
+
#[oai(example = "example_handle")]
376
+
Query(handle): Query<String>,
377
+
) -> JustDidResponse {
378
+
let Ok(handle) = Handle::new(handle) else {
379
+
return JustDidResponse::BadRequest(xrpc_error("InvalidRequest", "not a valid handle"));
380
+
};
381
+
382
+
let Ok(alleged_did) = self.identity.handle_to_did(handle.clone()).await else {
383
+
return JustDidResponse::ServerError(xrpc_error("Failed", "Could not resolve handle"));
384
+
};
385
+
386
+
let Some(alleged_did) = alleged_did else {
387
+
return JustDidResponse::BadRequest(xrpc_error(
388
+
"HandleNotFound",
389
+
"Could not resolve handle to a DID",
390
+
));
391
+
};
392
+
393
+
let Ok(partial_doc) = self.identity.did_to_partial_mini_doc(&alleged_did).await else {
394
+
return JustDidResponse::ServerError(xrpc_error("Failed", "Could not fetch DID doc"));
395
+
};
396
+
397
+
let Some(partial_doc) = partial_doc else {
398
+
return JustDidResponse::BadRequest(xrpc_error(
399
+
"HandleNotFound",
400
+
"Resolved handle but could not find DID doc for the DID",
401
+
));
402
+
};
403
+
404
+
if partial_doc.unverified_handle != handle {
405
+
return JustDidResponse::BadRequest(xrpc_error(
406
+
"HandleNotFound",
407
+
"Resolved handle failed bi-directional validation",
408
+
));
409
+
}
410
+
411
+
JustDidResponse::Ok(Json(FoundDidResponseObject {
412
+
did: alleged_did.to_string(),
413
+
}))
414
+
}
415
+
317
416
/// com.bad-example.identity.resolveMiniDoc
318
417
///
319
418
/// Like [com.atproto.identity.resolveIdentity](https://docs.bsky.app/docs/api/com-atproto-identity-resolve-identity)
···
340
439
let Ok(alleged_handle) = Handle::new(identifier) else {
341
440
return invalid("identifier was not a valid DID or handle");
342
441
};
343
-
if let Ok(res) = self.identity.handle_to_did(alleged_handle.clone()).await {
344
-
if let Some(did) = res {
345
-
// we did it joe
346
-
unverified_handle = Some(alleged_handle);
347
-
did
348
-
} else {
349
-
return invalid("Could not resolve handle identifier to a DID");
442
+
443
+
match self.identity.handle_to_did(alleged_handle.clone()).await {
444
+
Ok(res) => {
445
+
if let Some(did) = res {
446
+
// we did it joe
447
+
unverified_handle = Some(alleged_handle);
448
+
did
449
+
} else {
450
+
return invalid("Could not resolve handle identifier to a DID");
451
+
}
350
452
}
351
-
} else {
352
-
// TODO: ServerError not BadRequest
353
-
return invalid("errored while trying to resolve handle to DID");
453
+
Err(e) => {
454
+
log::debug!("failed to resolve handle: {e}");
455
+
// TODO: ServerError not BadRequest
456
+
return invalid("errored while trying to resolve handle to DID");
457
+
}
354
458
}
355
459
}
356
460
};
···
415
519
"repo was not a valid DID or handle",
416
520
));
417
521
};
418
-
if let Ok(res) = self.identity.handle_to_did(handle).await {
419
-
if let Some(did) = res {
420
-
did
421
-
} else {
422
-
return GetRecordResponse::BadRequest(xrpc_error(
423
-
"InvalidRequest",
424
-
"Could not resolve handle repo to a DID",
522
+
match self.identity.handle_to_did(handle).await {
523
+
Ok(res) => {
524
+
if let Some(did) = res {
525
+
did
526
+
} else {
527
+
return GetRecordResponse::BadRequest(xrpc_error(
528
+
"InvalidRequest",
529
+
"Could not resolve handle repo to a DID",
530
+
));
531
+
}
532
+
}
533
+
Err(e) => {
534
+
log::debug!("handle resolution failed: {e}");
535
+
return GetRecordResponse::ServerError(xrpc_error(
536
+
"ResolutionFailed",
537
+
"errored while trying to resolve handle to DID",
425
538
));
426
539
}
427
-
} else {
428
-
return GetRecordResponse::ServerError(xrpc_error(
429
-
"ResolutionFailed",
430
-
"errored while trying to resolve handle to DID",
431
-
));
432
540
}
433
541
}
434
542
};
···
567
675
///
568
676
/// - PDS proxying offers a level of client IP anonymity from slingshot
569
677
/// - slingshot *may* implement more generous per-user rate-limits for proxied requests in the future
570
-
fn get_did_doc(host: &str) -> impl Endpoint + use<> {
678
+
fn get_did_doc(domain: &str) -> impl Endpoint + use<> {
571
679
let doc = poem::web::Json(AppViewDoc {
572
-
id: format!("did:web:{host}"),
680
+
id: format!("did:web:{domain}"),
573
681
service: [AppViewService {
574
682
id: "#slingshot".to_string(),
575
683
r#type: "SlingshotRecordProxy".to_string(),
576
-
service_endpoint: format!("https://{host}"),
684
+
service_endpoint: format!("https://{domain}"),
577
685
}],
578
686
});
579
687
make_sync(move |_| doc.clone())
···
583
691
cache: HybridCache<String, CachedRecord>,
584
692
identity: Identity,
585
693
repo: Repo,
586
-
host: Option<String>,
694
+
domain: Option<String>,
587
695
acme_contact: Option<String>,
588
696
certs: Option<PathBuf>,
589
697
shutdown: CancellationToken,
···
598
706
"Slingshot",
599
707
env!("CARGO_PKG_VERSION"),
600
708
)
601
-
.server(if let Some(ref h) = host {
709
+
.server(if let Some(ref h) = domain {
602
710
format!("https://{h}")
603
711
} else {
604
712
"http://localhost:3000".to_string()
···
615
723
));
616
724
617
725
let mut app = Route::new()
618
-
.nest("/", api_service.scalar())
619
-
.nest("/openapi.json", api_service.spec_endpoint())
726
+
.at("/", StaticFileEndpoint::new("./static/index.html"))
727
+
.nest("/openapi", api_service.spec_endpoint())
620
728
.nest("/xrpc/", api_service);
621
729
622
-
if let Some(host) = host {
730
+
if let Some(domain) = domain {
623
731
rustls::crypto::aws_lc_rs::default_provider()
624
732
.install_default()
625
733
.expect("alskfjalksdjf");
626
734
627
-
app = app.at("/.well-known/did.json", get_did_doc(&host));
735
+
app = app.at("/.well-known/did.json", get_did_doc(&domain));
628
736
629
737
let mut auto_cert = AutoCert::builder()
630
738
.directory_url(LETS_ENCRYPT_PRODUCTION)
631
-
.domain(&host);
739
+
.domain(&domain);
632
740
if let Some(contact) = acme_contact {
633
741
auto_cert = auto_cert.contact(contact);
634
742
}
···
659
767
.allow_methods([Method::GET])
660
768
.allow_credentials(false),
661
769
)
770
+
.with(CatchPanic::new())
662
771
.with(Tracing);
663
772
Server::new(listener)
664
773
.name("slingshot")
+67
slingshot/static/index.html
+67
slingshot/static/index.html
···
1
+
<!doctype html>
2
+
<html lang="en">
3
+
<head>
4
+
<meta charset="utf-8" />
5
+
<title>Slingshot: atproto edge record cache</title>
6
+
<meta name="viewport" content="width=device-width, initial-scale=1" />
7
+
<meta name="description" content="API Documentation for Slingshot, a firehose-listening atproto edge record and identity cache." />
8
+
<style>
9
+
:root {
10
+
--scalar-small: 13px;
11
+
}
12
+
.scalar-app .markdown .markdown-alert {
13
+
font-size: var(--scalar-small);
14
+
}
15
+
.sidebar-heading-link-title {
16
+
line-height: 1.2;
17
+
}
18
+
.custom-header {
19
+
height: 42px;
20
+
background-color: #221828;
21
+
box-shadow: inset 0 -1px 0 var(--scalar-border-color);
22
+
color: var(--scalar-color-1);
23
+
font-size: var(--scalar-font-size-3);
24
+
font-family: 'Iowan Old Style', 'Palatino Linotype', 'URW Palladio L', P052, serif;
25
+
padding: 0 18px;
26
+
justify-content: space-between;
27
+
}
28
+
.custom-header,
29
+
.custom-header nav {
30
+
display: flex;
31
+
align-items: center;
32
+
gap: 18px;
33
+
}
34
+
.custom-header a:hover {
35
+
color: var(--scalar-color-2);
36
+
}
37
+
38
+
.light-mode .custom-header {
39
+
background-color: thistle;
40
+
}
41
+
</style>
42
+
</head>
43
+
<body>
44
+
<header class="custom-header scalar-app">
45
+
<p>
46
+
TODO: thing
47
+
</p>
48
+
<nav>
49
+
<b>a <a href="https://microcosm.blue">microcosm</a> project</b>
50
+
<a href="https://bsky.app/profile/microcosm.blue">@microcosm.blue</a>
51
+
<a href="https://github.com/at-microcosm">github</a>
52
+
</nav>
53
+
</header>
54
+
55
+
<script id="api-reference" type="application/json" data-url="/openapi"></script>
56
+
57
+
<script>
58
+
var configuration = {
59
+
theme: 'purple',
60
+
hideModels: true,
61
+
}
62
+
document.getElementById('api-reference').dataset.configuration = JSON.stringify(configuration)
63
+
</script>
64
+
65
+
<script src="https://cdn.jsdelivr.net/npm/@scalar/api-reference"></script>
66
+
</body>
67
+
</html>
+8
spacedust/Cargo.toml
+8
spacedust/Cargo.toml
···
4
4
edition = "2024"
5
5
6
6
[dependencies]
7
+
anyhow = "1.0.100"
8
+
async-channel = "2.5.0"
7
9
async-trait = "0.1.88"
8
10
clap = { version = "4.5.40", features = ["derive"] }
9
11
ctrlc = "3.4.7"
12
+
dasl = "0.2.0"
10
13
dropshot = "0.16.2"
11
14
env_logger = "0.11.8"
15
+
fjall = "3.0.0-pre.0"
12
16
futures = "0.3.31"
13
17
http = "1.3.1"
18
+
ipld-core = { version = "0.4.2", features = ["serde"] }
14
19
jetstream = { path = "../jetstream", features = ["metrics"] }
15
20
links = { path = "../links" }
16
21
log = "0.4.27"
17
22
metrics = "0.24.2"
18
23
metrics-exporter-prometheus = { version = "0.17.1", features = ["http-listener"] }
19
24
rand = "0.9.1"
25
+
repo-stream = "0.2.2"
26
+
reqwest = { version = "0.12.24", features = ["json", "stream"] }
20
27
schemars = "0.8.22"
21
28
semver = "1.0.26"
22
29
serde = { version = "1.0.219", features = ["derive"] }
30
+
serde_ipld_dagcbor = "0.6.4"
23
31
serde_json = "1.0.140"
24
32
serde_qs = "1.0.0-rc.3"
25
33
thiserror = "2.0.12"
+21
spacedust/src/bin/import_car_file.rs
+21
spacedust/src/bin/import_car_file.rs
···
1
+
use clap::Parser;
2
+
use std::path::PathBuf;
3
+
4
+
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
5
+
6
+
#[derive(Debug, Parser)]
7
+
struct Args {
8
+
#[arg()]
9
+
file: PathBuf,
10
+
}
11
+
12
+
#[tokio::main]
13
+
async fn main() -> Result<()> {
14
+
env_logger::init();
15
+
16
+
let Args { file } = Args::parse();
17
+
18
+
let _reader = tokio::fs::File::open(file).await?;
19
+
20
+
Ok(())
21
+
}
+258
spacedust/src/bin/import_scraped.rs
+258
spacedust/src/bin/import_scraped.rs
···
1
+
use clap::Parser;
2
+
use links::CollectedLink;
3
+
use repo_stream::{
4
+
DiskBuilder, DiskStore, Driver, DriverBuilder, Processable, drive::DriverBuilderWithProcessor,
5
+
drive::NeedDisk,
6
+
};
7
+
use std::path::PathBuf;
8
+
use std::sync::{
9
+
Arc,
10
+
atomic::{AtomicUsize, Ordering},
11
+
};
12
+
use tokio::{io::AsyncRead, task::JoinSet};
13
+
14
+
type Result<T> = anyhow::Result<T>; //std::result::Result<T, Box<dyn std::error::Error>>;
15
+
16
+
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
17
+
struct CollectedProcessed(CollectedLink);
18
+
19
+
impl Processable for CollectedProcessed {
20
+
fn get_size(&self) -> usize {
21
+
self.0.path.capacity() + self.0.target.as_str().len()
22
+
}
23
+
}
24
+
25
+
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
26
+
struct ErrString(String);
27
+
28
+
impl Processable for ErrString {
29
+
fn get_size(&self) -> usize {
30
+
self.0.capacity()
31
+
}
32
+
}
33
+
34
+
type Processed = std::result::Result<Vec<CollectedProcessed>, ErrString>;
35
+
36
+
/// hacky for now: put errors in strings 🤷‍♀️
37
+
fn process(block: Vec<u8>) -> Processed {
38
+
let value: dasl::drisl::Value = dasl::drisl::from_slice(&block)
39
+
.map_err(|e| ErrString(format!("failed to parse block with drisl: {e:?}")))?;
40
+
let links = links::record::collect_links_drisl(&value)
41
+
.into_iter()
42
+
.map(CollectedProcessed)
43
+
.collect();
44
+
Ok(links)
45
+
}
46
+
47
+
#[derive(Debug, Parser)]
48
+
struct Args {
49
+
#[arg(long)]
50
+
cars_folder: PathBuf,
51
+
#[arg(long)]
52
+
mem_workers: usize,
53
+
#[arg(long)]
54
+
disk_workers: usize,
55
+
#[arg(long)]
56
+
disk_folder: PathBuf,
57
+
}
58
+
59
+
async fn get_cars(
60
+
cars_folder: PathBuf,
61
+
tx: async_channel::Sender<tokio::io::BufReader<tokio::fs::File>>,
62
+
) -> Result<()> {
63
+
let mut dir = tokio::fs::read_dir(cars_folder).await?;
64
+
while let Some(entry) = dir.next_entry().await? {
65
+
if !entry.file_type().await?.is_file() {
66
+
continue;
67
+
}
68
+
let reader = tokio::fs::File::open(&entry.path()).await?;
69
+
let reader = tokio::io::BufReader::new(reader);
70
+
tx.send(reader).await?;
71
+
}
72
+
Ok(())
73
+
}
74
+
75
+
async fn drive_mem<R: AsyncRead + Unpin + Send + Sync + 'static>(
76
+
f: R,
77
+
builder: &DriverBuilderWithProcessor<Processed>,
78
+
disk_tx: &async_channel::Sender<NeedDisk<R, Processed>>,
79
+
) -> Result<Option<(usize, usize)>> {
80
+
let mut n = 0;
81
+
let mut n_records = 0;
82
+
match builder.load_car(f).await? {
83
+
Driver::Memory(_commit, mut driver) => {
84
+
while let Some(chunk) = driver.next_chunk(512).await? {
85
+
n_records += chunk.len();
86
+
for (_key, links) in chunk {
87
+
match links {
88
+
Ok(links) => n += links.len(),
89
+
Err(e) => eprintln!("wat: {e:?}"),
90
+
}
91
+
}
92
+
}
93
+
Ok(Some((n, n_records)))
94
+
}
95
+
Driver::Disk(need_disk) => {
96
+
disk_tx.send(need_disk).await?;
97
+
Ok(None)
98
+
}
99
+
}
100
+
}
101
+
102
+
async fn mem_worker<R: AsyncRead + Unpin + Send + Sync + 'static>(
103
+
car_rx: async_channel::Receiver<R>,
104
+
disk_tx: async_channel::Sender<NeedDisk<R, Processed>>,
105
+
n: Arc<AtomicUsize>,
106
+
n_records: Arc<AtomicUsize>,
107
+
) -> Result<()> {
108
+
let builder = DriverBuilder::new()
109
+
.with_block_processor(process) // don't care just counting records
110
+
.with_mem_limit_mb(128);
111
+
while let Ok(f) = car_rx.recv().await {
112
+
let driven = match drive_mem(f, &builder, &disk_tx).await {
113
+
Ok(d) => d,
114
+
Err(e) => {
115
+
eprintln!("failed to drive mem: {e:?}. skipping...");
116
+
continue;
117
+
}
118
+
};
119
+
if let Some((drove, recs)) = driven {
120
+
n.fetch_add(drove, Ordering::Relaxed);
121
+
n_records.fetch_add(recs, Ordering::Relaxed);
122
+
}
123
+
}
124
+
Ok(())
125
+
}
126
+
127
+
async fn drive_disk<R: AsyncRead + Unpin>(
128
+
needed: NeedDisk<R, Processed>,
129
+
store: DiskStore,
130
+
) -> Result<(usize, usize, DiskStore)> {
131
+
let (_commit, mut driver) = needed.finish_loading(store).await?;
132
+
let mut n = 0;
133
+
let mut n_records = 0;
134
+
while let Some(chunk) = driver.next_chunk(512).await? {
135
+
n_records += chunk.len();
136
+
for (_key, links) in chunk {
137
+
match links {
138
+
Ok(links) => n += links.len(),
139
+
Err(e) => eprintln!("wat: {e:?}"),
140
+
}
141
+
}
142
+
}
143
+
let store = driver.reset_store().await?;
144
+
Ok((n, n_records, store))
145
+
}
146
+
147
+
async fn disk_worker<R: AsyncRead + Unpin>(
148
+
worker_id: usize,
149
+
disk_rx: async_channel::Receiver<NeedDisk<R, Processed>>,
150
+
folder: PathBuf,
151
+
n: Arc<AtomicUsize>,
152
+
n_records: Arc<AtomicUsize>,
153
+
disk_workers_active: Arc<AtomicUsize>,
154
+
) -> Result<()> {
155
+
let mut file = folder;
156
+
file.push(format!("disk-worker-{worker_id}.sqlite"));
157
+
let builder = DiskBuilder::new().with_cache_size_mb(128);
158
+
let mut store = builder.open(file.clone()).await?;
159
+
while let Ok(needed) = disk_rx.recv().await {
160
+
let active = disk_workers_active.fetch_add(1, Ordering::AcqRel);
161
+
println!("-> disk workers active: {}", active + 1);
162
+
let (drove, records) = match drive_disk(needed, store).await {
163
+
Ok((d, r, s)) => {
164
+
store = s;
165
+
(d, r)
166
+
}
167
+
Err(e) => {
168
+
eprintln!("failed to drive disk: {e:?}. skipping...");
169
+
store = builder.open(file.clone()).await?;
170
+
continue;
171
+
}
172
+
};
173
+
n.fetch_add(drove, Ordering::Relaxed);
174
+
n_records.fetch_add(records, Ordering::Relaxed);
175
+
let were_active = disk_workers_active.fetch_sub(1, Ordering::AcqRel);
176
+
println!("<- disk workers active: {}", were_active - 1);
177
+
}
178
+
Ok(())
179
+
}
180
+
181
+
#[tokio::main]
182
+
async fn main() -> Result<()> {
183
+
env_logger::init();
184
+
185
+
let Args {
186
+
cars_folder,
187
+
disk_folder,
188
+
disk_workers,
189
+
mem_workers,
190
+
} = Args::parse();
191
+
192
+
let mut set = JoinSet::<Result<()>>::new();
193
+
194
+
let (cars_tx, cars_rx) = async_channel::bounded(2);
195
+
set.spawn(get_cars(cars_folder, cars_tx));
196
+
197
+
let n: Arc<AtomicUsize> = Arc::new(0.into());
198
+
let n_records: Arc<AtomicUsize> = Arc::new(0.into());
199
+
let disk_workers_active: Arc<AtomicUsize> = Arc::new(0.into());
200
+
201
+
set.spawn({
202
+
let n = n.clone();
203
+
let n_records = n_records.clone();
204
+
let mut interval = tokio::time::interval(std::time::Duration::from_secs(10));
205
+
async move {
206
+
let mut last_n = n.load(Ordering::Relaxed);
207
+
let mut last_n_records = n_records.load(Ordering::Relaxed);
208
+
loop {
209
+
interval.tick().await;
210
+
let n = n.load(Ordering::Relaxed);
211
+
let n_records = n_records.load(Ordering::Relaxed);
212
+
let diff_n = n - last_n;
213
+
let diff_records = n_records - last_n_records;
214
+
println!("rate: {} rec/sec; {} n/sec", diff_records / 10, diff_n / 10);
215
+
if n_records > 0 && diff_records == 0 {
216
+
println!("zero encountered, stopping rate calculation polling.");
217
+
break Ok(());
218
+
}
219
+
last_n = n;
220
+
last_n_records = n_records;
221
+
}
222
+
}
223
+
});
224
+
225
+
let (needs_disk_tx, needs_disk_rx) = async_channel::bounded(disk_workers);
226
+
227
+
for _ in 0..mem_workers {
228
+
set.spawn(mem_worker(
229
+
cars_rx.clone(),
230
+
needs_disk_tx.clone(),
231
+
n.clone(),
232
+
n_records.clone(),
233
+
));
234
+
}
235
+
drop(cars_rx);
236
+
drop(needs_disk_tx);
237
+
238
+
tokio::fs::create_dir_all(disk_folder.clone()).await?;
239
+
for id in 0..disk_workers {
240
+
set.spawn(disk_worker(
241
+
id,
242
+
needs_disk_rx.clone(),
243
+
disk_folder.clone(),
244
+
n.clone(),
245
+
n_records.clone(),
246
+
disk_workers_active.clone(),
247
+
));
248
+
}
249
+
drop(needs_disk_rx);
250
+
251
+
while let Some(res) = set.join_next().await {
252
+
println!("task from set joined: {res:?}");
253
+
}
254
+
255
+
eprintln!("total records processed: {n_records:?}; total n: {n:?}");
256
+
257
+
Ok(())
258
+
}
+137
spacedust/src/bin/scrape_pds.rs
+137
spacedust/src/bin/scrape_pds.rs
···
1
+
use clap::Parser;
2
+
use reqwest::Url;
3
+
use serde::Deserialize;
4
+
use std::path::PathBuf;
5
+
use tokio::io::AsyncWriteExt;
6
+
use tokio::{sync::mpsc, time};
7
+
8
+
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;
9
+
10
+
use futures::StreamExt;
11
+
12
+
#[derive(Debug, Parser)]
13
+
struct Args {
14
+
#[arg(long)]
15
+
pds: Url,
16
+
#[arg(long)]
17
+
throttle_ms: u64, // 100ms per pds?
18
+
#[arg(long)]
19
+
folder: PathBuf,
20
+
}
21
+
22
+
async fn download_repo(
23
+
client: &reqwest::Client,
24
+
mut pds: Url,
25
+
did: String,
26
+
mut path: PathBuf,
27
+
) -> Result<()> {
28
+
path.push(format!("{did}.car"));
29
+
let f = tokio::fs::File::create(path).await?;
30
+
let mut w = tokio::io::BufWriter::new(f);
31
+
32
+
pds.set_path("/xrpc/com.atproto.sync.getRepo");
33
+
pds.set_query(Some(&format!("did={did}")));
34
+
let mut byte_stream = client.get(pds).send().await?.bytes_stream();
35
+
36
+
while let Some(stuff) = byte_stream.next().await {
37
+
tokio::io::copy(&mut stuff?.as_ref(), &mut w).await?;
38
+
}
39
+
w.flush().await?;
40
+
41
+
Ok(())
42
+
}
43
+
44
+
#[derive(Debug, Deserialize)]
45
+
struct RepoInfo {
46
+
did: String,
47
+
active: bool,
48
+
}
49
+
50
+
#[derive(Debug, Deserialize)]
51
+
struct ListReposResponse {
52
+
cursor: Option<String>,
53
+
repos: Vec<RepoInfo>,
54
+
}
55
+
56
+
fn get_pds_dids(client: reqwest::Client, mut pds: Url) -> mpsc::Receiver<String> {
57
+
let (tx, rx) = mpsc::channel(2);
58
+
tokio::task::spawn(async move {
59
+
pds.set_path("/xrpc/com.atproto.sync.listRepos");
60
+
let mut cursor = None;
61
+
62
+
loop {
63
+
if let Some(c) = cursor {
64
+
pds.set_query(Some(&format!("cursor={c}")));
65
+
}
66
+
let res: ListReposResponse = client
67
+
.get(pds.clone())
68
+
.send()
69
+
.await
70
+
.expect("to send request")
71
+
.error_for_status()
72
+
.expect("to be ok")
73
+
.json()
74
+
.await
75
+
.expect("json response");
76
+
for repo in res.repos {
77
+
if repo.active {
78
+
tx.send(repo.did)
79
+
.await
80
+
.expect("to be able to send on the channel");
81
+
}
82
+
}
83
+
cursor = res.cursor;
84
+
if cursor.is_none() {
85
+
break;
86
+
}
87
+
}
88
+
});
89
+
rx
90
+
}
91
+
92
+
#[tokio::main]
93
+
async fn main() -> Result<()> {
94
+
env_logger::init();
95
+
96
+
let Args {
97
+
pds,
98
+
throttle_ms,
99
+
folder,
100
+
} = Args::parse();
101
+
102
+
tokio::fs::create_dir_all(folder.clone()).await?;
103
+
104
+
let client = reqwest::Client::builder()
105
+
.user_agent("microcosm/spacedust-testing")
106
+
.build()?;
107
+
108
+
let mut dids = get_pds_dids(client.clone(), pds.clone());
109
+
110
+
let mut interval = time::interval(time::Duration::from_millis(throttle_ms));
111
+
let mut oks = 0;
112
+
let mut single_fails = 0;
113
+
let mut double_fails = 0;
114
+
115
+
while let Some(did) = dids.recv().await {
116
+
interval.tick().await;
117
+
println!("did: {did:?}");
118
+
if let Err(e) = download_repo(&client, pds.clone(), did.clone(), folder.clone()).await {
119
+
single_fails += 1;
120
+
eprintln!("failed to download repo for did: {did:?}: {e:?}. retrying in a moment...");
121
+
tokio::time::sleep(time::Duration::from_secs(3)).await;
122
+
interval.reset();
123
+
if let Err(e) = download_repo(&client, pds.clone(), did.clone(), folder.clone()).await {
124
+
double_fails += 1;
125
+
eprintln!("failed again: {e:?}. moving on in a moment...");
126
+
tokio::time::sleep(time::Duration::from_secs(1)).await;
127
+
continue;
128
+
}
129
+
}
130
+
oks += 1;
131
+
println!(" -> done. did: {did:?}");
132
+
}
133
+
134
+
eprintln!("got {oks} repos. single fails: {single_fails}; doubles: {double_fails}.");
135
+
136
+
Ok(())
137
+
}
+1
spacedust/src/lib.rs
+1
spacedust/src/lib.rs
spacedust/src/storage/car/drive.rs
spacedust/src/storage/car/drive.rs
This is a binary file and will not be displayed.
+1
spacedust/src/storage/car/mod.rs
+1
spacedust/src/storage/car/mod.rs
···
1
+
spacedust/src/storage/car/walk.rs
spacedust/src/storage/car/walk.rs
This is a binary file and will not be displayed.
+9
spacedust/src/storage/fjall/mod.rs
+9
spacedust/src/storage/fjall/mod.rs
+6
spacedust/src/storage/mod.rs
+6
spacedust/src/storage/mod.rs
+5
-5
spacedust/src/subscriber.rs
+5
-5
spacedust/src/subscriber.rs
···
42
42
loop {
43
43
tokio::select! {
44
44
l = receiver.recv() => match l {
45
-
Ok(link) => if self.filter(&link.properties) {
46
-
if let Err(e) = ws_sender.send(link.message.clone()).await {
47
-
log::warn!("failed to send link, dropping subscriber: {e:?}");
48
-
break;
49
-
}
45
+
Ok(link) => if self.filter(&link.properties)
46
+
&& let Err(e) = ws_sender.send(link.message.clone()).await
47
+
{
48
+
log::warn!("failed to send link, dropping subscriber: {e:?}");
49
+
break;
50
50
},
51
51
Err(RecvError::Closed) => self.shutdown.cancel(),
52
52
Err(RecvError::Lagged(n)) => {
+1
-1
ufos/Cargo.toml
+1
-1
ufos/Cargo.toml
···
13
13
clap = { version = "4.5.31", features = ["derive"] }
14
14
dropshot = "0.16.0"
15
15
env_logger = "0.11.7"
16
-
fjall = { version = "2.8.0", features = ["lz4"] }
16
+
fjall = { git = "https://github.com/fjall-rs/fjall.git", features = ["lz4"] }
17
17
getrandom = "0.3.3"
18
18
http = "1.3.1"
19
19
jetstream = { path = "../jetstream", features = ["metrics"] }
+42
-10
ufos/src/main.rs
+42
-10
ufos/src/main.rs
···
4
4
use metrics_exporter_prometheus::PrometheusBuilder;
5
5
use std::path::PathBuf;
6
6
use std::time::{Duration, SystemTime};
7
+
use tokio::task::JoinSet;
7
8
use ufos::consumer;
8
9
use ufos::file_consumer;
9
10
use ufos::server;
···
72
73
Ok(())
73
74
}
74
75
75
-
async fn go<B: StoreBackground>(
76
+
async fn go<B: StoreBackground + 'static>(
76
77
args: Args,
77
78
read_store: impl StoreReader + 'static + Clone,
78
79
mut write_store: impl StoreWriter<B> + 'static,
79
80
cursor: Option<Cursor>,
80
81
sketch_secret: SketchSecretPrefix,
81
82
) -> anyhow::Result<()> {
83
+
let mut whatever_tasks: JoinSet<anyhow::Result<()>> = JoinSet::new();
84
+
let mut consumer_tasks: JoinSet<anyhow::Result<()>> = JoinSet::new();
85
+
82
86
println!("starting server with storage...");
83
87
let serving = server::serve(read_store.clone());
88
+
whatever_tasks.spawn(async move {
89
+
serving.await.map_err(|e| {
90
+
log::warn!("server ended: {e}");
91
+
anyhow::anyhow!(e)
92
+
})
93
+
});
84
94
85
95
if args.pause_writer {
86
96
log::info!("not starting jetstream or the write loop.");
87
-
serving.await.map_err(|e| anyhow::anyhow!(e))?;
97
+
for t in whatever_tasks.join_all().await {
98
+
if let Err(e) = t {
99
+
return Err(anyhow::anyhow!(e));
100
+
}
101
+
}
88
102
return Ok(());
89
103
}
90
104
···
102
116
let rolling = write_store
103
117
.background_tasks(args.reroll)?
104
118
.run(args.backfill);
105
-
let consuming = write_store.receive_batches(batches);
119
+
whatever_tasks.spawn(async move {
120
+
rolling
121
+
.await
122
+
.inspect_err(|e| log::warn!("rollup ended: {e}"))?;
123
+
Ok(())
124
+
});
106
125
107
-
let stating = do_update_stuff(read_store);
126
+
consumer_tasks.spawn(async move {
127
+
write_store
128
+
.receive_batches(batches)
129
+
.await
130
+
.inspect_err(|e| log::warn!("consumer ended: {e}"))?;
131
+
Ok(())
132
+
});
133
+
134
+
whatever_tasks.spawn(async move {
135
+
do_update_stuff(read_store).await;
136
+
log::warn!("status task ended");
137
+
Ok(())
138
+
});
108
139
109
140
install_metrics_server()?;
110
141
111
-
tokio::select! {
112
-
z = serving => log::warn!("serve task ended: {z:?}"),
113
-
z = rolling => log::warn!("rollup task ended: {z:?}"),
114
-
z = consuming => log::warn!("consuming task ended: {z:?}"),
115
-
z = stating => log::warn!("status task ended: {z:?}"),
116
-
};
142
+
for (i, t) in consumer_tasks.join_all().await.iter().enumerate() {
143
+
log::warn!("task {i} done: {t:?}");
144
+
}
145
+
146
+
println!("consumer tasks all completed, killing the others");
147
+
whatever_tasks.shutdown().await;
117
148
118
149
println!("bye!");
119
150
···
162
193
interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
163
194
loop {
164
195
interval.tick().await;
196
+
read_store.update_metrics();
165
197
match read_store.get_consumer_info().await {
166
198
Err(e) => log::warn!("failed to get jetstream consumer info: {e:?}"),
167
199
Ok(ConsumerInfo::Jetstream {
+11
-1
ufos/src/storage.rs
+11
-1
ufos/src/storage.rs
···
41
41
Unit::Microseconds,
42
42
"batches that took more than 3s to insert"
43
43
);
44
+
describe_histogram!(
45
+
"storage_batch_insert_time",
46
+
Unit::Microseconds,
47
+
"total time to insert one commit batch"
48
+
);
44
49
while let Some(event_batch) = batches.recv().await {
45
50
let token = CancellationToken::new();
46
51
let cancelled = token.clone();
···
69
74
let mut me = self.clone();
70
75
move || {
71
76
let _guard = token.drop_guard();
72
-
me.insert_batch(event_batch)
77
+
let t0 = Instant::now();
78
+
let r = me.insert_batch(event_batch);
79
+
histogram!("storage_batch_insert_time").record(t0.elapsed().as_micros() as f64);
80
+
r
73
81
}
74
82
})
75
83
.await??;
···
103
111
#[async_trait]
104
112
pub trait StoreReader: Send + Sync {
105
113
fn name(&self) -> String;
114
+
115
+
fn update_metrics(&self) {}
106
116
107
117
async fn get_storage_stats(&self) -> StorageResult<serde_json::Value>;
108
118
+104
-20
ufos/src/storage_fjall.rs
+104
-20
ufos/src/storage_fjall.rs
···
23
23
Batch as FjallBatch, Config, Keyspace, PartitionCreateOptions, PartitionHandle, Snapshot,
24
24
};
25
25
use jetstream::events::Cursor;
26
-
use metrics::{counter, describe_counter, describe_histogram, histogram, Unit};
26
+
use lsm_tree::AbstractTree;
27
+
use metrics::{
28
+
counter, describe_counter, describe_gauge, describe_histogram, gauge, histogram, Unit,
29
+
};
27
30
use std::collections::{HashMap, HashSet};
28
31
use std::iter::Peekable;
29
32
use std::ops::Bound;
···
227
230
feeds: feeds.clone(),
228
231
records: records.clone(),
229
232
rollups: rollups.clone(),
233
+
queues: queues.clone(),
230
234
};
235
+
reader.describe_metrics();
231
236
let writer = FjallWriter {
232
237
bg_taken: Arc::new(AtomicBool::new(false)),
233
238
keyspace,
···
237
242
rollups,
238
243
queues,
239
244
};
245
+
writer.describe_metrics();
240
246
Ok((reader, writer, js_cursor, sketch_secret))
241
247
}
242
248
}
···
250
256
feeds: PartitionHandle,
251
257
records: PartitionHandle,
252
258
rollups: PartitionHandle,
259
+
queues: PartitionHandle,
253
260
}
254
261
255
262
/// An iterator that knows how to skip over deleted/invalidated records
···
381
388
type CollectionSerieses = HashMap<Nsid, Vec<CountsValue>>;
382
389
383
390
impl FjallReader {
391
+
fn describe_metrics(&self) {
392
+
describe_gauge!(
393
+
"storage_fjall_l0_run_count",
394
+
Unit::Count,
395
+
"number of L0 runs in a partition"
396
+
);
397
+
describe_gauge!(
398
+
"storage_fjall_keyspace_disk_space",
399
+
Unit::Bytes,
400
+
"total storage used according to fjall"
401
+
);
402
+
describe_gauge!(
403
+
"storage_fjall_journal_count",
404
+
Unit::Count,
405
+
"total keyspace journals according to fjall"
406
+
);
407
+
describe_gauge!(
408
+
"storage_fjall_keyspace_sequence",
409
+
Unit::Count,
410
+
"fjall keyspace sequence"
411
+
);
412
+
}
413
+
384
414
fn get_storage_stats(&self) -> StorageResult<serde_json::Value> {
385
415
let rollup_cursor =
386
416
get_static_neu::<NewRollupCursorKey, NewRollupCursorValue>(&self.global)?
···
1000
1030
fn name(&self) -> String {
1001
1031
"fjall storage v2".into()
1002
1032
}
1033
+
fn update_metrics(&self) {
1034
+
gauge!("storage_fjall_l0_run_count", "partition" => "global")
1035
+
.set(self.global.tree.l0_run_count() as f64);
1036
+
gauge!("storage_fjall_l0_run_count", "partition" => "feeds")
1037
+
.set(self.feeds.tree.l0_run_count() as f64);
1038
+
gauge!("storage_fjall_l0_run_count", "partition" => "records")
1039
+
.set(self.records.tree.l0_run_count() as f64);
1040
+
gauge!("storage_fjall_l0_run_count", "partition" => "rollups")
1041
+
.set(self.rollups.tree.l0_run_count() as f64);
1042
+
gauge!("storage_fjall_l0_run_count", "partition" => "queues")
1043
+
.set(self.queues.tree.l0_run_count() as f64);
1044
+
gauge!("storage_fjall_keyspace_disk_space").set(self.keyspace.disk_space() as f64);
1045
+
gauge!("storage_fjall_journal_count").set(self.keyspace.journal_count() as f64);
1046
+
gauge!("storage_fjall_keyspace_sequence").set(self.keyspace.instant() as f64);
1047
+
}
1003
1048
async fn get_storage_stats(&self) -> StorageResult<serde_json::Value> {
1004
1049
let s = self.clone();
1005
1050
tokio::task::spawn_blocking(move || FjallReader::get_storage_stats(&s)).await?
···
1091
1136
}
1092
1137
1093
1138
impl FjallWriter {
1139
+
fn describe_metrics(&self) {
1140
+
describe_histogram!(
1141
+
"storage_insert_batch_db_batch_items",
1142
+
Unit::Count,
1143
+
"how many items are in the fjall batch for batched inserts"
1144
+
);
1145
+
describe_histogram!(
1146
+
"storage_rollup_counts_db_batch_items",
1147
+
Unit::Count,
1148
+
"how many items are in the fjall batch for a timlies rollup"
1149
+
);
1150
+
describe_counter!(
1151
+
"storage_delete_account_partial_commits",
1152
+
Unit::Count,
1153
+
"fjall checkpoint commits for cleaning up accounts with too many records"
1154
+
);
1155
+
describe_counter!(
1156
+
"storage_delete_account_completions",
1157
+
Unit::Count,
1158
+
"total count of account deletes handled"
1159
+
);
1160
+
describe_counter!(
1161
+
"storage_delete_account_records_deleted",
1162
+
Unit::Count,
1163
+
"total records deleted when handling account deletes"
1164
+
);
1165
+
describe_histogram!(
1166
+
"storage_trim_dirty_nsids",
1167
+
Unit::Count,
1168
+
"number of NSIDs trimmed"
1169
+
);
1170
+
describe_histogram!(
1171
+
"storage_trim_duration",
1172
+
Unit::Microseconds,
1173
+
"how long it took to trim the dirty NSIDs"
1174
+
);
1175
+
describe_counter!(
1176
+
"storage_trim_removed",
1177
+
Unit::Count,
1178
+
"how many records were removed during trim"
1179
+
);
1180
+
}
1094
1181
fn rollup_delete_account(
1095
1182
&mut self,
1096
1183
cursor: Cursor,
···
1222
1309
AllTimeRecordsKey::new(new_creates_count.into(), &nsid).to_db_bytes()?,
1223
1310
),
1224
1311
};
1225
-
batch.remove(&self.rollups, &old_k); // TODO: when fjall gets weak delete, this will hopefully work way better
1312
+
// remove_weak is allowed here because the secondary ranking index only ever inserts once at a key
1313
+
batch.remove_weak(&self.rollups, &old_k);
1226
1314
batch.insert(&self.rollups, &new_k, "");
1227
1315
}
1228
1316
···
1246
1334
AllTimeDidsKey::new(new_dids_estimate.into(), &nsid).to_db_bytes()?,
1247
1335
),
1248
1336
};
1249
-
batch.remove(&self.rollups, &old_k); // TODO: when fjall gets weak delete, this will hopefully work way better
1337
+
// remove_weak is allowed here because the secondary ranking index only ever inserts once at a key
1338
+
batch.remove_weak(&self.rollups, &old_k);
1250
1339
batch.insert(&self.rollups, &new_k, "");
1251
1340
}
1252
1341
···
1256
1345
1257
1346
insert_batch_static_neu::<NewRollupCursorKey>(&mut batch, &self.global, last_cursor)?;
1258
1347
1348
+
histogram!("storage_rollup_counts_db_batch_items").record(batch.len() as f64);
1259
1349
batch.commit()?;
1260
1350
Ok((cursors_advanced, dirty_nsids))
1261
1351
}
···
1266
1356
if self.bg_taken.swap(true, Ordering::SeqCst) {
1267
1357
return Err(StorageError::BackgroundAlreadyStarted);
1268
1358
}
1269
-
describe_histogram!(
1270
-
"storage_trim_dirty_nsids",
1271
-
Unit::Count,
1272
-
"number of NSIDs trimmed"
1273
-
);
1274
-
describe_histogram!(
1275
-
"storage_trim_duration",
1276
-
Unit::Microseconds,
1277
-
"how long it took to trim the dirty NSIDs"
1278
-
);
1279
-
describe_counter!(
1280
-
"storage_trim_removed",
1281
-
Unit::Count,
1282
-
"how many records were removed during trim"
1283
-
);
1284
1359
if reroll {
1285
1360
log::info!("reroll: resetting rollup cursor...");
1286
1361
insert_static_neu::<NewRollupCursorKey>(&self.global, Cursor::from_start())?;
···
1375
1450
latest.to_db_bytes()?,
1376
1451
);
1377
1452
1453
+
histogram!("storage_insert_batch_db_batch_items").record(batch.len() as f64);
1378
1454
batch.commit()?;
1379
1455
Ok(())
1380
1456
}
···
1529
1605
candidate_new_feed_lower_cursor = Some(feed_key.cursor());
1530
1606
}
1531
1607
1532
-
self.feeds.remove(&location_key_bytes)?;
1608
+
self.records.remove(&location_key_bytes)?;
1533
1609
self.feeds.remove(key_bytes)?;
1534
1610
records_deleted += 1;
1535
1611
}
···
1556
1632
batch.remove(&self.records, key_bytes);
1557
1633
records_deleted += 1;
1558
1634
if batch.len() >= MAX_BATCHED_ACCOUNT_DELETE_RECORDS {
1635
+
counter!("storage_delete_account_partial_commits").increment(1);
1559
1636
batch.commit()?;
1560
1637
batch = self.keyspace.batch();
1561
1638
}
1562
1639
}
1640
+
counter!("storage_delete_account_completions").increment(1);
1641
+
counter!("storage_delete_account_records_deleted").increment(records_deleted as u64);
1563
1642
batch.commit()?;
1564
1643
Ok(records_deleted)
1565
1644
}
···
1619
1698
histogram!("storage_trim_dirty_nsids").record(completed.len() as f64);
1620
1699
histogram!("storage_trim_duration").record(dt.as_micros() as f64);
1621
1700
counter!("storage_trim_removed", "dangling" => "true").increment(total_danglers as u64);
1622
-
counter!("storage_trim_removed", "dangling" => "false").increment((total_deleted - total_danglers) as u64);
1701
+
if total_deleted >= total_danglers {
1702
+
counter!("storage_trim_removed", "dangling" => "false").increment((total_deleted - total_danglers) as u64);
1703
+
} else {
1704
+
// TODO: probably think through what's happening here
1705
+
log::warn!("weird trim case: more danglers than deleted? metric will be missing for dangling=false. deleted={total_deleted} danglers={total_danglers}");
1706
+
}
1623
1707
for c in completed {
1624
1708
dirty_nsids.remove(&c);
1625
1709
}
-196
ufos ops (move to micro-ops).md
-196
ufos ops (move to micro-ops).md
···
1
-
ufos ops
2
-
3
-
btrfs snapshots: snapper
4
-
5
-
```bash
6
-
sudo apt install snapper
7
-
sudo snapper -c ufos-db create-config /mnt/ufos-db
8
-
9
-
# edit /etc/snapper/configs/ufos-db
10
-
# change
11
-
TIMELINE_MIN_AGE="1800"
12
-
TIMELINE_LIMIT_HOURLY="10"
13
-
TIMELINE_LIMIT_DAILY="10"
14
-
TIMELINE_LIMIT_WEEKLY="0"
15
-
TIMELINE_LIMIT_MONTHLY="10"
16
-
TIMELINE_LIMIT_YEARLY="10"
17
-
# to
18
-
TIMELINE_MIN_AGE="1800"
19
-
TIMELINE_LIMIT_HOURLY="22"
20
-
TIMELINE_LIMIT_DAILY="4"
21
-
TIMELINE_LIMIT_WEEKLY="0"
22
-
TIMELINE_LIMIT_MONTHLY="0"
23
-
TIMELINE_LIMIT_YEARLY="0"
24
-
```
25
-
26
-
this should be enough?
27
-
28
-
list snapshots:
29
-
30
-
```bash
31
-
sudo snapper -c ufos-db list
32
-
```
33
-
34
-
systemd
35
-
36
-
create file: `/etc/systemd/system/ufos.service`
37
-
38
-
```ini
39
-
[Unit]
40
-
Description=UFOs-API
41
-
After=network.target
42
-
43
-
[Service]
44
-
User=pi
45
-
WorkingDirectory=/home/pi/
46
-
ExecStart=/home/pi/ufos --jetstream us-west-2 --data /mnt/ufos-db/
47
-
Environment="RUST_LOG=info"
48
-
LimitNOFILE=16384
49
-
Restart=always
50
-
51
-
[Install]
52
-
WantedBy=multi-user.target
53
-
```
54
-
55
-
then
56
-
57
-
```bash
58
-
sudo systemctl daemon-reload
59
-
sudo systemctl enable ufos
60
-
sudo systemctl start ufos
61
-
```
62
-
63
-
monitor with
64
-
65
-
```bash
66
-
journalctl -u ufos -f
67
-
```
68
-
69
-
make sure a backup dir exists
70
-
71
-
```bash
72
-
mkdir /home/pi/backup
73
-
```
74
-
75
-
mount the NAS
76
-
77
-
```bash
78
-
sudo mount.cifs "//truenas.local/folks data" /home/pi/backup -o user=phil,uid=pi
79
-
```
80
-
81
-
manual rsync
82
-
83
-
```bash
84
-
sudo rsync -ahP --delete /mnt/ufos-db/.snapshots/1/snapshot/ backup/ufos/
85
-
```
86
-
87
-
backup script sketch
88
-
89
-
```bash
90
-
NUM=$(sudo snapper --csvout -c ufos-db list --type single --columns number | tail -n1)
91
-
sudo rsync -ahP --delete "/mnt/ufos-db/.snapshots/${NUM}/snapshot/" backup/ufos/
92
-
```
93
-
94
-
just crontab it?
95
-
96
-
`sudo crontab -e`
97
-
```bash
98
-
0 1/6 * * * rsync -ahP --delete "/mnt/ufos-db/.snapshots/$(sudo snapper --csvout -c ufos-db list --columns number | tail -n1)/snapshot/" backup/ufos/
99
-
```
100
-
101
-
^^ try once initial backup is done
102
-
103
-
104
-
--columns subvolume,number
105
-
106
-
subvolume
107
-
number
108
-
109
-
110
-
111
-
112
-
gateway: follow constellation for nginx->prom thing
113
-
114
-
config at `/etc/prometheus-nginxlog-exporter.hcl`
115
-
116
-
before: `/etc/prometheus-nginxlog-exporter.hcl`
117
-
118
-
```hcl
119
-
listen {
120
-
port = 4044
121
-
}
122
-
123
-
namespace "nginx" {
124
-
source = {
125
-
files = [
126
-
"/var/log/nginx/constellation-access.log"
127
-
]
128
-
}
129
-
130
-
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
131
-
132
-
labels {
133
-
app = "constellation"
134
-
}
135
-
136
-
relabel "cache_status" {
137
-
from = "upstream_cache_status"
138
-
}
139
-
}
140
-
```
141
-
142
-
after:
143
-
144
-
```hcl
145
-
listen {
146
-
port = 4044
147
-
}
148
-
149
-
namespace "constellation" {
150
-
source = {
151
-
files = [
152
-
"/var/log/nginx/constellation-access.log"
153
-
]
154
-
}
155
-
156
-
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
157
-
158
-
labels {
159
-
app = "constellation"
160
-
}
161
-
162
-
relabel "cache_status" {
163
-
from = "upstream_cache_status"
164
-
}
165
-
166
-
namespace_label = "vhost"
167
-
metrics_override = { prefix = "nginx" }
168
-
}
169
-
170
-
namespace "ufos" {
171
-
source = {
172
-
files = [
173
-
"/var/log/nginx/ufos-access.log"
174
-
]
175
-
}
176
-
177
-
format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $upstream_cache_status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\""
178
-
179
-
labels {
180
-
app = "ufos"
181
-
}
182
-
183
-
relabel "cache_status" {
184
-
from = "upstream_cache_status"
185
-
}
186
-
187
-
namespace_label = "vhost"
188
-
metrics_override = { prefix = "nginx" }
189
-
}
190
-
```
191
-
192
-
193
-
```bash
194
-
systemctl start prometheus-nginxlog-exporter.service
195
-
```
196
-
+4
-4
who-am-i/src/server.rs
+4
-4
who-am-i/src/server.rs
···
268
268
Some(parent_host),
269
269
);
270
270
}
271
-
if let Some(ref app) = params.app {
272
-
if !allowed_hosts.contains(app) {
273
-
return err("Login is not allowed for this app", false, Some(app));
274
-
}
271
+
if let Some(ref app) = params.app
272
+
&& !allowed_hosts.contains(app)
273
+
{
274
+
return err("Login is not allowed for this app", false, Some(app));
275
275
}
276
276
let parent_origin = url.origin().ascii_serialization();
277
277
if parent_origin == "null" {