+2 -245  Cargo.lock
···
 checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04"

 [[package]]
-name = "async-channel"
-version = "1.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35"
-dependencies = [
- "concurrent-queue",
- "event-listener 2.5.3",
- "futures-core",
-]
-
-[[package]]
-name = "async-channel"
-version = "2.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a"
-dependencies = [
- "concurrent-queue",
- "event-listener-strategy",
- "futures-core",
- "pin-project-lite",
-]
-
-[[package]]
 name = "async-compression"
 version = "0.4.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
-name = "async-executor"
-version = "1.13.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec"
-dependencies = [
- "async-task",
- "concurrent-queue",
- "fastrand",
- "futures-lite",
- "slab",
-]
-
-[[package]]
-name = "async-global-executor"
-version = "2.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c"
-dependencies = [
- "async-channel 2.3.1",
- "async-executor",
- "async-io",
- "async-lock",
- "blocking",
- "futures-lite",
- "once_cell",
-]
-
-[[package]]
-name = "async-io"
-version = "2.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059"
-dependencies = [
- "async-lock",
- "cfg-if",
- "concurrent-queue",
- "futures-io",
- "futures-lite",
- "parking",
- "polling",
- "rustix",
- "slab",
- "tracing",
- "windows-sys 0.59.0",
-]
-
-[[package]]
-name = "async-lock"
-version = "3.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18"
-dependencies = [
- "event-listener 5.4.0",
- "event-listener-strategy",
- "pin-project-lite",
-]
-
-[[package]]
 name = "async-recursion"
 version = "1.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
  "quote",
  "syn",
 ]
-
-[[package]]
-name = "async-std"
-version = "1.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615"
-dependencies = [
- "async-channel 1.9.0",
- "async-global-executor",
- "async-io",
- "async-lock",
- "crossbeam-utils",
- "futures-channel",
- "futures-core",
- "futures-io",
- "futures-lite",
- "gloo-timers",
- "kv-log-macro",
- "log",
- "memchr",
- "once_cell",
- "pin-project-lite",
- "pin-utils",
- "slab",
- "wasm-bindgen-futures",
-]
-
-[[package]]
-name = "async-task"
-version = "4.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de"

 [[package]]
 name = "async-trait"
···
 ]

 [[package]]
-name = "blocking"
-version = "1.6.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea"
-dependencies = [
- "async-channel 2.3.1",
- "async-task",
- "futures-io",
- "futures-lite",
- "piper",
-]
-
-[[package]]
 name = "brotli"
 version = "7.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 ]

 [[package]]
-name = "concurrent-queue"
-version = "2.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973"
-dependencies = [
- "crossbeam-utils",
-]
-
-[[package]]
 name = "const-oid"
 version = "0.9.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 name = "dataloader"
 version = "0.18.0"
 dependencies = [
- "async-std",
  "futures",
  "tokio",
 ]
···
 ]

 [[package]]
-name = "event-listener"
-version = "2.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
-
-[[package]]
-name = "event-listener"
-version = "5.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae"
-dependencies = [
- "concurrent-queue",
- "parking",
- "pin-project-lite",
-]
-
-[[package]]
-name = "event-listener-strategy"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2"
-dependencies = [
- "event-listener 5.4.0",
- "pin-project-lite",
-]
-
-[[package]]
 name = "eyre"
 version = "0.6.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"

 [[package]]
-name = "futures-lite"
-version = "2.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532"
-dependencies = [
- "fastrand",
- "futures-core",
- "futures-io",
- "parking",
- "pin-project-lite",
-]
-
-[[package]]
 name = "futures-macro"
 version = "0.3.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2"

 [[package]]
-name = "gloo-timers"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994"
-dependencies = [
- "futures-channel",
- "futures-core",
- "js-sys",
- "wasm-bindgen",
-]
-
-[[package]]
 name = "group"
 version = "0.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
-
-[[package]]
-name = "hermit-abi"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc"

 [[package]]
 name = "hex"
···
 ]

 [[package]]
-name = "kv-log-macro"
-version = "1.0.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f"
-dependencies = [
- "log",
-]
-
-[[package]]
 name = "lazy_static"
 version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 version = "0.1.0"
 dependencies = [
  "chrono",
+ "cid",
  "serde",
  "serde_json",
 ]
···
 version = "0.4.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f"
-dependencies = [
- "value-bag",
-]

 [[package]]
 name = "lru-cache"
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
 dependencies = [
- "hermit-abi 0.3.9",
+ "hermit-abi",
  "libc",
 ]

···
 ]

 [[package]]
-name = "parking"
-version = "2.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba"
-
-[[package]]
 name = "parking_lot"
 version = "0.11.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"

 [[package]]
-name = "piper"
-version = "0.2.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066"
-dependencies = [
- "atomic-waker",
- "fastrand",
- "futures-io",
-]
-
-[[package]]
 name = "pkcs1"
 version = "0.7.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 version = "0.3.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2"
-
-[[package]]
-name = "polling"
-version = "3.7.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f"
-dependencies = [
- "cfg-if",
- "concurrent-queue",
- "hermit-abi 0.4.0",
- "pin-project-lite",
- "rustix",
- "tracing",
- "windows-sys 0.59.0",
-]

 [[package]]
 name = "portable-atomic"
···
 version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65"
-
-[[package]]
-name = "value-bag"
-version = "1.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2"

 [[package]]
 name = "vcpkg"
+176  LICENSE-APACHE
···
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction,
+and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by
+the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all
+other entities that control, are controlled by, or are under common
+control with that entity. For the purposes of this definition,
+"control" means (i) the power, direct or indirect, to cause the
+direction or management of such entity, whether by contract or
+otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity
+exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications,
+including but not limited to software source code, documentation
+source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical
+transformation or translation of a Source form, including but
+not limited to compiled object code, generated documentation,
+and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or
+Object form, made available under the License, as indicated by a
+copyright notice that is included in or attached to the work
+(an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object
+form, that is based on (or derived from) the Work and for which the
+editorial revisions, annotations, elaborations, or other modifications
+represent, as a whole, an original work of authorship. For the purposes
+of this License, Derivative Works shall not include works that remain
+separable from, or merely link (or bind by name) to the interfaces of,
+the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including
+the original version of the Work and any modifications or additions
+to that Work or Derivative Works thereof, that is intentionally
+submitted to Licensor for inclusion in the Work by the copyright owner
+or by an individual or Legal Entity authorized to submit on behalf of
+the copyright owner. For the purposes of this definition, "submitted"
+means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems,
+and issue tracking systems that are managed by, or on behalf of, the
+Licensor for the purpose of discussing and improving the Work, but
+excluding communication that is conspicuously marked or otherwise
+designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity
+on behalf of whom a Contribution has been received by Licensor and
+subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+this License, each Contributor hereby grants to You a perpetual,
+worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the
+Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+this License, each Contributor hereby grants to You a perpetual,
+worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+(except as stated in this section) patent license to make, have made,
+use, offer to sell, sell, import, and otherwise transfer the Work,
+where such license applies only to those patent claims licensable
+by such Contributor that are necessarily infringed by their
+Contribution(s) alone or by combination of their Contribution(s)
+with the Work to which such Contribution(s) was submitted. If You
+institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work
+or a Contribution incorporated within the Work constitutes direct
+or contributory patent infringement, then any patent licenses
+granted to You under this License for that Work shall terminate
+as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+Work or Derivative Works thereof in any medium, with or without
+modifications, and in Source or Object form, provided that You
+meet the following conditions:
+
+(a) You must give any other recipients of the Work or
+Derivative Works a copy of this License; and
+
+(b) You must cause any modified files to carry prominent notices
+stating that You changed the files; and
+
+(c) You must retain, in the Source form of any Derivative Works
+that You distribute, all copyright, patent, trademark, and
+attribution notices from the Source form of the Work,
+excluding those notices that do not pertain to any part of
+the Derivative Works; and
+
+(d) If the Work includes a "NOTICE" text file as part of its
+distribution, then any Derivative Works that You distribute must
+include a readable copy of the attribution notices contained
+within such NOTICE file, excluding those notices that do not
+pertain to any part of the Derivative Works, in at least one
+of the following places: within a NOTICE text file distributed
+as part of the Derivative Works; within the Source form or
+documentation, if provided along with the Derivative Works; or,
+within a display generated by the Derivative Works, if and
+wherever such third-party notices normally appear. The contents
+of the NOTICE file are for informational purposes only and
+do not modify the License. You may add Your own attribution
+notices within Derivative Works that You distribute, alongside
+or as an addendum to the NOTICE text from the Work, provided
+that such additional attribution notices cannot be construed
+as modifying the License.
+
+You may add Your own copyright statement to Your modifications and
+may provide additional or different license terms and conditions
+for use, reproduction, or distribution of Your modifications, or
+for any such Derivative Works as a whole, provided Your use,
+reproduction, and distribution of the Work otherwise complies with
+the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+any Contribution intentionally submitted for inclusion in the Work
+by You to the Licensor shall be under the terms and conditions of
+this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify
+the terms of any separate license agreement you may have executed
+with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+names, trademarks, service marks, or product names of the Licensor,
+except as required for reasonable and customary use in describing the
+origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+agreed to in writing, Licensor provides the Work (and each
+Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+implied, including, without limitation, any warranties or conditions
+of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+PARTICULAR PURPOSE. You are solely responsible for determining the
+appropriateness of using or redistributing the Work and assume any
+risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+whether in tort (including negligence), contract, or otherwise,
+unless required by applicable law (such as deliberate and grossly
+negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special,
+incidental, or consequential damages of any character arising as a
+result of this License or out of the use or inability to use the
+Work (including but not limited to damages for loss of goodwill,
+work stoppage, computer failure or malfunction, or any and all
+other commercial damages or losses), even if such Contributor
+has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+the Work or Derivative Works thereof, You may choose to offer,
+and charge a fee for, acceptance of support, warranty, indemnity,
+or other liability obligations and/or rights consistent with this
+License. However, in accepting such obligations, You may act only
+on Your own behalf and on Your sole responsibility, not on behalf
+of any other Contributor, and only if You agree to indemnify,
+defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason
+of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+21  LICENSE-MIT
···
+MIT License
+
+Copyright (c) 2025 Parakeet Project
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+81  README.md
···
+# Parakeet
+
+Parakeet is a [Bluesky](https://bsky.app) [AppView](https://atproto.wiki/en/wiki/reference/core-architecture/appview)
+aiming to implement most of the functionality required to support the Bluesky client. Notably, a CDN is not implemented.
+
+## Status and Roadmap
+Most common functionality works. Notable omissions: like/repost/follow statuses are missing, blocks and mutes don't get
+applied, labels might not track CIDs properly, and label redaction doesn't work at all (beware!).
+
+Future work is tracked in issues, but the highlights are below. Help would be highly appreciated.
+- Notifications
+- Search
+- Pinned Posts
+- The Timeline
+- Monitoring: metrics, tracing, and health checks.
+
+## The Code
+Parakeet is implemented in Rust, using Postgres as a database, Redis for caching and queue processing, RocksDB for
+aggregation, and Diesel for migrations and querying.
+
+This repo is one big Rust workspace, containing nearly everything required to run and support the AppView.
+
+### Packages
+- consumer: Relay indexer, Label consumer, Backfiller. Takes raw records in from repos and stores them.
+- dataloader-rs: a vendored fork of https://github.com/cksac/dataloader-rs, with some tweaks to fit caching requirements.
+- did-resolver: A did:plc and did:web resolver using hickory and reqwest. Supports custom PLC directories.
+- lexica: Rust types for the relevant lexicons[sic] for Bluesky.
+- parakeet: The core AppView server code, using Axum and Diesel.
+- parakeet-db: Database types and models, plus the Diesel schema.
+- parakeet-index: Stats aggregator based on RocksDB. Uses gRPC with tonic.
+- parakeet-lexgen: A WIP code generator for Lexicon in Rust. Not in use.
+
+There is also a dependency on a fork of [jsonwebtoken](https://gitlab.com/parakeet-social/jsonwebtoken) until upstream
+supports ES256K.
+
+## Running
+Prebuilt docker images are published (semi-)automatically by GitLab CI at https://gitlab.com/parakeet-social/parakeet.
+Use `registry.gitlab.com/parakeet-social/parakeet/[package]:[branch]` in your docker-compose.yml. There is currently no
+versioning until the project is more stable (sorry).
+You can also just build with cargo.
+
+To run, you'll need Postgres (version 16 or higher), Redis (or a Redis-compatible store), consumer, parakeet, and parakeet-index.
+
+### Configuring
+There are quite a lot of environment variables, although sensible defaults are provided where possible. Variables are
+prefixed with `PK`, `PKC`, or `PKI` depending on whether they're used in Parakeet, Consumer, or parakeet-index, respectively.
+Some are common to two or three parts, and are marked accordingly.
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| (PK/PKC)_INDEX_URI | n/a | Required. URI of the parakeet-index instance in format `[host]:[port]` |
+| (PK/PKC)_REDIS_URI | n/a | Required. URI of Redis (or compatible) in format `redis://[host]:[port]` |
+| (PK/PKC)_PLC_DIRECTORY | `https://plc.directory` | Optional. A PLC mirror or different instance to use when resolving did:plc. |
+| PKC_DATABASE__URL | n/a | Required. URI of Postgres in format `postgres://[user]:[pass]@[host]:[port]/[db]` |
+| PKC_UA_CONTACT | n/a | Recommended. Some contact details (email / bluesky handle / website) to add to the User-Agent. |
+| PKC_LABEL_SOURCE | n/a | Required if consuming labels. A labeler or label relay to consume. |
+| PKC_RESUME_PATH | n/a | Required if consuming the relay or label firehose. Where to store the cursor data. |
+| PKC_INDEXER__RELAY_SOURCE | n/a | Required if consuming a relay. Relay to consume from. |
+| PKC_INDEXER__HISTORY_MODE | n/a | Required if consuming a relay. `backfill_history` or `realtime`, depending on whether you plan to backfill when consuming record data from a relay. |
+| PKC_INDEXER__INDEXER_WORKERS | 4 | How many workers to spread indexing work between. 4 or 6 usually works, depending on load. Ensure you have enough DB connections available. |
+| PKC_INDEXER__START_COMMIT_SEQ | n/a | Optionally, the relay sequence to start consuming from. Overridden by the data in PKC_RESUME_PATH, so clear that first if you reset. |
+| PKC_INDEXER__SKIP_HANDLE_VALIDATION | false | Whether the indexer should SKIP validating handles from `#identity` events. |
+| PKC_INDEXER__REQUEST_BACKFILL | false | Whether the indexer should request backfill when relevant. Only applies when `backfill_history` is set. You likely want TRUE, unless you're manually controlling backfill queues. |
+| PKC_BACKFILL__WORKERS | 4 | How many workers to use when backfilling into the DB. Ensure you have enough DB connections available, as one is created per worker. |
+| PKC_BACKFILL__SKIP_AGGREGATION | false | Whether to skip sending aggregation to parakeet-index. Does not remove the index requirement. Useful when developing. |
+| PKC_BACKFILL__DOWNLOAD_WORKERS | 25 | How many workers to use to download repos for backfilling. |
+| PKC_BACKFILL__DOWNLOAD_BUFFER | 25000 | How many repos to download and queue. |
+| PKC_BACKFILL__DOWNLOAD_TMP_DIR | n/a | Where to download repos to. Ensure there is enough space. |
+| (PK/PKI)_SERVER__BIND_ADDRESS | `0.0.0.0` | Address for the server to bind to. For index outside of docker, you probably want loopback, as there is no auth. |
+| (PK/PKI)_SERVER__PORT | PK: 6000, PKI: 6001 | Port for the server to bind to. |
+| (PK/PKI)_DATABASE_URL | n/a | Required. URI of Postgres in format `postgres://[user]:[pass]@[host]:[port]/[db]` |
+| PK_SERVICE__DID | n/a | DID for the AppView in did:web. (did:plc is possible but untested) |
+| PK_SERVICE__PUBLIC_KEY | n/a | Public key for the AppView. Unsure if actually used, but may be required by a PDS. |
+| PK_SERVICE__ENDPOINT | n/a | HTTPS publicly accessible endpoint for the AppView. |
+| PK_TRUSTED_VERIFIERS | n/a | Optionally, trusted verifiers to use. For many, join with `,`. |
+| PK_CDN__BASE | `https://cdn.bsky.app` | Optionally, base URL for a Bluesky-compatible CDN. |
+| PK_CDN__VIDEO_BASE | `https://video.bsky.app` | Optionally, base URL for a Bluesky-compatible video CDN. |
+| PK_DID_ALLOWLIST | n/a | Optional. If set, controls which DIDs can access the AppView. For many, join with `,`. |
+| PK_MIGRATE | false | Set to TRUE to run database migrations automatically on start. |
+| PKI_INDEX_DB_PATH | n/a | Required. Location to store the index database. |
+
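
> Editor's note: for orientation, here is a sketch of what a minimal consumer-side environment could look like, assembled from the variables in the table above. Every value is a placeholder assumption, not a tested configuration, and the relay URL format in particular is hypothetical.

```
# Hypothetical minimal consumer configuration (placeholder values)
PKC_DATABASE__URL=postgres://parakeet:secret@localhost:5432/parakeet
PKC_REDIS_URI=redis://localhost:6379
PKC_INDEX_URI=localhost:6001
PKC_RESUME_PATH=/var/lib/parakeet/cursor
PKC_INDEXER__RELAY_SOURCE=wss://relay.example.com
PKC_INDEXER__HISTORY_MODE=realtime
```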
+11  consumer/justfile
···
+@release:
+    cargo build --release
+
+@lint:
+    cargo clippy
+
+@run +params:
+    cargo run -- {{params}}
+
+@docker platform='linux/amd64' branch='main':
+    docker buildx build --platform {{platform}} -t registry.gitlab.com/parakeet-social/parakeet/consumer:{{branch}} . -f consumer/Dockerfile
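
> Editor's note: assuming a standard `just` setup, parameterized recipes like these are invoked positionally — e.g. `just run backfill` (hypothetical subcommand) passes `backfill` through to `cargo run --`, and `just docker linux/arm64 dev` overrides the `platform` and `branch` defaults.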
-1  consumer/run.sh
···
-cargo run
+14 -8  consumer/src/backfill/downloader.rs
···
         Ok(Some(did_doc)) => {
             let Some(service) = did_doc.find_service_by_id(PDS_SERVICE_ID) else {
                 tracing::warn!("bad DID doc for {did}");
-                db::backfill_job_write(&mut conn, &did, "failed.resolve")
+                db::backfill_job_write(&mut conn, &did, "failed.resolve.did_svc")
                     .await
                     .unwrap();
                 continue;
···
             }
         }
         Ok(None) => {
-            tracing::warn!(did, "bad DID doc");
-            db::backfill_job_write(&mut conn, &did, "failed.resolve")
+            tracing::warn!(did, "bad/missing DID doc");
+            db::actor_set_sync_status(&mut conn, &did, ActorSyncState::Dirty, Utc::now())
+                .await
+                .unwrap();
+            db::backfill_job_write(&mut conn, &did, "failed.resolve.did_doc")
                 .await
                 .unwrap();
         }
         Err(e) => {
             tracing::error!(did, "failed to resolve DID doc: {e}");
-            db::backfill_job_write(&mut conn, &did, "failed.resolve")
+            db::actor_set_sync_status(&mut conn, &did, ActorSyncState::Dirty, Utc::now())
+                .await
+                .unwrap();
+            db::backfill_job_write(&mut conn, &did, "failed.resolve.did")
                 .await
                 .unwrap();
         }
···
         Ok(false) => continue,
         Err(e) => {
             tracing::error!(pds, did, "failed to check repo status: {e}");
-            db::backfill_job_write(&mut conn, &did, "failed.resolve")
+            db::backfill_job_write(&mut conn, &did, "failed.resolve.status")
                 .await
                 .unwrap();
             continue;
···
     if let Some(handle) = maybe_handle {
         if let Err(e) = resolve_and_set_handle(&conn, &resolver, &did, &handle).await {
             tracing::error!(pds, did, "failed to resolve handle: {e}");
-            db::backfill_job_write(&mut conn, &did, "failed.resolve")
+            db::backfill_job_write(&mut conn, &did, "failed.resolve.handle")
                 .await
                 .unwrap();
         }
···
     pds: &str,
     did: &str,
 ) -> eyre::Result<Option<(i32, i32)>> {
-    let mut file = tokio::fs::File::create_new(tmp_dir.join(did)).await?;
-
     let res = http
         .get(format!("{pds}/xrpc/com.atproto.sync.getRepo?did={did}"))
         .send()
         .await?
         .error_for_status()?;
+
+    let mut file = tokio::fs::File::create_new(tmp_dir.join(did)).await?;

     let headers = res.headers();
     let ratelimit_rem = header_to_int(headers, "ratelimit-remaining");
+11 -15  consumer/src/backfill/mod.rs
···
 use deadpool_postgres::{Object, Pool, Transaction};
 use did_resolver::Resolver;
 use ipld_core::cid::Cid;
+use lexica::StrongRef;
 use metrics::counter;
 use parakeet_db::types::{ActorStatus, ActorSyncState};
 use redis::aio::MultiplexedConnection;
···
         index_client: Option<parakeet_index::Client>,
         opts: BackfillConfig,
     ) -> eyre::Result<Self> {
-        let semaphore = Arc::new(Semaphore::new(opts.backfill_workers as usize));
+        let semaphore = Arc::new(Semaphore::new(opts.workers as usize));

         Ok(BackfillManager {
             pool,
···
     }
 }

-#[instrument(skip(conn, inner))]
+#[instrument(skip(conn, rc, inner))]
 async fn backfill_actor(
     conn: &mut Object,
     rc: &mut MultiplexedConnection,
···

 #[derive(Debug, Default)]
 struct CopyStore {
-    likes: Vec<(
-        String,
-        records::StrongRef,
-        Option<records::StrongRef>,
-        DateTime<Utc>,
-    )>,
+    likes: Vec<(String, StrongRef, Option<StrongRef>, DateTime<Utc>)>,
     posts: Vec<(String, Cid, records::AppBskyFeedPost)>,
-    reposts: Vec<(
-        String,
-        records::StrongRef,
-        Option<records::StrongRef>,
-        DateTime<Utc>,
-    )>,
+    reposts: Vec<(String, StrongRef, Option<StrongRef>, DateTime<Utc>)>,
     blocks: Vec<(String, String, DateTime<Utc>)>,
     follows: Vec<(String, String, DateTime<Utc>)>,
     list_items: Vec<(String, records::AppBskyGraphListItem)>,
     verifications: Vec<(String, Cid, records::AppBskyGraphVerification)>,
+    threadgates: Vec<(String, Cid, records::AppBskyFeedThreadgate)>, // not COPY'd but needs to be kept until last.
     records: Vec<(String, Cid)>,
 }

 impl CopyStore {
     async fn submit(self, t: &mut Transaction<'_>, did: &str) -> Result<(), tokio_postgres::Error> {
         db::copy::copy_likes(t, did, self.likes).await?;
-        db::copy::copy_posts(t, did, self.posts).await?;
         db::copy::copy_reposts(t, did, self.reposts).await?;
         db::copy::copy_blocks(t, did, self.blocks).await?;
         db::copy::copy_follows(t, did, self.follows).await?;
         db::copy::copy_list_items(t, self.list_items).await?;
         db::copy::copy_verification(t, did, self.verifications).await?;
+        db::copy::copy_posts(t, did, self.posts).await?;
+        for (at_uri, cid, record) in self.threadgates {
+            db::threadgate_enforce_backfill(t, did, &record).await?;
+            db::threadgate_upsert(t, &at_uri, cid, record).await?;
+        }
         db::copy::copy_records(t, did, self.records).await?;

         Ok(())
+16 -3  consumer/src/backfill/repo.rs
···
 use super::{
-    types::{CarCommitEntry, CarEntry},
+    types::{CarCommitEntry, CarEntry, CarRecordEntry},
     CopyStore,
 };
 use crate::indexer::records;
 use crate::indexer::types::{AggregateDeltaStore, RecordTypes};
+use crate::utils::at_uri_is_by;
 use crate::{db, indexer};
 use deadpool_postgres::Transaction;
 use ipld_core::cid::Cid;
···
             CarEntry::Commit(_) => {
                 tracing::warn!("got commit entry that was not in root")
             }
-            CarEntry::Record(record) => {
+            CarEntry::Record(CarRecordEntry::Known(record)) => {
                 if let Some(path) = mst_nodes.remove(&cid) {
                     record_index(t, rc, &mut copies, &mut deltas, repo, &path, cid, record).await?;
                 } else {
                     records.insert(cid, record);
                 }
+            }
+            CarEntry::Record(CarRecordEntry::Other { ty }) => {
+                tracing::debug!("repo contains unknown record type: {ty} ({cid})");
             }
             CarEntry::Mst(mst) => {
                 let mut out = Vec::with_capacity(mst.e.len());
···
         db::maintain_self_labels(t, did, Some(cid), &at_uri, labels).await?;
     }
     if let Some(embed) = rec.embed.clone().and_then(|embed| embed.into_bsky()) {
-        db::post_embed_insert(t, &at_uri, embed, rec.created_at).await?;
+        db::post_embed_insert(t, &at_uri, embed, rec.created_at, true).await?;
     }

     deltas.incr(did, AggregateType::ProfilePost).await;
···
             copies
                 .reposts
                 .push((rkey.to_string(), rec.subject, rec.via, rec.created_at));
+        }
+        RecordTypes::AppBskyFeedThreadgate(record) => {
+            if !at_uri_is_by(&record.post, did) {
+                tracing::warn!("tried to create a threadgate on a post we don't control!");
+                return Ok(());
+            }
+
+            copies.push_record(&at_uri, cid);
+            copies.threadgates.push((at_uri, cid, record));
         }
         RecordTypes::AppBskyGraphBlock(rec) => {
             copies.push_record(&at_uri, cid);
+11 -1  consumer/src/backfill/types.rs
···
 pub enum CarEntry {
     Mst(CarMstEntry),
     Commit(CarCommitEntry),
-    Record(RecordTypes),
+    Record(CarRecordEntry),
 }

 #[derive(Debug, Deserialize)]
···
     pub rev: String,
     pub prev: Option<Cid>,
     pub sig: ByteBuf,
+}
+
+#[derive(Debug, Deserialize)]
+#[serde(untagged)]
+pub enum CarRecordEntry {
+    Known(RecordTypes),
+    Other {
+        #[serde(rename = "$type")]
+        ty: String,
+    },
 }

 #[derive(Debug, Deserialize)]
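
> Editor's note: the `CarRecordEntry` fallback above leans on serde's untagged-enum behaviour — variants are tried in order, so a record only falls through to `Other` when its `$type` matches nothing in `RecordTypes`. A self-contained toy (with a stand-in `RecordTypes`, and JSON in place of the CAR file's DAG-CBOR) behaves the same way:

```rust
use serde::Deserialize;

// Stand-in for the real RecordTypes enum, which is tagged by "$type".
#[derive(Debug, Deserialize)]
#[serde(tag = "$type")]
enum RecordTypes {
    #[serde(rename = "app.bsky.feed.post")]
    Post { text: String },
}

#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum CarRecordEntry {
    // Tried first: succeeds only for known "$type" values.
    Known(RecordTypes),
    // Fallback: captures just the type string of unknown records.
    Other {
        #[serde(rename = "$type")]
        ty: String,
    },
}

fn main() {
    let known: CarRecordEntry =
        serde_json::from_str(r#"{"$type":"app.bsky.feed.post","text":"hi"}"#).unwrap();
    let unknown: CarRecordEntry =
        serde_json::from_str(r#"{"$type":"com.example.widget","size":3}"#).unwrap();
    println!("{known:?}");   // Known(Post { text: "hi" })
    println!("{unknown:?}"); // Other { ty: "com.example.widget" }
}
```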
+2 -2  consumer/src/config.rs
···
     pub relay_source: String,
     pub history_mode: HistoryMode,
     #[serde(default = "default_indexer_workers")]
-    pub indexer_workers: u8,
+    pub workers: u8,
     pub start_commit_seq: Option<u64>,
     /// Whether to resolve handles as part of `#identity` events.
     /// You can use this to move handle resolution out of event handling and into another place.
···
 #[derive(Clone, Debug, Deserialize)]
 pub struct BackfillConfig {
     #[serde(default = "default_backfill_workers")]
-    pub backfill_workers: u8,
+    pub workers: u8,
     #[serde(default)]
     pub skip_aggregation: bool,
     #[serde(default = "default_download_workers")]
+1 -1  consumer/src/db/backfill.rs
···
     status: &str,
 ) -> PgExecResult {
     conn.execute(
-        "INSERT INTO backfill_jobs (did, status) VALUES ($1, $2)",
+        "INSERT INTO backfill_jobs (did, status) VALUES ($1, $2) ON CONFLICT (did) DO UPDATE SET status = $2, updated_at = NOW()",
         &[&did, &status],
     )
     .await
+40 -9  consumer/src/db/copy.rs
···
 use super::PgExecResult;
 use crate::indexer::records;
-use crate::utils::strongref_to_parts;
+use crate::utils::{extract_mentions_and_tags, merge_tags, strongref_to_parts};
 use chrono::prelude::*;
 use deadpool_postgres::Transaction;
 use futures::pin_mut;
 use ipld_core::cid::Cid;
+use lexica::StrongRef;
 use tokio_postgres::binary_copy::BinaryCopyInWriter;
 use tokio_postgres::types::Type;

···
     Type::TEXT,
     Type::TIMESTAMP,
 ];
-type StrongRefRow = (
-    String,
-    records::StrongRef,
-    Option<records::StrongRef>,
-    DateTime<Utc>,
-);
+type StrongRefRow = (String, StrongRef, Option<StrongRef>, DateTime<Utc>);

 // SubjectRefs are used in both blocks and follows
 const SUBJECT_TYPES: &[Type] = &[Type::TEXT, Type::TEXT, Type::TEXT, Type::TIMESTAMP];
···
     .await
 }

-const POST_STMT: &str = "COPY posts_tmp (at_uri, cid, did, record, content, facets, languages, tags, parent_uri, parent_cid, root_uri, root_cid, embed, embed_subtype, created_at) FROM STDIN (FORMAT binary)";
+const POST_STMT: &str = "COPY posts_tmp (at_uri, cid, did, record, content, facets, languages, tags, parent_uri, parent_cid, root_uri, root_cid, embed, embed_subtype, mentions, created_at) FROM STDIN (FORMAT binary)";
 const POST_TYPES: &[Type] = &[
     Type::TEXT,
     Type::TEXT,
···
     Type::TEXT,
     Type::TEXT,
     Type::TEXT,
+    Type::TEXT_ARRAY,
     Type::TIMESTAMP,
 ];
 pub async fn copy_posts(
···

     for (at_uri, cid, post) in data {
         let record = serde_json::to_value(&post).unwrap();
+        let (mentions, tags) = post
+            .facets
+            .as_ref()
+            .map(|v| extract_mentions_and_tags(v))
+            .unzip();
         let facets = post.facets.and_then(|v| serde_json::to_value(v).ok());
         let embed = post.embed.as_ref().map(|v| v.as_str());
         let embed_subtype = post.embed.as_ref().and_then(|v| v.subtype());
         let (parent_uri, parent_cid) = strongref_to_parts(post.reply.as_ref().map(|v| &v.parent));
         let (root_uri, root_cid) = strongref_to_parts(post.reply.as_ref().map(|v| &v.root));

+        let tags = merge_tags(tags, post.tags);
+
         let writer = writer.as_mut();
         writer
             .write(&[
···
                 &post.text,
                 &facets,
                 &post.langs.unwrap_or_default(),
-                &post.tags.unwrap_or_default(),
+                &tags,
                 &parent_uri,
                 &parent_cid,
                 &root_uri,
                 &root_cid,
                 &embed,
                 &embed_subtype,
+                &mentions,
                 &post.created_at.naive_utc(),
             ])
             .await?;
     }

     writer.finish().await?;
+
+    let threadgated: Vec<(String, String, DateTime<Utc>)> = conn
+        .query(
+            "SELECT root_uri, p.at_uri, p.created_at FROM posts_tmp p INNER JOIN threadgates t ON root_uri = post_uri WHERE t.allow IS NOT NULL",
+            &[],
+        )
+        .await?
+        .into_iter()
+        .map(|v| (v.get(0), v.get(1), v.get(2))).collect();
+
+    for (root, post, created_at) in threadgated {
+        match super::post_enforce_threadgate(conn, &root, did, created_at, true).await {
+            Ok(true) => {
+                conn.execute(
+                    "UPDATE posts_tmp SET violates_threadgate=TRUE WHERE at_uri=$1",
+                    &[&post],
+                )
+                .await?;
+            }
+            Ok(false) => continue,
+            Err(e) => {
+                tracing::error!("failed to check threadgate enforcement: {e}");
+                continue;
+            }
+        }
+    }

     conn.execute("INSERT INTO posts (SELECT * FROM posts_tmp)", &[])
         .await
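
> Editor's note: for readers unfamiliar with the pattern used throughout this file, `BinaryCopyInWriter` streams rows into Postgres over a binary `COPY ... FROM STDIN`. A minimal, self-contained sketch — the `pairs` table and the already-connected `tokio_postgres::Client` are assumptions for illustration:

```rust
use futures::pin_mut;
use tokio_postgres::binary_copy::BinaryCopyInWriter;
use tokio_postgres::types::{ToSql, Type};

// Bulk-load (name, value) rows via binary COPY; returns the number of rows written.
async fn copy_pairs(
    client: &tokio_postgres::Client,
    rows: &[(String, i64)],
) -> Result<u64, tokio_postgres::Error> {
    let sink = client
        .copy_in("COPY pairs (name, value) FROM STDIN (FORMAT binary)")
        .await?;
    // Column types must match the COPY column list, in order.
    let writer = BinaryCopyInWriter::new(sink, &[Type::TEXT, Type::INT8]);
    pin_mut!(writer);

    for (name, value) in rows {
        let row: [&(dyn ToSql + Sync); 2] = [name, value];
        writer.as_mut().write(&row).await?;
    }

    // finish() flushes the stream and completes the COPY.
    writer.finish().await
}
```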
+208  consumer/src/db/gates.rs
···
+use super::{PgExecResult, PgResult};
+use crate::indexer::records::{
+    AppBskyFeedThreadgate, ThreadgateRule, THREADGATE_RULE_FOLLOWER, THREADGATE_RULE_FOLLOWING,
+    THREADGATE_RULE_LIST, THREADGATE_RULE_MENTION,
+};
+use chrono::prelude::*;
+use chrono::{DateTime, Utc};
+use deadpool_postgres::GenericClient;
+use std::collections::HashSet;
+
+pub async fn post_enforce_threadgate<C: GenericClient>(
+    conn: &mut C,
+    root: &str,
+    post_author: &str,
+    post_created_at: DateTime<Utc>,
+    is_backfill: bool,
+) -> PgResult<bool> {
+    // check if the root and the current post are the same author
+    // strip "at://" then break into parts by '/'
+    let parts = root[5..].split('/').collect::<Vec<_>>();
+    let root_author = parts[0];
+    if root_author == post_author {
+        return Ok(false);
+    }
+
+    let tg_data = super::threadgate_get(conn, root).await?;
+
+    let Some((created_at, allow, allow_lists)) = tg_data else {
+        return Ok(false);
+    };
+
+    // when backfilling, there's no point continuing if the record is dated before the threadgate
+    if is_backfill && post_created_at < created_at {
+        return Ok(false);
+    }
+
+    if allow.is_empty() {
+        return Ok(true);
+    }
+
+    let allow: HashSet<String> = HashSet::from_iter(allow);
+
+    if allow.contains(THREADGATE_RULE_FOLLOWER) || allow.contains(THREADGATE_RULE_FOLLOWING) {
+        let profile_state: Option<(bool, bool)> = conn
+            .query_opt(
+                "SELECT following IS NOT NULL, followed IS NOT NULL FROM profile_states WHERE did=$1 AND subject=$2",
+                &[&root_author, &post_author],
+            )
+            .await?
+            .map(|v| (v.get(0), v.get(1)));
+
+        if let Some((following, followed)) = profile_state {
+            if allow.contains(THREADGATE_RULE_FOLLOWER) && followed {
+                return Ok(false);
+            }
+
+            if allow.contains(THREADGATE_RULE_FOLLOWING) && following {
+                return Ok(false);
+            }
+        }
+    }
+
+    // check mentions
+    if allow.contains(THREADGATE_RULE_MENTION) {
+        let mentions: Vec<String> = conn
+            .query_opt("SELECT mentions FROM posts WHERE at_uri=$1", &[&root])
+            .await?
+            .map(|r| r.get(0))
+            .unwrap_or_default();
+
+        if mentions.contains(&post_author.to_owned()) {
+            return Ok(false);
+        }
+    }
+
+    if allow.contains(THREADGATE_RULE_LIST) {
+        if allow_lists.is_empty() {
+            return Ok(true);
+        }
+
+        let count: i64 = conn
+            .query_one(
+                "SELECT count(*) FROM list_items WHERE list_uri=ANY($1) AND subject=$2",
+                &[&allow_lists, &post_author],
+            )
+            .await?
+            .get(0);
+        if count != 0 {
+            return Ok(false);
+        }
+    }
+
+    Ok(true)
+}
+
+pub async fn postgate_maintain_detaches<C: GenericClient>(
+    conn: &mut C,
+    post: &str,
+    detached: &[String],
+    disable_effective: Option<NaiveDateTime>,
+) -> PgExecResult {
+    conn.execute(
+        "SELECT maintain_postgates($1, $2, $3)",
+        &[&post, &detached, &disable_effective],
+    )
+    .await
+}
+
+// variant of post_enforce_threadgate that runs when backfilling to clean up any posts already in DB
+pub async fn threadgate_enforce_backfill<C: GenericClient>(
+    conn: &mut C,
+    root_author: &str,
+    threadgate: &AppBskyFeedThreadgate,
+) -> PgExecResult {
+    // pull out allow - if it's None we can skip this gate.
+    let Some(allow) = threadgate.allow.as_ref() else {
+        return Ok(0);
+    };
+
+    let root = &threadgate.post;
+
+    if allow.is_empty() {
+        // blind update everything
+        return conn.execute(
+            "UPDATE posts SET violates_threadgate=TRUE WHERE root_uri=$1 AND did != $2 AND created_at >= $3",
+            &[&root, &root_author, &threadgate.created_at],
+        ).await;
+    }
+
+    // pull authors with our root_uri where the author is not the root author and are dated after created_at
+    // this is mutable because we'll remove ALLOWED dids
+    let mut dids: HashSet<String> = conn
+        .query(
+            "SELECT DISTINCT did FROM posts WHERE root_uri=$1 AND did != $2 AND created_at >= $3",
+            &[&root, &root_author, &threadgate.created_at],
+        )
+        .await?
+        .into_iter()
+        .map(|row| row.get(0))
+        .collect();
+
+    // this will be empty if there are no replies.
+    if dids.is_empty() {
+        return Ok(0);
+    }
+
+    let allowed_lists = allow
+        .iter()
+        .filter_map(|rule| match rule {
+            ThreadgateRule::List { list } => Some(list),
+            _ => None,
+        })
+        .collect::<Vec<_>>();
+
+    let allow: HashSet<_> = HashSet::from_iter(allow.into_iter().map(|v| v.as_str()));
+
+    if allow.contains(THREADGATE_RULE_FOLLOWER) && !dids.is_empty() {
+        let current_dids: Vec<_> = dids.iter().collect();
+
+        let res = conn.query(
+            "SELECT subject FROM profile_states WHERE did=$1 AND subject=ANY($2) AND followed IS NOT NULL",
+            &[&root_author, &current_dids],
+        ).await?;
+
+        dids = &dids - &HashSet::from_iter(res.into_iter().map(|r| r.get(0)));
+    }
+
+    if allow.contains(THREADGATE_RULE_FOLLOWING) && !dids.is_empty() {
+        let current_dids: Vec<_> = dids.iter().collect();
+
+        let res = conn.query(
+            "SELECT subject FROM profile_states WHERE did=$1 AND subject=ANY($2) AND following IS NOT NULL",
+            &[&root_author, &current_dids],
+        ).await?;
+
+        dids = &dids - &HashSet::from_iter(res.into_iter().map(|r| r.get(0)));
+    }
+
+    if allow.contains(THREADGATE_RULE_MENTION) && !dids.is_empty() {
+        let mentions: Vec<String> = conn
+            .query_opt("SELECT mentions FROM posts WHERE at_uri=$1", &[&root])
+            .await?
+            .map(|r| r.get(0))
+            .unwrap_or_default();
+
+        dids = &dids - &HashSet::from_iter(mentions);
+    }
+
+    if allow.contains(THREADGATE_RULE_LIST) && !dids.is_empty() {
+        let current_dids: Vec<_> = dids.iter().collect();
+
+        let res = conn
+            .query(
+                "SELECT subject FROM list_items WHERE list_uri = ANY($1) AND subject = ANY($2)",
+                &[&allowed_lists, &current_dids],
+            )
+            .await?;
+
+        dids = &dids - &HashSet::from_iter(res.into_iter().map(|r| r.get(0)));
+    }
+
+    let dids = dids.into_iter().collect::<Vec<_>>();
+
+    conn.execute(
+        "UPDATE posts SET violates_threadgate=TRUE WHERE root_uri = $1 AND did = ANY($2) AND created_at >= $3",
+        &[&threadgate.post, &dids, &threadgate.created_at],
+    ).await
+}
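
> Editor's note: several functions above (and `bookmark_upsert` later in this diff) parse AT-URIs by stripping `at://` and splitting on `/`. A tiny illustration of that convention — `at_uri_parts` is a hypothetical helper for this note, not something from the codebase:

```rust
/// Split "at://<authority>/<collection>/<rkey>" into its three parts.
fn at_uri_parts(at_uri: &str) -> Option<(&str, &str, &str)> {
    let rest = at_uri.strip_prefix("at://")?;
    let mut it = rest.splitn(3, '/');
    Some((it.next()?, it.next()?, it.next()?))
}

fn main() {
    let uri = "at://did:plc:abc123/app.bsky.feed.post/3kabc";
    let (did, collection, rkey) = at_uri_parts(uri).unwrap();
    assert_eq!(did, "did:plc:abc123");            // parts[0] in the code above
    assert_eq!(collection, "app.bsky.feed.post"); // parts[1] in the code above
    assert_eq!(rkey, "3kabc");
    println!("{did} {collection} {rkey}");
}
```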
+2  consumer/src/db/mod.rs
+131
-46
consumer/src/db/record.rs
+131
-46
consumer/src/db/record.rs
···
1
1
use super::{PgExecResult, PgOptResult, PgResult};
2
2
use crate::indexer::records::*;
3
-
use crate::utils::{blob_ref, strongref_to_parts};
3
+
use crate::utils::{blob_ref, extract_mentions_and_tags, merge_tags, strongref_to_parts};
4
4
use chrono::prelude::*;
5
5
use deadpool_postgres::GenericClient;
6
6
use ipld_core::cid::Cid;
7
+
use lexica::community_lexicon::bookmarks::Bookmark;
8
+
use std::collections::HashSet;
7
9
8
10
pub async fn record_upsert<C: GenericClient>(
9
11
conn: &mut C,
···
20
22
pub async fn record_delete<C: GenericClient>(conn: &mut C, at_uri: &str) -> PgExecResult {
21
23
conn.execute("DELETE FROM records WHERE at_uri=$1", &[&at_uri])
22
24
.await
25
+
}
26
+
27
+
pub async fn bookmark_upsert<C: GenericClient>(
28
+
conn: &mut C,
29
+
rkey: &str,
30
+
repo: &str,
31
+
rec: Bookmark,
32
+
) -> PgExecResult {
33
+
// strip "at://" then break into parts by '/'
34
+
let rec_type = match rec.subject.strip_prefix("at://") {
35
+
Some(at_uri) => at_uri.split('/').collect::<Vec<_>>()[1],
36
+
None => "$uri",
37
+
};
38
+
39
+
conn.execute(
40
+
include_str!("sql/bookmarks_upsert.sql"),
41
+
&[
42
+
&repo,
43
+
&rkey,
44
+
&rec.subject,
45
+
&rec_type,
46
+
&rec.tags,
47
+
&rec.created_at,
48
+
],
49
+
)
50
+
.await
51
+
}
52
+
53
+
pub async fn bookmark_delete<C: GenericClient>(
54
+
conn: &mut C,
55
+
rkey: &str,
56
+
repo: &str,
57
+
) -> PgExecResult {
58
+
conn.execute(
59
+
"DELETE FROM bookmarks WHERE rkey=$1 AND did=$2",
60
+
&[&rkey, &repo],
61
+
)
62
+
.await
23
63
}
24
64
25
65
pub async fn block_insert<C: GenericClient>(
···
278
318
repo: &str,
279
319
cid: Cid,
280
320
rec: AppBskyFeedPost,
321
+
is_backfill: bool,
281
322
) -> PgExecResult {
282
323
let cid = cid.to_string();
283
324
let record = serde_json::to_value(&rec).unwrap();
325
+
let (mentions, tags) = rec
326
+
.facets
327
+
.as_ref()
328
+
.map(|v| extract_mentions_and_tags(v))
329
+
.unzip();
284
330
let facets = rec.facets.and_then(|v| serde_json::to_value(v).ok());
285
331
let (parent_uri, parent_cid) = strongref_to_parts(rec.reply.as_ref().map(|v| &v.parent));
286
332
let (root_uri, root_cid) = strongref_to_parts(rec.reply.as_ref().map(|v| &v.root));
287
333
let embed = rec.embed.as_ref().map(|v| v.as_str());
288
334
let embed_subtype = rec.embed.as_ref().and_then(|v| v.subtype());
289
335
336
+
// if there is a root, we need to check for the presence of a threadgate.
337
+
let violates_threadgate = match &root_uri {
338
+
Some(root) => {
339
+
super::post_enforce_threadgate(conn, root, repo, rec.created_at, is_backfill).await?
340
+
}
341
+
None => false,
342
+
};
343
+
344
+
let tags = merge_tags(tags, rec.tags);
345
+
290
346
let count = conn
291
347
.execute(
292
348
include_str!("sql/post_insert.sql"),
···
298
354
&rec.text,
299
355
&facets,
300
356
&rec.langs.unwrap_or_default(),
301
-
&rec.tags.unwrap_or_default(),
357
+
&tags,
302
358
&parent_uri,
303
359
&parent_cid,
304
360
&root_uri,
305
361
&root_cid,
306
362
&embed,
307
363
&embed_subtype,
364
+
&mentions,
365
+
&violates_threadgate,
308
366
&rec.created_at,
309
367
],
310
368
)
311
369
.await?;
312
370
313
371
if let Some(embed) = rec.embed.and_then(|embed| embed.into_bsky()) {
314
-
post_embed_insert(conn, at_uri, embed, rec.created_at).await?;
372
+
post_embed_insert(conn, at_uri, embed, rec.created_at, is_backfill).await?;
315
373
}
316
374
317
375
Ok(count)
···
341
399
post: &str,
342
400
embed: AppBskyEmbed,
343
401
created_at: DateTime<Utc>,
402
+
is_backfill: bool,
344
403
) -> PgExecResult {
345
404
match embed {
346
405
AppBskyEmbed::Images(embed) => post_embed_image_insert(conn, post, embed).await,
347
406
AppBskyEmbed::Video(embed) => post_embed_video_insert(conn, post, embed).await,
348
407
AppBskyEmbed::External(embed) => post_embed_external_insert(conn, post, embed).await,
349
408
AppBskyEmbed::Record(embed) => {
350
-
post_embed_record_insert(conn, post, embed, created_at).await
409
+
post_embed_record_insert(conn, post, embed, created_at, is_backfill).await
351
410
}
352
411
AppBskyEmbed::RecordWithMedia(embed) => {
353
-
post_embed_record_insert(conn, post, embed.record, created_at).await?;
412
+
post_embed_record_insert(conn, post, embed.record, created_at, is_backfill).await?;
354
413
match *embed.media {
355
414
AppBskyEmbed::Images(embed) => post_embed_image_insert(conn, post, embed).await,
356
415
AppBskyEmbed::Video(embed) => post_embed_video_insert(conn, post, embed).await,
···
371
430
let stmt = conn.prepare("INSERT INTO post_embed_images (post_uri, seq, cid, mime_type, alt, width, height) VALUES ($1, $2, $3, $4, $5, $6, $7)").await?;
372
431
373
432
for (idx, image) in embed.images.iter().enumerate() {
374
-
let cid = image.image.r#ref.to_string();
433
+
let cid = image.image.cid.to_string();
375
434
let width = image.aspect_ratio.as_ref().map(|v| v.width);
376
435
let height = image.aspect_ratio.as_ref().map(|v| v.height);
377
436
···
398
457
post: &str,
399
458
embed: AppBskyEmbedVideo,
400
459
) -> PgExecResult {
401
-
let cid = embed.video.r#ref.to_string();
460
+
let cid = embed.video.cid.to_string();
402
461
let width = embed.aspect_ratio.as_ref().map(|v| v.width);
403
462
let height = embed.aspect_ratio.as_ref().map(|v| v.height);
404
463
···
411
470
let stmt = conn.prepare_cached("INSERT INTO post_embed_video_captions (post_uri, cid, mime_type, language) VALUES ($1, $2, $3, $4)").await?;
412
471
413
472
for caption in captions {
414
-
let cid = caption.file.r#ref.to_string();
473
+
let cid = caption.file.cid.to_string();
415
474
conn.execute(
416
475
&stmt,
417
476
&[&post, &cid, &caption.file.mime_type, &caption.lang],
···
429
488
embed: AppBskyEmbedExternal,
430
489
) -> PgExecResult {
431
490
let thumb_mime = embed.external.thumb.as_ref().map(|v| v.mime_type.clone());
432
-
let thumb_cid = embed.external.thumb.as_ref().map(|v| v.r#ref.to_string());
491
+
let thumb_cid = embed.external.thumb.as_ref().map(|v| v.cid.to_string());
433
492
434
493
conn.execute(
435
494
"INSERT INTO post_embed_ext (post_uri, uri, title, description, thumb_mime_type, thumb_cid) VALUES ($1, $2, $3, $4, $5, $6)",
···
437
496
).await
438
497
}
439
498
499
+
const PG_DISABLE_RULE: &str = "app.bsky.feed.postgate#disableRule";
440
500
async fn post_embed_record_insert<C: GenericClient>(
441
501
conn: &mut C,
442
502
post: &str,
443
503
embed: AppBskyEmbedRecord,
444
504
post_created_at: DateTime<Utc>,
505
+
is_backfill: bool,
445
506
) -> PgExecResult {
446
507
// strip "at://" then break into parts by '/'
447
508
let parts = embed.record.uri[5..].split('/').collect::<Vec<_>>();
448
509
449
510
let detached = if parts[1] == "app.bsky.feed.post" {
450
-
let postgate_effective: Option<DateTime<Utc>> = conn
451
-
.query_opt(
452
-
"SELECT created_at FROM postgates WHERE post_uri=$1",
453
-
&[&post],
454
-
)
455
-
.await?
456
-
.map(|v| v.get(0));
511
+
let pg_data = postgate_get(conn, post).await?;
512
+
513
+
if let Some((effective, detached, rules)) = pg_data {
514
+
let detached: HashSet<String> = HashSet::from_iter(detached);
515
+
let rules: HashSet<String> = HashSet::from_iter(rules);
516
+
let compare_date = match is_backfill {
517
+
true => post_created_at,
518
+
false => Utc::now(),
519
+
};
457
520
458
-
postgate_effective
459
-
.map(|v| Utc::now().min(post_created_at) > v)
460
-
.unwrap_or_default()
521
+
detached.contains(post) || (rules.contains(PG_DISABLE_RULE) && compare_date > effective)
522
+
} else {
523
+
false
524
+
}
461
525
} else {
462
526
false
463
527
};
···
468
532
).await
469
533
}
470
534
535
+
async fn postgate_get<C: GenericClient>(
536
+
conn: &mut C,
537
+
post: &str,
538
+
) -> PgOptResult<(DateTime<Utc>, Vec<String>, Vec<String>)> {
539
+
let res = conn
540
+
.query_opt(
541
+
"SELECT created_at, detached, rules FROM postgates WHERE post_uri=$1",
542
+
&[&post],
543
+
)
544
+
.await?
545
+
.map(|v| (v.get(0), v.get(1), v.get(2)));
546
+
547
+
Ok(res)
548
+
}
549
+
471
550
pub async fn postgate_upsert<C: GenericClient>(
472
551
conn: &mut C,
473
552
at_uri: &str,
···
499
578
.await
500
579
}
501
580
502
-
pub async fn postgate_maintain_detaches<C: GenericClient>(
503
-
conn: &mut C,
504
-
post: &str,
505
-
detached: &[String],
506
-
disable_effective: Option<NaiveDateTime>,
507
-
) -> PgExecResult {
508
-
conn.execute(
509
-
"SELECT maintain_postgates($1, $2, $3)",
510
-
&[&post, &detached, &disable_effective],
511
-
)
512
-
.await
513
-
}
514
-
515
581
pub async fn profile_upsert<C: GenericClient>(
516
582
conn: &mut C,
517
583
repo: &str,
···
537
603
&pinned_cid,
538
604
&joined_sp_uri,
539
605
&joined_sp_cid,
606
+
&rec.pronouns,
607
+
&rec.website,
540
608
&rec.created_at.unwrap_or(Utc::now()).naive_utc(),
541
609
],
542
610
)
···
634
702
let record = serde_json::to_value(&rec).unwrap();
635
703
let thumb = rec.embed.as_ref().and_then(|v| v.external.thumb.clone());
636
704
let thumb_mime = thumb.as_ref().map(|v| v.mime_type.clone());
637
-
let thumb_cid = thumb.as_ref().map(|v| v.r#ref.to_string());
705
+
let thumb_cid = thumb.as_ref().map(|v| v.cid.to_string());
638
706
639
707
conn.execute(
640
708
include_str!("sql/status_upsert.sql"),
···
659
727
.await
660
728
}
661
729
730
+
pub async fn threadgate_get<C: GenericClient>(
731
+
conn: &mut C,
732
+
post: &str,
733
+
) -> PgOptResult<(DateTime<Utc>, Vec<String>, Vec<String>)> {
734
+
let res = conn
735
+
.query_opt(
736
+
"SELECT created_at, allow, allowed_lists FROM threadgates WHERE post_uri=$1 AND allow IS NOT NULL",
737
+
&[&post],
738
+
)
739
+
.await?
740
+
.map(|v| (v.get(0), v.get(1), v.get(2)));
741
+
742
+
Ok(res)
743
+
}
744
+
662
745
pub async fn threadgate_upsert<C: GenericClient>(
663
746
conn: &mut C,
664
747
at_uri: &str,
···
667
750
) -> PgExecResult {
668
751
let record = serde_json::to_value(&rec).unwrap();
669
752
670
-
let allowed_lists = rec
671
-
.allow
672
-
.iter()
673
-
.filter_map(|rule| match rule {
674
-
ThreadgateRule::List { list } => Some(list.clone()),
675
-
_ => None,
676
-
})
677
-
.collect::<Vec<_>>();
753
+
let allowed_lists = rec.allow.as_ref().map(|allow| {
754
+
allow
755
+
.iter()
756
+
.filter_map(|rule| match rule {
757
+
ThreadgateRule::List { list } => Some(list.clone()),
758
+
_ => None,
759
+
})
760
+
.collect::<Vec<_>>()
761
+
});
678
762
679
-
let allow = rec
680
-
.allow
681
-
.into_iter()
682
-
.map(|v| v.as_str().to_string())
683
-
.collect::<Vec<_>>();
763
+
let allow = rec.allow.map(|allow| {
764
+
allow
765
+
.into_iter()
766
+
.map(|v| v.as_str().to_string())
767
+
.collect::<Vec<_>>()
768
+
});
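The switch from a defaulted Vec to Option<Vec<ThreadgateRule>> is load-bearing: in the threadgate lexicon a missing allow array means replies are unrestricted, while an empty array means nobody may reply, so the two cases must stay distinguishable. An illustrative (non-normative) summary:

// Illustration only: why None and Some(vec![]) must not collapse together.
// Call with `rec.allow.as_deref()` from an Option<Vec<String>>.
fn reply_policy(allow: Option<&[String]>) -> &'static str {
    match allow {
        None => "anyone can reply",
        Some([]) => "nobody can reply",
        Some(_) => "only repliers matching the listed rules",
    }
}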
684
769
685
770
conn.execute(
686
771
include_str!("sql/threadgate_upsert.sql"),
+5
consumer/src/db/sql/bookmarks_upsert.sql
+2
-2
consumer/src/db/sql/post_insert.sql
···
1
1
INSERT INTO posts (at_uri, did, cid, record, content, facets, languages, tags, parent_uri, parent_cid, root_uri,
2
-
root_cid, embed, embed_subtype, created_at)
3
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
2
+
root_cid, embed, embed_subtype, mentions, violates_threadgate, created_at)
3
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)
4
4
ON CONFLICT DO NOTHING
+4
-2
consumer/src/db/sql/profile_upsert.sql
···
1
1
INSERT INTO profiles (did, cid, avatar_cid, banner_cid, display_name, description, pinned_uri, pinned_cid,
2
-
joined_sp_uri, joined_sp_cid, created_at)
3
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
2
+
joined_sp_uri, joined_sp_cid, pronouns, website, created_at)
3
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
4
4
ON CONFLICT (did) DO UPDATE SET cid=EXCLUDED.cid,
5
5
avatar_cid=EXCLUDED.avatar_cid,
6
6
banner_cid=EXCLUDED.banner_cid,
···
10
10
pinned_cid=EXCLUDED.pinned_cid,
11
11
joined_sp_uri=EXCLUDED.joined_sp_uri,
12
12
joined_sp_cid=EXCLUDED.joined_sp_cid,
13
+
pronouns=EXCLUDED.pronouns,
14
+
website=EXCLUDED.website,
13
15
indexed_at=NOW()
+14
consumer/src/firehose/mod.rs
···
117
117
118
118
FirehoseEvent::Label(event)
119
119
}
120
+
"#sync" => {
121
+
counter!("firehose_events.total", "event" => "sync").increment(1);
122
+
let event: AtpSyncEvent = serde_ipld_dagcbor::from_reader(&mut reader)?;
123
+
124
+
// advance the cursor seq; it must be strictly increasing
125
+
if self.seq < event.seq {
126
+
self.seq = event.seq;
127
+
} else {
128
+
tracing::error!("Event sequence was not greater than previous seq, exiting. {} <= {}", event.seq, self.seq);
129
+
return Ok(FirehoseOutput::Close);
130
+
}
131
+
132
+
FirehoseEvent::Sync(event)
133
+
}
120
134
_ => {
121
135
tracing::warn!("unknown event type {ty}");
122
136
return Ok(FirehoseOutput::Continue);
+23
consumer/src/firehose/types.rs
···
31
31
Account(AtpAccountEvent),
32
32
Commit(AtpCommitEvent),
33
33
Label(AtpLabelEvent),
34
+
Sync(AtpSyncEvent),
34
35
}
35
36
36
37
#[derive(Debug, Deserialize)]
···
48
49
Suspended,
49
50
Deleted,
50
51
Deactivated,
52
+
Throttled,
53
+
Desynchronized,
51
54
}
52
55
53
56
impl AtpAccountStatus {
···
57
60
AtpAccountStatus::Suspended => "suspended",
58
61
AtpAccountStatus::Deleted => "deleted",
59
62
AtpAccountStatus::Deactivated => "deactivated",
63
+
AtpAccountStatus::Throttled => "throttled",
64
+
AtpAccountStatus::Desynchronized => "desynchronized",
60
65
}
61
66
}
62
67
}
···
68
73
AtpAccountStatus::Suspended => parakeet_db::types::ActorStatus::Suspended,
69
74
AtpAccountStatus::Deleted => parakeet_db::types::ActorStatus::Deleted,
70
75
AtpAccountStatus::Deactivated => parakeet_db::types::ActorStatus::Deactivated,
76
+
AtpAccountStatus::Throttled | AtpAccountStatus::Desynchronized => {
77
+
parakeet_db::types::ActorStatus::Active
78
+
}
71
79
}
72
80
}
73
81
}
···
90
98
pub since: Option<String>,
91
99
pub commit: Cid,
92
100
#[serde(rename = "tooBig")]
101
+
#[deprecated]
93
102
pub too_big: bool,
94
103
#[serde(default)]
95
104
pub blocks: ByteBuf,
96
105
#[serde(default)]
97
106
pub ops: Vec<CommitOp>,
98
107
#[serde(default)]
108
+
#[deprecated]
99
109
pub blobs: Vec<Cid>,
110
+
#[serde(rename = "prevData")]
111
+
pub prev_data: Option<Cid>,
100
112
}
101
113
102
114
#[derive(Debug, Deserialize)]
103
115
pub struct CommitOp {
104
116
pub action: String,
105
117
pub cid: Option<Cid>,
118
+
pub prev: Option<Cid>,
106
119
pub path: String,
107
120
}
108
121
···
124
137
pub seq: u64,
125
138
pub labels: Vec<AtpLabel>,
126
139
}
140
+
141
+
#[derive(Debug, Deserialize)]
142
+
pub struct AtpSyncEvent {
143
+
pub seq: u64,
144
+
pub did: String,
145
+
pub time: DateTime<Utc>,
146
+
pub rev: String,
147
+
#[serde(default)]
148
+
pub blocks: ByteBuf,
149
+
}
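As a rough sketch of how such a body decodes (assuming the frame header naming #sync has already been consumed, as in firehose/mod.rs):

// Hypothetical helper: decode a raw #sync frame body from DAG-CBOR bytes.
fn decode_sync_body(
    body: &[u8],
) -> Result<AtpSyncEvent, serde_ipld_dagcbor::DecodeError<std::convert::Infallible>> {
    serde_ipld_dagcbor::from_slice(body)
}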
+45
-10
consumer/src/indexer/mod.rs
···
1
1
use crate::config::HistoryMode;
2
2
use crate::db;
3
3
use crate::firehose::{
4
-
AtpAccountEvent, AtpCommitEvent, AtpIdentityEvent, CommitOp, FirehoseConsumer, FirehoseEvent,
5
-
FirehoseOutput,
4
+
AtpAccountEvent, AtpCommitEvent, AtpIdentityEvent, AtpSyncEvent, CommitOp, FirehoseConsumer,
5
+
FirehoseEvent, FirehoseOutput,
6
6
};
7
7
use crate::indexer::types::{
8
8
AggregateDeltaStore, BackfillItem, BackfillItemInner, CollectionType, RecordTypes,
9
9
};
10
+
use crate::utils::at_uri_is_by;
10
11
use deadpool_postgres::{Object, Pool, Transaction};
11
12
use did_resolver::Resolver;
12
13
use foldhash::quality::RandomState;
···
107
108
FirehoseEvent::Commit(commit) => {
108
109
index_commit(&mut state, &mut conn, &mut rc, commit).await
109
110
}
111
+
FirehoseEvent::Sync(sync) => {
112
+
process_sync(&state, &mut conn, &mut rc, sync).await
113
+
}
110
114
FirehoseEvent::Label(_) => unreachable!(),
111
115
};
112
116
···
188
192
FirehoseEvent::Identity(identity) => self.hasher.hash_one(&identity.did) % threads,
189
193
FirehoseEvent::Account(account) => self.hasher.hash_one(&account.did) % threads,
190
194
FirehoseEvent::Commit(commit) => self.hasher.hash_one(&commit.repo) % threads,
195
+
FirehoseEvent::Sync(sync) => self.hasher.hash_one(&sync.did) % threads,
191
196
FirehoseEvent::Label(_) => {
192
197
// We handle all labels through direct connections to labelers
193
198
tracing::warn!("got #labels from the relay");
···
201
206
}
202
207
}
203
208
209
+
#[instrument(skip_all, fields(seq = sync.seq, repo = sync.did))]
210
+
async fn process_sync(
211
+
state: &RelayIndexerState,
212
+
conn: &mut Object,
213
+
rc: &mut MultiplexedConnection,
214
+
sync: AtpSyncEvent,
215
+
) -> eyre::Result<()> {
216
+
let Some((sync_state, Some(current_rev))) = db::actor_get_repo_status(conn, &sync.did).await?
217
+
else {
218
+
return Ok(());
219
+
};
220
+
221
+
// only act when already synced; also no point if !do_backfill, since we might not have a worker
222
+
if sync_state == ActorSyncState::Synced && state.do_backfill && sync.rev > current_rev {
223
+
tracing::debug!("triggering backfill due to #sync");
224
+
rc.rpush::<_, _, i32>("backfill_queue", sync.did).await?;
225
+
}
226
+
227
+
Ok(())
228
+
}
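The plain string comparison sync.rev > current_rev works because repo revs are TIDs: fixed-length, base32-sortable timestamps whose lexicographic order matches temporal order. Illustrative (made-up) values:

// Newer TIDs compare greater byte-for-byte, so no parsing is needed.
assert!("3lb2xk5mnds2a" > "3lb2xk2aaaa2a");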
229
+
204
230
#[instrument(skip_all, fields(seq = identity.seq, repo = identity.did))]
205
231
async fn index_identity(
206
232
state: &RelayIndexerState,
···
527
553
rkey: &str,
528
554
) -> eyre::Result<()> {
529
555
match record {
530
-
RecordTypes::AppBskyActorProfile(record) => {
556
+
RecordTypes::AppBskyActorProfile(mut record) => {
531
557
if rkey == "self" {
532
558
let labels = record.labels.clone();
559
+
560
+
// don't allow pinned posts that aren't by us.
561
+
if let Some(pinned) = &record.pinned_post {
562
+
if !at_uri_is_by(&pinned.uri, repo) {
563
+
record.pinned_post = None;
564
+
}
565
+
}
566
+
533
567
db::profile_upsert(conn, repo, cid, record).await?;
534
568
535
569
if let Some(labels) = labels {
···
591
625
});
592
626
593
627
let labels = record.labels.clone();
594
-
db::post_insert(conn, at_uri, repo, cid, record).await?;
628
+
db::post_insert(conn, at_uri, repo, cid, record, false).await?;
595
629
if let Some(labels) = labels {
596
630
db::maintain_self_labels(conn, repo, Some(cid), at_uri, labels).await?;
597
631
}
···
605
639
}
606
640
}
607
641
RecordTypes::AppBskyFeedPostgate(record) => {
608
-
let split_aturi = record.post.rsplitn(4, '/').collect::<Vec<_>>();
609
-
if repo != split_aturi[2] {
642
+
if !at_uri_is_by(&record.post, repo) {
610
643
tracing::warn!("tried to create a postgate on a post we don't control!");
611
644
return Ok(());
612
645
}
···
636
669
db::repost_insert(conn, rkey, repo, record).await?;
637
670
}
638
671
RecordTypes::AppBskyFeedThreadgate(record) => {
639
-
let split_aturi = record.post.rsplitn(4, '/').collect::<Vec<_>>();
640
-
if repo != split_aturi[2] {
672
+
if !at_uri_is_by(&record.post, repo) {
641
673
tracing::warn!("tried to create a threadgate on a post we don't control!");
642
674
return Ok(());
643
675
}
···
677
709
db::list_block_insert(conn, at_uri, repo, record).await?;
678
710
}
679
711
RecordTypes::AppBskyGraphListItem(record) => {
680
-
let split_aturi = record.list.rsplitn(4, '/').collect::<Vec<_>>();
681
-
if repo != split_aturi[2] {
712
+
if !at_uri_is_by(&record.list, repo) {
682
713
// it's also probably a bad idea to log *all* the attempts to do this...
683
714
tracing::warn!("tried to create a listitem on a list we don't control!");
684
715
return Ok(());
···
722
753
db::chat_decl_upsert(conn, repo, record).await?;
723
754
redis::AsyncTypedCommands::del(rc, format!("profile#{repo}")).await?;
724
755
}
756
+
}
757
+
RecordTypes::CommunityLexiconBookmark(record) => {
758
+
db::bookmark_upsert(conn, rkey, repo, record).await?;
725
759
}
726
760
}
727
761
···
833
867
redis::AsyncTypedCommands::del(rc, format!("profile#{repo}")).await?;
834
868
db::chat_decl_delete(conn, repo).await?
835
869
}
870
+
CollectionType::CommunityLexiconBookmark => db::bookmark_delete(conn, rkey, repo).await?,
836
871
_ => unreachable!(),
837
872
};
838
873
+15
-28
consumer/src/indexer/records.rs
···
1
1
use crate::utils;
2
2
use chrono::{DateTime, Utc};
3
-
use ipld_core::cid::Cid;
4
3
use lexica::app_bsky::actor::{ChatAllowIncoming, ProfileAllowSubscriptions, Status};
5
4
use lexica::app_bsky::embed::AspectRatio;
6
5
use lexica::app_bsky::labeler::LabelerPolicy;
7
6
use lexica::app_bsky::richtext::FacetMain;
8
7
use lexica::com_atproto::label::SelfLabels;
9
8
use lexica::com_atproto::moderation::{ReasonType, SubjectType};
9
+
use lexica::{Blob, StrongRef};
10
10
use serde::{Deserialize, Serialize};
11
11
use serde_with::serde_as;
12
12
13
-
#[derive(Clone, Debug, Deserialize, Serialize)]
14
-
pub struct StrongRef {
15
-
#[serde(
16
-
deserialize_with = "utils::cid_from_string",
17
-
serialize_with = "utils::cid_as_str"
18
-
)]
19
-
pub cid: Cid,
20
-
pub uri: String,
21
-
}
22
-
23
-
#[derive(Clone, Debug, Deserialize, Serialize)]
24
-
#[serde(tag = "$type")]
25
-
#[serde(rename = "blob")]
26
-
#[serde(rename_all = "camelCase")]
27
-
pub struct Blob {
28
-
pub mime_type: String,
29
-
#[serde(serialize_with = "utils::cid_as_link")]
30
-
pub r#ref: Cid,
31
-
pub size: i32,
32
-
}
33
-
34
13
#[derive(Debug, Deserialize, Serialize)]
35
14
#[serde(rename_all = "camelCase")]
36
15
#[serde_as]
···
44
23
pub labels: Option<SelfLabels>,
45
24
pub joined_via_starter_pack: Option<StrongRef>,
46
25
pub pinned_post: Option<StrongRef>,
26
+
#[serde_as(as = "utils::safe_string")]
27
+
pub pronouns: Option<String>,
28
+
#[serde_as(as = "utils::safe_string")]
29
+
pub website: Option<String>,
47
30
pub created_at: Option<DateTime<Utc>>,
48
31
}
49
32
···
284
267
pub struct AppBskyFeedThreadgate {
285
268
pub post: String,
286
269
pub created_at: DateTime<Utc>,
287
-
#[serde(default)]
288
-
pub allow: Vec<ThreadgateRule>,
270
+
pub allow: Option<Vec<ThreadgateRule>>,
289
271
#[serde(default)]
290
272
pub hidden_replies: Vec<String>,
291
273
}
274
+
275
+
pub const THREADGATE_RULE_MENTION: &str = "app.bsky.feed.threadgate#mentionRule";
276
+
pub const THREADGATE_RULE_FOLLOWER: &str = "app.bsky.feed.threadgate#followerRule";
277
+
pub const THREADGATE_RULE_FOLLOWING: &str = "app.bsky.feed.threadgate#followingRule";
278
+
pub const THREADGATE_RULE_LIST: &str = "app.bsky.feed.threadgate#listRule";
292
279
293
280
#[derive(Debug, Deserialize, Serialize)]
294
281
#[serde(tag = "$type")]
···
306
293
impl ThreadgateRule {
307
294
pub fn as_str(&self) -> &'static str {
308
295
match self {
309
-
ThreadgateRule::Mention => "app.bsky.feed.threadgate#mentionRule",
310
-
ThreadgateRule::Follower => "app.bsky.feed.threadgate#followerRule",
311
-
ThreadgateRule::Following => "app.bsky.feed.threadgate#followingRule",
312
-
ThreadgateRule::List { .. } => "app.bsky.feed.threadgate#listRule",
296
+
ThreadgateRule::Mention => THREADGATE_RULE_MENTION,
297
+
ThreadgateRule::Follower => THREADGATE_RULE_FOLLOWER,
298
+
ThreadgateRule::Following => THREADGATE_RULE_FOLLOWING,
299
+
ThreadgateRule::List { .. } => THREADGATE_RULE_LIST,
313
300
}
314
301
}
315
302
}
+5
consumer/src/indexer/types.rs
···
41
41
AppBskyNotificationDeclaration(records::AppBskyNotificationDeclaration),
42
42
#[serde(rename = "chat.bsky.actor.declaration")]
43
43
ChatBskyActorDeclaration(records::ChatBskyActorDeclaration),
44
+
#[serde(rename = "community.lexicon.bookmarks.bookmark")]
45
+
CommunityLexiconBookmark(lexica::community_lexicon::bookmarks::Bookmark),
44
46
}
45
47
46
48
#[derive(Debug, PartialOrd, PartialEq, Deserialize, Serialize)]
···
63
65
BskyLabelerService,
64
66
BskyNotificationDeclaration,
65
67
ChatActorDecl,
68
+
CommunityLexiconBookmark,
66
69
Unsupported,
67
70
}
68
71
···
87
90
"app.bsky.labeler.service" => CollectionType::BskyLabelerService,
88
91
"app.bsky.notification.declaration" => CollectionType::BskyNotificationDeclaration,
89
92
"chat.bsky.actor.declaration" => CollectionType::ChatActorDecl,
93
+
"community.lexicon.bookmarks.bookmark" => CollectionType::CommunityLexiconBookmark,
90
94
_ => CollectionType::Unsupported,
91
95
}
92
96
}
···
111
115
CollectionType::BskyVerification => false,
112
116
CollectionType::BskyLabelerService => true,
113
117
CollectionType::BskyNotificationDeclaration => true,
118
+
CollectionType::CommunityLexiconBookmark => true,
114
119
CollectionType::Unsupported => false,
115
120
}
116
121
}
+1
-1
consumer/src/main.rs
+42
-36
consumer/src/utils.rs
···
1
-
use ipld_core::cid::Cid;
2
-
use serde::{Deserialize, Deserializer, Serialize, Serializer};
1
+
use lexica::app_bsky::richtext::{Facet, FacetMain, FacetOuter};
2
+
use lexica::{Blob, StrongRef};
3
+
use serde::{Deserialize, Deserializer};
3
4
4
5
// see https://deer.social/profile/did:plc:63y3oh7iakdueqhlj6trojbq/post/3ltuv4skhqs2h
5
6
pub fn safe_string<'de, D: Deserializer<'de>>(deserializer: D) -> Result<String, D::Error> {
···
8
9
Ok(str.replace('\u{0000}', ""))
9
10
}
10
11
11
-
pub fn cid_from_string<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Cid, D::Error> {
12
-
let str = String::deserialize(deserializer)?;
13
-
14
-
Cid::try_from(str).map_err(serde::de::Error::custom)
15
-
}
16
-
17
-
pub fn cid_as_str<S>(inp: &Cid, serializer: S) -> Result<S::Ok, S::Error>
18
-
where
19
-
S: Serializer,
20
-
{
21
-
inp.to_string().serialize(serializer)
12
+
pub fn blob_ref(blob: Option<Blob>) -> Option<String> {
13
+
blob.map(|blob| blob.cid.to_string())
22
14
}
23
15
24
-
#[derive(Debug, Deserialize, Serialize)]
25
-
pub struct LinkRef {
26
-
#[serde(rename = "$link")]
27
-
link: String,
28
-
}
29
-
30
-
pub fn cid_as_link<S>(inp: &Cid, serializer: S) -> Result<S::Ok, S::Error>
31
-
where
32
-
S: Serializer,
33
-
{
34
-
LinkRef {
35
-
link: inp.to_string(),
36
-
}
37
-
.serialize(serializer)
38
-
}
39
-
40
-
pub fn blob_ref(blob: Option<crate::indexer::records::Blob>) -> Option<String> {
41
-
blob.map(|blob| blob.r#ref.to_string())
42
-
}
43
-
44
-
pub fn strongref_to_parts(
45
-
strongref: Option<&crate::indexer::records::StrongRef>,
46
-
) -> (Option<String>, Option<String>) {
16
+
pub fn strongref_to_parts(strongref: Option<&StrongRef>) -> (Option<String>, Option<String>) {
47
17
strongref
48
18
.map(|sr| (sr.uri.clone(), sr.cid.to_string()))
49
19
.unzip()
···
64
34
None
65
35
}
66
36
}
37
+
38
+
pub fn at_uri_is_by(uri: &str, did: &str) -> bool {
39
+
let split_aturi = uri.rsplitn(4, '/').collect::<Vec<_>>();
40
+
41
+
did == split_aturi[2]
42
+
}
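A worked example of the rsplitn indexing, since it is easy to misread: splitting from the right leaves the DID at index 2.

// "at://did:plc:abc/app.bsky.feed.post/3kabc".rsplitn(4, '/') yields
// ["3kabc", "app.bsky.feed.post", "did:plc:abc", "at:/"], so [2] is the DID.
#[cfg(test)]
mod at_uri_tests {
    use super::at_uri_is_by;

    #[test]
    fn owner_check() {
        let uri = "at://did:plc:abc/app.bsky.feed.post/3kabc";
        assert!(at_uri_is_by(uri, "did:plc:abc"));
        assert!(!at_uri_is_by(uri, "did:plc:other"));
    }
}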
43
+
44
+
pub fn extract_mentions_and_tags(from: &[FacetMain]) -> (Vec<String>, Vec<String>) {
45
+
let (mentions, tags) = from
46
+
.iter()
47
+
.flat_map(|v| {
48
+
v.features.iter().map(|facet| match facet {
49
+
FacetOuter::Bsky(Facet::Mention { did }) => (Some(did), None),
50
+
FacetOuter::Bsky(Facet::Tag { tag }) => (None, Some(tag)),
51
+
_ => (None, None),
52
+
})
53
+
})
54
+
.unzip::<_, _, Vec<_>, Vec<_>>();
55
+
56
+
let mentions = mentions.into_iter().flatten().cloned().collect();
57
+
let tags = tags.into_iter().flatten().cloned().collect();
58
+
59
+
(mentions, tags)
60
+
}
61
+
62
+
pub fn merge_tags<T>(t1: Option<Vec<T>>, t2: Option<Vec<T>>) -> Vec<T> {
63
+
match (t1, t2) {
64
+
(Some(t1), None) => t1,
65
+
(None, Some(t2)) => t2,
66
+
(Some(mut t1), Some(t2)) => {
67
+
t1.extend(t2);
68
+
t1
69
+
}
70
+
_ => Vec::default(),
71
+
}
72
+
}
+2
-12
dataloader-rs/Cargo.toml
···
2
2
name = "dataloader"
3
3
version = "0.18.0"
4
4
edition = "2021"
5
-
authors = ["cksac <cs.cksac@gmail.com>", "Lily"]
5
+
authors = ["cksac <cs.cksac@gmail.com>", "Mia"]
6
6
description = "Rust implementation of Facebook's DataLoader using async-await."
7
7
keywords = ["batcher", "dataloader", "cache"]
8
8
categories = ["asynchronous", "caching"]
···
15
15
[badges]
16
16
travis-ci = { repository = "/cksac/dataloader-rs" }
17
17
18
-
[features]
19
-
default = ["runtime-async-std"]
20
-
runtime-async-std = [
21
-
"async-std",
22
-
]
23
-
runtime-tokio = [
24
-
"tokio"
25
-
]
26
-
27
18
[dependencies]
28
-
async-std = { version = "1", optional = true }
29
-
tokio = { version = "1", features = [ "sync", "rt" ], optional = true }
19
+
tokio = { version = "1", features = [ "sync", "rt" ] }
30
20
31
21
[dev-dependencies]
32
22
futures = "0.3"
-13
dataloader-rs/src/runtime.rs
···
1
-
// runtime-async-std
2
-
#[cfg(feature = "runtime-async-std")]
3
-
pub type Arc<T> = async_std::sync::Arc<T>;
4
-
5
-
#[cfg(feature = "runtime-async-std")]
6
-
pub type Mutex<T> = async_std::sync::Mutex<T>;
7
-
8
-
#[cfg(feature = "runtime-async-std")]
9
-
pub use async_std::task::yield_now;
10
-
11
1
// runtime-tokio
12
-
#[cfg(feature = "runtime-tokio")]
13
2
pub type Arc<T> = std::sync::Arc<T>;
14
3
15
-
#[cfg(feature = "runtime-tokio")]
16
4
pub type Mutex<T> = tokio::sync::Mutex<T>;
17
5
18
-
#[cfg(feature = "runtime-tokio")]
19
6
pub use tokio::task::yield_now;
+23
justfile
···
1
+
mod consumer
2
+
mod parakeet
3
+
mod parakeet-index
4
+
5
+
alias run-consumer := consumer::run
6
+
alias run-parakeet := parakeet::run
7
+
alias run-index := parakeet-index::run
8
+
9
+
@reset-db:
10
+
echo "Resetting and redoing Diesel migrations..."
11
+
diesel migration redo -a --locked-schema
12
+
13
+
@reset-redis:
14
+
echo "Resetting Redis lists..."
15
+
redis-cli DEL backfill_queue backfill_processing bf_downloaded
16
+
17
+
@reset-and-backfill *dids: reset-db reset-redis
18
+
for PARAMETER_VALUE in {{dids}}; do \
19
+
psql parakeet -c "INSERT INTO actors (did) VALUES ('$PARAMETER_VALUE');" > /dev/null 2>&1; \
20
+
redis-cli LPUSH backfill_queue "$PARAMETER_VALUE" > /dev/null 2>&1; \
21
+
done
22
+
RUST_LOG=info,consumer=trace,consumer::firehose=info \
23
+
just run-consumer --backfill
+1
lexica/Cargo.toml
+36
-6
lexica/src/app_bsky/actor.rs
···
1
1
use crate::app_bsky::embed::External;
2
+
use crate::app_bsky::graph::ListViewBasic;
2
3
use crate::com_atproto::label::Label;
3
4
use chrono::prelude::*;
4
5
use serde::{Deserialize, Serialize};
5
6
use std::fmt::Display;
6
7
use std::str::FromStr;
8
+
9
+
#[derive(Clone, Default, Debug, Serialize)]
10
+
#[serde(rename_all = "camelCase")]
11
+
pub struct ProfileViewerState {
12
+
pub muted: bool,
13
+
#[serde(skip_serializing_if = "Option::is_none")]
14
+
pub muted_by_list: Option<ListViewBasic>,
15
+
pub blocked_by: bool,
16
+
#[serde(skip_serializing_if = "Option::is_none")]
17
+
pub blocking: Option<String>,
18
+
#[serde(skip_serializing_if = "Option::is_none")]
19
+
pub blocking_by_list: Option<ListViewBasic>,
20
+
#[serde(skip_serializing_if = "Option::is_none")]
21
+
pub following: Option<String>,
22
+
#[serde(skip_serializing_if = "Option::is_none")]
23
+
pub followed_by: Option<String>,
24
+
// #[serde(skip_serializing_if = "Option::is_none")]
25
+
// pub known_followers: Option<()>,
26
+
// #[serde(skip_serializing_if = "Option::is_none")]
27
+
// pub activity_subscriptions: Option<()>,
28
+
}
7
29
8
30
#[derive(Clone, Default, Debug, Serialize)]
9
31
#[serde(rename_all = "camelCase")]
···
130
152
pub avatar: Option<String>,
131
153
#[serde(skip_serializing_if = "Option::is_none")]
132
154
pub associated: Option<ProfileAssociated>,
133
-
// #[serde(skip_serializing_if = "Option::is_none")]
134
-
// pub viewer: Option<()>,
155
+
#[serde(skip_serializing_if = "Option::is_none")]
156
+
pub viewer: Option<ProfileViewerState>,
135
157
#[serde(skip_serializing_if = "Vec::is_empty")]
136
158
pub labels: Vec<Label>,
137
159
#[serde(skip_serializing_if = "Option::is_none")]
138
160
pub verification: Option<VerificationState>,
139
161
#[serde(skip_serializing_if = "Option::is_none")]
140
162
pub status: Option<StatusView>,
163
+
#[serde(skip_serializing_if = "Option::is_none")]
164
+
pub pronouns: Option<String>,
141
165
142
166
pub created_at: DateTime<Utc>,
143
167
}
···
156
180
pub avatar: Option<String>,
157
181
#[serde(skip_serializing_if = "Option::is_none")]
158
182
pub associated: Option<ProfileAssociated>,
159
-
// #[serde(skip_serializing_if = "Option::is_none")]
160
-
// pub viewer: Option<()>,
183
+
#[serde(skip_serializing_if = "Option::is_none")]
184
+
pub viewer: Option<ProfileViewerState>,
161
185
#[serde(skip_serializing_if = "Vec::is_empty")]
162
186
pub labels: Vec<Label>,
163
187
#[serde(skip_serializing_if = "Option::is_none")]
164
188
pub verification: Option<VerificationState>,
165
189
#[serde(skip_serializing_if = "Option::is_none")]
166
190
pub status: Option<StatusView>,
191
+
#[serde(skip_serializing_if = "Option::is_none")]
192
+
pub pronouns: Option<String>,
167
193
168
194
pub created_at: DateTime<Utc>,
169
195
pub indexed_at: NaiveDateTime,
···
189
215
pub associated: Option<ProfileAssociated>,
190
216
// #[serde(skip_serializing_if = "Option::is_none")]
191
217
// pub joined_via_starter_pack: Option<()>,
192
-
// #[serde(skip_serializing_if = "Option::is_none")]
193
-
// pub viewer: Option<()>,
218
+
#[serde(skip_serializing_if = "Option::is_none")]
219
+
pub viewer: Option<ProfileViewerState>,
194
220
#[serde(skip_serializing_if = "Vec::is_empty")]
195
221
pub labels: Vec<Label>,
196
222
// #[serde(skip_serializing_if = "Option::is_none")]
···
199
225
pub verification: Option<VerificationState>,
200
226
#[serde(skip_serializing_if = "Option::is_none")]
201
227
pub status: Option<StatusView>,
228
+
#[serde(skip_serializing_if = "Option::is_none")]
229
+
pub pronouns: Option<String>,
230
+
#[serde(skip_serializing_if = "Option::is_none")]
231
+
pub website: Option<String>,
202
232
203
233
pub created_at: DateTime<Utc>,
204
234
pub indexed_at: NaiveDateTime,
+32
lexica/src/app_bsky/bookmark.rs
···
1
+
use crate::app_bsky::feed::{BlockedAuthor, PostView};
2
+
use crate::StrongRef;
3
+
use chrono::prelude::*;
4
+
use serde::Serialize;
5
+
6
+
#[derive(Clone, Debug, Serialize)]
7
+
#[serde(rename_all = "camelCase")]
8
+
pub struct BookmarkView {
9
+
pub subject: StrongRef,
10
+
pub item: BookmarkViewItem,
11
+
pub created_at: DateTime<Utc>,
12
+
}
13
+
14
+
#[derive(Clone, Debug, Serialize)]
15
+
#[serde(tag = "$type")]
16
+
// This is technically the same as ReplyRefPost atm, but just in case...
17
+
pub enum BookmarkViewItem {
18
+
#[serde(rename = "app.bsky.feed.defs#postView")]
19
+
Post(PostView),
20
+
#[serde(rename = "app.bsky.feed.defs#notFoundPost")]
21
+
NotFound {
22
+
uri: String,
23
+
#[serde(rename = "notFound")]
24
+
not_found: bool,
25
+
},
26
+
#[serde(rename = "app.bsky.feed.defs#blockedPost")]
27
+
Blocked {
28
+
uri: String,
29
+
blocked: bool,
30
+
author: BlockedAuthor,
31
+
},
32
+
}
+29
-10
lexica/src/app_bsky/feed.rs
···
1
1
use super::RecordStats;
2
-
use crate::app_bsky::actor::{ProfileView, ProfileViewBasic};
2
+
use crate::app_bsky::actor::{ProfileView, ProfileViewBasic, ProfileViewerState};
3
3
use crate::app_bsky::embed::Embed;
4
4
use crate::app_bsky::graph::ListViewBasic;
5
5
use crate::app_bsky::richtext::FacetMain;
···
8
8
use serde::{Deserialize, Serialize};
9
9
use std::str::FromStr;
10
10
11
+
#[derive(Clone, Default, Debug, Serialize)]
12
+
#[serde(rename_all = "camelCase")]
13
+
pub struct PostViewerState {
14
+
#[serde(skip_serializing_if = "Option::is_none")]
15
+
pub repost: Option<String>,
16
+
#[serde(skip_serializing_if = "Option::is_none")]
17
+
pub like: Option<String>,
18
+
pub bookmarked: bool,
19
+
pub thread_muted: bool,
20
+
pub reply_disabled: bool,
21
+
pub embedding_disabled: bool,
22
+
pub pinned: bool,
23
+
}
24
+
11
25
#[derive(Clone, Debug, Serialize)]
12
26
#[serde(rename_all = "camelCase")]
13
27
pub struct PostView {
···
23
37
24
38
#[serde(skip_serializing_if = "Vec::is_empty")]
25
39
pub labels: Vec<Label>,
26
-
// #[serde(skip_serializing_if = "Option::is_none")]
27
-
// pub viewer: Option<()>,
40
+
#[serde(skip_serializing_if = "Option::is_none")]
41
+
pub viewer: Option<PostViewerState>,
28
42
#[serde(skip_serializing_if = "Option::is_none")]
29
43
pub threadgate: Option<ThreadgateView>,
30
44
···
123
137
124
138
#[derive(Clone, Debug, Serialize)]
125
139
pub struct BlockedAuthor {
126
-
pub uri: String,
127
-
// pub viewer: Option<()>,
140
+
pub did: String,
141
+
pub viewer: Option<ProfileViewerState>,
142
+
}
143
+
144
+
#[derive(Clone, Default, Debug, Serialize)]
145
+
#[serde(rename_all = "camelCase")]
146
+
pub struct GeneratorViewerState {
147
+
#[serde(skip_serializing_if = "Option::is_none")]
148
+
pub like: Option<String>,
128
149
}
129
150
130
151
#[derive(Clone, Debug, Serialize)]
···
148
169
pub accepts_interactions: bool,
149
170
#[serde(skip_serializing_if = "Vec::is_empty")]
150
171
pub labels: Vec<Label>,
151
-
// #[serde(skip_serializing_if = "Option::is_none")]
152
-
// pub viewer: Option<()>,
172
+
#[serde(skip_serializing_if = "Option::is_none")]
173
+
pub viewer: Option<GeneratorViewerState>,
153
174
#[serde(skip_serializing_if = "Option::is_none")]
154
175
pub content_mode: Option<GeneratorContentMode>,
155
176
···
219
240
#[serde(rename = "app.bsky.feed.defs#skeletonReasonPin")]
220
241
Pin {},
221
242
#[serde(rename = "app.bsky.feed.defs#skeletonReasonRepost")]
222
-
Repost {
223
-
repost: String,
224
-
},
243
+
Repost { repost: String },
225
244
}
+12
-4
lexica/src/app_bsky/graph.rs
···
6
6
use serde::{Deserialize, Serialize};
7
7
use std::str::FromStr;
8
8
9
+
#[derive(Clone, Default, Debug, Serialize)]
10
+
#[serde(rename_all = "camelCase")]
11
+
pub struct ListViewerState {
12
+
pub muted: bool,
13
+
#[serde(skip_serializing_if = "Option::is_none")]
14
+
pub blocked: Option<String>,
15
+
}
16
+
9
17
#[derive(Clone, Debug, Serialize)]
10
18
#[serde(rename_all = "camelCase")]
11
19
pub struct ListViewBasic {
···
18
26
pub avatar: Option<String>,
19
27
pub list_item_count: i64,
20
28
21
-
// #[serde(skip_serializing_if = "Option::is_none")]
22
-
// pub viewer: Option<()>,
29
+
#[serde(skip_serializing_if = "Option::is_none")]
30
+
pub viewer: Option<ListViewerState>,
23
31
#[serde(skip_serializing_if = "Vec::is_empty")]
24
32
pub labels: Vec<Label>,
25
33
···
44
52
pub avatar: Option<String>,
45
53
pub list_item_count: i64,
46
54
47
-
// #[serde(skip_serializing_if = "Option::is_none")]
48
-
// pub viewer: Option<()>,
55
+
#[serde(skip_serializing_if = "Option::is_none")]
56
+
pub viewer: Option<ListViewerState>,
49
57
#[serde(skip_serializing_if = "Vec::is_empty")]
50
58
pub labels: Vec<Label>,
51
59
+11
-4
lexica/src/app_bsky/labeler.rs
···
4
4
use chrono::prelude::*;
5
5
use serde::{Deserialize, Serialize};
6
6
7
+
#[derive(Clone, Default, Debug, Serialize)]
8
+
#[serde(rename_all = "camelCase")]
9
+
pub struct LabelerViewerState {
10
+
#[serde(skip_serializing_if = "Option::is_none")]
11
+
pub like: Option<String>,
12
+
}
13
+
7
14
#[derive(Clone, Debug, Serialize)]
8
15
#[serde(rename_all = "camelCase")]
9
16
pub struct LabelerView {
···
12
19
pub creator: ProfileView,
13
20
14
21
pub like_count: i64,
15
-
// #[serde(skip_serializing_if = "Option::is_none")]
16
-
// pub viewer: Option<()>,
22
+
#[serde(skip_serializing_if = "Option::is_none")]
23
+
pub viewer: Option<LabelerViewerState>,
17
24
#[serde(skip_serializing_if = "Vec::is_empty")]
18
25
pub labels: Vec<Label>,
19
26
pub indexed_at: DateTime<Utc>,
···
27
34
pub creator: ProfileView,
28
35
29
36
pub like_count: i64,
30
-
// #[serde(skip_serializing_if = "Option::is_none")]
31
-
// pub viewer: Option<()>,
37
+
#[serde(skip_serializing_if = "Option::is_none")]
38
+
pub viewer: Option<LabelerViewerState>,
32
39
#[serde(skip_serializing_if = "Vec::is_empty")]
33
40
pub labels: Vec<Label>,
34
41
pub policies: LabelerPolicy,
+2
lexica/src/app_bsky/mod.rs
+33
lexica/src/app_bsky/unspecced.rs
···
1
+
use crate::app_bsky::feed::{BlockedAuthor, PostView};
2
+
use serde::Serialize;
3
+
4
+
#[derive(Clone, Debug, Serialize)]
5
+
pub struct ThreadV2Item {
6
+
pub uri: String,
7
+
pub depth: i32,
8
+
pub value: ThreadV2ItemType,
9
+
}
10
+
11
+
#[derive(Clone, Debug, Serialize)]
12
+
#[serde(tag = "$type")]
13
+
pub enum ThreadV2ItemType {
14
+
#[serde(rename = "app.bsky.unspecced.defs#threadItemPost")]
15
+
Post(ThreadItemPost),
16
+
#[serde(rename = "app.bsky.unspecced.defs#threadItemNoUnauthenticated")]
17
+
NoUnauthenticated {},
18
+
#[serde(rename = "app.bsky.unspecced.defs#threadItemNotFound")]
19
+
NotFound {},
20
+
#[serde(rename = "app.bsky.unspecced.defs#threadItemBlocked")]
21
+
Blocked { author: BlockedAuthor },
22
+
}
23
+
24
+
#[derive(Clone, Debug, Serialize)]
25
+
#[serde(rename_all = "camelCase")]
26
+
pub struct ThreadItemPost {
27
+
pub post: PostView,
28
+
pub more_parents: bool,
29
+
pub more_replies: i32,
30
+
pub op_thread: bool,
31
+
pub hidden_by_threadgate: bool,
32
+
pub muted_by_viewer: bool,
33
+
}
+14
lexica/src/community_lexicon/bookmarks.rs
···
1
+
use chrono::prelude::*;
2
+
use serde::{Deserialize, Serialize};
3
+
4
+
#[derive(Clone, Debug, Deserialize, Serialize)]
5
+
#[serde(tag = "$type")]
6
+
#[serde(rename = "community.lexicon.bookmarks.bookmark")]
7
+
#[serde(rename_all = "camelCase")]
8
+
pub struct Bookmark {
9
+
pub subject: String,
10
+
#[serde(default)]
11
+
#[serde(skip_serializing_if = "Vec::is_empty")]
12
+
pub tags: Vec<String>,
13
+
pub created_at: DateTime<Utc>,
14
+
}
+1
lexica/src/community_lexicon/mod.rs
···
1
+
pub mod bookmarks;
+35
-1
lexica/src/lib.rs
···
1
-
use serde::Serialize;
1
+
use cid::Cid;
2
+
use serde::{Deserialize, Serialize};
3
+
4
+
pub use utils::LinkRef;
2
5
3
6
pub mod app_bsky;
4
7
pub mod com_atproto;
8
+
pub mod community_lexicon;
9
+
mod utils;
5
10
6
11
#[derive(Clone, Debug, Serialize)]
7
12
pub struct JsonBytes {
8
13
#[serde(rename = "$bytes")]
9
14
pub bytes: String,
10
15
}
16
+
17
+
#[derive(Clone, Debug, Deserialize, Serialize)]
18
+
pub struct StrongRef {
19
+
#[serde(
20
+
deserialize_with = "utils::cid_from_string",
21
+
serialize_with = "utils::cid_as_str"
22
+
)]
23
+
pub cid: Cid,
24
+
pub uri: String,
25
+
}
26
+
27
+
impl StrongRef {
28
+
pub fn new_from_str(uri: String, cid: &str) -> Result<Self, cid::Error> {
29
+
let cid = cid.parse()?;
30
+
Ok(StrongRef { uri, cid })
31
+
}
32
+
}
33
+
34
+
#[derive(Clone, Debug, Deserialize, Serialize)]
35
+
#[serde(tag = "$type")]
36
+
#[serde(rename = "blob")]
37
+
#[serde(rename_all = "camelCase")]
38
+
pub struct Blob {
39
+
pub mime_type: String,
40
+
#[serde(rename = "ref")]
41
+
#[serde(serialize_with = "utils::cid_as_link")]
42
+
pub cid: Cid,
43
+
pub size: i32,
44
+
}
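The rename to "ref" plus cid_as_link reproduce the atproto blob wire shape, with the CID nested under $link. A sketch of a round-trip check (assumes serde_json as a dev-dependency; the CID string is just any valid CIDv1):

#[cfg(test)]
mod blob_shape {
    use super::Blob;

    #[test]
    fn serializes_ref_as_link() {
        let blob = Blob {
            mime_type: "image/jpeg".into(),
            cid: "bafkreidgvpkjawlxz6sffxzwgooowe5yt7i6wsyg236mfoks77nywkptdq"
                .parse()
                .unwrap(),
            size: 12345,
        };
        let json = serde_json::to_value(&blob).unwrap();
        assert_eq!(json["$type"], "blob");
        assert!(json["ref"]["$link"].is_string());
    }
}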
+31
lexica/src/utils.rs
···
1
+
use cid::Cid;
2
+
use serde::{Deserialize, Deserializer, Serialize, Serializer};
3
+
4
+
pub fn cid_from_string<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Cid, D::Error> {
5
+
let str = String::deserialize(deserializer)?;
6
+
7
+
Cid::try_from(str).map_err(serde::de::Error::custom)
8
+
}
9
+
10
+
pub fn cid_as_str<S>(inp: &Cid, serializer: S) -> Result<S::Ok, S::Error>
11
+
where
12
+
S: Serializer,
13
+
{
14
+
inp.to_string().serialize(serializer)
15
+
}
16
+
17
+
#[derive(Debug, Deserialize, Serialize)]
18
+
pub struct LinkRef {
19
+
#[serde(rename = "$link")]
20
+
link: String,
21
+
}
22
+
23
+
pub fn cid_as_link<S>(inp: &Cid, serializer: S) -> Result<S::Ok, S::Error>
24
+
where
25
+
S: Serializer,
26
+
{
27
+
LinkRef {
28
+
link: inp.to_string(),
29
+
}
30
+
.serialize(serializer)
31
+
}
+2
-2
migrations/2025-02-16-142357_posts/up.sql
+1
migrations/2025-09-02-190833_bookmarks/down.sql
···
1
+
drop table bookmarks;
+19
migrations/2025-09-02-190833_bookmarks/up.sql
···
1
+
create table bookmarks
2
+
(
3
+
did text not null references actors (did),
4
+
rkey text,
5
+
subject text not null,
6
+
subject_cid text,
7
+
subject_type text not null,
8
+
tags text[] not null default ARRAY []::text[],
9
+
10
+
created_at timestamptz not null default now(),
11
+
12
+
primary key (did, subject)
13
+
);
14
+
15
+
create index bookmarks_rkey_index on bookmarks (rkey);
16
+
create index bookmarks_subject_index on bookmarks (subject);
17
+
create index bookmarks_subject_type_index on bookmarks (subject_type);
18
+
create index bookmarks_tags_index on bookmarks using gin (tags);
19
+
create unique index bookmarks_rkey_ui on bookmarks (did, rkey);
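The GIN index on tags exists to answer containment lookups; a hypothetical helper (names illustrative, diesel-style as in parakeet/src/db.rs) showing the query shape it serves:

use diesel::prelude::*;
use diesel::sql_types::Text;
use diesel_async::{AsyncPgConnection, RunQueryDsl};

#[derive(QueryableByName)]
struct BookmarkSubject {
    #[diesel(sql_type = Text)]
    subject: String,
}

// All of a user's bookmarks carrying a given tag, served by the GIN index
// via the @> containment operator.
async fn bookmarks_with_tag(
    conn: &mut AsyncPgConnection,
    did: &str,
    tag: &str,
) -> QueryResult<Vec<BookmarkSubject>> {
    diesel::sql_query("SELECT subject FROM bookmarks WHERE did = $1 AND tags @> ARRAY[$2]")
        .bind::<Text, _>(did)
        .bind::<Text, _>(tag)
        .load(conn)
        .await
}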
+17
migrations/2025-09-17-190406_viewer-interactions/down.sql
···
1
+
drop trigger t_profile_state_ins on follows;
2
+
drop trigger t_profile_state_del on follows;
3
+
drop trigger t_profile_state_ins on blocks;
4
+
drop trigger t_profile_state_del on blocks;
5
+
drop trigger t_profile_state_ins on mutes;
6
+
drop trigger t_profile_state_del on mutes;
7
+
8
+
drop function f_profile_state_ins_follow;
9
+
drop function f_profile_state_del_follow;
10
+
drop function f_profile_state_ins_block;
11
+
drop function f_profile_state_del_block;
12
+
drop function f_profile_state_ins_mute;
13
+
drop function f_profile_state_del_mute;
14
+
15
+
drop view v_list_mutes_exp;
16
+
drop view v_list_block_exp;
17
+
drop table profile_states;
+146
migrations/2025-09-17-190406_viewer-interactions/up.sql
···
1
+
create table profile_states
2
+
(
3
+
did text not null,
4
+
subject text not null,
5
+
muting bool not null default false, -- subj muted by did
6
+
blocked bool not null default false, -- did blocked by subj
7
+
blocking text, -- subj blocked by did
8
+
following text, -- rkey of follow record (did->subj)
9
+
followed text, -- rkey of follow record (subj->did)
10
+
11
+
primary key (did, subject)
12
+
);
13
+
14
+
create index profilestates_did_index on profile_states using hash (did);
15
+
create index profilestates_sub_index on profile_states using hash (subject);
16
+
17
+
create view v_list_block_exp as
18
+
(
19
+
select lb.list_uri, did, li.subject
20
+
from list_blocks lb
21
+
inner join list_items li on lb.list_uri = li.list_uri
22
+
);
23
+
24
+
create view v_list_mutes_exp as
25
+
(
26
+
select lm.list_uri, did, li.subject
27
+
from list_mutes lm
28
+
inner join list_items li on lm.list_uri = li.list_uri
29
+
);
30
+
31
+
-- profile_states follow triggers
32
+
create function f_profile_state_ins_follow() returns trigger
33
+
language plpgsql as
34
+
$$
35
+
begin
36
+
insert into profile_states (did, subject, following)
37
+
VALUES (NEW.did, NEW.subject, NEW.rkey)
38
+
ON CONFLICT (did, subject) DO UPDATE SET following=excluded.following;
39
+
40
+
insert into profile_states (did, subject, followed)
41
+
VALUES (NEW.subject, NEW.did, NEW.rkey)
42
+
ON CONFLICT (did, subject) DO UPDATE SET followed=excluded.followed;
43
+
44
+
return NEW;
45
+
end;
46
+
$$;
47
+
48
+
create trigger t_profile_state_ins
49
+
before insert
50
+
on follows
51
+
for each row
52
+
execute procedure f_profile_state_ins_follow();
53
+
54
+
create function f_profile_state_del_follow() returns trigger
55
+
language plpgsql as
56
+
$$
57
+
begin
58
+
update profile_states set following = null where did = OLD.did and subject = OLD.subject;
59
+
update profile_states set followed = null where did = OLD.subject and subject = OLD.did;
60
+
61
+
return OLD;
62
+
end;
63
+
$$;
64
+
65
+
create trigger t_profile_state_del
66
+
before delete
67
+
on follows
68
+
for each row
69
+
execute procedure f_profile_state_del_follow();
70
+
71
+
-- profile_states block triggers
72
+
73
+
create function f_profile_state_ins_block() returns trigger
74
+
language plpgsql as
75
+
$$
76
+
begin
77
+
insert into profile_states (did, subject, blocking)
78
+
VALUES (NEW.did, NEW.subject, NEW.rkey)
79
+
ON CONFLICT (did, subject) DO UPDATE SET blocking=excluded.blocking;
80
+
81
+
insert into profile_states (did, subject, blocked)
82
+
VALUES (NEW.subject, NEW.did, TRUE)
83
+
ON CONFLICT (did, subject) DO UPDATE SET blocked=excluded.blocked;
84
+
85
+
return NEW;
86
+
end;
87
+
$$;
88
+
89
+
create trigger t_profile_state_ins
90
+
before insert
91
+
on blocks
92
+
for each row
93
+
execute procedure f_profile_state_ins_block();
94
+
95
+
create function f_profile_state_del_block() returns trigger
96
+
language plpgsql as
97
+
$$
98
+
begin
99
+
update profile_states set blocking = null where did = OLD.did and subject = OLD.subject;
100
+
update profile_states set blocked = FALSE where did = OLD.subject and subject = OLD.did;
101
+
102
+
return OLD;
103
+
end;
104
+
$$;
105
+
106
+
create trigger t_profile_state_del
107
+
before delete
108
+
on blocks
109
+
for each row
110
+
execute procedure f_profile_state_del_block();
111
+
112
+
-- profile_states mutes triggers
113
+
114
+
create function f_profile_state_ins_mute() returns trigger
115
+
language plpgsql as
116
+
$$
117
+
begin
118
+
insert into profile_states (did, subject, muting)
119
+
VALUES (NEW.did, NEW.subject, TRUE)
120
+
ON CONFLICT (did, subject) DO UPDATE SET muting=excluded.muting;
121
+
122
+
return NEW;
123
+
end;
124
+
$$;
125
+
126
+
create trigger t_profile_state_ins
127
+
before insert
128
+
on mutes
129
+
for each row
130
+
execute procedure f_profile_state_ins_mute();
131
+
132
+
create function f_profile_state_del_mute() returns trigger
133
+
language plpgsql as
134
+
$$
135
+
begin
136
+
update profile_states set muting = false where did = OLD.did and subject = OLD.subject;
137
+
138
+
return OLD;
139
+
end;
140
+
$$;
141
+
142
+
create trigger t_profile_state_del
143
+
before delete
144
+
on mutes
145
+
for each row
146
+
execute procedure f_profile_state_del_mute();
+3
migrations/2025-09-24-205239_profiles-4224/down.sql
+3
migrations/2025-09-24-205239_profiles-4224/up.sql
+15
migrations/2025-09-27-171241_post-tweaks/down.sql
···
1
+
alter table posts
2
+
drop column mentions,
3
+
drop column violates_threadgate;
4
+
5
+
drop trigger t_author_feed_ins_post on posts;
6
+
drop trigger t_author_feed_del_post on posts;
7
+
drop trigger t_author_feed_ins_repost on reposts;
8
+
drop trigger t_author_feed_del_repost on reposts;
9
+
10
+
drop function f_author_feed_ins_post;
11
+
drop function f_author_feed_del_post;
12
+
drop function f_author_feed_ins_repost;
13
+
drop function f_author_feed_del_repost;
14
+
15
+
drop table author_feeds;
+79
migrations/2025-09-27-171241_post-tweaks/up.sql
···
1
+
alter table posts
2
+
add column mentions text[],
3
+
add column violates_threadgate bool not null default false;
4
+
5
+
create table author_feeds
6
+
(
7
+
uri text primary key,
8
+
cid text not null,
9
+
post text not null,
10
+
did text not null,
11
+
typ text not null,
12
+
sort_at timestamptz not null
13
+
);
14
+
15
+
-- author_feeds post triggers
16
+
create function f_author_feed_ins_post() returns trigger
17
+
language plpgsql as
18
+
$$
19
+
begin
20
+
insert into author_feeds (uri, cid, post, did, typ, sort_at)
21
+
VALUES (NEW.at_uri, NEW.cid, NEW.at_uri, NEW.did, 'post', NEW.created_at)
22
+
on conflict do nothing;
23
+
return NEW;
24
+
end;
25
+
$$;
26
+
27
+
create trigger t_author_feed_ins_post
28
+
before insert
29
+
on posts
30
+
for each row
31
+
execute procedure f_author_feed_ins_post();
32
+
33
+
create function f_author_feed_del_post() returns trigger
34
+
language plpgsql as
35
+
$$
36
+
begin
37
+
delete from author_feeds where did = OLD.did and post = OLD.at_uri and typ = 'post';
38
+
return OLD;
39
+
end;
40
+
$$;
41
+
42
+
create trigger t_author_feed_del_post
43
+
before delete
44
+
on posts
45
+
for each row
46
+
execute procedure f_author_feed_del_post();
47
+
48
+
-- author_feeds repost triggers
49
+
create function f_author_feed_ins_repost() returns trigger
50
+
language plpgsql as
51
+
$$
52
+
begin
53
+
insert into author_feeds (uri, cid, post, did, typ, sort_at)
54
+
VALUES ('at://' || NEW.did || '/app.bsky.feed.repost/' || NEW.rkey, NEW.post_cid, NEW.post, NEW.did, 'repost', NEW.created_at)
55
+
on conflict do nothing;
56
+
return NEW;
57
+
end;
58
+
$$;
59
+
60
+
create trigger t_author_feed_ins_repost
61
+
before insert
62
+
on reposts
63
+
for each row
64
+
execute procedure f_author_feed_ins_repost();
65
+
66
+
create function f_author_feed_del_repost() returns trigger
67
+
language plpgsql as
68
+
$$
69
+
begin
70
+
delete from author_feeds where did = OLD.did and post = OLD.post and typ = 'repost';
71
+
return OLD;
72
+
end;
73
+
$$;
74
+
75
+
create trigger t_author_feed_del_repost
76
+
before delete
77
+
on reposts
78
+
for each row
79
+
execute procedure f_author_feed_del_repost();
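A sketch of the read path this table enables (hypothetical helper; columns from the migration above). Note the migration only creates the primary key on uri, so a real author-feed page scan would likely also want an index on (did, sort_at):

use diesel::prelude::*;
use diesel::sql_types::{Integer, Text};
use diesel_async::{AsyncPgConnection, RunQueryDsl};

#[derive(QueryableByName)]
struct AuthorFeedRow {
    #[diesel(sql_type = Text)]
    post: String,
    #[diesel(sql_type = Text)]
    typ: String,
}

// Newest-first page of an author's posts and reposts.
async fn author_feed_page(
    conn: &mut AsyncPgConnection,
    did: &str,
    limit: i32,
) -> QueryResult<Vec<AuthorFeedRow>> {
    diesel::sql_query(
        "SELECT post, typ FROM author_feeds WHERE did = $1 ORDER BY sort_at DESC LIMIT $2",
    )
    .bind::<Text, _>(did)
    .bind::<Integer, _>(limit)
    .load(conn)
    .await
}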
+1
-1
parakeet/Cargo.toml
···
9
9
axum-extra = { version = "0.10.0", features = ["query", "typed-header"] }
10
10
base64 = "0.22"
11
11
chrono = { version = "0.4.39", features = ["serde"] }
12
-
dataloader = { path = "../dataloader-rs", default-features = false, features = ["runtime-tokio"] }
12
+
dataloader = { path = "../dataloader-rs" }
13
13
deadpool = { version = "0.12.1", features = ["managed"] }
14
14
did-resolver = { path = "../did-resolver" }
15
15
diesel = { version = "2.2.6", features = ["chrono", "serde_json"] }
+11
parakeet/justfile
···
1
+
@release:
2
+
cargo build --release
3
+
4
+
@lint:
5
+
cargo clippy
6
+
7
+
@run *params:
8
+
cargo run -- {{params}}
9
+
10
+
@docker platform='linux/amd64' branch='main':
11
+
docker buildx build --platform {{platform}} -t registry.gitlab.com/parakeet-social/parakeet/parakeet:{{branch}} . -f parakeet/Dockerfile
-1
parakeet/run.sh
···
1
-
cargo run
+1
-1
parakeet/src/config.rs
+277
parakeet/src/db.rs
···
1
1
use diesel::prelude::*;
2
+
use diesel::sql_types::{Array, Bool, Integer, Nullable, Text};
2
3
use diesel_async::{AsyncPgConnection, RunQueryDsl};
3
4
use parakeet_db::{schema, types};
5
+
use parakeet_db::models::TextArray;
4
6
5
7
pub async fn get_actor_status(
6
8
conn: &mut AsyncPgConnection,
···
13
15
.await
14
16
.optional()
15
17
}
18
+
19
+
#[derive(Clone, Debug, QueryableByName)]
20
+
#[diesel(check_for_backend(diesel::pg::Pg))]
21
+
pub struct ProfileStateRet {
22
+
#[diesel(sql_type = Text)]
23
+
pub did: String,
24
+
#[diesel(sql_type = Text)]
25
+
pub subject: String,
26
+
#[diesel(sql_type = Nullable<Bool>)]
27
+
pub muting: Option<bool>,
28
+
#[diesel(sql_type = Nullable<Bool>)]
29
+
pub blocked: Option<bool>,
30
+
#[diesel(sql_type = Nullable<Text>)]
31
+
pub blocking: Option<String>,
32
+
#[diesel(sql_type = Nullable<Text>)]
33
+
pub following: Option<String>,
34
+
#[diesel(sql_type = Nullable<Text>)]
35
+
pub followed: Option<String>,
36
+
#[diesel(sql_type = Nullable<Text>)]
37
+
pub list_block: Option<String>,
38
+
#[diesel(sql_type = Nullable<Text>)]
39
+
pub list_mute: Option<String>,
40
+
}
41
+
pub async fn get_profile_state(
42
+
conn: &mut AsyncPgConnection,
43
+
did: &str,
44
+
sub: &str,
45
+
) -> QueryResult<Option<ProfileStateRet>> {
46
+
diesel::sql_query(include_str!("sql/profile_state.sql"))
47
+
.bind::<Text, _>(did)
48
+
.bind::<Array<Text>, _>(vec![sub])
49
+
.get_result::<ProfileStateRet>(conn)
50
+
.await
51
+
.optional()
52
+
}
53
+
pub async fn get_profile_states(
54
+
conn: &mut AsyncPgConnection,
55
+
did: &str,
56
+
sub: &[String],
57
+
) -> QueryResult<Vec<ProfileStateRet>> {
58
+
diesel::sql_query(include_str!("sql/profile_state.sql"))
59
+
.bind::<Text, _>(did)
60
+
.bind::<Array<Text>, _>(sub)
61
+
.load::<ProfileStateRet>(conn)
62
+
.await
63
+
}
64
+
65
+
#[derive(Clone, Debug, QueryableByName)]
66
+
#[diesel(check_for_backend(diesel::pg::Pg))]
67
+
pub struct PostStateRet {
68
+
#[diesel(sql_type = Text)]
69
+
pub at_uri: String,
70
+
#[diesel(sql_type = Text)]
71
+
pub did: String,
72
+
#[diesel(sql_type = Text)]
73
+
pub cid: String,
74
+
#[diesel(sql_type = Nullable<Text>)]
75
+
pub like_rkey: Option<String>,
76
+
#[diesel(sql_type = Nullable<Text>)]
77
+
pub repost_rkey: Option<String>,
78
+
#[diesel(sql_type = Bool)]
79
+
pub bookmarked: bool,
80
+
// #[diesel(sql_type = Bool)]
81
+
// pub muted: bool,
82
+
#[diesel(sql_type = Bool)]
83
+
pub embed_disabled: bool,
84
+
#[diesel(sql_type = Bool)]
85
+
pub pinned: bool,
86
+
}
87
+
pub async fn get_post_state(
88
+
conn: &mut AsyncPgConnection,
89
+
did: &str,
90
+
subject: &str,
91
+
) -> QueryResult<Option<PostStateRet>> {
92
+
diesel::sql_query(include_str!("sql/post_state.sql"))
93
+
.bind::<Text, _>(did)
94
+
.bind::<Array<Text>, _>(vec![subject])
95
+
.get_result::<PostStateRet>(conn)
96
+
.await
97
+
.optional()
98
+
}
99
+
100
+
pub async fn get_post_states(
101
+
conn: &mut AsyncPgConnection,
102
+
did: &str,
103
+
sub: &[String],
104
+
) -> QueryResult<Vec<PostStateRet>> {
105
+
diesel::sql_query(include_str!("sql/post_state.sql"))
106
+
.bind::<Text, _>(did)
107
+
.bind::<Array<Text>, _>(sub)
108
+
.load::<PostStateRet>(conn)
109
+
.await
110
+
}
111
+
112
+
#[derive(Clone, Debug, QueryableByName)]
113
+
#[diesel(check_for_backend(diesel::pg::Pg))]
114
+
pub struct ListStateRet {
115
+
#[diesel(sql_type = Text)]
116
+
pub at_uri: String,
117
+
#[diesel(sql_type = Bool)]
118
+
pub muted: bool,
119
+
#[diesel(sql_type = Nullable<Text>)]
120
+
pub block: Option<String>,
121
+
}
122
+
123
+
pub async fn get_list_state(
124
+
conn: &mut AsyncPgConnection,
125
+
did: &str,
126
+
subject: &str,
127
+
) -> QueryResult<Option<ListStateRet>> {
128
+
diesel::sql_query(include_str!("sql/list_states.sql"))
129
+
.bind::<Text, _>(did)
130
+
.bind::<Array<Text>, _>(vec![subject])
131
+
.get_result::<ListStateRet>(conn)
132
+
.await
133
+
.optional()
134
+
}
135
+
136
+
pub async fn get_list_states(
137
+
conn: &mut AsyncPgConnection,
138
+
did: &str,
139
+
sub: &[String],
140
+
) -> QueryResult<Vec<ListStateRet>> {
141
+
diesel::sql_query(include_str!("sql/list_states.sql"))
142
+
.bind::<Text, _>(did)
143
+
.bind::<Array<Text>, _>(sub)
144
+
.load::<ListStateRet>(conn)
145
+
.await
146
+
}
147
+
148
+
pub async fn get_like_state(
149
+
conn: &mut AsyncPgConnection,
150
+
did: &str,
151
+
subject: &str,
152
+
) -> QueryResult<Option<(String, String)>> {
153
+
schema::likes::table
154
+
.select((schema::likes::did, schema::likes::rkey))
155
+
.filter(
156
+
schema::likes::did
157
+
.eq(did)
158
+
.and(schema::likes::subject.eq(subject)),
159
+
)
160
+
.get_result(conn)
161
+
.await
162
+
.optional()
163
+
}
164
+
165
+
pub async fn get_like_states(
166
+
conn: &mut AsyncPgConnection,
167
+
did: &str,
168
+
sub: &[String],
169
+
) -> QueryResult<Vec<(String, String, String)>> {
170
+
schema::likes::table
171
+
.select((
172
+
schema::likes::subject,
173
+
schema::likes::did,
174
+
schema::likes::rkey,
175
+
))
176
+
.filter(
177
+
schema::likes::did
178
+
.eq(did)
179
+
.and(schema::likes::subject.eq_any(sub)),
180
+
)
181
+
.load(conn)
182
+
.await
183
+
}
184
+
185
+
pub async fn get_pinned_post_uri(
186
+
conn: &mut AsyncPgConnection,
187
+
did: &str,
188
+
) -> QueryResult<Option<String>> {
189
+
schema::profiles::table
190
+
.select(schema::profiles::pinned_uri.assume_not_null())
191
+
.filter(
192
+
schema::profiles::did
193
+
.eq(did)
194
+
.and(schema::profiles::pinned_uri.is_not_null()),
195
+
)
196
+
.get_result(conn)
197
+
.await
198
+
.optional()
199
+
}
200
+
201
+
#[derive(Debug, QueryableByName)]
202
+
#[diesel(check_for_backend(diesel::pg::Pg))]
203
+
#[allow(unused)]
204
+
pub struct ThreadItem {
205
+
#[diesel(sql_type = Text)]
206
+
pub at_uri: String,
207
+
#[diesel(sql_type = Nullable<Text>)]
208
+
pub parent_uri: Option<String>,
209
+
#[diesel(sql_type = Nullable<Text>)]
210
+
pub root_uri: Option<String>,
211
+
#[diesel(sql_type = Integer)]
212
+
pub depth: i32,
213
+
}
214
+
215
+
pub async fn get_thread_children(
216
+
conn: &mut AsyncPgConnection,
217
+
uri: &str,
218
+
depth: i32,
219
+
) -> QueryResult<Vec<ThreadItem>> {
220
+
diesel::sql_query(include_str!("sql/thread.sql"))
221
+
.bind::<Text, _>(uri)
222
+
.bind::<Integer, _>(depth)
223
+
.load(conn)
224
+
.await
225
+
}
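sql/thread.sql itself is not part of this diff; given the ThreadItem columns and the ($1 = uri, $2 = depth) binds, one plausible shape is a recursive CTE along these lines (a reconstruction, not the actual file):

// Hypothetical reconstruction only; the real sql/thread.sql is not shown here.
const THREAD_SQL_SKETCH: &str = r#"
WITH RECURSIVE children AS (
    SELECT at_uri, parent_uri, root_uri, 1 AS depth
    FROM posts
    WHERE parent_uri = $1
    UNION ALL
    SELECT p.at_uri, p.parent_uri, p.root_uri, c.depth + 1
    FROM posts p
    JOIN children c ON p.parent_uri = c.at_uri
    WHERE c.depth < $2
)
SELECT at_uri, parent_uri, root_uri, depth FROM children
"#;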
226
+
227
+
pub async fn get_thread_children_branching(
228
+
conn: &mut AsyncPgConnection,
229
+
uri: &str,
230
+
depth: i32,
231
+
branching_factor: i32,
232
+
) -> QueryResult<Vec<ThreadItem>> {
233
+
diesel::sql_query(include_str!("sql/thread_branching.sql"))
234
+
.bind::<Text, _>(uri)
235
+
.bind::<Integer, _>(depth)
236
+
.bind::<Integer, _>(branching_factor)
237
+
.load(conn)
238
+
.await
239
+
}
240
+
241
+
#[derive(Debug, QueryableByName)]
242
+
#[diesel(check_for_backend(diesel::pg::Pg))]
243
+
pub struct HiddenThreadChildItem {
244
+
#[diesel(sql_type = Text)]
245
+
pub at_uri: String,
246
+
}
247
+
248
+
pub async fn get_thread_children_hidden(
249
+
conn: &mut AsyncPgConnection,
250
+
uri: &str,
251
+
root: &str,
252
+
) -> QueryResult<Vec<HiddenThreadChildItem>> {
253
+
diesel::sql_query(include_str!("sql/thread_v2_hidden_children.sql"))
254
+
.bind::<Text, _>(uri)
255
+
.bind::<Text, _>(root)
256
+
.load(conn)
257
+
.await
258
+
}
259
+
260
+
pub async fn get_thread_parents(
261
+
conn: &mut AsyncPgConnection,
262
+
uri: &str,
263
+
height: i32,
264
+
) -> QueryResult<Vec<ThreadItem>> {
265
+
diesel::sql_query(include_str!("sql/thread_parent.sql"))
266
+
.bind::<Text, _>(uri)
267
+
.bind::<Integer, _>(height)
268
+
.load(conn)
269
+
.await
270
+
}
271
+
272
+
pub async fn get_root_post(conn: &mut AsyncPgConnection, uri: &str) -> QueryResult<Option<String>> {
273
+
schema::posts::table
274
+
.select(schema::posts::root_uri)
275
+
.find(&uri)
276
+
.get_result(conn)
277
+
.await
278
+
.optional()
279
+
.map(|v| v.flatten())
280
+
}
281
+
282
+
pub async fn get_threadgate_hiddens(
283
+
conn: &mut AsyncPgConnection,
284
+
uri: &str,
285
+
) -> QueryResult<Option<TextArray>> {
286
+
schema::threadgates::table
287
+
.select(schema::threadgates::hidden_replies)
288
+
.find(&uri)
289
+
.get_result(conn)
290
+
.await
291
+
.optional()
292
+
}
+41
-3
parakeet/src/hydration/feedgen.rs
···
1
1
use crate::hydration::map_labels;
2
2
use crate::xrpc::cdn::BskyCdn;
3
3
use lexica::app_bsky::actor::ProfileView;
4
-
use lexica::app_bsky::feed::{GeneratorContentMode, GeneratorView};
4
+
use lexica::app_bsky::feed::{GeneratorContentMode, GeneratorView, GeneratorViewerState};
5
5
use parakeet_db::models;
6
6
use std::collections::HashMap;
7
7
use std::str::FromStr;
8
8
9
+
fn build_viewer((did, rkey): (String, String)) -> GeneratorViewerState {
10
+
GeneratorViewerState {
11
+
like: Some(format!("at://{did}/app.bsky.feed.like/{rkey}")),
12
+
}
13
+
}
14
+
9
15
fn build_feedgen(
10
16
feedgen: models::FeedGen,
11
17
creator: ProfileView,
12
18
labels: Vec<models::Label>,
13
19
likes: Option<i32>,
20
+
viewer: Option<GeneratorViewerState>,
14
21
cdn: &BskyCdn,
15
22
) -> GeneratorView {
16
23
let content_mode = feedgen
···
35
42
like_count: likes.unwrap_or_default() as i64,
36
43
accepts_interactions: feedgen.accepts_interactions,
37
44
labels: map_labels(labels),
45
+
viewer,
38
46
content_mode,
39
47
indexed_at: feedgen.created_at,
40
48
}
···
43
51
impl super::StatefulHydrator<'_> {
44
52
pub async fn hydrate_feedgen(&self, feedgen: String) -> Option<GeneratorView> {
45
53
let labels = self.get_label(&feedgen).await;
54
+
let viewer = self.get_feedgen_viewer_state(&feedgen).await;
46
55
let likes = self.loaders.like.load(feedgen.clone()).await;
47
56
let feedgen = self.loaders.feedgen.load(feedgen).await?;
48
57
let profile = self.hydrate_profile(feedgen.owner.clone()).await?;
49
58
50
-
Some(build_feedgen(feedgen, profile, labels, likes, &self.cdn))
59
+
Some(build_feedgen(
60
+
feedgen, profile, labels, likes, viewer, &self.cdn,
61
+
))
51
62
}
52
63
53
64
pub async fn hydrate_feedgens(&self, feedgens: Vec<String>) -> HashMap<String, GeneratorView> {
54
65
let labels = self.get_label_many(&feedgens).await;
66
+
let viewers = self.get_feedgen_viewer_states(&feedgens).await;
55
67
let mut likes = self.loaders.like.load_many(feedgens.clone()).await;
56
68
let feedgens = self.loaders.feedgen.load_many(feedgens).await;
57
69
···
66
78
.into_iter()
67
79
.filter_map(|(uri, feedgen)| {
68
80
let creator = creators.get(&feedgen.owner).cloned()?;
81
+
let viewer = viewers.get(&uri).cloned();
69
82
let labels = labels.get(&uri).cloned().unwrap_or_default();
70
83
let likes = likes.remove(&uri);
71
84
72
85
Some((
73
86
uri,
74
-
build_feedgen(feedgen, creator, labels, likes, &self.cdn),
87
+
build_feedgen(feedgen, creator, labels, likes, viewer, &self.cdn),
75
88
))
76
89
})
77
90
.collect()
91
+
}
92
+
93
+
async fn get_feedgen_viewer_state(&self, subject: &str) -> Option<GeneratorViewerState> {
94
+
if let Some(viewer) = &self.current_actor {
95
+
let data = self.loaders.like_state.get(viewer, subject).await?;
96
+
97
+
Some(build_viewer(data))
98
+
} else {
99
+
None
100
+
}
101
+
}
102
+
103
+
async fn get_feedgen_viewer_states(
104
+
&self,
105
+
subjects: &[String],
106
+
) -> HashMap<String, GeneratorViewerState> {
107
+
if let Some(viewer) = &self.current_actor {
108
+
let data = self.loaders.like_state.get_many(viewer, subjects).await;
109
+
110
+
data.into_iter()
111
+
.map(|(k, state)| (k, build_viewer(state)))
112
+
.collect()
113
+
} else {
114
+
HashMap::new()
115
+
}
78
116
}
79
117
}
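The `build_viewer` helper added here (and mirrored in labeler.rs, list.rs, and posts.rs below) rebuilds the caller's like-record URI from the `(did, rkey)` pair that the `like_state` loader returns. A std-only sketch of that reconstruction, with made-up identifiers:

// An at:// record URI is did + collection NSID + rkey; the collection for
// likes is fixed, so only the (did, rkey) pair needs to be stored per viewer.
fn like_uri((did, rkey): (String, String)) -> String {
    format!("at://{did}/app.bsky.feed.like/{rkey}")
}

fn main() {
    let uri = like_uri(("did:plc:abc123".into(), "3k2aexample".into()));
    assert_eq!(uri, "at://did:plc:abc123/app.bsky.feed.like/3k2aexample");
}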
parakeet/src/hydration/labeler.rs  (+59 -14)
···
 use crate::hydration::{map_labels, StatefulHydrator};
 use lexica::app_bsky::actor::ProfileView;
-use lexica::app_bsky::labeler::{LabelerPolicy, LabelerView, LabelerViewDetailed};
+use lexica::app_bsky::labeler::{
+    LabelerPolicy, LabelerView, LabelerViewDetailed, LabelerViewerState,
+};
 use lexica::com_atproto::label::{Blurs, LabelValueDefinition, Severity};
 use lexica::com_atproto::moderation::{ReasonType, SubjectType};
 use parakeet_db::models;
 use std::collections::HashMap;
 use std::str::FromStr;
+
+fn build_viewer((did, rkey): (String, String)) -> LabelerViewerState {
+    LabelerViewerState {
+        like: Some(format!("at://{did}/app.bsky.feed.like/{rkey}")),
+    }
+}

 fn build_view(
     labeler: models::LabelerService,
     creator: ProfileView,
     labels: Vec<models::Label>,
+    viewer: Option<LabelerViewerState>,
     likes: Option<i32>,
 ) -> LabelerView {
     LabelerView {
···
         cid: labeler.cid,
         creator,
         like_count: likes.unwrap_or_default() as i64,
+        viewer,
         labels: map_labels(labels),
         indexed_at: labeler.indexed_at.and_utc(),
     }
···
     defs: Vec<models::LabelDefinition>,
     creator: ProfileView,
     labels: Vec<models::Label>,
+    viewer: Option<LabelerViewerState>,
     likes: Option<i32>,
 ) -> LabelerViewDetailed {
     let reason_types = labeler.reasons.map(|v| {
-        v.into_iter()
-            .flatten()
-            .filter_map(|v| ReasonType::from_str(&v).ok())
+        v.iter()
+            .filter_map(|v| ReasonType::from_str(v).ok())
             .collect()
     });
···
         })
         .collect();
     let subject_types = labeler.subject_types.map(|v| {
-        v.into_iter()
-            .flatten()
-            .filter_map(|v| SubjectType::from_str(&v).ok())
+        v.iter()
+            .filter_map(|v| SubjectType::from_str(v).ok())
             .collect()
     });
-    let subject_collections = labeler
-        .subject_collections
-        .map(|v| v.into_iter().flatten().collect());
+    let subject_collections = labeler.subject_collections.map(Vec::from);

     LabelerViewDetailed {
         uri: format!("at://{}/app.bsky.labeler.service/self", labeler.did),
         cid: labeler.cid,
         creator,
         like_count: likes.unwrap_or_default() as i64,
+        viewer,
         policies: LabelerPolicy {
             label_values,
             label_value_definitions,
···
 impl StatefulHydrator<'_> {
     pub async fn hydrate_labeler(&self, labeler: String) -> Option<LabelerView> {
         let labels = self.get_label(&labeler).await;
+        let viewer = self.get_labeler_viewer_state(&labeler).await;
         let likes = self.loaders.like.load(make_labeler_uri(&labeler)).await;
         let (labeler, _) = self.loaders.labeler.load(labeler).await?;
         let creator = self.hydrate_profile(labeler.did.clone()).await?;

-        Some(build_view(labeler, creator, labels, likes))
+        Some(build_view(labeler, creator, labels, viewer, likes))
     }

     pub async fn hydrate_labelers(&self, labelers: Vec<String>) -> HashMap<String, LabelerView> {
···
             .values()
             .map(|(labeler, _)| (labeler.did.clone(), make_labeler_uri(&labeler.did)))
             .unzip::<_, _, Vec<_>, Vec<_>>();
+        let viewers = self.get_labeler_viewer_states(&uris).await;
         let creators = self.hydrate_profiles(creators).await;
         let mut likes = self.loaders.like.load_many(uris.clone()).await;
···
                 let creator = creators.get(&labeler.did).cloned()?;
                 let labels = labels.get(&k).cloned().unwrap_or_default();
                 let likes = likes.remove(&make_labeler_uri(&labeler.did));
+                let viewer = viewers.get(&make_labeler_uri(&k)).cloned();

-                Some((k, build_view(labeler, creator, labels, likes)))
+                Some((k, build_view(labeler, creator, labels, viewer, likes)))
             })
             .collect()
     }

     pub async fn hydrate_labeler_detailed(&self, labeler: String) -> Option<LabelerViewDetailed> {
         let labels = self.get_label(&labeler).await;
+        let viewer = self.get_labeler_viewer_state(&labeler).await;
         let likes = self.loaders.like.load(make_labeler_uri(&labeler)).await;
         let (labeler, defs) = self.loaders.labeler.load(labeler).await?;
         let creator = self.hydrate_profile(labeler.did.clone()).await?;

-        Some(build_view_detailed(labeler, defs, creator, labels, likes))
+        Some(build_view_detailed(
+            labeler, defs, creator, labels, viewer, likes,
+        ))
     }

     pub async fn hydrate_labelers_detailed(
···
             .values()
             .map(|(labeler, _)| (labeler.did.clone(), make_labeler_uri(&labeler.did)))
             .unzip::<_, _, Vec<_>, Vec<_>>();
+        let viewers = self.get_labeler_viewer_states(&uris).await;
         let creators = self.hydrate_profiles(creators).await;
         let mut likes = self.loaders.like.load_many(uris.clone()).await;
···
                 let creator = creators.get(&labeler.did).cloned()?;
                 let labels = labels.get(&k).cloned().unwrap_or_default();
                 let likes = likes.remove(&make_labeler_uri(&labeler.did));
+                let viewer = viewers.get(&make_labeler_uri(&k)).cloned();

-                let view = build_view_detailed(labeler, defs, creator, labels, likes);
+                let view = build_view_detailed(labeler, defs, creator, labels, viewer, likes);

                 Some((k, view))
             })
             .collect()
+    }
+
+    async fn get_labeler_viewer_state(&self, subject: &str) -> Option<LabelerViewerState> {
+        if let Some(viewer) = &self.current_actor {
+            let data = self
+                .loaders
+                .like_state
+                .get(&make_labeler_uri(viewer), subject)
+                .await?;
+
+            Some(build_viewer(data))
+        } else {
+            None
+        }
+    }
+
+    async fn get_labeler_viewer_states(
+        &self,
+        subjects: &[String],
+    ) -> HashMap<String, LabelerViewerState> {
+        if let Some(viewer) = &self.current_actor {
+            let data = self.loaders.like_state.get_many(viewer, subjects).await;
+
+            data.into_iter()
+                .map(|(k, state)| (k, build_viewer(state)))
+                .collect()
+        } else {
+            HashMap::new()
+        }
     }
 }
parakeet/src/hydration/list.rs  (+57 -5)
···
+use crate::db::ListStateRet;
 use crate::hydration::{map_labels, StatefulHydrator};
 use crate::xrpc::cdn::BskyCdn;
 use lexica::app_bsky::actor::ProfileView;
-use lexica::app_bsky::graph::{ListPurpose, ListView, ListViewBasic};
+use lexica::app_bsky::graph::{ListPurpose, ListView, ListViewBasic, ListViewerState};
 use parakeet_db::models;
 use std::collections::HashMap;
 use std::str::FromStr;

+fn build_viewer(data: ListStateRet) -> ListViewerState {
+    ListViewerState {
+        muted: data.muted,
+        blocked: data.block,
+    }
+}
+
 fn build_basic(
     list: models::List,
     list_item_count: i64,
     labels: Vec<models::Label>,
+    viewer: Option<ListViewerState>,
     cdn: &BskyCdn,
 ) -> Option<ListViewBasic> {
     let purpose = ListPurpose::from_str(&list.list_type).ok()?;
···
         purpose,
         avatar,
         list_item_count,
+        viewer,
         labels: map_labels(labels),
         indexed_at: list.created_at,
     })
···
     list_item_count: i64,
     creator: ProfileView,
     labels: Vec<models::Label>,
+    viewer: Option<ListViewerState>,
     cdn: &BskyCdn,
 ) -> Option<ListView> {
     let purpose = ListPurpose::from_str(&list.list_type).ok()?;
···
         description_facets,
         avatar,
         list_item_count,
+        viewer,
         labels: map_labels(labels),
         indexed_at: list.created_at,
     })
···
 impl StatefulHydrator<'_> {
     pub async fn hydrate_list_basic(&self, list: String) -> Option<ListViewBasic> {
         let labels = self.get_label(&list).await;
+        let viewer = self.get_list_viewer_state(&list).await;
         let (list, count) = self.loaders.list.load(list).await?;

-        build_basic(list, count, labels, &self.cdn)
+        build_basic(list, count, labels, viewer, &self.cdn)
     }

     pub async fn hydrate_lists_basic(&self, lists: Vec<String>) -> HashMap<String, ListViewBasic> {
+        if lists.is_empty() {
+            return HashMap::new();
+        }
+
         let labels = self.get_label_many(&lists).await;
+        let viewers = self.get_list_viewer_states(&lists).await;
         let lists = self.loaders.list.load_many(lists).await;

         lists
             .into_iter()
             .filter_map(|(uri, (list, count))| {
                 let labels = labels.get(&uri).cloned().unwrap_or_default();
+                let viewer = viewers.get(&uri).cloned();

-                build_basic(list, count, labels, &self.cdn).map(|v| (uri, v))
+                build_basic(list, count, labels, viewer, &self.cdn).map(|v| (uri, v))
             })
             .collect()
     }

     pub async fn hydrate_list(&self, list: String) -> Option<ListView> {
         let labels = self.get_label(&list).await;
+        let viewer = self.get_list_viewer_state(&list).await;
         let (list, count) = self.loaders.list.load(list).await?;
         let profile = self.hydrate_profile(list.owner.clone()).await?;

-        build_listview(list, count, profile, labels, &self.cdn)
+        build_listview(list, count, profile, labels, viewer, &self.cdn)
     }

     pub async fn hydrate_lists(&self, lists: Vec<String>) -> HashMap<String, ListView> {
+        if lists.is_empty() {
+            return HashMap::new();
+        }
+
         let labels = self.get_label_many(&lists).await;
+        let viewers = self.get_list_viewer_states(&lists).await;
         let lists = self.loaders.list.load_many(lists).await;

         let creators = lists.values().map(|(list, _)| list.owner.clone()).collect();
···
             .into_iter()
             .filter_map(|(uri, (list, count))| {
                 let creator = creators.get(&list.owner)?;
+                let viewer = viewers.get(&uri).cloned();
                 let labels = labels.get(&uri).cloned().unwrap_or_default();

-                build_listview(list, count, creator.to_owned(), labels, &self.cdn).map(|v| (uri, v))
+                build_listview(list, count, creator.to_owned(), labels, viewer, &self.cdn)
+                    .map(|v| (uri, v))
             })
             .collect()
+    }
+
+    async fn get_list_viewer_state(&self, subject: &str) -> Option<ListViewerState> {
+        if let Some(viewer) = &self.current_actor {
+            let data = self.loaders.list_state.get(viewer, subject).await?;
+
+            Some(build_viewer(data))
+        } else {
+            None
+        }
+    }
+
+    async fn get_list_viewer_states(
+        &self,
+        subjects: &[String],
+    ) -> HashMap<String, ListViewerState> {
+        if let Some(viewer) = &self.current_actor {
+            let data = self.loaders.list_state.get_many(viewer, subjects).await;
+
+            data.into_iter()
+                .map(|(k, state)| (k, build_viewer(state)))
+                .collect()
+        } else {
+            HashMap::new()
+        }
     }
 }
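Two things changed shape here besides the viewer plumbing: the batch hydrators now short-circuit on empty input, and `ListViewerState` is filled from the `ListStateRet` row produced by list_states.sql. A std-only mirror of that mapping (struct shapes inferred from the SQL, so treat them as assumptions):

// Inferred from list_states.sql: `block` is the at:// URI of the viewer's
// listblock record (if any), `muted` is a bare flag from list_mutes.
struct ListStateRet {
    muted: bool,
    block: Option<String>,
}

struct ListViewerState {
    muted: bool,
    blocked: Option<String>,
}

fn build_viewer(data: ListStateRet) -> ListViewerState {
    ListViewerState {
        muted: data.muted,
        blocked: data.block,
    }
}

fn main() {
    let v = build_viewer(ListStateRet {
        muted: true,
        block: Some("at://did:plc:me/app.bsky.graph.listblock/3k2a".into()),
    });
    assert!(v.muted && v.blocked.is_some());
}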
parakeet/src/hydration/posts.rs  (+239 -80)
···
+use crate::db::PostStateRet;
 use crate::hydration::{map_labels, StatefulHydrator};
 use lexica::app_bsky::actor::ProfileViewBasic;
 use lexica::app_bsky::embed::Embed;
-use lexica::app_bsky::feed::{FeedViewPost, PostView, ReplyRef, ReplyRefPost, ThreadgateView};
+use lexica::app_bsky::feed::{
+    BlockedAuthor, FeedReasonRepost, FeedViewPost, FeedViewPostReason, PostView, PostViewerState,
+    ReplyRef, ReplyRefPost, ThreadgateView,
+};
 use lexica::app_bsky::graph::ListViewBasic;
 use lexica::app_bsky::RecordStats;
 use parakeet_db::models;
 use parakeet_index::PostStats;
 use std::collections::HashMap;

+fn build_viewer(did: &str, data: PostStateRet) -> PostViewerState {
+    let is_me = did == data.did;
+
+    let repost = data
+        .repost_rkey
+        .map(|rkey| format!("at://{did}/app.bsky.feed.repost/{rkey}"));
+    let like = data
+        .like_rkey
+        .map(|rkey| format!("at://{did}/app.bsky.feed.like/{rkey}"));
+
+    PostViewerState {
+        repost,
+        like,
+        bookmarked: data.bookmarked,
+        thread_muted: false, // todo when we have thread mutes
+        reply_disabled: false,
+        embedding_disabled: data.embed_disabled && !is_me, // poster can always bypass embed disabled.
+        pinned: data.pinned,
+    }
+}
+
+type HydratePostsRet = (
+    models::Post,
+    ProfileViewBasic,
+    Vec<models::Label>,
+    Option<Embed>,
+    Option<ThreadgateView>,
+    Option<PostViewerState>,
+    Option<PostStats>,
+);
+
 fn build_postview(
-    post: models::Post,
-    author: ProfileViewBasic,
-    labels: Vec<models::Label>,
-    embed: Option<Embed>,
-    threadgate: Option<ThreadgateView>,
-    stats: Option<PostStats>,
+    (post, author, labels, embed, threadgate, viewer, stats): HydratePostsRet,
 ) -> PostView {
     let stats = stats
         .map(|stats| RecordStats {
···
         embed,
         stats,
         labels: map_labels(labels),
+        viewer,
         threadgate,
         indexed_at: post.created_at,
     }
···
     ) -> Option<ThreadgateView> {
         let threadgate = threadgate?;

-        let lists = threadgate
-            .allowed_lists
-            .iter()
-            .flatten()
-            .cloned()
-            .collect::<Vec<_>>();
+        let lists = match threadgate.allowed_lists.as_ref() {
+            Some(allowed_lists) => allowed_lists.clone().into(),
+            None => Vec::new(),
+        };
         let lists = self.hydrate_lists_basic(lists).await;

         Some(build_threadgate_view(
···
         threadgates: Vec<models::Threadgate>,
     ) -> HashMap<String, ThreadgateView> {
         let lists = threadgates.iter().fold(Vec::new(), |mut acc, c| {
-            acc.extend(c.allowed_lists.iter().flatten().cloned());
+            if let Some(lists) = &c.allowed_lists {
+                acc.extend(lists.clone().0);
+            }
             acc
         });
         let lists = self.hydrate_lists_basic(lists).await;
···
         threadgates
             .into_iter()
             .map(|threadgate| {
-                let this_lists = threadgate
-                    .allowed_lists
-                    .iter()
-                    .filter_map(|v| v.clone().and_then(|v| lists.get(&v).cloned()))
-                    .collect();
+                let this_lists = match &threadgate.allowed_lists {
+                    Some(allowed_lists) => allowed_lists
+                        .iter()
+                        .filter_map(|v| lists.get(v).cloned())
+                        .collect(),
+                    None => Vec::new(),
+                };

                 (
                     threadgate.at_uri.clone(),
···
     pub async fn hydrate_post(&self, post: String) -> Option<PostView> {
         let stats = self.loaders.post_stats.load(post.clone()).await;
         let (post, threadgate) = self.loaders.posts.load(post).await?;
+        let viewer = self.get_post_viewer_state(&post.at_uri).await;
         let embed = self.hydrate_embed(post.at_uri.clone()).await;
         let author = self.hydrate_profile_basic(post.did.clone()).await?;
         let threadgate = self.hydrate_threadgate(threadgate).await;
         let labels = self.get_label(&post.at_uri).await;

-        Some(build_postview(
-            post, author, labels, embed, threadgate, stats,
-        ))
+        Some(build_postview((
+            post, author, labels, embed, threadgate, viewer, stats,
+        )))
     }

-    pub async fn hydrate_posts(&self, posts: Vec<String>) -> HashMap<String, PostView> {
+    async fn hydrate_posts_inner(&self, posts: Vec<String>) -> HashMap<String, HydratePostsRet> {
         let stats = self.loaders.post_stats.load_many(posts.clone()).await;
         let posts = self.loaders.posts.load_many(posts).await;
···
             .unzip::<_, _, Vec<_>, Vec<_>>();
         let authors = self.hydrate_profiles_basic(authors).await;

-        let post_labels = self.get_label_many(&post_uris).await;
+        let mut post_labels = self.get_label_many(&post_uris).await;
+        let mut viewer_data = self.get_post_viewer_states(&post_uris).await;

         let threadgates = posts
             .values()
···
             .collect();
         let threadgates = self.hydrate_threadgates(threadgates).await;

-        let embeds = self.hydrate_embeds(post_uris).await;
+        let mut embeds = self.hydrate_embeds(post_uris).await;

         posts
             .into_iter()
             .filter_map(|(uri, (post, threadgate))| {
-                let author = authors.get(&post.did)?;
-                let embed = embeds.get(&uri).cloned();
+                let author = authors.get(&post.did)?.clone();
+                let embed = embeds.remove(&uri);
                 let threadgate = threadgate.and_then(|tg| threadgates.get(&tg.at_uri).cloned());
-                let labels = post_labels.get(&uri).cloned().unwrap_or_default();
+                let labels = post_labels.remove(&uri).unwrap_or_default();
                 let stats = stats.get(&uri).cloned();
+                let viewer = viewer_data.remove(&uri);

                 Some((
                     uri,
-                    build_postview(post, author.to_owned(), labels, embed, threadgate, stats),
+                    (post, author, labels, embed, threadgate, viewer, stats),
                 ))
             })
             .collect()
     }

-    pub async fn hydrate_feed_posts(&self, posts: Vec<String>) -> HashMap<String, FeedViewPost> {
-        let stats = self.loaders.post_stats.load_many(posts.clone()).await;
-        let posts = self.loaders.posts.load_many(posts).await;
-
-        let (authors, post_uris) = posts
-            .values()
-            .map(|(post, _)| (post.did.clone(), post.at_uri.clone()))
-            .unzip::<_, _, Vec<_>, Vec<_>>();
-        let authors = self.hydrate_profiles_basic(authors).await;
-
-        let post_labels = self.get_label_many(&post_uris).await;
+    pub async fn hydrate_posts(&self, posts: Vec<String>) -> HashMap<String, PostView> {
+        self.hydrate_posts_inner(posts)
+            .await
+            .into_iter()
+            .map(|(uri, data)| (uri, build_postview(data)))
+            .collect()
+    }

-        let embeds = self.hydrate_embeds(post_uris).await;
+    pub async fn hydrate_feed_posts(
+        &self,
+        posts: Vec<RawFeedItem>,
+        author_threads_only: bool,
+    ) -> Vec<FeedViewPost> {
+        let post_uris = posts
+            .iter()
+            .map(|item| item.post_uri().to_string())
+            .collect::<Vec<_>>();
+        let mut posts_hyd = self.hydrate_posts_inner(post_uris).await;

-        let reply_refs = posts
+        // we shouldn't show the parent when the post violates a threadgate.
+        let reply_refs = posts_hyd
             .values()
-            .flat_map(|(post, _)| [post.parent_uri.clone(), post.root_uri.clone()])
+            .filter(|(post, ..)| !post.violates_threadgate)
+            .flat_map(|(post, ..)| [post.parent_uri.clone(), post.root_uri.clone()])
             .flatten()
             .collect::<Vec<_>>();
-
         let reply_posts = self.hydrate_posts(reply_refs).await;

+        let repost_profiles = posts
+            .iter()
+            .filter_map(|item| item.repost_by())
+            .collect::<Vec<_>>();
+        let profiles_hydrated = self.hydrate_profiles_basic(repost_profiles).await;
+
         posts
             .into_iter()
-            .filter_map(|(post_uri, (post, _))| {
-                let author = authors.get(&post.did)?;
+            .filter_map(|item| {
+                let post = posts_hyd.remove(item.post_uri())?;
+                let context = item.context();
+
+                let reply = if let RawFeedItem::Post { .. } = item {
+                    let root_uri = post.0.root_uri.as_ref();
+                    let parent_uri = post.0.parent_uri.as_ref();
+
+                    let (root, parent) = if author_threads_only {
+                        if root_uri.is_some() && parent_uri.is_some() {
+                            let root = root_uri.and_then(|uri| posts_hyd.get(uri))?;
+                            let parent = parent_uri.and_then(|uri| posts_hyd.get(uri))?;
+
+                            let root = build_postview(root.clone());
+                            let parent = build_postview(parent.clone());
+
+                            (Some(root), Some(parent))
+                        } else {
+                            (None, None)
+                        }
+                    } else {
+                        let root = root_uri.and_then(|uri| reply_posts.get(uri)).cloned();
+                        let parent = parent_uri.and_then(|uri| reply_posts.get(uri)).cloned();

-                let root = post.root_uri.as_ref().and_then(|uri| reply_posts.get(uri));
-                let parent = post
-                    .parent_uri
-                    .as_ref()
-                    .and_then(|uri| reply_posts.get(uri));
+                        (root, parent)
+                    };

-                let reply = if post.parent_uri.is_some() && post.root_uri.is_some() {
-                    Some(ReplyRef {
-                        root: root.cloned().map(ReplyRefPost::Post).unwrap_or(
-                            ReplyRefPost::NotFound {
-                                uri: post.root_uri.as_ref().unwrap().clone(),
-                                not_found: true,
-                            },
-                        ),
-                        parent: parent.cloned().map(ReplyRefPost::Post).unwrap_or(
-                            ReplyRefPost::NotFound {
-                                uri: post.parent_uri.as_ref().unwrap().clone(),
-                                not_found: true,
-                            },
-                        ),
-                        grandparent_author: None,
-                    })
+                    if root_uri.is_some() || parent_uri.is_some() {
+                        Some(ReplyRef {
+                            root: root.map(postview_to_replyref).unwrap_or(
+                                ReplyRefPost::NotFound {
+                                    uri: root_uri.unwrap().to_owned(),
+                                    not_found: true,
+                                },
+                            ),
+                            parent: parent.map(postview_to_replyref).unwrap_or(
+                                ReplyRefPost::NotFound {
+                                    uri: parent_uri.unwrap().to_owned(),
+                                    not_found: true,
+                                },
+                            ),
+                            grandparent_author: None,
+                        })
+                    } else {
+                        None
+                    }
                 } else {
                     None
                 };

-                let embed = embeds.get(&post_uri).cloned();
-                let labels = post_labels.get(&post_uri).cloned().unwrap_or_default();
-                let stats = stats.get(&post_uri).cloned();
-                let post = build_postview(post, author.to_owned(), labels, embed, None, stats);
+                let reason = match item {
+                    RawFeedItem::Repost { uri, by, at, .. } => {
+                        Some(FeedViewPostReason::Repost(FeedReasonRepost {
+                            by: profiles_hydrated.get(&by).cloned()?,
+                            uri: Some(uri),
+                            cid: None,
+                            indexed_at: at,
+                        }))
+                    }
+                    RawFeedItem::Pin { .. } => Some(FeedViewPostReason::Pin),
+                    _ => None,
+                };

-                Some((
-                    post_uri,
-                    FeedViewPost {
-                        post,
-                        reply,
-                        reason: None,
-                        feed_context: None,
-                    },
-                ))
+                let post = build_postview(post);
+
+                Some(FeedViewPost {
+                    post,
+                    reply,
+                    reason,
+                    feed_context: context,
+                })
             })
             .collect()
+    }
+
+    async fn get_post_viewer_state(&self, subject: &str) -> Option<PostViewerState> {
+        if let Some(viewer) = &self.current_actor {
+            let data = self.loaders.post_state.get(viewer, subject).await?;
+
+            Some(build_viewer(viewer, data))
+        } else {
+            None
+        }
+    }
+
+    async fn get_post_viewer_states(
+        &self,
+        subjects: &[String],
+    ) -> HashMap<String, PostViewerState> {
+        if let Some(viewer) = &self.current_actor {
+            let data = self.loaders.post_state.get_many(viewer, subjects).await;
+
+            data.into_iter()
+                .map(|(k, state)| (k, build_viewer(viewer, state)))
+                .collect()
+        } else {
+            HashMap::new()
+        }
+    }
+}
+
+fn postview_to_replyref(post: PostView) -> ReplyRefPost {
+    match &post.author.viewer {
+        Some(v) if v.blocked_by || v.blocking.is_some() => ReplyRefPost::Blocked {
+            uri: post.uri,
+            blocked: true,
+            author: BlockedAuthor {
+                did: post.author.did.clone(),
+                viewer: post.author.viewer,
+            },
+        },
+        _ => ReplyRefPost::Post(post),
+    }
+}
+
+#[derive(Debug)]
+pub enum RawFeedItem {
+    Pin {
+        uri: String,
+        context: Option<String>,
+    },
+    Post {
+        uri: String,
+        context: Option<String>,
+    },
+    Repost {
+        uri: String,
+        post: String,
+        by: String,
+        at: chrono::DateTime<chrono::Utc>,
+        context: Option<String>,
+    },
+}
+
+impl RawFeedItem {
+    fn post_uri(&self) -> &str {
+        match self {
+            RawFeedItem::Pin { uri, .. } => uri,
+            RawFeedItem::Post { uri, .. } => uri,
+            RawFeedItem::Repost { post, .. } => post,
+        }
+    }
+
+    fn repost_by(&self) -> Option<String> {
+        match self {
+            RawFeedItem::Repost { by, .. } => Some(by.clone()),
+            _ => None,
+        }
+    }
+
+    fn context(&self) -> Option<String> {
+        match self {
+            RawFeedItem::Pin { context, .. } => context.clone(),
+            RawFeedItem::Post { context, .. } => context.clone(),
+            RawFeedItem::Repost { context, .. } => context.clone(),
+        }
     }
 }
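`RawFeedItem` is the pivot of this refactor: callers now describe the skeleton (post, repost, pin) and `hydrate_feed_posts` resolves everything in one pass. The key subtlety is which URI gets hydrated per variant. A trimmed std-only copy of just that part (the `chrono` timestamp and `context` fields are dropped for brevity):

#[derive(Debug)]
enum RawFeedItem {
    Pin { uri: String },
    Post { uri: String },
    Repost { uri: String, post: String, by: String },
}

impl RawFeedItem {
    // For a repost the *reposted* post is hydrated; `uri` is the repost record
    // itself and only feeds the `reason` field of the resulting FeedViewPost.
    fn post_uri(&self) -> &str {
        match self {
            RawFeedItem::Pin { uri } | RawFeedItem::Post { uri } => uri,
            RawFeedItem::Repost { post, .. } => post,
        }
    }
}

fn main() {
    let item = RawFeedItem::Repost {
        uri: "at://did:plc:bob/app.bsky.feed.repost/3k2b".into(),
        post: "at://did:plc:alice/app.bsky.feed.post/3k2a".into(),
        by: "did:plc:bob".into(),
    };
    assert_eq!(item.post_uri(), "at://did:plc:alice/app.bsky.feed.post/3k2a");
}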
parakeet/src/hydration/profile.rs  (+115 -5)
···
+use crate::db::ProfileStateRet;
 use crate::hydration::map_labels;
 use crate::loaders::ProfileLoaderRet;
 use crate::xrpc::cdn::BskyCdn;
···
 use chrono::TimeDelta;
 use lexica::app_bsky::actor::*;
 use lexica::app_bsky::embed::External;
+use lexica::app_bsky::graph::ListViewBasic;
 use parakeet_db::models;
 use parakeet_index::ProfileStats;
 use std::collections::HashMap;
···
         })
     } else {
         None
+    }
+}
+
+fn build_viewer(
+    data: ProfileStateRet,
+    list_mute: Option<ListViewBasic>,
+    list_block: Option<ListViewBasic>,
+) -> ProfileViewerState {
+    let following = data
+        .following
+        .map(|rkey| format!("at://{}/app.bsky.graph.follow/{rkey}", data.did));
+    let followed_by = data
+        .followed
+        .map(|rkey| format!("at://{}/app.bsky.graph.follow/{rkey}", data.subject));
+
+    let blocking = data.list_block.or(data.blocking);
+
+    ProfileViewerState {
+        muted: data.muting.unwrap_or_default(),
+        muted_by_list: list_mute,
+        blocked_by: data.blocked.unwrap_or_default(), // TODO: this doesn't factor for blocklists atm
+        blocking,
+        blocking_by_list: list_block,
+        following,
+        followed_by,
     }
 }
···
     stats: Option<ProfileStats>,
     labels: Vec<models::Label>,
     verifications: Option<Vec<models::VerificationEntry>>,
+    viewer: Option<ProfileViewerState>,
     cdn: &BskyCdn,
 ) -> ProfileViewBasic {
     let associated = build_associated(chat_decl, is_labeler, stats, notif_decl);
···
         display_name: profile.display_name,
         avatar,
         associated,
+        viewer,
         labels: map_labels(labels),
         verification,
         status,
+        pronouns: profile.pronouns,
         created_at: profile.created_at.and_utc(),
     }
 }
···
     stats: Option<ProfileStats>,
     labels: Vec<models::Label>,
     verifications: Option<Vec<models::VerificationEntry>>,
+    viewer: Option<ProfileViewerState>,
     cdn: &BskyCdn,
 ) -> ProfileView {
     let associated = build_associated(chat_decl, is_labeler, stats, notif_decl);
···
         description: profile.description,
         avatar,
         associated,
+        viewer,
         labels: map_labels(labels),
         verification,
         status,
+        pronouns: profile.pronouns,
         created_at: profile.created_at.and_utc(),
         indexed_at: profile.indexed_at,
     }
···
     stats: Option<ProfileStats>,
     labels: Vec<models::Label>,
     verifications: Option<Vec<models::VerificationEntry>>,
+    viewer: Option<ProfileViewerState>,
     cdn: &BskyCdn,
 ) -> ProfileViewDetailed {
     let associated = build_associated(chat_decl, is_labeler, stats, notif_decl);
···
         followers_count: stats.map(|v| v.followers as i64).unwrap_or_default(),
         follows_count: stats.map(|v| v.following as i64).unwrap_or_default(),
         associated,
+        viewer,
         labels: map_labels(labels),
         verification,
         status,
+        pronouns: profile.pronouns,
+        website: profile.website,
         created_at: profile.created_at.and_utc(),
         indexed_at: profile.indexed_at,
     }
···
 impl super::StatefulHydrator<'_> {
     pub async fn hydrate_profile_basic(&self, did: String) -> Option<ProfileViewBasic> {
         let labels = self.get_profile_label(&did).await;
+        let viewer = self.get_profile_viewer_state(&did).await;
         let verif = self.loaders.verification.load(did.clone()).await;
         let stats = self.loaders.profile_stats.load(did.clone()).await;
         let profile_info = self.loaders.profile.load(did).await?;

-        Some(build_basic(profile_info, stats, labels, verif, &self.cdn))
+        Some(build_basic(
+            profile_info,
+            stats,
+            labels,
+            verif,
+            viewer,
+            &self.cdn,
+        ))
     }

     pub async fn hydrate_profiles_basic(
···
         dids: Vec<String>,
     ) -> HashMap<String, ProfileViewBasic> {
         let labels = self.get_profile_label_many(&dids).await;
+        let viewers = self.get_profile_viewer_states(&dids).await;
         let verif = self.loaders.verification.load_many(dids.clone()).await;
         let stats = self.loaders.profile_stats.load_many(dids.clone()).await;
         let profiles = self.loaders.profile.load_many(dids).await;
···
             .map(|(k, profile_info)| {
                 let labels = labels.get(&k).cloned().unwrap_or_default();
                 let verif = verif.get(&k).cloned();
+                let viewer = viewers.get(&k).cloned();
                 let stats = stats.get(&k).cloned();

-                let v = build_basic(profile_info, stats, labels, verif, &self.cdn);
+                let v = build_basic(profile_info, stats, labels, verif, viewer, &self.cdn);
                 (k, v)
             })
             .collect()
···
     pub async fn hydrate_profile(&self, did: String) -> Option<ProfileView> {
         let labels = self.get_profile_label(&did).await;
+        let viewer = self.get_profile_viewer_state(&did).await;
         let verif = self.loaders.verification.load(did.clone()).await;
         let stats = self.loaders.profile_stats.load(did.clone()).await;
         let profile_info = self.loaders.profile.load(did).await?;

-        Some(build_profile(profile_info, stats, labels, verif, &self.cdn))
+        Some(build_profile(
+            profile_info,
+            stats,
+            labels,
+            verif,
+            viewer,
+            &self.cdn,
+        ))
     }

     pub async fn hydrate_profiles(&self, dids: Vec<String>) -> HashMap<String, ProfileView> {
         let labels = self.get_profile_label_many(&dids).await;
+        let viewers = self.get_profile_viewer_states(&dids).await;
         let verif = self.loaders.verification.load_many(dids.clone()).await;
         let stats = self.loaders.profile_stats.load_many(dids.clone()).await;
         let profiles = self.loaders.profile.load_many(dids).await;
···
             .map(|(k, profile_info)| {
                 let labels = labels.get(&k).cloned().unwrap_or_default();
                 let verif = verif.get(&k).cloned();
+                let viewer = viewers.get(&k).cloned();
                 let stats = stats.get(&k).cloned();

-                let v = build_profile(profile_info, stats, labels, verif, &self.cdn);
+                let v = build_profile(profile_info, stats, labels, verif, viewer, &self.cdn);
                 (k, v)
             })
             .collect()
···
     pub async fn hydrate_profile_detailed(&self, did: String) -> Option<ProfileViewDetailed> {
         let labels = self.get_profile_label(&did).await;
+        let viewer = self.get_profile_viewer_state(&did).await;
         let verif = self.loaders.verification.load(did.clone()).await;
         let stats = self.loaders.profile_stats.load(did.clone()).await;
         let profile_info = self.loaders.profile.load(did).await?;
···
             stats,
             labels,
             verif,
+            viewer,
             &self.cdn,
         ))
     }
···
         dids: Vec<String>,
     ) -> HashMap<String, ProfileViewDetailed> {
         let labels = self.get_profile_label_many(&dids).await;
+        let viewers = self.get_profile_viewer_states(&dids).await;
         let verif = self.loaders.verification.load_many(dids.clone()).await;
         let stats = self.loaders.profile_stats.load_many(dids.clone()).await;
         let profiles = self.loaders.profile.load_many(dids).await;
···
             .map(|(k, profile_info)| {
                 let labels = labels.get(&k).cloned().unwrap_or_default();
                 let verif = verif.get(&k).cloned();
+                let viewer = viewers.get(&k).cloned();
                 let stats = stats.get(&k).cloned();

-                let v = build_detailed(profile_info, stats, labels, verif, &self.cdn);
+                let v = build_detailed(profile_info, stats, labels, verif, viewer, &self.cdn);
                 (k, v)
             })
             .collect()
+    }
+
+    async fn get_profile_viewer_state(&self, subject: &str) -> Option<ProfileViewerState> {
+        if let Some(viewer) = &self.current_actor {
+            let data = self.loaders.profile_state.get(viewer, subject).await?;
+
+            let list_block = match &data.list_block {
+                Some(uri) => self.hydrate_list_basic(uri.clone()).await,
+                None => None,
+            };
+            let list_mute = match &data.list_mute {
+                Some(uri) => self.hydrate_list_basic(uri.clone()).await,
+                None => None,
+            };
+
+            Some(build_viewer(data, list_mute, list_block))
+        } else {
+            None
+        }
+    }
+
+    async fn get_profile_viewer_states(
+        &self,
+        dids: &[String],
+    ) -> HashMap<String, ProfileViewerState> {
+        if let Some(viewer) = &self.current_actor {
+            let data = self.loaders.profile_state.get_many(viewer, dids).await;
+            let lists = data
+                .values()
+                .flat_map(|v| [&v.list_block, &v.list_mute])
+                .flatten()
+                .cloned()
+                .collect();
+            let lists = self.hydrate_lists_basic(lists).await;
+
+            data.into_iter()
+                .map(|(k, state)| {
+                    let list_mute = state.list_mute.as_ref().and_then(|v| lists.get(v).cloned());
+                    let list_block = state
+                        .list_block
+                        .as_ref()
+                        .and_then(|v| lists.get(v).cloned());
+
+                    (k, build_viewer(state, list_mute, list_block))
+                })
+                .collect()
+        } else {
+            HashMap::new()
+        }
     }
 }
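One line in `build_viewer` carries the whole blocklist-precedence rule: `data.list_block.or(data.blocking)` reports the listblock URI as `blocking` when both a subscribed blocklist and a direct block record exist. A std-only sketch of that `Option::or` behaviour:

fn effective_blocking(list_block: Option<String>, direct: Option<String>) -> Option<String> {
    // Option::or keeps the first Some, so a list-derived block wins over a
    // direct block record when both are present.
    list_block.or(direct)
}

fn main() {
    let list = Some("at://did:plc:me/app.bsky.graph.listblock/1".to_string());
    let direct = Some("at://did:plc:me/app.bsky.graph.block/2".to_string());
    assert_eq!(effective_blocking(list.clone(), direct.clone()), list);
    assert_eq!(effective_blocking(None, direct.clone()), direct);
}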
parakeet/src/hydration/starter_packs.rs  (+3 -7)
···
         let feeds = sp
             .feeds
             .clone()
-            .unwrap_or_default()
-            .into_iter()
-            .flatten()
-            .collect();
-        let feeds = self.hydrate_feedgens(feeds).await.into_values().collect();
+            .unwrap_or_default();
+        let feeds = self.hydrate_feedgens(feeds.into()).await.into_values().collect();

         Some(build_spview(sp, creator, labels, list, feeds))
     }
···
         let feeds = packs
             .values()
             .filter_map(|pack| pack.feeds.clone())
-            .flat_map(|feeds| feeds.into_iter().flatten())
+            .flat_map(Vec::from)
             .collect();

         let creators = self.hydrate_profiles_basic(creators).await;
···
             let list = lists.get(&pack.list).cloned();
             let feeds = pack.feeds.as_ref().map(|v| {
                 v.iter()
-                    .flatten()
                     .filter_map(|feed| feeds.get(feed).cloned())
                     .collect()
             });
parakeet/src/loaders.rs  (+135 -1)
···
 use crate::cache::PrefixedLoaderCache;
+use crate::db;
 use crate::xrpc::extract::LabelConfigItem;
 use dataloader::async_cached::Loader;
 use dataloader::non_cached::Loader as NonCachedLoader;
 use dataloader::BatchFn;
+use diesel::dsl::sql;
 use diesel::prelude::*;
 use diesel_async::pooled_connection::deadpool::Pool;
 use diesel_async::{AsyncPgConnection, RunQueryDsl};
···
     pub label: LabelLoader,
     pub labeler: CachingLoader<String, LabelServiceLoaderRet, LabelServiceLoader>,
     pub list: CachingLoader<String, ListLoaderRet, ListLoader>,
+    pub list_state: ListStateLoader,
     pub like: NonCachedLoader<String, i32, LikeLoader>,
+    pub like_state: LikeRecordLoader,
     pub posts: CachingLoader<String, PostLoaderRet, PostLoader>,
     pub post_stats: NonCachedLoader<String, parakeet_index::PostStats, PostStatsLoader>,
+    pub post_state: PostStateLoader,
     pub profile: CachingLoader<String, ProfileLoaderRet, ProfileLoader>,
     pub profile_stats: NonCachedLoader<String, parakeet_index::ProfileStats, ProfileStatsLoader>,
+    pub profile_state: ProfileStateLoader,
     pub starterpacks: CachingLoader<String, StarterPackLoaderRet, StarterPackLoader>,
     pub verification: CachingLoader<String, Vec<models::VerificationEntry>, VerificationLoader>,
 }
···
         label: LabelLoader(pool.clone()), // CARE: never cache this.
         labeler: new_plc_loader(LabelServiceLoader(pool.clone(), idxc.clone()), &rc, "labeler", 600),
         like: NonCachedLoader::new(LikeLoader(idxc.clone())),
+        like_state: LikeRecordLoader(pool.clone()),
         list: new_plc_loader(ListLoader(pool.clone()), &rc, "list", 600),
+        list_state: ListStateLoader(pool.clone()),
         posts: new_plc_loader(PostLoader(pool.clone()), &rc, "post", 3600),
         post_stats: NonCachedLoader::new(PostStatsLoader(idxc.clone())),
+        post_state: PostStateLoader(pool.clone()),
         profile: new_plc_loader(ProfileLoader(pool.clone()), &rc, "profile", 3600),
         profile_stats: NonCachedLoader::new(ProfileStatsLoader(idxc.clone())),
+        profile_state: ProfileStateLoader(pool.clone()),
         starterpacks: new_plc_loader(StarterPackLoader(pool.clone()), &rc, "starterpacks", 600),
         verification: new_plc_loader(VerificationLoader(pool.clone()), &rc, "verification", 60),
     }
···
     }
 }

+pub struct LikeRecordLoader(Pool<AsyncPgConnection>);
+impl LikeRecordLoader {
+    pub async fn get(&self, did: &str, subject: &str) -> Option<(String, String)> {
+        let mut conn = self.0.get().await.unwrap();
+
+        db::get_like_state(&mut conn, did, subject)
+            .await
+            .unwrap_or_else(|e| {
+                tracing::error!("like state load failed: {e}");
+                None
+            })
+    }
+
+    pub async fn get_many(
+        &self,
+        did: &str,
+        subjects: &[String],
+    ) -> HashMap<String, (String, String)> {
+        let mut conn = self.0.get().await.unwrap();
+
+        match db::get_like_states(&mut conn, did, subjects).await {
+            Ok(res) => {
+                HashMap::from_iter(res.into_iter().map(|(sub, did, rkey)| (sub, (did, rkey))))
+            }
+            Err(e) => {
+                tracing::error!("like state load failed: {e}");
+                HashMap::new()
+            }
+        }
+    }
+}
+
 pub struct HandleLoader(Pool<AsyncPgConnection>);
 impl BatchFn<String, String> for HandleLoader {
     async fn load(&mut self, keys: &[String]) -> HashMap<String, String> {
···
     }
 }

+pub struct ProfileStateLoader(Pool<AsyncPgConnection>);
+impl ProfileStateLoader {
+    pub async fn get(&self, did: &str, subject: &str) -> Option<db::ProfileStateRet> {
+        let mut conn = self.0.get().await.unwrap();
+
+        db::get_profile_state(&mut conn, did, subject)
+            .await
+            .unwrap_or_else(|e| {
+                tracing::error!("profile state load failed: {e}");
+                None
+            })
+    }
+
+    pub async fn get_many(
+        &self,
+        did: &str,
+        subjects: &[String],
+    ) -> HashMap<String, db::ProfileStateRet> {
+        let mut conn = self.0.get().await.unwrap();
+
+        match db::get_profile_states(&mut conn, did, subjects).await {
+            Ok(res) => HashMap::from_iter(res.into_iter().map(|v| (v.subject.clone(), v))),
+            Err(e) => {
+                tracing::error!("profile state load failed: {e}");
+                HashMap::new()
+            }
+        }
+    }
+}
+
 pub struct ListLoader(Pool<AsyncPgConnection>);
 type ListLoaderRet = (models::List, i64);
 impl BatchFn<String, ListLoaderRet> for ListLoader {
···
             ),
             Err(e) => {
                 tracing::error!("list load failed: {e}");
+                HashMap::new()
+            }
+        }
+    }
+}
+
+pub struct ListStateLoader(Pool<AsyncPgConnection>);
+impl ListStateLoader {
+    pub async fn get(&self, did: &str, subject: &str) -> Option<db::ListStateRet> {
+        let mut conn = self.0.get().await.unwrap();
+
+        db::get_list_state(&mut conn, did, subject)
+            .await
+            .unwrap_or_else(|e| {
+                tracing::error!("list state load failed: {e}");
+                None
+            })
+    }
+
+    pub async fn get_many(
+        &self,
+        did: &str,
+        subjects: &[String],
+    ) -> HashMap<String, db::ListStateRet> {
+        let mut conn = self.0.get().await.unwrap();
+
+        match db::get_list_states(&mut conn, did, subjects).await {
+            Ok(res) => HashMap::from_iter(res.into_iter().map(|v| (v.at_uri.clone(), v))),
+            Err(e) => {
+                tracing::error!("list state load failed: {e}");
                 HashMap::new()
             }
         }
···
         let mut conn = self.0.get().await.unwrap();

         let res = schema::posts::table
-            .left_join(schema::threadgates::table)
+            .left_join(schema::threadgates::table.on(
+                schema::threadgates::post_uri.eq(sql("coalesce(posts.root_uri, posts.at_uri)")),
+            ))
             .select((
                 models::Post::as_select(),
                 Option::<models::Threadgate>::as_select(),
···
             .unwrap()
             .into_inner()
             .entries
+    }
+}
+
+pub struct PostStateLoader(Pool<AsyncPgConnection>);
+impl PostStateLoader {
+    pub async fn get(&self, did: &str, subject: &str) -> Option<db::PostStateRet> {
+        let mut conn = self.0.get().await.unwrap();
+
+        db::get_post_state(&mut conn, did, subject)
+            .await
+            .unwrap_or_else(|e| {
+                tracing::error!("post state load failed: {e}");
+                None
+            })
+    }
+
+    pub async fn get_many(
+        &self,
+        did: &str,
+        subjects: &[String],
+    ) -> HashMap<String, db::PostStateRet> {
+        let mut conn = self.0.get().await.unwrap();
+
+        match db::get_post_states(&mut conn, did, subjects).await {
+            Ok(res) => HashMap::from_iter(res.into_iter().map(|v| (v.at_uri.clone(), v))),
+            Err(e) => {
+                tracing::error!("post state load failed: {e}");
+                HashMap::new()
+            }
+        }
     }
 }
parakeet/src/main.rs  (+1 -1)
···
         tracing::info!("database migrations complete");
     }

-    let redis_client = redis::Client::open(conf.cache_uri)?;
+    let redis_client = redis::Client::open(conf.redis_uri)?;
     let redis_mp = redis_client.get_multiplexed_tokio_connection().await?;

     let index_client = parakeet_index::Client::connect(conf.index_uri).await?;
parakeet/src/sql/list_states.sql  (+5)
···
+select l.at_uri, lb.at_uri as block, lm.did is not null as muted
+from lists l
+         left join list_blocks lb on l.at_uri = lb.list_uri and lb.did = $1
+         left join list_mutes lm on l.at_uri = lm.list_uri and lm.did = $1
+where l.at_uri = any ($2) and (lm.did is not null or lb.at_uri is not null)
parakeet/src/sql/post_state.sql  (+16)
···
+select bq.*, coalesce(bq.at_uri = pinned_uri, false) as pinned
+from (select p.at_uri,
+             p.did,
+             p.cid,
+             l.rkey as like_rkey,
+             r.rkey as repost_rkey,
+             b.did is not null as bookmarked,
+             coalesce(pg.rules && ARRAY ['app.bsky.feed.postgate#disableRule'], false) as embed_disabled
+      from posts p
+               left join likes l on l.subject = p.at_uri and l.did = $1
+               left join reposts r on r.post = p.at_uri and r.did = $1
+               left join bookmarks b on b.subject = p.at_uri and b.did = $1
+               left join postgates pg on pg.post_uri = p.at_uri
+      where p.at_uri = any ($2)
+        and (l.rkey is not null or r.rkey is not null or b.did is not null or pg.rules is not null)) bq,
+     (select pinned_uri, pinned_cid from profiles where did = $1) pp;
parakeet/src/sql/profile_state.sql  (+20)
···
+with vlb as (select * from v_list_block_exp where did = $1 and subject = any ($2)),
+     vlm as (select * from v_list_mutes_exp where did = $1 and subject = any ($2)),
+     ps as (select * from profile_states where did = $1 and subject = any ($2)),
+     vlb2 as (select subject as did, did as subject, list_uri is not null as blocked
+              from v_list_block_exp
+              where did = any ($2)
+                and subject = $1)
+select distinct on (did, subject) did,
+                                  subject,
+                                  muting,
+                                  ps.blocked or vlb2.blocked as blocked,
+                                  blocking,
+                                  following,
+                                  followed,
+                                  vlb.list_uri as list_block,
+                                  vlm.list_uri as list_mute
+from ps
+         full join vlb using (did, subject)
+         full join vlm using (did, subject)
+         full join vlb2 using (did, subject);
parakeet/src/sql/thread.sql  (+3 -3)
···
-with recursive thread as (select at_uri, parent_uri, root_uri, 0 as depth
+with recursive thread as (select at_uri, parent_uri, root_uri, 1 as depth
                           from posts
-                          where parent_uri = $1
+                          where parent_uri = $1 and violates_threadgate = FALSE
                           union all
                           select p.at_uri, p.parent_uri, p.root_uri, thread.depth + 1
                           from posts p
                                    join thread on p.parent_uri = thread.at_uri
-                          where thread.depth <= $2)
+                          where thread.depth <= $2 and p.violates_threadgate = FALSE)
 select *
 from thread
 order by depth desc;
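The depth bookkeeping changed here (replies now start at depth 1) and, more importantly, the `violates_threadgate` filter runs inside the recursion, so a filtered reply takes its whole subtree with it: the CTE never recurses into rows it has already dropped. An approximate std-only Rust mirror of that traversal (illustrative, not the query itself); thread_branching.sql below applies the same pruning while also capping fan-out per step with LIMIT $3:

use std::collections::HashMap;

// Walk replies level by level from `root`, skipping gated posts. Because the
// walk only continues from surviving posts, a violating reply prunes its
// entire subtree, matching how the recursive CTE behaves.
fn collect_thread<'a>(
    children: &HashMap<&'a str, Vec<(&'a str, bool)>>, // parent -> [(reply, violates_threadgate)]
    root: &'a str,
    max_depth: u32,
) -> Vec<(&'a str, u32)> {
    let mut out = Vec::new();
    let mut frontier = vec![(root, 0u32)];
    while let Some((uri, depth)) = frontier.pop() {
        if depth > max_depth {
            continue;
        }
        for &(reply, violates) in children.get(uri).into_iter().flatten() {
            if violates {
                continue; // pruned: none of its descendants are visited either
            }
            out.push((reply, depth + 1));
            frontier.push((reply, depth + 1));
        }
    }
    out
}

fn main() {
    let mut children = HashMap::new();
    children.insert("root", vec![("a", false), ("b", true)]); // b violates its gate
    children.insert("b", vec![("b1", false)]); // so b1 never appears
    assert_eq!(collect_thread(&children, "root", 6), vec![("a", 1)]);
}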
parakeet/src/sql/thread_branching.sql  (+13)
···
+with recursive thread as (select at_uri, parent_uri, root_uri, 1 as depth
+                          from posts
+                          where parent_uri = $1
+                            and violates_threadgate = FALSE
+                          union all
+                          (select p.at_uri, p.parent_uri, p.root_uri, thread.depth + 1
+                           from posts p
+                                    join thread on p.parent_uri = thread.at_uri
+                           where thread.depth <= $2
+                             and violates_threadgate = FALSE
+                           LIMIT $3))
+select *
+from thread;
parakeet/src/sql/thread_parent.sql  (+4 -2)
···
 with recursive parents as (select at_uri, cid, parent_uri, root_uri, 0 as depth
                            from posts
-                           where at_uri = (select parent_uri from posts where at_uri = $1)
+                           where
+                               at_uri = (select parent_uri from posts where at_uri = $1 and violates_threadgate = FALSE)
                            union all
                            select p.at_uri, p.cid, p.parent_uri, p.root_uri, parents.depth + 1
                            from posts p
                                     join parents on p.at_uri = parents.parent_uri
-                           where parents.depth <= $2)
+                           where parents.depth <= $2
+                             and p.violates_threadgate = FALSE)
 select *
 from parents
 order by depth desc;
parakeet/src/xrpc/app_bsky/bookmark.rs  (+161)
···
+use crate::hydration::StatefulHydrator;
+use crate::xrpc::error::XrpcResult;
+use crate::xrpc::extract::{AtpAcceptLabelers, AtpAuth};
+use crate::xrpc::{datetime_cursor, CursorQuery};
+use crate::GlobalState;
+use axum::extract::{Query, State};
+use axum::Json;
+use diesel::prelude::*;
+use diesel_async::RunQueryDsl;
+use lexica::app_bsky::bookmark::{BookmarkView, BookmarkViewItem};
+use lexica::app_bsky::feed::{BlockedAuthor, PostView};
+use lexica::StrongRef;
+use parakeet_db::{models, schema};
+use serde::{Deserialize, Serialize};
+
+const BSKY_ALLOWED_TYPES: &[&str] = &["app.bsky.feed.post"];
+
+#[derive(Debug, Deserialize)]
+pub struct CreateBookmarkReq {
+    pub uri: String,
+    pub cid: String,
+}
+
+pub async fn create_bookmark(
+    State(state): State<GlobalState>,
+    auth: AtpAuth,
+    Json(form): Json<CreateBookmarkReq>,
+) -> XrpcResult<()> {
+    let mut conn = state.pool.get().await?;
+
+    // strip "at://" then break into parts by '/'
+    let parts = form.uri[5..].split('/').collect::<Vec<_>>();
+
+    let data = models::NewBookmark {
+        did: &auth.0,
+        rkey: None,
+        subject: &form.uri,
+        subject_cid: Some(form.cid),
+        subject_type: &parts[1],
+        tags: vec![],
+    };
+
+    diesel::insert_into(schema::bookmarks::table)
+        .values(&data)
+        .on_conflict_do_nothing()
+        .execute(&mut conn)
+        .await?;
+
+    Ok(())
+}
+
+#[derive(Debug, Deserialize)]
+pub struct DeleteBookmarkReq {
+    pub uri: String,
+}
+
+pub async fn delete_bookmark(
+    State(state): State<GlobalState>,
+    auth: AtpAuth,
+    Json(form): Json<DeleteBookmarkReq>,
+) -> XrpcResult<()> {
+    let mut conn = state.pool.get().await?;
+
+    diesel::delete(schema::bookmarks::table)
+        .filter(
+            schema::bookmarks::did
+                .eq(&auth.0)
+                .and(schema::bookmarks::subject.eq(&form.uri)),
+        )
+        .execute(&mut conn)
+        .await?;
+
+    Ok(())
+}
+
+#[derive(Debug, Serialize)]
+pub struct GetBookmarksRes {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    cursor: Option<String>,
+    bookmarks: Vec<BookmarkView>,
+}
+
+pub async fn get_bookmarks(
+    State(state): State<GlobalState>,
+    AtpAcceptLabelers(labelers): AtpAcceptLabelers,
+    auth: AtpAuth,
+    Query(query): Query<CursorQuery>,
+) -> XrpcResult<Json<GetBookmarksRes>> {
+    let mut conn = state.pool.get().await?;
+    let did = auth.0.clone();
+    let hyd = StatefulHydrator::new(&state.dataloaders, &state.cdn, &labelers, Some(auth));
+
+    let limit = query.limit.unwrap_or(50).clamp(1, 100);
+
+    let mut bookmarks_query = schema::bookmarks::table
+        .select(models::Bookmark::as_select())
+        .filter(schema::bookmarks::did.eq(&did))
+        .filter(schema::bookmarks::subject_type.eq_any(BSKY_ALLOWED_TYPES))
+        .into_boxed();
+
+    if let Some(cursor) = datetime_cursor(query.cursor.as_ref()) {
+        bookmarks_query = bookmarks_query.filter(schema::bookmarks::created_at.lt(cursor));
+    }
+
+    let results = bookmarks_query
+        .order(schema::bookmarks::created_at.desc())
+        .limit(limit as i64)
+        .load(&mut conn)
+        .await?;
+
+    let cursor = results
+        .last()
+        .map(|bm| bm.created_at.timestamp_millis().to_string());
+
+    let uris = results.iter().map(|bm| bm.subject.clone()).collect();
+
+    let mut posts = hyd.hydrate_posts(uris).await;
+
+    let bookmarks = results
+        .into_iter()
+        .filter_map(|bookmark| {
+            let maybe_item = posts.remove(&bookmark.subject);
+            let maybe_cid = maybe_item.as_ref().map(|v| v.cid.clone());
+
+            // ensure that either the cid is set in the bookmark record *or* in the post record
+            // otherwise just ditch. we should have one.
+            let cid = bookmark.subject_cid.or(maybe_cid)?;
+
+            let item = maybe_item
+                .map(postview_to_bvi)
+                .unwrap_or(BookmarkViewItem::NotFound {
+                    uri: bookmark.subject.clone(),
+                    not_found: true,
+                });
+
+            let subject = StrongRef::new_from_str(bookmark.subject, &cid).ok()?;
+
+            Some(BookmarkView {
+                subject,
+                item,
+                created_at: bookmark.created_at,
+            })
+        })
+        .collect();
+
+    Ok(Json(GetBookmarksRes { cursor, bookmarks }))
+}
+
+fn postview_to_bvi(post: PostView) -> BookmarkViewItem {
+    match &post.author.viewer {
+        Some(v) if v.blocked_by || v.blocking.is_some() => BookmarkViewItem::Blocked {
+            uri: post.uri,
+            blocked: true,
+            author: BlockedAuthor {
+                did: post.author.did.clone(),
+                viewer: post.author.viewer,
+            },
+        },
+        _ => BookmarkViewItem::Post(post),
+    }
+}
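`create_bookmark` trusts its input shape: `form.uri[5..]` assumes an `at://` prefix and `parts[1]` assumes a did/collection/rkey layout, so a malformed URI would panic or store garbage as `subject_type`. A std-only sketch of the same parsing done defensively (hypothetical helper, not part of this PR):

// Returns (did, collection NSID, rkey); the collection is what bookmark.rs
// stores as subject_type and later checks against BSKY_ALLOWED_TYPES.
fn at_uri_parts(uri: &str) -> Option<(&str, &str, &str)> {
    let rest = uri.strip_prefix("at://")?;
    let mut it = rest.splitn(3, '/');
    Some((it.next()?, it.next()?, it.next()?))
}

fn main() {
    let (did, collection, rkey) =
        at_uri_parts("at://did:plc:abc/app.bsky.feed.post/3k2a").unwrap();
    assert_eq!(did, "did:plc:abc");
    assert_eq!(collection, "app.bsky.feed.post");
    assert_eq!(rkey, "3k2a");
    assert_eq!(at_uri_parts("https://example.com"), None);
}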
parakeet/src/xrpc/app_bsky/feed/likes.rs  (+7 -8)
···
+use crate::hydration::posts::RawFeedItem;
 use crate::hydration::StatefulHydrator;
 use crate::xrpc::error::{Error, XrpcResult};
 use crate::xrpc::extract::{AtpAcceptLabelers, AtpAuth};
···
         .last()
         .map(|(last, _)| last.timestamp_millis().to_string());

-    let at_uris = results
+    let raw_feed = results
         .iter()
-        .map(|(_, uri)| uri.clone())
+        .map(|(_, uri)| RawFeedItem::Post {
+            uri: uri.clone(),
+            context: None,
+        })
         .collect::<Vec<_>>();

-    let mut posts = hyd.hydrate_feed_posts(at_uris).await;
-
-    let feed: Vec<_> = results
-        .into_iter()
-        .filter_map(|(_, uri)| posts.remove(&uri))
-        .collect();
+    let feed = hyd.hydrate_feed_posts(raw_feed, false).await;

     Ok(Json(FeedRes { cursor, feed }))
 }
parakeet/src/xrpc/app_bsky/feed/posts.rs  (+153 -122)
···
+use crate::hydration::posts::RawFeedItem;
 use crate::hydration::StatefulHydrator;
 use crate::xrpc::app_bsky::graph::lists::ListWithCursorQuery;
 use crate::xrpc::error::{Error, XrpcResult};
···
 use diesel_async::{AsyncPgConnection, RunQueryDsl};
 use lexica::app_bsky::actor::ProfileView;
 use lexica::app_bsky::feed::{
-    FeedReasonRepost, FeedSkeletonResponse, FeedViewPost, FeedViewPostReason, PostView,
-    SkeletonReason, ThreadViewPost, ThreadViewPostType, ThreadgateView,
+    BlockedAuthor, FeedSkeletonResponse, FeedViewPost, PostView, SkeletonReason, ThreadViewPost,
+    ThreadViewPostType, ThreadgateView,
 };
-use parakeet_db::schema;
+use parakeet_db::{models, schema};
 use reqwest::Url;
 use serde::{Deserialize, Serialize};
 use std::collections::HashMap;
···
     let hyd = StatefulHydrator::new(&state.dataloaders, &state.cdn, &labelers, maybe_auth);

-    let at_uris = skeleton.feed.iter().map(|v| v.post.clone()).collect();
     let repost_skeleton = skeleton
         .feed
         .iter()
···
             _ => None,
         })
         .collect::<Vec<_>>();
-
-    let mut posts = hyd.hydrate_feed_posts(at_uris).await;
-    let mut repost_data = get_skeleton_repost_data(&mut conn, &hyd, repost_skeleton).await;
+    let mut repost_data = get_skeleton_repost_data(&mut conn, repost_skeleton).await;

-    let feed = skeleton
+    let raw_feed = skeleton
         .feed
         .into_iter()
-        .filter_map(|item| {
-            let mut post = posts.remove(&item.post)?;
-            let reason = match item.reason {
-                Some(SkeletonReason::Repost { repost }) => {
-                    repost_data.remove(&repost).map(FeedViewPostReason::Repost)
-                }
-                Some(SkeletonReason::Pin {}) => Some(FeedViewPostReason::Pin),
-                _ => None,
-            };
-
-            post.reason = reason;
-            post.feed_context = item.feed_context;
-
-            Some(post)
+        .filter_map(|v| match v.reason {
+            Some(SkeletonReason::Repost { repost }) => {
+                repost_data
+                    .remove_entry(&repost)
+                    .map(|(uri, (by, at))| RawFeedItem::Repost {
+                        uri,
+                        post: v.post,
+                        by,
+                        at: at.and_utc(),
+                        context: v.feed_context,
+                    })
+            }
+            Some(SkeletonReason::Pin {}) => Some(RawFeedItem::Pin {
+                uri: v.post,
+                context: v.feed_context,
+            }),
+            None => Some(RawFeedItem::Post {
+                uri: v.post,
+                context: v.feed_context,
+            }),
         })
         .collect();
+
+    let feed = hyd.hydrate_feed_posts(raw_feed, false).await;

     Ok(Json(FeedRes {
         cursor: skeleton.cursor,
···
     }))
 }

-#[derive(Debug, Deserialize)]
+#[derive(Debug, Default, Eq, PartialEq, Deserialize)]
 #[serde(rename_all = "snake_case")]
 pub enum GetAuthorFeedFilter {
+    #[default]
     PostsWithReplies,
     PostsNoReplies,
     PostsWithMedia,
     PostsAndAuthorThreads,
     PostsWithVideo,
-}
-
-impl Default for GetAuthorFeedFilter {
-    fn default() -> Self {
-        Self::PostsWithReplies
-    }
 }

 #[derive(Debug, Deserialize)]
···
     Query(query): Query<GetAuthorFeedQuery>,
 ) -> XrpcResult<Json<FeedRes>> {
     let mut conn = state.pool.get().await?;
-    let hyd = StatefulHydrator::new(&state.dataloaders, &state.cdn, &labelers, maybe_auth);

     let did = get_actor_did(&state.dataloaders, query.actor.clone()).await?;

     check_actor_status(&mut conn, &did).await?;

+    // check if we block the actor or if they block us
+    if let Some(auth) = &maybe_auth {
+        if let Some(psr) = crate::db::get_profile_state(&mut conn, &auth.0, &did).await? {
+            if psr.blocked.unwrap_or_default() {
+                // they block us
+                return Err(Error::new(StatusCode::BAD_REQUEST, "BlockedByActor", None));
+            } else if psr.blocking.is_some() {
+                // we block them
+                return Err(Error::new(StatusCode::BAD_REQUEST, "BlockedActor", None));
+            }
+        }
+    }
+
+    let hyd = StatefulHydrator::new(&state.dataloaders, &state.cdn, &labelers, maybe_auth);
+
+    let pin = match query.include_pins && query.cursor.is_none() {
+        false => None,
+        true => crate::db::get_pinned_post_uri(&mut conn, &did).await?,
+    };
+
     let limit = query.limit.unwrap_or(50).clamp(1, 100);

-    let mut posts_query = schema::posts::table
-        .select((schema::posts::created_at, schema::posts::at_uri))
-        .filter(schema::posts::did.eq(did))
+    let mut posts_query = schema::author_feeds::table
+        .select(models::AuthorFeedItem::as_select())
+        .left_join(schema::posts::table.on(schema::posts::at_uri.eq(schema::author_feeds::post)))
+        .filter(schema::author_feeds::did.eq(&did))
         .into_boxed();

     if let Some(cursor) = datetime_cursor(query.cursor.as_ref()) {
-        posts_query = posts_query.filter(schema::posts::created_at.lt(cursor));
+        posts_query = posts_query.filter(schema::author_feeds::sort_at.lt(cursor));
     }

+    let author_threads_only = query.filter == GetAuthorFeedFilter::PostsAndAuthorThreads;
     posts_query = match query.filter {
-        GetAuthorFeedFilter::PostsWithReplies => posts_query,
+        GetAuthorFeedFilter::PostsWithReplies => {
+            posts_query.filter(schema::author_feeds::typ.eq("post"))
+        }
         GetAuthorFeedFilter::PostsNoReplies => {
             posts_query.filter(schema::posts::parent_uri.is_null())
         }
-        GetAuthorFeedFilter::PostsWithMedia => posts_query.filter(embed_type_filter(&[
-            "app.bsky.embed.video",
-            "app.bsky.embed.images",
-        ])),
+        GetAuthorFeedFilter::PostsWithMedia => posts_query.filter(
+            embed_type_filter(&["app.bsky.embed.video", "app.bsky.embed.images"])
+                .and(schema::author_feeds::typ.eq("post")),
+        ),
         GetAuthorFeedFilter::PostsAndAuthorThreads => posts_query.filter(
             (schema::posts::parent_uri
-                .like(format!("at://{}/%", &query.actor))
+                .like(format!("at://{did}/%"))
                 .or(schema::posts::parent_uri.is_null()))
             .and(
                 schema::posts::root_uri
-                    .like(format!("at://{}/%", &query.actor))
+                    .like(format!("at://{did}/%"))
                     .or(schema::posts::root_uri.is_null()),
             ),
         ),
-        GetAuthorFeedFilter::PostsWithVideo => {
-            posts_query.filter(embed_type_filter(&["app.bsky.embed.video"]))
-        }
+        GetAuthorFeedFilter::PostsWithVideo => posts_query.filter(
+            embed_type_filter(&["app.bsky.embed.video"]).and(schema::author_feeds::typ.eq("post")),
+        ),
     };

     let results = posts_query
-        .order(schema::posts::created_at.desc())
+        .order(schema::author_feeds::sort_at.desc())
         .limit(limit as i64)
-        .load::<(chrono::DateTime<chrono::Utc>, String)>(&mut conn)
+        .load(&mut conn)
         .await?;

     let cursor = results
         .last()
-        .map(|(last, _)| last.timestamp_millis().to_string());
+        .map(|item| item.sort_at.timestamp_millis().to_string());

-    let at_uris = results
-        .iter()
-        .map(|(_, uri)| uri.clone())
+    let mut raw_feed = results
+        .into_iter()
+        .filter_map(|item| match &*item.typ {
+            "post" => Some(RawFeedItem::Post {
+                uri: item.post,
+                context: None,
+            }),
+            "repost" => Some(RawFeedItem::Repost {
+                uri: item.uri,
+                post: item.post,
+                by: item.did,
+                at: item.sort_at,
+                context: None,
+            }),
+            _ => None,
+        })
         .collect::<Vec<_>>();

-    let mut posts = hyd.hydrate_feed_posts(at_uris).await;
+    if let Some(post) = pin {
+        raw_feed.insert(
+            0,
+            RawFeedItem::Pin {
+                uri: post,
+                context: None,
+            },
+        );
+    }

-    let feed = results
-        .into_iter()
-        .filter_map(|(_, uri)| posts.remove(&uri))
251
-
.collect();
293
+
let feed = hyd.hydrate_feed_posts(raw_feed, author_threads_only).await;
252
294
253
295
Ok(Json(FeedRes { cursor, feed }))
254
296
}
···
291
333
.last()
292
334
.map(|(last, _)| last.timestamp_millis().to_string());
293
335
294
-
let at_uris = results
336
+
let raw_feed = results
295
337
.iter()
296
-
.map(|(_, uri)| uri.clone())
338
+
.map(|(_, uri)| RawFeedItem::Post {
339
+
uri: uri.clone(),
340
+
context: None,
341
+
})
297
342
.collect::<Vec<_>>();
298
343
299
-
let mut posts = hyd.hydrate_feed_posts(at_uris).await;
300
-
301
-
let feed = results
302
-
.into_iter()
303
-
.filter_map(|(_, uri)| posts.remove(&uri))
304
-
.collect();
344
+
let feed = hyd.hydrate_feed_posts(raw_feed, false).await;
305
345
306
346
Ok(Json(FeedRes { cursor, feed }))
307
347
}
···
321
361
pub threadgate: Option<ThreadgateView>,
322
362
}
323
363
324
-
#[derive(Debug, QueryableByName)]
325
-
#[diesel(check_for_backend(diesel::pg::Pg))]
326
-
struct ThreadItem {
327
-
#[diesel(sql_type = diesel::sql_types::Text)]
328
-
at_uri: String,
329
-
#[diesel(sql_type = diesel::sql_types::Nullable<diesel::sql_types::Text>)]
330
-
parent_uri: Option<String>,
331
-
// #[diesel(sql_type = diesel::sql_types::Nullable<diesel::sql_types::Text>)]
332
-
// root_uri: Option<String>,
333
-
#[diesel(sql_type = diesel::sql_types::Integer)]
334
-
depth: i32,
335
-
}
336
-
337
364
pub async fn get_post_thread(
338
365
State(state): State<GlobalState>,
339
366
AtpAcceptLabelers(labelers): AtpAcceptLabelers,
···
347
374
let depth = query.depth.unwrap_or(6).clamp(0, 1000);
348
375
let parent_height = query.parent_height.unwrap_or(80).clamp(0, 1000);
349
376
350
-
let replies = diesel::sql_query(include_str!("../../../sql/thread.sql"))
351
-
.bind::<diesel::sql_types::Text, _>(&uri)
352
-
.bind::<diesel::sql_types::Integer, _>(depth as i32)
353
-
.load::<ThreadItem>(&mut conn)
354
-
.await?;
377
+
let root = hyd
378
+
.hydrate_post(uri.clone())
379
+
.await
380
+
.ok_or(Error::not_found())?;
381
+
let threadgate = root.threadgate.clone();
355
382
356
-
let parents = diesel::sql_query(include_str!("../../../sql/thread_parent.sql"))
357
-
.bind::<diesel::sql_types::Text, _>(&uri)
358
-
.bind::<diesel::sql_types::Integer, _>(parent_height as i32)
359
-
.load::<ThreadItem>(&mut conn)
360
-
.await?;
383
+
if let Some(viewer) = &root.author.viewer {
384
+
if viewer.blocked_by || viewer.blocking.is_some() {
385
+
return Ok(Json(GetPostThreadRes {
386
+
thread: ThreadViewPostType::Blocked {
387
+
uri,
388
+
blocked: true,
389
+
author: BlockedAuthor {
390
+
did: root.author.did,
391
+
viewer: root.author.viewer,
392
+
},
393
+
},
394
+
threadgate,
395
+
}));
396
+
}
397
+
}
398
+
399
+
let replies = crate::db::get_thread_children(&mut conn, &uri, depth as i32).await?;
400
+
let parents = crate::db::get_thread_parents(&mut conn, &uri, parent_height as i32).await?;
361
401
362
402
let reply_uris = replies.iter().map(|item| item.at_uri.clone()).collect();
363
403
let parent_uris = parents.iter().map(|item| item.at_uri.clone()).collect();
364
404
365
-
let root = hyd
366
-
.hydrate_post(uri.clone())
367
-
.await
368
-
.ok_or(Error::not_found())?;
369
405
let mut replies_hydrated = hyd.hydrate_posts(reply_uris).await;
370
406
let mut parents_hydrated = hyd.hydrate_posts(parent_uris).await;
371
407
···
381
417
continue;
382
418
};
383
419
384
-
entry.push(ThreadViewPostType::Post(Box::new(ThreadViewPost {
385
-
post,
386
-
parent: None,
387
-
replies: this_post_replies,
388
-
})));
420
+
entry.push(postview_to_tvpt(post, None, this_post_replies));
389
421
}
390
422
391
423
let mut root_parent = None;
···
394
426
395
427
let parent = parents_hydrated
396
428
.remove(&parent.at_uri)
397
-
.map(|post| {
398
-
ThreadViewPostType::Post(Box::new(ThreadViewPost {
399
-
post,
400
-
parent: p2,
401
-
replies: vec![],
402
-
}))
403
-
})
429
+
.map(|post| postview_to_tvpt(post, p2, Vec::default()))
404
430
.unwrap_or(ThreadViewPostType::NotFound {
405
431
uri: parent.at_uri.clone(),
406
432
not_found: true,
···
410
436
}
411
437
412
438
let replies = tmpbuf.remove(&root.uri).unwrap_or_default();
413
-
414
-
let threadgate = root.threadgate.clone();
415
439
416
440
Ok(Json(GetPostThreadRes {
417
441
threadgate,
···
629
653
}
630
654
}
631
655
632
-
async fn get_skeleton_repost_data<'a>(
656
+
async fn get_skeleton_repost_data(
633
657
conn: &mut AsyncPgConnection,
634
-
hyd: &StatefulHydrator<'a>,
635
658
reposts: Vec<String>,
636
-
) -> HashMap<String, FeedReasonRepost> {
659
+
) -> HashMap<String, (String, NaiveDateTime)> {
637
660
let Ok(repost_data) = schema::records::table
638
661
.select((
639
662
schema::records::at_uri,
···
647
670
return HashMap::new();
648
671
};
649
672
650
-
let profiles = repost_data.iter().map(|(_, did, _)| did.clone()).collect();
651
-
let profiles = hyd.hydrate_profiles_basic(profiles).await;
652
-
653
673
repost_data
654
674
.into_iter()
655
-
.filter_map(|(uri, did, indexed_at)| {
656
-
let by = profiles.get(&did).cloned()?;
675
+
.map(|(uri, did, at)| (uri, (did, at)))
676
+
.collect()
677
+
}
657
678
658
-
let repost = FeedReasonRepost {
659
-
by,
660
-
uri: Some(uri.clone()),
661
-
cid: None, // okay, we do have this, but the app doesn't seem to be bothered about not setting it.
662
-
indexed_at: indexed_at.and_utc(),
663
-
};
664
-
665
-
Some((uri, repost))
666
-
})
667
-
.collect()
679
+
fn postview_to_tvpt(
680
+
post: PostView,
681
+
parent: Option<ThreadViewPostType>,
682
+
replies: Vec<ThreadViewPostType>,
683
+
) -> ThreadViewPostType {
684
+
match &post.author.viewer {
685
+
Some(v) if v.blocked_by || v.blocking.is_some() => ThreadViewPostType::Blocked {
686
+
uri: post.uri.clone(),
687
+
blocked: true,
688
+
author: BlockedAuthor {
689
+
did: post.author.did,
690
+
viewer: post.author.viewer,
691
+
},
692
+
},
693
+
_ => ThreadViewPostType::Post(Box::new(ThreadViewPost {
694
+
post,
695
+
parent,
696
+
replies,
697
+
})),
698
+
}
668
699
}
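Both feed handlers above now emit a Vec<RawFeedItem> and let hydrate_feed_posts assemble reasons and contexts. RawFeedItem itself is defined in the hydration module and is not part of this diff; a sketch of what it presumably looks like, with variants and field types inferred from the call sites above (names and types are assumptions, not the committed definition):

// Hypothetical sketch of RawFeedItem, inferred from the call sites above;
// the real definition lives in the hydration module and is not in this diff.
pub enum RawFeedItem {
    Post {
        uri: String,             // at-uri of the post itself
        context: Option<String>, // feed_context passed through from the skeleton
    },
    Repost {
        uri: String,             // at-uri of the repost record
        post: String,            // at-uri of the reposted post
        by: String,              // DID of the reposting actor
        at: chrono::DateTime<chrono::Utc>,
        context: Option<String>,
    },
    Pin {
        uri: String,
        context: Option<String>,
    },
}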
+61 -24  parakeet/src/xrpc/app_bsky/graph/lists.rs
···
 }

 #[derive(Debug, Serialize)]
-pub struct AppBskyGraphGetListsRes {
+pub struct GetListsRes {
     #[serde(skip_serializing_if = "Option::is_none")]
     cursor: Option<String>,
     lists: Vec<ListView>,
···
     AtpAcceptLabelers(labelers): AtpAcceptLabelers,
     maybe_auth: Option<AtpAuth>,
     Query(query): Query<ActorWithCursorQuery>,
-) -> XrpcResult<Json<AppBskyGraphGetListsRes>> {
+) -> XrpcResult<Json<GetListsRes>> {
     let mut conn = state.pool.get().await?;
     let hyd = StatefulHydrator::new(&state.dataloaders, &state.cdn, &labelers, maybe_auth);

···
         .filter_map(|(_, uri)| lists.remove(&uri))
         .collect();

-    Ok(Json(AppBskyGraphGetListsRes { cursor, lists }))
+    Ok(Json(GetListsRes { cursor, lists }))
 }

 #[derive(Debug, Serialize)]
···
     }))
 }

-#[derive(Debug, Serialize)]
-pub struct GetListMutesRes {
-    #[serde(skip_serializing_if = "Option::is_none")]
-    cursor: Option<String>,
-    lists: Vec<ListView>,
-}
-
 pub async fn get_list_mutes(
     State(state): State<GlobalState>,
     AtpAcceptLabelers(labelers): AtpAcceptLabelers,
     auth: AtpAuth,
     Query(query): Query<CursorQuery>,
-) -> XrpcResult<Json<GetListMutesRes>> {
+) -> XrpcResult<Json<GetListsRes>> {
     let mut conn = state.pool.get().await?;
     let did = auth.0.clone();
     let hyd = StatefulHydrator::new(&state.dataloaders, &state.cdn, &labelers, Some(auth));
···
     let limit = query.limit.unwrap_or(50).clamp(1, 100);

     let mut mutes_query = schema::list_mutes::table
-        .select(schema::list_mutes::list_uri)
+        .select((schema::list_mutes::created_at, schema::list_mutes::list_uri))
         .filter(schema::list_mutes::did.eq(did))
         .into_boxed();

-    if let Some(cursor) = query.cursor {
-        mutes_query = mutes_query.filter(schema::list_mutes::list_uri.lt(cursor));
+    if let Some(cursor) = datetime_cursor(query.cursor.as_ref()) {
+        mutes_query = mutes_query.filter(schema::list_mutes::created_at.lt(cursor));
+    }
+
+    let results = mutes_query
+        .order(schema::list_mutes::created_at.desc())
+        .limit(limit as i64)
+        .load::<(chrono::DateTime<chrono::Utc>, String)>(&mut conn)
+        .await?;
+
+    let cursor = results
+        .last()
+        .map(|(last, _)| last.timestamp_millis().to_string());
+
+    let uris = results.iter().map(|(_, uri)| uri.clone()).collect();
+
+    let lists = hyd.hydrate_lists(uris).await;
+    let lists = lists.into_values().collect::<Vec<_>>();
+
+    Ok(Json(GetListsRes { cursor, lists }))
+}
+
+pub async fn get_list_blocks(
+    State(state): State<GlobalState>,
+    AtpAcceptLabelers(labelers): AtpAcceptLabelers,
+    auth: AtpAuth,
+    Query(query): Query<CursorQuery>,
+) -> XrpcResult<Json<GetListsRes>> {
+    let mut conn = state.pool.get().await?;
+    let did = auth.0.clone();
+    let hyd = StatefulHydrator::new(&state.dataloaders, &state.cdn, &labelers, Some(auth));
+
+    let limit = query.limit.unwrap_or(50).clamp(1, 100);
+
+    let mut blocks_query = schema::list_blocks::table
+        .select((
+            schema::list_blocks::created_at,
+            schema::list_blocks::list_uri,
+        ))
+        .filter(schema::list_blocks::did.eq(did))
+        .into_boxed();
+
+    if let Some(cursor) = datetime_cursor(query.cursor.as_ref()) {
+        blocks_query = blocks_query.filter(schema::list_blocks::created_at.lt(cursor));
     }

-    let mutes = mutes_query
-        .order(schema::list_mutes::list_uri.desc())
+    let results = blocks_query
+        .order(schema::list_blocks::created_at.desc())
         .limit(limit as i64)
-        .load(&mut conn)
+        .load::<(chrono::DateTime<chrono::Utc>, String)>(&mut conn)
         .await?;

-    let lists = hyd.hydrate_lists(mutes).await;
-    let mutes = lists.into_values().collect::<Vec<_>>();
-    let cursor = mutes.last().map(|v| v.uri.clone());
+    let cursor = results
+        .last()
+        .map(|(last, _)| last.timestamp_millis().to_string());
+
+    let uris = results.iter().map(|(_, uri)| uri.clone()).collect();
+
+    let lists = hyd.hydrate_lists(uris).await;
+    let lists = lists.into_values().collect::<Vec<_>>();

-    Ok(Json(GetListMutesRes {
-        cursor,
-        lists: mutes,
-    }))
+    Ok(Json(GetListsRes { cursor, lists }))
 }
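All three list endpoints now share GetListsRes, whose cursor field is dropped from the JSON when it is None thanks to skip_serializing_if, so the final page serializes with only lists. A quick check of that behaviour (a sketch, assuming the struct's private fields are visible to the test):

// Illustrative only: the last page of results carries no cursor key at all.
let res = GetListsRes { cursor: None, lists: vec![] };
assert_eq!(serde_json::to_string(&res).unwrap(), r#"{"lists":[]}"#);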
+14 -9  parakeet/src/xrpc/app_bsky/graph/mutes.rs
···
 use crate::hydration::StatefulHydrator;
 use crate::xrpc::error::XrpcResult;
 use crate::xrpc::extract::{AtpAcceptLabelers, AtpAuth};
-use crate::xrpc::CursorQuery;
+use crate::xrpc::{datetime_cursor, CursorQuery};
 use crate::GlobalState;
 use axum::extract::{Query, State};
 use axum::Json;
···
     let limit = query.limit.unwrap_or(50).clamp(1, 100);

     let mut muted_query = schema::mutes::table
-        .select(schema::mutes::subject)
+        .select((schema::mutes::created_at, schema::mutes::subject))
         .filter(schema::mutes::did.eq(did))
         .into_boxed();

-    if let Some(cursor) = query.cursor {
-        muted_query = muted_query.filter(schema::mutes::subject.lt(cursor));
+    if let Some(cursor) = datetime_cursor(query.cursor.as_ref()) {
+        muted_query = muted_query.filter(schema::mutes::created_at.lt(cursor));
     }

-    let muted = muted_query
-        .order(schema::mutes::subject.desc())
+    let results = muted_query
+        .order(schema::mutes::created_at.desc())
         .limit(limit as i64)
-        .load(&mut conn)
+        .load::<(chrono::DateTime<chrono::Utc>, String)>(&mut conn)
         .await?;

-    let profiles = hyd.hydrate_profiles(muted).await;
+    let cursor = results
+        .last()
+        .map(|(last, _)| last.timestamp_millis().to_string());
+
+    let dids = results.iter().map(|(_, did)| did.clone()).collect();
+
+    let profiles = hyd.hydrate_profiles(dids).await;
     let mutes = profiles.into_values().collect::<Vec<_>>();
-    let cursor = mutes.last().map(|v| v.did.clone());

     Ok(Json(GetMutesRes { cursor, mutes }))
 }
+47 -1  parakeet/src/xrpc/app_bsky/graph/relations.rs
···
 use crate::hydration::StatefulHydrator;
 use crate::xrpc::error::{Error, XrpcResult};
 use crate::xrpc::extract::{AtpAcceptLabelers, AtpAuth};
-use crate::xrpc::{datetime_cursor, get_actor_did, ActorWithCursorQuery};
+use crate::xrpc::{datetime_cursor, get_actor_did, ActorWithCursorQuery, CursorQuery};
 use crate::GlobalState;
 use axum::extract::{Query, State};
 use axum::Json;
···
 use lexica::app_bsky::actor::ProfileView;
 use parakeet_db::schema;
 use serde::Serialize;
+
+#[derive(Debug, Serialize)]
+pub struct GetBlocksRes {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    cursor: Option<String>,
+    blocks: Vec<ProfileView>,
+}
+
+pub async fn get_blocks(
+    State(state): State<GlobalState>,
+    AtpAcceptLabelers(labelers): AtpAcceptLabelers,
+    auth: AtpAuth,
+    Query(query): Query<CursorQuery>,
+) -> XrpcResult<Json<GetBlocksRes>> {
+    let mut conn = state.pool.get().await?;
+    let did = auth.0.clone();
+    let hyd = StatefulHydrator::new(&state.dataloaders, &state.cdn, &labelers, Some(auth));
+
+    let limit = query.limit.unwrap_or(50).clamp(1, 100);
+
+    let mut blocked_query = schema::blocks::table
+        .select((schema::blocks::created_at, schema::blocks::subject))
+        .filter(schema::blocks::did.eq(did))
+        .into_boxed();
+
+    if let Some(cursor) = datetime_cursor(query.cursor.as_ref()) {
+        blocked_query = blocked_query.filter(schema::blocks::created_at.lt(cursor));
+    }
+
+    let results = blocked_query
+        .order(schema::blocks::created_at.desc())
+        .limit(limit as i64)
+        .load::<(chrono::DateTime<chrono::Utc>, String)>(&mut conn)
+        .await?;
+
+    let cursor = results
+        .last()
+        .map(|(last, _)| last.timestamp_millis().to_string());
+
+    let dids = results.iter().map(|(_, did)| did.clone()).collect();
+
+    let profiles = hyd.hydrate_profiles(dids).await;
+    let blocks = profiles.into_values().collect::<Vec<_>>();
+
+    Ok(Json(GetBlocksRes { cursor, blocks }))
+}

 #[derive(Debug, Serialize)]
 pub struct AppBskyGraphGetFollowersRes {
+15 -2  parakeet/src/xrpc/app_bsky/mod.rs
···
 use axum::Router;

 mod actor;
+mod bookmark;
 mod feed;
 mod graph;
 mod labeler;
+mod unspecced;

 #[rustfmt::skip]
 pub fn routes() -> Router<crate::GlobalState> {
     Router::new()
+        .route("/app.bsky.actor.getPreferences", get(not_implemented))
+        .route("/app.bsky.actor.putPreferences", post(not_implemented))
         .route("/app.bsky.actor.getProfile", get(actor::get_profile))
         .route("/app.bsky.actor.getProfiles", get(actor::get_profiles))
         // TODO: app.bsky.actor.getSuggestions (recs)
         // TODO: app.bsky.actor.searchActor (search)
         // TODO: app.bsky.actor.searchActorTypeahead (search)
+        .route("/app.bsky.bookmark.createBookmark", post(bookmark::create_bookmark))
+        .route("/app.bsky.bookmark.deleteBookmark", post(bookmark::delete_bookmark))
+        .route("/app.bsky.bookmark.getBookmarks", get(bookmark::get_bookmarks))
         .route("/app.bsky.feed.getActorFeeds", get(feed::feedgen::get_actor_feeds))
         .route("/app.bsky.feed.getActorLikes", get(feed::likes::get_actor_likes))
         .route("/app.bsky.feed.getAuthorFeed", get(feed::posts::get_author_feed))
···
         // TODO: app.bsky.feed.getTimeline (complicated)
         // TODO: app.bsky.feed.searchPosts (search)
         .route("/app.bsky.graph.getActorStarterPacks", get(graph::starter_packs::get_actor_starter_packs))
-        // TODO: app.bsky.graph.getBlocks
+        .route("/app.bsky.graph.getBlocks", get(graph::relations::get_blocks))
         .route("/app.bsky.graph.getFollowers", get(graph::relations::get_followers))
         .route("/app.bsky.graph.getFollows", get(graph::relations::get_follows))
         // TODO: app.bsky.graph.getKnownFollowers
         .route("/app.bsky.graph.getList", get(graph::lists::get_list))
-        // TODO: app.bsky.graph.getListBlocks
+        .route("/app.bsky.graph.getListBlocks", get(graph::lists::get_list_blocks))
         .route("/app.bsky.graph.getListMutes", get(graph::lists::get_list_mutes))
         .route("/app.bsky.graph.getLists", get(graph::lists::get_lists))
         .route("/app.bsky.graph.getMutes", get(graph::mutes::get_mutes))
···
         // TODO: app.bsky.notification.putActivitySubscriptions
         // TODO: app.bsky.notification.putPreferences
         // TODO: app.bsky.notification.putPreferencesV2
+        .route("/app.bsky.unspecced.getPostThreadV2", get(unspecced::thread_v2::get_post_thread_v2))
+        .route("/app.bsky.unspecced.getPostThreadOtherV2", get(unspecced::thread_v2::get_post_thread_other_v2))
+}
+
+async fn not_implemented() -> axum::http::StatusCode {
+    axum::http::StatusCode::NOT_IMPLEMENTED
 }
+1  parakeet/src/xrpc/app_bsky/unspecced/mod.rs
···
+pub mod thread_v2;
+382  parakeet/src/xrpc/app_bsky/unspecced/thread_v2.rs
···
+use crate::db::ThreadItem;
+use crate::hydration::StatefulHydrator;
+use crate::xrpc::error::{Error, XrpcResult};
+use crate::xrpc::extract::{AtpAcceptLabelers, AtpAuth};
+use crate::xrpc::normalise_at_uri;
+use crate::GlobalState;
+use axum::extract::{Query, State};
+use axum::Json;
+use itertools::Itertools;
+use lexica::app_bsky::feed::{BlockedAuthor, PostView, ThreadgateView};
+use lexica::app_bsky::unspecced::{ThreadItemPost, ThreadV2Item, ThreadV2ItemType};
+use serde::{Deserialize, Serialize};
+use std::cmp::Ordering;
+use std::collections::{HashMap, HashSet};
+
+const THREAD_PARENTS: usize = 50;
+const DEFAULT_BRANCHING: u32 = 10;
+const DEFAULT_DEPTH: u32 = 6;
+
+#[derive(Copy, Clone, Debug, Default, Deserialize)]
+#[serde(rename_all = "lowercase")]
+pub enum PostThreadSort {
+    Newest,
+    #[default]
+    Oldest,
+    Top,
+}
+
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetPostThreadV2Req {
+    pub anchor: String,
+    pub above: Option<bool>,
+    pub below: Option<u32>,
+    pub branching_factor: Option<u32>,
+    #[serde(default)]
+    pub sort: PostThreadSort,
+}
+
+#[derive(Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetPostThreadV2Res {
+    pub thread: Vec<ThreadV2Item>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub threadgate: Option<ThreadgateView>,
+    pub has_other_replies: bool,
+}
+
+pub async fn get_post_thread_v2(
+    State(state): State<GlobalState>,
+    AtpAcceptLabelers(labelers): AtpAcceptLabelers,
+    maybe_auth: Option<AtpAuth>,
+    Query(query): Query<GetPostThreadV2Req>,
+) -> XrpcResult<Json<GetPostThreadV2Res>> {
+    let mut conn = state.pool.get().await?;
+    let maybe_did = maybe_auth.clone().map(|v| v.0);
+    let hyd = StatefulHydrator::new(&state.dataloaders, &state.cdn, &labelers, maybe_auth);
+
+    let uri = normalise_at_uri(&state.dataloaders, &query.anchor).await?;
+    let depth = query.below.unwrap_or(DEFAULT_DEPTH).clamp(0, 20) as i32;
+    let branching_factor = query
+        .branching_factor
+        .unwrap_or(DEFAULT_BRANCHING)
+        .clamp(0, 100) as i32;
+
+    let anchor = hyd
+        .hydrate_post(uri.clone())
+        .await
+        .ok_or(Error::not_found())?;
+
+    if let Some(v) = &anchor.author.viewer {
+        if v.blocked_by || v.blocking.is_some() {
+            let block = ThreadV2ItemType::Blocked {
+                author: BlockedAuthor {
+                    did: anchor.author.did,
+                    viewer: anchor.author.viewer,
+                },
+            };
+
+            return Ok(Json(GetPostThreadV2Res {
+                thread: vec![ThreadV2Item {
+                    uri,
+                    depth: 0,
+                    value: block,
+                }],
+                threadgate: anchor.threadgate,
+                has_other_replies: false,
+            }));
+        }
+    }
+
+    // get the root post URI (if there is one) and derive its author's DID.
+    let root_uri = crate::db::get_root_post(&mut conn, &uri)
+        .await?
+        .unwrap_or(uri.clone());
+    let root_did = root_uri[5..].split('/').collect::<Vec<_>>()[0];
+
+    let replies =
+        crate::db::get_thread_children_branching(&mut conn, &uri, depth, branching_factor + 1)
+            .await?;
+    let reply_uris = replies
+        .iter()
+        .map(|item| item.at_uri.clone())
+        .collect::<Vec<_>>();
+
+    // bluesky seems to use -50 atm. we fetch 1 extra to know whether to set more_parents.
+    let parents = match query.above.unwrap_or(true) {
+        true => crate::db::get_thread_parents(&mut conn, &uri, THREAD_PARENTS as i32 + 1).await?,
+        false => vec![],
+    };
+    let parent_uris = parents
+        .iter()
+        .map(|item| item.at_uri.clone())
+        .collect::<Vec<_>>();
+
+    let (mut replies_hyd, mut parents_hyd) = tokio::join!(
+        hyd.hydrate_posts(reply_uris),
+        hyd.hydrate_posts(parent_uris),
+    );
+
+    let threadgate = anchor.threadgate.clone();
+    let hidden: HashSet<_, std::hash::RandomState> = match &threadgate {
+        Some(tg) => crate::db::get_threadgate_hiddens(&mut conn, &tg.uri).await?,
+        None => None,
+    }
+    .map(|hiddens| HashSet::from_iter(Vec::from(hiddens)))
+    .unwrap_or_default();
+
+    let root_has_more = parents.len() > THREAD_PARENTS;
+    let mut is_op_thread = true;
+
+    let mut thread = Vec::with_capacity(1 + replies.len() + parents.len());
+
+    thread.extend(
+        parents
+            .into_iter()
+            .tail(THREAD_PARENTS)
+            .enumerate()
+            .map(|(idx, item)| {
+                let value = parents_hyd
+                    .remove(&item.at_uri)
+                    .map(|post| {
+                        if let Some(v) = &post.author.viewer {
+                            if v.blocked_by || v.blocking.is_some() {
+                                return ThreadV2ItemType::Blocked {
+                                    author: BlockedAuthor {
+                                        did: post.author.did,
+                                        viewer: post.author.viewer,
+                                    },
+                                };
+                            }
+                        }
+
+                        let op_thread = (is_op_thread
+                            || item.root_uri.is_none() && item.parent_uri.is_none())
+                            && post.author.did == root_did;
+
+                        ThreadV2ItemType::Post(ThreadItemPost {
+                            post,
+                            more_parents: idx == 0 && root_has_more,
+                            more_replies: 0,
+                            op_thread,
+                            hidden_by_threadgate: false,
+                            muted_by_viewer: false,
+                        })
+                    })
+                    .unwrap_or(ThreadV2ItemType::NotFound {});
+
+                ThreadV2Item {
+                    uri: item.at_uri,
+                    depth: -item.depth - 1,
+                    value,
+                }
+            }),
+    );
+
+    is_op_thread = is_op_thread && anchor.author.did == root_did;
+    thread.push(ThreadV2Item {
+        uri: uri.clone(),
+        depth: 0,
+        value: ThreadV2ItemType::Post(ThreadItemPost {
+            post: anchor,
+            more_parents: false,
+            more_replies: 0,
+            op_thread: is_op_thread,
+            hidden_by_threadgate: false,
+            muted_by_viewer: false,
+        }),
+    });
+
+    let mut replies_grouped = replies
+        .into_iter()
+        .into_group_map_by(|item| item.parent_uri.clone().unwrap_or_default());
+
+    // start with the anchor
+    let (children, has_other_replies) = build_thread_children(
+        &mut replies_grouped,
+        &mut replies_hyd,
+        &hidden,
+        &uri,
+        is_op_thread,
+        1,
+        &BuildThreadChildrenOpts {
+            root_did,
+            sort: query.sort,
+            maybe_did: &maybe_did,
+            max_depth: depth,
+        },
+    );
+    thread.extend(children);
+
+    Ok(Json(GetPostThreadV2Res {
+        thread,
+        threadgate,
+        has_other_replies,
+    }))
+}
+
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetPostThreadOtherV2Req {
+    pub anchor: String,
+}
+
+#[derive(Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetPostThreadOtherV2Res {
+    pub thread: Vec<ThreadV2Item>,
+}
+
+pub async fn get_post_thread_other_v2(
+    State(state): State<GlobalState>,
+    AtpAcceptLabelers(labelers): AtpAcceptLabelers,
+    maybe_auth: Option<AtpAuth>,
+    Query(query): Query<GetPostThreadOtherV2Req>,
+) -> XrpcResult<Json<GetPostThreadOtherV2Res>> {
+    let mut conn = state.pool.get().await?;
+    let hyd = StatefulHydrator::new(&state.dataloaders, &state.cdn, &labelers, maybe_auth);
+
+    let uri = normalise_at_uri(&state.dataloaders, &query.anchor).await?;
+
+    let root = crate::db::get_root_post(&mut conn, &uri)
+        .await?
+        .unwrap_or(uri.clone());
+
+    // this only returns immediate children (depth==1) where hiddenByThreadgate=TRUE
+    let replies = crate::db::get_thread_children_hidden(&mut conn, &uri, &root).await?;
+    let reply_uris = replies
+        .into_iter()
+        .map(|item| item.at_uri)
+        .collect::<Vec<_>>();
+    let thread = hyd
+        .hydrate_posts(reply_uris)
+        .await
+        .into_iter()
+        .filter(|(_, post)| match &post.author.viewer {
+            Some(viewer) if viewer.blocked_by || viewer.blocking.is_some() => false,
+            _ => true,
+        })
+        .map(|(uri, post)| {
+            let post = ThreadItemPost {
+                post,
+                more_parents: false,
+                more_replies: 0,
+                op_thread: false,
+                hidden_by_threadgate: true,
+                muted_by_viewer: false,
+            };
+
+            ThreadV2Item {
+                uri,
+                depth: 1,
+                value: ThreadV2ItemType::Post(post),
+            }
+        })
+        .collect();
+
+    Ok(Json(GetPostThreadOtherV2Res { thread }))
+}
+
+#[derive(Debug)]
+struct BuildThreadChildrenOpts<'a> {
+    root_did: &'a str,
+    sort: PostThreadSort,
+    maybe_did: &'a Option<String>,
+    max_depth: i32,
+}
+
+fn build_thread_children(
+    grouped_replies: &mut HashMap<String, Vec<ThreadItem>>,
+    replies_hyd: &mut HashMap<String, PostView>,
+    hidden: &HashSet<String>,
+    parent: &str,
+    is_op_thread: bool,
+    depth: i32,
+    opts: &BuildThreadChildrenOpts,
+) -> (Vec<ThreadV2Item>, bool) {
+    let mut has_other_replies = false;
+
+    let Some(replies) = grouped_replies.remove(parent) else {
+        return (Vec::default(), has_other_replies);
+    };
+
+    let replies = replies
+        .into_iter()
+        .filter_map(|item| replies_hyd.remove(&item.at_uri))
+        .sorted_by(sort_replies(&opts.sort));
+
+    let mut out = Vec::new();
+
+    for post in replies {
+        let reply_count = grouped_replies
+            .get(&post.uri)
+            .map(|v| v.len())
+            .unwrap_or_default();
+        let at_max = depth == opts.max_depth;
+        let more_replies = if at_max { reply_count } else { 0 };
+        let op_thread = is_op_thread && post.author.did == opts.root_did;
+
+        // don't push to the thread if there's a block relation; Bsky doesn't emit a Blocked item for replies...
+        if let Some(v) = &post.author.viewer {
+            if v.blocked_by || v.blocking.is_some() {
+                continue;
+            }
+        }
+
+        // check if the post is hidden AND we're NOT the author (hidden posts still show for their author)
+        if hidden.contains(&post.uri) && !did_is_cur(opts.maybe_did, &post.author.did) {
+            // post is hidden - do not ~pass go~ push to the thread.
+            if depth == 1 {
+                has_other_replies = true;
+            }
+            continue;
+        }
+
+        let uri = post.uri.clone();
+        out.push(ThreadV2Item {
+            uri: post.uri.clone(),
+            depth,
+            value: ThreadV2ItemType::Post(ThreadItemPost {
+                post,
+                more_parents: false,
+                more_replies: more_replies as i32,
+                op_thread,
+                hidden_by_threadgate: false,
+                muted_by_viewer: false,
+            }),
+        });
+
+        if !at_max {
+            // we don't care about has_other_replies when recursing
+            let (children, _) = build_thread_children(
+                grouped_replies,
+                replies_hyd,
+                hidden,
+                &uri,
+                op_thread,
+                depth + 1,
+                opts,
+            );
+
+            out.extend(children);
+        }
+    }
+
+    (out, has_other_replies)
+}
+
+fn sort_replies(sort: &PostThreadSort) -> impl Fn(&PostView, &PostView) -> Ordering + use<'_> {
+    move |a: &PostView, b: &PostView| match sort {
+        PostThreadSort::Newest => b.indexed_at.cmp(&a.indexed_at),
+        PostThreadSort::Oldest => a.indexed_at.cmp(&b.indexed_at),
+        PostThreadSort::Top => b.stats.like_count.cmp(&a.stats.like_count),
+    }
+}
+
+fn did_is_cur(cur: &Option<String>, did: &String) -> bool {
+    match cur {
+        Some(cur) => did == cur,
+        None => false,
+    }
+}
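build_thread_children consumes the reply rows after they have been bucketed by parent URI with itertools' into_group_map_by. A tiny self-contained demonstration of that grouping step, using made-up (parent_uri, reply_uri) pairs in place of ThreadItem rows:

use itertools::Itertools;

// Hypothetical data standing in for loaded ThreadItem rows.
let replies = vec![
    ("at://did:a/app.bsky.feed.post/1", "r1"),
    ("at://did:a/app.bsky.feed.post/1", "r2"),
    ("at://did:a/app.bsky.feed.post/2", "r3"),
];
// Same call shape as above: one HashMap bucket per parent URI.
let grouped = replies.into_iter().into_group_map_by(|(parent, _)| *parent);
assert_eq!(grouped["at://did:a/app.bsky.feed.post/1"].len(), 2);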
+69  parakeet/src/xrpc/community_lexicon/bookmarks.rs
···
+use crate::xrpc::datetime_cursor;
+use crate::xrpc::error::XrpcResult;
+use crate::xrpc::extract::AtpAuth;
+use crate::GlobalState;
+use axum::extract::{Query, State};
+use axum::Json;
+use diesel::prelude::*;
+use diesel_async::RunQueryDsl;
+use lexica::community_lexicon::bookmarks::Bookmark;
+use parakeet_db::{models, schema};
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Deserialize)]
+pub struct BookmarkCursorQuery {
+    pub tags: Option<Vec<String>>,
+    pub limit: Option<u8>,
+    pub cursor: Option<String>,
+}
+
+#[derive(Debug, Serialize)]
+pub struct GetActorBookmarksRes {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    cursor: Option<String>,
+    bookmarks: Vec<Bookmark>,
+}
+
+pub async fn get_actor_bookmarks(
+    State(state): State<GlobalState>,
+    auth: AtpAuth,
+    Query(query): Query<BookmarkCursorQuery>,
+) -> XrpcResult<Json<GetActorBookmarksRes>> {
+    let mut conn = state.pool.get().await?;
+
+    let limit = query.limit.unwrap_or(50).clamp(1, 100);
+
+    let mut bookmarks_query = schema::bookmarks::table
+        .select(models::Bookmark::as_select())
+        .filter(schema::bookmarks::did.eq(&auth.0))
+        .into_boxed();
+
+    if let Some(cursor) = datetime_cursor(query.cursor.as_ref()) {
+        bookmarks_query = bookmarks_query.filter(schema::bookmarks::created_at.lt(cursor));
+    }
+
+    if let Some(tags) = query.tags {
+        bookmarks_query = bookmarks_query.filter(schema::bookmarks::tags.contains(tags));
+    }
+
+    let results = bookmarks_query
+        .order(schema::bookmarks::created_at.desc())
+        .limit(limit as i64)
+        .load(&mut conn)
+        .await?;
+
+    let cursor = results
+        .last()
+        .map(|bm| bm.created_at.timestamp_millis().to_string());
+
+    let bookmarks = results
+        .into_iter()
+        .map(|bookmark| Bookmark {
+            subject: bookmark.subject,
+            tags: bookmark.tags.into(),
+            created_at: bookmark.created_at,
+        })
+        .collect();
+
+    Ok(Json(GetActorBookmarksRes { cursor, bookmarks }))
+}
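The tags filter above uses Diesel's Postgres array contains, which compiles to the `@>` operator: a bookmark matches only when its tags column is a superset of the requested tags. The filter in isolation (a sketch; `wanted` is a made-up value):

use diesel::prelude::*;
use parakeet_db::schema;

// `.contains()` on an array column is Diesel's Postgres "array contains"
// expression and maps to the `@>` operator in the generated SQL.
let wanted = vec!["rust".to_string(), "atproto".to_string()];
let _q = schema::bookmarks::table.filter(schema::bookmarks::tags.contains(wanted));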
+10  parakeet/src/xrpc/community_lexicon/mod.rs
+2  parakeet/src/xrpc/mod.rs
···
 mod app_bsky;
 pub mod cdn;
 mod com_atproto;
+mod community_lexicon;
 mod error;
 pub mod extract;
 pub mod jwt;
···
     Router::new()
         .merge(app_bsky::routes())
         .merge(com_atproto::routes())
+        .merge(community_lexicon::routes())
 }

 fn datetime_cursor(cursor: Option<&String>) -> Option<chrono::DateTime<chrono::Utc>> {
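The body of datetime_cursor is cut off in this view; given that every handler above emits timestamp_millis().to_string() as its cursor, a plausible body would be (an assumption, not the committed code):

fn datetime_cursor(cursor: Option<&String>) -> Option<chrono::DateTime<chrono::Utc>> {
    // Parse the millisecond timestamp the handlers emit; an unparseable
    // cursor is treated the same as no cursor at all.
    let millis = cursor?.parse::<i64>().ok()?;
    chrono::DateTime::from_timestamp_millis(millis)
}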
+101 -12  parakeet-db/src/models.rs
···
     pub joined_sp_uri: Option<String>,
     pub joined_sp_cid: Option<String>,

+    pub pronouns: Option<String>,
+    pub website: Option<String>,
+
     pub created_at: NaiveDateTime,
     pub indexed_at: NaiveDateTime,
 }
···

     pub content: String,
     pub facets: Option<serde_json::Value>,
-    pub languages: Vec<Option<String>>,
-    pub tags: Vec<Option<String>>,
+    pub languages: not_null_vec::TextArray,
+    pub tags: not_null_vec::TextArray,

     pub parent_uri: Option<String>,
     pub parent_cid: Option<String>,
···
     pub embed: Option<String>,
     pub embed_subtype: Option<String>,

+    pub mentions: Option<not_null_vec::TextArray>,
+    pub violates_threadgate: bool,
+
     pub created_at: DateTime<Utc>,
     pub indexed_at: NaiveDateTime,
 }
···
     pub cid: String,
     pub post_uri: String,

-    pub detached: Vec<Option<String>>,
-    pub rules: Vec<Option<String>>,
+    pub detached: not_null_vec::TextArray,
+    pub rules: not_null_vec::TextArray,

     pub created_at: DateTime<Utc>,
     pub indexed_at: NaiveDateTime,
···
     pub cid: String,
     pub post_uri: String,

-    pub hidden_replies: Vec<Option<String>>,
-    pub allow: Vec<Option<String>>,
-    pub allowed_lists: Vec<Option<String>>,
+    pub hidden_replies: not_null_vec::TextArray,
+    pub allow: Option<not_null_vec::TextArray>,
+    pub allowed_lists: Option<not_null_vec::TextArray>,

     pub record: serde_json::Value,

···
     pub description: Option<String>,
     pub description_facets: Option<serde_json::Value>,
     pub list: String,
-    pub feeds: Option<Vec<Option<String>>>,
+    pub feeds: Option<not_null_vec::TextArray>,

     pub created_at: DateTime<Utc>,
     pub indexed_at: NaiveDateTime,
···
     pub did: String,
     pub cid: String,

-    pub reasons: Option<Vec<Option<String>>>,
-    pub subject_types: Option<Vec<Option<String>>>,
-    pub subject_collections: Option<Vec<Option<String>>>,
+    pub reasons: Option<not_null_vec::TextArray>,
+    pub subject_types: Option<not_null_vec::TextArray>,
+    pub subject_collections: Option<not_null_vec::TextArray>,

     pub created_at: NaiveDateTime,
     pub indexed_at: NaiveDateTime,
 }

-#[derive(Clone, Debug, Serialize, Deserialize, Queryable, Selectable, Identifiable, Associations)]
+#[derive(
+    Clone, Debug, Serialize, Deserialize, Queryable, Selectable, Identifiable, Associations,
+)]
 #[diesel(table_name = crate::schema::labeler_defs)]
 #[diesel(belongs_to(LabelerService, foreign_key = labeler))]
 #[diesel(check_for_backend(diesel::pg::Pg))]
···
     pub did: &'a str,
     pub list_uri: &'a str,
 }
+
+#[derive(Clone, Debug, Serialize, Deserialize, Queryable, Selectable, Identifiable)]
+#[diesel(table_name = crate::schema::bookmarks)]
+#[diesel(primary_key(did, subject, subject_cid))]
+#[diesel(check_for_backend(diesel::pg::Pg))]
+pub struct Bookmark {
+    pub did: String,
+    pub rkey: Option<String>,
+    pub subject: String,
+    pub subject_cid: Option<String>,
+    pub subject_type: String,
+    pub tags: not_null_vec::TextArray,
+    pub created_at: DateTime<Utc>,
+}
+
+#[derive(Debug, Insertable, AsChangeset)]
+#[diesel(table_name = crate::schema::bookmarks)]
+#[diesel(check_for_backend(diesel::pg::Pg))]
+pub struct NewBookmark<'a> {
+    pub did: &'a str,
+    pub rkey: Option<String>,
+    pub subject: &'a str,
+    pub subject_cid: Option<String>,
+    pub subject_type: &'a str,
+    pub tags: Vec<String>,
+}
+
+#[derive(Debug, Queryable, Selectable, Identifiable)]
+#[diesel(table_name = crate::schema::author_feeds)]
+#[diesel(primary_key(uri))]
+#[diesel(check_for_backend(diesel::pg::Pg))]
+pub struct AuthorFeedItem {
+    pub uri: String,
+    pub cid: String,
+    pub post: String,
+    pub did: String,
+    pub typ: String,
+    pub sort_at: DateTime<Utc>,
+}
+
+pub use not_null_vec::TextArray;
+mod not_null_vec {
+    use diesel::deserialize::FromSql;
+    use diesel::pg::Pg;
+    use diesel::sql_types::{Array, Nullable, Text};
+    use diesel::{deserialize, FromSqlRow};
+    use serde::{Deserialize, Serialize};
+    use std::ops::{Deref, DerefMut};
+
+    #[derive(Clone, Debug, Default, Serialize, Deserialize, FromSqlRow)]
+    #[diesel(sql_type = Array<Nullable<Text>>)]
+    pub struct TextArray(pub Vec<String>);
+
+    impl FromSql<Array<Nullable<Text>>, Pg> for TextArray {
+        fn from_sql(bytes: diesel::pg::PgValue<'_>) -> deserialize::Result<Self> {
+            let vec_with_nulls =
+                <Vec<Option<String>> as FromSql<Array<Nullable<Text>>, Pg>>::from_sql(bytes)?;
+            Ok(TextArray(vec_with_nulls.into_iter().flatten().collect()))
+        }
+    }
+
+    impl Deref for TextArray {
+        type Target = Vec<String>;
+
+        fn deref(&self) -> &Self::Target {
+            &self.0
+        }
+    }
+
+    impl DerefMut for TextArray {
+        fn deref_mut(&mut self) -> &mut Self::Target {
+            &mut self.0
+        }
+    }
+
+    impl From<TextArray> for Vec<String> {
+        fn from(v: TextArray) -> Vec<String> {
+            v.0
+        }
+    }
+}
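The point of the TextArray wrapper is that SQL NULL elements are dropped once, at the FromSql boundary, and Deref lets call sites treat the column as a plain Vec<String>. A small usage sketch (constructing the wrapper by hand here is illustrative; real values come from Diesel):

use parakeet_db::models::TextArray;

let tags = TextArray(vec!["rust".to_string(), "atproto".to_string()]);
assert_eq!(tags.len(), 2);            // via Deref<Target = Vec<String>>
let plain: Vec<String> = tags.into(); // via From<TextArray> for Vec<String>
assert_eq!(plain, vec!["rust".to_string(), "atproto".to_string()]);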
+45 -2  parakeet-db/src/schema.rs
···
 }

 diesel::table! {
+    author_feeds (uri) {
+        uri -> Text,
+        cid -> Text,
+        post -> Text,
+        did -> Text,
+        typ -> Text,
+        sort_at -> Timestamptz,
+    }
+}
+
+diesel::table! {
     backfill (repo, repo_ver) {
         repo -> Text,
         repo_ver -> Text,
···
         rkey -> Text,
         did -> Text,
         subject -> Text,
+        created_at -> Timestamptz,
+    }
+}
+
+diesel::table! {
+    bookmarks (did, subject) {
+        did -> Text,
+        rkey -> Nullable<Text>,
+        subject -> Text,
+        subject_cid -> Nullable<Text>,
+        subject_type -> Text,
+        tags -> Array<Nullable<Text>>,
         created_at -> Timestamptz,
     }
 }
···
         embed_subtype -> Nullable<Text>,
         created_at -> Timestamptz,
         indexed_at -> Timestamp,
+        mentions -> Nullable<Array<Nullable<Text>>>,
+        violates_threadgate -> Bool,
+    }
+}
+
+diesel::table! {
+    profile_states (did, subject) {
+        did -> Text,
+        subject -> Text,
+        muting -> Bool,
+        blocked -> Bool,
+        blocking -> Nullable<Text>,
+        following -> Nullable<Text>,
+        followed -> Nullable<Text>,
     }
 }
···
         joined_sp_cid -> Nullable<Text>,
         created_at -> Timestamp,
         indexed_at -> Timestamp,
+        pronouns -> Nullable<Text>,
+        website -> Nullable<Text>,
     }
 }
···
         cid -> Text,
         post_uri -> Text,
         hidden_replies -> Array<Nullable<Text>>,
-        allow -> Array<Nullable<Text>>,
-        allowed_lists -> Array<Nullable<Text>>,
+        allow -> Nullable<Array<Nullable<Text>>>,
+        allowed_lists -> Nullable<Array<Nullable<Text>>>,
         record -> Jsonb,
         created_at -> Timestamptz,
         indexed_at -> Timestamp,
···

 diesel::joinable!(backfill -> actors (repo));
 diesel::joinable!(blocks -> actors (did));
+diesel::joinable!(bookmarks -> actors (did));
 diesel::joinable!(chat_decls -> actors (did));
 diesel::joinable!(feedgens -> actors (owner));
 diesel::joinable!(follows -> actors (did));
···

 diesel::allow_tables_to_appear_in_same_query!(
     actors,
+    author_feeds,
     backfill,
     backfill_jobs,
     blocks,
+    bookmarks,
     chat_decls,
     feedgens,
     follows,
···
     post_embed_video_captions,
     postgates,
     posts,
+    profile_states,
     profiles,
     records,
     reposts,
+1 -1  parakeet-db/src/types.rs
+11  parakeet-index/justfile
···
+@release:
+    cargo build --release --features server
+
+@lint:
+    cargo clippy
+
+@run *params:
+    cargo run --features server -- {{params}}
+
+@docker platform='linux/amd64' branch='main':
+    docker buildx build --platform {{platform}} -t registry.gitlab.com/parakeet-social/parakeet/parakeet-index:{{branch}} . -f parakeet-index/Dockerfile
-1  parakeet-index/run.sh
···
-cargo run --features server