ufos/fuzz/fuzz_targets/counts_value.rs  (+1 -1)
···
         assert_eq!(serialized.len(), n);
         let (and_back, n_again) = CountsValue::from_db_bytes(&serialized).unwrap();
         assert_eq!(n_again, n);
-        assert_eq!(and_back.records(), counts_value.records());
+        assert_eq!(and_back.counts(), counts_value.counts());
         assert_eq!(and_back.dids().estimate(), counts_value.dids().estimate());
     }
 });
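
For reference, the property this fuzz target exercises, written out as a plain unit-style sketch (the fixed values here stand in for the fuzzer's arbitrary inputs; all APIs are ones introduced in this patch):

    let cv = CountsValue::new(
        CommitCounts { creates: 1, updates: 2, deletes: 3 },
        Sketch::<14>::default(),
    );
    let bytes = cv.to_db_bytes().unwrap();
    let (back, consumed) = CountsValue::from_db_bytes(&bytes).unwrap();
    // the round trip consumes every byte and reproduces the counts exactly
    assert_eq!(consumed, bytes.len());
    assert_eq!(back.counts(), cv.counts()); // CommitCounts compares field-wise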
ufos/src/db_types.rs  (+9)
···
     }
 }
 
+impl<P: DbBytes + Default, S: DbBytes + Default> Default for DbConcat<P, S> {
+    fn default() -> Self {
+        Self {
+            prefix: Default::default(),
+            suffix: Default::default(),
+        }
+    }
+}
+
 impl<P: DbBytes + std::fmt::Debug, S: DbBytes + std::fmt::Debug> fmt::Debug for DbConcat<P, S> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "DbConcat<{:?} || {:?}>", self.prefix, self.suffix)
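
This blanket impl is what lets the hand-rolled `impl Default for CountsValue` be deleted in store_types.rs further down: once both halves of a `DbConcat` are `Default`, the whole concat is. A minimal sketch (relying on the `Default` derive this patch also adds to `EstimatedDidsValue`):

    // CountsValue = DbConcat<CommitCounts, EstimatedDidsValue>
    let empty = CountsValue::default();
    assert_eq!(empty.counts(), CommitCounts::default());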
ufos/src/lib.rs  (+71 -25)
···
 
 #[derive(Debug, Default, Clone)]
 pub struct CollectionCommits<const LIMIT: usize> {
-    pub total_seen: usize,
+    pub creates: usize,
+    pub updates: usize,
+    pub deletes: usize,
     pub dids_estimate: Sketch<14>,
     pub commits: Vec<UFOsCommit>,
     head: usize,
-    non_creates: usize,
 }
 
 impl<const LIMIT: usize> CollectionCommits<LIMIT> {
···
             self.head = 0;
         }
     }
+    /// lossy-ish commit insertion
+    ///
+    /// - new commits are *always* added to the batch, or else rejected as full.
+    /// - when LIMIT is reached, new commits can displace existing `creates`;
+    ///   `update`s and `delete`s are *never* displaced.
+    /// - if all batched `creates` have been displaced, the batch is full.
+    ///
+    /// in general it's rare for commits to be displaced, except for very
+    /// high-volume collections such as `app.bsky.feed.like`.
+    ///
+    /// it could be nice in the future to retain all batched commits and just
+    /// drop new `creates` after a limit instead.
     pub fn truncating_insert(
         &mut self,
         commit: UFOsCommit,
         sketch_secret: &SketchSecretPrefix,
     ) -> Result<(), BatchInsertError> {
-        if self.non_creates == LIMIT {
+        if (self.updates + self.deletes) == LIMIT {
+            // nothing can be displaced (only `create`s may be displaced)
             return Err(BatchInsertError::BatchFull(commit));
         }
-        let did = commit.did.clone();
-        let is_create = commit.action.is_create();
+
+        // every kind of commit counts as "user activity"
+        self.dids_estimate
+            .insert(did_element(sketch_secret, &commit.did));
+
+        match commit.action {
+            CommitAction::Put(PutAction {
+                is_update: false, ..
+            }) => {
+                self.creates += 1;
+            }
+            CommitAction::Put(PutAction {
+                is_update: true, ..
+            }) => {
+                self.updates += 1;
+            }
+            CommitAction::Cut => {
+                self.deletes += 1;
+            }
+        }
+
         if self.commits.len() < LIMIT {
+            // normal insert: there's space left to put a new commit at the end
             self.commits.push(commit);
-            if self.commits.capacity() > LIMIT {
-                self.commits.shrink_to(LIMIT); // save mem?????? maybe??
-            }
         } else {
+            // displacement insert: find an old `create` we can displace
             let head_started_at = self.head;
             loop {
                 let candidate = self
···
             }
         }
 
-        if is_create {
-            self.total_seen += 1;
-            self.dids_estimate.insert(did_element(sketch_secret, &did));
-        } else {
-            self.non_creates += 1;
-        }
-
         Ok(())
     }
 }
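
To see the new bookkeeping end to end, here is a unit-style sketch of the rejection path (same fixtures as the tests later in this patch, inside a test returning `anyhow::Result<()>`; with `LIMIT = 1`, a single `update` fills the batch, and updates are never displaced):

    let mut commits: CollectionCommits<1> = Default::default();
    commits.truncating_insert(
        UFOsCommit {
            cursor: Cursor::from_raw_u64(100),
            did: Did::new("did:plc:whatever".to_string()).unwrap(),
            rkey: RecordKey::new("rkey-asdf-a".to_string()).unwrap(),
            rev: "rev-asdf".to_string(),
            action: CommitAction::Put(PutAction {
                record: RawValue::from_string("{}".to_string())?,
                is_update: true,
            }),
        },
        &[0u8; 16],
    )?;
    assert_eq!((commits.creates, commits.updates, commits.deletes), (0, 1, 0));
    // (updates + deletes) == LIMIT now, so any further insert returns
    // Err(BatchInsertError::BatchFull(_))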
···
             .truncating_insert(commit, sketch_secret)?;
         Ok(())
     }
-    pub fn total_records(&self) -> usize {
-        self.commits_by_nsid.values().map(|v| v.commits.len()).sum()
-    }
-    pub fn total_seen(&self) -> usize {
-        self.commits_by_nsid.values().map(|v| v.total_seen).sum()
-    }
     pub fn total_collections(&self) -> usize {
         self.commits_by_nsid.len()
     }
···
 #[derive(Debug, Serialize, JsonSchema)]
 pub struct NsidCount {
     nsid: String,
-    records: u64,
+    creates: u64,
     dids_estimate: u64,
 }
 
 #[derive(Debug, Serialize, JsonSchema)]
 pub struct JustCount {
-    records: u64,
+    creates: u64,
     dids_estimate: u64,
 }
 
···
             &[0u8; 16],
         )?;
 
-        assert_eq!(commits.total_seen, 3);
+        assert_eq!(commits.creates, 3);
         assert_eq!(commits.dids_estimate.estimate(), 1);
         assert_eq!(commits.commits.len(), 2);
···
     }
 
     #[test]
+    fn test_truncating_insert_counts_updates() -> anyhow::Result<()> {
+        let mut commits: CollectionCommits<2> = Default::default();
+
+        commits.truncating_insert(
+            UFOsCommit {
+                cursor: Cursor::from_raw_u64(100),
+                did: Did::new("did:plc:whatever".to_string()).unwrap(),
+                rkey: RecordKey::new("rkey-asdf-a".to_string()).unwrap(),
+                rev: "rev-asdf".to_string(),
+                action: CommitAction::Put(PutAction {
+                    record: RawValue::from_string("{}".to_string())?,
+                    is_update: true,
+                }),
+            },
+            &[0u8; 16],
+        )?;
+
+        assert_eq!(commits.creates, 0);
+        assert_eq!(commits.updates, 1);
+        assert_eq!(commits.deletes, 0);
+        assert_eq!(commits.dids_estimate.estimate(), 1);
+        assert_eq!(commits.commits.len(), 1);
+        Ok(())
+    }
+
+    #[test]
     fn test_truncating_insert_does_not_truncate_deletes() -> anyhow::Result<()> {
         let mut commits: CollectionCommits<2> = Default::default();
 
···
             &[0u8; 16],
         )?;
 
-        assert_eq!(commits.total_seen, 2);
+        assert_eq!(commits.creates, 2);
+        assert_eq!(commits.deletes, 1);
         assert_eq!(commits.dids_estimate.estimate(), 1);
         assert_eq!(commits.commits.len(), 2);
ufos/src/server.rs  (+5 -3)
···
         consumer,
     })
 }
+
+// TODO: replace with normal (🙃) multi-qs value somehow
 fn to_multiple_nsids(s: &str) -> Result<HashSet<Nsid>, String> {
     let mut out = HashSet::new();
     for collection in s.split(',') {
···
 }
 #[derive(Debug, Serialize, JsonSchema)]
 struct TotalCounts {
-    total_records: u64,
+    total_creates: u64,
     dids_estimate: u64,
 }
 /// Get total records seen by collection
···
     let mut seen_by_collection = HashMap::with_capacity(collections.len());
 
     for collection in &collections {
-        let (total_records, dids_estimate) = storage
+        let (total_creates, dids_estimate) = storage
             .get_counts_by_collection(collection)
             .await
             .map_err(|e| HttpError::for_internal_error(format!("boooo: {e:?}")))?;
···
         seen_by_collection.insert(
             collection.to_string(),
             TotalCounts {
-                total_records,
+                total_creates,
                 dids_estimate,
             },
         );
ufos/src/storage_fjall.rs  (+66 -62)
···
 use crate::error::StorageError;
 use crate::storage::{StorageResult, StorageWhatever, StoreBackground, StoreReader, StoreWriter};
 use crate::store_types::{
-    AllTimeDidsKey, AllTimeRecordsKey, AllTimeRollupKey, CountsValue, CursorBucket,
+    AllTimeDidsKey, AllTimeRecordsKey, AllTimeRollupKey, CommitCounts, CountsValue, CursorBucket,
     DeleteAccountQueueKey, DeleteAccountQueueVal, HourTruncatedCursor, HourlyDidsKey,
     HourlyRecordsKey, HourlyRollupKey, HourlyRollupStaticPrefix, JetstreamCursorKey,
     JetstreamCursorValue, JetstreamEndpointKey, JetstreamEndpointValue, LiveCountsKey,
···
             }
             out.push(NsidCount {
                 nsid: nsid.to_string(),
-                records: merged.records(),
+                creates: merged.counts().creates,
                 dids_estimate: merged.dids().estimate() as u64,
             });
         }
···
         }
         let mut ranked: Vec<(Nsid, CountsValue)> = ranked.into_iter().collect();
         match order {
-            OrderCollectionsBy::RecordsCreated => ranked.sort_by_key(|(_, c)| c.records()),
+            OrderCollectionsBy::RecordsCreated => ranked.sort_by_key(|(_, c)| c.counts().creates),
             OrderCollectionsBy::DidsEstimate => ranked.sort_by_key(|(_, c)| c.dids().estimate()),
             OrderCollectionsBy::Lexi { .. } => unreachable!(),
         }
···
             .take(limit)
             .map(|(nsid, cv)| NsidCount {
                 nsid: nsid.to_string(),
-                records: cv.records(),
+                creates: cv.counts().creates,
                 dids_estimate: cv.dids().estimate() as u64,
             })
             .collect();
···
             }
         }
         Ok((
-            total_counts.records(),
+            total_counts.counts().creates,
             total_counts.dids().estimate() as u64,
         ))
     }
···
             .unwrap_or_default();
 
         // now that we have values, we can know the existing ranks
-        let before_records_count = rolled.records();
+        let before_creates_count = rolled.counts().creates;
         let before_dids_estimate = rolled.dids().estimate() as u64;
 
         // update the rollup
         rolled.merge(&counts);
 
-        // replace rank entries
-        let (old_records, new_records, dids) = match rollup {
-            Rollup::Hourly(hourly_cursor) => {
-                let old_records =
-                    HourlyRecordsKey::new(hourly_cursor, before_records_count.into(), &nsid);
-                let new_records = old_records.with_rank(rolled.records().into());
-                let new_estimate = rolled.dids().estimate() as u64;
-                let dids = if new_estimate == before_dids_estimate {
-                    None
-                } else {
-                    let old_dids =
-                        HourlyDidsKey::new(hourly_cursor, before_dids_estimate.into(), &nsid);
-                    let new_dids = old_dids.with_rank(new_estimate.into());
-                    Some((old_dids.to_db_bytes()?, new_dids.to_db_bytes()?))
-                };
-                (old_records.to_db_bytes()?, new_records.to_db_bytes()?, dids)
-            }
-            Rollup::Weekly(weekly_cursor) => {
-                let old_records =
-                    WeeklyRecordsKey::new(weekly_cursor, before_records_count.into(), &nsid);
-                let new_records = old_records.with_rank(rolled.records().into());
-                let new_estimate = rolled.dids().estimate() as u64;
-                let dids = if new_estimate == before_dids_estimate {
-                    None
-                } else {
-                    let old_dids =
-                        WeeklyDidsKey::new(weekly_cursor, before_dids_estimate.into(), &nsid);
-                    let new_dids = old_dids.with_rank(new_estimate.into());
-                    Some((old_dids.to_db_bytes()?, new_dids.to_db_bytes()?))
-                };
-                (old_records.to_db_bytes()?, new_records.to_db_bytes()?, dids)
-            }
-            Rollup::AllTime => {
-                let old_records = AllTimeRecordsKey::new(before_records_count.into(), &nsid);
-                let new_records = old_records.with_rank(rolled.records().into());
-                let new_estimate = rolled.dids().estimate() as u64;
-                let dids = if new_estimate == before_dids_estimate {
-                    None
-                } else {
-                    let old_dids = AllTimeDidsKey::new(before_dids_estimate.into(), &nsid);
-                    let new_dids = old_dids.with_rank(new_estimate.into());
-                    Some((old_dids.to_db_bytes()?, new_dids.to_db_bytes()?))
-                };
-                (old_records.to_db_bytes()?, new_records.to_db_bytes()?, dids)
-            }
-        };
+        // new ranks
+        let new_creates_count = rolled.counts().creates;
+        let new_dids_estimate = rolled.dids().estimate() as u64;
 
-        // replace the ranks
-        batch.remove(&self.rollups, &old_records);
-        batch.insert(&self.rollups, &new_records, "");
-        if let Some((old_dids, new_dids)) = dids {
-            batch.remove(&self.rollups, &old_dids);
-            batch.insert(&self.rollups, &new_dids, "");
+        // update create-ranked secondary index if rank changed
+        if new_creates_count != before_creates_count {
+            let (old_k, new_k) = match rollup {
+                Rollup::Hourly(cursor) => (
+                    HourlyRecordsKey::new(cursor, before_creates_count.into(), &nsid)
+                        .to_db_bytes()?,
+                    HourlyRecordsKey::new(cursor, new_creates_count.into(), &nsid)
+                        .to_db_bytes()?,
+                ),
+                Rollup::Weekly(cursor) => (
+                    WeeklyRecordsKey::new(cursor, before_creates_count.into(), &nsid)
+                        .to_db_bytes()?,
+                    WeeklyRecordsKey::new(cursor, new_creates_count.into(), &nsid)
+                        .to_db_bytes()?,
+                ),
+                Rollup::AllTime => (
+                    AllTimeRecordsKey::new(before_creates_count.into(), &nsid).to_db_bytes()?,
+                    AllTimeRecordsKey::new(new_creates_count.into(), &nsid).to_db_bytes()?,
+                ),
+            };
+            batch.remove(&self.rollups, &old_k); // TODO: when fjall gets weak delete, this will hopefully work way better
+            batch.insert(&self.rollups, &new_k, "");
+        }
+
+        // update dids-ranked secondary index if rank changed
+        if new_dids_estimate != before_dids_estimate {
+            let (old_k, new_k) = match rollup {
+                Rollup::Hourly(cursor) => (
+                    HourlyDidsKey::new(cursor, before_dids_estimate.into(), &nsid)
+                        .to_db_bytes()?,
+                    HourlyDidsKey::new(cursor, new_dids_estimate.into(), &nsid)
+                        .to_db_bytes()?,
+                ),
+                Rollup::Weekly(cursor) => (
+                    WeeklyDidsKey::new(cursor, before_dids_estimate.into(), &nsid)
+                        .to_db_bytes()?,
+                    WeeklyDidsKey::new(cursor, new_dids_estimate.into(), &nsid)
+                        .to_db_bytes()?,
+                ),
+                Rollup::AllTime => (
+                    AllTimeDidsKey::new(before_dids_estimate.into(), &nsid).to_db_bytes()?,
+                    AllTimeDidsKey::new(new_dids_estimate.into(), &nsid).to_db_bytes()?,
+                ),
+            };
+            batch.remove(&self.rollups, &old_k); // TODO: when fjall gets weak delete, this will hopefully work way better
+            batch.insert(&self.rollups, &new_k, "");
         }
 
-        // replace the rollup
+        // replace the main counts rollup
         batch.insert(&self.rollups, &rollup_key_bytes, &rolled.to_db_bytes()?);
     }
 
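A note on the pattern above: the rank index keys embed the count itself and the value is empty, so "updating" a rank necessarily means removing the old key and inserting a new one. In miniature, with a plain `BTreeMap` standing in for the fjall partition (illustrative only, not the store's API):

    use std::collections::BTreeMap;

    // rank index keyed by (count, nsid): iteration yields NSIDs ordered by count
    let mut index: BTreeMap<(u64, String), ()> = BTreeMap::new();
    let nsid = "app.bsky.feed.like".to_string();
    index.insert((41, nsid.clone()), ());

    // a rollup bumps the count 41 -> 42: rotate the key
    index.remove(&(41, nsid.clone()));
    index.insert((42, nsid), ());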
···
             }
         }
         let live_counts_key: LiveCountsKey = (latest, &nsid).into();
-        let counts_value = CountsValue::new(commits.total_seen as u64, commits.dids_estimate);
+        let counts_value = CountsValue::new(
+            CommitCounts {
+                creates: commits.creates as u64,
+                updates: commits.updates as u64,
+                deletes: commits.deletes as u64,
+            },
+            commits.dids_estimate,
+        );
         batch.insert(
             &self.rollups,
             &live_counts_key.to_db_bytes()?,
···
         );
         write.insert_batch(batch.batch)?;
 
-        let (records, dids) = read.get_counts_by_collection(&collection)?;
-        assert_eq!(records, 1);
+        let (creates, dids) = read.get_counts_by_collection(&collection)?;
+        assert_eq!(creates, 1);
         assert_eq!(dids, 1);
 
         let records = read.get_records_by_collections([collection].into(), 2, false)?;
ufos/src/storage_mem.rs  (+12 -5)
···
 use crate::error::StorageError;
 use crate::storage::{StorageResult, StorageWhatever, StoreBackground, StoreReader, StoreWriter};
 use crate::store_types::{
-    AllTimeRollupKey, CountsValue, DeleteAccountQueueKey, DeleteAccountQueueVal,
+    AllTimeRollupKey, CommitCounts, CountsValue, DeleteAccountQueueKey, DeleteAccountQueueVal,
     HourTruncatedCursor, HourlyRollupKey, JetstreamCursorKey, JetstreamCursorValue,
     JetstreamEndpointKey, JetstreamEndpointValue, LiveCountsKey, NewRollupCursorKey,
     NewRollupCursorValue, NsidRecordFeedKey, NsidRecordFeedVal, RecordLocationKey,
···
             }
         }
         Ok((
-            total_counts.records(),
+            total_counts.counts().creates,
             total_counts.dids().estimate() as u64,
         ))
     }
···
         assert_eq!(n, tripppin.len());
         assert_eq!(counts.prefix, and_back.prefix);
         assert_eq!(counts.dids().estimate(), and_back.dids().estimate());
-        if counts.records() > 20000000 {
+        if counts.counts().creates > 20000000 {
             panic!("COUNTS maybe wtf? {counts:?}")
         }
         // assert_eq!(rolled, and_back);
···
         assert_eq!(n, tripppin.len());
         assert_eq!(rolled.prefix, and_back.prefix);
         assert_eq!(rolled.dids().estimate(), and_back.dids().estimate());
-        if rolled.records() > 20000000 {
+        if rolled.counts().creates > 20000000 {
             panic!("maybe wtf? {rolled:?}")
         }
         // assert_eq!(rolled, and_back);
···
             }
         }
         let live_counts_key: LiveCountsKey = (latest, &nsid).into();
-        let counts_value = CountsValue::new(commits.total_seen as u64, commits.dids_estimate);
+        let counts_value = CountsValue::new(
+            CommitCounts {
+                creates: commits.creates as u64,
+                updates: commits.updates as u64,
+                deletes: commits.deletes as u64,
+            },
+            commits.dids_estimate,
+        );
         batch.insert(
             &self.rollups,
             &live_counts_key.to_db_bytes()?,
ufos/src/store_types.rs  (+39 -23)
···
         )
     }
 }
-#[derive(Debug, PartialEq, Decode, Encode)]
-pub struct TotalRecordsValue(pub u64);
-impl UseBincodePlz for TotalRecordsValue {}
 
-#[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)]
+#[derive(Debug, Clone, Copy, Default, PartialEq, Decode, Encode)]
+pub struct CommitCounts {
+    pub creates: u64,
+    pub updates: u64,
+    pub deletes: u64,
+}
+impl CommitCounts {
+    pub fn merge(&mut self, other: &Self) {
+        self.creates += other.creates;
+        self.updates += other.updates;
+        self.deletes += other.deletes;
+    }
+}
+impl UseBincodePlz for CommitCounts {}
+
+#[derive(Debug, Default, PartialEq, serde::Serialize, serde::Deserialize)]
 pub struct EstimatedDidsValue(pub Sketch<14>);
 impl SerdeBytes for EstimatedDidsValue {}
 impl DbBytes for EstimatedDidsValue {
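
`CommitCounts::merge` is plain field-wise addition, which is what the rollup code above relies on; the DID sketch is merged separately, since distinct-count estimates don't add (they take a sketch union instead). A quick sketch of the arithmetic:

    let mut a = CommitCounts { creates: 10, updates: 2, deletes: 1 };
    let b = CommitCounts { creates: 5, updates: 0, deletes: 4 };
    a.merge(&b);
    assert_eq!((a.creates, a.updates, a.deletes), (15, 2, 5));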
···
     }
 }
 
-pub type CountsValue = DbConcat<TotalRecordsValue, EstimatedDidsValue>;
+pub type CountsValue = DbConcat<CommitCounts, EstimatedDidsValue>;
 impl CountsValue {
-    pub fn new(total: u64, dids: Sketch<14>) -> Self {
+    pub fn new(counts: CommitCounts, dids: Sketch<14>) -> Self {
         Self {
-            prefix: TotalRecordsValue(total),
+            prefix: counts,
             suffix: EstimatedDidsValue(dids),
         }
     }
-    pub fn records(&self) -> u64 {
-        self.prefix.0
+    pub fn counts(&self) -> CommitCounts {
+        self.prefix
     }
     pub fn dids(&self) -> &Sketch<14> {
         &self.suffix.0
     }
     pub fn merge(&mut self, other: &Self) {
-        self.prefix.0 += other.records();
-        self.suffix.0.merge(other.dids());
-    }
-}
-impl Default for CountsValue {
-    fn default() -> Self {
-        Self {
-            prefix: TotalRecordsValue(0),
-            suffix: EstimatedDidsValue(Sketch::<14>::default()),
-        }
+        self.prefix.merge(&other.prefix);
+        self.suffix.0.merge(&other.suffix.0);
     }
 }
 impl From<&CountsValue> for JustCount {
     fn from(cv: &CountsValue) -> Self {
         Self {
-            records: cv.records(),
+            creates: cv.counts().creates,
             dids_estimate: cv.dids().estimate() as u64,
         }
     }
···
 #[cfg(test)]
 mod test {
     use super::{
-        CountsValue, Cursor, CursorBucket, Did, EncodingError, HourTruncatedCursor,
+        CommitCounts, CountsValue, Cursor, CursorBucket, Did, EncodingError, HourTruncatedCursor,
         HourlyRollupKey, Nsid, Sketch, HOUR_IN_MICROS, WEEK_IN_MICROS,
     };
     use crate::db_types::DbBytes;
···
                 Did::new(format!("did:plc:inze6wrmsm7pjl7yta3oig7{i}")).unwrap(),
             ));
         }
-        let original = CountsValue::new(123, estimator.clone());
+        let original = CountsValue::new(
+            CommitCounts {
+                creates: 123,
+                ..Default::default()
+            },
+            estimator.clone(),
+        );
         let serialized = original.to_db_bytes()?;
         let (restored, bytes_consumed) = CountsValue::from_db_bytes(&serialized)?;
         assert_eq!(restored, original);
···
                 Did::new(format!("did:plc:inze6wrmsm7pjl7yta3oig{i}")).unwrap(),
             ));
         }
-        let original = CountsValue::new(123, estimator);
+        let original = CountsValue::new(
+            CommitCounts {
+                creates: 123,
+                ..Default::default()
+            },
+            estimator,
+        );
         let serialized = original.to_db_bytes()?;
         let (restored, bytes_consumed) = CountsValue::from_db_bytes(&serialized)?;
         assert_eq!(restored, original);