+12 -12 ufos/src/consumer.rs
···
 use std::time::Duration;
 use tokio::sync::mpsc::{channel, Receiver, Sender};
 
-use crate::{DeleteAccount, EventBatch, UFOsCommit};
 use crate::error::FirehoseEventError;
+use crate::{DeleteAccount, EventBatch, UFOsCommit};
 
 const MAX_BATCHED_RECORDS: usize = 128; // *non-blocking* limit. drops oldest batched record per collection once reached.
 const MAX_ACCOUNT_REMOVES: usize = 1024; // hard limit, extremely unlikely to reach, but just in case
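Note: `MAX_BATCHED_RECORDS` describes a non-blocking, drop-oldest bound per collection. A minimal sketch of that eviction pattern (illustrative types and names, not this crate's actual batch structures):

    use std::collections::{HashMap, VecDeque};

    const MAX_BATCHED_RECORDS: usize = 128;

    // Keep at most MAX_BATCHED_RECORDS per collection; evict the oldest
    // entry instead of blocking the producer once the bound is hit.
    fn push_bounded(by_nsid: &mut HashMap<String, VecDeque<u64>>, nsid: &str, record: u64) {
        let queue = by_nsid.entry(nsid.to_string()).or_default();
        if queue.len() >= MAX_BATCHED_RECORDS {
            queue.pop_front(); // drop the oldest batched record
        }
        queue.push_back(record);
    }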
···
 
         match event.kind {
             EventKind::Commit => {
-                let commit = event.commit.ok_or(FirehoseEventError::CommitEventMissingCommit)?;
+                let commit = event
+                    .commit
+                    .ok_or(FirehoseEventError::CommitEventMissingCommit)?;
                 let (commit, nsid) = UFOsCommit::from_commit_info(commit, event.did, event.cursor)?;
                 self.handle_commit(commit, nsid).await?;
             }
             EventKind::Account => {
-                let account = event.account.ok_or(FirehoseEventError::AccountEventMissingAccount)?;
+                let account = event
+                    .account
+                    .ok_or(FirehoseEventError::AccountEventMissingAccount)?;
                 if !account.active {
                     self.handle_delete_account(event.did, event.cursor).await?;
                 }
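Note: both rewrapped arms use the same `Option` → `Result` idiom: `ok_or` attaches a typed error to a missing field so `?` can propagate it. A self-contained sketch (the error enum here is a placeholder, not the crate's `FirehoseEventError`):

    #[derive(Debug)]
    enum EventError {
        MissingCommit,
    }

    fn require_commit(commit: Option<String>) -> Result<String, EventError> {
        // None becomes Err(EventError::MissingCommit); Some(v) passes through.
        commit.ok_or(EventError::MissingCommit)
    }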
···
 
         // if the queue is empty and we have enough, send immediately. otherwise, let the current batch fill up.
         if let Some(earliest) = &self.current_batch.initial_cursor {
-            if event.cursor.duration_since(earliest)?.as_secs_f64() > MIN_BATCH_SPAN_SECS &&
-                self.batch_sender.capacity() == BATCH_QUEUE_SIZE {
+            if event.cursor.duration_since(earliest)?.as_secs_f64() > MIN_BATCH_SPAN_SECS
+                && self.batch_sender.capacity() == BATCH_QUEUE_SIZE
+            {
                 log::info!("queue empty: immediately sending batch.");
                 self.send_current_batch_now().await?;
             }
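Note: the `capacity() == BATCH_QUEUE_SIZE` check works because a bounded tokio sender's `capacity()` reports the permits currently available, so it equals the configured size exactly when nothing is queued. A sketch of that invariant (`QUEUE_SIZE` stands in for `BATCH_QUEUE_SIZE`):

    use tokio::sync::mpsc::channel;

    const QUEUE_SIZE: usize = 4; // stand-in for BATCH_QUEUE_SIZE

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = channel::<u32>(QUEUE_SIZE);
        assert_eq!(tx.capacity(), QUEUE_SIZE); // empty queue: full capacity
        tx.send(1).await.unwrap();
        assert_eq!(tx.capacity(), QUEUE_SIZE - 1); // one item in flight
        let _ = rx.recv().await;
        assert_eq!(tx.capacity(), QUEUE_SIZE); // drained again
    }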
···
     }
 
     async fn handle_commit(&mut self, commit: UFOsCommit, nsid: Nsid) -> anyhow::Result<()> {
-        if !self
-            .current_batch
-            .batch
-            .commits_by_nsid
-            .contains_key(&nsid)
+        if !self.current_batch.batch.commits_by_nsid.contains_key(&nsid)
             && self.current_batch.batch.commits_by_nsid.len() >= MAX_BATCHED_COLLECTIONS
         {
             self.send_current_batch_now().await?;
         }
 
-        self
-            .current_batch
+        self.current_batch
             .batch
             .commits_by_nsid
             .entry(nsid)
+1 -1 ufos/src/db_types.rs
+1 -1 ufos/src/error.rs
+10 -7 ufos/src/lib.rs
···
 pub mod storage_fjall;
 pub mod store_types;
 
-use jetstream::events::{Cursor, CommitEvent, CommitOp};
-use jetstream::exports::{Did, Nsid, RecordKey};
-use std::collections::{HashMap, VecDeque};
-use serde_json::value::RawValue;
 use cardinality_estimator::CardinalityEstimator;
 use error::FirehoseEventError;
+use jetstream::events::{CommitEvent, CommitOp, Cursor};
+use jetstream::exports::{Did, Nsid, RecordKey};
+use serde_json::value::RawValue;
+use std::collections::{HashMap, VecDeque};
 
 #[derive(Debug, Default, Clone)]
 pub struct CollectionCommits {
···
 }
 
 #[derive(Debug, Clone)]
-pub struct PutAction { record: Box<RawValue>, is_update: bool }
+pub struct PutAction {
+    record: Box<RawValue>,
+    is_update: bool,
+}
 
 #[derive(Debug, Clone)]
 pub struct UFOsCommit {
···
     pub fn from_commit_info(
         commit: CommitEvent,
         did: Did,
-        cursor: Cursor
+        cursor: Cursor,
     ) -> Result<(Self, Nsid), FirehoseEventError> {
         let action = match commit.operation {
             CommitOp::Delete => CommitAction::Cut,
             cru @ _ => CommitAction::Put(PutAction {
                 record: commit.record.ok_or(FirehoseEventError::CruMissingRecord)?,
                 is_update: cru == CommitOp::Update,
-            })
+            }),
         };
         let batched = Self {
             cursor,
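Note: the `cru @ _` arm binds the non-delete operation so the `Put` body can test for `Update` (clippy would spell it plain `cru`; behavior is identical). A reduced sketch with placeholder enums:

    #[derive(PartialEq, Clone, Copy)]
    enum Op {
        Create,
        Update,
        Delete,
    }

    enum Action {
        Cut,
        Put { is_update: bool },
    }

    fn to_action(op: Op) -> Action {
        match op {
            Op::Delete => Action::Cut,
            // bind the remaining Create/Update so it can be inspected below
            cru => Action::Put {
                is_update: cru == Op::Update,
            },
        }
    }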
+8 -3 ufos/src/main.rs
···
 
     let args = Args::parse();
     let jetstream = args.jetstream.clone();
-    let (_read_store, mut write_store, cursor) =
-        FjallStorage::init(args.data, jetstream, args.jetstream_force, Default::default())?;
+    let (_read_store, mut write_store, cursor) = FjallStorage::init(
+        args.data,
+        jetstream,
+        args.jetstream_force,
+        Default::default(),
+    )?;
 
     // println!("starting server with storage...");
     // let serving = server::serve(storage.clone());
73
77
write_store.step_rollup()?;
74
78
}
75
79
Ok::<(), StorageError>(())
76
-
}).await??;
80
+
})
81
+
.await??;
77
82
78
83
// let r = storage.receive(batches).await;
79
84
log::warn!("storage.receive ended with");
+1 -1 ufos/src/server.rs
+7 -4 ufos/src/storage.rs
···
-use std::path::Path;
+use crate::{error::StorageError, Cursor, EventBatch};
 use jetstream::exports::Nsid;
-use crate::{error::StorageError, Cursor, EventBatch};
+use std::path::Path;
 
-pub trait StorageWhatever<R: StoreReader, W: StoreWriter, C> { // TODO: extract this
+pub trait StorageWhatever<R: StoreReader, W: StoreWriter, C> {
+    // TODO: extract this
     fn init(
         path: impl AsRef<Path>,
         endpoint: String,
         force_endpoint: bool,
         config: C,
-    ) -> Result<(R, W, Option<Cursor>), StorageError> where Self: Sized;
+    ) -> Result<(R, W, Option<Cursor>), StorageError>
+    where
+        Self: Sized;
 }
 
 pub trait StoreWriter {
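Note: the reflowed `where Self: Sized` bound is what keeps a trait with a receiver-less constructor usable as a trait object. A minimal sketch of the rule (placeholder trait, not this crate's API):

    trait Store {
        // An associated function with no `self` receiver: the trait stays
        // object-safe only because of the `Self: Sized` bound.
        fn init(path: &str) -> Result<(), String>
        where
            Self: Sized;

        fn name(&self) -> &'static str;
    }

    fn describe(store: &dyn Store) -> &'static str {
        store.name() // `dyn Store` compiles thanks to the bound on `init`
    }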
+65 -65 ufos/src/storage_fjall.rs
···
+use crate::db_types::{db_complete, DbBytes, DbStaticStr, EncodingError, StaticStr};
+use crate::error::StorageError;
 use crate::storage::{StorageWhatever, StoreReader, StoreWriter};
-use crate::db_types::{db_complete, DbBytes, DbStaticStr, EncodingError, StaticStr};
 use crate::store_types::{
     ByCollectionKey, ByCollectionValue, ByCursorSeenKey, ByCursorSeenValue, ByIdKey, ByIdValue,
-    JetstreamCursorKey, JetstreamCursorValue, JetstreamEndpointKey, JetstreamEndpointValue,
-    ModCursorKey, ModCursorValue, ModQueueItemKey, ModQueueItemStringValue, ModQueueItemValue,
-    RollupCursorKey, RollupCursorValue, SeenCounter,
-    NsidRecordFeedKey, NsidRecordFeedVal, RecordLocationKey, RecordLocationVal,
-    LiveRecordsKey, LiveRecordsValue, LiveDidsKey, LiveDidsValue,
-    DeleteAccountQueueKey, DeleteAccountQueueVal,
-    NewRollupCursorKey, NewRollupCursorValue,
-    TakeoffKey, TakeoffValue,
+    DeleteAccountQueueKey, DeleteAccountQueueVal, JetstreamCursorKey, JetstreamCursorValue,
+    JetstreamEndpointKey, JetstreamEndpointValue, LiveDidsKey, LiveDidsValue, LiveRecordsKey,
+    LiveRecordsValue, ModCursorKey, ModCursorValue, ModQueueItemKey, ModQueueItemStringValue,
+    ModQueueItemValue, NewRollupCursorKey, NewRollupCursorValue, NsidRecordFeedKey,
+    NsidRecordFeedVal, RecordLocationKey, RecordLocationVal, RollupCursorKey, RollupCursorValue,
+    SeenCounter, TakeoffKey, TakeoffValue,
 };
-use crate::{
-    DeleteAccount, Did, EventBatch, Nsid, RecordKey, CommitAction,
-};
-use crate::error::StorageError;
+use crate::{CommitAction, DeleteAccount, Did, EventBatch, Nsid, RecordKey};
 use fjall::{
     Batch as FjallBatch, CompressionType, Config, Keyspace, PartitionCreateOptions, PartitionHandle,
 };
···
 struct Db {
     keyspace: Keyspace,
     global: PartitionHandle,
-
 }
 
 /**
···
         let js_cursor = get_static_neu::<JetstreamCursorKey, JetstreamCursorValue>(&global)?;
 
         if js_cursor.is_some() {
-            let stored_endpoint = get_static_neu::<JetstreamEndpointKey, JetstreamEndpointValue>(&global)?;
+            let stored_endpoint =
+                get_static_neu::<JetstreamEndpointKey, JetstreamEndpointValue>(&global)?;
 
-            let JetstreamEndpointValue(stored) = stored_endpoint
-                .ok_or(StorageError::InitError("found cursor but missing js_endpoint, refusing to start.".to_string()))?;
+            let JetstreamEndpointValue(stored) = stored_endpoint.ok_or(StorageError::InitError(
+                "found cursor but missing js_endpoint, refusing to start.".to_string(),
+            ))?;
 
             if stored != endpoint {
                 if force_endpoint {
···
                 &global,
                 JetstreamEndpointValue(endpoint.to_string()),
             )?;
-            insert_static_neu::<TakeoffKey>(
-                &global,
-                Cursor::at(SystemTime::now()),
-            )?;
-            insert_static_neu::<NewRollupCursorKey>(
-                &global,
-                Cursor::from_start(),
-            )?;
+            insert_static_neu::<TakeoffKey>(&global, Cursor::at(SystemTime::now()))?;
+            insert_static_neu::<NewRollupCursorKey>(&global, Cursor::from_start())?;
         }
 
         let reader = FjallReader {
···
             records: records.clone(),
             rollups: rollups.clone(),
         };
-        let writer = FjallWriter { keyspace, global, feeds, records, rollups, queues };
+        let writer = FjallWriter {
+            keyspace,
+            global,
+            feeds,
+            records,
+            rollups,
+            queues,
+        };
         Ok((reader, writer, js_cursor))
     }
 }
···
 }
 
 impl StoreReader for FjallReader {
-    fn get_total_by_collection(&self, collection: &jetstream::exports::Nsid) -> Result<u64, StorageError> {
+    fn get_total_by_collection(
+        &self,
+        collection: &jetstream::exports::Nsid,
+    ) -> Result<u64, StorageError> {
         // TODO: start from rollup
         let full_range = LiveRecordsKey::range_from_cursor(Cursor::from_start())?;
         let mut total = 0;
···
     pub fn step_rollup(&mut self) -> Result<(), StorageError> {
         // let mut batch = self.keyspace.batch();
 
-        let rollup_cursor = get_static_neu::<NewRollupCursorKey, NewRollupCursorValue>(&self.global)?
-            .ok_or(StorageError::BadStateError("Could not find current rollup cursor".to_string()))?;
+        let rollup_cursor =
+            get_static_neu::<NewRollupCursorKey, NewRollupCursorValue>(&self.global)?.ok_or(
+                StorageError::BadStateError("Could not find current rollup cursor".to_string()),
+            )?;
 
         // timelies
         let live_records_range = LiveRecordsKey::range_from_cursor(rollup_cursor)?;
···
         let next_timely = timely_iter
             .next()
             .transpose()?
-            .map(|(key_bytes, val_bytes)|
-                db_complete::<LiveRecordsKey>(&key_bytes)
-                    .map(|k| (k, val_bytes)))
+            .map(|(key_bytes, val_bytes)| {
+                db_complete::<LiveRecordsKey>(&key_bytes).map(|k| (k, val_bytes))
+            })
             .transpose()?;
 
         // delete accounts
-        let delete_accounts_range = DeleteAccountQueueKey::new(rollup_cursor).range_to_prefix_end()?;
+        let delete_accounts_range =
+            DeleteAccountQueueKey::new(rollup_cursor).range_to_prefix_end()?;
 
-        let next_delete = self.queues.range(delete_accounts_range)
+        let next_delete = self
+            .queues
+            .range(delete_accounts_range)
             .next()
             .transpose()?
-            .map(|(key_bytes, val_bytes)|
-                db_complete::<DeleteAccountQueueKey>(&key_bytes)
-                    .map(|k| (k.suffix, val_bytes)))
+            .map(|(key_bytes, val_bytes)| {
+                db_complete::<DeleteAccountQueueKey>(&key_bytes).map(|k| (k.suffix, val_bytes))
+            })
             .transpose()?;
 
         match (next_timely, next_delete) {
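Note: both chains above flip `Option`/`Result` twice: the first `transpose()?` surfaces the iterator's storage error, the closure decodes the key fallibly, and the final `transpose` surfaces the decode error. A standalone sketch with `String` standing in for the error and decoded-key types:

    fn first_decoded(
        mut iter: impl Iterator<Item = Result<(Vec<u8>, Vec<u8>), String>>,
    ) -> Result<Option<(String, Vec<u8>)>, String> {
        iter.next() // Option<Result<(k, v), E>>
            .transpose()? // -> Option<(k, v)>, or early-return E
            .map(|(key_bytes, val_bytes)| {
                // stand-in for the fallible db_complete::<K>(&key_bytes)
                String::from_utf8(key_bytes)
                    .map_err(|e| e.to_string())
                    .map(|k| (k, val_bytes))
            }) // Option<Result<(K, v), E>>
            .transpose() // -> Result<Option<(K, v)>, E>
    }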
···
 impl StoreWriter for FjallWriter {
     fn insert_batch(&mut self, event_batch: EventBatch) -> Result<(), StorageError> {
         if event_batch.is_empty() {
-            return Ok(())
+            return Ok(());
         }
 
         let mut batch = self.keyspace.batch();
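Note: `insert_batch` stages every write on `keyspace.batch()` so the whole event batch commits atomically. A minimal sketch of fjall's write-batch flow, based on its documented API (path and keys are illustrative):

    use fjall::{Config, PartitionCreateOptions};

    fn main() -> Result<(), fjall::Error> {
        let keyspace = Config::new("/tmp/example-keyspace").open()?;
        let items = keyspace.open_partition("items", PartitionCreateOptions::default())?;

        let mut batch = keyspace.batch();
        batch.insert(&items, "key-1", "value-1");
        batch.insert(&items, "key-2", "value-2");
        batch.commit()?; // both writes land together, or neither does

        Ok(())
    }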
···
                 }
                 CommitAction::Put(put_action) => {
                     let feed_key = NsidRecordFeedKey::from_pair(nsid.clone(), commit.cursor);
-                    let feed_val: NsidRecordFeedVal = (&commit.did, &commit.rkey, commit.rev.as_str()).into();
+                    let feed_val: NsidRecordFeedVal =
+                        (&commit.did, &commit.rkey, commit.rev.as_str()).into();
                     batch.insert(
                         &self.feeds,
                         feed_key.to_db_bytes()?,
                         feed_val.to_db_bytes()?,
                     );
 
-                    let location_val: RecordLocationVal = (commit.cursor, commit.rev.as_str(), put_action).into();
+                    let location_val: RecordLocationVal =
+                        (commit.cursor, commit.rev.as_str(), put_action).into();
                     batch.insert(
                         &self.records,
                         &location_key.to_db_bytes()?,
···
     }
 }
 
-
 #[derive(Clone)]
 pub struct Storage {
     /// horrible: gate all db access behind this to force global serialization to avoid deadlock
···
             PartitionCreateOptions::default().compression(CompressionType::None),
         )?;
         Ok(Self {
-            db: Db {
-                keyspace,
-                global,
-            },
+            db: Db { keyspace, global },
         })
     }
 
···
 }
 
 /// Get a value from a fixed key
-fn get_static_neu<K: StaticStr, V: DbBytes>(global: &PartitionHandle) -> Result<Option<V>, StorageError> {
+fn get_static_neu<K: StaticStr, V: DbBytes>(
+    global: &PartitionHandle,
+) -> Result<Option<V>, StorageError> {
     let key_bytes = DbStaticStr::<K>::default().to_db_bytes()?;
     let value = global
         .get(&key_bytes)?
···
 }
 
 /// Get stats that haven't been rolled up yet
-fn get_unrolled_collection_seen(
-    global: &PartitionHandle,
-    collection: Nsid,
-) -> anyhow::Result<u64> {
+fn get_unrolled_collection_seen(global: &PartitionHandle, collection: Nsid) -> anyhow::Result<u64> {
     let range =
         if let Some(cursor_value) = get_static::<RollupCursorKey, RollupCursorValue>(global)? {
             eprintln!("found existing cursor");
···
     Ok(collection_total)
 }
 
-fn get_unrolled_top_collections(
-    global: &PartitionHandle,
-) -> anyhow::Result<HashMap<String, u64>> {
+fn get_unrolled_top_collections(global: &PartitionHandle) -> anyhow::Result<HashMap<String, u64>> {
     let range =
         if let Some(cursor_value) = get_static::<RollupCursorKey, RollupCursorValue>(global)? {
             eprintln!("found existing cursor");
···
 
         log::trace!("delete_record: iterate over up to current cursor...");
 
-        for (i, pair) in self
-            .global
-            .range(key_prefix_bytes..key_limit)
-            .enumerate()
-        {
+        for (i, pair) in self.global.range(key_prefix_bytes..key_limit).enumerate() {
             log::trace!("delete_record iter {i}: found");
             // find all (hopefully 1)
             let (key_bytes, _) = pair?;
···
     )
 }
 
-
-
 #[cfg(test)]
 mod tests {
+    use super::*;
+    use crate::{CollectionCommits, UFOsCommit};
+    use jetstream::events::{CommitEvent, CommitOp};
     use jetstream::exports::Cid;
-    use jetstream::events::{CommitEvent, CommitOp};
     use serde_json::value::RawValue;
-    use crate::{UFOsCommit, CollectionCommits};
-    use super::*;
 
     #[test]
     fn test_hello() -> anyhow::Result<()> {
···
             rev: "asdf".to_string(),
             operation: CommitOp::Create,
             record: Some(*Box::new(RawValue::from_string("{}".to_string()).unwrap())),
-            cid: Some("bafyreidofvwoqvd2cnzbun6dkzgfucxh57tirf3ohhde7lsvh4fu3jehgy".parse().unwrap()),
+            cid: Some(
+                "bafyreidofvwoqvd2cnzbun6dkzgfucxh57tirf3ohhde7lsvh4fu3jehgy"
+                    .parse()
+                    .unwrap(),
+            ),
         };
-        let (commit, collection) = UFOsCommit::from_commit_info(event, did.clone(), Cursor::from_raw_u64(100))?;
+        let (commit, collection) =
+            UFOsCommit::from_commit_info(event, did.clone(), Cursor::from_raw_u64(100))?;
 
         let mut commits = CollectionCommits::default();
         commits.total_seen += 1;
+15 -14 ufos/src/store_types.rs
···
-use cardinality_estimator::CardinalityEstimator;
 use crate::db_types::{
-    DbBytes, DbConcat, DbEmpty, DbStaticStr, EncodingError, StaticStr, UseBincodePlz, SerdeBytes,
+    DbBytes, DbConcat, DbEmpty, DbStaticStr, EncodingError, SerdeBytes, StaticStr, UseBincodePlz,
 };
-use crate::{Cursor, Did, Nsid, RecordKey, UFOsCommit, PutAction};
+use crate::{Cursor, Did, Nsid, PutAction, RecordKey, UFOsCommit};
 use bincode::{Decode, Encode};
+use cardinality_estimator::CardinalityEstimator;
 use std::ops::Range;
 
 /// key format: ["js_cursor"]
···
 /// value format: [rollup_cursor(Cursor)|collection(Nsid)]
 pub type RollupCursorValue = DbConcat<Cursor, Nsid>;
 
-
 /// key format: ["rollup_cursor"]
 #[derive(Debug, PartialEq)]
 pub struct NewRollupCursorKey {}
···
 // pub type NewRollupCursorKey = DbStaticStr<_NewRollupCursorKey>;
 /// value format: [rollup_cursor(Cursor)|collection(Nsid)]
 pub type NewRollupCursorValue = Cursor;
-
 
 /// key format: ["js_endpoint"]
 #[derive(Debug, PartialEq)]
···
 }
 pub type TakeoffValue = Cursor;
 
-
 /// key format: ["js_endpoint"]
 #[derive(Debug, PartialEq)]
 pub struct JetstreamEndpointKey {}
···
     fn from((did, rkey, rev): (&Did, &RecordKey, &str)) -> Self {
         Self::from_pair(
             did.clone(),
-            DbConcat::from_pair(rkey.clone(), rev.to_string()))
+            DbConcat::from_pair(rkey.clone(), rev.to_string()),
+        )
     }
 }
 
 pub type RecordLocationKey = DbConcat<Did, DbConcat<Nsid, RecordKey>>;
 impl From<(&UFOsCommit, &Nsid)> for RecordLocationKey {
     fn from((commit, collection): (&UFOsCommit, &Nsid)) -> Self {
-        Self::from_pair(commit.did.clone(), DbConcat::from_pair(collection.clone(), commit.rkey.clone()))
+        Self::from_pair(
+            commit.did.clone(),
+            DbConcat::from_pair(collection.clone(), commit.rkey.clone()),
+        )
     }
 }
 #[derive(Debug, PartialEq, Encode, Decode)]
···
     }
 }
 #[derive(Debug, PartialEq, serde::Serialize, serde::Deserialize)]
-pub struct LiveDidsValue(pub CardinalityEstimator::<Did>);
+pub struct LiveDidsValue(pub CardinalityEstimator<Did>);
 impl SerdeBytes for LiveDidsValue {}
 impl DbBytes for LiveDidsValue {
     fn to_db_bytes(&self) -> Result<Vec<u8>, EncodingError> {
···
     }
 }
 pub type DeleteAccountQueueVal = Did;
-
 
 #[derive(Debug, Clone, Encode, Decode)]
 pub struct SeenCounter(pub u64);
···
     }
 }
 
-
 const HOUR_IN_MICROS: u64 = 1_000_000 * 3600;
 #[derive(Debug, Copy, Clone, PartialEq, PartialOrd)]
 pub struct HourTrucatedCursor(u64);
···
     pub fn try_from_raw_u64(time_us: u64) -> Result<Self, EncodingError> {
         let rem = time_us % HOUR_IN_MICROS;
         if rem != 0 {
-            return Err(EncodingError::InvalidHourlyTruncated(rem))
+            return Err(EncodingError::InvalidHourlyTruncated(rem));
        }
         Ok(Self(time_us))
     }
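Note: `try_from_raw_u64` only accepts timestamps already aligned to an hour boundary (`rem == 0`). A quick sketch of producing such a value by truncating arbitrary microseconds (the free function is hypothetical; the real type may offer its own constructor):

    const HOUR_IN_MICROS: u64 = 1_000_000 * 3600;

    fn truncate_to_hour(time_us: u64) -> u64 {
        time_us - (time_us % HOUR_IN_MICROS) // round down to the hour
    }

    fn main() {
        let t: u64 = 1_700_000_123_456_789; // arbitrary microseconds
        let aligned = truncate_to_hour(t);
        assert_eq!(aligned % HOUR_IN_MICROS, 0); // passes the rem != 0 check
    }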
···
     }
 }
 
-
 #[cfg(test)]
 mod test {
-    use super::{ByCollectionKey, ByCollectionValue, Cursor, Did, EncodingError, Nsid, RecordKey, HourTrucatedCursor, HOUR_IN_MICROS};
+    use super::{
+        ByCollectionKey, ByCollectionValue, Cursor, Did, EncodingError, HourTrucatedCursor, Nsid,
+        RecordKey, HOUR_IN_MICROS,
+    };
     use crate::db_types::DbBytes;
 
     #[test]