Tracks lexicons and how many times they have appeared on the Jetstream.
3
fork

Configure Feed

Select the types of activity you want to include in your feed.

feat(server): improve the logging of sync and compaction

ptr.pet 9036e3a9 ce8f1ca0

verified
+34 -29
+13 -6
server/src/db/handle.rs
···
  68   68          }
  69   69      }
  70   70
       71  +    pub fn span(&self) -> tracing::Span {
       72  +        tracing::info_span!("handle", nsid = %self.nsid)
       73  +    }
       74  +
  71   75      pub fn nsid(&self) -> &SmolStr {
  72   76          &self.nsid
  73   77      }
···
 101  105          range: impl RangeBounds<u64>,
 102  106          sort: bool,
 103  107      ) -> AppResult<()> {
      108  +        let _span = self.span().entered();
      109  +
 104  110          let start_limit = match range.start_bound().cloned() {
 105  111              Bound::Included(start) => start,
 106  112              Bound::Excluded(start) => start.saturating_add(1),
···
 120  126              .range(start_key..end_key)
 121  127              .collect::<Result<Vec<_>, _>>()?;
 122  128          if blocks_to_compact.len() < 2 {
 123       -            tracing::info!("{}: nothing to compact", self.nsid);
 124  129              return Ok(());
 125  130          }
···
 163  168              self.tree.insert(block.key, block.data)?;
 164  169          }
 165  170
      171  +        let reduction =
      172  +            ((start_blocks_size - end_blocks_size) as f64 / start_blocks_size as f64) * 100.0;
 166  173          tracing::info!(
 167       -            "{}: compacted {} blocks to {} blocks ({}% reduction)",
 168       -            self.nsid,
 169       -            start_blocks_size,
 170       -            end_blocks_size,
 171       -            ((start_blocks_size - end_blocks_size) as f64 / start_blocks_size as f64) * 100.0,
      174  +            {
      175  +                start = start_blocks_size,
      176  +                end = end_blocks_size,
      177  +            },
      178  +            "blocks compacted {reduction:.2}%",
 172  179          );
 173  180
 174  181          Ok(())
+21 -23
server/src/db/mod.rs
···
  21   21      db::handle::{ItemDecoder, LexiconHandle},
  22   22      error::{AppError, AppResult},
  23   23      jetstream::JetstreamEvent,
  24       -    utils::{RateTracker, ReadVariableExt, varints_unsigned_encoded},
       24  +    utils::{CLOCK, RateTracker, ReadVariableExt, varints_unsigned_encoded},
  25   25  };
  26   26
  27   27  mod block;
···
 160  160      }
 161  161
 162  162      pub fn sync(&self, all: bool) -> AppResult<()> {
      163  +        let start = CLOCK.now();
 163  164          // prepare all the data
 164  165          let mut data = Vec::with_capacity(self.hits.len());
 165  166          let _guard = scc::ebr::Guard::new();
···
 180  181              let count = handle.item_count();
 181  182              let data_count = count / block_size;
 182  183              if count > 0 && (all || data_count > 0 || is_too_old) {
 183       -                for i in 0..data_count {
 184       -                    nsid_data.push((i, handle.clone(), block_size));
      184  +                for _ in 0..data_count {
      185  +                    nsid_data.push((handle.clone(), block_size));
 185  186                      total_count += block_size;
 186  187                  }
 187  188                  // only sync remainder if we haven't met block size
 188  189                  let remainder = count % block_size;
 189  190                  if (all || data_count == 0) && remainder > 0 {
 190       -                    nsid_data.push((data_count, handle.clone(), remainder));
      191  +                    nsid_data.push((handle.clone(), remainder));
 191  192                      total_count += remainder;
 192  193                  }
 193  194              }
      195  +            let _span = handle.span().entered();
 194  196              tracing::info!(
 195       -                "{}: will sync {} blocks ({} count)",
 196       -                handle.nsid(),
 197       -                nsid_data.len(),
 198       -                total_count,
      197  +                {blocks = %nsid_data.len(), count = %total_count},
      198  +                "will encode & sync",
 199  199              );
 200  200              data.push(nsid_data);
 201  201          }
···
 206  206              .map(|chunk| {
 207  207                  chunk
 208  208                      .into_iter()
 209       -                .map(|(i, handle, max_block_size)| {
 210       -                    (i, handle.take_block_items(max_block_size), handle)
      209  +                .map(|(handle, max_block_size)| {
      210  +                    (handle.take_block_items(max_block_size), handle)
 211  211                      })
 212  212                      .collect::<Vec<_>>()
 213  213                      .into_par_iter()
 214       -                .map(|(i, items, handle)| {
      214  +                .map(|(items, handle)| {
 215  215                      let count = items.len();
 216  216                      let block = LexiconHandle::encode_block_from_items(items, count)?;
 217       -                    tracing::info!(
 218       -                        "{}: encoded block with {} items",
 219       -                        handle.nsid(),
 220       -                        block.written,
 221       -                    );
 222       -                    AppResult::Ok((i, block, handle))
      217  +                    AppResult::Ok((block, handle))
 223  218                  })
 224  219                  .collect::<Result<Vec<_>, _>>()
 225  220          })
 226  221          .try_for_each(|chunk| {
 227  222              let chunk = chunk?;
 228       -            for (i, block, handle) in chunk {
 229       -                self.sync_pool
 230       -                    .execute(move || match handle.insert(block.key, block.data) {
      223  +            for (block, handle) in chunk {
      224  +                self.sync_pool.execute(move || {
      225  +                    let _span = handle.span().entered();
      226  +                    match handle.insert(block.key, block.data) {
 231  227                          Ok(_) => {
 232       -                            tracing::info!("{}: [{i}] synced {}", handle.nsid(), block.written)
      228  +                            tracing::info!({count = %block.written}, "synced")
 233  229                          }
 234       -                    Err(err) => tracing::error!("failed to sync block: {}", err),
 235       -                });
      230  +                    Err(err) => tracing::error!({ err = %err }, "failed to sync block"),
      231  +                    }
      232  +                });
 236  233              }
 237  234              AppResult::Ok(())
 238  235          })?;
 239  236          self.sync_pool.join();
      237  +        tracing::info!(time = %start.elapsed().as_secs_f64(), "synced all blocks");
 240  238
 241  239          Ok(())
 242  240      }