Constellation, Spacedust, Slingshot, UFOs: atproto crates and services for microcosm

new clippies
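The changes below appear to address clippy's uninlined_format_args lint: when a format argument is a plain identifier, it can be captured directly inside the format string instead of being passed positionally. A minimal before/after sketch (reusing the limit message from ufos/src/server/mod.rs; the surrounding main is illustrative, not from the commit):

fn main() {
    let limit = 500;
    // Before: positional argument, flagged by clippy::uninlined_format_args.
    let old = format!("limit not in 1..=200: {}", limit);
    // After: the identifier is captured directly in the format string.
    let new = format!("limit not in 1..=200: {limit}");
    assert_eq!(old, new);
}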

Changed files
+12 -12
+1 -1
.github/workflows/checks.yml
···
      - name: get nightly toolchain for jetstream fmt
        run: rustup toolchain install nightly --allow-downgrade -c rustfmt
      - name: fmt
-       run: cargo fmt --package links --package constellation --package ufos -- --check
+       run: cargo fmt --package links --package constellation --package ufos --package spacedust --package who-am-i -- --check
      - name: fmt jetstream (nightly)
        run: cargo +nightly fmt --package jetstream -- --check
      - name: clippy
+1 -1
spacedust/src/server.rs
···
) -> Result<MultiSubscribeQuery, HttpError> {
    let raw_query = ctx.request.uri().query().unwrap_or("");
    let q = serde_qs::from_str(raw_query).map_err(|e| {
-       HttpError::for_bad_request(None, format!("unable to parse query string: {}", e))
+       HttpError::for_bad_request(None, format!("unable to parse query string: {e}"))
    })?;
    Ok(q)
}
+1 -1
ufos/src/consumer.rs
···

    let beginning = match self.current_batch.initial_cursor.map(|c| c.elapsed()) {
        None => "unknown".to_string(),
-       Some(Ok(t)) => format!("{:?}", t),
+       Some(Ok(t)) => format!("{t:?}"),
        Some(Err(e)) => format!("+{:?}", e.duration()),
    };
    log::trace!(
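Only bare identifiers can be captured this way, which is why the e.duration() arm above stays in positional form: format-string capture does not accept arbitrary expressions. A small sketch of the distinction, assuming a SystemTime-based timer (the consumer's actual cursor type isn't shown here; the main function is illustrative):

use std::time::SystemTime;

fn main() {
    let t0 = SystemTime::now();
    let msg = match t0.elapsed() {
        // A plain identifier can be inlined into the format string.
        Ok(t) => format!("{t:?}"),
        // A method call is an expression, so it remains a positional argument;
        // clippy's uninlined_format_args does not flag this form.
        Err(e) => format!("+{:?}", e.duration()),
    };
    println!("elapsed: {msg}");
}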
+2 -2
ufos/src/db_types.rs
···
    ] {
        let serialized = s.to_string().to_db_bytes()?;
        let prefixed = String::sub_prefix(pre)?;
-       assert_eq!(serialized.starts_with(&prefixed), is_pre, "{}", desc);
+       assert_eq!(serialized.starts_with(&prefixed), is_pre, "{desc}");
    }
    Ok(())
}
···
    ] {
        let serialized = Nsid::new(s.to_string()).unwrap().to_db_bytes()?;
        let prefixed = Nsid::sub_prefix(pre)?;
-       assert_eq!(serialized.starts_with(&prefixed), is_pre, "{}", desc);
+       assert_eq!(serialized.starts_with(&prefixed), is_pre, "{desc}");
    }
    Ok(())
}
+1 -1
ufos/src/server/collections_query.rs
···
) -> Result<MultiCollectionQuery, HttpError> {
    let raw_query = ctx.request.uri().query().unwrap_or("");
    let q = serde_qs::from_str(raw_query).map_err(|e| {
-       HttpError::for_bad_request(None, format!("unable to parse query string: {}", e))
+       HttpError::for_bad_request(None, format!("unable to parse query string: {e}"))
    })?;
    Ok(q)
}
+5 -5
ufos/src/server/mod.rs
···
    };

    if !(1..=200).contains(&limit) {
-       let msg = format!("limit not in 1..=200: {}", limit);
+       let msg = format!("limit not in 1..=200: {limit}");
        return Err(HttpError::for_bad_request(None, msg));
    }

···
    };

    if !(1..=200).contains(&limit) {
-       let msg = format!("limit not in 1..=200: {}", limit);
+       let msg = format!("limit not in 1..=200: {limit}");
        return Err(HttpError::for_bad_request(None, msg));
    }

···

    let step = if let Some(secs) = q.step {
        if secs < 3600 {
-           let msg = format!("step is too small: {}", secs);
+           let msg = format!("step is too small: {secs}");
            Err(HttpError::for_bad_request(None, msg))?;
        }
        (secs / 3600) * 3600 // trucate to hour
···
    };

    let nsid = Nsid::new(q.collection).map_err(|e| {
-       HttpError::for_bad_request(None, format!("collection was not a valid NSID: {:?}", e))
+       HttpError::for_bad_request(None, format!("collection was not a valid NSID: {e:?}"))
    })?;

    let (range_cursors, series) = storage
···
        ..Default::default()
    })
    .start()
-   .map_err(|error| format!("failed to start server: {}", error))?
+   .map_err(|error| format!("failed to start server: {error}"))?
    .await
}
+1 -1
ufos/src/storage_fjall.rs
···
        }
    }
    let dt = t0.elapsed();
-   log::trace!("finished trimming {n} nsids in {:?}: {total_danglers} dangling and {total_deleted} total removed.", dt);
+   log::trace!("finished trimming {n} nsids in {dt:?}: {total_danglers} dangling and {total_deleted} total removed.");
    histogram!("storage_trim_dirty_nsids").record(completed.len() as f64);
    histogram!("storage_trim_duration").record(dt.as_micros() as f64);
    counter!("storage_trim_removed", "dangling" => "true").increment(total_danglers as u64);