Constellation, Spacedust, Slingshot, UFOs: atproto crates and services for microcosm

new clippies
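The diffs below all match clippy's `uninlined_format_args` lint: a format string that passed a plain variable as a trailing positional argument now captures it inline. A minimal before/after sketch (the `err` binding is hypothetical, not from the source):

```rust
fn main() {
    let err = "boom";
    // Before: clippy::uninlined_format_args flags the positional argument.
    let _old = format!("unable to parse query string: {}", err);
    // After: the identifier is captured directly in the format string
    // (implicit format-args capture, stable since Rust 1.58).
    let new = format!("unable to parse query string: {err}");
    println!("{new}");
}
```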

Changed files (+12 −12):

- .github/workflows/checks.yml (+1 −1)
- spacedust/src/server.rs (+1 −1)
- ufos/src/consumer.rs (+1 −1)
- ufos/src/db_types.rs (+2 −2)
- ufos/src/server/collections_query.rs (+1 −1)
- ufos/src/server/mod.rs (+5 −5)
- ufos/src/storage_fjall.rs (+1 −1)
.github/workflows/checks.yml (+1 −1)

```diff
@@ -28,7 +28,7 @@
       - name: get nightly toolchain for jetstream fmt
         run: rustup toolchain install nightly --allow-downgrade -c rustfmt
       - name: fmt
-        run: cargo fmt --package links --package constellation --package ufos -- --check
+        run: cargo fmt --package links --package constellation --package ufos --package spacedust --package who-am-i -- --check
       - name: fmt jetstream (nightly)
         run: cargo +nightly fmt --package jetstream -- --check
       - name: clippy
```
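The jetstream crate keeps its own fmt step on nightly, presumably because its rustfmt configuration uses unstable options; the stable fmt check above simply grows to cover the spacedust and who-am-i packages alongside the existing ones.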
spacedust/src/server.rs (+1 −1)

```diff
@@ -268,7 +268,7 @@
 ) -> Result<MultiSubscribeQuery, HttpError> {
     let raw_query = ctx.request.uri().query().unwrap_or("");
     let q = serde_qs::from_str(raw_query).map_err(|e| {
-        HttpError::for_bad_request(None, format!("unable to parse query string: {}", e))
+        HttpError::for_bad_request(None, format!("unable to parse query string: {e}"))
     })?;
     Ok(q)
 }
```
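For context, this is the serde_qs-based query parsing used here and in the ufos server below; a minimal sketch with a hypothetical query struct standing in for MultiSubscribeQuery (field names are illustrative, not from the source):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct SubscribeQuery {
    // Hypothetical fields, for illustration only.
    wanted: Vec<String>,
    cursor: Option<u64>,
}

fn main() {
    // serde_qs understands bracketed sequence keys that plain form
    // decoding does not, e.g. wanted[0]=...&wanted[1]=...
    let raw_query = "wanted[0]=app.bsky.feed.post&cursor=42";
    match serde_qs::from_str::<SubscribeQuery>(raw_query) {
        Ok(q) => println!("parsed: {q:?}"),
        // The server maps this error into an HTTP 400 response.
        Err(e) => eprintln!("unable to parse query string: {e}"),
    }
}
```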
ufos/src/consumer.rs (+1 −1)

```diff
@@ -223,7 +223,7 @@
 
         let beginning = match self.current_batch.initial_cursor.map(|c| c.elapsed()) {
             None => "unknown".to_string(),
-            Some(Ok(t)) => format!("{:?}", t),
+            Some(Ok(t)) => format!("{t:?}"),
             Some(Err(e)) => format!("+{:?}", e.duration()),
         };
         log::trace!(
```
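Only plain identifiers can be captured inline, which is why `{t:?}` gets inlined while `e.duration()` stays a positional argument: implicit format-args capture does not accept expressions, and clippy's lint only suggests inlining the former. A small standalone sketch of the same pattern (the future timestamp is contrived to force the error arm):

```rust
use std::time::{Duration, SystemTime};

fn main() {
    // A timestamp slightly in the future, so elapsed() returns Err.
    let cursor = SystemTime::now() + Duration::from_secs(5);
    let beginning = match cursor.elapsed() {
        // Identifier: can be captured inline in the format string.
        Ok(t) => format!("{t:?}"),
        // Method call: must remain a positional argument; the "+"
        // marks a cursor that is ahead of the local clock.
        Err(e) => format!("+{:?}", e.duration()),
    };
    println!("{beginning}");
}
```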
ufos/src/db_types.rs (+2 −2)

```diff
@@ -427,7 +427,7 @@
     ] {
         let serialized = s.to_string().to_db_bytes()?;
         let prefixed = String::sub_prefix(pre)?;
-        assert_eq!(serialized.starts_with(&prefixed), is_pre, "{}", desc);
+        assert_eq!(serialized.starts_with(&prefixed), is_pre, "{desc}");
     }
     Ok(())
 }
@@ -445,7 +445,7 @@
     ] {
         let serialized = Nsid::new(s.to_string()).unwrap().to_db_bytes()?;
         let prefixed = Nsid::sub_prefix(pre)?;
-        assert_eq!(serialized.starts_with(&prefixed), is_pre, "{}", desc);
+        assert_eq!(serialized.starts_with(&prefixed), is_pre, "{desc}");
     }
     Ok(())
 }
```
ufos/src/server/collections_query.rs (+1 −1)

```diff
@@ -53,7 +53,7 @@
 ) -> Result<MultiCollectionQuery, HttpError> {
     let raw_query = ctx.request.uri().query().unwrap_or("");
     let q = serde_qs::from_str(raw_query).map_err(|e| {
-        HttpError::for_bad_request(None, format!("unable to parse query string: {}", e))
+        HttpError::for_bad_request(None, format!("unable to parse query string: {e}"))
     })?;
     Ok(q)
 }
```
ufos/src/server/mod.rs (+5 −5)

```diff
@@ -444,7 +444,7 @@
     };
 
     if !(1..=200).contains(&limit) {
-        let msg = format!("limit not in 1..=200: {}", limit);
+        let msg = format!("limit not in 1..=200: {limit}");
         return Err(HttpError::for_bad_request(None, msg));
     }
 
@@ -577,7 +577,7 @@
     };
 
     if !(1..=200).contains(&limit) {
-        let msg = format!("limit not in 1..=200: {}", limit);
+        let msg = format!("limit not in 1..=200: {limit}");
         return Err(HttpError::for_bad_request(None, msg));
     }
 
@@ -649,7 +649,7 @@
 
     let step = if let Some(secs) = q.step {
         if secs < 3600 {
-            let msg = format!("step is too small: {}", secs);
+            let msg = format!("step is too small: {secs}");
             Err(HttpError::for_bad_request(None, msg))?;
         }
         (secs / 3600) * 3600 // trucate to hour
@@ -658,7 +658,7 @@
     };
 
     let nsid = Nsid::new(q.collection).map_err(|e| {
-        HttpError::for_bad_request(None, format!("collection was not a valid NSID: {:?}", e))
+        HttpError::for_bad_request(None, format!("collection was not a valid NSID: {e:?}"))
    })?;
 
     let (range_cursors, series) = storage
@@ -762,6 +762,6 @@
         ..Default::default()
     })
     .start()
-    .map_err(|error| format!("failed to start server: {}", error))?
+    .map_err(|error| format!("failed to start server: {error}"))?
     .await
 }
```
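The step handling above rejects sub-hour values and then truncates the rest to whole hours via integer division; a quick check of that arithmetic (values illustrative):

```rust
fn main() {
    for secs in [3600u64, 5400, 7199, 7200] {
        // Integer division drops the remainder, rounding down to the
        // nearest hour boundary.
        let step = (secs / 3600) * 3600;
        println!("{secs} -> {step}");
    }
    // Prints: 3600 -> 3600, 5400 -> 3600, 7199 -> 3600, 7200 -> 7200
}
```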
ufos/src/storage_fjall.rs (+1 −1)

```diff
@@ -1615,7 +1615,7 @@
         }
     }
     let dt = t0.elapsed();
-    log::trace!("finished trimming {n} nsids in {:?}: {total_danglers} dangling and {total_deleted} total removed.", dt);
+    log::trace!("finished trimming {n} nsids in {dt:?}: {total_danglers} dangling and {total_deleted} total removed.");
     histogram!("storage_trim_dirty_nsids").record(completed.len() as f64);
     histogram!("storage_trim_duration").record(dt.as_micros() as f64);
     counter!("storage_trim_removed", "dangling" => "true").increment(total_danglers as u64);
```