···12121313use crate::stats::collect_stats;
14141515-// Schema version - increment when the schema changes incompatibly.
1616-const SCHEMA_VERSION: u32 = 1;
/// 32-bit FNV-1a hash of a byte slice, evaluable in `const` context so it
/// can back compile-time schema-drift checks.
///
/// Uses a `while` loop rather than iterators because iterator adapters are
/// not usable in `const fn`.
const fn schema_hash(s: &[u8]) -> u32 {
    // Standard FNV-1a parameters: 32-bit offset basis and prime.
    const FNV_OFFSET_BASIS: u32 = 0x811c_9dc5; // 2166136261
    const FNV_PRIME: u32 = 0x0100_0193; // 16777619

    let mut hash = FNV_OFFSET_BASIS;
    let mut idx = 0;
    while idx < s.len() {
        hash ^= s[idx] as u32;
        hash = hash.wrapping_mul(FNV_PRIME);
        idx += 1;
    }
    hash
}
17211822const SCHEMA: &str = "
1923CREATE TABLE IF NOT EXISTS events (
···3034 total_bytes INTEGER
3135);
3236";
// Append a new hash entry when the schema changes - SCHEMA_VERSION auto-increments.
// Each entry is the FNV-1a hash (via `schema_hash`) of the SCHEMA string for
// one historical version, oldest first.
const SCHEMA_HASHES: &[u32] = &[
    0x9bc94a70, // v1
];
// The current schema version is simply how many hashes have been recorded.
const SCHEMA_VERSION: u32 = SCHEMA_HASHES.len() as u32;
// Compile-time guard: if SCHEMA is edited without appending its new hash,
// this assert fails the build, so SCHEMA_VERSION can never silently drift
// out of sync with the actual SQL text.
const _: () = assert!(
    schema_hash(SCHEMA.as_bytes()) == SCHEMA_HASHES[SCHEMA_VERSION as usize - 1],
    "schema changed - append new hash to SCHEMA_HASHES"
);
33473448#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
3549#[repr(u64)]
···114128 }
115129}
116130117117-#[derive(Debug, Deserialize, Serialize)]
118118-struct NixEvent {
119119- action: String,
120120- id: u64,
121121- // Raw number: ActivityType for "start", ResultType for "result", absent on "stop"
122122- #[serde(default, rename = "type")]
123123- event_type: u64,
124124- #[serde(default)]
125125- text: String,
126126- #[serde(default)]
127127- fields: Vec<serde_json::Value>,
128128- #[serde(default)]
129129- parent: u64,
/// One structured event line received on the socket, discriminated by its
/// JSON `action` field ("start" | "stop" | "result").
// NOTE(review): field meanings below are inferred from how process_event
// consumes them — confirm against the emitter's (presumably nix's
// internal-json log) documentation.
#[derive(Debug, Deserialize)]
#[serde(tag = "action", rename_all = "snake_case")]
enum NixEvent {
    /// A new activity began.
    Start {
        /// Activity id; later Stop/Result events refer back to it.
        id: u64,
        /// Raw ActivityType number (wire key is "type"); 0 when absent.
        #[serde(rename = "type", default)]
        event_type: u64,
        /// Human-readable description; may be empty, in which case
        /// process_event falls back to `fields[0]`.
        #[serde(default)]
        text: String,
        /// Positional payload; shape depends on the activity type.
        #[serde(default)]
        fields: Vec<serde_json::Value>,
        /// Id of the parent activity (defaults to 0 when absent).
        #[serde(default)]
        parent: u64,
    },
    /// The activity with this id finished.
    Stop {
        id: u64,
    },
    /// An intermediate result attached to a running activity.
    Result {
        /// Id of the activity this result belongs to.
        id: u64,
        /// Raw ResultType number (wire key is "type"); 0 when absent.
        #[serde(rename = "type", default)]
        event_type: u64,
        /// Positional payload; for Progress results, `fields[1]` carries the
        /// total byte count.
        #[serde(default)]
        fields: Vec<serde_json::Value>,
    },
}
/// Control commands the CLI client sends over the unix socket, discriminated
/// by the JSON `action` field ("get_stats" | "clean").
#[derive(Debug, Deserialize)]
#[serde(tag = "action", rename_all = "snake_case")]
enum ClientCommand {
    /// Request aggregated stats. `since` is a unix-timestamp (seconds) lower
    /// bound on event start time; `None` means no filter (all history).
    GetStats { since: Option<i64> },
    /// Delete all recorded events and compact the database.
    Clean,
}
/// Any message that can arrive on the socket: either a CLI control command
/// or a nix build event.
///
/// Untagged outer enum: serde tries each variant in declaration order, so
/// `Command` is attempted first, then `Event`. Keep that order — routing is
/// unambiguous only because the inner `action` tag sets never overlap
/// (get_stats/clean vs start/stop/result).
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum SocketMessage {
    Command(ClientCommand),
    Event(NixEvent),
}
131172132173struct Activity {
···211252 loop {
212253 line.clear();
213254 if reader.read_line(&mut line).await? == 0 { break; }
214214- let cmd = line.trim();
215255216216- if cmd.starts_with("get_stats") {
217217- let since = cmd.split_whitespace().nth(1)
218218- .unwrap_or("1970-01-01T00:00:00+00:00")
219219- .to_string();
220220- let db = state.lock().unwrap().db.clone();
221221- let stats = tokio::task::spawn_blocking(move || collect_stats(&db, &since))
222222- .await??;
223223- writer.write_all((serde_json::to_string(&stats)? + "\n").as_bytes()).await?;
224224- break;
225225- }
226226- if cmd == "clean" {
227227- let db = state.lock().unwrap().db.clone();
228228- tokio::task::spawn_blocking(move || -> Result<()> {
229229- let conn = db.lock().unwrap();
230230- conn.execute_batch("DELETE FROM events; VACUUM; PRAGMA wal_checkpoint(TRUNCATE);")?;
231231- Ok(())
232232- }).await??;
233233- writer.write_all(b"ok\n").await?;
234234- info!("Database cleared via socket command");
235235- break;
236236- }
237237-238238- if let Ok(event) = serde_json::from_str::<NixEvent>(cmd) {
239239- process_event(event, &state)?;
256256+ match serde_json::from_str::<SocketMessage>(line.trim()) {
257257+ Ok(SocketMessage::Command(ClientCommand::GetStats { since })) => {
258258+ let db = state.lock().unwrap().db.clone();
259259+ let stats = tokio::task::spawn_blocking(move || collect_stats(&db, since))
260260+ .await??;
261261+ writer.write_all((serde_json::to_string(&stats)? + "\n").as_bytes()).await?;
262262+ break;
263263+ }
264264+ Ok(SocketMessage::Command(ClientCommand::Clean)) => {
265265+ let db = state.lock().unwrap().db.clone();
266266+ tokio::task::spawn_blocking(move || -> Result<()> {
267267+ let conn = db.lock().unwrap();
268268+ conn.execute_batch("DELETE FROM events; VACUUM; PRAGMA wal_checkpoint(TRUNCATE);")?;
269269+ Ok(())
270270+ }).await??;
271271+ writer.write_all(b"ok\n").await?;
272272+ info!("Database cleared via socket command");
273273+ break;
274274+ }
275275+ Ok(SocketMessage::Event(event)) => {
276276+ if let Err(e) = process_event(event, &state) {
277277+ error!("Failed to process event: {}", e);
278278+ }
279279+ }
280280+ Err(e) => error!("Invalid message: {}", e),
240281 }
241282 }
242283 Ok(())
···245286fn process_event(event: NixEvent, state: &Arc<Mutex<State>>) -> Result<()> {
246287 let mut s = state.lock().unwrap();
247288248248- match event.action.as_str() {
249249- "start" => {
250250- let act_type = ActivityType::from(event.event_type);
251251- let text = if !event.text.is_empty() {
252252- event.text.clone()
289289+ match event {
290290+ NixEvent::Start { id, event_type, text, fields, parent } => {
291291+ let act_type = ActivityType::from(event_type);
292292+ let text = if !text.is_empty() {
293293+ text
253294 } else {
254254- event.fields.get(0).and_then(|v| v.as_str()).unwrap_or("").to_string()
295295+ fields.get(0).and_then(|v| v.as_str()).unwrap_or("").to_string()
255296 };
256297257298 info!(
258258- id = event.id,
259259- parent = event.parent,
299299+ id,
300300+ parent,
260301 act_type = %act_type,
261302 text = %text,
262262- fields = ?event.fields,
303303+ fields = ?fields,
263304 "start"
264305 );
265306266266- s.active_activities.insert(event.id, Activity {
267267- id: event.id,
268268- parent_id: event.parent,
269269- event_type: event.event_type,
307307+ s.active_activities.insert(id, Activity {
308308+ id,
309309+ parent_id: parent,
310310+ event_type,
270311 text,
271312 start_time: Utc::now(),
272272- fields: event.fields,
313313+ fields,
273314 total_bytes: 0,
274315 });
275316 }
276276- "result" => {
277277- let res_type = ResultType::from(event.event_type);
317317+ NixEvent::Result { id, event_type, fields } => {
318318+ let res_type = ResultType::from(event_type);
278319279320 if res_type != ResultType::BuildLogLine && res_type != ResultType::Progress && res_type != ResultType::SetExpected {
280321 info!(
281281- id = event.id,
322322+ id,
282323 res_type = %res_type,
283283- fields = ?event.fields,
324324+ fields = ?fields,
284325 "result"
285326 );
286327 }
287328288288- if let Some(act) = s.active_activities.get_mut(&event.id) {
329329+ if let Some(act) = s.active_activities.get_mut(&id) {
289330 if res_type == ResultType::Progress {
290290- if let Some(total) = event.fields.get(1).and_then(|v| v.as_u64()) {
331331+ if let Some(total) = fields.get(1).and_then(|v| v.as_u64()) {
291332 if total > 0 { act.total_bytes = total; }
292333 }
293334 }
294335 }
295336 }
296296- "stop" => {
297297- if let Some(act) = s.active_activities.remove(&event.id) {
337337+ NixEvent::Stop { id } => {
338338+ if let Some(act) = s.active_activities.remove(&id) {
298339 let act_type = ActivityType::from(act.event_type);
299340 let end_time = Utc::now();
300341 let duration_ms = end_time.signed_duration_since(act.start_time).num_milliseconds();
···337378 ],
338379 ).context("Failed to insert event")?;
339380 }
340340- }
341341- _ => {
342342- info!(action = %event.action, id = event.id, "unknown action");
343381 }
344382 }
345383 Ok(())
+12-10
src/main.rs
···103103 .unwrap_or_else(|| PathBuf::from("/tmp/nod.sock"))
104104 });
105105106106- // Compute the since timestamp. Flags are additive (e.g. -y 1 -d 3 = 1 year + 3 days ago).
107107- let mut since = Utc::now();
108108- if let Some(y) = years { since = since - chrono::Months::new(y * 12); }
109109- if let Some(m) = months { since = since - chrono::Months::new(m); }
110110- if let Some(d) = days { since = since - chrono::Duration::days(d as i64); }
111111- let has_filter = days.is_some() || months.is_some() || years.is_some();
112112- // Epoch as the "no filter" sentinel — a WHERE start_time >= epoch matches everything.
113113- let since_str = if has_filter { since.to_rfc3339() } else { "1970-01-01T00:00:00+00:00".to_string() };
106106+ let since: Option<i64> = if days.is_some() || months.is_some() || years.is_some() {
107107+ let mut t = Utc::now();
108108+ if let Some(y) = years { t = t - chrono::Months::new(y * 12); }
109109+ if let Some(m) = months { t = t - chrono::Months::new(m); }
110110+ if let Some(d) = days { t = t - chrono::Duration::days(d as i64); }
111111+ Some(t.timestamp())
112112+ } else {
113113+ None
114114+ };
114115115116 let mut stream = UnixStream::connect(&socket_path)
116117 .await
117118 .with_context(|| format!("Failed to connect to daemon at {}", socket_path.display()))?;
118119119119- stream.write_all(format!("get_stats {}\n", since_str).as_bytes()).await?;
120120+ let cmd = serde_json::json!({"action": "get_stats", "since": since});
121121+ stream.write_all((cmd.to_string() + "\n").as_bytes()).await?;
120122121123 let mut reader = BufReader::new(stream);
122124 let mut line = String::new();
···136138 .await
137139 .with_context(|| format!("Failed to connect to daemon at {}", socket_path.display()))?;
138140139139- stream.write_all(b"clean\n").await?;
141141+ stream.write_all(b"{\"action\":\"clean\"}\n").await?;
140142141143 let mut reader = BufReader::new(stream);
142144 let mut line = String::new();
+14-7
src/stats.rs
···3030 pub count: i64,
3131}
32323333-pub fn collect_stats(db: &Arc<Mutex<Connection>>, since: &str) -> Result<Stats> {
3333+pub fn collect_stats(db: &Arc<Mutex<Connection>>, since: Option<i64>) -> Result<Stats> {
3434 let conn = db.lock().unwrap();
35353636+ // Convert unix timestamp to RFC3339 for comparison with stored start_time strings.
3737+ // None means no filter - SQL NULL makes the condition vacuously true.
3838+ let since_str: Option<String> = since
3939+ .and_then(|ts| chrono::DateTime::from_timestamp(ts, 0))
4040+ .map(|dt| dt.to_rfc3339());
4141+ let p = since_str.as_deref();
4242+3643 let (build_count, build_total_ms, subst_count, subst_total_ms, download_bytes, download_ms) =
3744 conn.query_row(
3845 "SELECT
···4249 COALESCE(SUM(duration_ms) FILTER (WHERE event_type = 108), 0),
4350 COALESCE(SUM(total_bytes) FILTER (WHERE event_type = 101), 0),
4451 COALESCE(SUM(duration_ms) FILTER (WHERE event_type = 101), 0)
4545- FROM events WHERE start_time >= ?1",
4646- [since],
5252+ FROM events WHERE (?1 IS NULL OR start_time >= ?1)",
5353+ rusqlite::params![p],
4754 |r| Ok((r.get::<_, i64>(0)?, r.get::<_, i64>(1)?, r.get::<_, i64>(2)?,
4855 r.get::<_, i64>(3)?, r.get::<_, i64>(4)?, r.get::<_, i64>(5)?)),
4956 ).context("Failed to query summary stats")?;
50575158 let mut stmt = conn.prepare(
5259 "SELECT duration_ms, drv_path, text
5353- FROM events WHERE event_type = 105 AND start_time >= ?1
6060+ FROM events WHERE event_type = 105 AND (?1 IS NULL OR start_time >= ?1)
5461 ORDER BY duration_ms DESC LIMIT 10",
5562 ).context("Failed to prepare slowest builds query")?;
5656- let slowest_builds: Vec<SlowBuild> = stmt.query_map([since], |r| {
6363+ let slowest_builds: Vec<SlowBuild> = stmt.query_map(rusqlite::params![p], |r| {
5764 Ok(SlowBuild {
5865 duration_ms: r.get(0)?,
5966 drv_path: r.get(1)?,
···6572 // per substituter, not just metadata query time (QueryPathInfo).
6673 let mut stmt = conn.prepare(
6774 "SELECT cache_url, AVG(duration_ms), COUNT(*)
6868- FROM events WHERE event_type = 108 AND cache_url IS NOT NULL AND start_time >= ?1
7575+ FROM events WHERE event_type = 108 AND cache_url IS NOT NULL AND (?1 IS NULL OR start_time >= ?1)
6976 GROUP BY cache_url ORDER BY AVG(duration_ms) DESC",
7077 ).context("Failed to prepare cache latency query")?;
7171- let cache_latency: Vec<CacheStat> = stmt.query_map([since], |r| {
7878+ let cache_latency: Vec<CacheStat> = stmt.query_map(rusqlite::params![p], |r| {
7279 Ok(CacheStat {
7380 cache_url: r.get(0)?,
7481 avg_ms: r.get(1)?,