use bevy::prelude::*;

/// Frenet-Serret frame at a point on the helix.
#[derive(Debug, Clone, Copy)]
pub struct Frame {
    pub position: Vec3,
    // Kept for API completeness; not all callers consume the tangent.
    #[allow(dead_code)]
    pub tangent: Vec3,
    pub normal: Vec3,
    pub binormal: Vec3,
}

/// Parameters for one level of the nested-helix hierarchy.
#[derive(Debug, Clone)]
pub struct HelixLevel {
    /// How many turns this level makes per single turn of its parent.
    pub turns_per_parent: f32,
    /// Intended sampling density for this level.
    /// NOTE(review): not currently consulted by the sampling code — verify.
    pub samples_per_turn: u32,
    // Human-readable name, kept for debugging/UI even while unused.
    #[allow(dead_code)]
    pub label: &'static str,
    /// Display color for this level's polyline.
    pub color: Color,
    /// Extra multiplier on the child-coil radius for this level.
    pub radius_scale: f32,
}

/// Outermost level is index 0, innermost is last.
#[derive(Resource, Debug, Clone)]
pub struct HelixHierarchy {
    pub levels: Vec<HelixLevel>,
    /// Fraction of the parent tube the child coil fills (0..1).
    pub fill_factor: f32,
    /// Radius of the outermost helix.
    pub base_radius: f32,
    /// Vertical rise per outermost turn.
    pub pitch_per_turn: f32,
}

/// 1 unit of focal_time = 1 outermost helix turn = 1 minute.
#[derive(Resource, Debug)]
pub struct HelixState {
    pub focal_time: f32,
    pub active_level: usize,
    pub target_level: f32,
    pub interpolated_level: f32,
    pub auto_follow: bool,
    pub time_scale: f32,
}

/// Three-level default: minutes → seconds → sub-second.
pub fn default_hierarchy() -> HelixHierarchy {
    HelixHierarchy {
        levels: vec![
            HelixLevel {
                turns_per_parent: 1.0,
                samples_per_turn: 32,
                label: "minutes",
                color: Color::srgb(0.9, 0.2, 0.3),
                radius_scale: 1.0,
            },
            HelixLevel {
                turns_per_parent: 60.0,
                samples_per_turn: 32,
                label: "seconds",
                color: Color::srgb(0.2, 0.8, 0.4),
                radius_scale: 1.0,
            },
            HelixLevel {
                turns_per_parent: 60.0,
                samples_per_turn: 32,
                label: "sub-second",
                color: Color::srgb(0.3, 0.6, 0.9),
                radius_scale: 2.0,
            },
        ],
        fill_factor: 0.55,
        base_radius: 1.0,
        pitch_per_turn: 7.0,
    }
}

impl Default for HelixHierarchy {
    fn default() -> Self {
        default_hierarchy()
    }
}

/// Evaluate helix frame at t through all levels up to `level`.
/// Uses iterative Gram-Schmidt to avoid frame discontinuities.
pub fn eval_coil(t: f32, level: usize, hierarchy: &HelixHierarchy) -> Frame { use std::f32::consts::TAU; let r = hierarchy.base_radius; let p = hierarchy.pitch_per_turn; let theta = t * TAU; let (s, c) = theta.sin_cos(); let mut px = r * c; let mut py = t * p; let mut pz = r * s; let mut dtx = -r * TAU * s; let mut dty = p; let mut dtz = r * TAU * c; let mut nx = -c; let mut ny = 0.0_f32; let mut nz = -s; let t_len = (dtx * dtx + dty * dty + dtz * dtz).sqrt(); let (mut tx, mut ty, mut tz) = (dtx / t_len, dty / t_len, dtz / t_len); let mut bx = ty * nz - tz * ny; let mut by = tz * nx - tx * nz; let mut bz = tx * ny - ty * nx; for lvl in 1..=level { let level_info = &hierarchy.levels[lvl]; // Child radius: cumulative depth scaling let depth_scale: f32 = (0..lvl) .map(|l| { let child = &hierarchy.levels[l + 1]; hierarchy.fill_factor * child.radius_scale / child.turns_per_parent.sqrt() }) .product(); let child_r = hierarchy.base_radius * depth_scale; let total_turns: f32 = hierarchy.levels[..=lvl] .iter() .map(|l| l.turns_per_parent) .product(); let alpha = t * total_turns * TAU; let (sa, ca) = alpha.sin_cos(); let w = total_turns * TAU; let dx = nx * child_r * ca + bx * child_r * sa; let dy = ny * child_r * ca + by * child_r * sa; let dz = nz * child_r * ca + bz * child_r * sa; px += dx; py += dy; pz += dz; let ex = -nx * child_r * sa * w + bx * child_r * ca * w; let ey = -ny * child_r * sa * w + by * child_r * ca * w; let ez = -nz * child_r * sa * w + bz * child_r * ca * w; dtx += ex; dty += ey; dtz += ez; let t_len = (dtx * dtx + dty * dty + dtz * dtz).sqrt(); tx = dtx / t_len; ty = dty / t_len; tz = dtz / t_len; let dot = dx * tx + dy * ty + dz * tz; let mut nnx = dx - dot * tx; let mut nny = dy - dot * ty; let mut nnz = dz - dot * tz; let n_len = (nnx * nnx + nny * nny + nnz * nnz).sqrt(); nnx /= n_len; nny /= n_len; nnz /= n_len; nx = nnx; ny = nny; nz = nnz; bx = ty * nz - tz * ny; by = tz * nx - tx * nz; bz = tx * ny - ty * nx; } Frame { position: Vec3::new(px, 
py, pz), tangent: Vec3::new(tx, ty, tz), normal: Vec3::new(nx, ny, nz), binormal: Vec3::new(bx, by, bz), } } pub fn compute_focal_point(state: &HelixState, hierarchy: &HelixHierarchy) -> Vec3 { let frame = eval_coil(state.focal_time, 0, hierarchy); frame.position } #[derive(Resource)] pub struct HelixGeometry { pub levels: Vec, pub precomputed_to: f32, } pub struct HelixLevelGeometry { pub t_start: f32, pub t_step: f32, pub positions: Vec, } fn samples_per_t(level_idx: usize, hierarchy: &HelixHierarchy) -> f32 { let total_turns: f32 = hierarchy.levels[..=level_idx] .iter() .map(|l| l.turns_per_parent) .product(); let spt = if level_idx == hierarchy.levels.len() - 1 { 64.0 } else { 32.0 }; total_turns * spt } const PRECOMPUTE_AHEAD: f32 = 30.0; const PRECOMPUTE_BEHIND: f32 = 10.0; pub fn setup_helix_geometry(mut commands: Commands, hierarchy: Res) { let t_start = -PRECOMPUTE_BEHIND; let t_end = PRECOMPUTE_AHEAD; let mut levels = Vec::with_capacity(hierarchy.levels.len()); for level_idx in 0..hierarchy.levels.len() { let spt = samples_per_t(level_idx, &hierarchy); let t_step = 1.0 / spt; let num_samples = ((t_end - t_start) / t_step) as usize + 1; let mut positions = Vec::with_capacity(num_samples); for i in 0..num_samples { let t = t_start + i as f32 * t_step; let frame = eval_coil(t, level_idx, &hierarchy); positions.push(frame.position); } info!( "helix level {level_idx}: precomputed {num_samples} points, t=[{t_start}..{t_end}], step={t_step:.6}" ); levels.push(HelixLevelGeometry { t_start, t_step, positions, }); } commands.insert_resource(HelixGeometry { levels, precomputed_to: t_end, }); } pub fn extend_helix_geometry( state: Res, hierarchy: Res, mut geometry: ResMut, ) { if state.focal_time + 5.0 < geometry.precomputed_to { return; } let new_end = geometry.precomputed_to + PRECOMPUTE_AHEAD; for (level_idx, level_geom) in geometry.levels.iter_mut().enumerate() { let spt = samples_per_t(level_idx, &hierarchy); let t_step = 1.0 / spt; let current_end = 
level_geom.t_start + (level_geom.positions.len() as f32) * t_step; let num_new = ((new_end - current_end) / t_step) as usize; for i in 0..num_new { let t = current_end + i as f32 * t_step; let frame = eval_coil(t, level_idx, &hierarchy); level_geom.positions.push(frame.position); } } info!("extended helix geometry to t={new_end}"); geometry.precomputed_to = new_end; } #[derive(Resource)] pub struct HelixGizmoCache { pub last_focal_time: f32, pub last_level: f32, pub entity: Option, pub asset_handle: Option>, } impl Default for HelixGizmoCache { fn default() -> Self { Self { last_focal_time: 0.0, last_level: -1.0, entity: None, asset_handle: None, } } } pub fn draw_helix( mut commands: Commands, hierarchy: Res, state: Res, geometry: Res, mut gizmo_assets: ResMut>, mut cache: ResMut, ) { const WINDOW: f32 = 120.0; const UPDATE_THRESHOLD: f32 = 0.005; let focal_changed = (state.focal_time - cache.last_focal_time).abs() > UPDATE_THRESHOLD; let level_changed = (state.interpolated_level - cache.last_level).abs() > 0.01; if !focal_changed && !level_changed && cache.entity.is_some() { return; } let mut gizmo = GizmoAsset::default(); for (level_idx, level) in hierarchy.levels.iter().enumerate() { let distance_from_active = (level_idx as f32 - state.interpolated_level).abs(); let alpha = if distance_from_active < 0.5 { 1.0 } else { 0.4 }; let total_turns: f32 = hierarchy.levels[..=level_idx] .iter() .map(|l| l.turns_per_parent) .product(); let window_half_t = (WINDOW / 2.0) / total_turns; let t_start = state.focal_time - window_half_t; let t_end = state.focal_time + window_half_t; let level_geom = &geometry.levels[level_idx]; let idx_start = ((t_start - level_geom.t_start) / level_geom.t_step).max(0.0) as usize; let idx_end = ((t_end - level_geom.t_start) / level_geom.t_step) .min(level_geom.positions.len() as f32 - 1.0) .max(0.0) as usize; if idx_end <= idx_start + 1 { continue; } let slice = &level_geom.positions[idx_start..=idx_end]; if slice.len() > 1 { 
gizmo.linestrip(slice.iter().copied(), level.color.with_alpha(alpha)); } } match &cache.asset_handle { Some(h) => { let _ = gizmo_assets.insert(h.id(), gizmo); } None => { let h = gizmo_assets.add(gizmo); cache.asset_handle = Some(h.clone()); let entity = commands .spawn(Gizmo { handle: h, ..default() }) .id(); cache.entity = Some(entity); } } cache.last_focal_time = state.focal_time; cache.last_level = state.interpolated_level; } #[cfg(test)] mod tests { use super::*; /// Assert all components of a Vec3 are finite (non-NaN and non-Inf). fn assert_finite(v: Vec3, label: &str) { assert!(v.x.is_finite(), "{label}.x is not finite: {}", v.x); assert!(v.y.is_finite(), "{label}.y is not finite: {}", v.y); assert!(v.z.is_finite(), "{label}.z is not finite: {}", v.z); } /// Assert a Vec3 has length approximately 1.0 (within 0.01). fn assert_unit(v: Vec3, label: &str) { let len = v.length(); assert!( (len - 1.0).abs() < 0.01, "{label} is not unit length: length = {len}" ); } #[test] fn default_hierarchy_has_three_levels_with_positive_params() { let h = default_hierarchy(); assert_eq!(h.levels.len(), 3, "expected 3 levels"); assert!(h.fill_factor > 0.0, "fill_factor must be positive"); assert!(h.base_radius > 0.0, "base_radius must be positive"); assert!(h.pitch_per_turn > 0.0, "pitch_per_turn must be positive"); for (i, level) in h.levels.iter().enumerate() { assert!( level.turns_per_parent > 0.0, "level[{i}].turns_per_parent must be positive" ); assert!( level.samples_per_turn > 0, "level[{i}].samples_per_turn must be positive" ); } } #[test] fn eval_coil_level0_finite_for_extreme_t_values() { let h = default_hierarchy(); let test_values = [-10.0_f32, 0.0, 0.5, 1.0, 100.0]; for &t in &test_values { let frame = eval_coil(t, 0, &h); assert_finite(frame.position, &format!("level0 t={t} position")); assert_finite(frame.normal, &format!("level0 t={t} normal")); assert_finite(frame.binormal, &format!("level0 t={t} binormal")); } } #[test] fn 
eval_coil_level0_frame_is_orthonormal() { let h = default_hierarchy(); let test_values = [-10.0_f32, 0.0, 0.5, 1.0, 100.0]; for &t in &test_values { let frame = eval_coil(t, 0, &h); // Each basis vector must be approximately unit length assert_unit(frame.normal, &format!("level0 t={t} normal")); assert_unit(frame.binormal, &format!("level0 t={t} binormal")); // Orthogonality: dot products of distinct basis vectors must be ~0 let dot_nb = frame.normal.dot(frame.binormal).abs(); assert!( dot_nb < 0.01, "level0 t={t}: normal·binormal = {dot_nb} (not orthogonal)" ); } } #[test] fn eval_coil_level1_offset_from_level0_by_child_radius() { let h = default_hierarchy(); // Compute expected child_radius for level 1 using the same formula as eval_coil. // depth_scale = fill_factor / sqrt(levels[1].turns_per_parent) let depth_scale = h.fill_factor / h.levels[1].turns_per_parent.sqrt(); let child_radius = h.base_radius * depth_scale; let test_values = [0.0_f32, 0.25, 0.5, 1.0]; for &t in &test_values { let frame0 = eval_coil(t, 0, &h); let frame1 = eval_coil(t, 1, &h); let offset = (frame1.position - frame0.position).length(); assert!( (offset - child_radius).abs() < 0.01, "t={t}: level-1 offset = {offset}, expected child_radius = {child_radius}" ); } } #[test] fn eval_coil_level1_finite_for_various_t_values() { let h = default_hierarchy(); let test_values = [-10.0_f32, 0.0, 0.5, 1.0, 100.0]; for &t in &test_values { let frame = eval_coil(t, 1, &h); assert_finite(frame.position, &format!("level1 t={t} position")); assert_finite(frame.normal, &format!("level1 t={t} normal")); assert_finite(frame.binormal, &format!("level1 t={t} binormal")); } } }