use clap::Parser;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};

#[derive(Parser, Debug)]
#[command(author, version, about = "A CPU/GPU heater that churns hardware", long_about = None)]
struct Args {
    /// Duration to run in seconds (omit for indefinite run)
    #[arg(short, long)]
    duration: Option<u64>,

    /// Number of cores to leave free (default: 1)
    #[arg(short = 'f', long, default_value_t = 1)]
    free_cores: usize,

    /// Enable GPU heating (requires compatible GPU)
    #[arg(short, long)]
    gpu: bool,
}

fn main() {
    let args = Args::parse();

    // Get the number of logical CPU cores
    let total_cores = num_cpus::get();
    let cores_to_use = total_cores.saturating_sub(args.free_cores).max(1);

    println!("Total CPU cores: {}", total_cores);
    println!("Cores to heat: {}", cores_to_use);
    println!("Cores to leave free: {}", args.free_cores);
    if args.gpu {
        println!("GPU heating: ENABLED");
    }
    match args.duration {
        Some(seconds) => println!("Running for {} seconds", seconds),
        None => println!("Running indefinitely (press Ctrl+C to stop)"),
    }

    // Set up signal handler for graceful shutdown
    let running = Arc::new(AtomicBool::new(true));
    let r = running.clone();
    ctrlc::set_handler(move || {
        println!("\nReceived Ctrl+C, shutting down...");
        r.store(false, Ordering::SeqCst);
    })
    .expect("Error setting Ctrl-C handler");

    let start_time = Instant::now();
    let duration = args.duration.map(Duration::from_secs);

    // Start GPU heating if requested
    let gpu_handle = if args.gpu {
        let running = running.clone();
        Some(thread::spawn(move || match run_gpu_heater(running) {
            Ok(_) => println!("GPU heater stopped successfully"),
            Err(e) => eprintln!("GPU heater error: {}", e),
        }))
    } else {
        None
    };

    // Spawn threads to churn CPU
    let mut handles = vec![];
    for i in 0..cores_to_use {
        let running = running.clone();
        let handle = thread::spawn(move || {
            println!("CPU Thread {} started", i);
            let mut counter: u64 = 0;
            while running.load(Ordering::SeqCst) {
                // Compute something meaningless but CPU-intensive
                counter = counter.wrapping_add(1);
                for j in 0..1000 {
                    counter = counter
                        .wrapping_mul(1103515245)
                        .wrapping_add(j)
                        .wrapping_rem(2147483648);
                }
            }
            println!("CPU Thread {} stopped after {} iterations", i, counter);
        });
        handles.push(handle);
    }

    // Monitor duration if specified
    if let Some(dur) = duration {
        while start_time.elapsed() < dur && running.load(Ordering::SeqCst) {
            thread::sleep(Duration::from_millis(100));
        }
        running.store(false, Ordering::SeqCst);
    } else {
        // Wait indefinitely until Ctrl+C
        for handle in handles {
            handle.join().unwrap();
        }
        if let Some(handle) = gpu_handle {
            handle.join().unwrap();
        }
        return;
    }

    // Wait for all threads to finish
    for handle in handles {
        handle.join().unwrap();
    }
    if let Some(handle) = gpu_handle {
        if let Err(e) = handle.join() {
            println!("GPU thread exited with error: {:?}", e);
        }
    }

    println!("Finished heating for {:?}", start_time.elapsed());
}

fn run_gpu_heater(running: Arc<AtomicBool>) -> Result<(), Box<dyn std::error::Error>> {
    // Initialize GPU
    let instance = wgpu::Instance::new(&wgpu::InstanceDescriptor {
        backends: wgpu::Backends::all(),
        ..Default::default()
    });

    let adapter = pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
        power_preference: wgpu::PowerPreference::HighPerformance,
        compatible_surface: None,
        force_fallback_adapter: false,
    }));

    let adapter = match adapter {
        Ok(a) => a,
        Err(_) => return Err("Failed to find GPU adapter".into()),
    };

    println!(
        "GPU: {} ({:?})",
        adapter.get_info().name,
        adapter.get_info().backend
    );

    let (device, queue) =
        pollster::block_on(adapter.request_device(&wgpu::DeviceDescriptor {
            label: Some("GPU Heater Device"),
            required_features: wgpu::Features::empty(),
            required_limits: wgpu::Limits::default(),
            memory_hints: Default::default(),
            experimental_features: Default::default(),
            trace: Default::default(),
        }))?;

    // Create compute shader that does intensive matrix operations
    let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
        label: Some("GPU Heater Shader"),
        source: wgpu::ShaderSource::Wgsl(include_str!("heater.wgsl").into()),
    });

    // Create buffers for computation
    let buffer_size = 1024 * 1024 * 4; // 4 MB of data
    let input_buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("Input Buffer"),
        size: buffer_size,
        usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
        mapped_at_creation: false,
    });
    let output_buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("Output Buffer"),
        size: buffer_size,
        usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_SRC,
        mapped_at_creation: false,
    });

    let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
        label: Some("Compute Bind Group Layout"),
        entries: &[
            wgpu::BindGroupLayoutEntry {
                binding: 0,
                visibility: wgpu::ShaderStages::COMPUTE,
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            },
            wgpu::BindGroupLayoutEntry {
                binding: 1,
                visibility: wgpu::ShaderStages::COMPUTE,
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Storage { read_only: false },
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            },
        ],
    });

    let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: Some("Compute Bind Group"),
        layout: &bind_group_layout,
        entries: &[
            wgpu::BindGroupEntry {
                binding: 0,
                resource: input_buffer.as_entire_binding(),
            },
            wgpu::BindGroupEntry {
                binding: 1,
                resource: output_buffer.as_entire_binding(),
            },
        ],
    });

    let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
        label: Some("Compute Pipeline Layout"),
        bind_group_layouts: &[&bind_group_layout],
        push_constant_ranges: &[],
    });

    let compute_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
        label: Some("Compute Pipeline"),
        layout: Some(&pipeline_layout),
        module: &shader,
        entry_point: Some("main"),
        compilation_options: Default::default(),
        cache: None,
    });

    println!("GPU heater started");
    let mut iteration = 0u64;

    // Main GPU compute loop
    while running.load(Ordering::SeqCst) {
        let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
            label: Some("Compute Encoder"),
        });

        {
            let mut compute_pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
                label: Some("Compute Pass"),
                timestamp_writes: None,
            });
            compute_pass.set_pipeline(&compute_pipeline);
            compute_pass.set_bind_group(0, &bind_group, &[]);
            // Dispatch enough work to keep the GPU busy. Use a smaller grid to
            // avoid very long-running single submissions, which can trigger
            // backend timeouts during shutdown.
            compute_pass.dispatch_workgroups(128, 128, 1);
        }

        // Submit work
        queue.submit(Some(encoder.finish()));
        iteration += 1;
    }

    // Wait for all submitted work to complete before returning, to avoid
    // backend timeouts during shutdown. `on_submitted_work_done` takes a
    // callback, so use a channel and wait with a timeout to avoid hanging
    // indefinitely.
    let (tx, rx) = std::sync::mpsc::sync_channel(1);
    queue.on_submitted_work_done(move || {
        // Best-effort send; ignore the error if the receiver was dropped
        let _ = tx.send(());
    });
    match rx.recv_timeout(Duration::from_secs(30)) {
        Ok(()) => (),
        Err(std::sync::mpsc::RecvTimeoutError::Timeout) => {
            eprintln!("Warning: timed out waiting for GPU work to complete during shutdown");
        }
        Err(e) => {
            eprintln!("Warning: error while waiting for GPU shutdown: {:?}", e);
        }
    }

    println!("GPU heater stopped after {} iterations", iteration);
    Ok(())
}
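// NOTE: `run_gpu_heater` pulls in a compute shader from `heater.wgsl` via
// `include_str!`; that file is not part of this listing. The sketch below is
// only an illustration of what a compatible shader could look like, assuming
// an entry point named `main`, a read-only storage buffer at @binding(0), a
// read-write storage buffer at @binding(1), and a workgroup size of 64 so that
// dispatch_workgroups(128, 128, 1) covers the 1024 * 1024 f32 elements in the
// 4 MB buffers. The loop bound and the arithmetic inside it are arbitrary;
// anything ALU-heavy will do.
//
//     @group(0) @binding(0) var<storage, read> input: array<f32>;
//     @group(0) @binding(1) var<storage, read_write> output: array<f32>;
//
//     @compute @workgroup_size(64)
//     fn main(@builtin(global_invocation_id) id: vec3<u32>) {
//         // Linear index into the 1024 * 1024 element buffers
//         // (the grid is 8192 x 128 invocations for dispatch_workgroups(128, 128, 1)).
//         let idx = id.y * 8192u + id.x;
//         var acc = input[idx] + f32(idx);
//         // Long dependent chain of transcendental ops to keep the ALUs hot.
//         for (var i = 0u; i < 4096u; i = i + 1u) {
//             acc = sin(acc) * cos(acc) + sqrt(abs(acc) + 1.0);
//         }
//         output[idx] = acc;
//     }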