// CPUs can make a little heat, as a treat
1use clap::Parser;
2use std::sync::atomic::{AtomicBool, Ordering};
3use std::sync::Arc;
4use std::thread;
5use std::time::{Duration, Instant};
6
/// Command-line options for the heater, parsed by clap's derive API.
/// (The explicit `about` below overrides this doc comment in `--help`.)
#[derive(Parser, Debug)]
#[command(author, version, about = "A CPU/GPU heater that churns hardware", long_about = None)]
struct Args {
    /// Duration to run in seconds (omit for indefinite run)
    #[arg(short, long)]
    duration: Option<u64>,

    /// Number of cores to leave free (default: 1)
    #[arg(short = 'f', long, default_value_t = 1)]
    free_cores: usize,

    /// Enable GPU heating (requires compatible GPU)
    #[arg(short, long)]
    gpu: bool,
}
22
23fn main() {
24 let args = Args::parse();
25
26 // Get the number of logical CPU cores
27 let total_cores = num_cpus::get();
28 let cores_to_use = total_cores.saturating_sub(args.free_cores).max(1);
29
30 println!("Total CPU cores: {}", total_cores);
31 println!("Cores to heat: {}", cores_to_use);
32 println!("Cores to leave free: {}", args.free_cores);
33 if args.gpu {
34 println!("GPU heating: ENABLED");
35 }
36
37 match args.duration {
38 Some(seconds) => println!("Running for {} seconds", seconds),
39 None => println!("Running indefinitely (press Ctrl+C to stop)"),
40 }
41
42 // Set up signal handler for graceful shutdown
43 let running = Arc::new(AtomicBool::new(true));
44 let r = running.clone();
45
46 ctrlc::set_handler(move || {
47 println!("\nReceived Ctrl+C, shutting down...");
48 r.store(false, Ordering::SeqCst);
49 })
50 .expect("Error setting Ctrl-C handler");
51
52 let start_time = Instant::now();
53 let duration = args.duration.map(Duration::from_secs);
54
55 // Start GPU heating if requested
56 let gpu_handle = if args.gpu {
57 let running = running.clone();
58 Some(thread::spawn(move || {
59 match run_gpu_heater(running) {
60 Ok(_) => println!("GPU heater stopped successfully"),
61 Err(e) => eprintln!("GPU heater error: {}", e),
62 }
63 }))
64 } else {
65 None
66 };
67
68 // Spawn threads to churn CPU
69 let mut handles = vec![];
70 for i in 0..cores_to_use {
71 let running = running.clone();
72 let handle = thread::spawn(move || {
73 println!("CPU Thread {} started", i);
74 let mut counter: u64 = 0;
75 while running.load(Ordering::SeqCst) {
76 //compute something meaningless but CPU-intensive
77 counter = counter.wrapping_add(1);
78 for j in 0..1000 {
79 counter = counter
80 .wrapping_mul(1103515245)
81 .wrapping_add(j)
82 .wrapping_rem(2147483648);
83 }
84 }
85 println!("CPU Thread {} stopped after {} iterations", i, counter);
86 });
87 handles.push(handle);
88 }
89
90 // Monitor duration if specified
91 if let Some(dur) = duration {
92 while start_time.elapsed() < dur && running.load(Ordering::SeqCst) {
93 thread::sleep(Duration::from_millis(100));
94 }
95 running.store(false, Ordering::SeqCst);
96 } else {
97 // Wait indefinitely until Ctrl+C
98 for handle in handles {
99 handle.join().unwrap();
100 }
101 if let Some(handle) = gpu_handle {
102 handle.join().unwrap();
103 }
104 return;
105 }
106
107 // Wait for all threads to finish
108 for handle in handles {
109 handle.join().unwrap();
110 }
111 if let Some(handle) = gpu_handle {
112 if let Err(e) = handle.join() {
113 println!("GPU thread exited with error: {:?}", e);
114 }
115 }
116
117 println!("Finished heating for {:?}", start_time.elapsed());
118}
119
120fn run_gpu_heater(running: Arc<AtomicBool>) -> Result<(), Box<dyn std::error::Error>> {
121 // Initialize GPU
122 let instance = wgpu::Instance::new(&wgpu::InstanceDescriptor {
123 backends: wgpu::Backends::all(),
124 ..Default::default()
125 });
126
127 let adapter = pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
128 power_preference: wgpu::PowerPreference::HighPerformance,
129 compatible_surface: None,
130 force_fallback_adapter: false,
131 }));
132
133 let adapter = match adapter {
134 Ok(a) => a,
135 Err(_) => return Err("Failed to find GPU adapter".into()),
136 };
137
138 println!("GPU: {} ({:?})", adapter.get_info().name, adapter.get_info().backend);
139
140 let (device, queue) = pollster::block_on(adapter.request_device(
141 &wgpu::DeviceDescriptor {
142 label: Some("GPU Heater Device"),
143 required_features: wgpu::Features::empty(),
144 required_limits: wgpu::Limits::default(),
145 memory_hints: Default::default(),
146 experimental_features: Default::default(),
147 trace: Default::default(),
148 },
149 ))?;
150
151 // Create compute shader that does intensive matrix operations
152 let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
153 label: Some("GPU Heater Shader"),
154 source: wgpu::ShaderSource::Wgsl(include_str!("heater.wgsl").into()),
155 });
156
157 // Create buffers for computation
158 let buffer_size = 1024 * 1024 * 4; // 4MB of data
159 let input_buffer = device.create_buffer(&wgpu::BufferDescriptor {
160 label: Some("Input Buffer"),
161 size: buffer_size,
162 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
163 mapped_at_creation: false,
164 });
165
166 let output_buffer = device.create_buffer(&wgpu::BufferDescriptor {
167 label: Some("Output Buffer"),
168 size: buffer_size,
169 usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_SRC,
170 mapped_at_creation: false,
171 });
172
173 let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
174 label: Some("Compute Bind Group Layout"),
175 entries: &[
176 wgpu::BindGroupLayoutEntry {
177 binding: 0,
178 visibility: wgpu::ShaderStages::COMPUTE,
179 ty: wgpu::BindingType::Buffer {
180 ty: wgpu::BufferBindingType::Storage { read_only: true },
181 has_dynamic_offset: false,
182 min_binding_size: None,
183 },
184 count: None,
185 },
186 wgpu::BindGroupLayoutEntry {
187 binding: 1,
188 visibility: wgpu::ShaderStages::COMPUTE,
189 ty: wgpu::BindingType::Buffer {
190 ty: wgpu::BufferBindingType::Storage { read_only: false },
191 has_dynamic_offset: false,
192 min_binding_size: None,
193 },
194 count: None,
195 },
196 ],
197 });
198
199 let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
200 label: Some("Compute Bind Group"),
201 layout: &bind_group_layout,
202 entries: &[
203 wgpu::BindGroupEntry {
204 binding: 0,
205 resource: input_buffer.as_entire_binding(),
206 },
207 wgpu::BindGroupEntry {
208 binding: 1,
209 resource: output_buffer.as_entire_binding(),
210 },
211 ],
212 });
213
214 let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
215 label: Some("Compute Pipeline Layout"),
216 bind_group_layouts: &[&bind_group_layout],
217 push_constant_ranges: &[],
218 });
219
220 let compute_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
221 label: Some("Compute Pipeline"),
222 layout: Some(&pipeline_layout),
223 module: &shader,
224 entry_point: Some("main"),
225 compilation_options: Default::default(),
226 cache: None,
227 });
228
229 println!("GPU heater started");
230 let mut iteration = 0u64;
231
232 // Main GPU compute loop
233 while running.load(Ordering::SeqCst) {
234 let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
235 label: Some("Compute Encoder"),
236 });
237
238 {
239 let mut compute_pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
240 label: Some("Compute Pass"),
241 timestamp_writes: None,
242 });
243 compute_pass.set_pipeline(&compute_pipeline);
244 compute_pass.set_bind_group(0, &bind_group, &[]);
245 // Dispatch enough work to keep GPU busy. Use a smaller grid to
246 // avoid very long-running single submissions which can trigger
247 // backend timeouts during shutdown.
248 compute_pass.dispatch_workgroups(128, 128, 1);
249 }
250
251 // Gracefully handle errors on shutdown
252 // Submit work
253 queue.submit(Some(encoder.finish()));
254 iteration += 1;
255 }
256
257 // Wait for all submitted work to complete before returning to avoid
258 // backend timeouts during shutdown. `on_submitted_work_done` takes a
259 // callback, so use a channel and wait with a timeout to avoid hanging
260 // indefinitely.
261 let (tx, rx) = std::sync::mpsc::sync_channel(1);
262 queue.on_submitted_work_done(move || {
263 // best-effort send; ignore error if receiver was dropped
264 let _ = tx.send(());
265 });
266 match rx.recv_timeout(Duration::from_secs(30)) {
267 Ok(()) => (),
268 Err(std::sync::mpsc::RecvTimeoutError::Timeout) => {
269 eprintln!("Warning: timed out waiting for GPU work to complete during shutdown");
270 }
271 Err(e) => {
272 eprintln!("Warning: error while waiting for GPU shutdown: {:?}", e);
273 }
274 }
275
276 println!("GPU heater stopped after {} iterations", iteration);
277 Ok(())
278}