//! Process manager: owns the PID table, run queue, and timer wheel, and
//! drives the process lifecycle (allocate → start → zombify → reap).
1use lancer_core::run_queue::RunQueue;
2use lancer_core::timer_wheel::TimerWheel;
3use x86_64::PhysAddr;
4
5use crate::mem::addr;
6use crate::mem::phys::BitmapFrameAllocator;
7use crate::sync::IrqMutex;
8use crate::types::{BlockedPid, CreatedPid, Generation, MAX_PIDS, Pid};
9
10use super::address_space::{self, Pml4ReleaseResult};
11use super::context::{CpuContext, FpuState, IpcMessage};
12use super::pid_table::PidTable;
13use super::{ExecContext, PROC_NAME_LEN, ProcessState, SchedEntity};
14use crate::mem::typed_addr::Pml4Phys;
15use crate::ring::RingIndex;
16
17pub(crate) fn make_default_sched(pid: Pid, generation: Generation, exec_phys: u64) -> SchedEntity {
18 SchedEntity {
19 pid,
20 generation,
21 state: ProcessState::Created,
22 exec_phys,
23 ..SchedEntity::EMPTY
24 }
25}
26
/// Builds the initial `ExecContext` for a new process: zeroed CPU/FPU
/// state, no IPC state, no ring bindings, no root CNode, empty name.
///
/// * `pml4_phys` — physical address of the process's page-table root.
/// * `from_untyped` — whether the exec frame came from an untyped
///   object; when set, `teardown_inner` skips returning the frame to
///   the bitmap allocator.
pub(crate) fn make_default_exec(pml4_phys: Pml4Phys, from_untyped: bool) -> ExecContext {
    ExecContext {
        saved_context: CpuContext::zero(),
        fpu_state: FpuState::default_init(),
        pml4_phys,
        fs_base: 0,
        context_checksum: 0,
        ipc_message: IpcMessage::zero(),
        ipc_badge: 0,
        reply_target: None,
        ring_region_id: None,
        ring_sq_head: RingIndex::new(0),
        ring_cq_tail: RingIndex::new(0),
        allocated_frames: 0,
        name: [0u8; PROC_NAME_LEN],
        name_len: 0,
        root_cnode: None,
        cnode_depth: 0,
        root_guard_value: 0,
        root_guard_bits: 0,
        from_untyped,
    }
}
50
/// Owner of all process bookkeeping: per-PID scheduling entities, the
/// ready queue, and the timeout wheel.
pub struct ProcessManager {
    /// Slot-per-PID storage of `SchedEntity` records.
    pid_table: PidTable,
    /// Priority run queue; linked intrusively through the
    /// `run_next`/`run_prev` fields of `SchedEntity`.
    run_queue: RunQueue,
    /// Deadline wheel backing `timer_insert`/`timer_cancel`/`timer_advance`.
    timer_wheel: TimerWheel,
}
56
impl ProcessManager {
    /// Const constructor used to initialize the `PROCESSES` static; the
    /// manager is not usable until `init` has run.
    pub const fn empty() -> Self {
        Self {
            pid_table: PidTable::empty(),
            run_queue: RunQueue::new(),
            timer_wheel: TimerWheel::new(),
        }
    }

    /// One-time runtime initialization of the PID table. The allocator
    /// parameter is currently unused — presumably kept for interface
    /// stability; TODO confirm with callers.
    pub fn init(&mut self, _allocator: &mut BitmapFrameAllocator) {
        self.pid_table.init();
    }

    /// Total number of PID slots the table can address.
    pub fn capacity(&self) -> usize {
        self.pid_table.capacity() as usize
    }

    /// Test-only: number of occupied PID slots.
    #[cfg(lancer_test)]
    pub fn pid_count(&self) -> u32 {
        self.pid_table.count()
    }

    /// Direct mutable access to the backing PID table.
    pub fn pid_table_mut(&mut self) -> &mut PidTable {
        &mut self.pid_table
    }

    /// Test-only: number of leaf nodes currently held by the PID table.
    #[cfg(lancer_test)]
    pub fn pid_table_leaf_count(&self) -> u16 {
        self.pid_table.leaf_count()
    }

    /// Shared view of `pid`'s `ExecContext`.
    ///
    /// The context lives in a dedicated physical frame recorded in
    /// `SchedEntity::exec_phys` (see `allocate`), reached through the
    /// phys→virt direct mapping.
    pub fn exec(&self, pid: Pid) -> Option<&ExecContext> {
        self.pid_table.get(pid).map(|sched| {
            debug_assert!(
                sched.exec_phys != 0,
                "exec() on slot with zero exec_phys (pid={})",
                pid.raw()
            );
            let virt = addr::phys_to_virt(PhysAddr::new(sched.exec_phys));
            // SAFETY: exec_phys points at the frame that `allocate`
            // initialized with `core::ptr::write` of an `ExecContext`.
            unsafe { &*(virt.as_ptr::<ExecContext>()) }
        })
    }

    /// Mutable counterpart of [`exec`](Self::exec).
    pub fn exec_mut(&mut self, pid: Pid) -> Option<&mut ExecContext> {
        self.pid_table.get(pid).map(|sched| {
            debug_assert!(
                sched.exec_phys != 0,
                "exec_mut() on slot with zero exec_phys (pid={})",
                pid.raw()
            );
            let virt = addr::phys_to_virt(PhysAddr::new(sched.exec_phys));
            // SAFETY: same frame as in `exec`; `&mut self` gives
            // exclusive access to the manager.
            unsafe { &mut *(virt.as_mut_ptr::<ExecContext>()) }
        })
    }

    /// Mutable access to the sched/exec pairs of two *distinct*
    /// processes at once. Returns `None` if either pid is absent.
    #[allow(clippy::type_complexity)]
    pub fn pair_mut(
        &mut self,
        a: Pid,
        b: Pid,
    ) -> Option<(
        (&mut SchedEntity, &mut ExecContext),
        (&mut SchedEntity, &mut ExecContext),
    )> {
        debug_assert!(a != b, "pair_mut called with same pid");

        let sched_a = self.pid_table.get_mut(a)? as *mut SchedEntity;
        let exec_phys_a = unsafe { (*sched_a).exec_phys };
        let exec_a = addr::phys_to_virt(PhysAddr::new(exec_phys_a)).as_mut_ptr::<ExecContext>();

        let sched_b = self.pid_table.get_mut(b)? as *mut SchedEntity;
        let exec_phys_b = unsafe { (*sched_b).exec_phys };
        let exec_b = addr::phys_to_virt(PhysAddr::new(exec_phys_b)).as_mut_ptr::<ExecContext>();

        // SAFETY: a != b (asserted above), so the four pointers refer to
        // disjoint objects; raw pointers only sidestep the one-`&mut`
        // per-call borrow restriction.
        Some(unsafe { ((&mut *sched_a, &mut *exec_a), (&mut *sched_b, &mut *exec_b)) })
    }

    /// Creates a new process: a fresh user PML4, one frame holding the
    /// `ExecContext`, a PID-table slot, and a PML4 refcount entry.
    ///
    /// Every failure path rolls back everything acquired before it, so a
    /// `None` return leaks nothing.
    pub fn allocate(&mut self, allocator: &mut BitmapFrameAllocator) -> Option<CreatedPid> {
        let pml4_phys = Pml4Phys::from_create(address_space::create_user_pml4(allocator)?);

        let exec_frame = match allocator.allocate_contiguous(1) {
            Some(phys) => phys,
            None => {
                // Roll back: PML4 only.
                address_space::teardown_user_space(pml4_phys.raw(), allocator);
                return None;
            }
        };

        // pid/generation here are placeholders; the table returns the
        // real pair on allocation.
        let sched = make_default_sched(Pid::new(0), Generation::new(0), exec_frame.as_u64());
        let (pid, _generation) = match self.pid_table.allocate(sched, allocator) {
            Some(pair) => pair,
            None => {
                // Roll back: exec frame + PML4.
                BitmapFrameAllocator::free_frame_by_addr(exec_frame);
                address_space::teardown_user_space(pml4_phys.raw(), allocator);
                return None;
            }
        };

        if address_space::pml4_ref_create(pml4_phys.raw(), pid).is_err() {
            // Roll back: pid slot + exec frame + PML4.
            let _ = self.pid_table.free(pid);
            BitmapFrameAllocator::free_frame_by_addr(exec_frame);
            address_space::teardown_user_space(pml4_phys.raw(), allocator);
            return None;
        }

        let exec = make_default_exec(pml4_phys, false);
        let exec_virt = addr::phys_to_virt(exec_frame);
        // SAFETY: exec_frame is freshly allocated; writing the initial
        // ExecContext establishes the invariant `exec`/`exec_mut` rely on.
        unsafe {
            core::ptr::write(exec_virt.as_mut_ptr::<ExecContext>(), exec);
        }

        let exec_ref = self.exec_mut(pid).unwrap();
        exec_ref.seal_context();

        Some(CreatedPid::trust(pid))
    }

    /// Fallible sched-entity lookup. Debug-asserts that an occupied slot
    /// is never in the `Free` state.
    pub fn get(&self, pid: Pid) -> Option<&SchedEntity> {
        self.pid_table.get(pid).inspect(|s| {
            debug_assert!(
                s.state() != ProcessState::Free,
                "occupied pid_table slot has Free state for pid {}",
                pid.raw()
            );
        })
    }

    /// Mutable counterpart of [`get`](Self::get).
    pub fn get_mut(&mut self, pid: Pid) -> Option<&mut SchedEntity> {
        self.pid_table.get_mut(pid).inspect(|s| {
            debug_assert!(
                s.state() != ProcessState::Free,
                "occupied pid_table slot has Free state for pid {}",
                pid.raw()
            );
        })
    }

    /// Produces the typed `Created`-state handle for `pid`, or `None` if
    /// the process does not exist or has left the `Created` state.
    pub fn as_created(&self, pid: Pid) -> Option<CreatedPid> {
        self.get(pid)
            .filter(|s| s.state() == ProcessState::Created)
            .map(|_| CreatedPid::trust(pid))
    }

    /// Transitions a freshly created process to `Ready` and places it on
    /// the run queue.
    pub fn start(&mut self, created: CreatedPid) -> Result<(), crate::error::KernelError> {
        self[created.pid()].transition_to(ProcessState::Ready)?;
        self.enqueue_ready(created.pid());
        Ok(())
    }

    /// Test-only: mimic the scheduler picking `pid` — unlink it from the
    /// run queue and force the `Running` state.
    #[cfg(lancer_test)]
    pub fn simulate_dispatch(&mut self, pid: Pid) {
        self.remove_from_run_queue(pid);
        self[pid]
            .transition_to(ProcessState::Running)
            .expect("simulate_dispatch: transition to Running failed");
    }

    /// Links `pid` into the run queue at its effective priority.
    ///
    /// The chosen priority is cached in `run_priority` so a later
    /// `remove_from_run_queue` unlinks from the same bucket even if the
    /// effective priority changes in between.
    pub fn enqueue_ready(&mut self, pid: Pid) {
        let entry = &self[pid];
        // Intrusive links must be clear; double-enqueue would corrupt
        // the queue.
        debug_assert!(
            entry.run_next == super::NONE_SENTINEL && entry.run_prev == super::NONE_SENTINEL,
            "enqueue_ready: pid {} already linked (next={}, prev={})",
            pid.raw(),
            entry.run_next,
            entry.run_prev,
        );
        let prio = entry.effective_priority().raw();
        self[pid].run_priority = prio;
        self.run_queue.enqueue(pid.raw(), prio, &mut self.pid_table);
    }

    /// Unlinks `pid` from the run-queue bucket recorded at enqueue time.
    pub fn remove_from_run_queue(&mut self, pid: Pid) {
        let prio = self[pid].run_priority;
        self.run_queue.remove(pid.raw(), prio, &mut self.pid_table);
    }

    /// Wakes a blocked process: clears its blocked state (consuming the
    /// `BlockedPid` proof), cancels any pending timeout, and re-queues it.
    pub fn unblock_and_enqueue(
        &mut self,
        pid: Pid,
        proof: BlockedPid,
    ) -> Result<(), crate::error::KernelError> {
        self[pid].unblock(proof)?;
        self.timer_cancel(pid);
        self.enqueue_ready(pid);
        Ok(())
    }

    /// Pops the highest-priority runnable pid, or `None` when the queue
    /// is empty (or the raw id fails `Pid::try_new`).
    pub fn dequeue_highest(&mut self) -> Option<Pid> {
        self.run_queue
            .dequeue_highest(&mut self.pid_table)
            .and_then(|(raw, _)| Pid::try_new(raw))
    }

    /// Schedules a wake-up for `pid` at absolute tick `deadline`.
    pub fn timer_insert(&mut self, pid: Pid, deadline: u64) {
        self.timer_wheel
            .insert(pid.raw(), deadline, &mut self.pid_table);
    }

    /// Removes any pending timer entry for `pid`.
    pub fn timer_cancel(&mut self, pid: Pid) {
        self.timer_wheel.cancel(pid.raw(), &mut self.pid_table);
    }

    /// Seeds the wheel's notion of the current tick.
    pub fn timer_seed(&mut self, tick: u64) {
        self.timer_wheel.seed_tick(tick);
    }

    /// The wheel's current tick.
    pub fn timer_current_tick(&self) -> u64 {
        self.timer_wheel.current_tick()
    }

    /// Advances the wheel to `now`, invoking `callback` with the raw id
    /// of every expired entry.
    pub fn timer_advance(&mut self, now: u64, callback: impl FnMut(u32)) {
        self.timer_wheel
            .advance(now, &mut self.pid_table, callback);
    }

    /// Clears `reply_target == Some(target)` in every occupied slot's
    /// `ExecContext`, so no process keeps a reply route to `target`.
    pub fn clear_reply_targets_for(&mut self, target: Pid) {
        (0..MAX_PIDS as u32)
            .filter_map(Pid::try_new)
            .for_each(|pid| {
                let Some(sched) = self.pid_table.get(pid) else {
                    return;
                };
                let exec_phys = sched.exec_phys;
                // SAFETY: occupied slots carry the exec frame written by
                // `allocate`. NOTE(review): unlike `exec()`, there is no
                // zero-exec_phys debug_assert here — confirm occupied
                // slots can never have exec_phys == 0.
                let exec = unsafe {
                    &mut *(addr::phys_to_virt(PhysAddr::new(exec_phys)).as_mut_ptr::<ExecContext>())
                };
                if exec.reply_target == Some(target) {
                    exec.reply_target = None;
                }
            });
    }

    /// First half of death: detach `pid` from IPC while its slot still
    /// exists — endpoint wait queues, death notification (waking waiters
    /// and bound receivers), and reply targets pointing at it.
    fn zombify_inner(&mut self, pid: Pid, pool: &mut crate::cap::pool::ObjectPool) {
        crate::ipc::endpoint::remove_from_queues(pid, pool, self);

        if let Some((notif_id, notif_gen, bits)) =
            self.get(pid).and_then(|s| s.death_notification())
        {
            if let Ok(notif) =
                pool.write_as::<lancer_core::object_layout::NotificationObject>(notif_id, notif_gen)
                && crate::ipc::notification::signal_inner(notif, bits)
            {
                crate::ipc::notification::drain_and_wake(notif, self);
            }
            let _ = crate::ipc::notification::wake_bound_receivers_with_pool(
                notif_id, notif_gen, bits, self, pool,
            );
        }

        self.clear_reply_targets_for(pid);
    }

    /// Second half of death: release everything `pid` owns and free its
    /// slot — sched-context detach, root-CNode release (draining when it
    /// was the last reference), PML4 refcount release (tearing down the
    /// address space when last), the PID slot, and the exec frame
    /// (unless it came from an untyped object).
    fn teardown_inner(&mut self, pid: Pid, pool: &mut crate::cap::pool::ObjectPool) {
        let exec = match self.exec(pid) {
            Some(e) => e,
            None => return,
        };

        // Snapshot what we need before mutating the context.
        let pml4 = exec.pml4_phys.raw();
        let root_cnode = exec.root_cnode;
        let from_untyped = exec.from_untyped;

        self.exec_mut(pid).unwrap().root_cnode = None;

        // Detach the scheduling-context object, if one is attached.
        if let Some((sc_id, sc_gen)) = self[pid].sched_context()
            && let Ok(sc) =
                pool.write_as::<lancer_core::object_layout::SchedContextObject>(sc_id, sc_gen)
        {
            sc.attached_pid = lancer_core::header::NONE_SENTINEL;
        }

        if let Some((cnode_id, cnode_gen)) = root_cnode {
            match pool.dec_ref_phys(cnode_id, cnode_gen) {
                // Last reference to an actual CNode: drain its slots and
                // free the object.
                Some((phys, lancer_core::object_tag::ObjectTag::CNode)) => {
                    crate::cap::derivation::unlink_child(pool, cnode_id);
                    // SAFETY: dec_ref_phys reported this phys as a CNode.
                    let obj = unsafe {
                        &*(crate::mem::addr::phys_to_virt(PhysAddr::new(phys))
                            .as_ptr::<lancer_core::object_layout::CNodeObject>())
                    };
                    crate::cap::cnode::drain_cnode_phys(
                        PhysAddr::new(obj.slots_phys),
                        obj.size_bits,
                        obj.frame_count,
                        pool,
                        self,
                    );
                    crate::cap::kernel_objects::free_slot(phys);
                }
                // Last reference, but some other tag: generic cleanup.
                Some((phys, tag)) => {
                    crate::cap::derivation::unlink_child(pool, cnode_id);
                    crate::cap::ops::cleanup_by_tag(tag, phys);
                }
                // Still referenced elsewhere: nothing to free here.
                None => {}
            }
        }

        let mut allocator = BitmapFrameAllocator;
        match address_space::pml4_ref_release(pml4) {
            Pml4ReleaseResult::LastRef => address_space::teardown_user_space(pml4, &mut allocator),
            Pml4ReleaseResult::StillShared => {}
        }

        // Free the PID slot, then the exec frame — but only when the
        // frame was bitmap-allocated (from_untyped frames are skipped).
        let exec_phys = self.pid_table.get(pid).map(|s| PhysAddr::new(s.exec_phys));
        let _ = self.pid_table.free(pid);

        if !from_untyped {
            exec_phys.inspect(|&phys| {
                BitmapFrameAllocator::free_frame_by_addr(phys);
            });
        }
    }

    /// Transitions `pid` to `Zombie` and detaches it from scheduling and
    /// IPC; resources stay allocated until `reap`.
    ///
    /// Returns `false` when the pid is unknown, already `Free`/`Zombie`,
    /// or the state transition is rejected.
    pub fn zombify(&mut self, pid: Pid) -> bool {
        let sched = match self.pid_table.get_mut(pid) {
            Some(s) => s,
            None => return false,
        };

        if matches!(sched.state(), ProcessState::Free | ProcessState::Zombie) {
            return false;
        }

        // Capture Ready-ness before the transition; used below to decide
        // whether to unlink from the run queue.
        let was_ready = sched.state() == ProcessState::Ready;

        if sched.zombify_state().is_err() {
            crate::kprintln!(
                "[proc] BUG: pid {} failed -> Zombie (state={:?})",
                pid.raw(),
                sched.state()
            );
            return false;
        }

        if was_ready {
            self.remove_from_run_queue(pid);
        }
        self.timer_cancel(pid);

        let mut pool = crate::cap::pool::POOL.lock();
        self.zombify_inner(pid, &mut pool);

        true
    }

    /// Frees all resources of a `Zombie`; a no-op for any other state.
    pub fn reap(&mut self, pid: Pid, _allocator: &mut BitmapFrameAllocator) {
        let is_zombie = self
            .pid_table
            .get(pid)
            .is_some_and(|s| s.state() == ProcessState::Zombie);

        if !is_zombie {
            return;
        }

        let mut pool = crate::cap::pool::POOL.lock();
        self.teardown_inner(pid, &mut pool);
    }

    /// Full kill: zombify (if needed) then reap, followed by a PID-table
    /// reclaim pass. Returns `false` only if zombification failed; an
    /// unknown pid counts as already destroyed (`true`).
    pub fn destroy(&mut self, pid: Pid, allocator: &mut BitmapFrameAllocator) -> bool {
        let sched = match self.pid_table.get(pid) {
            Some(s) => s,
            None => return true,
        };

        if sched.state() != ProcessState::Zombie && !self.zombify(pid) {
            return false;
        }

        self.reap(pid, allocator);
        self.pid_table.try_reclaim();
        true
    }

    /// Destroy variant for callers that already hold the object pool:
    /// `zombify`/`reap` lock `POOL` internally, whereas this path takes
    /// the pool as a parameter. Transition errors are deliberately
    /// ignored — this path always tears the process down.
    pub fn force_destroy_with_pool(&mut self, pid: Pid, pool: &mut crate::cap::pool::ObjectPool) {
        let sched = match self.pid_table.get(pid) {
            Some(s) => s,
            None => return,
        };

        if !matches!(sched.state(), ProcessState::Zombie) {
            let was_ready = sched.state() == ProcessState::Ready;
            if let Some(s) = self.pid_table.get_mut(pid) {
                let _ = s.zombify_state();
            }
            if was_ready {
                self.remove_from_run_queue(pid);
            }
            self.timer_cancel(pid);
            self.zombify_inner(pid, pool);
        }

        self.teardown_inner(pid, pool);
    }
}
451
452impl core::ops::Index<Pid> for ProcessManager {
453 type Output = SchedEntity;
454 fn index(&self, pid: Pid) -> &SchedEntity {
455 self.pid_table
456 .get(pid)
457 .unwrap_or_else(|| panic!("ProcessManager: invalid pid {}", pid.raw()))
458 }
459}
460
461impl core::ops::IndexMut<Pid> for ProcessManager {
462 fn index_mut(&mut self, pid: Pid) -> &mut SchedEntity {
463 self.pid_table
464 .get_mut(pid)
465 .unwrap_or_else(|| panic!("ProcessManager: invalid pid {}", pid.raw()))
466 }
467}
468
469impl core::ops::Index<&BlockedPid> for ProcessManager {
470 type Output = SchedEntity;
471 fn index(&self, bp: &BlockedPid) -> &SchedEntity {
472 &self[bp.pid()]
473 }
474}
475
476impl core::ops::IndexMut<&BlockedPid> for ProcessManager {
477 fn index_mut(&mut self, bp: &BlockedPid) -> &mut SchedEntity {
478 &mut self[bp.pid()]
479 }
480}
481
482impl core::ops::Index<CreatedPid> for ProcessManager {
483 type Output = SchedEntity;
484 fn index(&self, cp: CreatedPid) -> &SchedEntity {
485 &self[cp.pid()]
486 }
487}
488
489impl core::ops::IndexMut<CreatedPid> for ProcessManager {
490 fn index_mut(&mut self, cp: CreatedPid) -> &mut SchedEntity {
491 &mut self[cp.pid()]
492 }
493}
494
/// Global process-manager singleton, guarded by an interrupt-safe mutex.
/// NOTE(review): the `0` const parameter's meaning is defined by
/// `IrqMutex` (presumably a lock level/class) — confirm in `crate::sync`.
pub static PROCESSES: IrqMutex<ProcessManager, 0> = IrqMutex::new(ProcessManager::empty());