use lancer_core::run_queue::RunQueue;
use lancer_core::timer_wheel::TimerWheel;
use x86_64::PhysAddr;

use crate::mem::addr;
use crate::mem::phys::BitmapFrameAllocator;
use crate::mem::typed_addr::Pml4Phys;
use crate::ring::RingIndex;
use crate::sync::IrqMutex;
use crate::types::{BlockedPid, CreatedPid, Generation, MAX_PIDS, Pid};

use super::address_space::{self, Pml4ReleaseResult};
use super::context::{CpuContext, FpuState, IpcMessage};
use super::pid_table::PidTable;
use super::{ExecContext, PROC_NAME_LEN, ProcessState, SchedEntity};

pub(crate) fn make_default_sched(pid: Pid, generation: Generation, exec_phys: u64) -> SchedEntity {
    SchedEntity {
        pid,
        generation,
        state: ProcessState::Created,
        exec_phys,
        ..SchedEntity::EMPTY
    }
}

pub(crate) fn make_default_exec(pml4_phys: Pml4Phys, from_untyped: bool) -> ExecContext {
    ExecContext {
        saved_context: CpuContext::zero(),
        fpu_state: FpuState::default_init(),
        pml4_phys,
        fs_base: 0,
        context_checksum: 0,
        ipc_message: IpcMessage::zero(),
        ipc_badge: 0,
        reply_target: None,
        ring_region_id: None,
        ring_sq_head: RingIndex::new(0),
        ring_cq_tail: RingIndex::new(0),
        allocated_frames: 0,
        name: [0u8; PROC_NAME_LEN],
        name_len: 0,
        root_cnode: None,
        cnode_depth: 0,
        root_guard_value: 0,
        root_guard_bits: 0,
        from_untyped,
    }
}

pub struct ProcessManager {
    pid_table: PidTable,
    run_queue: RunQueue,
    timer_wheel: TimerWheel,
}

impl ProcessManager {
    pub const fn empty() -> Self {
        Self {
            pid_table: PidTable::empty(),
            run_queue: RunQueue::new(),
            timer_wheel: TimerWheel::new(),
        }
    }

    pub fn init(&mut self, _allocator: &mut BitmapFrameAllocator) {
        self.pid_table.init();
    }

    pub fn capacity(&self) -> usize {
        self.pid_table.capacity() as usize
    }

    #[cfg(lancer_test)]
    pub fn pid_count(&self) -> u32 {
        self.pid_table.count()
    }

    pub fn pid_table_mut(&mut self) -> &mut PidTable {
        &mut self.pid_table
    }

    #[cfg(lancer_test)]
    pub fn pid_table_leaf_count(&self) -> u16 {
        self.pid_table.leaf_count()
    }

    /// The `ExecContext` lives in its own physical frame; translate the stored
    /// physical address and dereference it in the direct map.
    pub fn exec(&self, pid: Pid) -> Option<&ExecContext> {
        self.pid_table.get(pid).map(|sched| {
            debug_assert!(
                sched.exec_phys != 0,
                "exec() on slot with zero exec_phys (pid={})",
                pid.raw()
            );
            let virt = addr::phys_to_virt(PhysAddr::new(sched.exec_phys));
            unsafe { &*(virt.as_ptr::<ExecContext>()) }
        })
    }

    pub fn exec_mut(&mut self, pid: Pid) -> Option<&mut ExecContext> {
        self.pid_table.get(pid).map(|sched| {
            debug_assert!(
                sched.exec_phys != 0,
                "exec_mut() on slot with zero exec_phys (pid={})",
                pid.raw()
            );
            let virt = addr::phys_to_virt(PhysAddr::new(sched.exec_phys));
            unsafe { &mut *(virt.as_mut_ptr::<ExecContext>()) }
        })
    }

    #[allow(clippy::type_complexity)]
    pub fn pair_mut(
        &mut self,
        a: Pid,
        b: Pid,
    ) -> Option<(
        (&mut SchedEntity, &mut ExecContext),
        (&mut SchedEntity, &mut ExecContext),
    )> {
        debug_assert!(a != b, "pair_mut called with same pid");
        // Raw pointers sidestep the borrow checker to hand out two mutable
        // borrows at once; the `a != b` assert above is what makes this sound.
        let sched_a = self.pid_table.get_mut(a)? as *mut SchedEntity;
        let exec_phys_a = unsafe { (*sched_a).exec_phys };
        let exec_a = addr::phys_to_virt(PhysAddr::new(exec_phys_a)).as_mut_ptr::<ExecContext>();
        let sched_b = self.pid_table.get_mut(b)? as *mut SchedEntity;
        let exec_phys_b = unsafe { (*sched_b).exec_phys };
        let exec_b = addr::phys_to_virt(PhysAddr::new(exec_phys_b)).as_mut_ptr::<ExecContext>();
        Some(unsafe { ((&mut *sched_a, &mut *exec_a), (&mut *sched_b, &mut *exec_b)) })
    }
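
    /// Allocate a fresh process: a user PML4, one physical frame for the
    /// `ExecContext`, and a slot in the PID table. Each failure path below
    /// unwinds everything acquired so far, in reverse order, before
    /// returning `None`.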
    pub fn allocate(&mut self, allocator: &mut BitmapFrameAllocator) -> Option<CreatedPid> {
        let pml4_phys = Pml4Phys::from_create(address_space::create_user_pml4(allocator)?);
        let exec_frame = match allocator.allocate_contiguous(1) {
            Some(phys) => phys,
            None => {
                address_space::teardown_user_space(pml4_phys.raw(), allocator);
                return None;
            }
        };
        let sched = make_default_sched(Pid::new(0), Generation::new(0), exec_frame.as_u64());
        let (pid, _generation) = match self.pid_table.allocate(sched, allocator) {
            Some(pair) => pair,
            None => {
                BitmapFrameAllocator::free_frame_by_addr(exec_frame);
                address_space::teardown_user_space(pml4_phys.raw(), allocator);
                return None;
            }
        };
        if address_space::pml4_ref_create(pml4_phys.raw(), pid).is_err() {
            let _ = self.pid_table.free(pid);
            BitmapFrameAllocator::free_frame_by_addr(exec_frame);
            address_space::teardown_user_space(pml4_phys.raw(), allocator);
            return None;
        }
        let exec = make_default_exec(pml4_phys, false);
        let exec_virt = addr::phys_to_virt(exec_frame);
        unsafe {
            core::ptr::write(exec_virt.as_mut_ptr::<ExecContext>(), exec);
        }
        let exec_ref = self.exec_mut(pid).unwrap();
        exec_ref.seal_context();
        Some(CreatedPid::trust(pid))
    }

    pub fn get(&self, pid: Pid) -> Option<&SchedEntity> {
        self.pid_table.get(pid).inspect(|s| {
            debug_assert!(
                s.state() != ProcessState::Free,
                "occupied pid_table slot has Free state for pid {}",
                pid.raw()
            );
        })
    }

    pub fn get_mut(&mut self, pid: Pid) -> Option<&mut SchedEntity> {
        self.pid_table.get_mut(pid).inspect(|s| {
            debug_assert!(
                s.state() != ProcessState::Free,
                "occupied pid_table slot has Free state for pid {}",
                pid.raw()
            );
        })
    }

    pub fn as_created(&self, pid: Pid) -> Option<CreatedPid> {
        self.get(pid)
            .filter(|s| s.state() == ProcessState::Created)
            .map(|_| CreatedPid::trust(pid))
    }

    pub fn start(&mut self, created: CreatedPid) -> Result<(), crate::error::KernelError> {
        self[created.pid()].transition_to(ProcessState::Ready)?;
        self.enqueue_ready(created.pid());
        Ok(())
    }

    #[cfg(lancer_test)]
    pub fn simulate_dispatch(&mut self, pid: Pid) {
        self.remove_from_run_queue(pid);
        self[pid]
            .transition_to(ProcessState::Running)
            .expect("simulate_dispatch: transition to Running failed");
    }

    pub fn enqueue_ready(&mut self, pid: Pid) {
        let entry = &self[pid];
        debug_assert!(
            entry.run_next == super::NONE_SENTINEL && entry.run_prev == super::NONE_SENTINEL,
            "enqueue_ready: pid {} already linked (next={}, prev={})",
            pid.raw(),
            entry.run_next,
            entry.run_prev,
        );
        let prio = entry.effective_priority().raw();
        self[pid].run_priority = prio;
        self.run_queue.enqueue(pid.raw(), prio, &mut self.pid_table);
    }

    pub fn remove_from_run_queue(&mut self, pid: Pid) {
        let prio = self[pid].run_priority;
        self.run_queue.remove(pid.raw(), prio, &mut self.pid_table);
    }

    pub fn unblock_and_enqueue(
        &mut self,
        pid: Pid,
        proof: BlockedPid,
    ) -> Result<(), crate::error::KernelError> {
        self[pid].unblock(proof)?;
        self.timer_cancel(pid);
        self.enqueue_ready(pid);
        Ok(())
    }

    pub fn dequeue_highest(&mut self) -> Option<Pid> {
        self.run_queue
            .dequeue_highest(&mut self.pid_table)
            .and_then(|(raw, _)| Pid::try_new(raw))
    }

    pub fn timer_insert(&mut self, pid: Pid, deadline: u64) {
        self.timer_wheel
            .insert(pid.raw(), deadline, &mut self.pid_table);
    }

    pub fn timer_cancel(&mut self, pid: Pid) {
        self.timer_wheel.cancel(pid.raw(), &mut self.pid_table);
    }

    pub fn timer_seed(&mut self, tick: u64) {
        self.timer_wheel.seed_tick(tick);
    }

    pub fn timer_current_tick(&self) -> u64 {
        self.timer_wheel.current_tick()
    }

    pub fn timer_advance(&mut self, now: u64, callback: impl FnMut(u32)) {
        self.timer_wheel
            .advance(now, &mut self.pid_table, callback);
    }
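
    /// Walk every live PID and drop any `reply_target` that points at
    /// `target`, so a dying process can never remain the destination of a
    /// pending reply. This is a full O(MAX_PIDS) sweep over the table.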
    pub fn clear_reply_targets_for(&mut self, target: Pid) {
        (0..MAX_PIDS as u32)
            .filter_map(Pid::try_new)
            .for_each(|pid| {
                let Some(sched) = self.pid_table.get(pid) else {
                    return;
                };
                let exec_phys = sched.exec_phys;
                let exec = unsafe {
                    &mut *(addr::phys_to_virt(PhysAddr::new(exec_phys))
                        .as_mut_ptr::<ExecContext>())
                };
                if exec.reply_target == Some(target) {
                    exec.reply_target = None;
                }
            });
    }

    fn zombify_inner(&mut self, pid: Pid, pool: &mut crate::cap::pool::ObjectPool) {
        crate::ipc::endpoint::remove_from_queues(pid, pool, self);
        if let Some((notif_id, notif_gen, bits)) =
            self.get(pid).and_then(|s| s.death_notification())
        {
            if let Ok(notif) =
                pool.write_as::<crate::ipc::notification::Notification>(notif_id, notif_gen)
                && crate::ipc::notification::signal_inner(notif, bits)
            {
                crate::ipc::notification::drain_and_wake(notif, self);
            }
            let _ = crate::ipc::notification::wake_bound_receivers_with_pool(
                notif_id, notif_gen, bits, self, pool,
            );
        }
        self.clear_reply_targets_for(pid);
    }

    fn teardown_inner(&mut self, pid: Pid, pool: &mut crate::cap::pool::ObjectPool) {
        // Snapshot everything we need from the exec context before we start
        // tearing state down.
        let exec = match self.exec(pid) {
            Some(e) => e,
            None => return,
        };
        let pml4 = exec.pml4_phys.raw();
        let root_cnode = exec.root_cnode;
        let from_untyped = exec.from_untyped;
        self.exec_mut(pid).unwrap().root_cnode = None;
        if let Some((sc_id, sc_gen)) = self[pid].sched_context()
            && let Ok(sc) =
                pool.write_as::<crate::cap::sched_context::SchedContext>(sc_id, sc_gen)
        {
            sc.attached_pid = lancer_core::header::NONE_SENTINEL;
        }
        if let Some((cnode_id, cnode_gen)) = root_cnode {
            match pool.dec_ref_phys(cnode_id, cnode_gen) {
                Some((phys, lancer_core::object_tag::ObjectTag::CNode)) => {
                    crate::cap::derivation::unlink_child(pool, cnode_id);
                    let obj = unsafe {
                        &*(crate::mem::addr::phys_to_virt(PhysAddr::new(phys))
                            .as_ptr::<crate::cap::cnode::CNode>())
                    };
                    crate::cap::cnode::drain_cnode_phys(
                        PhysAddr::new(obj.slots_phys),
                        obj.size_bits,
                        obj.frame_count,
                        pool,
                        self,
                    );
                    crate::cap::kernel_objects::free_slot(phys);
                }
                Some((phys, tag)) => {
                    crate::cap::derivation::unlink_child(pool, cnode_id);
                    crate::cap::ops::cleanup_by_tag(tag, phys);
                }
                None => {}
            }
        }
        let mut allocator = BitmapFrameAllocator;
        match address_space::pml4_ref_release(pml4) {
            Pml4ReleaseResult::LastRef => address_space::teardown_user_space(pml4, &mut allocator),
            Pml4ReleaseResult::StillShared => {}
        }
        let exec_phys = self.pid_table.get(pid).map(|s| PhysAddr::new(s.exec_phys));
        let _ = self.pid_table.free(pid);
        if !from_untyped {
            exec_phys.inspect(|&phys| {
                BitmapFrameAllocator::free_frame_by_addr(phys);
            });
        }
    }

    pub fn zombify(&mut self, pid: Pid) -> bool {
        let sched = match self.pid_table.get_mut(pid) {
            Some(s) => s,
            None => return false,
        };
        if matches!(sched.state(), ProcessState::Free | ProcessState::Zombie) {
            return false;
        }
        let was_ready = sched.state() == ProcessState::Ready;
        if sched.zombify_state().is_err() {
            crate::kprintln!(
                "[proc] BUG: pid {} failed -> Zombie (state={:?})",
                pid.raw(),
                sched.state()
            );
            return false;
        }
        if was_ready {
            self.remove_from_run_queue(pid);
        }
        self.timer_cancel(pid);
        let mut pool = crate::cap::pool::POOL.lock();
        self.zombify_inner(pid, &mut pool);
        true
    }

    pub fn reap(&mut self, pid: Pid, _allocator: &mut BitmapFrameAllocator) {
        let is_zombie = self
            .pid_table
            .get(pid)
            .is_some_and(|s| s.state() == ProcessState::Zombie);
        if !is_zombie {
            return;
        }
        let mut pool = crate::cap::pool::POOL.lock();
        self.teardown_inner(pid, &mut pool);
    }
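
    /// Zombify (if needed) and reap in one step. Returns `true` when the pid
    /// is gone afterwards, including the case where it never existed; returns
    /// `false` only if the zombify transition was refused.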
    pub fn destroy(&mut self, pid: Pid, allocator: &mut BitmapFrameAllocator) -> bool {
        let sched = match self.pid_table.get(pid) {
            Some(s) => s,
            None => return true,
        };
        if sched.state() != ProcessState::Zombie && !self.zombify(pid) {
            return false;
        }
        self.reap(pid, allocator);
        self.pid_table.try_reclaim();
        true
    }

    pub fn force_destroy_with_pool(&mut self, pid: Pid, pool: &mut crate::cap::pool::ObjectPool) {
        let sched = match self.pid_table.get(pid) {
            Some(s) => s,
            None => return,
        };
        if !matches!(sched.state(), ProcessState::Zombie) {
            let was_ready = sched.state() == ProcessState::Ready;
            if let Some(s) = self.pid_table.get_mut(pid) {
                let _ = s.zombify_state();
            }
            if was_ready {
                self.remove_from_run_queue(pid);
            }
            self.timer_cancel(pid);
            self.zombify_inner(pid, pool);
        }
        self.teardown_inner(pid, pool);
    }
}

impl core::ops::Index<Pid> for ProcessManager {
    type Output = SchedEntity;

    fn index(&self, pid: Pid) -> &SchedEntity {
        self.pid_table
            .get(pid)
            .unwrap_or_else(|| panic!("ProcessManager: invalid pid {}", pid.raw()))
    }
}

impl core::ops::IndexMut<Pid> for ProcessManager {
    fn index_mut(&mut self, pid: Pid) -> &mut SchedEntity {
        self.pid_table
            .get_mut(pid)
            .unwrap_or_else(|| panic!("ProcessManager: invalid pid {}", pid.raw()))
    }
}

impl core::ops::Index<&BlockedPid> for ProcessManager {
    type Output = SchedEntity;

    fn index(&self, bp: &BlockedPid) -> &SchedEntity {
        &self[bp.pid()]
    }
}

impl core::ops::IndexMut<&BlockedPid> for ProcessManager {
    fn index_mut(&mut self, bp: &BlockedPid) -> &mut SchedEntity {
        &mut self[bp.pid()]
    }
}

impl core::ops::Index<CreatedPid> for ProcessManager {
    type Output = SchedEntity;

    fn index(&self, cp: CreatedPid) -> &SchedEntity {
        &self[cp.pid()]
    }
}

impl core::ops::IndexMut<CreatedPid> for ProcessManager {
    fn index_mut(&mut self, cp: CreatedPid) -> &mut SchedEntity {
        &mut self[cp.pid()]
    }
}

pub static PROCESSES: IrqMutex<ProcessManager> = IrqMutex::new(ProcessManager::empty());
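
// A minimal lifecycle sketch, assuming a test environment where the
// `BitmapFrameAllocator` has frames available and the PID table has free
// slots. The helper name `lifecycle_smoke` is illustrative, not part of the
// kernel API; it only strings together calls defined above.
#[cfg(lancer_test)]
pub fn lifecycle_smoke(pm: &mut ProcessManager, allocator: &mut BitmapFrameAllocator) {
    // Created: PML4, exec frame, and PID slot are acquired together.
    let created = pm.allocate(allocator).expect("allocate: out of PIDs or frames");
    let pid = created.pid();
    // Created -> Ready: the typed `CreatedPid` proof gates this transition.
    pm.start(created).expect("start: Created -> Ready");
    // Ready -> Running: test-only helper that dequeues and transitions.
    pm.simulate_dispatch(pid);
    // Running -> Zombie: IPC queues, death notification, and reply targets are handled.
    assert!(pm.zombify(pid));
    // Zombie -> Free: address space and exec frame are released.
    pm.reap(pid, allocator);
}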