//! Kernel-object allocation helpers: endpoints, notifications, CNodes,
//! untyped memory, and scheduling contexts (including test/bootstrap paths).
1use crate::cap::kernel_objects;
2use crate::cap::pool::ObjectPool;
3use crate::error::KernelError;
4use crate::mem::phys::BitmapFrameAllocator;
5use crate::types::{Generation, ObjPhys, Pid, Priority};
6use lancer_core::header::KernelObjectHeader;
7use lancer_core::object_layout::{
8 CNodeObject, EndpointObject, KernelObject, NotificationObject, SchedContextObject,
9 UntypedObject,
10};
11use lancer_core::object_tag::ObjectTag;
12
13pub fn alloc_typed<T: KernelObject>(
14 pool: &mut ObjectPool,
15 tag: ObjectTag,
16 obj: T,
17) -> Result<(ObjPhys, Generation), KernelError> {
18 let phys = kernel_objects::alloc_slot().ok_or(KernelError::PoolExhausted)?;
19 kernel_objects::write_at(phys, obj);
20 pool.register_object(phys, tag).map_err(|e| {
21 kernel_objects::free_slot(phys);
22 e
23 })
24}
25
26fn alloc_default<T: KernelObject>(
27 pool: &mut ObjectPool,
28) -> Result<(ObjPhys, Generation), KernelError> {
29 let header = KernelObjectHeader::new(T::TAG, 0, 64);
30 alloc_typed(pool, T::TAG, T::init_default(header))
31}
32
33pub fn alloc_endpoint(pool: &mut ObjectPool) -> Result<(ObjPhys, Generation), KernelError> {
34 alloc_default::<EndpointObject>(pool)
35}
36
37pub fn alloc_notification(pool: &mut ObjectPool) -> Result<(ObjPhys, Generation), KernelError> {
38 alloc_default::<NotificationObject>(pool)
39}
40
41pub fn alloc_endpoint_cap() -> (ObjPhys, Generation, crate::cap::table::CapRef) {
42 let mut pool = crate::cap::pool::POOL.lock();
43 let (id, generation) = alloc_endpoint(&mut pool).expect("alloc endpoint");
44 let cap = crate::cap::table::CapRef::new(ObjectTag::Endpoint, id, crate::cap::table::Rights::ALL, generation);
45 (id, generation, cap)
46}
47
48pub fn alloc_notification_cap() -> (ObjPhys, Generation, crate::cap::table::CapRef) {
49 let mut pool = crate::cap::pool::POOL.lock();
50 let (id, generation) = alloc_notification(&mut pool).expect("alloc notification");
51 let cap = crate::cap::table::CapRef::new(ObjectTag::Notification, id, crate::cap::table::Rights::ALL, generation);
52 (id, generation, cap)
53}
54
55pub fn alloc_cnode(pool: &mut ObjectPool) -> Result<(ObjPhys, Generation, u8), KernelError> {
56 let size_bits = crate::proc::ROOT_CNODE_SIZE_BITS;
57 let allocator = &crate::mem::phys::BitmapFrameAllocator;
58 let cnode_data = crate::cap::cnode::create_cnode(size_bits, allocator)?;
59 let frame_count = cnode_data.frame_count;
60 let header = KernelObjectHeader::new(ObjectTag::CNode, 0, 64);
61 let mut cnode_obj = CNodeObject::init_default(header);
62 cnode_obj.slots_phys = cnode_data.slots_phys.as_u64();
63 cnode_obj.size_bits = cnode_data.size_bits;
64 cnode_obj.frame_count = cnode_data.frame_count;
65 let (id, generation) = alloc_typed(pool, ObjectTag::CNode, cnode_obj)?;
66 Ok((id, generation, frame_count))
67}
68
69pub fn bootstrap_test_cnode(pid: Pid, ptable: &mut crate::proc::ProcessManager) {
70 let mut pool = crate::cap::pool::POOL.lock();
71 let (cnode_id, cnode_gen, frame_count) = alloc_cnode(&mut pool).expect("alloc cnode");
72 let exec = ptable.exec_mut(pid).expect("get exec");
73 exec.root_cnode = Some((cnode_id, cnode_gen));
74 exec.cnode_depth = 64;
75 exec.root_guard_bits = 64 - crate::proc::ROOT_CNODE_SIZE_BITS;
76 exec.root_guard_value = 0;
77 exec.charge_frames(frame_count as u16)
78 .expect("charge frames");
79}
80
81pub fn allocate_small_untyped(
82 ptable: &crate::sync::IrqMutexGuard<'_, crate::proc::ProcessManager, 0>,
83 size_bits: u8,
84) -> (ObjPhys, Generation, x86_64::PhysAddr) {
85 allocate_untyped_inner(ptable, size_bits, false)
86}
87
88pub fn allocate_untyped(
89 ptable: &crate::sync::IrqMutexGuard<'_, crate::proc::ProcessManager, 0>,
90 is_device: bool,
91) -> (ObjPhys, Generation, x86_64::PhysAddr) {
92 allocate_untyped_inner(ptable, 16, is_device)
93}
94
95pub fn allocate_untyped_inner(
96 ptable: &crate::sync::IrqMutexGuard<'_, crate::proc::ProcessManager, 0>,
97 size_bits: u8,
98 is_device: bool,
99) -> (ObjPhys, Generation, x86_64::PhysAddr) {
100 let frame_count = 1usize << size_bits.saturating_sub(12);
101 let allocator = &crate::mem::phys::BitmapFrameAllocator;
102 let phys_base = allocator
103 .allocate_contiguous(frame_count)
104 .expect("alloc untyped backing");
105 let base_virt = crate::mem::addr::phys_to_virt(phys_base);
106 unsafe {
107 core::ptr::write_bytes(base_virt.as_mut_ptr::<u8>(), 0, frame_count * 4096);
108 }
109 let header = KernelObjectHeader::new(ObjectTag::Untyped, 0, 64);
110 let mut ut_obj = UntypedObject::init_default(header);
111 ut_obj.phys_base = phys_base.as_u64();
112 ut_obj.size_bits = size_bits;
113 ut_obj.is_device = is_device as u8;
114 let phys = kernel_objects::alloc_slot().expect("alloc ut slot");
115 kernel_objects::write_at(phys, ut_obj);
116 let (id, generation) = crate::cap::pool::POOL
117 .lock_after(ptable)
118 .register_object(phys, ObjectTag::Untyped)
119 .expect("register untyped obj");
120 (id, generation, phys_base)
121}
122
/// Result of a batch spawn: the spawned processes and the scheduling-context
/// objects attached to them, pushed in matching order per iteration.
pub struct SchedBatch {
    // PIDs of the spawned processes (at most 2048 per batch).
    pub pids: crate::static_vec::StaticVec<Pid, 2048>,
    // (object id, generation) of each process's scheduling context.
    pub sc_ids: crate::static_vec::StaticVec<(ObjPhys, Generation), 2048>,
}
127
128pub fn spawn_batch_with_sched(
129 count: usize,
130 budget_us: u64,
131 period_us: u64,
132 priority_fn: fn(usize) -> u8,
133) -> SchedBatch {
134 let mut allocator = BitmapFrameAllocator;
135 let mut ptable = crate::proc::PROCESSES.lock();
136 let mut batch = SchedBatch {
137 pids: crate::static_vec::StaticVec::new(),
138 sc_ids: crate::static_vec::StaticVec::new(),
139 };
140
141 (0..count).for_each(|i| {
142 let created = ptable
143 .allocate(&mut allocator)
144 .unwrap_or_else(|| panic!("spawn failed at {}/{}", i, count));
145 let pid = created.pid();
146
147 let priority = Priority::new(priority_fn(i));
148 let header = KernelObjectHeader::new(ObjectTag::SchedContext, 0, 64);
149 let mut sc = SchedContextObject::init_default(header);
150 sc.budget_us = budget_us;
151 sc.period_us = period_us;
152 sc.remaining_us = budget_us;
153 sc.priority = priority.raw();
154 sc.attached_pid = pid.raw();
155 let (sc_id, sc_gen) = alloc_typed(
156 &mut crate::cap::pool::POOL.lock_after(&ptable),
157 ObjectTag::SchedContext,
158 sc,
159 )
160 .unwrap_or_else(|e| panic!("sched context alloc failed at {}: {:?}", i, e));
161
162 ptable[pid].attach_sched_context(sc_id, sc_gen, priority);
163 ptable.start(created).expect("start");
164 batch.pids.push(pid).expect("pid vec overflow");
165 batch.sc_ids.push((sc_id, sc_gen)).expect("sc vec overflow");
166 });
167
168 batch
169}
170
171pub fn destroy_batch_and_verify(batch: &SchedBatch, baseline_free_frames: usize) {
172 let mut allocator = BitmapFrameAllocator;
173 let mut ptable = crate::proc::PROCESSES.lock();
174 batch.pids.iter().for_each(|&pid| {
175 ptable.destroy(pid, &mut allocator);
176 });
177 let leaves_after = ptable.pid_table_leaf_count();
178 let mut pool = crate::cap::pool::POOL.lock_after(&ptable);
179 batch.sc_ids.iter().for_each(|&(id, generation)| {
180 if let Some((phys, _)) = pool.dec_ref_phys(id, generation) {
181 kernel_objects::free_slot(phys);
182 }
183 });
184 drop(pool);
185 drop(ptable);
186
187 let after = BitmapFrameAllocator::free_frames();
188 let retained_leaves = leaves_after as usize;
189 assert!(
190 after >= baseline_free_frames.saturating_sub(retained_leaves + 2),
191 "frame leak: had {} free before, {} after (delta={}, {} retained leaf frames)",
192 baseline_free_frames,
193 after,
194 baseline_free_frames as i64 - after as i64,
195 retained_leaves,
196 );
197}
198
199pub fn dequeue_ours(
200 ptable: &mut crate::proc::ProcessManager,
201 batch: &SchedBatch,
202 limit: usize,
203 ours: &mut crate::static_vec::StaticVec<Pid, 128>,
204) {
205 let mut all = crate::static_vec::StaticVec::<Pid, 256>::new();
206 core::iter::from_fn(|| ptable.dequeue_highest())
207 .take(limit + 64)
208 .for_each(|pid| {
209 all.push(pid).expect("dequeue overflow");
210 });
211 all.iter().for_each(
212 |&pid| match batch.pids.iter().any(|&p| p == pid) {
213 true if ours.len() < limit => ours.push(pid).expect("ours overflow"),
214 _ => ptable.enqueue_ready(pid),
215 },
216 );
217}