//! Capability-object lifecycle: cleanup, creation, derivation, identification,
//! and revocation of kernel objects addressed through CNodes.
1use lancer_core::header::{KernelObjectHeader, NONE_SENTINEL};
2use lancer_core::object_layout::{
3 CNodeObject, EndpointObject, FrameObject, IrqHandlerObject, KernelObject, NotificationObject,
4 SchedContextObject,
5};
6use lancer_core::object_tag::ObjectTag;
7
8use super::object::PidQueue;
9use super::pool::POOL;
10use super::table::{CapRef, Rights};
11use crate::error::KernelError;
12use crate::mem::addr;
13use crate::types::{Generation, ObjPhys, Pid};
14use x86_64::PhysAddr;
15
16pub fn cleanup_by_tag(tag: ObjectTag, phys: u64) {
17 match tag {
18 ObjectTag::IrqHandler => {
19 let obj =
20 unsafe { &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<IrqHandlerObject>()) };
21 match crate::arch::idt::IrqVector::try_new(obj.vector) {
22 Some(vec) => crate::irq::unbind_by_vector(vec),
23 None => {}
24 }
25 }
26 ObjectTag::CNode => {
27 let obj =
28 unsafe { &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<CNodeObject>()) };
29 let cnode_data = super::object::CNodeData {
30 slots_phys: PhysAddr::new(obj.slots_phys),
31 size_bits: obj.size_bits,
32 frame_count: obj.frame_count,
33 };
34 super::cnode::destroy_cnode(&cnode_data, &crate::mem::phys::BitmapFrameAllocator);
35 super::kernel_objects::free_slot(phys);
36 }
37 ObjectTag::Frame => {
38 let obj =
39 unsafe { &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<FrameObject>()) };
40 let idx = obj.frame_table_idx;
41 match idx != NONE_SENTINEL as u16 {
42 true => {
43 let mut ft = super::frame_table::FRAME_TABLE.lock();
44 ft.get(idx).iommu_mapping().inspect(|mapping| {
45 crate::iommu::unmap_frame_iommu(mapping.bus, mapping.devfn, mapping.iova);
46 });
47 ft.clear(idx);
48 }
49 false => {}
50 }
51 super::kernel_objects::free_slot(phys);
52 }
53 ObjectTag::Process => {
54 super::kernel_objects::free_slot(phys);
55 }
56 ObjectTag::VRegion => {
57 super::kernel_objects::free_slot(phys);
58 }
59 ObjectTag::Endpoint
60 | ObjectTag::Notification
61 | ObjectTag::SchedContext
62 | ObjectTag::Framebuffer
63 | ObjectTag::PciDevice
64 | ObjectTag::Untyped => {}
65 }
66}
67
68pub fn cleanup_by_tag_with_ptable(
69 tag: ObjectTag,
70 phys: u64,
71 ptable: &mut crate::proc::ProcessManager,
72) {
73 match tag {
74 ObjectTag::Frame | ObjectTag::VRegion => {}
75 _ => cleanup_by_tag(tag, phys),
76 }
77 match tag {
78 ObjectTag::Endpoint => {
79 let obj =
80 unsafe { &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<EndpointObject>()) };
81 let senders = PidQueue::from_repr_c(obj.sender_head, obj.sender_tail, obj.sender_len);
82 let receivers =
83 PidQueue::from_repr_c(obj.receiver_head, obj.receiver_tail, obj.receiver_len);
84 unblock_queue(&senders, ptable);
85 unblock_queue(&receivers, ptable);
86 }
87 ObjectTag::Notification => {
88 let obj = unsafe {
89 &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<NotificationObject>())
90 };
91 (0..obj.waiter_count as usize)
92 .filter_map(|i| Pid::try_new(obj.waiters[i]))
93 .for_each(|pid| {
94 let proof = ptable[pid].blocked_proof();
95 match ptable.unblock_and_enqueue(pid, proof) {
96 Ok(()) => {
97 let exec = ptable.exec_mut(pid).unwrap();
98 exec.saved_context.rax =
99 crate::error::KernelError::InvalidObject.to_errno() as u64;
100 exec.seal_context();
101 }
102 Err(e) => {
103 crate::kprintln!(
104 "[cap] BUG: notification cleanup failed to unblock pid {}: {:?}",
105 pid.raw(),
106 e
107 );
108 }
109 }
110 });
111 }
112 ObjectTag::SchedContext => {
113 let obj = unsafe {
114 &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<SchedContextObject>())
115 };
116 Pid::try_new(obj.attached_pid).inspect(|&pid| {
117 if let Some(sched) = ptable.get_mut(pid) {
118 sched.detach_sched_context();
119 }
120 });
121 }
122 ObjectTag::Frame => {
123 let obj =
124 unsafe { &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<FrameObject>()) };
125 let frame_phys = PhysAddr::new(obj.phys_addr);
126 let owned_by_untyped = obj.header.parent_untyped != NONE_SENTINEL;
127 let idx = obj.frame_table_idx;
128 match idx != NONE_SENTINEL as u16 {
129 true => {
130 let mut ft = super::frame_table::FRAME_TABLE.lock();
131 let entry = ft.get_mut(idx);
132 entry.for_each_mapping(|pid, vaddr| {
133 if let Some(exec) = ptable.exec(pid) {
134 let _ =
135 crate::proc::address_space::unmap_user_page(exec.pml4_phys, vaddr);
136 match crate::mem::refcount::decrement(frame_phys) {
137 Ok(0) if !owned_by_untyped => {
138 crate::mem::phys::BitmapFrameAllocator::free_frame_by_addr(
139 frame_phys,
140 )
141 }
142 Ok(_) => {}
143 Err(e) => crate::kprintln!(
144 "[cap] frame refcount decrement failed: {:#x} {:?}",
145 frame_phys.as_u64(),
146 e
147 ),
148 }
149 }
150 });
151 entry.iommu_mapping().inspect(|mapping| {
152 crate::iommu::unmap_frame_iommu(mapping.bus, mapping.devfn, mapping.iova);
153 });
154 ft.clear(idx);
155 }
156 false => {}
157 }
158 super::kernel_objects::free_slot(phys);
159 }
160 ObjectTag::VRegion => {
161 let obj = unsafe {
162 &*(addr::phys_to_virt(PhysAddr::new(phys))
163 .as_ptr::<lancer_core::object_layout::VRegionObject>())
164 };
165 let page_count = obj.page_count;
166 unmap_vregion_mapping(obj.owner_pid, obj.owner_vaddr, page_count, ptable);
167 unmap_vregion_mapping(obj.child_pid, obj.child_vaddr, page_count, ptable);
168 super::kernel_objects::free_slot(phys);
169 }
170 ObjectTag::Process
171 | ObjectTag::Framebuffer
172 | ObjectTag::PciDevice
173 | ObjectTag::Untyped
174 | ObjectTag::CNode
175 | ObjectTag::IrqHandler => {}
176 }
177}
178
179pub(crate) fn unmap_region(
180 pml4_phys: crate::mem::typed_addr::Pml4Phys,
181 vaddr_base: u64,
182 page_count: u16,
183) {
184 (0..page_count as u64).for_each(|i| {
185 let vaddr = x86_64::VirtAddr::new(vaddr_base + i * 4096);
186 let _ = crate::proc::address_space::unmap_user_page(pml4_phys, vaddr);
187 });
188}
189
190fn unmap_vregion_mapping(
191 pid_raw: u32,
192 vaddr_base: u64,
193 page_count: u16,
194 ptable: &mut crate::proc::ProcessManager,
195) {
196 Pid::try_new(pid_raw)
197 .and_then(|pid| ptable.exec(pid).map(|e| e.pml4_phys))
198 .inspect(|&pml4_phys| {
199 unmap_region(pml4_phys, vaddr_base, page_count);
200 });
201}
202
203fn unblock_queue(queue: &super::object::PidQueue, ptable: &mut crate::proc::ProcessManager) {
204 let mut cursor = queue.head;
205 let mut steps = 0u32;
206 let max = crate::types::MAX_PIDS as u32;
207 core::iter::from_fn(|| {
208 cursor.filter(|_| steps < max).inspect(|&pid| {
209 steps += 1;
210 cursor = ptable[pid].next_ipc;
211 ptable[pid].next_ipc = None;
212 let proof = ptable[pid].blocked_proof();
213 match ptable.unblock_and_enqueue(pid, proof) {
214 Ok(()) => {
215 let exec = ptable.exec_mut(pid).unwrap();
216 exec.saved_context.rax =
217 crate::error::KernelError::InvalidObject.to_errno() as u64;
218 exec.seal_context();
219 }
220 Err(e) => {
221 crate::kprintln!(
222 "[cap] BUG: unblock_queue failed to unblock pid {}: {:?}",
223 pid.raw(),
224 e
225 );
226 }
227 }
228 ptable.clear_reply_targets_for(pid);
229 })
230 })
231 .count();
232}
233
234pub fn resolve_process_cap(
235 cap: &CapRef,
236 pool: &super::pool::ObjectPool,
237) -> Result<crate::types::Pid, KernelError> {
238 let obj = pool
239 .read_as::<lancer_core::object_layout::ProcessObject>(cap.phys(), cap.generation())?;
240 crate::types::Pid::try_new(obj.pid).ok_or(KernelError::InvalidObject)
241}
242
243#[allow(clippy::too_many_arguments)]
244pub fn create_via_cnode(
245 pool: &mut super::pool::ObjectPool,
246 cnode_phys: ObjPhys,
247 cnode_gen: Generation,
248 address: u64,
249 depth: u8,
250 guard_value: u64,
251 guard_bits: u8,
252 tag: ObjectTag,
253) -> Result<ObjPhys, KernelError> {
254 let obj_phys = match tag {
255 ObjectTag::Endpoint => {
256 let phys = super::kernel_objects::alloc_slot().ok_or(KernelError::PoolExhausted)?;
257 let header = KernelObjectHeader::new(ObjectTag::Endpoint, 0, 64);
258 super::kernel_objects::write_at(phys, EndpointObject::init_default(header));
259 phys
260 }
261 ObjectTag::Notification => {
262 let phys = super::kernel_objects::alloc_slot().ok_or(KernelError::PoolExhausted)?;
263 let header = KernelObjectHeader::new(ObjectTag::Notification, 0, 64);
264 super::kernel_objects::write_at(phys, NotificationObject::init_default(header));
265 phys
266 }
267 ObjectTag::Process
268 | ObjectTag::SchedContext
269 | ObjectTag::IrqHandler
270 | ObjectTag::Framebuffer
271 | ObjectTag::PciDevice
272 | ObjectTag::CNode
273 | ObjectTag::Untyped
274 | ObjectTag::Frame
275 | ObjectTag::VRegion => {
276 return Err(KernelError::InvalidType);
277 }
278 };
279
280 let (registered_phys, obj_gen) = match pool.register_object(obj_phys, tag) {
281 Ok(pair) => pair,
282 Err(e) => {
283 super::kernel_objects::free_slot(obj_phys);
284 return Err(e);
285 }
286 };
287 let cap = CapRef::new(tag, registered_phys, Rights::ALL, obj_gen);
288 match super::cnode::resolve_and_insert(
289 pool,
290 cnode_phys,
291 cnode_gen,
292 address,
293 depth,
294 guard_value,
295 guard_bits,
296 cap,
297 ) {
298 Ok(()) => Ok(registered_phys),
299 Err(e) => {
300 let _ = pool.free_phys(registered_phys, obj_gen);
301 Err(e)
302 }
303 }
304}
305
306#[allow(clippy::too_many_arguments)]
307pub fn derive_via_cnode(
308 pool: &mut super::pool::ObjectPool,
309 cnode_phys: ObjPhys,
310 cnode_gen: Generation,
311 src_addr: u64,
312 dest_addr: u64,
313 depth: u8,
314 guard_value: u64,
315 guard_bits: u8,
316 rights_mask: Rights,
317) -> Result<(), KernelError> {
318 let src = super::cnode::resolve_and_read(
319 pool,
320 cnode_phys,
321 cnode_gen,
322 src_addr,
323 depth,
324 guard_value,
325 guard_bits,
326 )?;
327 if !src.rights().contains(Rights::GRANT) {
328 return Err(KernelError::InsufficientRights);
329 }
330 pool.inc_ref(src.phys(), src.generation())?;
331 let derived = src.with_rights(src.rights() & rights_mask);
332 match super::cnode::resolve_and_insert(
333 pool,
334 cnode_phys,
335 cnode_gen,
336 dest_addr,
337 depth,
338 guard_value,
339 guard_bits,
340 derived,
341 ) {
342 Ok(()) => Ok(()),
343 Err(e) => {
344 pool.dec_ref_phys(src.phys(), src.generation());
345 Err(e)
346 }
347 }
348}
349
350pub fn identify_via_cnode(
351 pool: &super::pool::ObjectPool,
352 cnode_phys: ObjPhys,
353 cnode_gen: Generation,
354 address: u64,
355 depth: u8,
356 guard_value: u64,
357 guard_bits: u8,
358) -> Result<(ObjectTag, Rights), KernelError> {
359 let cap = super::cnode::resolve_and_read(
360 pool,
361 cnode_phys,
362 cnode_gen,
363 address,
364 depth,
365 guard_value,
366 guard_bits,
367 )?;
368 match pool.get_tag(cap.phys(), cap.generation()) {
369 Ok(_) => Ok((cap.tag(), cap.rights())),
370 Err(KernelError::StaleGeneration) => {
371 let _ = super::cnode::resolve_and_clear(
372 pool,
373 cnode_phys,
374 cnode_gen,
375 address,
376 depth,
377 guard_value,
378 guard_bits,
379 );
380 Err(KernelError::StaleGeneration)
381 }
382 Err(e) => Err(e),
383 }
384}
385
386#[allow(clippy::too_many_arguments)]
387pub fn insert_phys_cap_via_cnode(
388 pool: &mut super::pool::ObjectPool,
389 cnode_phys: ObjPhys,
390 cnode_gen: Generation,
391 address: u64,
392 depth: u8,
393 guard_value: u64,
394 guard_bits: u8,
395 tag: ObjectTag,
396 obj_phys: u64,
397 rights: Rights,
398) -> Result<ObjPhys, KernelError> {
399 let (registered_phys, obj_gen) = match pool.register_object(obj_phys, tag) {
400 Ok(pair) => pair,
401 Err(e) => {
402 super::kernel_objects::free_slot(obj_phys);
403 return Err(e);
404 }
405 };
406 if tag == ObjectTag::Frame {
407 let mut ft = super::frame_table::FRAME_TABLE.lock();
408 if let Some(idx) = ft.alloc_idx() {
409 let frame_obj = unsafe {
410 &mut *(crate::mem::addr::phys_to_virt(x86_64::PhysAddr::new(
411 registered_phys.raw(),
412 ))
413 .as_mut_ptr::<lancer_core::object_layout::FrameObject>())
414 };
415 frame_obj.frame_table_idx = idx;
416 }
417 }
418 let cap = CapRef::new(tag, registered_phys, rights, obj_gen);
419 match super::cnode::resolve_and_insert(
420 pool,
421 cnode_phys,
422 cnode_gen,
423 address,
424 depth,
425 guard_value,
426 guard_bits,
427 cap,
428 ) {
429 Ok(()) => Ok(registered_phys),
430 Err(e) => {
431 let _ = pool.free_phys(registered_phys, obj_gen);
432 Err(e)
433 }
434 }
435}
436
437pub fn revoke_via_cnode(
438 pid: crate::types::Pid,
439 address: u64,
440 ptable: &mut crate::proc::ProcessManager,
441) -> Result<(), KernelError> {
442 let (cnode_phys, cnode_gen, depth, guard_value, guard_bits) =
443 super::cnode::cnode_coords(pid, ptable)?;
444 let cap_snapshot = {
445 let pool = POOL.lock();
446 super::cnode::resolve_and_read(
447 &pool,
448 cnode_phys,
449 cnode_gen,
450 address,
451 depth,
452 guard_value,
453 guard_bits,
454 )?
455 };
456 if !cap_snapshot.rights().contains(Rights::REVOKE) {
457 return Err(KernelError::InsufficientRights);
458 }
459 let stale_phys = cap_snapshot.phys();
460 let stale_gen = cap_snapshot.generation();
461
462 let is_untyped = cap_snapshot.tag() == ObjectTag::Untyped;
463
464 match is_untyped {
465 true => {
466 let mut pool = POOL.lock();
467 super::derivation::destroy_children(&mut pool, ptable, stale_phys, stale_gen)?;
468 Ok(())
469 }
470 false => {
471 {
472 let mut pool = POOL.lock();
473 super::derivation::unlink_child(&mut pool, stale_phys);
474 }
475 let (_new_gen, old_phys_tag) = POOL.lock().revoke_phys(stale_phys, stale_gen)?;
476 {
477 let pool = POOL.lock();
478 let _ = super::cnode::resolve_and_clear(
479 &pool,
480 cnode_phys,
481 cnode_gen,
482 address,
483 depth,
484 guard_value,
485 guard_bits,
486 );
487 invalidate_stale_caps_via_cnode(ptable, &pool, stale_phys, stale_gen);
488 }
489 old_phys_tag.inspect(|&(phys, tag)| {
490 cleanup_by_tag_with_ptable(tag, phys, ptable);
491 });
492 Ok(())
493 }
494 }
495}
496
497pub fn invalidate_stale_caps_via_cnode(
498 ptable: &crate::proc::ProcessManager,
499 pool: &super::pool::ObjectPool,
500 phys: ObjPhys,
501 stale_gen: Generation,
502) {
503 let cap = ptable.capacity();
504 (0..cap as u32)
505 .filter_map(crate::types::Pid::try_new)
506 .filter_map(|pid| ptable.exec(pid).and_then(|e| e.root_cnode()))
507 .for_each(|(cphys, cgen)| {
508 let _ = super::cnode::invalidate_stale_in_cnode(pool, cphys, cgen, phys, stale_gen);
509 });
510}