//! CNode creation, destruction, and capability-address resolution.
use x86_64::PhysAddr;

pub use lancer_core::cnode::{MAX_CNODE_BITS, MIN_CNODE_BITS};
use lancer_core::cnode::{
    MAX_RESOLVE_DEPTH, PAGE_SIZE, extract_guard, extract_index, frames_for_cnode,
};
use lancer_core::object_layout::CNodeObject;

use crate::error::KernelError;
use crate::mem::addr;
use crate::mem::phys::BitmapFrameAllocator;
use crate::proc::ProcessManager;
use crate::types::{Generation, ObjPhys, Pid};

use super::object::{CNodeData, ObjectTag};
use super::pool::ObjectPool;
use super::table::{CapRef, CapSlot, Rights};

18pub fn create_cnode(
19 size_bits: u8,
20 allocator: &BitmapFrameAllocator,
21) -> Result<CNodeData, KernelError> {
22 if !(MIN_CNODE_BITS..=MAX_CNODE_BITS).contains(&size_bits) {
23 return Err(KernelError::InvalidParameter);
24 }
25 let frame_count = frames_for_cnode(size_bits);
26 let slots_phys = allocator
27 .allocate_contiguous(frame_count as usize)
28 .ok_or(KernelError::ResourceExhausted)?;
29
30 let slot_count = 1usize << size_bits;
31 let base_ptr = addr::phys_to_virt(slots_phys).as_mut_ptr::<CapSlot>();
32 (0..slot_count).for_each(|i| unsafe {
33 base_ptr.add(i).write(CapSlot::Empty);
34 });
35
36 Ok(CNodeData {
37 slots_phys,
38 size_bits,
39 frame_count,
40 })
41}
42
43pub fn destroy_cnode(cnode: &CNodeData, allocator: &BitmapFrameAllocator) {
44 let base_frame_idx = (cnode.slots_phys.as_u64() / PAGE_SIZE as u64) as usize;
45 (0..cnode.frame_count as usize).for_each(|i| {
46 let frame_phys = PhysAddr::new((base_frame_idx + i) as u64 * PAGE_SIZE as u64);
47 crate::mem::addr::zero_frame(frame_phys);
48 let frame = unsafe {
49 x86_64::structures::paging::PhysFrame::from_start_address_unchecked(frame_phys)
50 };
51 allocator.deallocate_frame(frame);
52 });
53}
54
55unsafe fn read_slot(slots_phys: PhysAddr, index: usize) -> CapSlot {
56 let base = addr::phys_to_virt(slots_phys).as_ptr::<CapSlot>();
57 unsafe { base.add(index).read() }
58}
59
60unsafe fn slot_ptr(slots_phys: PhysAddr, index: usize) -> *mut CapSlot {
61 let base = addr::phys_to_virt(slots_phys).as_mut_ptr::<CapSlot>();
62 unsafe { base.add(index) }
63}
64
65pub fn resolve(
66 pool: &ObjectPool,
67 cnode_phys: ObjPhys,
68 cnode_gen: Generation,
69 address: u64,
70 depth: u8,
71 guard_value: u64,
72 guard_bits: u8,
73) -> Result<CapSlot, KernelError> {
74 resolve_inner(
75 pool,
76 cnode_phys,
77 cnode_gen,
78 address,
79 depth,
80 guard_value,
81 guard_bits,
82 0,
83 )
84}
85
86#[allow(clippy::too_many_arguments)]
87fn resolve_inner(
88 pool: &ObjectPool,
89 cnode_phys: ObjPhys,
90 cnode_gen: Generation,
91 address: u64,
92 depth: u8,
93 guard_value: u64,
94 guard_bits: u8,
95 recursion: u8,
96) -> Result<CapSlot, KernelError> {
97 if recursion >= MAX_RESOLVE_DEPTH {
98 return Err(KernelError::InvalidSlot);
99 }
100
101 if depth < guard_bits {
102 return Err(KernelError::InvalidSlot);
103 }
104
105 let extracted = extract_guard(address, depth, guard_bits);
106 if extracted != guard_value {
107 return Err(KernelError::GuardMismatch);
108 }
109
110 let post_guard_depth = depth - guard_bits;
111
112 let (slots_phys, size_bits) = {
113 let cnode = pool.read_as::<CNodeObject>(cnode_phys, cnode_gen)?;
114 (PhysAddr::new(cnode.slots_phys), cnode.size_bits)
115 };
116
117 if post_guard_depth < size_bits {
118 return Err(KernelError::InvalidSlot);
119 }
120
121 let index = extract_index(address, post_guard_depth, size_bits) as usize;
122 let remaining = post_guard_depth - size_bits;
123 let slot = unsafe { read_slot(slots_phys, index) };
124
125 match remaining {
126 0 => Ok(slot),
127 _ => match slot {
128 CapSlot::Active(cap) if cap.tag() == ObjectTag::CNode => resolve_inner(
129 pool,
130 cap.phys(),
131 cap.generation(),
132 address,
133 remaining,
134 cap.guard_value(),
135 cap.guard_bits(),
136 recursion + 1,
137 ),
138 _ => Err(KernelError::InvalidSlot),
139 },
140 }
141}
142
143fn resolve_slot_ptr(
144 pool: &ObjectPool,
145 cnode_phys: ObjPhys,
146 cnode_gen: Generation,
147 address: u64,
148 depth: u8,
149 guard_value: u64,
150 guard_bits: u8,
151) -> Result<*mut CapSlot, KernelError> {
152 resolve_slot_ptr_inner(
153 pool,
154 cnode_phys,
155 cnode_gen,
156 address,
157 depth,
158 guard_value,
159 guard_bits,
160 0,
161 )
162}
163
164#[allow(clippy::too_many_arguments)]
165fn resolve_slot_ptr_inner(
166 pool: &ObjectPool,
167 cnode_phys: ObjPhys,
168 cnode_gen: Generation,
169 address: u64,
170 depth: u8,
171 guard_value: u64,
172 guard_bits: u8,
173 recursion: u8,
174) -> Result<*mut CapSlot, KernelError> {
175 if recursion >= MAX_RESOLVE_DEPTH {
176 return Err(KernelError::InvalidSlot);
177 }
178
179 if depth < guard_bits {
180 return Err(KernelError::InvalidSlot);
181 }
182
183 let extracted = extract_guard(address, depth, guard_bits);
184 if extracted != guard_value {
185 return Err(KernelError::GuardMismatch);
186 }
187
188 let post_guard_depth = depth - guard_bits;
189
190 let (slots_phys, size_bits) = {
191 let cnode = pool.read_as::<CNodeObject>(cnode_phys, cnode_gen)?;
192 (PhysAddr::new(cnode.slots_phys), cnode.size_bits)
193 };
194
195 if post_guard_depth < size_bits {
196 return Err(KernelError::InvalidSlot);
197 }
198
199 let index = extract_index(address, post_guard_depth, size_bits) as usize;
200 let remaining = post_guard_depth - size_bits;
201
202 match remaining {
203 0 => Ok(unsafe { slot_ptr(slots_phys, index) }),
204 _ => {
205 let slot = unsafe { read_slot(slots_phys, index) };
206 match slot {
207 CapSlot::Active(cap) if cap.tag() == ObjectTag::CNode => resolve_slot_ptr_inner(
208 pool,
209 cap.phys(),
210 cap.generation(),
211 address,
212 remaining,
213 cap.guard_value(),
214 cap.guard_bits(),
215 recursion + 1,
216 ),
217 _ => Err(KernelError::InvalidSlot),
218 }
219 }
220 }
221}
222
223#[allow(clippy::too_many_arguments)]
224pub fn resolve_and_insert(
225 pool: &ObjectPool,
226 cnode_phys: ObjPhys,
227 cnode_gen: Generation,
228 address: u64,
229 depth: u8,
230 guard_value: u64,
231 guard_bits: u8,
232 cap: CapRef,
233) -> Result<(), KernelError> {
234 let ptr = resolve_slot_ptr(
235 pool,
236 cnode_phys,
237 cnode_gen,
238 address,
239 depth,
240 guard_value,
241 guard_bits,
242 )?;
243 let slot = unsafe { &mut *ptr };
244 match slot {
245 CapSlot::Active(_) => Err(KernelError::SlotOccupied),
246 CapSlot::Empty => {
247 *slot = CapSlot::Active(cap);
248 Ok(())
249 }
250 }
251}
252
253#[allow(clippy::too_many_arguments)]
254pub fn resolve_and_validate(
255 pool: &ObjectPool,
256 cnode_phys: ObjPhys,
257 cnode_gen: Generation,
258 address: u64,
259 depth: u8,
260 guard_value: u64,
261 guard_bits: u8,
262 expected_tag: ObjectTag,
263 required_rights: Rights,
264) -> Result<CapRef, KernelError> {
265 match resolve(
266 pool,
267 cnode_phys,
268 cnode_gen,
269 address,
270 depth,
271 guard_value,
272 guard_bits,
273 )? {
274 CapSlot::Empty => Err(KernelError::SlotEmpty),
275 CapSlot::Active(cap) => {
276 if cap.tag() != expected_tag {
277 return Err(KernelError::InvalidType);
278 }
279 if !cap.rights().contains(required_rights) {
280 return Err(KernelError::InsufficientRights);
281 }
282 Ok(cap)
283 }
284 }
285}
286
287#[allow(clippy::too_many_arguments)]
288pub fn resolve_and_read(
289 pool: &ObjectPool,
290 cnode_phys: ObjPhys,
291 cnode_gen: Generation,
292 address: u64,
293 depth: u8,
294 guard_value: u64,
295 guard_bits: u8,
296) -> Result<CapRef, KernelError> {
297 match resolve(
298 pool,
299 cnode_phys,
300 cnode_gen,
301 address,
302 depth,
303 guard_value,
304 guard_bits,
305 )? {
306 CapSlot::Empty => Err(KernelError::SlotEmpty),
307 CapSlot::Active(cap) => Ok(cap),
308 }
309}
310
311pub fn resolve_and_clear(
312 pool: &ObjectPool,
313 cnode_phys: ObjPhys,
314 cnode_gen: Generation,
315 address: u64,
316 depth: u8,
317 guard_value: u64,
318 guard_bits: u8,
319) -> Result<CapRef, KernelError> {
320 let ptr = resolve_slot_ptr(
321 pool,
322 cnode_phys,
323 cnode_gen,
324 address,
325 depth,
326 guard_value,
327 guard_bits,
328 )?;
329 let slot = unsafe { &mut *ptr };
330 match slot {
331 CapSlot::Empty => Err(KernelError::SlotEmpty),
332 CapSlot::Active(cap) => {
333 let cap = *cap;
334 *slot = CapSlot::Empty;
335 Ok(cap)
336 }
337 }
338}
339
340pub fn walk_cnode_slots(
341 pool: &ObjectPool,
342 cnode_phys: ObjPhys,
343 cnode_gen: Generation,
344 mut f: impl FnMut(&mut CapSlot),
345) -> Result<(), KernelError> {
346 walk_cnode_slots_recursive(pool, cnode_phys, cnode_gen, &mut f, 0)
347}
348
349fn walk_cnode_slots_recursive(
350 pool: &ObjectPool,
351 cnode_phys: ObjPhys,
352 cnode_gen: Generation,
353 f: &mut impl FnMut(&mut CapSlot),
354 depth: u8,
355) -> Result<(), KernelError> {
356 if depth >= MAX_RESOLVE_DEPTH {
357 return Ok(());
358 }
359
360 let (slots_phys, size_bits) = {
361 let cnode = pool.read_as::<CNodeObject>(cnode_phys, cnode_gen)?;
362 (PhysAddr::new(cnode.slots_phys), cnode.size_bits)
363 };
364
365 let slot_count = 1usize << size_bits;
366 (0..slot_count).for_each(|i| {
367 let slot = unsafe { &mut *slot_ptr(slots_phys, i) };
368 match slot {
369 CapSlot::Active(cap) if cap.tag() == ObjectTag::CNode => {
370 let child_phys = cap.phys();
371 let child_gen = cap.generation();
372 f(slot);
373 let _ = walk_cnode_slots_recursive(pool, child_phys, child_gen, f, depth + 1);
374 }
375 _ => f(slot),
376 }
377 });
378 Ok(())
379}
380
381pub fn cnode_coords(
382 pid: Pid,
383 ptable: &ProcessManager,
384) -> Result<(ObjPhys, Generation, u8, u64, u8), KernelError> {
385 let _sched = ptable.get(pid).ok_or(KernelError::InvalidObject)?;
386 let exec = ptable.exec(pid).ok_or(KernelError::InvalidObject)?;
387 let (cnode_phys, cnode_gen) = exec.root_cnode().ok_or(KernelError::InvalidObject)?;
388 Ok((
389 cnode_phys,
390 cnode_gen,
391 exec.cnode_depth(),
392 exec.root_guard_value(),
393 exec.root_guard_bits(),
394 ))
395}
396
397pub fn resolve_caller_validate(
398 pid: Pid,
399 address: u64,
400 expected_tag: ObjectTag,
401 required_rights: Rights,
402 ptable: &ProcessManager,
403 pool: &ObjectPool,
404) -> Result<CapRef, KernelError> {
405 let (cnode_phys, cnode_gen, depth, gv, gb) = cnode_coords(pid, ptable)?;
406 resolve_and_validate(
407 pool,
408 cnode_phys,
409 cnode_gen,
410 address,
411 depth,
412 gv,
413 gb,
414 expected_tag,
415 required_rights,
416 )
417}
418
419pub fn resolve_caller_read(
420 pid: Pid,
421 address: u64,
422 ptable: &ProcessManager,
423 pool: &ObjectPool,
424) -> Result<CapRef, KernelError> {
425 let (cnode_phys, cnode_gen, depth, gv, gb) = cnode_coords(pid, ptable)?;
426 resolve_and_read(pool, cnode_phys, cnode_gen, address, depth, gv, gb)
427}
428
429pub fn resolve_caller_insert(
430 pid: Pid,
431 address: u64,
432 cap: CapRef,
433 ptable: &ProcessManager,
434 pool: &ObjectPool,
435) -> Result<(), KernelError> {
436 let (cnode_phys, cnode_gen, depth, gv, gb) = cnode_coords(pid, ptable)?;
437 resolve_and_insert(pool, cnode_phys, cnode_gen, address, depth, gv, gb, cap)
438}
439
440pub fn resolve_caller_clear(
441 pid: Pid,
442 address: u64,
443 ptable: &ProcessManager,
444 pool: &ObjectPool,
445) -> Result<CapRef, KernelError> {
446 let (cnode_phys, cnode_gen, depth, gv, gb) = cnode_coords(pid, ptable)?;
447 resolve_and_clear(pool, cnode_phys, cnode_gen, address, depth, gv, gb)
448}
449
/// Test-only helper: remove every capability reachable from the given CNode
/// tree, invoking `callback` once per removed capability.
#[cfg(lancer_test)]
pub fn drain_cnode_tree(
    pool: &mut ObjectPool,
    cnode_phys: ObjPhys,
    cnode_gen: Generation,
    callback: &mut impl FnMut(CapRef, &mut ObjectPool),
) -> Result<(), KernelError> {
    // Start the recursive drain at nesting level zero.
    drain_cnode_tree_inner(pool, cnode_phys, cnode_gen, callback, 0)
}
459
/// Recursive worker for [`drain_cnode_tree`]: empties each slot, draining a
/// nested CNode's subtree before reporting the parent capability to `callback`.
#[cfg(lancer_test)]
fn drain_cnode_tree_inner(
    pool: &mut ObjectPool,
    cnode_phys: ObjPhys,
    cnode_gen: Generation,
    callback: &mut impl FnMut(CapRef, &mut ObjectPool),
    recursion: u8,
) -> Result<(), KernelError> {
    if recursion >= MAX_RESOLVE_DEPTH {
        return Err(KernelError::InvalidSlot);
    }

    let (slots_phys, size_bits) = {
        // Scoped so the pool read ends before recursing.
        let cnode = pool.read_as::<CNodeObject>(cnode_phys, cnode_gen)?;
        (PhysAddr::new(cnode.slots_phys), cnode.size_bits)
    };

    for i in 0..(1usize << size_bits) {
        // SAFETY: `i` stays below the slot count implied by `size_bits`.
        let slot = unsafe { &mut *slot_ptr(slots_phys, i) };
        let cap = match slot {
            CapSlot::Active(cap) => *cap,
            CapSlot::Empty => continue,
        };
        // Take the capability out of the slot before processing it.
        *slot = CapSlot::Empty;
        if cap.tag() == ObjectTag::CNode {
            // Depth-first: errors from the child drain are ignored here.
            let _ =
                drain_cnode_tree_inner(pool, cap.phys(), cap.generation(), callback, recursion + 1);
        }
        callback(cap, pool);
    }
    Ok(())
}
500
501pub fn drain_cnode_phys(
502 slots_phys: PhysAddr,
503 size_bits: u8,
504 frame_count: u8,
505 pool: &mut ObjectPool,
506 ptable: &mut crate::proc::ProcessManager,
507) {
508 drain_cnode_phys_inner(slots_phys, size_bits, pool, ptable, 0);
509 let cnode_data = super::object::CNodeData {
510 slots_phys,
511 size_bits,
512 frame_count,
513 };
514 destroy_cnode(&cnode_data, &BitmapFrameAllocator);
515}
516
517fn drain_cnode_phys_inner(
518 slots_phys: PhysAddr,
519 size_bits: u8,
520 pool: &mut ObjectPool,
521 ptable: &mut crate::proc::ProcessManager,
522 recursion: u8,
523) {
524 if recursion >= MAX_RESOLVE_DEPTH {
525 return;
526 }
527
528 let slot_count = 1usize << size_bits;
529 (0..slot_count).for_each(|i| {
530 let slot = unsafe { &mut *slot_ptr(slots_phys, i) };
531 match slot {
532 CapSlot::Active(cap) => {
533 let cap = *cap;
534 *slot = CapSlot::Empty;
535 let freed_phys = cap.phys();
536 match cap.tag() == ObjectTag::CNode {
537 true => match pool.dec_ref_phys(cap.phys(), cap.generation()) {
538 Some((nested_phys, ObjectTag::CNode)) => {
539 super::derivation::unlink_child(pool, freed_phys);
540 let nested = unsafe {
541 &*(crate::mem::addr::phys_to_virt(PhysAddr::new(nested_phys))
542 .as_ptr::<CNodeObject>())
543 };
544 drain_cnode_phys_inner(
545 PhysAddr::new(nested.slots_phys),
546 nested.size_bits,
547 pool,
548 ptable,
549 recursion + 1,
550 );
551 let nested_data = super::object::CNodeData {
552 slots_phys: PhysAddr::new(nested.slots_phys),
553 size_bits: nested.size_bits,
554 frame_count: nested.frame_count,
555 };
556 destroy_cnode(&nested_data, &BitmapFrameAllocator);
557 super::kernel_objects::free_slot(nested_phys);
558 }
559 Some((other_phys, other_tag)) => {
560 super::derivation::unlink_child(pool, freed_phys);
561 super::ops::cleanup_by_tag_with_ptable(
562 other_tag,
563 other_phys,
564 ptable,
565 );
566 }
567 None => {}
568 },
569 false => match pool.dec_ref_phys(cap.phys(), cap.generation()) {
570 Some((obj_phys, tag)) => {
571 super::derivation::unlink_child(pool, freed_phys);
572 super::ops::cleanup_by_tag_with_ptable(
573 tag,
574 obj_phys,
575 ptable,
576 );
577 }
578 None => {}
579 },
580 }
581 }
582 CapSlot::Empty => {}
583 }
584 });
585}
586
587pub fn invalidate_stale_in_cnode(
588 pool: &ObjectPool,
589 cnode_phys: ObjPhys,
590 cnode_gen: Generation,
591 target_phys: ObjPhys,
592 target_gen: Generation,
593) -> Result<(), KernelError> {
594 invalidate_stale_inner(pool, cnode_phys, cnode_gen, target_phys, target_gen, 0)
595}
596
597fn invalidate_stale_inner(
598 pool: &ObjectPool,
599 cnode_phys: ObjPhys,
600 cnode_gen: Generation,
601 target_phys: ObjPhys,
602 target_gen: Generation,
603 recursion: u8,
604) -> Result<(), KernelError> {
605 if recursion >= MAX_RESOLVE_DEPTH {
606 return Err(KernelError::InvalidSlot);
607 }
608
609 let (slots_phys, size_bits) = {
610 let cnode = pool.read_as::<CNodeObject>(cnode_phys, cnode_gen)?;
611 (PhysAddr::new(cnode.slots_phys), cnode.size_bits)
612 };
613
614 let slot_count = 1usize << size_bits;
615 (0..slot_count).for_each(|i| {
616 let slot = unsafe { &mut *slot_ptr(slots_phys, i) };
617 match slot {
618 CapSlot::Active(cap) => {
619 if cap.tag() == ObjectTag::CNode {
620 let _ = invalidate_stale_inner(
621 pool,
622 cap.phys(),
623 cap.generation(),
624 target_phys,
625 target_gen,
626 recursion + 1,
627 );
628 }
629 if cap.phys() == target_phys && cap.generation() == target_gen {
630 *slot = CapSlot::Empty;
631 }
632 }
633 CapSlot::Empty => {}
634 }
635 });
636 Ok(())
637}