//! Retype: carve typed kernel objects (endpoints, frames, CNodes, TCBs,
//! VRegions, ...) out of untyped memory and install capabilities to them.
1use lancer_core::header::KernelObjectHeader;
2use lancer_core::object_layout::{
3 CNodeObject, EndpointObject, FrameObject, IrqHandlerObject, KernelObject, NotificationObject,
4 SchedContextObject, UntypedObject, VRegionObject,
5};
6use lancer_core::object_tag::ObjectTag;
7
8use super::pool::ObjectPool;
9use super::table::{CapRef, Rights};
10use crate::error::KernelError;
11use crate::mem::addr;
12use crate::types::{Generation, ObjPhys};
13use x86_64::PhysAddr;
14
/// Upper bound on the number of objects a single retype call may create;
/// also sizes the rollback bookkeeping `StaticVec` in `kernel_retype`.
const MAX_RETYPE_COUNT: u32 = 64;
16
17fn write_inline<T: KernelObject>(virt: x86_64::VirtAddr, header: KernelObjectHeader) {
18 unsafe { core::ptr::write(virt.as_mut_ptr::<T>(), T::init_default(header)) };
19}
20
21fn write_repr_c_object(tag: ObjectTag, obj_phys: u64, size_bits: u8) -> Result<u64, KernelError> {
22 let header = KernelObjectHeader::new(tag, 0, 64);
23 let virt = addr::phys_to_virt(PhysAddr::new(obj_phys));
24
25 match tag {
26 ObjectTag::Endpoint => {
27 write_inline::<EndpointObject>(virt, header);
28 Ok(obj_phys)
29 }
30 ObjectTag::Notification => {
31 write_inline::<NotificationObject>(virt, header);
32 Ok(obj_phys)
33 }
34 ObjectTag::SchedContext => {
35 write_inline::<SchedContextObject>(virt, header);
36 Ok(obj_phys)
37 }
38 ObjectTag::IrqHandler => {
39 write_inline::<IrqHandlerObject>(virt, header);
40 Ok(obj_phys)
41 }
42 ObjectTag::Frame => {
43 let kobj_phys =
44 super::kernel_objects::alloc_slot().ok_or(KernelError::PoolExhausted)?;
45 let mut obj = FrameObject::init_default(header);
46 obj.phys_addr = obj_phys;
47 obj.size_bits = match size_bits {
48 0 => 12,
49 b => b,
50 };
51 super::kernel_objects::write_at(kobj_phys, obj);
52 Ok(kobj_phys)
53 }
54 ObjectTag::CNode => {
55 let frame_count = lancer_core::cnode::frames_for_cnode(size_bits);
56 let mut obj = CNodeObject::init_default(header);
57 obj.slots_phys = obj_phys;
58 obj.size_bits = size_bits;
59 obj.frame_count = frame_count;
60 unsafe { core::ptr::write(virt.as_mut_ptr::<CNodeObject>(), obj) };
61 Ok(obj_phys)
62 }
63 ObjectTag::Untyped
64 | ObjectTag::Process
65 | ObjectTag::Framebuffer
66 | ObjectTag::PciDevice
67 | ObjectTag::VRegion => Ok(obj_phys),
68 }
69}
70
71fn validate_retype_tag(tag: ObjectTag) -> Result<(), KernelError> {
72 match tag {
73 ObjectTag::Endpoint
74 | ObjectTag::Notification
75 | ObjectTag::Frame
76 | ObjectTag::CNode
77 | ObjectTag::SchedContext
78 | ObjectTag::IrqHandler => Ok(()),
79 ObjectTag::Untyped
80 | ObjectTag::Process
81 | ObjectTag::Framebuffer
82 | ObjectTag::PciDevice
83 | ObjectTag::VRegion => Err(KernelError::InvalidType),
84 }
85}
86
/// Outcome of carving a TCB out of untyped memory (`init_tcb_memory`).
struct TcbInitResult {
    // PID allocated for the new process/thread.
    pid: crate::types::Pid,
    // PML4 the TCB runs under — freshly created or shared with the parent.
    pml4_phys: crate::mem::typed_addr::Pml4Phys,
    // Root CNode inherited from the parent in `AddressSpaceMode::Shared`;
    // `None` when a fresh address space was created.
    root_cnode: Option<(ObjPhys, Generation)>,
}
92
/// How a newly retyped TCB obtains its address space.
enum AddressSpaceMode {
    /// Create a fresh user PML4 (process creation).
    New,
    /// Share the parent's PML4 and root CNode (thread creation).
    Shared { parent_pid: crate::types::Pid },
}
97
/// Set up the process-table entry and address space for a TCB carved out of
/// untyped memory at `phys_base + offset`.
///
/// On any failure every partially created resource is unwound in reverse
/// order (pid freed, PML4 torn down or its refcount released) before the
/// error is returned. On success the exec context has been written into the
/// TCB region and sealed via `finish_tcb_init`.
fn init_tcb_memory(
    ptable: &mut crate::proc::ProcessManager,
    phys_base: PhysAddr,
    offset: u64,
    mode: AddressSpaceMode,
) -> Result<TcbInitResult, KernelError> {
    let tcb_phys = PhysAddr::new(phys_base.as_u64() + offset);
    let header_size = lancer_core::untyped::HEADER_SIZE as u64;
    // Round the header size up to the next 64-byte boundary so the exec
    // context that follows it is 64-byte aligned.
    let tcb_offset = (header_size + 63) & !63;

    let exec_phys = PhysAddr::new(tcb_phys.as_u64() + tcb_offset);

    // Kernel-visible virtual address of the exec context, via the HHDM window.
    let hhdm = crate::mem::addr::hhdm_offset();
    let exec_virt = x86_64::VirtAddr::new(exec_phys.as_u64() + hhdm);

    match mode {
        AddressSpaceMode::New => {
            let mut allocator = crate::mem::phys::BitmapFrameAllocator;
            let pml4_raw = crate::proc::address_space::create_user_pml4(&mut allocator)
                .ok_or(KernelError::ResourceExhausted)?;
            let pml4_phys = crate::mem::typed_addr::Pml4Phys::from_create(pml4_raw);

            // Placeholder pid/generation: the real pid is produced by
            // `allocate` below; the sched context is seeded with the exec
            // context's physical address.
            let sched = crate::proc::manager::make_default_sched(
                crate::types::Pid::new(0),
                Generation::new(0),
                exec_phys.as_u64(),
            );
            let (pid, _) =
                ptable.pid_table_mut().allocate(sched, &allocator).ok_or_else(|| {
                    // Pid table full: the freshly built PML4 must be torn
                    // down before reporting the failure.
                    crate::proc::address_space::teardown_user_space(
                        pml4_phys.raw(),
                        &mut allocator,
                    );
                    KernelError::PoolExhausted
                })?;

            if crate::proc::address_space::pml4_ref_create(pml4_phys.raw(), pid).is_err() {
                // Unwind in reverse order: free the pid, then the PML4.
                let _ = ptable.pid_table_mut().free(pid);
                crate::proc::address_space::teardown_user_space(pml4_phys.raw(), &mut allocator);
                return Err(KernelError::ResourceExhausted);
            }

            let exec = crate::proc::manager::make_default_exec(pml4_phys, true);
            finish_tcb_init(ptable, pid, exec_virt, exec, pml4_phys, None)
        }
        AddressSpaceMode::Shared { parent_pid } => {
            // Snapshot the parent's address-space and CSpace roots before any
            // allocation so nothing needs re-reading mid-unwind.
            let parent_exec = ptable.exec(parent_pid).ok_or(KernelError::InvalidObject)?;
            let pml4_phys = parent_exec.pml4_phys;
            let root_cnode = parent_exec.root_cnode;
            let cnode_depth = parent_exec.cnode_depth;
            let guard_value = parent_exec.root_guard_value;
            let guard_bits = parent_exec.root_guard_bits;

            // Bump the shared PML4's refcount; released on any later failure.
            crate::proc::address_space::pml4_ref_share(pml4_phys.raw())?;

            let sched = crate::proc::manager::make_default_sched(
                crate::types::Pid::new(0),
                Generation::new(0),
                exec_phys.as_u64(),
            );
            let allocator = crate::mem::phys::BitmapFrameAllocator;
            let (pid, _) =
                ptable.pid_table_mut().allocate(sched, &allocator).ok_or_else(|| {
                    // Undo the refcount bump taken just above.
                    let _ = crate::proc::address_space::pml4_ref_release(pml4_phys.raw());
                    KernelError::PoolExhausted
                })?;

            // The thread inherits the parent's CSpace addressing parameters.
            let mut exec = crate::proc::manager::make_default_exec(pml4_phys, true);
            exec.root_cnode = root_cnode;
            exec.cnode_depth = cnode_depth;
            exec.root_guard_value = guard_value;
            exec.root_guard_bits = guard_bits;

            finish_tcb_init(ptable, pid, exec_virt, exec, pml4_phys, root_cnode)
        }
    }
}
175
176fn finish_tcb_init(
177 ptable: &mut crate::proc::ProcessManager,
178 pid: crate::types::Pid,
179 exec_virt: x86_64::VirtAddr,
180 exec: crate::proc::ExecContext,
181 pml4_phys: crate::mem::typed_addr::Pml4Phys,
182 root_cnode: Option<(ObjPhys, Generation)>,
183) -> Result<TcbInitResult, KernelError> {
184 unsafe {
185 core::ptr::write(exec_virt.as_mut_ptr::<crate::proc::ExecContext>(), exec);
186 }
187
188 let exec_ref = ptable.exec_mut(pid).ok_or(KernelError::InvalidObject)?;
189 exec_ref.seal_context();
190
191 Ok(TcbInitResult {
192 pid,
193 pml4_phys,
194 root_cnode,
195 })
196}
197
/// Retype a region of untyped memory into `count` objects of type `obj_tag`,
/// installing a capability to each at consecutive slots starting at
/// `dest_addr` in the destination CNode.
///
/// Process and VRegion retypes are delegated to dedicated paths. The generic
/// path is transactional: the untyped watermark is committed only once every
/// object has been created and its cap inserted; any mid-loop failure rolls
/// back all objects created so far (caps cleared, pool entries freed, frame
/// kernel-object slots returned).
#[allow(clippy::too_many_arguments)]
pub fn kernel_retype(
    pool: &mut ObjectPool,
    ptable: Option<&mut crate::proc::ProcessManager>,
    untyped_phys: ObjPhys,
    untyped_gen: Generation,
    obj_tag: ObjectTag,
    size_bits: u8,
    dest_cnode_phys: ObjPhys,
    dest_cnode_gen: Generation,
    dest_addr: u64,
    dest_depth: u8,
    dest_guard_value: u64,
    dest_guard_bits: u8,
    count: u32,
) -> Result<(), KernelError> {
    // Reject oversized batches up front; `allocated` below is statically sized.
    if count > MAX_RETYPE_COUNT {
        return Err(KernelError::InvalidParameter);
    }

    // Process (TCB) creation: exactly one object, and the process table is
    // required; delegated to the TCB-specific path.
    if obj_tag == ObjectTag::Process {
        match count {
            0 => return Ok(()),
            1 => {}
            _ => return Err(KernelError::InvalidParameter),
        }
        let pt = ptable.ok_or(KernelError::InvalidParameter)?;
        retype_tcb_from_untyped(
            pool,
            pt,
            untyped_phys,
            untyped_gen,
            AddressSpaceMode::New,
            size_bits,
            dest_cnode_phys,
            dest_cnode_gen,
            dest_addr,
            dest_depth,
            dest_guard_value,
            dest_guard_bits,
        )?;
        return Ok(());
    }

    // VRegion creation: `count` is reinterpreted as the page count of one
    // region (not the number of regions); delegated to the VRegion path.
    if obj_tag == ObjectTag::VRegion {
        const MAX_PAGES: u32 = lancer_core::types::MAX_VREGION_PAGES as u32;
        let page_count = match count {
            0 => return Ok(()),
            1..=MAX_PAGES => count as u16,
            _ => return Err(KernelError::InvalidParameter),
        };
        retype_vregion_from_untyped(
            pool,
            untyped_phys,
            untyped_gen,
            page_count,
            dest_cnode_phys,
            dest_cnode_gen,
            dest_addr,
            dest_depth,
            dest_guard_value,
            dest_guard_bits,
        )?;
        return Ok(());
    }

    validate_retype_tag(obj_tag)?;

    let ut = pool.read_as::<UntypedObject>(untyped_phys, untyped_gen)?;
    let phys_base = PhysAddr::new(ut.phys_base);
    // Work on a copy of the untyped's state; nothing is committed until the
    // whole batch succeeds.
    let state = ut.to_state();

    let result = match obj_tag {
        // Frames use the tag-aware retype path (device/size validation).
        ObjectTag::Frame => state
            .try_retype(ObjectTag::Frame, size_bits, count)
            .map_err(retype_to_kernel_error)?,
        // Everything else is a raw aligned allocation from the object layout.
        _ => {
            let (obj_size, obj_align) = lancer_core::untyped::object_layout(obj_tag, size_bits)
                .map_err(retype_to_kernel_error)?;
            state
                .try_allocate_raw(obj_size, obj_align, count)
                .map_err(retype_to_kernel_error)?
        }
    };

    // Rollback bookkeeping: records (phys, gen) of each successfully inserted
    // object, in slot order, so failures can be unwound precisely.
    let mut allocated: crate::static_vec::StaticVec<
        (ObjPhys, Generation),
        { MAX_RETYPE_COUNT as usize },
    > = crate::static_vec::StaticVec::new();

    // Create object i, register it with the pool, and insert its cap.
    // Any `Err` aborts the loop and triggers the unwind branch below.
    let rollback_result = (0..count).try_for_each(|i| {
        let obj_phys =
            phys_base.as_u64() + result.start_offset() as u64 + (i as u64) * result.stride() as u64;

        // CNode slot arrays must start zeroed (empty caps) before the object
        // header is written over the region by `write_repr_c_object`.
        if obj_tag == ObjectTag::CNode {
            let slot_count = 1usize << size_bits;
            let hhdm = crate::mem::addr::hhdm_offset();
            let base_ptr = (obj_phys + hhdm) as *mut u8;
            let byte_count = slot_count * core::mem::size_of::<super::table::CapSlot>();
            unsafe { core::ptr::write_bytes(base_ptr, 0, byte_count) };
        }

        // For Frame this returns a separate kernel-object slot; for all other
        // tags it returns obj_phys itself.
        let register_phys = write_repr_c_object(obj_tag, obj_phys, size_bits)?;

        let (obj_registered_phys, obj_gen) = match pool.register_object(register_phys, obj_tag) {
            Ok(pair) => pair,
            Err(_) => {
                // Registration failed: return the frame's kernel-object slot
                // before aborting the batch.
                if obj_tag == ObjectTag::Frame {
                    super::kernel_objects::free_slot(register_phys);
                }
                return Err(KernelError::PoolExhausted);
            }
        };

        // Best-effort frame-table index assignment; a full frame table is
        // tolerated (the frame simply has no index).
        if obj_tag == ObjectTag::Frame {
            let mut ft = super::frame_table::FRAME_TABLE.lock();
            if let Some(idx) = ft.alloc_idx() {
                let frame_obj = unsafe {
                    &mut *(addr::phys_to_virt(PhysAddr::new(obj_registered_phys.raw()))
                        .as_mut_ptr::<FrameObject>())
                };
                frame_obj.frame_table_idx = idx;
            }
        }

        let cap = CapRef::new(obj_tag, obj_registered_phys, Rights::ALL, obj_gen);
        let slot_addr = dest_addr + i as u64;
        match super::cnode::resolve_and_insert(
            pool,
            dest_cnode_phys,
            dest_cnode_gen,
            slot_addr,
            dest_depth,
            dest_guard_value,
            dest_guard_bits,
            cap,
        ) {
            Ok(()) => {
                // Record for commit/rollback; push order matches slot order,
                // which the unwind branch relies on.
                let _ = allocated.push((obj_registered_phys, obj_gen));
                Ok(())
            }
            Err(e) => {
                // Insertion failed: free this object's pool entry (its cap
                // was never installed) before aborting.
                let _ = pool.free_phys(obj_registered_phys, obj_gen);
                if obj_tag == ObjectTag::Frame {
                    super::kernel_objects::free_slot(register_phys);
                }
                Err(e)
            }
        }
    });

    match rollback_result {
        Ok(()) => {
            // Whole batch succeeded: commit the untyped watermark now.
            let ut_mut = pool.write_as::<UntypedObject>(untyped_phys, untyped_gen)?;
            let mut state = ut_mut.to_state();
            state.commit_retype(&result);
            ut_mut.apply_state(&state);

            // Record derivations; failure after commit is a bookkeeping bug,
            // not recoverable, hence debug_assert rather than an error path.
            allocated
                .as_slice()
                .iter()
                .for_each(|&(child_phys, _child_gen)| {
                    let link_result =
                        super::derivation::link_child(pool, untyped_phys, untyped_gen, child_phys);
                    debug_assert!(
                        link_result.is_ok(),
                        "link_child failed after commit: {:?}",
                        link_result.err()
                    );
                });

            // Mark the objects' backing 4 KiB frames used in the allocator
            // bitmap (only tags whose backing is page-granular).
            // NOTE(review): a Frame with size_bits > 12 spans multiple 4 KiB
            // frames but only the first is marked here, unlike the TCB path
            // which uses stride().div_ceil(4096) — confirm whether large
            // frames can reach this path.
            let frames_per_obj: u32 = match obj_tag {
                ObjectTag::Frame => 1,
                ObjectTag::CNode => lancer_core::cnode::frames_for_cnode(size_bits) as u32,
                _ => 0,
            };
            if frames_per_obj > 0 {
                (0..count).for_each(|i| {
                    let base = phys_base.as_u64()
                        + result.start_offset() as u64
                        + (i as u64) * result.stride() as u64;
                    (0..frames_per_obj).for_each(|f| {
                        let frame_idx = ((base + f as u64 * 4096) / 4096) as usize;
                        crate::mem::phys::BitmapFrameAllocator::mark_used(frame_idx);
                    });
                });
            }
            Ok(())
        }
        Err(e) => {
            // Unwind: clear each inserted cap (slot i corresponds to entry i
            // because `allocated` was pushed in slot order), then free the
            // pool entry and, for frames, the kernel-object slot.
            allocated
                .as_slice()
                .iter()
                .enumerate()
                .for_each(|(i, &(phys, generation))| {
                    let slot_addr = dest_addr + i as u64;
                    let _ = super::cnode::resolve_and_clear(
                        pool,
                        dest_cnode_phys,
                        dest_cnode_gen,
                        slot_addr,
                        dest_depth,
                        dest_guard_value,
                        dest_guard_bits,
                    );
                    if let Ok(Some((old_phys, _))) = pool.free_phys(phys, generation)
                        && obj_tag == ObjectTag::Frame
                    {
                        super::kernel_objects::free_slot(old_phys);
                    }
                });
            Err(e)
        }
    }
}
413
/// Carve a TCB (Process object) out of untyped memory, create or share an
/// address space per `mode`, register the object, and install its cap.
///
/// The untyped watermark is committed only after the cap insertion succeeds;
/// every earlier failure unwinds via `teardown_tcb` (cnode ref, pid, and
/// either a PML4 refcount release or a full address-space teardown).
#[allow(clippy::too_many_arguments)]
fn retype_tcb_from_untyped(
    pool: &mut ObjectPool,
    ptable: &mut crate::proc::ProcessManager,
    untyped_phys: ObjPhys,
    untyped_gen: Generation,
    mode: AddressSpaceMode,
    size_bits: u8,
    dest_cnode_phys: ObjPhys,
    dest_cnode_gen: Generation,
    dest_addr: u64,
    dest_depth: u8,
    dest_guard_value: u64,
    dest_guard_bits: u8,
) -> Result<crate::types::Pid, KernelError> {
    let ut = pool.read_as::<UntypedObject>(untyped_phys, untyped_gen)?;
    // Threads sharing a parent address space may not live in device memory.
    if matches!(mode, AddressSpaceMode::Shared { .. }) && ut.is_device != 0 {
        return Err(KernelError::InvalidType);
    }

    // Plan the carve-out on a copy of the untyped state; committed later.
    let phys_base = PhysAddr::new(ut.phys_base);
    let (obj_size, obj_align) = lancer_core::untyped::object_layout(ObjectTag::Process, size_bits)
        .map_err(retype_to_kernel_error)?;
    let result = ut
        .to_state()
        .try_allocate_raw(obj_size, obj_align, 1)
        .map_err(retype_to_kernel_error)?;

    // Allocates the pid, creates/shares the PML4, writes and seals the exec
    // context inside the TCB region.
    let tcb_init = init_tcb_memory(ptable, phys_base, result.start_offset() as u64, mode)?;
    let pid = tcb_init.pid;
    let pml4_phys = tcb_init.pml4_phys;
    let shared_cnode = tcb_init.root_cnode;

    // A shared root CNode gains a reference for the new thread; on failure,
    // undo what init_tcb_memory set up (pid + PML4 share).
    if let Some((cnode_phys, cnode_gen)) = shared_cnode
        && let Err(e) = pool.inc_ref(cnode_phys, cnode_gen)
    {
        let _ = ptable.pid_table_mut().free(pid);
        let _ = crate::proc::address_space::pml4_ref_release(pml4_phys.raw());
        return Err(e);
    }

    // Reverses everything established so far. `shared_cnode` doubles as the
    // mode discriminator: Some => thread (release PML4 ref), None => process
    // (tear down the whole address space).
    let teardown_tcb = |pt: &mut crate::proc::ProcessManager, pool: &mut ObjectPool| {
        if let Some((cnode_phys, cnode_gen)) = shared_cnode {
            pool.dec_ref_phys(cnode_phys, cnode_gen);
        }
        let _ = pt.pid_table_mut().free(pid);
        match shared_cnode {
            Some(_) => {
                let _ = crate::proc::address_space::pml4_ref_release(pml4_phys.raw());
            }
            None => {
                let mut allocator = crate::mem::phys::BitmapFrameAllocator;
                crate::proc::address_space::teardown_user_space(pml4_phys.raw(), &mut allocator);
            }
        }
    };

    // Write the ProcessObject header at the base of the carved region.
    let obj_phys = phys_base.as_u64() + result.start_offset() as u64;
    let header = KernelObjectHeader::new(ObjectTag::Process, 0, 64);
    let mut proc_obj = lancer_core::object_layout::ProcessObject::init_default(header);
    proc_obj.pid = pid.raw();
    unsafe {
        core::ptr::write(
            addr::phys_to_virt(PhysAddr::new(obj_phys))
                .as_mut_ptr::<lancer_core::object_layout::ProcessObject>(),
            proc_obj,
        );
    }

    let (obj_registered_phys, obj_gen) = match pool.register_object(obj_phys, ObjectTag::Process) {
        Ok(pair) => pair,
        Err(_) => {
            teardown_tcb(ptable, pool);
            return Err(KernelError::PoolExhausted);
        }
    };

    let cap = CapRef::new(ObjectTag::Process, obj_registered_phys, Rights::ALL, obj_gen);
    if let Err(e) = super::cnode::resolve_and_insert(
        pool,
        dest_cnode_phys,
        dest_cnode_gen,
        dest_addr,
        dest_depth,
        dest_guard_value,
        dest_guard_bits,
        cap,
    ) {
        let _ = pool.free_phys(obj_registered_phys, obj_gen);
        teardown_tcb(ptable, pool);
        return Err(e);
    }

    // Cap is installed: commit the untyped watermark now.
    let ut_mut = pool.write_as::<UntypedObject>(untyped_phys, untyped_gen)?;
    let mut state = ut_mut.to_state();
    state.commit_retype(&result);
    ut_mut.apply_state(&state);

    // Failure after commit is a bookkeeping bug, hence debug_assert only.
    let link_result =
        super::derivation::link_child(pool, untyped_phys, untyped_gen, obj_registered_phys);
    debug_assert!(
        link_result.is_ok(),
        "link_child failed after tcb commit: {:?}",
        link_result.err()
    );

    // Mark every 4 KiB frame covered by the TCB allocation as used.
    let total_frames = result.stride().div_ceil(4096);
    (0..total_frames).for_each(|f| {
        let frame_idx = ((obj_phys + f as u64 * 4096) / 4096) as usize;
        crate::mem::phys::BitmapFrameAllocator::mark_used(frame_idx);
    });

    Ok(pid)
}
528
/// Create a thread: a TCB that shares `parent_pid`'s address space and root
/// CNode. Thin wrapper over `retype_tcb_from_untyped` with
/// `AddressSpaceMode::Shared` and the default TCB size (`size_bits == 0`).
#[allow(clippy::too_many_arguments)]
pub fn retype_thread_from_untyped(
    pool: &mut ObjectPool,
    ptable: &mut crate::proc::ProcessManager,
    untyped_phys: ObjPhys,
    untyped_gen: Generation,
    parent_pid: crate::types::Pid,
    dest_cnode_phys: ObjPhys,
    dest_cnode_gen: Generation,
    dest_addr: u64,
    dest_depth: u8,
    dest_guard_value: u64,
    dest_guard_bits: u8,
) -> Result<crate::types::Pid, KernelError> {
    retype_tcb_from_untyped(
        pool,
        ptable,
        untyped_phys,
        untyped_gen,
        AddressSpaceMode::Shared { parent_pid },
        0,
        dest_cnode_phys,
        dest_cnode_gen,
        dest_addr,
        dest_depth,
        dest_guard_value,
        dest_guard_bits,
    )
}
558
559#[allow(clippy::too_many_arguments)]
560pub(crate) fn retype_vregion_from_untyped(
561 pool: &mut ObjectPool,
562 untyped_phys: ObjPhys,
563 untyped_gen: Generation,
564 page_count: u16,
565 dest_cnode_phys: ObjPhys,
566 dest_cnode_gen: Generation,
567 dest_addr: u64,
568 dest_depth: u8,
569 dest_guard_value: u64,
570 dest_guard_bits: u8,
571) -> Result<(), KernelError> {
572 let ut = pool.read_as::<UntypedObject>(untyped_phys, untyped_gen)?;
573 if ut.is_device != 0 {
574 return Err(KernelError::InvalidType);
575 }
576
577 let phys_base = PhysAddr::new(ut.phys_base);
578 let backing_size = page_count as u32 * 4096;
579 let result = ut
580 .to_state()
581 .try_allocate_raw(backing_size, 4096, 1)
582 .map_err(retype_to_kernel_error)?;
583
584 let backing_phys = phys_base.as_u64() + result.start_offset() as u64;
585
586 let ut_mut = pool.write_as::<UntypedObject>(untyped_phys, untyped_gen)?;
587 let mut state = ut_mut.to_state();
588 state.commit_retype(&result);
589 ut_mut.apply_state(&state);
590
591 let kobj_phys =
592 super::kernel_objects::alloc_slot().ok_or(KernelError::PoolExhausted)?;
593
594 let header = KernelObjectHeader::new(ObjectTag::VRegion, 0, 64);
595 let mut obj = VRegionObject::init_default(header);
596 obj.phys_base = backing_phys;
597 obj.page_count = page_count;
598 super::kernel_objects::write_at(kobj_phys, obj);
599
600 let (obj_registered_phys, obj_gen) = match pool.register_object(kobj_phys, ObjectTag::VRegion) {
601 Ok(pair) => pair,
602 Err(_) => {
603 super::kernel_objects::free_slot(kobj_phys);
604 return Err(KernelError::PoolExhausted);
605 }
606 };
607
608 let cap = CapRef::new(ObjectTag::VRegion, obj_registered_phys, Rights::ALL, obj_gen);
609 if let Err(e) = super::cnode::resolve_and_insert(
610 pool,
611 dest_cnode_phys,
612 dest_cnode_gen,
613 dest_addr,
614 dest_depth,
615 dest_guard_value,
616 dest_guard_bits,
617 cap,
618 ) {
619 let _ = pool.free_phys(obj_registered_phys, obj_gen);
620 super::kernel_objects::free_slot(kobj_phys);
621 return Err(e);
622 }
623
624 super::derivation::link_child(pool, untyped_phys, untyped_gen, obj_registered_phys)
625 .expect("link_child failed after vregion commit");
626
627 (0..page_count as u32).for_each(|f| {
628 let frame_idx = ((backing_phys + f as u64 * 4096) / 4096) as usize;
629 crate::mem::phys::BitmapFrameAllocator::mark_used(frame_idx);
630 });
631
632 Ok(())
633}
634
635fn retype_to_kernel_error(e: lancer_core::untyped::RetypeError) -> KernelError {
636 match e {
637 lancer_core::untyped::RetypeError::InsufficientSpace => KernelError::ResourceExhausted,
638 lancer_core::untyped::RetypeError::DeviceRejectsNonFrame => KernelError::InvalidType,
639 lancer_core::untyped::RetypeError::ZeroCount => KernelError::InvalidParameter,
640 lancer_core::untyped::RetypeError::Overflow => KernelError::InvalidParameter,
641 lancer_core::untyped::RetypeError::InvalidSizeBits => KernelError::InvalidParameter,
642 lancer_core::untyped::RetypeError::InvalidType => KernelError::InvalidType,
643 }
644}