//! Capability and kernel-object lifecycle operations: tag-specific object
//! cleanup, and CNode-addressed capability create/derive/identify/insert/revoke.
1use lancer_core::header::{KernelObjectHeader, NONE_SENTINEL}; 2use lancer_core::object_layout::{ 3 CNodeObject, EndpointObject, FrameObject, IrqHandlerObject, KernelObject, NotificationObject, 4 SchedContextObject, 5}; 6use lancer_core::object_tag::ObjectTag; 7 8use super::object::PidQueue; 9use super::pool::POOL; 10use super::table::{CapRef, Rights}; 11use crate::error::KernelError; 12use crate::mem::addr; 13use crate::types::{Generation, ObjPhys, Pid}; 14use x86_64::PhysAddr; 15 16pub fn cleanup_by_tag(tag: ObjectTag, phys: u64) { 17 match tag { 18 ObjectTag::IrqHandler => { 19 let obj = 20 unsafe { &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<IrqHandlerObject>()) }; 21 match crate::arch::idt::IrqVector::try_new(obj.vector) { 22 Some(vec) => crate::irq::unbind_by_vector(vec), 23 None => {} 24 } 25 } 26 ObjectTag::CNode => { 27 let obj = 28 unsafe { &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<CNodeObject>()) }; 29 let cnode_data = super::object::CNodeData { 30 slots_phys: PhysAddr::new(obj.slots_phys), 31 size_bits: obj.size_bits, 32 frame_count: obj.frame_count, 33 }; 34 super::cnode::destroy_cnode(&cnode_data, &crate::mem::phys::BitmapFrameAllocator); 35 super::kernel_objects::free_slot(phys); 36 } 37 ObjectTag::Frame => { 38 let obj = 39 unsafe { &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<FrameObject>()) }; 40 let idx = obj.frame_table_idx; 41 match idx != NONE_SENTINEL as u16 { 42 true => { 43 let mut ft = super::frame_table::FRAME_TABLE.lock(); 44 ft.get(idx).iommu_mapping().inspect(|mapping| { 45 crate::iommu::unmap_frame_iommu(mapping.bus, mapping.devfn, mapping.iova); 46 }); 47 ft.clear(idx); 48 } 49 false => {} 50 } 51 super::kernel_objects::free_slot(phys); 52 } 53 ObjectTag::Process => { 54 super::kernel_objects::free_slot(phys); 55 } 56 ObjectTag::Endpoint 57 | ObjectTag::Notification 58 | ObjectTag::SchedContext 59 | ObjectTag::Framebuffer 60 | ObjectTag::PciDevice 61 | ObjectTag::Untyped => {} 62 } 63} 64 65pub fn 
cleanup_by_tag_with_ptable( 66 tag: ObjectTag, 67 phys: u64, 68 ptable: &mut crate::proc::ProcessManager, 69) { 70 match tag { 71 ObjectTag::Frame => {} 72 _ => cleanup_by_tag(tag, phys), 73 } 74 match tag { 75 ObjectTag::Endpoint => { 76 let obj = 77 unsafe { &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<EndpointObject>()) }; 78 let senders = PidQueue::from_repr_c(obj.sender_head, obj.sender_tail, obj.sender_len); 79 let receivers = 80 PidQueue::from_repr_c(obj.receiver_head, obj.receiver_tail, obj.receiver_len); 81 unblock_queue(&senders, ptable); 82 unblock_queue(&receivers, ptable); 83 } 84 ObjectTag::Notification => { 85 let obj = unsafe { 86 &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<NotificationObject>()) 87 }; 88 (0..obj.waiter_count as usize) 89 .filter_map(|i| Pid::try_new(obj.waiters[i])) 90 .for_each(|pid| { 91 let proof = ptable[pid].blocked_proof(); 92 match ptable.unblock_and_enqueue(pid, proof) { 93 Ok(()) => { 94 let exec = ptable.exec_mut(pid).unwrap(); 95 exec.saved_context.rax = 96 crate::error::KernelError::InvalidObject.to_errno() as u64; 97 exec.seal_context(); 98 } 99 Err(e) => { 100 crate::kprintln!( 101 "[cap] BUG: notification cleanup failed to unblock pid {}: {:?}", 102 pid.raw(), 103 e 104 ); 105 } 106 } 107 }); 108 } 109 ObjectTag::SchedContext => { 110 let obj = unsafe { 111 &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<SchedContextObject>()) 112 }; 113 Pid::try_new(obj.attached_pid).inspect(|&pid| { 114 if let Some(sched) = ptable.get_mut(pid) { 115 sched.detach_sched_context(); 116 } 117 }); 118 } 119 ObjectTag::Frame => { 120 let obj = 121 unsafe { &*(addr::phys_to_virt(PhysAddr::new(phys)).as_ptr::<FrameObject>()) }; 122 let frame_phys = PhysAddr::new(obj.phys_addr); 123 let owned_by_untyped = obj.header.parent_untyped != NONE_SENTINEL; 124 let idx = obj.frame_table_idx; 125 match idx != NONE_SENTINEL as u16 { 126 true => { 127 let mut ft = super::frame_table::FRAME_TABLE.lock(); 128 let entry = 
ft.get_mut(idx); 129 entry.for_each_mapping(|pid, vaddr| { 130 if let Some(exec) = ptable.exec(pid) { 131 let _ = 132 crate::proc::address_space::unmap_user_page(exec.pml4_phys, vaddr); 133 match crate::mem::refcount::decrement(frame_phys) { 134 Ok(0) if !owned_by_untyped => { 135 crate::mem::phys::BitmapFrameAllocator::free_frame_by_addr( 136 frame_phys, 137 ) 138 } 139 Ok(_) => {} 140 Err(e) => crate::kprintln!( 141 "[cap] frame refcount decrement failed: {:#x} {:?}", 142 frame_phys.as_u64(), 143 e 144 ), 145 } 146 } 147 }); 148 entry.iommu_mapping().inspect(|mapping| { 149 crate::iommu::unmap_frame_iommu(mapping.bus, mapping.devfn, mapping.iova); 150 }); 151 ft.clear(idx); 152 } 153 false => {} 154 } 155 super::kernel_objects::free_slot(phys); 156 } 157 ObjectTag::Process 158 | ObjectTag::Framebuffer 159 | ObjectTag::PciDevice 160 | ObjectTag::Untyped 161 | ObjectTag::CNode 162 | ObjectTag::IrqHandler => {} 163 } 164} 165 166fn unblock_queue(queue: &super::object::PidQueue, ptable: &mut crate::proc::ProcessManager) { 167 let mut cursor = queue.head; 168 let mut steps = 0u32; 169 let max = crate::types::MAX_PIDS as u32; 170 core::iter::from_fn(|| { 171 cursor.filter(|_| steps < max).inspect(|&pid| { 172 steps += 1; 173 cursor = ptable[pid].next_ipc; 174 ptable[pid].next_ipc = None; 175 let proof = ptable[pid].blocked_proof(); 176 match ptable.unblock_and_enqueue(pid, proof) { 177 Ok(()) => { 178 let exec = ptable.exec_mut(pid).unwrap(); 179 exec.saved_context.rax = 180 crate::error::KernelError::InvalidObject.to_errno() as u64; 181 exec.seal_context(); 182 } 183 Err(e) => { 184 crate::kprintln!( 185 "[cap] BUG: unblock_queue failed to unblock pid {}: {:?}", 186 pid.raw(), 187 e 188 ); 189 } 190 } 191 ptable.clear_reply_targets_for(pid); 192 }) 193 }) 194 .count(); 195} 196 197pub fn resolve_process_cap( 198 cap: &CapRef, 199 pool: &super::pool::ObjectPool, 200) -> Result<crate::types::Pid, KernelError> { 201 let obj = pool 202 
.read_as::<lancer_core::object_layout::ProcessObject>(cap.phys(), cap.generation())?; 203 crate::types::Pid::try_new(obj.pid).ok_or(KernelError::InvalidObject) 204} 205 206#[allow(clippy::too_many_arguments)] 207pub fn create_via_cnode( 208 pool: &mut super::pool::ObjectPool, 209 cnode_phys: ObjPhys, 210 cnode_gen: Generation, 211 address: u64, 212 depth: u8, 213 guard_value: u64, 214 guard_bits: u8, 215 tag: ObjectTag, 216) -> Result<ObjPhys, KernelError> { 217 let obj_phys = match tag { 218 ObjectTag::Endpoint => { 219 let phys = super::kernel_objects::alloc_slot().ok_or(KernelError::PoolExhausted)?; 220 let header = KernelObjectHeader::new(ObjectTag::Endpoint, 0, 64); 221 super::kernel_objects::write_at(phys, EndpointObject::init_default(header)); 222 phys 223 } 224 ObjectTag::Notification => { 225 let phys = super::kernel_objects::alloc_slot().ok_or(KernelError::PoolExhausted)?; 226 let header = KernelObjectHeader::new(ObjectTag::Notification, 0, 64); 227 super::kernel_objects::write_at(phys, NotificationObject::init_default(header)); 228 phys 229 } 230 ObjectTag::Process 231 | ObjectTag::SchedContext 232 | ObjectTag::IrqHandler 233 | ObjectTag::Framebuffer 234 | ObjectTag::PciDevice 235 | ObjectTag::CNode 236 | ObjectTag::Untyped 237 | ObjectTag::Frame => { 238 return Err(KernelError::InvalidType); 239 } 240 }; 241 242 let (registered_phys, obj_gen) = match pool.register_object(obj_phys, tag) { 243 Ok(pair) => pair, 244 Err(e) => { 245 super::kernel_objects::free_slot(obj_phys); 246 return Err(e); 247 } 248 }; 249 let cap = CapRef::new(tag, registered_phys, Rights::ALL, obj_gen); 250 match super::cnode::resolve_and_insert( 251 pool, 252 cnode_phys, 253 cnode_gen, 254 address, 255 depth, 256 guard_value, 257 guard_bits, 258 cap, 259 ) { 260 Ok(()) => Ok(registered_phys), 261 Err(e) => { 262 let _ = pool.free_phys(registered_phys, obj_gen); 263 Err(e) 264 } 265 } 266} 267 268#[allow(clippy::too_many_arguments)] 269pub fn derive_via_cnode( 270 pool: &mut 
super::pool::ObjectPool, 271 cnode_phys: ObjPhys, 272 cnode_gen: Generation, 273 src_addr: u64, 274 dest_addr: u64, 275 depth: u8, 276 guard_value: u64, 277 guard_bits: u8, 278 rights_mask: Rights, 279) -> Result<(), KernelError> { 280 let src = super::cnode::resolve_and_read( 281 pool, 282 cnode_phys, 283 cnode_gen, 284 src_addr, 285 depth, 286 guard_value, 287 guard_bits, 288 )?; 289 if !src.rights().contains(Rights::GRANT) { 290 return Err(KernelError::InsufficientRights); 291 } 292 pool.inc_ref(src.phys(), src.generation())?; 293 let derived = src.with_rights(src.rights() & rights_mask); 294 match super::cnode::resolve_and_insert( 295 pool, 296 cnode_phys, 297 cnode_gen, 298 dest_addr, 299 depth, 300 guard_value, 301 guard_bits, 302 derived, 303 ) { 304 Ok(()) => Ok(()), 305 Err(e) => { 306 pool.dec_ref_phys(src.phys(), src.generation()); 307 Err(e) 308 } 309 } 310} 311 312pub fn identify_via_cnode( 313 pool: &super::pool::ObjectPool, 314 cnode_phys: ObjPhys, 315 cnode_gen: Generation, 316 address: u64, 317 depth: u8, 318 guard_value: u64, 319 guard_bits: u8, 320) -> Result<(ObjectTag, Rights), KernelError> { 321 let cap = super::cnode::resolve_and_read( 322 pool, 323 cnode_phys, 324 cnode_gen, 325 address, 326 depth, 327 guard_value, 328 guard_bits, 329 )?; 330 match pool.get_tag(cap.phys(), cap.generation()) { 331 Ok(_) => Ok((cap.tag(), cap.rights())), 332 Err(KernelError::StaleGeneration) => { 333 let _ = super::cnode::resolve_and_clear( 334 pool, 335 cnode_phys, 336 cnode_gen, 337 address, 338 depth, 339 guard_value, 340 guard_bits, 341 ); 342 Err(KernelError::StaleGeneration) 343 } 344 Err(e) => Err(e), 345 } 346} 347 348#[allow(clippy::too_many_arguments)] 349pub fn insert_phys_cap_via_cnode( 350 pool: &mut super::pool::ObjectPool, 351 cnode_phys: ObjPhys, 352 cnode_gen: Generation, 353 address: u64, 354 depth: u8, 355 guard_value: u64, 356 guard_bits: u8, 357 tag: ObjectTag, 358 obj_phys: u64, 359 rights: Rights, 360) -> Result<ObjPhys, KernelError> { 
361 let (registered_phys, obj_gen) = match pool.register_object(obj_phys, tag) { 362 Ok(pair) => pair, 363 Err(e) => { 364 super::kernel_objects::free_slot(obj_phys); 365 return Err(e); 366 } 367 }; 368 if tag == ObjectTag::Frame { 369 let mut ft = super::frame_table::FRAME_TABLE.lock(); 370 if let Some(idx) = ft.alloc_idx() { 371 let frame_obj = unsafe { 372 &mut *(crate::mem::addr::phys_to_virt(x86_64::PhysAddr::new( 373 registered_phys.raw(), 374 )) 375 .as_mut_ptr::<lancer_core::object_layout::FrameObject>()) 376 }; 377 frame_obj.frame_table_idx = idx; 378 } 379 } 380 let cap = CapRef::new(tag, registered_phys, rights, obj_gen); 381 match super::cnode::resolve_and_insert( 382 pool, 383 cnode_phys, 384 cnode_gen, 385 address, 386 depth, 387 guard_value, 388 guard_bits, 389 cap, 390 ) { 391 Ok(()) => Ok(registered_phys), 392 Err(e) => { 393 let _ = pool.free_phys(registered_phys, obj_gen); 394 Err(e) 395 } 396 } 397} 398 399pub fn revoke_via_cnode( 400 pid: crate::types::Pid, 401 address: u64, 402 ptable: &mut crate::proc::ProcessManager, 403) -> Result<(), KernelError> { 404 let (cnode_phys, cnode_gen, depth, guard_value, guard_bits) = 405 super::cnode::cnode_coords(pid, ptable)?; 406 let cap_snapshot = { 407 let pool = POOL.lock(); 408 super::cnode::resolve_and_read( 409 &pool, 410 cnode_phys, 411 cnode_gen, 412 address, 413 depth, 414 guard_value, 415 guard_bits, 416 )? 
417 }; 418 if !cap_snapshot.rights().contains(Rights::REVOKE) { 419 return Err(KernelError::InsufficientRights); 420 } 421 let stale_phys = cap_snapshot.phys(); 422 let stale_gen = cap_snapshot.generation(); 423 424 let is_untyped = cap_snapshot.tag() == ObjectTag::Untyped; 425 426 match is_untyped { 427 true => { 428 let mut pool = POOL.lock(); 429 super::derivation::destroy_children(&mut pool, ptable, stale_phys, stale_gen)?; 430 Ok(()) 431 } 432 false => { 433 { 434 let mut pool = POOL.lock(); 435 super::derivation::unlink_child(&mut pool, stale_phys); 436 } 437 let (_new_gen, old_phys_tag) = POOL.lock().revoke_phys(stale_phys, stale_gen)?; 438 { 439 let pool = POOL.lock(); 440 let _ = super::cnode::resolve_and_clear( 441 &pool, 442 cnode_phys, 443 cnode_gen, 444 address, 445 depth, 446 guard_value, 447 guard_bits, 448 ); 449 invalidate_stale_caps_via_cnode(ptable, &pool, stale_phys, stale_gen); 450 } 451 old_phys_tag.inspect(|&(phys, tag)| { 452 cleanup_by_tag_with_ptable(tag, phys, ptable); 453 }); 454 Ok(()) 455 } 456 } 457} 458 459pub fn invalidate_stale_caps_via_cnode( 460 ptable: &crate::proc::ProcessManager, 461 pool: &super::pool::ObjectPool, 462 phys: ObjPhys, 463 stale_gen: Generation, 464) { 465 let cap = ptable.capacity(); 466 (0..cap as u32) 467 .filter_map(crate::types::Pid::try_new) 468 .filter_map(|pid| ptable.exec(pid).and_then(|e| e.root_cnode())) 469 .for_each(|(cphys, cgen)| { 470 let _ = super::cnode::invalidate_stale_in_cnode(pool, cphys, cgen, phys, stale_gen); 471 }); 472}