use crate::error::KernelError;
use crate::mem::addr;
use crate::mem::phys::BitmapFrameAllocator;
use crate::mem::typed_addr::Pml4Phys;
use crate::proc::address_space;
use crate::proc::elf;
use crate::syscall::{MIN_USER_VADDR, USER_ADDR_LIMIT, UserVirtAddr};
use crate::types::Pid;

// 256-page (1 MiB) user stack with an unmapped guard page just below it; the
// const assert verifies the whole stack region stays within the user limit.
const USER_STACK_PAGES: u64 = 256;
const USER_STACK_VIRT: u64 = 0x7FFF_FFE0_0000;
const USER_STACK_GUARD: u64 = USER_STACK_VIRT - 4096;
// RFLAGS for user entry: reserved bit 1 plus IF (interrupts enabled).
const USER_RFLAGS: u64 = 0x202;
const MAX_PAGES_PER_SEGMENT: u64 = 1024;

const _: () = {
    assert!(USER_STACK_VIRT + USER_STACK_PAGES * 4096 <= USER_ADDR_LIMIT);
};

/// Unmap `page_count` pages of a (possibly partially mapped) segment and free
/// each backing frame once its refcount drops to zero.
fn unmap_segment_pages(pml4_phys: Pml4Phys, seg: &lancer_core::elf::LoadSegment, page_count: u64) {
    let base_page = seg.vaddr & !0xFFF;
    (0..page_count).for_each(|p| {
        let virt = x86_64::VirtAddr::new(base_page + p * 4096);
        match address_space::unmap_user_page(pml4_phys, virt) {
            Ok(frame) => {
                let phys = frame.start_address();
                match crate::mem::refcount::decrement(phys) {
                    Ok(0) => BitmapFrameAllocator::free_frame_by_addr(phys),
                    Ok(_) => {}
                    Err(e) => crate::show!(
                        loader,
                        error,
                        "refcount decrement failed during cleanup {:#x} {:?}",
                        phys.as_u64(),
                        e
                    ),
                }
            }
            Err(e) => crate::show!(
                loader,
                error,
                "unmap failed during cleanup {:#x} {:?}",
                virt.as_u64(),
                e
            ),
        }
    });
}

/// Map the user stack region and initialize the saved context's stack pointer,
/// flags, and user-mode segment selectors.
fn map_user_stack(
    exec: &mut super::ExecContext,
    allocator: &mut BitmapFrameAllocator,
) -> Result<(), KernelError> {
    let pml4_phys = exec.pml4_phys;
    (0..USER_STACK_PAGES).try_fold((), |(), i| {
        let stack_frame = allocator.allocate().ok_or(KernelError::ResourceExhausted)?;
        addr::zero_frame(stack_frame.phys_addr());
        address_space::map_user_page(
            pml4_phys,
            UserVirtAddr::new(USER_STACK_VIRT + i * 4096)
                .map_err(|_| KernelError::InvalidAddress)?,
            stack_frame,
            address_space::PageAccess::ReadWrite,
            allocator,
        )
    })?;
    let sels = crate::arch::gdt::selectors();
    // RSP starts 8 bytes below the top of the stack region.
    exec.saved_context.rsp = USER_STACK_VIRT + USER_STACK_PAGES * 4096 - 8;
    exec.saved_context.rflags = USER_RFLAGS;
    exec.saved_context.cs = sels.user_code.0 as u64;
    exec.saved_context.ss = sels.user_data.0 as u64;
    Ok(())
}

/// Load an ELF module into the address space of `pid`, map its user stack, and
/// seal the initial user-mode context.
pub fn spawn_module(
    pid: Pid,
    module_data: &[u8],
    allocator: &mut BitmapFrameAllocator,
) -> Result<(), KernelError> {
    let elf_info = elf::parse(module_data).map_err(|e| {
        crate::show!(
            loader,
            error,
            "elf parse error for pid {} {:?}",
            pid.raw(),
            e
        );
        KernelError::InvalidParameter
    })?;

    // Snapshot the target address space and scheduler generation, then drop the
    // process-table lock while the segments are mapped.
    let (pml4_phys, expected_gen) = {
        let ptable = crate::proc::PROCESSES.lock();
        let sched = ptable.get(pid).ok_or(KernelError::InvalidObject)?;
        let exec = ptable.exec(pid).ok_or(KernelError::InvalidObject)?;
        (exec.pml4_phys, sched.generation)
    };

    // Every loadable segment must lie entirely within the user address range.
    let all_user_space = elf_info.segments.iter().all(|seg| {
        seg.vaddr >= MIN_USER_VADDR
            && seg
                .vaddr
                .checked_add(seg.memsz)
                .is_some_and(|end| end <= USER_ADDR_LIMIT)
    });
    if !all_user_space {
        return Err(KernelError::InvalidAddress);
    }

    // The entry point must fall inside an executable segment.
    let entry_in_executable = elf_info.segments.iter().any(|seg| {
        seg.executable
            && elf_info.entry >= seg.vaddr
            && seg
                .vaddr
                .checked_add(seg.memsz)
                .is_some_and(|end| elf_info.entry < end)
    });
    if !entry_in_executable {
        return Err(KernelError::InvalidParameter);
    }

    // Reject segments whose page-rounded ranges overlap each other.
    let has_overlap = (0..elf_info.segments.len()).any(|i| {
        let a = *elf_info.segments.get(i).unwrap();
        let a_base = a.vaddr & !0xFFF;
        let a_end = a
            .vaddr
            .checked_add(a.memsz)
            .and_then(|v| v.checked_add(0xFFF))
            .map(|v| v & !0xFFF)
            .unwrap_or(u64::MAX);
        ((i + 1)..elf_info.segments.len()).any(|j| {
            let b = *elf_info.segments.get(j).unwrap();
            let b_base = b.vaddr & !0xFFF;
            let b_end = b
                .vaddr
                .checked_add(b.memsz)
                .and_then(|v| v.checked_add(0xFFF))
                .map(|v| v & !0xFFF)
                .unwrap_or(u64::MAX);
            a_base < b_end && b_base < a_end
        })
    });
    if has_overlap {
        return Err(KernelError::InvalidAddress);
    }

    // Reject segments that collide with the stack region or its guard page.
    let stack_region_base = USER_STACK_GUARD;
    let stack_region_end = USER_STACK_VIRT + USER_STACK_PAGES * 4096;
    let hits_stack = elf_info.segments.iter().any(|seg| {
        let seg_base = seg.vaddr & !0xFFF;
        let seg_end = seg
            .vaddr
            .checked_add(seg.memsz)
            .and_then(|v| v.checked_add(0xFFF))
            .map(|v| v & !0xFFF)
            .unwrap_or(u64::MAX);
        seg_base < stack_region_end && stack_region_base < seg_end
    });
    if hits_stack {
        return Err(KernelError::InvalidAddress);
    }

    let mut total_pages_mapped: u16 = 0;
    let mut loaded_segments: usize = 0;
    let load_result: Result<(), KernelError> =
        elf_info.segments.iter().try_fold((), |(), seg| {
            let base_page = seg.vaddr & !0xFFF;
            let end = seg
                .vaddr
                .checked_add(seg.memsz)
                .ok_or(KernelError::InvalidParameter)?;
            let pages = (end - base_page).div_ceil(4096);
            if pages > MAX_PAGES_PER_SEGMENT {
                return Err(KernelError::ResourceExhausted);
            }
            let mut pages_mapped: u64 = 0;
            let page_result: Result<(), KernelError> = (0..pages).try_fold((), |(), p| {
                let page_vaddr = base_page + p * 4096;
                let frame = allocator.allocate().ok_or(KernelError::ResourceExhausted)?;
                let frame_virt = addr::phys_to_virt(frame.phys_addr());
                // Zero-fill first so any .bss portion of the page is clean,
                // then copy the file-backed bytes that fall within this page.
                addr::zero_frame(frame.phys_addr());
                unsafe {
                    let dst = frame_virt.as_mut_ptr::<u8>();
                    let page_start = page_vaddr;
                    let page_end = page_vaddr + 4096;
                    let data_start = seg.vaddr;
                    let data_end = seg.vaddr + seg.filesz;
                    let copy_start = page_start.max(data_start);
                    let copy_end = page_end.min(data_end);
                    if copy_start < copy_end {
                        let file_off = seg
                            .file_offset
                            .checked_add(copy_start - seg.vaddr)
                            .ok_or(KernelError::InvalidParameter)?;
                        let dst_off = copy_start - page_vaddr;
                        let len = (copy_end - copy_start) as usize;
                        let src_end = file_off
                            .checked_add(len as u64)
                            .ok_or(KernelError::InvalidParameter)?;
                        if src_end > module_data.len() as u64 {
                            return Err(KernelError::InvalidParameter);
                        }
                        core::ptr::copy_nonoverlapping(
                            module_data.as_ptr().add(file_off as usize),
                            dst.add(dst_off as usize),
                            len,
                        );
                    }
                }
                address_space::map_user_page(
                    pml4_phys,
                    UserVirtAddr::new(page_vaddr)?,
                    frame,
                    match (seg.writable, seg.executable) {
                        (false, false) => address_space::PageAccess::ReadOnly,
                        (true, false) => address_space::PageAccess::ReadWrite,
                        (false, true) => address_space::PageAccess::ReadExecute,
                        (true, true) => address_space::PageAccess::ReadWriteExecute,
                    },
                    allocator,
                )?;
                pages_mapped += 1;
                Ok(())
            });
            match page_result {
                Ok(()) => {
                    total_pages_mapped = total_pages_mapped.saturating_add(pages_mapped as u16);
                    loaded_segments += 1;
                    Ok(())
                }
                Err(e) => {
                    // Roll back the pages mapped for this partially loaded segment.
                    unmap_segment_pages(pml4_phys, seg, pages_mapped);
                    Err(e)
                }
            }
        });

    if let Err(e) = load_result {
        // Roll back every fully loaded segment before reporting the error.
        elf_info.segments.as_slice()[..loaded_segments]
            .iter()
            .for_each(|seg| {
                let base_page = seg.vaddr & !0xFFF;
                let end = seg.vaddr.saturating_add(seg.memsz);
                let pages = (end - base_page).div_ceil(4096);
                unmap_segment_pages(pml4_phys, seg, pages);
            });
        return Err(e);
    }

    // Re-acquire the process table and confirm the pid was not reused while the
    // lock was dropped (generation must still match the snapshot).
    let mut ptable = crate::proc::PROCESSES.lock();
    let sched = ptable
        .get(pid)
        .filter(|s| s.generation == expected_gen)
        .ok_or(KernelError::InvalidObject)?;
    let _ = sched;
    let exec = ptable.exec_mut(pid).ok_or(KernelError::InvalidObject)?;
    map_user_stack(exec, allocator)?;
    total_pages_mapped = total_pages_mapped.saturating_add(USER_STACK_PAGES as u16);
    exec.charge_frames(total_pages_mapped)?;
    exec.saved_context.rip = elf_info.entry;
    exec.seal_context();
    Ok(())
}