use crate::error::KernelError;
use crate::mem::addr;
use crate::mem::phys::BitmapFrameAllocator;
use crate::mem::typed_addr::Pml4Phys;
use crate::proc::address_space;
use crate::proc::elf;
use crate::syscall::{MIN_USER_VADDR, USER_ADDR_LIMIT, UserVirtAddr};
use crate::types::Pid;

/// Number of 4 KiB pages in the user stack (1 MiB total).
const USER_STACK_PAGES: u64 = 256;
/// Lowest virtual address of the user stack mapping.
const USER_STACK_VIRT: u64 = 0x7FFF_FFE0_0000;
/// Unmapped guard page directly below the stack.
const USER_STACK_GUARD: u64 = USER_STACK_VIRT - 4096;
/// Initial RFLAGS for userspace entry: reserved bit 1 plus IF (interrupts on).
const USER_RFLAGS: u64 = 0x202;
/// Cap on the number of pages a single ELF segment may occupy.
const MAX_PAGES_PER_SEGMENT: u64 = 1024;

// Compile-time guarantee that the stack fits below the user address limit.
const _: () = {
    assert!(USER_STACK_VIRT + USER_STACK_PAGES * 4096 <= USER_ADDR_LIMIT);
};

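/// Unmaps the first `page_count` pages of `seg` from the address space rooted
/// at `pml4_phys`, decrementing each frame's refcount and freeing frames whose
/// count reaches zero. Errors are logged but not propagated, since this runs
/// on cleanup paths.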
fn unmap_segment_pages(pml4_phys: Pml4Phys, seg: &lancer_core::elf::LoadSegment, page_count: u64) {
    let base_page = seg.vaddr & !0xFFF;
    (0..page_count).for_each(|p| {
        let virt = x86_64::VirtAddr::new(base_page + p * 4096);
        match address_space::unmap_user_page(pml4_phys, virt) {
            Ok(frame) => {
                let phys = frame.start_address();
                match crate::mem::refcount::decrement(phys) {
                    // Last reference gone: return the frame to the allocator.
                    Ok(0) => BitmapFrameAllocator::free_frame_by_addr(phys),
                    // Still referenced by another mapping; leave it allocated.
                    Ok(_) => {}
                    Err(e) => crate::show!(
                        loader,
                        error,
                        "refcount decrement failed during cleanup {:#x} {:?}",
                        phys.as_u64(),
                        e
                    ),
                }
            }
            Err(e) => crate::show!(
                loader,
                error,
                "unmap failed during cleanup {:#x} {:?}",
                virt.as_u64(),
                e
            ),
        }
    });
}

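/// Allocates and maps a zeroed user stack, then points the saved context's
/// RSP at its top and loads the user-mode CS/SS selectors and RFLAGS for the
/// first entry into userspace.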
fn map_user_stack(
    exec: &mut super::ExecContext,
    allocator: &mut BitmapFrameAllocator,
) -> Result<(), KernelError> {
    let pml4_phys = exec.pml4_phys;

    (0..USER_STACK_PAGES).try_fold((), |(), i| {
        let virt = UserVirtAddr::new(USER_STACK_VIRT + i * 4096)
            .map_err(|_| KernelError::InvalidAddress)?;
        let stack_frame = allocator.allocate().ok_or(KernelError::ResourceExhausted)?;
        let frame_phys = stack_frame.phys_addr();
        addr::zero_frame(frame_phys);
        address_space::map_user_page(
            pml4_phys,
            virt,
            stack_frame,
            address_space::PageAccess::ReadWrite,
            allocator,
        )
        .map_err(|e| {
            // The frame was never mapped; return it to the allocator rather
            // than leaking it.
            BitmapFrameAllocator::free_frame_by_addr(frame_phys);
            e
        })
    })?;

    let sels = crate::arch::gdt::selectors();
    // RSP starts 8 bytes below the stack top, matching the stack alignment
    // user code expects immediately after a `call`.
    exec.saved_context.rsp = USER_STACK_VIRT + USER_STACK_PAGES * 4096 - 8;
    exec.saved_context.rflags = USER_RFLAGS;
    exec.saved_context.cs = sels.user_code.0 as u64;
    exec.saved_context.ss = sels.user_data.0 as u64;
    Ok(())
}

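/// Loads the ELF image in `module_data` into the address space of `pid`:
/// parses and validates the segments, copies them into freshly allocated
/// frames, maps a stack, and seals the initial context at the entry point.
/// On failure, already-mapped pages are unmapped before returning.
///
/// A sketch of a call site (the surrounding names are hypothetical, not part
/// of this module):
///
/// ```ignore
/// // `pid` and `module_bytes` are assumed to come from process creation and
/// // the boot modules respectively.
/// spawn_module(pid, module_bytes, &mut frame_allocator)?;
/// ```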
pub fn spawn_module(
    pid: Pid,
    module_data: &[u8],
    allocator: &mut BitmapFrameAllocator,
) -> Result<(), KernelError> {
    let elf_info = elf::parse(module_data).map_err(|e| {
        crate::show!(
            loader,
            error,
            "elf parse error for pid {} {:?}",
            pid.raw(),
            e
        );
        KernelError::InvalidParameter
    })?;

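    // Snapshot the PML4 root and scheduler generation under the lock. The
    // lock is dropped during the load; the generation is re-checked at the
    // end so a recycled process slot cannot be corrupted.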
    let (pml4_phys, expected_gen) = {
        let ptable = crate::proc::PROCESSES.lock();
        let sched = ptable.get(pid).ok_or(KernelError::InvalidObject)?;
        let exec = ptable.exec(pid).ok_or(KernelError::InvalidObject)?;
        (exec.pml4_phys, sched.generation)
    };

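    // Validation pass 1: every segment must lie entirely inside the user
    // address range [MIN_USER_VADDR, USER_ADDR_LIMIT).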
    let all_user_space = elf_info.segments.iter().all(|seg| {
        seg.vaddr >= MIN_USER_VADDR
            && seg
                .vaddr
                .checked_add(seg.memsz)
                .is_some_and(|end| end <= USER_ADDR_LIMIT)
    });

    if !all_user_space {
        return Err(KernelError::InvalidAddress);
    }

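    // Validation pass 2: the entry point must land inside an executable
    // segment, or the first instruction fetch would fault.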
    let entry_in_executable = elf_info.segments.iter().any(|seg| {
        seg.executable
            && elf_info.entry >= seg.vaddr
            && seg
                .vaddr
                .checked_add(seg.memsz)
                .is_some_and(|end| elf_info.entry < end)
    });

    if !entry_in_executable {
        return Err(KernelError::InvalidParameter);
    }

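    // Validation pass 3: reject segments that overlap at page granularity,
    // since a shared page would otherwise be mapped twice.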
    // Page-aligned span of a segment: base rounded down to a page boundary,
    // end rounded up (clamping to u64::MAX on overflow keeps the overlap
    // comparison safe).
    let page_span = |seg: &lancer_core::elf::LoadSegment| {
        let base = seg.vaddr & !0xFFF;
        let end = seg
            .vaddr
            .checked_add(seg.memsz)
            .and_then(|v| v.checked_add(0xFFF))
            .map(|v| v & !0xFFF)
            .unwrap_or(u64::MAX);
        (base, end)
    };

    let segs = elf_info.segments.as_slice();
    let has_overlap = (0..segs.len()).any(|i| {
        let (a_base, a_end) = page_span(&segs[i]);
        segs[i + 1..].iter().any(|b| {
            let (b_base, b_end) = page_span(b);
            a_base < b_end && b_base < a_end
        })
    });

    if has_overlap {
        return Err(KernelError::InvalidAddress);
    }

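    // Validation pass 4: segments must not touch the stack region or its
    // guard page.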
    let stack_region_base = USER_STACK_GUARD;
    let stack_region_end = USER_STACK_VIRT + USER_STACK_PAGES * 4096;
    let hits_stack = elf_info.segments.iter().any(|seg| {
        let (seg_base, seg_end) = page_span(seg);
        seg_base < stack_region_end && stack_region_base < seg_end
    });

    if hits_stack {
        return Err(KernelError::InvalidAddress);
    }

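    // Load phase: copy each segment into fresh frames and map them page by
    // page. A failing segment is unmapped immediately; segments that were
    // already fully loaded are rolled back after the loop.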
    let mut total_pages_mapped: u16 = 0;
    let mut loaded_segments: usize = 0;
    let load_result: Result<(), KernelError> = elf_info.segments.iter().try_fold((), |(), seg| {
        let base_page = seg.vaddr & !0xFFF;
        let end = seg
            .vaddr
            .checked_add(seg.memsz)
            .ok_or(KernelError::InvalidParameter)?;
        let pages = (end - base_page).div_ceil(4096);

        if pages > MAX_PAGES_PER_SEGMENT {
            return Err(KernelError::ResourceExhausted);
        }

        let mut pages_mapped: u64 = 0;
        let page_result: Result<(), KernelError> = (0..pages).try_fold((), |(), p| {
            let page_vaddr = base_page + p * 4096;
            let user_virt = UserVirtAddr::new(page_vaddr)?;

            // Validate the file-backed window that overlaps this page before
            // allocating a frame, so a bad offset cannot leak one. Bytes past
            // `filesz` are left zeroed (BSS).
            let data_end = seg.vaddr + seg.filesz;
            let copy_start = page_vaddr.max(seg.vaddr);
            let copy_end = (page_vaddr + 4096).min(data_end);
            let copy = if copy_start < copy_end {
                let file_off = seg
                    .file_offset
                    .checked_add(copy_start - seg.vaddr)
                    .ok_or(KernelError::InvalidParameter)?;
                let len = (copy_end - copy_start) as usize;
                let src_end = file_off
                    .checked_add(len as u64)
                    .ok_or(KernelError::InvalidParameter)?;
                if src_end > module_data.len() as u64 {
                    return Err(KernelError::InvalidParameter);
                }
                Some((file_off as usize, (copy_start - page_vaddr) as usize, len))
            } else {
                None
            };

            let frame = allocator.allocate().ok_or(KernelError::ResourceExhausted)?;
            let frame_phys = frame.phys_addr();
            addr::zero_frame(frame_phys);

            if let Some((file_off, dst_off, len)) = copy {
                let dst = addr::phys_to_virt(frame_phys).as_mut_ptr::<u8>();
                // SAFETY: the source range was bounds-checked against
                // `module_data` above, and `dst` points into a private,
                // freshly zeroed frame, so the regions cannot overlap.
                unsafe {
                    core::ptr::copy_nonoverlapping(
                        module_data.as_ptr().add(file_off),
                        dst.add(dst_off),
                        len,
                    );
                }
            }

            address_space::map_user_page(
                pml4_phys,
                user_virt,
                frame,
                match (seg.writable, seg.executable) {
                    (false, false) => address_space::PageAccess::ReadOnly,
                    (true, false) => address_space::PageAccess::ReadWrite,
                    (false, true) => address_space::PageAccess::ReadExecute,
                    (true, true) => address_space::PageAccess::ReadWriteExecute,
                },
                allocator,
            )
            .map_err(|e| {
                // The frame was never mapped; return it to the allocator.
                BitmapFrameAllocator::free_frame_by_addr(frame_phys);
                e
            })?;
            pages_mapped += 1;
            Ok(())
        });

        match page_result {
            Ok(()) => {
                total_pages_mapped = total_pages_mapped.saturating_add(pages_mapped as u16);
                loaded_segments += 1;
                Ok(())
            }
            Err(e) => {
                // Roll back only the pages of this segment that were mapped;
                // earlier segments are handled by the caller below.
                unmap_segment_pages(pml4_phys, seg, pages_mapped);
                Err(e)
            }
        }
    });

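    // Full rollback: a segment failed partway through the image, so unmap
    // every segment that had already been completely loaded.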
    if let Err(e) = load_result {
        elf_info.segments.as_slice()[..loaded_segments]
            .iter()
            .for_each(|seg| {
                let base_page = seg.vaddr & !0xFFF;
                let end = seg.vaddr.saturating_add(seg.memsz);
                let pages = (end - base_page).div_ceil(4096);
                unmap_segment_pages(pml4_phys, seg, pages);
            });
        return Err(e);
    }

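    // Re-acquire the process table and verify the generation still matches
    // the snapshot: if the process died and its slot was reused during the
    // load, fail instead of finishing exec on the wrong process.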
    let mut ptable = crate::proc::PROCESSES.lock();
    ptable
        .get(pid)
        .filter(|s| s.generation == expected_gen)
        .ok_or(KernelError::InvalidObject)?;
    let exec = ptable.exec_mut(pid).ok_or(KernelError::InvalidObject)?;
    map_user_stack(exec, allocator)?;
    total_pages_mapped = total_pages_mapped.saturating_add(USER_STACK_PAGES as u16);
    exec.charge_frames(total_pages_mapped)?;
    exec.saved_context.rip = elf_info.entry;
    exec.seal_context();

    Ok(())
}