Next Generation WASM Microkernel Operating System

fix: use non-contiguous allocations wherever possible (#364)

This change brings back non-contiguous physical allocations in places where it makes sense to have them. This helps make better use of memory, especially as we approach the maximum system memory.

Authored by Jonas Kruckenberg, committed by GitHub · 8b581b6f 942e8e27
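To make the idea concrete before the per-file diffs: a contiguous allocator fails as soon as no single free region can hold a request, even when plenty of scattered memory remains. The sketch below is illustrative only, with made-up names and plain `(base, len)` tuples rather than the loader's `FrameAllocator`; it shows the chunked strategy this commit switches to.

```rust
const PAGE_SIZE: usize = 4096;

/// Illustrative only: satisfy `total` bytes as page-aligned (base, len)
/// chunks drawn from the tails of free regions, instead of failing when
/// no single region can hold the whole request.
fn allocate_chunked(
    regions: &mut Vec<(usize, usize)>, // free (base, len) regions, page-aligned
    mut total: usize,
) -> Result<Vec<(usize, usize)>, &'static str> {
    assert!(total % PAGE_SIZE == 0);
    let mut chunks = Vec::new();
    while total > 0 {
        if regions.last().is_some_and(|&(_, len)| len == 0) {
            regions.pop(); // region exhausted, move on to the next one
            continue;
        }
        let Some((base, len)) = regions.last_mut() else {
            return Err("out of memory"); // nothing left to draw from
        };
        let take = total.min(*len); // both are multiples of PAGE_SIZE
        *len -= take;
        chunks.push((*base + *len, take)); // carve from the top of the region
        total -= take;
    }
    Ok(chunks)
}

fn main() {
    // Two 16 KiB regions cannot satisfy one contiguous 24 KiB request,
    // but a chunked allocation succeeds: 16 KiB + 8 KiB.
    let mut regions = vec![(0x8000_0000, 4 * PAGE_SIZE), (0x9000_0000, 4 * PAGE_SIZE)];
    let chunks = allocate_chunked(&mut regions, 6 * PAGE_SIZE).unwrap();
    assert_eq!(
        chunks,
        [(0x9000_0000, 4 * PAGE_SIZE), (0x8000_0000 + 2 * PAGE_SIZE, 2 * PAGE_SIZE)]
    );
}
```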

6 files changed: +304 -149
kernel/src/vm/frame_alloc/arena.rs (+19 -1)

```diff
···
          };

          for region in self.inner.by_ref() {
-             let pages_in_hole = region.start.checked_sub_addr(arena.end).unwrap() / arch::PAGE_SIZE;
+             tracing::debug!(arena.end=?arena.end, region=?region, "Attempting to add free region");
+
+             debug_assert!(!arena.is_overlapping(&region));
+
+             let pages_in_hole = if arena.end <= region.start {
+                 // the region is higher than the current arena
+                 region.start.checked_sub_addr(arena.end).unwrap() / arch::PAGE_SIZE
+             } else {
+                 debug_assert!(region.end <= arena.start);
+                 // the region is lower than the current arena
+                 arena.start.checked_sub_addr(region.end).unwrap() / arch::PAGE_SIZE
+             };
+
              let waste_from_hole = ARENA_PAGE_BOOKKEEPING_SIZE * pages_in_hole;

              if self.wasted_bytes + waste_from_hole > MAX_WASTED_ARENA_BYTES {
···
              } else {
                  self.wasted_bytes += waste_from_hole;
                  arena.end = region.end;
+
+                 if arena.end <= region.start {
+                     arena.end = region.end;
+                 } else {
+                     arena.start = region.start;
+                 }
              }
          }
```
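For intuition on the waste check above: extending an arena across a hole means paying per-page bookkeeping for pages that can never be handed out, so holes are only absorbed while the running total stays under `MAX_WASTED_ARENA_BYTES`. A worked example with made-up constants (the real values live in the kernel sources):

```rust
// Made-up constants, for illustration only; the real ones live in the kernel.
const PAGE_SIZE: usize = 4096;
const ARENA_PAGE_BOOKKEEPING_SIZE: usize = 8; // bytes of metadata per page
const MAX_WASTED_ARENA_BYTES: usize = 16 * 1024;

fn main() {
    // A 2 MiB hole between the current arena and the next free region:
    let pages_in_hole = (2 * 1024 * 1024) / PAGE_SIZE; // 512 pages
    let waste_from_hole = ARENA_PAGE_BOOKKEEPING_SIZE * pages_in_hole; // 4 KiB of dead metadata
    // 4 KiB is under the cap, so the hole is absorbed and the arena grows;
    // a 16 MiB hole (32 KiB of waste) would start a new arena instead.
    assert!(waste_from_hole <= MAX_WASTED_ARENA_BYTES);
    println!("absorbing the hole wastes {waste_from_hole} bytes");
}
```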
loader/src/arch/riscv64.rs (+4 -6)

```diff
···
      );
      debug_assert!(
          virt % PAGE_SIZE == 0,
-         "virtual address must be aligned to at least 4KiB page size {virt:?}"
+         "virtual address must be aligned to at least 4KiB page size ({virt:#x})"
      );
      debug_assert!(
          phys % PAGE_SIZE == 0,
-         "physical address must be aligned to at least 4KiB page size {phys:?}"
+         "physical address must be aligned to at least 4KiB page size ({phys:#x})"
      );

      // To map out contiguous chunk of physical memory into the virtual address space efficiently
···
              // we need to allocate a new sub-table and retry.
              // allocate a new physical frame to hold the next level table and
              // mark this PTE as a valid internal node pointing to that sub-table.
-             let frame = frame_alloc
-                 .allocate_one_zeroed(phys_off)
-                 .ok_or(Error::NoMemory)?; // we should always be able to map a single page
+             let frame = frame_alloc.allocate_one_zeroed(phys_off)?; // we should always be able to map a single page

              // TODO memory barrier
···
              pgtable = pgtable_ptr_from_phys(pte.get_address_and_flags().0, phys_off);
          } else {
              unreachable!(
-                 "Invalid state: PTE can't be valid leaf (this means {virt:?} is already mapped) {pte:?} {pte:p}"
+                 "Invalid state: PTE can't be valid leaf (this means {virt:#x} is already mapped) {pte:?} {pte:p}"
              );
          }
      }
```
loader/src/boot_info.rs (+17 -11)

```diff
···
  // copied, modified, or distributed except according to those terms.

  use crate::arch;
- use crate::error::Error;
  use crate::frame_alloc::FrameAllocator;
  use core::alloc::Layout;
  use core::mem::MaybeUninit;
···
      hart_mask: usize,
      rng_seed: [u8; 32],
  ) -> crate::Result<*mut BootInfo> {
-     let frame = frame_alloc
-         .allocate_contiguous_zeroed(
-             Layout::from_size_align(arch::PAGE_SIZE, arch::PAGE_SIZE).unwrap(),
-             arch::KERNEL_ASPACE_BASE,
-         )
-         .ok_or(Error::NoMemory)?;
+     let frame = frame_alloc.allocate_contiguous_zeroed(
+         Layout::from_size_align(arch::PAGE_SIZE, arch::PAGE_SIZE).unwrap(),
+         arch::KERNEL_ASPACE_BASE,
+     )?;
      let page = physical_address_offset.checked_add(frame).unwrap();

-     let memory_regions = init_boot_info_memory_regions(page, frame_alloc, fdt_phys, loader_phys);
+     let memory_regions =
+         init_boot_info_memory_regions(page, frame_alloc, fdt_phys, loader_phys, kernel_phys);

      let mut boot_info = BootInfo::new(memory_regions);
      boot_info.physical_address_offset = physical_address_offset;
···
      frame_alloc: FrameAllocator,
      fdt_phys: Range<usize>,
      loader_phys: Range<usize>,
+     kernel_phys: Range<usize>,
  ) -> MemoryRegions {
      // Safety: we just allocated a whole frame for the boot info
      let regions: &mut [MaybeUninit<MemoryRegion>] = unsafe {
···

      let mut len = 0;
      let mut push_region = |region: MemoryRegion| {
+         debug_assert!(!region.range.is_empty());
          regions[len].write(region);
          len += 1;
      };
···
          });
      }

-     // The memory occupied by the loader is not needed once the kernel is running.
-     // Mark it as usable.
+     // Most of the memory occupied by the loader is not needed once the kernel is running,
+     // but the kernel itself lies somewhere in the loader memory.
+     //
+     // We can still mark the range before and after the kernel as usable.
      push_region(MemoryRegion {
-         range: loader_phys,
+         range: Range::from(loader_phys.start..kernel_phys.start),
+         kind: MemoryRegionKind::Usable,
+     });
+     push_region(MemoryRegion {
+         range: Range::from(kernel_phys.end..loader_phys.end),
          kind: MemoryRegionKind::Usable,
      });
```
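The splitting above relies on the kernel image lying wholly inside the loader's footprint; a quick standalone check of that arithmetic with illustrative addresses (not the loader's real layout):

```rust
fn main() {
    // Illustrative addresses: the inlined kernel image sits inside the
    // loader's own footprint, so only the flanks can be reused.
    let loader_phys = 0x8020_0000usize..0x8060_0000;
    let kernel_phys = 0x8030_0000usize..0x8050_0000;
    assert!(loader_phys.start <= kernel_phys.start && kernel_phys.end <= loader_phys.end);

    let usable_low = loader_phys.start..kernel_phys.start; // before the kernel
    let usable_high = kernel_phys.end..loader_phys.end; // after the kernel
    assert_eq!(usable_low.len() + usable_high.len(), 0x20_0000);
}
```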
loader/src/frame_alloc.rs (+118 -18)

```diff
···
  // copied, modified, or distributed except according to those terms.

  use crate::arch;
+ use crate::error::Error;
  use core::alloc::Layout;
+ use core::num::NonZeroUsize;
  use core::range::Range;
- use core::{iter, ptr, slice};
+ use core::{cmp, iter, ptr, slice};
+ use fallible_iterator::FallibleIterator;

  pub struct FrameAllocator<'a> {
      regions: &'a [Range<usize>],
      // offset from the top of memory regions
      offset: usize,
-     phys_offset: usize,
  }

  impl<'a> FrameAllocator<'a> {
      /// Create a new frame allocator over a given set of physical memory regions.
      #[must_use]
      pub fn new(regions: &'a [Range<usize>]) -> Self {
-         Self {
-             regions,
-             offset: 0,
-             phys_offset: 0,
-         }
-     }
-
-     pub fn set_phys_offset(&mut self, phys_offset: usize) {
-         self.phys_offset = phys_offset;
+         Self { regions, offset: 0 }
      }

      #[must_use]
···
          self.offset >> arch::PAGE_SHIFT
      }

-     pub fn allocate_one_zeroed(&mut self, phys_offset: usize) -> Option<usize> {
+     pub fn allocate_one_zeroed(&mut self, phys_offset: usize) -> Result<usize, Error> {
          self.allocate_contiguous_zeroed(
              // Safety: the layout is always valid
              unsafe { Layout::from_size_align_unchecked(arch::PAGE_SIZE, arch::PAGE_SIZE) },
···
          )
      }

-     pub fn allocate_contiguous(&mut self, layout: Layout) -> Option<usize> {
+     pub fn allocate(&mut self, layout: Layout) -> FrameIter<'a, '_> {
+         assert_eq!(
+             layout.align(),
+             arch::PAGE_SIZE,
+             "BootstrapAllocator only supports page-aligned allocations"
+         );
+
+         let remaining = layout.pad_to_align().size();
+
+         debug_assert!(remaining % arch::PAGE_SIZE == 0);
+         FrameIter {
+             alloc: self,
+             remaining,
+         }
+     }
+
+     pub fn allocate_zeroed(
+         &mut self,
+         layout: Layout,
+         phys_offset: usize,
+     ) -> FrameIterZeroed<'a, '_> {
+         FrameIterZeroed {
+             inner: self.allocate(layout),
+             phys_offset,
+         }
+     }
+
+     pub fn allocate_contiguous(&mut self, layout: Layout) -> Result<usize, Error> {
          let requested_size = layout.pad_to_align().size();
          assert_eq!(
              layout.align(),
···

              let frame = region.end.checked_sub(offset + requested_size).unwrap();
              self.offset += requested_size;
-
-             return Some(frame);
+             return Ok(frame);
          }

          offset -= region_size;
      }

-     None
+     Err(Error::NoMemory)
  }

  pub fn allocate_contiguous_zeroed(
      &mut self,
      layout: Layout,
      phys_offset: usize,
- ) -> Option<usize> {
+ ) -> Result<usize, Error> {
      let requested_size = layout.pad_to_align().size();
      let addr = self.allocate_contiguous(layout)?;
      // Safety: we just allocated the frame
···
              requested_size,
          );
      }
-     Some(addr)
+     Ok(addr)
+ }
+ }
+
+ pub struct FrameIter<'a, 'b> {
+     alloc: &'b mut FrameAllocator<'a>,
+     remaining: usize,
+ }
+
+ impl<'a> FrameIter<'a, '_> {
+     pub fn alloc(&mut self) -> &mut FrameAllocator<'a> {
+         self.alloc
+     }
+ }
+
+ impl FallibleIterator for FrameIter<'_, '_> {
+     type Item = (usize, NonZeroUsize);
+     type Error = Error;
+
+     fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {
+         if self.remaining > 0 {
+             let mut offset = self.alloc.offset;
+
+             for region in self.alloc.regions.iter().rev() {
+                 let region_size = region.end.checked_sub(region.start).unwrap();
+
+                 // only consider regions that we haven't already exhausted
+                 if let Some(allocatable_size) = region_size.checked_sub(offset)
+                     && allocatable_size >= arch::PAGE_SIZE
+                 {
+                     let allocation_size = cmp::min(self.remaining, allocatable_size)
+                         & 0usize.wrapping_sub(arch::PAGE_SIZE);
+                     debug_assert!(allocation_size % arch::PAGE_SIZE == 0);
+
+                     let frame = region.end.checked_sub(offset + allocation_size).unwrap();
+                     self.alloc.offset += allocation_size;
+                     self.remaining -= allocation_size;
+
+                     return Ok(Some((frame, NonZeroUsize::new(allocation_size).unwrap())));
+                 }
+
+                 offset -= region_size;
+             }
+
+             Err(Error::NoMemory)
+         } else {
+             Ok(None)
+         }
+     }
+ }
+
+ pub struct FrameIterZeroed<'a, 'b> {
+     inner: FrameIter<'a, 'b>,
+     phys_offset: usize,
+ }
+
+ impl<'a> FrameIterZeroed<'a, '_> {
+     pub fn alloc(&mut self) -> &mut FrameAllocator<'a> {
+         self.inner.alloc
+     }
+ }
+
+ impl FallibleIterator for FrameIterZeroed<'_, '_> {
+     type Item = (usize, NonZeroUsize);
+     type Error = Error;
+
+     fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {
+         let Some((base, len)) = self.inner.next()? else {
+             return Ok(None);
+         };
+
+         // Safety: we just allocated the frame
+         unsafe {
+             ptr::write_bytes::<u8>(
+                 self.phys_offset.checked_add(base).unwrap() as *mut u8,
+                 0,
+                 len.get(),
+             );
+         }
+
+         Ok(Some((base, len)))
      }
  }
```
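The new `allocate`/`allocate_zeroed` return lazy iterators rather than a single address, and since plain `for` loops cannot propagate errors, callers drain them with an explicit `next()?`. A standalone sketch of that calling shape, with a toy `ChunkIter` standing in for `FrameIter`; only the `fallible_iterator` crate's trait is the real dependency:

```rust
use fallible_iterator::FallibleIterator;

// Toy stand-in for `FrameIter`: hands out pre-baked (phys base, len) chunks.
struct ChunkIter {
    chunks: Vec<(usize, usize)>, // chunks still to hand out
}

impl FallibleIterator for ChunkIter {
    type Item = (usize, usize);
    type Error = &'static str;

    fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {
        Ok(self.chunks.pop())
    }
}

fn main() -> Result<(), &'static str> {
    let mut virt = 0xffff_ffc0_0000_0000usize;
    let mut iter = ChunkIter {
        chunks: vec![(0x8000_0000, 0x4000), (0x9000_0000, 0x2000)],
    };

    // The loader's mapping loops all follow this shape: map each chunk at
    // the next virtual address, then advance the cursor by the chunk's length.
    while let Some((phys, len)) = iter.next()? {
        println!("map {virt:#x}..{:#x} => {phys:#x}", virt + len);
        virt += len;
    }
    Ok(())
}
```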
loader/src/main.rs (+65 -39)

```diff
···
      map_physical_memory,
  };
  use arrayvec::ArrayVec;
- use core::alloc::Layout;
  use core::ffi::c_void;
  use core::range::Range;
- use core::{ptr, slice};
+ use core::slice;
  use rand::SeedableRng;
  use rand_chacha::ChaCha20Rng;
  use spin::{Barrier, OnceLock};
···
      let self_regions = SelfRegions::collect(&minfo);
      log::debug!("{self_regions:#x?}");

+     let fdt_phys = {
+         let fdt = minfo.fdt.as_ptr_range();
+         Range::from(fdt.start as usize..fdt.end as usize)
+     };
+     let kernel_phys = {
+         let fdt = INLINED_KERNEL_BYTES.0.as_ptr_range();
+         Range::from(fdt.start as usize..fdt.end as usize)
+     };
+
      // Initialize the frame allocator
-     let allocatable_memories = allocatable_memory_regions(&minfo, &self_regions);
+     let allocatable_memories = allocatable_memory_regions(&minfo, &self_regions, fdt_phys);
+     log::debug!("allocatable memory regions {allocatable_memories:#x?}");
      let mut frame_alloc = FrameAllocator::new(&allocatable_memories);

      // initialize the random number generator
···

      // Initialize the page allocator
      let mut page_alloc = page_alloc::init(rng);
-
-     let fdt_phys = allocate_and_copy(&mut frame_alloc, minfo.fdt).unwrap();
-     let kernel_phys = allocate_and_copy(&mut frame_alloc, &INLINED_KERNEL_BYTES.0).unwrap();

      let root_pgtable = frame_alloc
          .allocate_one_zeroed(
···
          arch::activate_aspace(root_pgtable);
          log::trace!("activated.");
      }
-     frame_alloc.set_phys_offset(phys_off);

      // Safety: The kernel elf file is inlined into the loader executable as part of the build setup
      // which means we just need to parse it here.
      let kernel = parse_kernel(unsafe {
          let base = phys_off.checked_add(kernel_phys.start).unwrap();
-         let len = kernel_phys.end.checked_sub(kernel_phys.start).unwrap();

-         slice::from_raw_parts(base as *mut u8, len)
+         slice::from_raw_parts(base as *mut u8, INLINED_KERNEL_BYTES.0.len())
      })
      .unwrap();
      // print the elf sections for debugging purposes
···
  fn allocatable_memory_regions(
      minfo: &MachineInfo,
      self_regions: &SelfRegions,
+     fdt: Range<usize>,
  ) -> ArrayVec<Range<usize>, 16> {
-     let mut out = ArrayVec::new();
-     let to_exclude = Range::from(self_regions.executable.start..self_regions.read_write.end);
+     let mut temp: ArrayVec<Range<usize>, 16> = minfo.memories.clone();

-     for mut region in minfo.memories.clone() {
-         if to_exclude.contains(&region.start) && to_exclude.contains(&region.end) {
-             // remove region
-             continue;
-         } else if region.contains(&to_exclude.start) && region.contains(&to_exclude.end) {
-             out.push(Range::from(region.start..to_exclude.start));
-             out.push(Range::from(to_exclude.end..region.end));
-         } else if to_exclude.contains(&region.start) {
-             region.start = to_exclude.end;
-             out.push(region);
-         } else if to_exclude.contains(&region.end) {
-             region.end = to_exclude.start;
-             out.push(region);
-         } else {
-             out.push(region);
+     let mut exclude = |to_exclude: Range<usize>| {
+         for mut region in temp.take() {
+             if to_exclude.contains(&region.start) && to_exclude.contains(&region.end) {
+                 // remove region
+                 continue;
+             } else if region.contains(&to_exclude.start) && region.contains(&to_exclude.end) {
+                 temp.push(Range::from(region.start..to_exclude.start));
+                 temp.push(Range::from(to_exclude.end..region.end));
+             } else if to_exclude.contains(&region.start) {
+                 region.start = to_exclude.end;
+                 temp.push(region);
+             } else if to_exclude.contains(&region.end) {
+                 region.end = to_exclude.start;
+                 temp.push(region);
+             } else {
+                 temp.push(region);
+             }
          }
-     }
+     };

-     out
- }
+     exclude(Range::from(
+         self_regions.executable.start..self_regions.read_write.end,
+     ));

- fn allocate_and_copy(frame_alloc: &mut FrameAllocator, src: &[u8]) -> Result<Range<usize>> {
-     let layout = Layout::from_size_align(src.len(), arch::PAGE_SIZE).unwrap();
-     let base = frame_alloc
-         .allocate_contiguous(layout)
-         .ok_or(Error::NoMemory)?;
+     exclude(fdt);

-     // Safety: we just allocated the frame
-     unsafe {
-         let dst = slice::from_raw_parts_mut(base as *mut u8, src.len());
+     // // merge adjacent regions
+     // let mut out: ArrayVec<Range<usize>, 16> = ArrayVec::new();
+     // 'outer: for region in temp {
+     //     for other in &mut out {
+     //         if region.start == other.end {
+     //             other.end = region.end;
+     //             continue 'outer;
+     //         }
+     //         if region.end == other.start {
+     //             other.start = region.start;
+     //             continue 'outer;
+     //         }
+     //     }
+     //
+     //     out.push(region);
+     // }

-         ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), dst.len());
+     temp.sort_unstable_by_key(|region| region.start);
+
+     #[cfg(debug_assertions)]
+     for (i, region) in temp.iter().enumerate() {
+         for (j, other) in temp.iter().enumerate() {
+             if i == j {
+                 continue;
+             }
+
+             assert!(!other.contains(&region.start) && !other.contains(&region.end));
+         }
      }

-     Ok(Range::from(base..base.checked_add(layout.size()).unwrap()))
+     temp
  }
```
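The `exclude` closure above distinguishes four overlap cases between a memory region and a range to carve out. The same case analysis in isolation, as a rendition with plain `(start, end)` tuples and illustrative addresses rather than the loader's `core::range::Range`:

```rust
// Standalone rendition of the four overlap cases in `exclude`,
// using (start, end) tuples; the values below are made up.
fn exclude(region: (usize, usize), ex: (usize, usize)) -> Vec<(usize, usize)> {
    let (rs, re) = region;
    let (xs, xe) = ex;
    if xs <= rs && re <= xe {
        vec![] // region fully covered: drop it
    } else if rs < xs && xe < re {
        vec![(rs, xs), (xe, re)] // exclusion strictly inside: split in two
    } else if xs <= rs && rs < xe {
        vec![(xe, re)] // overlaps the front: trim the start
    } else if xs < re && re <= xe {
        vec![(rs, xs)] // overlaps the back: trim the end
    } else {
        vec![region] // disjoint: keep as-is
    }
}

fn main() {
    // Excluding the loader image (and, in a second pass, the FDT) from a
    // single RAM bank splits it into two allocatable pieces.
    let ram = (0x8000_0000, 0x9000_0000);
    let loader = (0x8020_0000, 0x8040_0000);
    assert_eq!(
        exclude(ram, loader),
        [(0x8000_0000, 0x8020_0000), (0x8040_0000, 0x9000_0000)]
    );
}
```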
loader/src/mapping.rs (+81 -74)

```diff
···
  use crate::{SelfRegions, arch};
  use bitflags::bitflags;
  use core::alloc::Layout;
- use core::num::{NonZero, NonZeroUsize};
+ use core::num::NonZeroUsize;
  use core::range::Range;
  use core::{cmp, ptr, slice};
+ use fallible_iterator::FallibleIterator;
  use loader_api::TlsTemplate;
  use xmas_elf::P64;
  use xmas_elf::dynamic::Tag;
···
          ph.align,
      );

-     let new_frame = frame_alloc
-         .allocate_contiguous_zeroed(
-             Layout::from_size_align(arch::PAGE_SIZE, arch::PAGE_SIZE).unwrap(),
-             arch::KERNEL_ASPACE_BASE,
-         )
-         .ok_or(Error::NoMemory)?;
+     let new_frame = frame_alloc.allocate_one_zeroed(arch::KERNEL_ASPACE_BASE)?;

      // Safety: we just allocated the frame
      unsafe {
···
      }

      log::trace!("zero_start {zero_start:#x} zero_end {zero_end:#x}");
-     let (additional_virt, additional_len) = {
+     let (mut virt, len) = {
          // zero_start either lies at a page boundary OR somewhere within the first page
          // by aligning up, we move it to the beginning of the *next* page.
          let start = checked_align_up(zero_start, ph.align).unwrap();
···
          (start, end.checked_sub(start).unwrap())
      };

-     if additional_len > 0 {
-         let additional_phys = frame_alloc
-             .allocate_contiguous_zeroed(
-                 Layout::from_size_align(additional_len, arch::PAGE_SIZE).unwrap(),
-                 arch::KERNEL_ASPACE_BASE,
-             )
-             .unwrap();
-
-         log::trace!(
-             "mapping additional zeros {additional_virt:#x}..{:#x}",
-             additional_virt.checked_add(additional_len).unwrap()
+     if len > 0 {
+         let mut phys_iter = frame_alloc.allocate_zeroed(
+             Layout::from_size_align(len, arch::PAGE_SIZE).unwrap(),
+             arch::KERNEL_ASPACE_BASE,
          );
-         // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
-         // abort startup anyway
-         unsafe {
-             arch::map_contiguous(
-                 root_pgtable,
-                 frame_alloc,
-                 additional_virt,
-                 additional_phys,
-                 NonZeroUsize::new(additional_len).unwrap(),
-                 flags,
-                 arch::KERNEL_ASPACE_BASE,
-             )?;
+
+         while let Some((phys, len)) = phys_iter.next()? {
+             log::trace!(
+                 "mapping additional zeros {virt:#x}..{:#x}",
+                 virt.checked_add(len.get()).unwrap()
+             );
+
+             // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
+             // abort startup anyway
+             unsafe {
+                 arch::map_contiguous(
+                     root_pgtable,
+                     phys_iter.alloc(),
+                     virt,
+                     phys,
+                     len,
+                     flags,
+                     arch::KERNEL_ASPACE_BASE,
+                 )?;
+             }
+
+             virt += len.get();
          }
      }
···
          .unwrap()
          .0
          .pad_to_align();
+     log::trace!("allocating TLS segment {layout:?}...");

-     let phys = frame_alloc
-         .allocate_contiguous_zeroed(layout, phys_off)
-         .unwrap();
      let virt = page_alloc.allocate(layout);
+     let mut virt_start = virt.start;

-     log::trace!("Mapping TLS region {virt:#x?}...");
-     // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
-     // abort startup anyway
-     unsafe {
-         arch::map_contiguous(
-             root_pgtable,
-             frame_alloc,
-             virt.start,
-             phys,
-             NonZero::new(layout.size()).unwrap(),
-             Flags::READ | Flags::WRITE,
-             phys_off,
-         )?;
+     let mut phys_iter = frame_alloc.allocate_zeroed(layout, phys_off);
+     while let Some((phys, len)) = phys_iter.next()? {
+         log::trace!(
+             "Mapping TLS region {virt_start:#x}..{:#x} {len} ...",
+             virt_start.checked_add(len.get()).unwrap()
+         );
+
+         // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
+         // abort startup anyway
+         unsafe {
+             arch::map_contiguous(
+                 root_pgtable,
+                 phys_iter.alloc(),
+                 virt_start,
+                 phys,
+                 len,
+                 Flags::READ | Flags::WRITE,
+                 phys_off,
+             )?;
+         }
+
+         virt_start += len.get();
      }

      Ok(TlsAllocation {
···
      let virt = page_alloc.allocate(layout_with_guard);
      log::trace!("Mapping stacks region {virt:#x?}...");

-     // Mapping stacks region 0xffffffc0c0000000..0xffffffc0c0101000...
-
      for hart in 0..minfo.hart_mask.count_ones() {
          let layout = Layout::from_size_align(per_cpu_size, arch::PAGE_SIZE).unwrap();

-         log::trace!("Allocating stack {layout:?}...");
-         // The stacks region doesn't need to be zeroed, since we will be filling it with
-         // the canary pattern anyway
-         let phys = frame_alloc
-             .allocate_contiguous(layout)
-             .ok_or(Error::NoMemory)?;
-
-         let virt = virt
+         let mut virt = virt
              .end
              .checked_sub(per_cpu_size_with_guard * hart as usize)
              .and_then(|a| a.checked_sub(per_cpu_size))
              .unwrap();

-         log::trace!(
-             "mapping stack for hart {hart} {virt:#x}..{:#x} => {phys:#x}..{:#x}",
-             virt.checked_add(per_cpu_size).unwrap(),
-             phys.checked_add(per_cpu_size).unwrap()
-         );
+         log::trace!("Allocating stack {layout:?}...");
+         // The stacks region doesn't need to be zeroed, since we will be filling it with
+         // the canary pattern anyway
+         let mut phys_iter = frame_alloc.allocate(layout);

-         // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
-         // abort startup anyway
-         unsafe {
-             arch::map_contiguous(
-                 root_pgtable,
-                 frame_alloc,
-                 virt,
-                 phys,
-                 NonZero::new(layout.size()).unwrap(),
-                 Flags::READ | Flags::WRITE | Flags::USER,
-                 phys_off,
-             )?;
+         while let Some((phys, len)) = phys_iter.next()? {
+             log::trace!(
+                 "mapping stack for hart {hart} {virt:#x}..{:#x} => {phys:#x}..{:#x}",
+                 virt.checked_add(len.get()).unwrap(),
+                 phys.checked_add(len.get()).unwrap()
+             );
+
+             // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
+             // abort startup anyway
+             unsafe {
+                 arch::map_contiguous(
+                     root_pgtable,
+                     phys_iter.alloc(),
+                     virt,
+                     phys,
+                     len,
+                     Flags::READ | Flags::WRITE | Flags::USER,
+                     phys_off,
+                 )?;
+             }
+
+             virt += len.get();
          }
      }
```
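A note on `phys_iter.alloc()`, which appears in every loop above: the iterator holds the `&mut FrameAllocator` for its whole lifetime, yet `arch::map_contiguous` also needs the allocator to create intermediate page tables. The accessor re-lends it for the duration of one call. A minimal model of that borrow pattern, with toy types rather than the loader's:

```rust
struct Alloc {
    offset: usize,
}

struct Iter<'a> {
    alloc: &'a mut Alloc,
    remaining: usize,
}

impl Iter<'_> {
    // Re-lend the underlying allocator for the duration of one call.
    fn alloc(&mut self) -> &mut Alloc {
        self.alloc
    }

    fn next(&mut self) -> Option<usize> {
        if self.remaining == 0 {
            return None;
        }
        self.remaining -= 1;
        self.alloc.offset += 1;
        Some(self.alloc.offset)
    }
}

// Stand-in for `arch::map_contiguous`, which may itself allocate
// page-table frames from the same allocator.
fn map(alloc: &mut Alloc, frame: usize) {
    println!("mapped frame {frame}; allocator offset {}", alloc.offset);
}

fn main() {
    let mut a = Alloc { offset: 0 };
    let mut iter = Iter { alloc: &mut a, remaining: 3 };
    while let Some(frame) = iter.next() {
        // `a` is exclusively borrowed by `iter`, so `&mut a` would not
        // compile here; re-borrowing through the iterator does.
        map(iter.alloc(), frame);
    }
}
```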