//! Next Generation WASM Microkernel Operating System
1// Copyright 2025 Jonas Kruckenberg
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8use crate::arch;
9use crate::mem::address_space_region::AddressSpaceRegion;
10use crate::mem::frame_alloc::FrameAllocator;
11use crate::mem::{
12 AddressRangeExt, ArchAddressSpace, Flush, PageFaultFlags, Permissions, PhysicalAddress,
13 VirtualAddress,
14};
15use alloc::boxed::Box;
16use alloc::string::String;
17use alloc::vec;
18use alloc::vec::Vec;
19use anyhow::{bail, ensure};
20use core::alloc::Layout;
21use core::fmt;
22use core::num::NonZeroUsize;
23use core::ops::DerefMut;
24use core::pin::Pin;
25use core::ptr::NonNull;
26use core::range::{Bound, Range, RangeBounds, RangeInclusive};
27use rand::Rng;
28use rand::distr::Uniform;
29use rand_chacha::ChaCha20Rng;
30
// Bits of entropy used when randomizing allocation placement (see `find_spot`).
// Ideally this would be computed from the arch constants as below, but `u8::try_from(..).unwrap()`
// cannot be evaluated in a `const` context, so the value is hardcoded.
// NOTE(review): must be kept in sync with `arch::VIRT_ADDR_BITS`/`arch::PAGE_SHIFT` — TODO confirm
// 27 matches `(arch::VIRT_ADDR_BITS - arch::PAGE_SHIFT as u32) + 1` for the current target.
// const VIRT_ALLOC_ENTROPY: u8 = u8::try_from((arch::VIRT_ADDR_BITS - arch::PAGE_SHIFT as u32) + 1).unwrap();
const VIRT_ALLOC_ENTROPY: u8 = 27;
33
/// Distinguishes the two flavors of [`AddressSpace`]: user address spaces created through
/// [`AddressSpace::new_user`] and the kernel address space created through
/// [`AddressSpace::from_active_kernel`].
#[derive(Debug, Clone, Copy)]
pub enum AddressSpaceKind {
    /// A user address space; page faults are only accepted for user-accessible addresses.
    User,
    /// The kernel address space; page faults are only accepted for kernel addresses.
    Kernel,
}
39
/// A "logical" view of a virtual address space: an ordered set of [`AddressSpaceRegion`]s plus
/// the hardware page tables that changes must be materialized into to take effect.
pub struct AddressSpace {
    /// Whether this is a user address space or the kernel address space.
    kind: AddressSpaceKind,
    /// A binary search tree of regions that make up this address space.
    pub(super) regions: wavltree::WAVLTree<AddressSpaceRegion>,
    /// The maximum range this address space can encompass.
    ///
    /// This is used to check new mappings against and speed up page fault handling.
    max_range: RangeInclusive<VirtualAddress>,
    /// The pseudo-random number generator used for address space layout randomization or `None`
    /// if ASLR is disabled.
    rng: Option<ChaCha20Rng>,
    /// The hardware address space backing this "logical" address space that changes need to be
    /// materialized into in order to take effect.
    pub arch: arch::AddressSpace,
    /// Allocator used for physical frames when materializing mappings.
    pub frame_alloc: &'static FrameAllocator,
    /// Cache of the most recently faulted region and faulting address, used as a fast path in
    /// `page_fault` and to detect repeated ("double") faults on the same address.
    last_fault: Option<(NonNull<AddressSpaceRegion>, VirtualAddress)>,
}
// Safety: the raw pointer in `last_fault` makes the type not `Send` by default, but it is only
// ever accessed behind a `&mut Self`, so moving the value to another thread is sound.
unsafe impl Send for AddressSpace {}
// Safety: the raw pointer in `last_fault` makes the type not `Sync` by default, but it is only
// ever accessed behind a `&mut Self`, so `&Self` access can never touch it concurrently.
unsafe impl Sync for AddressSpace {}
61
impl fmt::Debug for AddressSpace {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("AddressSpace")
            // render each region as `<name> <start>..<end> <permissions>` on its own list entry
            .field_with("regions", |f| {
                let mut f = f.debug_list();
                for region in self.regions.iter() {
                    f.entry(&format_args!(
                        "{:<40?} {}..{} {}",
                        region.name, region.range.start, region.range.end, region.permissions
                    ));
                }
                f.finish()
            })
            .field("max_range", &self.max_range)
            .field("kind", &self.kind)
            .field("rng", &self.rng)
            // `arch`, `frame_alloc` and `last_fault` are intentionally omitted
            .finish_non_exhaustive()
    }
}
81
82impl AddressSpace {
83 pub fn new_user(
84 asid: u16,
85 rng: Option<ChaCha20Rng>,
86 frame_alloc: &'static FrameAllocator,
87 ) -> crate::Result<Self> {
88 let (arch, _) = arch::AddressSpace::new(asid, frame_alloc)?;
89
90 #[allow(tail_expr_drop_order, reason = "")]
91 Ok(Self {
92 regions: wavltree::WAVLTree::default(),
93 arch,
94 max_range: arch::USER_ASPACE_RANGE,
95 rng,
96 kind: AddressSpaceKind::User,
97 frame_alloc,
98 last_fault: None,
99 })
100 }
101
102 pub unsafe fn from_active_kernel(
103 arch_aspace: arch::AddressSpace,
104 rng: Option<ChaCha20Rng>,
105 frame_alloc: &'static FrameAllocator,
106 ) -> Self {
107 #[allow(tail_expr_drop_order, reason = "")]
108 Self {
109 regions: wavltree::WAVLTree::default(),
110 arch: arch_aspace,
111 max_range: arch::KERNEL_ASPACE_RANGE,
112 rng,
113 kind: AddressSpaceKind::Kernel,
114 frame_alloc,
115 last_fault: None,
116 }
117 }
118
    /// Returns whether this is a user address space or the kernel address space.
    pub fn kind(&self) -> AddressSpaceKind {
        self.kind
    }
122
    /// Switches the CPU over to this address space's hardware page tables.
    ///
    /// # Safety
    ///
    /// The caller must ensure switching address spaces is sound at this point (e.g. that code,
    /// stack and data currently in use remain mapped) — see `arch::AddressSpace::activate` for
    /// the exact contract.
    pub unsafe fn activate(&self) {
        // Safety: ensured by caller
        unsafe { self.arch.activate() }
    }
127
128 pub fn map(
129 &mut self,
130 layout: Layout,
131 permissions: Permissions,
132 map: impl FnOnce(
133 Range<VirtualAddress>,
134 Permissions,
135 &mut Batch,
136 ) -> crate::Result<AddressSpaceRegion>,
137 ) -> crate::Result<Pin<&mut AddressSpaceRegion>> {
138 ensure!(layout.pad_to_align().size() % arch::PAGE_SIZE == 0,);
139 ensure!(
140 layout.pad_to_align().size()
141 <= self
142 .max_range
143 .end
144 .checked_sub_addr(self.max_range.start)
145 .unwrap_or_default(),
146 );
147 ensure!(layout.align() <= self.frame_alloc.max_alignment(),);
148 ensure!(permissions.is_valid());
149
150 // Actually do the mapping now
151 // Safety: we checked all invariants above
152 unsafe { self.map_unchecked(layout, permissions, map) }
153 }
154
155 pub unsafe fn map_unchecked(
156 &mut self,
157 layout: Layout,
158 permissions: Permissions,
159 map: impl FnOnce(
160 Range<VirtualAddress>,
161 Permissions,
162 &mut Batch,
163 ) -> crate::Result<AddressSpaceRegion>,
164 ) -> crate::Result<Pin<&mut AddressSpaceRegion>> {
165 let layout = layout.pad_to_align();
166 let base = self.find_spot(layout, VIRT_ALLOC_ENTROPY)?;
167 let range = Range::from(
168 base..base
169 .checked_add(layout.size())
170 .expect("chosen memory range end overflows"),
171 );
172
173 self.map_internal(range, permissions, map)
174 }
175
176 pub fn map_specific(
177 &mut self,
178 range: Range<VirtualAddress>,
179 permissions: Permissions,
180 map: impl FnOnce(
181 Range<VirtualAddress>,
182 Permissions,
183 &mut Batch,
184 ) -> crate::Result<AddressSpaceRegion>,
185 ) -> crate::Result<Pin<&mut AddressSpaceRegion>> {
186 ensure!(range.start.is_aligned_to(arch::PAGE_SIZE),);
187 ensure!(range.end.is_aligned_to(arch::PAGE_SIZE),);
188 ensure!(
189 range.size()
190 <= self
191 .max_range
192 .end
193 .checked_sub_addr(self.max_range.start)
194 .unwrap_or_default(),
195 );
196 ensure!(permissions.is_valid());
197 // ensure the entire address space range is free
198 if let Some(prev) = self.regions.upper_bound(range.start_bound()).get() {
199 ensure!(prev.range.end <= range.start);
200 }
201
202 // Actually do the mapping now
203 // Safety: we checked all invariants above
204 unsafe { self.map_specific_unchecked(range, permissions, map) }
205 }
206
    /// Like [`Self::map_specific`] but skips the invariant checks.
    ///
    /// # Safety
    ///
    /// The caller must uphold the invariants that [`Self::map_specific`] checks: page-aligned,
    /// in-bounds, non-overlapping range and valid permissions.
    pub unsafe fn map_specific_unchecked(
        &mut self,
        range: Range<VirtualAddress>,
        permissions: Permissions,
        map: impl FnOnce(
            Range<VirtualAddress>,
            Permissions,
            &mut Batch,
        ) -> crate::Result<AddressSpaceRegion>,
    ) -> crate::Result<Pin<&mut AddressSpaceRegion>> {
        self.map_internal(range, permissions, map)
    }
219
220 pub fn unmap(&mut self, range: Range<VirtualAddress>) -> crate::Result<()> {
221 ensure!(range.start.is_aligned_to(arch::PAGE_SIZE),);
222 ensure!(range.end.is_aligned_to(arch::PAGE_SIZE),);
223 ensure!(
224 range.size()
225 <= self
226 .max_range
227 .end
228 .checked_sub_addr(self.max_range.start)
229 .unwrap_or_default(),
230 );
231
232 // ensure the entire range is mapped and doesn't cover any holes
233 // `for_each_region_in_range` covers the last half so we just need to check that the regions
234 // aren't smaller than the requested range.
235 // We do that by adding up their sizes checking that their total size is at least as large
236 // as the requested range.
237 let mut bytes_seen = 0;
238 self.for_each_region_in_range(range, |region| {
239 bytes_seen += region.range.size();
240 Ok(())
241 })?;
242 ensure!(bytes_seen == range.size());
243
244 // Actually do the unmapping now
245 // Safety: we checked all invariant above
246 unsafe { self.unmap_unchecked(range) }
247 }
248
249 pub unsafe fn unmap_unchecked(&mut self, range: Range<VirtualAddress>) -> crate::Result<()> {
250 let mut bytes_remaining = range.size();
251 let mut c = self.regions.find_mut(&range.start);
252 while bytes_remaining > 0 {
253 let mut region = c.remove().unwrap();
254 let range = region.range;
255 Pin::as_mut(&mut region).unmap(range)?;
256 bytes_remaining -= range.size();
257 }
258
259 let mut flush = self.arch.new_flush();
260 // Safety: caller has to ensure invariants are checked
261 unsafe {
262 self.arch.unmap(
263 range.start,
264 NonZeroUsize::new(range.size()).unwrap(),
265 &mut flush,
266 )?;
267 }
268 flush.flush()?;
269
270 Ok(())
271 }
272
273 pub fn protect(
274 &mut self,
275 range: Range<VirtualAddress>,
276 new_permissions: Permissions,
277 ) -> crate::Result<()> {
278 ensure!(range.start.is_aligned_to(arch::PAGE_SIZE),);
279 ensure!(range.end.is_aligned_to(arch::PAGE_SIZE),);
280 ensure!(
281 range.size()
282 <= self
283 .max_range
284 .end
285 .checked_sub_addr(self.max_range.start)
286 .unwrap_or_default(),
287 );
288 ensure!(new_permissions.is_valid());
289
290 // ensure the entire range is mapped and doesn't cover any holes
291 // `for_each_region_in_range` covers the last half so we just need to check that the regions
292 // aren't smaller than the requested range.
293 // We do that by adding up their sizes checking that their total size is at least as large
294 // as the requested range.
295 // Along the way we also check for each region that the new permissions are a subset of the
296 // current ones.
297 let mut bytes_seen = 0;
298 self.for_each_region_in_range(range, |region| {
299 bytes_seen += region.range.size();
300
301 ensure!(region.permissions.contains(new_permissions),);
302
303 Ok(())
304 })?;
305 ensure!(bytes_seen == range.size());
306
307 // Actually do the permission changes now
308 // Safety: we checked all invariant above
309 unsafe { self.protect_unchecked(range, new_permissions) }
310 }
311
312 pub unsafe fn protect_unchecked(
313 &mut self,
314 range: Range<VirtualAddress>,
315 new_permissions: Permissions,
316 ) -> crate::Result<()> {
317 let mut bytes_remaining = range.size();
318 let mut c = self.regions.find_mut(&range.start);
319 while bytes_remaining > 0 {
320 let mut region = c.get_mut().unwrap();
321 region.permissions = new_permissions;
322 bytes_remaining -= range.size();
323 }
324
325 let mut flush = self.arch.new_flush();
326 // Safety: caller has to ensure invariants are checked
327 unsafe {
328 self.arch.update_flags(
329 range.start,
330 NonZeroUsize::new(range.size()).unwrap(),
331 new_permissions.into(),
332 &mut flush,
333 )?;
334 }
335 flush.flush()?;
336
337 Ok(())
338 }
339
    /// Handles a page fault at `addr` with the given access `flags`.
    ///
    /// Locates the region containing `addr` — using the region cached from the previous fault
    /// as a fast path — and forwards the fault to it so it can materialize the missing mapping.
    ///
    /// # Errors
    ///
    /// Returns an error when `addr` is not valid for this address space's kind, lies outside
    /// `max_range`, or no region contains it.
    ///
    /// # Panics
    ///
    /// Panics when `flags` is invalid or when the same (page-aligned) address faults twice in
    /// a row, since that means the previous fault handling did not actually resolve it.
    pub fn page_fault(&mut self, addr: VirtualAddress, flags: PageFaultFlags) -> crate::Result<()> {
        assert!(flags.is_valid(), "invalid page fault flags {flags:?}");

        // make sure addr is even a valid address for this address space
        match self.kind {
            AddressSpaceKind::User => ensure!(
                addr.is_user_accessible(),
                "kernel fault in user space addr={addr}"
            ),
            AddressSpaceKind::Kernel => ensure!(
                arch::is_kernel_address(addr),
                "user fault in kernel space addr={addr}"
            ),
        }
        ensure!(
            self.max_range.contains(&addr),
            "page fault at address outside of address space range"
        );

        // fault handling operates at page granularity
        let addr = addr.align_down(arch::PAGE_SIZE);

        // Fast path: re-use the region hit by the previous fault if it contains `addr`,
        // otherwise fall back to a tree lookup.
        let region = if let Some((mut last_region, last_addr)) = self.last_fault.take() {
            // Safety: `last_region` points at a region owned (and pinned) by `self.regions`;
            // NOTE(review): this relies on the cache being invalidated whenever regions are
            // removed — verify all removal paths clear `last_fault`.
            let last_region = unsafe { Pin::new_unchecked(last_region.as_mut()) };

            assert_ne!(addr, last_addr, "double fault");

            if last_region.range.contains(&addr) {
                Some(last_region)
            } else {
                // `upper_bound` finds the closest region starting at or below `addr`; it still
                // has to actually contain `addr` to count.
                self.regions
                    .upper_bound_mut(Bound::Included(&addr))
                    .get_mut()
                    .and_then(|region| region.range.contains(&addr).then_some(region))
            }
        } else {
            self.regions
                .upper_bound_mut(Bound::Included(&addr))
                .get_mut()
                .and_then(|region| region.range.contains(&addr).then_some(region))
        };

        if let Some(mut region) = region {
            // remember the raw pointer so the next fault can take the fast path above
            let region_ptr = NonNull::from(region.deref_mut());

            let mut batch = Batch::new(&mut self.arch, self.frame_alloc);
            region.page_fault(&mut batch, addr, flags)?;
            batch.flush()?;

            self.last_fault = Some((region_ptr, addr));

            Ok(())
        } else {
            bail!("page fault at unmapped address {addr}");
        }
    }
396
    /// Inserts a *wired* region covering `range` without allocating backing memory, then
    /// eagerly materializes the permission change into the hardware page tables.
    ///
    /// Empty `permissions` unmaps the range in hardware; otherwise the hardware flags are
    /// updated to match.
    ///
    /// # Errors
    ///
    /// Returns an error when `range` is not page-aligned, does not fit the address space,
    /// overlaps an existing region, or `permissions` is invalid.
    pub fn reserve(
        &mut self,
        range: Range<VirtualAddress>,
        permissions: Permissions,
        name: Option<String>,
        flush: &mut Flush,
    ) -> crate::Result<Pin<&mut AddressSpaceRegion>> {
        ensure!(range.start.is_aligned_to(arch::PAGE_SIZE),);
        ensure!(range.end.is_aligned_to(arch::PAGE_SIZE),);
        ensure!(
            range.size()
                <= self
                    .max_range
                    .end
                    .checked_sub_addr(self.max_range.start)
                    .unwrap_or_default(),
        );
        ensure!(permissions.is_valid());

        // ensure the entire address space range is free
        if let Some(prev) = self.regions.upper_bound(range.start_bound()).get() {
            ensure!(prev.range.end <= range.start);
        }

        let region = AddressSpaceRegion::new_wired(range, permissions, name);
        let region = self.regions.insert(Box::pin(region));

        // eagerly materialize any possible changes, we do this eagerly for the entire range here
        // since `reserve` will only be called for kernel memory setup by the loader. For which it is
        // critical that the MMUs and our "logical" view are in sync.
        if permissions.is_empty() {
            // Safety: we checked all invariants above
            unsafe {
                self.arch
                    .unmap(range.start, NonZeroUsize::new(range.size()).unwrap(), flush)?;
            }
        } else {
            // Safety: we checked all invariants above
            unsafe {
                self.arch.update_flags(
                    range.start,
                    NonZeroUsize::new(range.size()).unwrap(),
                    permissions.into(),
                    flush,
                )?;
            }
        }

        Ok(region)
    }
447
448 pub fn commit(&mut self, range: Range<VirtualAddress>, will_write: bool) -> crate::Result<()> {
449 ensure!(range.start.is_aligned_to(arch::PAGE_SIZE),);
450 ensure!(range.end.is_aligned_to(arch::PAGE_SIZE),);
451 ensure!(
452 range.size()
453 <= self
454 .max_range
455 .end
456 .checked_sub_addr(self.max_range.start)
457 .unwrap_or_default(),
458 );
459
460 let mut batch = Batch::new(&mut self.arch, self.frame_alloc);
461 let mut bytes_remaining = range.size();
462 let mut c = self.regions.find_mut(&range.start);
463 while bytes_remaining > 0 {
464 let region = c.get_mut().unwrap();
465 let clamped = range.clamp(region.range);
466 region.commit(&mut batch, clamped, will_write)?;
467
468 bytes_remaining -= range.size();
469 }
470 batch.flush()?;
471
472 Ok(())
473 }
474
    /// Shared implementation of the `map*` family: runs the caller-provided `map` closure to
    /// construct the region, inserts it into the region tree, and flushes the resulting batch
    /// of page table updates.
    fn map_internal(
        &mut self,
        range: Range<VirtualAddress>,
        permissions: Permissions,
        map: impl FnOnce(
            Range<VirtualAddress>,
            Permissions,
            &mut Batch,
        ) -> crate::Result<AddressSpaceRegion>,
    ) -> crate::Result<Pin<&mut AddressSpaceRegion>> {
        let mut batch = Batch::new(&mut self.arch, self.frame_alloc);
        let region = map(range, permissions, &mut batch)?;
        let region = self.regions.insert(Box::pin(region));

        // TODO eagerly map a few pages now

        batch.flush()?;
        Ok(region)
    }
494
495 /// Calls the provided callback for each `AddressSpaceRegion` in the given virtual address range.
496 /// This method will ensure the provided range does not cover any holes where no region exists,
497 /// returning an error on the first hole encountered.
498 fn for_each_region_in_range<F>(
499 &self,
500 range: Range<VirtualAddress>,
501 mut f: F,
502 ) -> crate::Result<()>
503 where
504 F: FnMut(&AddressSpaceRegion) -> crate::Result<()>,
505 {
506 let mut prev_end = None;
507 for region in self.regions.range(range) {
508 // ensure there is no gap between this region and the previous one
509 if let Some(prev_end) = prev_end.replace(region.range.end) {
510 if prev_end != region.range.start {
511 bail!("not mapped");
512 }
513 }
514
515 // call the callback
516 f(region)?;
517 }
518
519 Ok(())
520 }
521
    /// Find a spot in the address space that satisfies the given `layout` requirements.
    ///
    /// This function will walk the ordered set of `Mappings` from left to right, looking for a gap
    /// that is large enough to fit the given `layout`.
    ///
    /// To enable ASLR we additionally choose a random `target_index` and require that the chosen
    /// gap is at least the `target_index`th gap in the address space. The `target_index` is chosen
    /// in the range [0, 2^entropy).
    /// `entropy` is a configurable value, but by default it is set to `arch::VIRT_ADDR_BITS - arch::PAGE_SHIFT + 1`
    /// which is the number of usable bits when allocating virtual memory addresses. `arch::VIRT_ADDR_BITS`
    /// is the total number of usable bits in a virtual address, and `arch::PAGE_SHIFT` is the number
    /// of bits that are "lost" because all addresses must be at least page aligned.
    ///
    /// If the algorithm fails to find a suitable spot in the first attempt, it will have collected the
    /// total number of candidate spots and retry with a new `target_index` in the range [0, candidate_spot_count)
    /// which guarantees that a spot will be found as long as `candidate_spot_count > 0`.
    fn find_spot(&mut self, layout: Layout, entropy: u8) -> crate::Result<VirtualAddress> {
        // behaviour:
        // - find the leftmost gap that satisfies the size and alignment requirements
        // - starting at the root,
        // tracing::trace!("finding spot for {layout:?} entropy {entropy}");

        let max_candidate_spaces: usize = 1 << entropy;
        // tracing::trace!("max_candidate_spaces {max_candidate_spaces}");

        // with ASLR disabled (`rng == None`) we deterministically pick index 0, i.e. the
        // leftmost suitable gap
        let selected_index: usize = self
            .rng
            .as_mut()
            .map(|prng| prng.sample(Uniform::new(0, max_candidate_spaces).unwrap()))
            .unwrap_or_default();

        let spot = match self.find_spot_at_index(selected_index, layout) {
            Ok(spot) => spot,
            Err(0) => bail!("out of virtual memory"),
            Err(candidate_spot_count) => {
                // tracing::trace!("couldn't find spot in first attempt (max_candidate_spaces {max_candidate_spaces}), retrying with (candidate_spot_count {candidate_spot_count})");
                // note: reaching this arm with `rng == None` is impossible — index 0 only
                // fails when there are zero candidate spots, which is the `Err(0)` arm above
                let selected_index: usize = self
                    .rng
                    .as_mut()
                    .unwrap()
                    .sample(Uniform::new(0, candidate_spot_count).unwrap());

                self.find_spot_at_index(selected_index, layout).unwrap()
            }
        };
        tracing::trace!("picked spot {spot}..{:?}", spot.checked_add(layout.size()));

        debug_assert!(spot.is_canonical());
        Ok(spot)
    }
572
    /// Attempts to return the `target_index`th spot (counted left-to-right across all gaps)
    /// that satisfies `layout`.
    ///
    /// On failure, returns `Err(candidate_spot_count)` — the total number of suitable spots
    /// seen during the scan — so the caller can retry with an index guaranteed to hit.
    ///
    /// NOTE(review): `max_range`/`max_gap` appear to be subtree-augmented bounds maintained by
    /// the region tree (smallest start / largest end / largest gap within a node's subtree) —
    /// verify against `AddressSpaceRegion`'s tree implementation.
    #[expect(clippy::undocumented_unsafe_blocks, reason = "intrusive tree access")]
    fn find_spot_at_index(
        &self,
        mut target_index: usize,
        layout: Layout,
    ) -> Result<VirtualAddress, usize> {
        tracing::trace!("attempting to find spot for {layout:?} at index {target_index}");

        // number of distinct aligned placements of `layout` inside the (already aligned) gap
        let spots_in_range = |layout: Layout, aligned: Range<VirtualAddress>| -> usize {
            debug_assert!(
                aligned.start.is_aligned_to(layout.align())
                    && aligned.end.is_aligned_to(layout.align())
            );

            // ranges passed in here can become empty for a number of reasons (aligning might produce ranges
            // where start > end, or the range might be empty to begin with) in either case an empty
            // range means no spots are available
            if aligned.is_empty() {
                return 0;
            }

            let range_size = aligned.size();
            if range_size >= layout.size() {
                ((range_size - layout.size()) >> layout.align().ilog2()) + 1
            } else {
                0
            }
        };

        let mut candidate_spot_count = 0;

        // if the tree is empty, treat max_range as the gap
        if self.regions.is_empty() {
            let aligned_gap = Range {
                start: self
                    .max_range
                    .start
                    .checked_align_up(layout.align())
                    .unwrap(),
                end: self
                    .max_range
                    .end
                    .checked_sub(1)
                    .unwrap()
                    .align_down(layout.align()),
            };

            let spot_count = spots_in_range(layout, aligned_gap);
            candidate_spot_count += spot_count;
            if target_index < spot_count {
                tracing::trace!("tree is empty, chose gap {aligned_gap:?}");
                return Ok(aligned_gap
                    .start
                    .checked_add(target_index << layout.align().ilog2())
                    .unwrap());
            }
            target_index -= spot_count;
        }

        // see if there is a suitable gap between the start of the address space and the first mapping
        if let Some(root) = self.regions.root().get() {
            let aligned_gap = Range::from(self.max_range.start..root.max_range.start)
                .checked_align_in(layout.align())
                .unwrap();
            let spot_count = spots_in_range(layout, aligned_gap);
            candidate_spot_count += spot_count;
            if target_index < spot_count {
                tracing::trace!("found gap left of tree in {aligned_gap:?}");
                return Ok(aligned_gap
                    .start
                    .checked_add(target_index << layout.align().ilog2())
                    .unwrap());
            }
            target_index -= spot_count;
        }

        // in-order walk over the gaps between regions; `already_visited` tracks how far left
        // we have already looked so re-ascending to a parent doesn't revisit its left subtree
        let mut maybe_node = self.regions.root().get();
        let mut already_visited = VirtualAddress::default();

        while let Some(node) = maybe_node {
            // `max_gap` lets us skip whole subtrees that cannot contain a large-enough gap
            if node.max_gap >= layout.size() {
                if let Some(left) = node.links.left() {
                    let left = unsafe { left.as_ref() };

                    if left.max_gap >= layout.size() && left.max_range.end > already_visited {
                        maybe_node = Some(left);
                        continue;
                    }

                    // gap between the left subtree's rightmost end and this node's start
                    let aligned_gap = Range::from(left.max_range.end..node.range.start)
                        .checked_align_in(layout.align())
                        .unwrap();

                    let spot_count = spots_in_range(layout, aligned_gap);

                    candidate_spot_count += spot_count;
                    if target_index < spot_count {
                        tracing::trace!("found gap in left subtree in {aligned_gap:?}");
                        return Ok(aligned_gap
                            .start
                            .checked_add(target_index << layout.align().ilog2())
                            .unwrap());
                    }
                    target_index -= spot_count;
                }

                if let Some(right) = node.links.right() {
                    let right = unsafe { right.as_ref() };

                    // gap between this node's end and the right subtree's leftmost start
                    let aligned_gap = Range::from(node.range.end..right.max_range.start)
                        .checked_align_in(layout.align())
                        .unwrap();

                    let spot_count = spots_in_range(layout, aligned_gap);

                    candidate_spot_count += spot_count;
                    if target_index < spot_count {
                        tracing::trace!("found gap in right subtree in {aligned_gap:?}");
                        return Ok(aligned_gap
                            .start
                            .checked_add(target_index << layout.align().ilog2())
                            .unwrap());
                    }
                    target_index -= spot_count;

                    if right.max_gap >= layout.size() && right.max_range.end > already_visited {
                        maybe_node = Some(right);
                        continue;
                    }
                }
            }
            already_visited = node.max_range.end;
            maybe_node = node.links.parent().map(|ptr| unsafe { ptr.as_ref() });
        }

        // see if there is a suitable gap between the end of the last mapping and the end of the address space
        if let Some(root) = self.regions.root().get() {
            let aligned_gap = Range::from(root.max_range.end..self.max_range.end)
                .checked_align_in(layout.align())
                .unwrap();
            let spot_count = spots_in_range(layout, aligned_gap);
            candidate_spot_count += spot_count;
            if target_index < spot_count {
                tracing::trace!("found gap right of tree in {aligned_gap:?}");
                return Ok(aligned_gap
                    .start
                    .checked_add(target_index << layout.align().ilog2())
                    .unwrap());
            }
        }

        Err(candidate_spot_count)
    }
726}
727
728// === Batch ===
729
/// Collects virtually-contiguous mapping requests that share the same flags so they can be
/// applied to the hardware page tables and TLB-flushed in one go instead of page-by-page.
pub struct Batch<'a> {
    /// The hardware address space the queued actions will be applied to on `flush`.
    pub aspace: &'a mut arch::AddressSpace,
    /// Frame allocator passed to `map_contiguous` when applying the batch.
    pub frame_alloc: &'static FrameAllocator,
    /// The contiguous virtual range covered by the currently queued actions.
    range: Range<VirtualAddress>,
    /// The flags shared by all queued actions; a request with different flags forces a flush.
    flags: <arch::AddressSpace as ArchAddressSpace>::Flags,
    /// The queued actions, laid out back-to-back starting at `range.start`.
    actions: Vec<BBatchAction>,
}
737
/// A single queued page table operation.
/// NOTE(review): the name looks like a typo for `BatchAction`; renaming touches several users,
/// so it is left as-is here.
#[derive(Debug)]
enum BBatchAction {
    /// Map `len` bytes of physically contiguous memory starting at the given physical address.
    Map(PhysicalAddress, usize),
}
742
impl Drop for Batch<'_> {
    fn drop(&mut self) {
        // Dropping a batch with queued actions means mappings were requested but never
        // materialized — a caller bug. Ideally this would panic, but panicking in `drop` is
        // fraught (see commented-out line), so it is only logged for now.
        if !self.actions.is_empty() {
            tracing::error!("batch was not flushed before dropping");
            // panic_unwind::panic_in_drop!("batch was not flushed before dropping");
        }
    }
}
751
752impl<'a> Batch<'a> {
753 pub fn new(aspace: &'a mut arch::AddressSpace, frame_alloc: &'static FrameAllocator) -> Self {
754 Self {
755 aspace,
756 frame_alloc,
757 range: Range::default(),
758 flags: <arch::AddressSpace as ArchAddressSpace>::Flags::empty(),
759 actions: vec![],
760 }
761 }
762
763 pub fn queue_map(
764 &mut self,
765 virt: VirtualAddress,
766 phys: PhysicalAddress,
767 len: NonZeroUsize,
768 flags: <arch::AddressSpace as ArchAddressSpace>::Flags,
769 ) -> crate::Result<()> {
770 debug_assert!(
771 len.get() % arch::PAGE_SIZE == 0,
772 "physical address range must be multiple of page size"
773 );
774
775 tracing::trace!("appending {phys:?} at {virt:?} with flags {flags:?}");
776 if self.range.end != virt || self.flags != flags {
777 self.flush()?;
778 self.flags = flags;
779 self.range = Range::from(virt..virt.checked_add(len.get()).unwrap());
780 } else {
781 self.range.end = self.range.end.checked_add(len.get()).unwrap();
782 }
783
784 self.actions.push(BBatchAction::Map(phys, len.get()));
785
786 Ok(())
787 }
788
    /// Applies all queued actions to the hardware page tables and performs a single flush.
    ///
    /// A no-op when nothing is queued. Afterwards the batch is empty but remembers where the
    /// previous run ended so a following `queue_map` can still be coalesced.
    pub fn flush(&mut self) -> crate::Result<()> {
        if self.actions.is_empty() {
            return Ok(());
        }
        tracing::trace!("flushing batch {:?} {:?}...", self.range, self.actions);

        let mut flush = self.aspace.new_flush();
        // actions are laid out back-to-back starting at `range.start`; walk `virt` forward
        // by each action's length
        let mut virt = self.range.start;
        for action in self.actions.drain(..) {
            match action {
                // Safety: we have checked all the invariants
                BBatchAction::Map(phys, len) => unsafe {
                    self.aspace.map_contiguous(
                        self.frame_alloc,
                        virt,
                        phys,
                        NonZeroUsize::new(len).unwrap(),
                        self.flags,
                        &mut flush,
                    )?;
                    virt = virt.checked_add(len).unwrap();
                },
            }
        }
        flush.flush()?;

        // keep `range.end` (as an empty range) so the contiguity check in `queue_map` can
        // still extend from where this run ended
        self.range = Range::from(self.range.end..self.range.end);
        Ok(())
    }
818
    /// Discards all queued actions without applying them (also suppresses the error that
    /// dropping a non-empty batch would log).
    pub fn ignore(&mut self) {
        self.actions.clear();
    }
822}