//! Next Generation WASM Microkernel Operating System
1// Copyright 2025 Jonas Kruckenberg
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8use crate::arch;
9use crate::mem::address::{AddressRangeExt, PhysicalAddress};
10use core::alloc::Layout;
11use core::range::Range;
12use core::{iter, ptr, slice};
13
/// A simple bump-style physical frame allocator used during early boot.
///
/// Frames are handed out from the *top* of the highest memory region
/// downwards, and can never be returned (see `deallocate_contiguous`).
pub struct BootstrapAllocator<'a> {
    /// The physical memory regions available for allocation.
    regions: &'a [Range<PhysicalAddress>],
    // offset from the top of memory regions: total bytes already handed out
    // (or wasted by skipping too-small regions), counted downwards starting
    // at the end of the last region.
    offset: usize,
}
19
20impl<'a> BootstrapAllocator<'a> {
21 /// Create a new frame allocator over a given set of physical memory regions.
22 #[must_use]
23 pub fn new(regions: &'a [Range<PhysicalAddress>]) -> Self {
24 Self { regions, offset: 0 }
25 }
26
27 #[must_use]
28 pub fn free_regions(&self) -> FreeRegions<'_> {
29 FreeRegions {
30 offset: self.offset,
31 inner: self.regions.iter().rev().copied(),
32 }
33 }
34
35 pub fn allocate_one(&mut self) -> Option<PhysicalAddress> {
36 // Safety: layout is always valid
37 self.allocate_contiguous(unsafe {
38 Layout::from_size_align_unchecked(arch::PAGE_SIZE, arch::PAGE_SIZE)
39 })
40 }
41
42 pub fn allocate_one_zeroed(&mut self) -> Option<PhysicalAddress> {
43 // Safety: layout is always valid
44 self.allocate_contiguous_zeroed(unsafe {
45 Layout::from_size_align_unchecked(arch::PAGE_SIZE, arch::PAGE_SIZE)
46 })
47 }
48
49 pub fn allocate_contiguous(&mut self, layout: Layout) -> Option<PhysicalAddress> {
50 let requested_size = layout.pad_to_align().size();
51 assert_eq!(
52 layout.align(),
53 arch::PAGE_SIZE,
54 "BootstrapAllocator only supports page-aligned allocations"
55 );
56 let mut offset = self.offset;
57
58 for region in self.regions.iter().rev() {
59 // only consider regions that we haven't already exhausted
60 if offset < region.size() {
61 // Allocating a contiguous range has different requirements than "regular" allocation
62 // contiguous are rare and often happen in very critical paths where e.g. virtual
63 // memory is not available yet. So we rather waste some memory than outright crash.
64 if region.size() - offset < requested_size {
65 tracing::warn!(
66 "Skipped memory region {region:?} since it was too small to fulfill request for {requested_size} bytes. Wasted {} bytes in the process...",
67 region.size() - offset
68 );
69
70 self.offset += region.size() - offset;
71 offset = 0;
72 continue;
73 }
74
75 let frame = region.end.checked_sub(offset + requested_size).unwrap();
76 self.offset += requested_size;
77
78 return Some(frame);
79 }
80
81 offset -= region.size();
82 }
83
84 None
85 }
86
87 pub fn deallocate_contiguous(&mut self, _addr: PhysicalAddress, _layout: Layout) {
88 unimplemented!("Bootstrap allocator can't free");
89 }
90
91 pub fn allocate_contiguous_zeroed(&mut self, layout: Layout) -> Option<PhysicalAddress> {
92 let requested_size = layout.pad_to_align().size();
93 let addr = self.allocate_contiguous(layout)?;
94
95 // Safety: we just allocated the frame
96 unsafe {
97 ptr::write_bytes::<u8>(
98 arch::KERNEL_ASPACE_RANGE
99 .start
100 .checked_add(addr.get())
101 .unwrap()
102 .as_mut_ptr(),
103 0,
104 requested_size,
105 );
106 }
107 Some(addr)
108 }
109}
110
/// Iterator over the still-unallocated portions of a `BootstrapAllocator`'s
/// memory regions, yielded from the highest region downwards.
pub struct FreeRegions<'a> {
    /// Bytes already consumed from the top of the region list; fully-consumed
    /// regions are skipped and a partially-consumed one is truncated.
    offset: usize,
    /// Regions in reverse (highest-first) order, matching allocation order.
    inner: iter::Copied<iter::Rev<slice::Iter<'a, Range<PhysicalAddress>>>>,
}
115
116impl Iterator for FreeRegions<'_> {
117 type Item = Range<PhysicalAddress>;
118
119 fn next(&mut self) -> Option<Self::Item> {
120 loop {
121 let mut region = self.inner.next()?;
122 // keep advancing past already fully used memory regions
123 if self.offset >= region.size() {
124 self.offset -= region.size();
125 continue;
126 } else if self.offset > 0 {
127 region.end = region.end.checked_sub(self.offset).unwrap();
128 self.offset = 0;
129 }
130
131 return Some(region);
132 }
133 }
134}