//! Next Generation WASM Microkernel Operating System
1// Copyright 2025 Jonas Kruckenberg
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8use crate::arch;
9use crate::error::Error;
10use core::alloc::Layout;
11use core::num::NonZeroUsize;
12use core::range::Range;
13use core::{cmp, iter, ptr, slice};
14use fallible_iterator::FallibleIterator;
15
/// A bump-style physical frame allocator used during early boot.
///
/// Frames are carved from the *end* of the last region backwards, so both
/// allocation and the region iterators walk `regions` in reverse order.
pub struct FrameAllocator<'a> {
    /// The physical memory regions available for allocation.
    regions: &'a [Range<usize>],
    // offset from the top of memory regions
    // (i.e. the cumulative number of bytes handed out so far, measured from
    // the end of the last region walking the reversed region list)
    offset: usize,
}
21
impl<'a> FrameAllocator<'a> {
    /// Create a new frame allocator over a given set of physical memory regions.
    #[must_use]
    pub fn new(regions: &'a [Range<usize>]) -> Self {
        Self { regions, offset: 0 }
    }

    /// Returns an iterator over the still-unallocated parts of the memory
    /// regions, in the same reverse order that allocation consumes them.
    #[must_use]
    pub fn free_regions(&self) -> FreeRegions<'_> {
        FreeRegions {
            offset: self.offset,
            inner: self.regions.iter().rev().copied(),
        }
    }

    /// Returns an iterator over the already-allocated parts of the memory
    /// regions, in the same reverse order as [`Self::free_regions`].
    #[must_use]
    pub fn used_regions(&self) -> UsedRegions<'_> {
        UsedRegions {
            offset: self.offset,
            inner: self.regions.iter().rev().copied(),
        }
    }

    /// Total number of frames (pages) handed out so far.
    pub fn frame_usage(&self) -> usize {
        // `offset` is a byte count; the shift converts bytes to pages.
        self.offset >> arch::PAGE_SHIFT
    }

    /// Allocate a single page-sized, page-aligned frame and zero it.
    ///
    /// `phys_offset` is added to the returned physical address to form the
    /// pointer used for zeroing (presumably the base of the physical-memory
    /// map in virtual address space — TODO confirm against callers).
    ///
    /// # Errors
    ///
    /// Returns [`Error::NoMemory`] if no region can fit a page.
    pub fn allocate_one_zeroed(&mut self, phys_offset: usize) -> Result<usize, Error> {
        self.allocate_contiguous_zeroed(
            // Safety: the layout is always valid
            unsafe { Layout::from_size_align_unchecked(arch::PAGE_SIZE, arch::PAGE_SIZE) },
            phys_offset,
        )
    }

    /// Allocate `layout` worth of frames, possibly split across several
    /// discontiguous runs.
    ///
    /// The returned [`FrameIter`] performs the actual allocation lazily as it
    /// is driven; each item is a `(frame_base, run_len)` pair.
    ///
    /// # Panics
    ///
    /// Panics if `layout.align()` is not exactly [`arch::PAGE_SIZE`].
    pub fn allocate(&mut self, layout: Layout) -> FrameIter<'a, '_> {
        // NOTE(review): the message says "BootstrapAllocator" while the type is
        // `FrameAllocator` — looks like a stale name from a rename; confirm.
        assert_eq!(
            layout.align(),
            arch::PAGE_SIZE,
            "BootstrapAllocator only supports page-aligned allocations"
        );

        // Padding to the (page-sized) alignment guarantees whole pages.
        let remaining = layout.pad_to_align().size();

        debug_assert!(remaining % arch::PAGE_SIZE == 0);
        FrameIter {
            alloc: self,
            remaining,
        }
    }

    /// Like [`Self::allocate`], but zeroes every run through the mapping at
    /// `phys_offset` before yielding it.
    pub fn allocate_zeroed(
        &mut self,
        layout: Layout,
        phys_offset: usize,
    ) -> FrameIterZeroed<'a, '_> {
        FrameIterZeroed {
            inner: self.allocate(layout),
            phys_offset,
        }
    }

    /// Allocate a physically contiguous run of frames satisfying `layout`.
    ///
    /// Regions whose remaining space is too small for the request are skipped
    /// entirely (their unused tail is wasted) rather than failing the request.
    ///
    /// # Errors
    ///
    /// Returns [`Error::NoMemory`] when no region can hold the whole request.
    ///
    /// # Panics
    ///
    /// Panics if `layout.align()` is not exactly [`arch::PAGE_SIZE`].
    pub fn allocate_contiguous(&mut self, layout: Layout) -> Result<usize, Error> {
        let requested_size = layout.pad_to_align().size();
        assert_eq!(
            layout.align(),
            arch::PAGE_SIZE,
            "BootstrapAllocator only supports page-aligned allocations"
        );
        // Local cursor: how far into the (reversed) region list previous
        // allocations have already advanced.
        let mut offset = self.offset;

        for region in self.regions.iter().rev() {
            let region_size = region.end.checked_sub(region.start).unwrap();

            // only consider regions that we haven't already exhausted
            if offset < region_size {
                // Allocating a contiguous range has different requirements than "regular" allocation
                // contiguous are rare and often happen in very critical paths where e.g. virtual
                // memory is not available yet. So we rather waste some memory than outright crash.
                if region_size - offset < requested_size {
                    log::warn!(
                        "Skipped memory region {region:?} since it was too small to fulfill request for {requested_size} bytes. Wasted {} bytes in the process...",
                        region_size - offset
                    );

                    // Mark the unusable tail of this region as consumed and
                    // continue with the next region.
                    self.offset += region_size - offset;
                    offset = 0;
                    continue;
                }

                // Carve the run from the *top* of the region downwards.
                let frame = region.end.checked_sub(offset + requested_size).unwrap();
                self.offset += requested_size;
                return Ok(frame);
            }

            offset -= region_size;
        }

        Err(Error::NoMemory)
    }

    /// Like [`Self::allocate_contiguous`], but zeroes the allocated run through
    /// the mapping at `phys_offset` before returning its physical address.
    ///
    /// # Errors
    ///
    /// Returns [`Error::NoMemory`] when no region can hold the whole request.
    pub fn allocate_contiguous_zeroed(
        &mut self,
        layout: Layout,
        phys_offset: usize,
    ) -> Result<usize, Error> {
        let requested_size = layout.pad_to_align().size();
        let addr = self.allocate_contiguous(layout)?;
        // Safety: we just allocated the frame
        unsafe {
            ptr::write_bytes::<u8>(
                phys_offset.checked_add(addr).unwrap() as *mut u8,
                0,
                requested_size,
            );
        }
        Ok(addr)
    }
}
141
/// Fallible iterator that lazily allocates (possibly discontiguous) runs of
/// frames; created by [`FrameAllocator::allocate`].
pub struct FrameIter<'a, 'b> {
    /// The allocator the frames are taken from.
    alloc: &'b mut FrameAllocator<'a>,
    /// Bytes still to be allocated before the iterator is exhausted.
    remaining: usize,
}
146
147impl<'a> FrameIter<'a, '_> {
148 pub fn alloc(&mut self) -> &mut FrameAllocator<'a> {
149 self.alloc
150 }
151}
152
impl FallibleIterator for FrameIter<'_, '_> {
    // Each item is `(frame_base, run_length_in_bytes)`.
    type Item = (usize, NonZeroUsize);
    type Error = Error;

    /// Allocates and yields the next run of frames, or `Ok(None)` once the
    /// requested total has been handed out.
    fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {
        if self.remaining > 0 {
            // Walk from the allocator's global offset down through the regions
            // (in reverse order) to find the first region with free space.
            let mut offset = self.alloc.offset;

            for region in self.alloc.regions.iter().rev() {
                let region_size = region.end.checked_sub(region.start).unwrap();

                // only consider regions that we haven't already exhausted
                if let Some(allocatable_size) = region_size.checked_sub(offset)
                    && allocatable_size >= arch::PAGE_SIZE
                {
                    // Take as much as still needed, capped by what this region
                    // offers, then align the size *down* to a page boundary:
                    // `0usize.wrapping_sub(PAGE_SIZE)` equals `!(PAGE_SIZE - 1)`
                    // for a power-of-two page size.
                    let allocation_size = cmp::min(self.remaining, allocatable_size)
                        & 0usize.wrapping_sub(arch::PAGE_SIZE);
                    debug_assert!(allocation_size % arch::PAGE_SIZE == 0);

                    // Runs are carved from the *top* of the region downwards.
                    let frame = region.end.checked_sub(offset + allocation_size).unwrap();
                    self.alloc.offset += allocation_size;
                    self.remaining -= allocation_size;

                    return Ok(Some((frame, NonZeroUsize::new(allocation_size).unwrap())));
                }

                // NOTE(review): this subtraction assumes `offset >= region_size`
                // whenever the branch above is not taken, i.e. that region sizes
                // are page-aligned; a region with a sub-page remainder would
                // underflow here in debug builds — confirm regions are
                // page-aligned.
                offset -= region_size;
            }

            Err(Error::NoMemory)
        } else {
            Ok(None)
        }
    }
}
188
/// Like [`FrameIter`], but zeroes each run before yielding it; created by
/// [`FrameAllocator::allocate_zeroed`].
pub struct FrameIterZeroed<'a, 'b> {
    /// The underlying (non-zeroing) frame iterator.
    inner: FrameIter<'a, 'b>,
    /// Offset added to physical addresses to obtain writable pointers
    /// (presumably the physical-memory map base — TODO confirm).
    phys_offset: usize,
}
193
194impl<'a> FrameIterZeroed<'a, '_> {
195 pub fn alloc(&mut self) -> &mut FrameAllocator<'a> {
196 self.inner.alloc
197 }
198}
199
200impl FallibleIterator for FrameIterZeroed<'_, '_> {
201 type Item = (usize, NonZeroUsize);
202 type Error = Error;
203
204 fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {
205 let Some((base, len)) = self.inner.next()? else {
206 return Ok(None);
207 };
208
209 // Safety: we just allocated the frame
210 unsafe {
211 ptr::write_bytes::<u8>(
212 self.phys_offset.checked_add(base).unwrap() as *mut u8,
213 0,
214 len.get(),
215 );
216 }
217
218 Ok(Some((base, len)))
219 }
220}
221
/// Iterator over the not-yet-allocated portions of the allocator's regions,
/// in reverse region order; created by [`FrameAllocator::free_regions`].
pub struct FreeRegions<'a> {
    /// Allocated bytes still to be skipped while iterating (a snapshot of the
    /// allocator's offset at creation time).
    offset: usize,
    inner: iter::Copied<iter::Rev<slice::Iter<'a, Range<usize>>>>,
}
226
227impl Iterator for FreeRegions<'_> {
228 type Item = Range<usize>;
229
230 fn next(&mut self) -> Option<Self::Item> {
231 loop {
232 let mut region = self.inner.next()?;
233 // keep advancing past already fully used memory regions
234 let region_size = region.end.checked_sub(region.start).unwrap();
235 if self.offset >= region_size {
236 self.offset -= region_size;
237 continue;
238 } else if self.offset > 0 {
239 region.end = region.end.checked_sub(self.offset).unwrap();
240 self.offset = 0;
241 }
242
243 return Some(region);
244 }
245 }
246}
247
/// Iterator over the already-allocated portions of the allocator's regions,
/// in reverse region order; created by [`FrameAllocator::used_regions`].
pub struct UsedRegions<'a> {
    /// Allocated bytes still to be attributed to regions while iterating (a
    /// snapshot of the allocator's offset at creation time).
    offset: usize,
    inner: iter::Copied<iter::Rev<slice::Iter<'a, Range<usize>>>>,
}
252
253impl Iterator for UsedRegions<'_> {
254 type Item = Range<usize>;
255
256 fn next(&mut self) -> Option<Self::Item> {
257 let mut region = self.inner.next()?;
258
259 if self.offset >= region.end.checked_sub(region.start).unwrap() {
260 Some(region)
261 } else if self.offset > 0 {
262 region.start = region.end.checked_sub(self.offset).unwrap();
263 self.offset = 0;
264
265 Some(region)
266 } else {
267 None
268 }
269 }
270}