// k23 — Next Generation WASM Microkernel Operating System
//
// Copyright 2025 Jonas Kruckenberg
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
7
8use core::alloc::Layout;
9use core::ops::Range;
10use core::{cmp, iter, ptr, slice};
11
12use fallible_iterator::FallibleIterator;
13use kmem::{AddressRangeExt, PhysicalAddress, VirtualAddress};
14
15use crate::arch;
16use crate::error::Error;
17
/// A bump-style physical frame allocator used during early boot.
///
/// Frames are handed out from the *top* (highest address) of the supplied
/// memory regions downwards; `offset` records how many bytes have been
/// allocated so far, summed across all regions.
pub struct FrameAllocator<'a> {
    // The physical memory regions available for allocation. Iterated in
    // reverse so allocation proceeds from the end of the last region.
    regions: &'a [Range<PhysicalAddress>],
    // offset from the top of memory regions
    offset: usize,
}
23
24impl<'a> FrameAllocator<'a> {
25 /// Create a new frame allocator over a given set of physical memory regions.
26 #[must_use]
27 pub fn new(regions: &'a [Range<PhysicalAddress>]) -> Self {
28 Self { regions, offset: 0 }
29 }
30
31 #[must_use]
32 pub fn free_regions(&self) -> FreeRegions<'_> {
33 FreeRegions {
34 offset: self.offset,
35 inner: self.regions.iter().rev().cloned(),
36 }
37 }
38
39 #[must_use]
40 pub fn used_regions(&self) -> UsedRegions<'_> {
41 UsedRegions {
42 offset: self.offset,
43 inner: self.regions.iter().rev().cloned(),
44 }
45 }
46
47 pub fn frame_usage(&self) -> usize {
48 self.offset >> arch::PAGE_SHIFT
49 }
50
51 pub fn allocate_one_zeroed(
52 &mut self,
53 phys_offset: VirtualAddress,
54 ) -> Result<PhysicalAddress, Error> {
55 self.allocate_contiguous_zeroed(
56 // Safety: the layout is always valid
57 unsafe { Layout::from_size_align_unchecked(arch::PAGE_SIZE, arch::PAGE_SIZE) },
58 phys_offset,
59 )
60 }
61
62 pub fn allocate(&mut self, layout: Layout) -> FrameIter<'a, '_> {
63 assert_eq!(
64 layout.align(),
65 arch::PAGE_SIZE,
66 "BootstrapAllocator only supports page-aligned allocations"
67 );
68
69 let remaining = layout.pad_to_align().size();
70
71 debug_assert!(remaining.is_multiple_of(arch::PAGE_SIZE));
72 FrameIter {
73 alloc: self,
74 remaining,
75 }
76 }
77
78 pub fn allocate_zeroed(
79 &mut self,
80 layout: Layout,
81 phys_offset: VirtualAddress,
82 ) -> FrameIterZeroed<'a, '_> {
83 FrameIterZeroed {
84 inner: self.allocate(layout),
85 phys_offset,
86 }
87 }
88
89 pub fn allocate_contiguous(&mut self, layout: Layout) -> Result<PhysicalAddress, Error> {
90 let requested_size = layout.pad_to_align().size();
91 assert_eq!(
92 layout.align(),
93 arch::PAGE_SIZE,
94 "BootstrapAllocator only supports page-aligned allocations"
95 );
96 let mut offset = self.offset;
97
98 for region in self.regions.iter().rev() {
99 let region_size = region.len();
100
101 // only consider regions that we haven't already exhausted
102 if offset < region_size {
103 // Allocating a contiguous range has different requirements than "regular" allocation
104 // contiguous are rare and often happen in very critical paths where e.g. virtual
105 // memory is not available yet. So we rather waste some memory than outright crash.
106 if region_size - offset < requested_size {
107 log::warn!(
108 "Skipped memory region {region:?} since it was too small to fulfill request for {requested_size} bytes. Wasted {} bytes in the process...",
109 region_size - offset
110 );
111
112 self.offset += region_size - offset;
113 offset = 0;
114 continue;
115 }
116
117 let frame = region.end.sub(offset + requested_size);
118 self.offset += requested_size;
119 return Ok(frame);
120 }
121
122 offset -= region_size;
123 }
124
125 Err(Error::NoMemory)
126 }
127
128 pub fn allocate_contiguous_zeroed(
129 &mut self,
130 layout: Layout,
131 phys_offset: VirtualAddress,
132 ) -> Result<PhysicalAddress, Error> {
133 let requested_size = layout.pad_to_align().size();
134 let addr = self.allocate_contiguous(layout)?;
135 // Safety: we just allocated the frame
136 unsafe {
137 ptr::write_bytes::<u8>(addr.add(phys_offset.get()).as_mut_ptr(), 0, requested_size);
138 }
139 Ok(addr)
140 }
141}
142
/// Fallible iterator returned by [`FrameAllocator::allocate`] that yields
/// (possibly discontiguous) physical frame ranges until the requested
/// layout size has been satisfied.
pub struct FrameIter<'a, 'b> {
    // The allocator frames are drawn from.
    alloc: &'b mut FrameAllocator<'a>,
    // Bytes still to allocate; a multiple of the page size.
    remaining: usize,
}
147
148impl<'a> FrameIter<'a, '_> {
149 pub fn alloc(&mut self) -> &mut FrameAllocator<'a> {
150 self.alloc
151 }
152}
153
154impl FallibleIterator for FrameIter<'_, '_> {
155 type Item = Range<PhysicalAddress>;
156 type Error = Error;
157
158 fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {
159 if self.remaining > 0 {
160 let mut offset = self.alloc.offset;
161
162 for region in self.alloc.regions.iter().rev() {
163 // only consider regions that we haven't already exhausted
164 if let Some(allocatable_size) = region.len().checked_sub(offset)
165 && allocatable_size >= arch::PAGE_SIZE
166 {
167 let allocation_size = cmp::min(self.remaining, allocatable_size)
168 & 0usize.wrapping_sub(arch::PAGE_SIZE);
169 debug_assert!(allocation_size.is_multiple_of(arch::PAGE_SIZE));
170
171 let frame = region.end.sub(offset + allocation_size);
172 self.alloc.offset += allocation_size;
173 self.remaining -= allocation_size;
174
175 return Ok(Some(Range::from_start_len(frame, allocation_size)));
176 }
177
178 offset -= region.len();
179 }
180
181 Err(Error::NoMemory)
182 } else {
183 Ok(None)
184 }
185 }
186}
187
/// Like [`FrameIter`], but zeroes every yielded range through the
/// physical-memory mapping before handing it out.
pub struct FrameIterZeroed<'a, 'b> {
    // The underlying (non-zeroing) frame iterator.
    inner: FrameIter<'a, 'b>,
    // Virtual address at which physical memory is mapped; used to reach the
    // frames for zeroing.
    phys_offset: VirtualAddress,
}
192
193impl<'a> FrameIterZeroed<'a, '_> {
194 pub fn alloc(&mut self) -> &mut FrameAllocator<'a> {
195 self.inner.alloc
196 }
197}
198
199impl FallibleIterator for FrameIterZeroed<'_, '_> {
200 type Item = Range<PhysicalAddress>;
201 type Error = Error;
202
203 fn next(&mut self) -> Result<Option<Self::Item>, Self::Error> {
204 let Some(range) = self.inner.next()? else {
205 return Ok(None);
206 };
207
208 // Safety: we just allocated the frame
209 unsafe {
210 ptr::write_bytes::<u8>(
211 self.phys_offset.add(range.start.get()).as_mut_ptr(),
212 0,
213 range.len(),
214 );
215 }
216
217 Ok(Some(range))
218 }
219}
220
/// Iterator over the still-unallocated parts of the allocator's memory
/// regions, returned by [`FrameAllocator::free_regions`].
pub struct FreeRegions<'a> {
    // Bytes already allocated from the top of memory; consumed as regions
    // are walked.
    offset: usize,
    // Regions in reverse (top-of-memory-first) order.
    inner: iter::Cloned<iter::Rev<slice::Iter<'a, Range<PhysicalAddress>>>>,
}
225
226impl Iterator for FreeRegions<'_> {
227 type Item = Range<PhysicalAddress>;
228
229 fn next(&mut self) -> Option<Self::Item> {
230 loop {
231 let mut region = self.inner.next()?;
232 // keep advancing past already fully used memory regions
233 let region_size = region.len();
234
235 if self.offset >= region_size {
236 self.offset -= region_size;
237 continue;
238 } else if self.offset > 0 {
239 region.end = region.end.sub(self.offset);
240 self.offset = 0;
241 }
242
243 return Some(region);
244 }
245 }
246}
247
/// Iterator over the already-allocated parts of the allocator's memory
/// regions, returned by [`FrameAllocator::used_regions`].
pub struct UsedRegions<'a> {
    // Bytes allocated from the top of memory; consumed as regions are walked.
    offset: usize,
    // Regions in reverse (top-of-memory-first) order.
    inner: iter::Cloned<iter::Rev<slice::Iter<'a, Range<PhysicalAddress>>>>,
}
252
253impl Iterator for UsedRegions<'_> {
254 type Item = Range<PhysicalAddress>;
255
256 fn next(&mut self) -> Option<Self::Item> {
257 let mut region = self.inner.next()?;
258
259 if self.offset >= region.len() {
260 Some(region)
261 } else if self.offset > 0 {
262 region.start = region.end.sub(self.offset);
263 self.offset = 0;
264
265 Some(region)
266 } else {
267 None
268 }
269 }
270}