//! Next Generation WASM Microkernel Operating System

// Copyright 2025 Jonas Kruckenberg
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
7
8use core::alloc::Layout;
9use core::ops::Range;
10use core::{iter, ptr, slice};
11
12use kmem::{AddressRangeExt, PhysicalAddress};
13
14use crate::arch;
15
/// A bump allocator for physical memory frames, used during early boot
/// before a real frame allocator is available.
///
/// Frames are carved from the *end* (highest addresses) of the supplied
/// regions, walking the region list in reverse, and can never be returned
/// (`deallocate_contiguous` is unimplemented).
pub struct BootstrapAllocator<'a> {
    /// The physical memory regions this allocator hands out frames from.
    regions: &'a [Range<PhysicalAddress>],
    /// Offset from the top of the memory regions: the total number of bytes
    /// already consumed, including bytes wasted by regions that were skipped
    /// because they were too small for a contiguous request.
    offset: usize,
}
21
22impl<'a> BootstrapAllocator<'a> {
23 /// Create a new frame allocator over a given set of physical memory regions.
24 #[must_use]
25 pub fn new(regions: &'a [Range<PhysicalAddress>]) -> Self {
26 Self { regions, offset: 0 }
27 }
28
29 #[must_use]
30 pub fn free_regions(&self) -> FreeRegions<'_> {
31 FreeRegions {
32 offset: self.offset,
33 inner: self.regions.iter().rev().cloned(),
34 }
35 }
36
37 pub fn allocate_one(&mut self) -> Option<PhysicalAddress> {
38 // Safety: layout is always valid
39 self.allocate_contiguous(unsafe {
40 Layout::from_size_align_unchecked(arch::PAGE_SIZE, arch::PAGE_SIZE)
41 })
42 }
43
44 pub fn allocate_one_zeroed(&mut self) -> Option<PhysicalAddress> {
45 // Safety: layout is always valid
46 self.allocate_contiguous_zeroed(unsafe {
47 Layout::from_size_align_unchecked(arch::PAGE_SIZE, arch::PAGE_SIZE)
48 })
49 }
50
51 pub fn allocate_contiguous(&mut self, layout: Layout) -> Option<PhysicalAddress> {
52 let requested_size = layout.pad_to_align().size();
53 assert_eq!(
54 layout.align(),
55 arch::PAGE_SIZE,
56 "BootstrapAllocator only supports page-aligned allocations"
57 );
58 let mut offset = self.offset;
59
60 for region in self.regions.iter().rev() {
61 // only consider regions that we haven't already exhausted
62 if offset < region.len() {
63 // Allocating a contiguous range has different requirements than "regular" allocation
64 // contiguous are rare and often happen in very critical paths where e.g. virtual
65 // memory is not available yet. So we rather waste some memory than outright crash.
66 if region.len() - offset < requested_size {
67 tracing::warn!(
68 "Skipped memory region {region:?} since it was too small to fulfill request for {requested_size} bytes. Wasted {} bytes in the process...",
69 region.len() - offset
70 );
71
72 self.offset += region.len() - offset;
73 offset = 0;
74 continue;
75 }
76
77 let frame = region.end.sub(offset + requested_size);
78 self.offset += requested_size;
79
80 return Some(frame);
81 }
82
83 offset -= region.len();
84 }
85
86 None
87 }
88
89 pub fn deallocate_contiguous(&mut self, _addr: PhysicalAddress, _layout: Layout) {
90 unimplemented!("Bootstrap allocator can't free");
91 }
92
93 pub fn allocate_contiguous_zeroed(&mut self, layout: Layout) -> Option<PhysicalAddress> {
94 let requested_size = layout.pad_to_align().size();
95 let addr = self.allocate_contiguous(layout)?;
96
97 // Safety: we just allocated the frame
98 unsafe {
99 ptr::write_bytes::<u8>(
100 arch::KERNEL_ASPACE_RANGE
101 .start()
102 .add(addr.get())
103 .as_mut_ptr(),
104 0,
105 requested_size,
106 );
107 }
108 Some(addr)
109 }
110}
111
/// Iterator over the still-unallocated parts of a [`BootstrapAllocator`]'s
/// memory regions, yielded in reverse region order.
pub struct FreeRegions<'a> {
    /// Bytes already allocated (copied from `BootstrapAllocator::offset`);
    /// consumed while skipping exhausted regions and trimming the first
    /// partially-used one during iteration.
    offset: usize,
    /// The underlying regions, iterated last-to-first.
    inner: iter::Cloned<iter::Rev<slice::Iter<'a, Range<PhysicalAddress>>>>,
}
116
117impl Iterator for FreeRegions<'_> {
118 type Item = Range<PhysicalAddress>;
119
120 fn next(&mut self) -> Option<Self::Item> {
121 loop {
122 let mut region = self.inner.next()?;
123 // keep advancing past already fully used memory regions
124 if self.offset >= region.len() {
125 self.offset -= region.len();
126 continue;
127 } else if self.offset > 0 {
128 region.end = region.end.sub(self.offset);
129 self.offset = 0;
130 }
131
132 return Some(region);
133 }
134 }
135}