//! Next Generation WASM Microkernel Operating System
// Copyright 2025 Jonas Kruckenberg
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

8use core::alloc::Layout;
9use core::ops::Range;
10
11use kmem::{AddressRangeExt, VirtualAddress};
12use rand::distr::{Distribution, Uniform};
13use rand::prelude::IteratorRandom;
14use rand_chacha::ChaCha20Rng;
15
16use crate::arch;
17
18pub fn init(prng: Option<ChaCha20Rng>) -> PageAllocator {
19 PageAllocator {
20 page_state: [false; arch::PAGE_TABLE_ENTRIES / 2],
21 prng,
22 }
23}
24
/// Virtual memory allocator for setting up initial mappings.
///
/// All regions will be huge page (1GiB) aligned.
///
/// Bookkeeping is one flag per *top-level* page table entry, covering only the
/// higher (kernel) half of the address space — hence the
/// `PAGE_TABLE_ENTRIES / 2` array length.
#[derive(Debug)]
pub struct PageAllocator {
    /// Whether a top-level page is in use. Index 0 corresponds to the lowest
    /// top-level page of the higher half (`usize::MAX << VIRT_ADDR_BITS`).
    page_state: [bool; arch::PAGE_TABLE_ENTRIES / 2],
    /// A random number generator that should be used to generate random addresses or
    /// `None` if aslr is disabled.
    prng: Option<ChaCha20Rng>,
}
36
37impl PageAllocator {
38 fn allocate_pages(&mut self, num_pages: usize) -> VirtualAddress {
39 // find a consecutive range of `num` entries that are not used
40 let mut free_pages = self
41 .page_state
42 .windows(num_pages.div_ceil(8))
43 .enumerate()
44 .filter_map(|(idx, entries)| {
45 if entries.iter().all(|used| !used) {
46 Some(idx)
47 } else {
48 None
49 }
50 });
51
52 let maybe_idx = if let Some(rng) = self.prng.as_mut() {
53 free_pages.choose(rng)
54 } else {
55 free_pages.next()
56 };
57
58 if let Some(idx) = maybe_idx {
59 for i in 0..num_pages {
60 self.page_state[idx + i] = true;
61 }
62
63 let top_level_page_size = arch::page_size_for_level(arch::PAGE_TABLE_LEVELS - 1);
64
65 let page = idx
66 .checked_mul(top_level_page_size) // convert the index into an actual address
67 .and_then(|idx| idx.checked_add(usize::MAX << arch::VIRT_ADDR_BITS)) // and shift it into the kernel half
68 .unwrap();
69
70 VirtualAddress::new(page)
71 } else {
72 panic!("no usable top-level pages found ({num_pages} pages requested)");
73 }
74 }
75
76 pub fn reserve(&mut self, mut virt_base: VirtualAddress, mut remaining_bytes: usize) {
77 log::trace!(
78 "marking {virt_base}..{} as used",
79 virt_base.add(remaining_bytes)
80 );
81
82 let top_level_page_size = arch::page_size_for_level(arch::PAGE_TABLE_LEVELS - 1);
83 debug_assert!(virt_base.is_aligned_to(top_level_page_size));
84
85 while remaining_bytes > 0 {
86 let page_idx = virt_base
87 .get()
88 .checked_sub(usize::MAX << arch::VIRT_ADDR_BITS)
89 .unwrap()
90 .checked_div(top_level_page_size)
91 .unwrap();
92
93 self.page_state[page_idx] = true;
94
95 virt_base = virt_base.add(top_level_page_size);
96 remaining_bytes -= top_level_page_size;
97 }
98 }
99
100 pub fn allocate(&mut self, layout: Layout) -> Range<VirtualAddress> {
101 assert!(layout.align().is_power_of_two());
102
103 let top_level_page_size = arch::page_size_for_level(arch::PAGE_TABLE_LEVELS - 1);
104
105 // how many top-level pages are needed to map `size` bytes
106 // and attempt to allocate them
107 let base = self.allocate_pages(layout.size().div_ceil(top_level_page_size));
108
109 // calculate the base address of the page
110 //
111 // we know that entry_idx is between 0 and PAGE_TABLE_ENTRIES / 2
112 // and represents a top-level page in the *higher half* of the address space.
113 //
114 // we can then take the lowest possible address of the higher half (`usize::MAX << VA_BITS`)
115 // and add the `idx` multiple of the size of a top-level entry to it
116
117 let offset = if let Some(rng) = self.prng.as_mut() {
118 // Choose a random offset.
119 let max_offset = top_level_page_size - (layout.size() % top_level_page_size);
120
121 if max_offset / layout.align() > 0 {
122 let uniform_range = Uniform::new(0, max_offset / layout.align()).unwrap();
123
124 uniform_range.sample(rng) * layout.align()
125 } else {
126 0
127 }
128 } else {
129 0
130 };
131
132 let start = base.add(offset);
133 Range::from_start_len(start, layout.size())
134 }
135}