//! Next Generation WASM Microkernel Operating System
1// Copyright 2025 Jonas Kruckenberg
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8use crate::arch;
9use core::alloc::Layout;
10use core::range::Range;
11use rand::distr::{Distribution, Uniform};
12use rand::prelude::IteratorRandom;
13use rand_chacha::ChaCha20Rng;
14
15pub fn init(prng: Option<ChaCha20Rng>) -> PageAllocator {
16 PageAllocator {
17 page_state: [false; arch::PAGE_TABLE_ENTRIES / 2],
18 prng,
19 }
20}
21
/// Virtual memory allocator for setting up initial mappings.
///
/// Bookkeeping is done at the granularity of *top-level* page-table entries,
/// i.e. the largest huge pages supported by the architecture, and only the
/// higher half of the address space is managed (hence `PAGE_TABLE_ENTRIES / 2`
/// entries).
///
/// All regions will be huge page (1GiB) aligned.
#[derive(Debug)]
pub struct PageAllocator {
    /// Whether a top-level page is in use.
    /// Index 0 corresponds to the first top-level entry of the higher half,
    /// i.e. virtual address `usize::MAX << arch::VIRT_ADDR_BITS`.
    page_state: [bool; arch::PAGE_TABLE_ENTRIES / 2],
    /// A random number generator that should be used to generate random addresses or
    /// `None` if aslr is disabled.
    prng: Option<ChaCha20Rng>,
}
33
34impl PageAllocator {
35 fn allocate_pages(&mut self, num_pages: usize) -> usize {
36 // find a consecutive range of `num` entries that are not used
37 let mut free_pages = self
38 .page_state
39 .windows(num_pages.div_ceil(8))
40 .enumerate()
41 .filter_map(|(idx, entries)| {
42 if entries.iter().all(|used| !used) {
43 Some(idx)
44 } else {
45 None
46 }
47 });
48
49 let maybe_idx = if let Some(rng) = self.prng.as_mut() {
50 free_pages.choose(rng)
51 } else {
52 free_pages.next()
53 };
54
55 if let Some(idx) = maybe_idx {
56 for i in 0..num_pages {
57 self.page_state[idx + i] = true;
58 }
59
60 idx
61 } else {
62 panic!("no usable top-level pages found ({num_pages} pages requested)");
63 }
64 }
65
66 pub fn reserve(&mut self, mut virt_base: usize, mut remaining_bytes: usize) {
67 log::trace!(
68 "marking {virt_base:#x}..{:#x} as used",
69 virt_base.checked_add(remaining_bytes).unwrap()
70 );
71
72 let top_level_page_size = arch::page_size_for_level(arch::PAGE_TABLE_LEVELS - 1);
73 debug_assert!(virt_base % top_level_page_size == 0);
74
75 while remaining_bytes > 0 {
76 let page_idx = (virt_base - (usize::MAX << arch::VIRT_ADDR_BITS)) / top_level_page_size;
77
78 self.page_state[page_idx] = true;
79
80 virt_base = virt_base.checked_add(top_level_page_size).unwrap();
81 remaining_bytes -= top_level_page_size;
82 }
83 }
84
85 pub fn allocate(&mut self, layout: Layout) -> Range<usize> {
86 assert!(layout.align().is_power_of_two());
87
88 let top_level_page_size = arch::page_size_for_level(arch::PAGE_TABLE_LEVELS - 1);
89
90 // how many top-level pages are needed to map `size` bytes
91 // and attempt to allocate them
92 let page_idx = self.allocate_pages(layout.size().div_ceil(top_level_page_size));
93
94 // calculate the base address of the page
95 //
96 // we know that entry_idx is between 0 and PAGE_TABLE_ENTRIES / 2
97 // and represents a top-level page in the *higher half* of the address space.
98 //
99 // we can then take the lowest possible address of the higher half (`usize::MAX << VA_BITS`)
100 // and add the `idx` multiple of the size of a top-level entry to it
101 let base = (usize::MAX << arch::VIRT_ADDR_BITS) + page_idx * top_level_page_size;
102
103 let offset = if let Some(rng) = self.prng.as_mut() {
104 // Choose a random offset.
105 let max_offset = top_level_page_size - (layout.size() % top_level_page_size);
106
107 if max_offset / layout.align() > 0 {
108 let uniform_range = Uniform::new(0, max_offset / layout.align()).unwrap();
109
110 uniform_range.sample(rng) * layout.align()
111 } else {
112 0
113 }
114 } else {
115 0
116 };
117
118 Range::from(
119 base.checked_add(offset).unwrap()..base.checked_add(offset + layout.size()).unwrap(),
120 )
121 }
122}