//! Next Generation WASM Microkernel Operating System
1// Copyright 2025 Jonas Kruckenberg
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8#![no_std]
9#![no_main]
10#![feature(naked_functions)]
11#![feature(new_range_api)]
12#![feature(maybe_uninit_slice)]
13#![feature(alloc_layout_extra)]
14#![feature(let_chains)]
15
16use crate::boot_info::prepare_boot_info;
17use crate::error::Error;
18use crate::frame_alloc::FrameAllocator;
19use crate::kernel::Kernel;
20use crate::machine_info::MachineInfo;
21use crate::mapping::{
22 StacksAllocation, TlsAllocation, identity_map_self, map_kernel, map_kernel_stacks,
23 map_physical_memory,
24};
25use arrayvec::ArrayVec;
26use core::ffi::c_void;
27use core::range::Range;
28use rand::SeedableRng;
29use rand_chacha::ChaCha20Rng;
30use spin::{Barrier, OnceLock};
31
32mod arch;
33mod boot_info;
34mod error;
35mod frame_alloc;
36mod kernel;
37mod logger;
38mod machine_info;
39mod mapping;
40mod page_alloc;
41mod panic;
42
/// Whether to randomize the kernel's virtual load address (KASLR).
pub const ENABLE_KASLR: bool = false;
/// Maximum log level emitted by the loader's logger.
pub const LOG_LEVEL: log::Level = log::Level::Trace;
/// Size in bytes of each per-hart loader stack (see `SelfRegions::collect`).
pub const STACK_SIZE: usize = 32 * arch::PAGE_SIZE;

/// Crate-wide result alias using this crate's [`Error`] type.
pub type Result<T> = core::result::Result<T, Error>;
48
/// Per-hart entry point: runs one-time global initialization on the first
/// hart to arrive, then activates the new address space on every hart,
/// sets up per-hart TLS, and jumps into the kernel.
///
/// # Safety
///
/// The passed `opaque` ptr must point to a valid memory region.
unsafe fn main(hartid: usize, opaque: *const c_void, boot_ticks: u64) -> ! {
    // Whichever hart reaches this first performs global init; all other harts
    // block in `get_or_init` until the shared result is ready.
    static GLOBAL_INIT: OnceLock<GlobalInitResult> = OnceLock::new();
    let res = GLOBAL_INIT.get_or_init(|| do_global_init(hartid, opaque));

    // Enable the MMU on all harts. Note that this technically reenables it on the initializing hart
    // but there is no harm in that.
    // Safety: `res.root_pgtable` was fully populated by `do_global_init` above,
    // including an identity mapping of this loader so execution can continue.
    unsafe {
        log::trace!("activating MMU...");
        arch::activate_aspace(res.root_pgtable);
        log::trace!("activated.");
    }

    // Initialize this hart's thread-local storage region, if the kernel image
    // has a TLS segment at all.
    if let Some(alloc) = &res.maybe_tls_alloc {
        alloc.initialize_for_hart(hartid);
    }

    // Safety: this will jump to the kernel entry
    unsafe { arch::handoff_to_kernel(hartid, boot_ticks, res) }
}
72
/// Result of the one-time global initialization, shared read-only with every hart.
pub struct GlobalInitResult {
    // Pointer to the `BootInfo` struct produced by `prepare_boot_info` and
    // handed to the kernel.
    boot_info: *mut loader_api::BootInfo,
    // Virtual address of the kernel entry point (image base + ELF entry offset).
    kernel_entry: usize,
    // Address of the root page table each hart activates.
    root_pgtable: usize,
    // Per-hart kernel stack allocation.
    stacks_alloc: StacksAllocation,
    // TLS allocation; `None` when the kernel image has no TLS segment.
    maybe_tls_alloc: Option<TlsAllocation>,
    // Barrier sized to the number of harts in `hart_mask`.
    barrier: Barrier,
}
81
// Safety: `*mut BootInfo` isn't `Send`, but `GlobalInitResult` will only ever be
// read from after initialization, so handing it to another hart is fine.
unsafe impl Send for GlobalInitResult {}
// Safety: `*mut BootInfo` isn't `Sync`, but `GlobalInitResult` will only ever be
// read from after initialization, so sharing references across harts is fine.
unsafe impl Sync for GlobalInitResult {}
86
87fn do_global_init(hartid: usize, opaque: *const c_void) -> GlobalInitResult {
88 logger::init(LOG_LEVEL.to_level_filter());
89 // Safety: TODO
90 let minfo = unsafe { MachineInfo::from_dtb(opaque).expect("failed to parse machine info") };
91 log::debug!("\n{minfo}");
92
93 arch::start_secondary_harts(hartid, &minfo).unwrap();
94
95 let self_regions = SelfRegions::collect(&minfo);
96 log::debug!("{self_regions:#x?}");
97
98 let fdt_phys = {
99 let fdt = minfo.fdt.as_ptr_range();
100 Range::from(fdt.start as usize..fdt.end as usize)
101 };
102
103 // Initialize the frame allocator
104 let allocatable_memories = allocatable_memory_regions(&minfo, &self_regions, fdt_phys);
105 log::debug!("allocatable memory regions {allocatable_memories:#x?}");
106 let mut frame_alloc = FrameAllocator::new(&allocatable_memories);
107
108 // initialize the random number generator
109 let rng = ENABLE_KASLR.then_some(ChaCha20Rng::from_seed(
110 minfo.rng_seed.unwrap()[0..32].try_into().unwrap(),
111 ));
112 let rng_seed = rng.as_ref().map(|rng| rng.get_seed()).unwrap_or_default();
113
114 // Initialize the page allocator
115 let mut page_alloc = page_alloc::init(rng);
116
117 let root_pgtable = frame_alloc
118 .allocate_one_zeroed(
119 0, // called before translation into higher half
120 )
121 .unwrap();
122
123 // Identity map the loader itself (this binary).
124 //
125 // we're already running in s-mode which means that once we switch on the MMU it takes effect *immediately*
126 // as opposed to m-mode where it would take effect after the jump to s-mode.
127 // This means we need to temporarily identity map the loader here, so we can continue executing our own code.
128 // We will then unmap the loader in the kernel.
129 identity_map_self(root_pgtable, &mut frame_alloc, &self_regions).unwrap();
130
131 // Map the physical memory into kernel address space.
132 //
133 // This will be used by the kernel to access the page tables, BootInfo struct and maybe
134 // more in the future.
135 let (phys_off, phys_map) =
136 map_physical_memory(root_pgtable, &mut frame_alloc, &mut page_alloc, &minfo).unwrap();
137
138 // Activate the MMU with the address space we have built so far.
139 // the rest of the address space setup will happen in virtual memory (mostly so that we
140 // can correctly apply relocations without having to do expensive virt to phys queries)
141 // Safety: there is no safety
142 unsafe {
143 log::trace!("activating MMU...");
144 arch::activate_aspace(root_pgtable);
145 log::trace!("activated.");
146 }
147
148 let kernel = Kernel::from_static(phys_off).unwrap();
149 // print the elf sections for debugging purposes
150 log::debug!("\n{kernel}");
151
152 let (kernel_virt, maybe_tls_alloc) = map_kernel(
153 root_pgtable,
154 &mut frame_alloc,
155 &mut page_alloc,
156 &kernel,
157 &minfo,
158 phys_off,
159 )
160 .unwrap();
161
162 log::trace!("KASLR: Kernel image at {:#x}", kernel_virt.start);
163
164 let stacks_alloc = map_kernel_stacks(
165 root_pgtable,
166 &mut frame_alloc,
167 &mut page_alloc,
168 &minfo,
169 usize::try_from(kernel._loader_config.kernel_stack_size_pages).unwrap(),
170 phys_off,
171 )
172 .unwrap();
173
174 let frame_usage = frame_alloc.frame_usage();
175 log::debug!(
176 "Mapping complete, permanently used {} KiB.",
177 (frame_usage * arch::PAGE_SIZE) / 1024,
178 );
179
180 let boot_info = prepare_boot_info(
181 frame_alloc,
182 phys_off,
183 phys_map,
184 kernel_virt,
185 maybe_tls_alloc.as_ref().map(|alloc| alloc.template.clone()),
186 Range::from(self_regions.executable.start..self_regions.read_write.end),
187 kernel.phys_range(),
188 fdt_phys,
189 minfo.hart_mask,
190 rng_seed,
191 )
192 .unwrap();
193
194 let kernel_entry = kernel_virt
195 .start
196 .checked_add(usize::try_from(kernel.elf_file.header.pt2.entry_point()).unwrap())
197 .unwrap();
198
199 GlobalInitResult {
200 boot_info,
201 kernel_entry,
202 root_pgtable,
203 maybe_tls_alloc,
204 stacks_alloc,
205 barrier: Barrier::new(minfo.hart_mask.count_ones() as usize),
206 }
207}
208
/// The memory regions occupied by the loader binary itself, derived from
/// linker-script symbols in [`SelfRegions::collect`].
#[derive(Debug)]
struct SelfRegions {
    // Executable region (`__text_start..__text_end`).
    pub executable: Range<usize>,
    // Read-only data region (`__rodata_start..__rodata_end`).
    pub read_only: Range<usize>,
    // Read-write region: `.bss` plus the per-hart loader stacks.
    pub read_write: Range<usize>,
}
215
impl SelfRegions {
    /// Determine the memory regions occupied by this loader binary itself,
    /// using symbols provided by the linker script.
    ///
    /// The read-write region is extended beyond `__stack_start` by one
    /// `STACK_SIZE` loader stack per hart in `minfo.hart_mask`.
    pub fn collect(minfo: &MachineInfo) -> Self {
        // Linker-script symbols marking section boundaries. Only their
        // *addresses* are meaningful; the values are never read.
        unsafe extern "C" {
            static __text_start: u8;
            static __text_end: u8;
            static __rodata_start: u8;
            static __rodata_end: u8;
            static __bss_start: u8;
            static __stack_start: u8;
        }

        SelfRegions {
            executable: Range {
                start: &raw const __text_start as usize,
                end: &raw const __text_end as usize,
            },
            read_only: Range {
                start: &raw const __rodata_start as usize,
                end: &raw const __rodata_end as usize,
            },
            read_write: Range {
                start: &raw const __bss_start as usize,
                // one loader stack per participating hart follows `__stack_start`
                end: (&raw const __stack_start as usize)
                    + (minfo.hart_mask.count_ones() as usize * STACK_SIZE),
            },
        }
    }
}
244
245fn allocatable_memory_regions(
246 minfo: &MachineInfo,
247 self_regions: &SelfRegions,
248 fdt: Range<usize>,
249) -> ArrayVec<Range<usize>, 16> {
250 let mut temp: ArrayVec<Range<usize>, 16> = minfo.memories.clone();
251
252 let mut exclude = |to_exclude: Range<usize>| {
253 for mut region in temp.take() {
254 if to_exclude.contains(®ion.start) && to_exclude.contains(®ion.end) {
255 // remove region
256 continue;
257 } else if region.contains(&to_exclude.start) && region.contains(&to_exclude.end) {
258 temp.push(Range::from(region.start..to_exclude.start));
259 temp.push(Range::from(to_exclude.end..region.end));
260 } else if to_exclude.contains(®ion.start) {
261 region.start = to_exclude.end;
262 temp.push(region);
263 } else if to_exclude.contains(®ion.end) {
264 region.end = to_exclude.start;
265 temp.push(region);
266 } else {
267 temp.push(region);
268 }
269 }
270 };
271
272 exclude(Range::from(
273 self_regions.executable.start..self_regions.read_write.end,
274 ));
275
276 exclude(fdt);
277
278 // // merge adjacent regions
279 // let mut out: ArrayVec<Range<usize>, 16> = ArrayVec::new();
280 // 'outer: for region in temp {
281 // for other in &mut out {
282 // if region.start == other.end {
283 // other.end = region.end;
284 // continue 'outer;
285 // }
286 // if region.end == other.start {
287 // other.start = region.start;
288 // continue 'outer;
289 // }
290 // }
291 //
292 // out.push(region);
293 // }
294
295 temp.sort_unstable_by_key(|region| region.start);
296
297 #[cfg(debug_assertions)]
298 for (i, region) in temp.iter().enumerate() {
299 for (j, other) in temp.iter().enumerate() {
300 if i == j {
301 continue;
302 }
303
304 assert!(
305 !other.contains(®ion.start) && !other.contains(&(region.end - 1)),
306 "regions {region:#x?} and {other:#x?} overlap"
307 );
308 }
309 }
310
311 temp
312}