//! Next Generation WASM Microkernel Operating System
1// Copyright 2025 Jonas Kruckenberg
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8#![no_std]
9#![no_main]
10#![feature(maybe_uninit_slice)]
11#![feature(alloc_layout_extra)]
12
13use core::ffi::c_void;
14use core::mem;
15use core::ops::Range;
16
17use k23_arrayvec::ArrayVec;
18use k23_spin::{Barrier, OnceLock};
19use kmem::{AddressRangeExt, PhysicalAddress, VirtualAddress};
20use rand::SeedableRng;
21use rand_chacha::ChaCha20Rng;
22
23use crate::boot_info::prepare_boot_info;
24use crate::error::Error;
25use crate::frame_alloc::FrameAllocator;
26use crate::kernel::Kernel;
27use crate::machine_info::MachineInfo;
28use crate::mapping::{
29 StacksAllocation, TlsAllocation, identity_map_self, map_kernel, map_kernel_stacks,
30 map_physical_memory,
31};
32
33mod arch;
34mod boot_info;
35mod error;
36mod frame_alloc;
37mod kernel;
38mod logger;
39mod machine_info;
40mod mapping;
41mod page_alloc;
42mod panic;
43
/// Whether to randomize the kernel's virtual memory layout (KASLR).
pub const ENABLE_KASLR: bool = false;
/// Maximum log level emitted by the loader's logger.
pub const LOG_LEVEL: log::Level = log::Level::Trace;
/// Size of each per-hart loader boot stack, in bytes (32 pages).
pub const STACK_SIZE: usize = 32 * arch::PAGE_SIZE;

/// Crate-wide result alias using the loader's [`Error`] type.
pub type Result<T> = core::result::Result<T, Error>;
49
/// Per-hart entry point of the loader, reached from the `arch` startup code.
///
/// Every hart enters here. The first hart to arrive performs the one-time
/// global initialization via [`do_global_init`]; the others block inside
/// `get_or_init` until the result is published. All harts then activate the
/// shared address space and jump into the kernel.
///
/// # Safety
///
/// The passed `opaque` ptr must point to a valid memory region.
unsafe fn main(hartid: usize, opaque: *const c_void, boot_ticks: u64) -> ! {
    static GLOBAL_INIT: OnceLock<GlobalInitResult> = OnceLock::new();
    let res = GLOBAL_INIT.get_or_init(|| do_global_init(hartid, opaque));

    // Enable the MMU on all harts. Note that this technically reenables it on the initializing hart
    // but there is no harm in that.
    // Safety: `root_pgtable` identity-maps the loader itself (set up in
    // `do_global_init`), so execution continues at the same addresses once
    // translation is switched on.
    unsafe {
        log::trace!("activating MMU...");
        arch::activate_aspace(res.root_pgtable);
        log::trace!("activated.");
    }

    // Set up this hart's thread-local storage, if the kernel required a TLS
    // allocation.
    if let Some(alloc) = &res.maybe_tls_alloc {
        alloc.initialize_for_hart(hartid);
    }

    // Safety: this will jump to the kernel entry
    unsafe { arch::handoff_to_kernel(hartid, boot_ticks, res) }
}
73
/// Result of the one-time global initialization, shared read-only with every
/// hart before the jump into the kernel.
pub struct GlobalInitResult {
    // Pointer to the `BootInfo` struct handed to the kernel.
    boot_info: *mut loader_api::BootInfo,
    // Virtual address of the kernel's entry point.
    kernel_entry: VirtualAddress,
    // Physical address of the root page table that all harts activate.
    root_pgtable: PhysicalAddress,
    // Kernel stack allocation (one stack per hart).
    stacks_alloc: StacksAllocation,
    // Kernel TLS allocation, present only if the kernel image needs TLS.
    maybe_tls_alloc: Option<TlsAllocation>,
    // Barrier sized to the number of participating harts (`hart_mask` popcount).
    barrier: Barrier,
}
82
// Safety: `*mut BootInfo` isn't `Send`, but `GlobalInitResult` is only ever read from
// (it is published once through a `OnceLock` in `main` and never mutated afterwards),
// so moving it across harts is fine.
unsafe impl Send for GlobalInitResult {}
// Safety: `*mut BootInfo` isn't `Sync`, but — same reasoning as `Send` above — the
// struct is only ever read from after initialization, so shared access is fine.
unsafe impl Sync for GlobalInitResult {}
87
88fn do_global_init(hartid: usize, opaque: *const c_void) -> GlobalInitResult {
89 logger::init(LOG_LEVEL.to_level_filter());
90 // Safety: TODO
91 let minfo = unsafe { MachineInfo::from_dtb(opaque).expect("failed to parse machine info") };
92 log::debug!("\n{minfo}");
93
94 arch::start_secondary_harts(hartid, &minfo).unwrap();
95
96 let self_regions = SelfRegions::collect(&minfo);
97 log::debug!("{self_regions:#x?}");
98
99 let fdt_phys = Range::from_start_len(
100 PhysicalAddress::from_ptr(minfo.fdt.as_ptr()),
101 minfo.fdt.len(),
102 );
103
104 // Initialize the frame allocator
105 let allocatable_memories = allocatable_memory_regions(&minfo, &self_regions, fdt_phys.clone());
106 log::debug!("allocatable memory regions {allocatable_memories:#x?}");
107 let mut frame_alloc = FrameAllocator::new(&allocatable_memories);
108
109 // initialize the random number generator
110 let rng = ENABLE_KASLR.then_some(ChaCha20Rng::from_seed(
111 minfo.rng_seed.unwrap()[0..32].try_into().unwrap(),
112 ));
113 let rng_seed = rng.as_ref().map(|rng| rng.get_seed()).unwrap_or_default();
114
115 // Initialize the page allocator
116 let mut page_alloc = page_alloc::init(rng);
117
118 let root_pgtable = frame_alloc
119 .allocate_one_zeroed(
120 VirtualAddress::MIN, // called before translation into higher half
121 )
122 .unwrap();
123
124 // Identity map the loader itself (this binary).
125 //
126 // we're already running in s-mode which means that once we switch on the MMU it takes effect *immediately*
127 // as opposed to m-mode where it would take effect after the jump to s-mode.
128 // This means we need to temporarily identity map the loader here, so we can continue executing our own code.
129 // We will then unmap the loader in the kernel.
130 identity_map_self(root_pgtable, &mut frame_alloc, &self_regions).unwrap();
131
132 // Map the physical memory into kernel address space.
133 //
134 // This will be used by the kernel to access the page tables, BootInfo struct and maybe
135 // more in the future.
136 let (phys_off, phys_map) =
137 map_physical_memory(root_pgtable, &mut frame_alloc, &mut page_alloc, &minfo).unwrap();
138
139 // Activate the MMU with the address space we have built so far.
140 // the rest of the address space setup will happen in virtual memory (mostly so that we
141 // can correctly apply relocations without having to do expensive virt to phys queries)
142 // Safety: there is no safety
143 unsafe {
144 log::trace!("activating MMU...");
145 arch::activate_aspace(root_pgtable);
146 log::trace!("activated.");
147 }
148
149 let kernel = Kernel::from_static(phys_off).unwrap();
150 // print the elf sections for debugging purposes
151 log::debug!("\n{kernel}");
152
153 let (kernel_virt, maybe_tls_alloc) = map_kernel(
154 root_pgtable,
155 &mut frame_alloc,
156 &mut page_alloc,
157 &kernel,
158 &minfo,
159 phys_off,
160 )
161 .unwrap();
162
163 log::trace!("KASLR: Kernel image at {:?}", kernel_virt.start);
164
165 let stacks_alloc = map_kernel_stacks(
166 root_pgtable,
167 &mut frame_alloc,
168 &mut page_alloc,
169 &minfo,
170 usize::try_from(kernel._loader_config.kernel_stack_size_pages).unwrap(),
171 phys_off,
172 )
173 .unwrap();
174
175 let frame_usage = frame_alloc.frame_usage();
176 log::debug!(
177 "Mapping complete, permanently used {} KiB.",
178 (frame_usage * arch::PAGE_SIZE) / 1024,
179 );
180
181 let boot_info = prepare_boot_info(
182 frame_alloc,
183 phys_off,
184 phys_map,
185 kernel_virt.clone(),
186 maybe_tls_alloc.as_ref().map(|alloc| alloc.template.clone()),
187 self_regions.executable.start..self_regions.read_write.end,
188 kernel.phys_range(),
189 fdt_phys,
190 minfo.hart_mask,
191 rng_seed,
192 )
193 .unwrap();
194
195 let kernel_entry = kernel_virt
196 .start
197 .add(usize::try_from(kernel.elf_file.header.pt2.entry_point()).unwrap());
198
199 GlobalInitResult {
200 boot_info,
201 kernel_entry,
202 root_pgtable,
203 maybe_tls_alloc,
204 stacks_alloc,
205 barrier: Barrier::new(minfo.hart_mask.count_ones() as usize),
206 }
207}
208
/// Physical memory regions occupied by the loader binary itself, grouped by
/// the permissions they need when mapped.
#[derive(Debug)]
struct SelfRegions {
    // Executable code (.text).
    pub executable: Range<PhysicalAddress>,
    // Read-only data (.rodata).
    pub read_only: Range<PhysicalAddress>,
    // Writable data (.bss through the boot stacks).
    pub read_write: Range<PhysicalAddress>,
}
215
impl SelfRegions {
    /// Collects the physical memory regions occupied by the loader binary,
    /// using section-boundary symbols provided by the linker script.
    pub fn collect(minfo: &MachineInfo) -> Self {
        // These symbols are defined by the linker script; only their
        // *addresses* are meaningful — their values must never be read.
        unsafe extern "C" {
            static __text_start: u8;
            static __text_end: u8;
            static __rodata_start: u8;
            static __rodata_end: u8;
            static __bss_start: u8;
            static __stack_start: u8;
        }

        SelfRegions {
            executable: Range {
                start: PhysicalAddress::from_ptr(&raw const __text_start),
                end: PhysicalAddress::from_ptr(&raw const __text_end),
            },
            read_only: Range {
                start: PhysicalAddress::from_ptr(&raw const __rodata_start),
                end: PhysicalAddress::from_ptr(&raw const __rodata_end),
            },
            read_write: Range {
                start: PhysicalAddress::from_ptr(&raw const __bss_start),
                // Extend past `__stack_start` by one STACK_SIZE per hart in
                // `hart_mask` — presumably the per-hart boot stacks live
                // contiguously after the image and the linker script only
                // marks where they begin. TODO confirm against the linker
                // script and the arch startup code.
                end: PhysicalAddress::from_ptr(&raw const __stack_start)
                    .add(minfo.hart_mask.count_ones() as usize * STACK_SIZE),
            },
        }
    }
}
244
245fn allocatable_memory_regions(
246 minfo: &MachineInfo,
247 self_regions: &SelfRegions,
248 fdt: Range<PhysicalAddress>,
249) -> ArrayVec<Range<PhysicalAddress>, 16> {
250 let mut temp: ArrayVec<Range<PhysicalAddress>, 16> = minfo.memories.clone();
251
252 let mut exclude = |to_exclude: Range<PhysicalAddress>| {
253 for mut region in mem::take(&mut temp) {
254 if to_exclude.contains(®ion.start) && to_exclude.contains(®ion.end) {
255 // remove region
256 continue;
257 } else if region.contains(&to_exclude.start) && region.contains(&to_exclude.end) {
258 temp.push(region.start..to_exclude.start);
259 temp.push(to_exclude.end..region.end);
260 } else if to_exclude.contains(®ion.start) {
261 region.start = to_exclude.end;
262 temp.push(region);
263 } else if to_exclude.contains(®ion.end) {
264 region.end = to_exclude.start;
265 temp.push(region);
266 } else {
267 temp.push(region);
268 }
269 }
270 };
271
272 exclude(self_regions.executable.start..self_regions.read_write.end);
273
274 exclude(fdt);
275
276 // // merge adjacent regions
277 // let mut out: ArrayVec<Range<usize>, 16> = ArrayVec::new();
278 // 'outer: for region in temp {
279 // for other in &mut out {
280 // if region.start == other.end {
281 // other.end = region.end;
282 // continue 'outer;
283 // }
284 // if region.end == other.start {
285 // other.start = region.start;
286 // continue 'outer;
287 // }
288 // }
289 //
290 // out.push(region);
291 // }
292
293 temp.sort_unstable_by_key(|region| region.start);
294
295 #[cfg(debug_assertions)]
296 for (i, region) in temp.iter().enumerate() {
297 for (j, other) in temp.iter().enumerate() {
298 if i == j {
299 continue;
300 }
301
302 assert!(
303 !region.overlaps(other),
304 "regions {region:#x?} and {other:#x?} overlap"
305 );
306 }
307 }
308
309 temp
310}