//! Next Generation WASM Microkernel Operating System
1// Copyright 2025 Jonas Kruckenberg
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8mod address_space;
9mod address_space_region;
10pub mod bootstrap_alloc;
11pub mod flush;
12pub mod frame_alloc;
13mod mmap;
14mod provider;
15mod trap_handler;
16mod vmo;
17
18use alloc::format;
19use alloc::string::ToString;
20use alloc::sync::Arc;
21use core::num::NonZeroUsize;
22use core::ops::Range;
23use core::{fmt, slice};
24
25pub use address_space::{AddressSpace, Batch};
26pub use address_space_region::AddressSpaceRegion;
27pub use flush::Flush;
28use k23_spin::{Mutex, OnceLock};
29use kmem::{AddressRangeExt, PhysicalAddress, VirtualAddress};
30use loader_api::BootInfo;
31pub use mmap::Mmap;
32use rand::SeedableRng;
33use rand_chacha::ChaCha20Rng;
34pub use trap_handler::handle_page_fault;
35pub use vmo::Vmo;
36use xmas_elf::program::Type;
37
38use crate::arch;
39use crate::mem::frame_alloc::FrameAllocator;
40
/// One kibibyte (1024 bytes).
pub const KIB: usize = 1 << 10;
/// One mebibyte (1024 KiB).
pub const MIB: usize = 1 << 20;
/// One gibibyte (1024 MiB).
pub const GIB: usize = 1 << 30;
44
/// Global handle to the kernel address space; populated exactly once by [`init`].
static KERNEL_ASPACE: OnceLock<Arc<Mutex<AddressSpace>>> = OnceLock::new();
46
47pub fn with_kernel_aspace<F, R>(f: F) -> R
48where
49 F: FnOnce(&Arc<Mutex<AddressSpace>>) -> R,
50{
51 let aspace = KERNEL_ASPACE
52 .get()
53 .expect("kernel address space not initialized");
54 f(aspace)
55}
56
57pub fn init(
58 boot_info: &BootInfo,
59 rand: &mut impl rand::RngCore,
60 frame_alloc: &'static FrameAllocator,
61) -> crate::Result<()> {
62 KERNEL_ASPACE.get_or_try_init(|| -> crate::Result<_> {
63 let (hw_aspace, mut flush) = arch::AddressSpace::from_active(arch::DEFAULT_ASID);
64
65 // Safety: `init` is called during startup where the kernel address space is the only address space available
66 let mut aspace = unsafe {
67 AddressSpace::from_active_kernel(
68 hw_aspace,
69 Some(ChaCha20Rng::from_rng(rand)),
70 frame_alloc,
71 )
72 };
73
74 reserve_wired_regions(&mut aspace, boot_info, &mut flush);
75 flush.flush().unwrap();
76
77 tracing::trace!("Kernel AddressSpace {aspace:?}");
78
79 Ok(Arc::new(Mutex::new(aspace)))
80 })?;
81
82 Ok(())
83}
84
85fn reserve_wired_regions(aspace: &mut AddressSpace, boot_info: &BootInfo, flush: &mut Flush) {
86 // reserve the physical memory map
87 aspace
88 .reserve(
89 boot_info.physical_memory_map.clone(),
90 Permissions::READ | Permissions::WRITE,
91 Some("Physical Memory Map".to_string()),
92 flush,
93 )
94 .unwrap();
95
96 // Safety: we have to trust the loaders BootInfo here
97 let own_elf = unsafe {
98 let base = boot_info
99 .physical_address_offset
100 .add(boot_info.kernel_phys.start.get())
101 .as_ptr();
102
103 slice::from_raw_parts(base, boot_info.kernel_phys.len())
104 };
105 let own_elf = xmas_elf::ElfFile::new(own_elf).unwrap();
106
107 for ph in own_elf.program_iter() {
108 if ph.get_type().unwrap() != Type::Load {
109 continue;
110 }
111
112 let virt = boot_info
113 .kernel_virt
114 .start
115 .add(usize::try_from(ph.virtual_addr()).unwrap());
116
117 let mut permissions = Permissions::empty();
118 if ph.flags().is_read() {
119 permissions |= Permissions::READ;
120 }
121 if ph.flags().is_write() {
122 permissions |= Permissions::WRITE;
123 }
124 if ph.flags().is_execute() {
125 permissions |= Permissions::EXECUTE;
126 }
127
128 assert!(
129 !permissions.contains(Permissions::WRITE | Permissions::EXECUTE),
130 "elf segment (virtual range {:#x}..{:#x}) is marked as write-execute",
131 ph.virtual_addr(),
132 ph.virtual_addr() + ph.mem_size()
133 );
134
135 aspace
136 .reserve(
137 Range {
138 start: virt.align_down(arch::PAGE_SIZE),
139 end: virt
140 .add(usize::try_from(ph.mem_size()).unwrap())
141 .align_up(arch::PAGE_SIZE),
142 },
143 permissions,
144 Some(format!("Kernel {permissions} Segment")),
145 flush,
146 )
147 .unwrap();
148 }
149}
150
bitflags::bitflags! {
    /// Flags describing the kind of access that triggered a page fault.
    #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
    pub struct PageFaultFlags: u8 {
        /// The fault was caused by a memory load
        const LOAD = 1 << 0;
        /// The fault was caused by a memory store
        const STORE = 1 << 1;
        /// The fault was caused by an instruction fetch
        // NOTE(review): bit 2 is skipped (INSTRUCTION is 1 << 3, not 1 << 2) —
        // confirm whether this mirrors a hardware encoding or is historical.
        const INSTRUCTION = 1 << 3;
    }
}
162
163impl fmt::Display for PageFaultFlags {
164 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
165 bitflags::parser::to_writer(self, f)
166 }
167}
168
169impl PageFaultFlags {
170 pub fn is_valid(self) -> bool {
171 !self.contains(PageFaultFlags::LOAD | PageFaultFlags::STORE)
172 }
173
174 pub fn cause_is_read(self) -> bool {
175 self.contains(PageFaultFlags::LOAD)
176 }
177 pub fn cause_is_write(self) -> bool {
178 self.contains(PageFaultFlags::STORE)
179 }
180 pub fn cause_is_instr_fetch(self) -> bool {
181 self.contains(PageFaultFlags::INSTRUCTION)
182 }
183}
184
bitflags::bitflags! {
    /// Access permissions for a mapped or reserved memory region.
    #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
    pub struct Permissions: u8 {
        /// Allow reads from the memory region
        const READ = 1 << 0;
        /// Allow writes to the memory region
        const WRITE = 1 << 1;
        /// Allow code execution from the memory region
        const EXECUTE = 1 << 2;
        /// Allow userspace to access the memory region
        const USER = 1 << 3;
    }
}
198
199impl fmt::Display for Permissions {
200 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
201 bitflags::parser::to_writer(self, f)
202 }
203}
204
205impl Permissions {
206 /// Returns whether the set of permissions is `R^X` ie doesn't allow
207 /// write-execute at the same time.
208 pub fn is_valid(self) -> bool {
209 !self.contains(Permissions::WRITE | Permissions::EXECUTE)
210 }
211}
212
213impl From<PageFaultFlags> for Permissions {
214 fn from(value: PageFaultFlags) -> Self {
215 let mut out = Permissions::empty();
216 if value.contains(PageFaultFlags::STORE) {
217 out |= Permissions::WRITE;
218 } else {
219 out |= Permissions::READ;
220 }
221 if value.contains(PageFaultFlags::INSTRUCTION) {
222 out |= Permissions::EXECUTE;
223 }
224 out
225 }
226}
227
/// Architecture-specific half of an address space: an abstraction over the MMU
/// page tables that the generic [`AddressSpace`] drives.
pub trait ArchAddressSpace {
    /// Arch-specific page-table entry flags, constructible from generic
    /// [`Permissions`].
    type Flags: From<Permissions> + bitflags::Flags;

    /// Creates a fresh address space tagged with `asid`, allocating page-table
    /// frames from `frame_alloc`.
    fn new(asid: u16, frame_alloc: &FrameAllocator) -> crate::Result<(Self, Flush)>
    where
        Self: Sized;
    /// Constructs a handle for the currently active hardware address space.
    fn from_active(asid: u16) -> (Self, Flush)
    where
        Self: Sized;

    /// Maps the physically contiguous range starting at `phys` to `virt` for
    /// `len` bytes, recording TLB maintenance in `flush`.
    ///
    /// # Safety
    ///
    /// The exact contract is defined by each implementor — TODO document;
    /// presumably `virt`/`phys`/`len` must be page-aligned and the target
    /// range must not clobber live mappings.
    unsafe fn map_contiguous(
        &mut self,
        frame_alloc: &FrameAllocator,
        virt: VirtualAddress,
        phys: PhysicalAddress,
        len: NonZeroUsize,
        flags: Self::Flags,
        flush: &mut Flush,
    ) -> crate::Result<()>;

    /// Replaces the flags of the existing mappings in `virt..virt+len`.
    ///
    /// # Safety
    ///
    /// Implementor-defined contract — TODO document; changing flags can
    /// invalidate references into the affected range.
    unsafe fn update_flags(
        &mut self,
        virt: VirtualAddress,
        len: NonZeroUsize,
        new_flags: Self::Flags,
        flush: &mut Flush,
    ) -> crate::Result<()>;

    /// Removes the mappings in `virt..virt+len`.
    ///
    /// # Safety
    ///
    /// Implementor-defined contract — TODO document; the caller must ensure no
    /// live references into the unmapped range remain.
    unsafe fn unmap(
        &mut self,
        virt: VirtualAddress,
        len: NonZeroUsize,
        flush: &mut Flush,
    ) -> crate::Result<()>;

    /// Looks up the physical address and flags `virt` is currently mapped to,
    /// or `None` if it is unmapped.
    ///
    /// # Safety
    ///
    /// Implementor-defined contract — TODO document why this walk requires
    /// `unsafe` (presumably it reads raw page-table memory).
    unsafe fn query(&mut self, virt: VirtualAddress) -> Option<(PhysicalAddress, Self::Flags)>;

    /// Switches the hardware MMU to this address space.
    ///
    /// # Safety
    ///
    /// The caller must ensure all memory currently in use (stack, code, …) is
    /// mapped identically in this address space — TODO confirm.
    unsafe fn activate(&self);

    /// Creates an empty [`Flush`] token for batching TLB maintenance.
    fn new_flush(&self) -> Flush;
}