//! Next Generation WASM Microkernel Operating System
1// Copyright 2025 Jonas Kruckenberg
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8mod address;
9mod address_space;
10mod address_space_region;
11pub mod bootstrap_alloc;
12pub mod flush;
13pub mod frame_alloc;
14mod mmap;
15mod provider;
16mod trap_handler;
17mod vmo;
18
19use crate::arch;
20use crate::mem::frame_alloc::FrameAllocator;
21use alloc::format;
22use alloc::string::ToString;
23use alloc::sync::Arc;
24use core::num::NonZeroUsize;
25use core::range::Range;
26use core::{fmt, slice};
27use loader_api::BootInfo;
28use rand::SeedableRng;
29use rand_chacha::ChaCha20Rng;
30use spin::{Mutex, OnceLock};
31use xmas_elf::program::Type;
32
33pub use address::{AddressRangeExt, PhysicalAddress, VirtualAddress};
34pub use address_space::{AddressSpace, Batch};
35pub use address_space_region::AddressSpaceRegion;
36pub use flush::Flush;
37pub use mmap::Mmap;
38pub use trap_handler::handle_page_fault;
39pub use vmo::Vmo;
40
/// One kibibyte (2^10 bytes).
pub const KIB: usize = 1 << 10;
/// One mebibyte (2^20 bytes).
pub const MIB: usize = 1 << 20;
/// One gibibyte (2^30 bytes).
pub const GIB: usize = 1 << 30;
44
/// The singleton kernel address space, lazily initialized by [`init`] and
/// accessed through [`with_kernel_aspace`].
static KERNEL_ASPACE: OnceLock<Arc<Mutex<AddressSpace>>> = OnceLock::new();
46
47pub fn with_kernel_aspace<F, R>(f: F) -> R
48where
49 F: FnOnce(&Arc<Mutex<AddressSpace>>) -> R,
50{
51 let aspace = KERNEL_ASPACE
52 .get()
53 .expect("kernel address space not initialized");
54 f(aspace)
55}
56
/// Initializes the global kernel address space.
///
/// Adopts the hardware address space that is currently active (the one the
/// loader set up), wraps it in an [`AddressSpace`] seeded with a ChaCha20 RNG
/// (presumably for randomized placement of future mappings — TODO confirm),
/// and reserves the wired regions described in `boot_info` so later
/// allocations cannot overlap them.
///
/// Idempotent: `get_or_try_init` means repeat calls return `Ok(())` without
/// re-running the initialization closure.
///
/// # Errors
///
/// Propagates any error produced while constructing the address space.
pub fn init(
    boot_info: &BootInfo,
    rand: &mut impl rand::RngCore,
    frame_alloc: &'static FrameAllocator,
) -> crate::Result<()> {
    KERNEL_ASPACE.get_or_try_init(|| -> crate::Result<_> {
        // Take over the already-active page tables rather than building new ones.
        let (hw_aspace, mut flush) = arch::AddressSpace::from_active(arch::DEFAULT_ASID);

        // Safety: `init` is called during startup where the kernel address space is the only address space available
        let mut aspace = unsafe {
            AddressSpace::from_active_kernel(
                hw_aspace,
                Some(ChaCha20Rng::from_rng(rand)),
                frame_alloc,
            )
        };

        reserve_wired_regions(&mut aspace, boot_info, &mut flush);
        // Apply pending TLB maintenance before the address space is published.
        flush.flush().unwrap();

        tracing::trace!("Kernel AddressSpace {aspace:?}");

        Ok(Arc::new(Mutex::new(aspace)))
    })?;

    Ok(())
}
84
/// Reserves the virtual memory regions that must stay permanently mapped
/// ("wired") in the kernel address space: the physical memory map and the
/// kernel's own ELF load segments. Panics (`unwrap`/`assert!`) on any
/// inconsistency, since at this point in boot there is no way to recover.
fn reserve_wired_regions(aspace: &mut AddressSpace, boot_info: &BootInfo, flush: &mut Flush) {
    // reserve the physical memory map
    aspace
        .reserve(
            Range::from(
                VirtualAddress::new(boot_info.physical_memory_map.start).unwrap()
                    ..VirtualAddress::new(boot_info.physical_memory_map.end).unwrap(),
            ),
            Permissions::READ | Permissions::WRITE,
            Some("Physical Memory Map".to_string()),
            flush,
        )
        .unwrap();

    // Safety: we have to trust the loaders BootInfo here
    let own_elf = unsafe {
        // The kernel image is reachable through the physical memory map at
        // `physical_address_offset + kernel_phys.start`.
        let base = boot_info
            .physical_address_offset
            .checked_add(boot_info.kernel_phys.start)
            .unwrap() as *const u8;

        slice::from_raw_parts(
            base,
            boot_info
                .kernel_phys
                .end
                .checked_sub(boot_info.kernel_phys.start)
                .unwrap(),
        )
    };
    // Parse our own ELF image so each segment can be reserved with the
    // permissions its program header declares.
    let own_elf = xmas_elf::ElfFile::new(own_elf).unwrap();

    for ph in own_elf.program_iter() {
        // Only PT_LOAD segments occupy address space at runtime.
        if ph.get_type().unwrap() != Type::Load {
            continue;
        }

        // Segment virtual addresses are offsets from the kernel image base.
        let virt = VirtualAddress::new(boot_info.kernel_virt.start)
            .unwrap()
            .checked_add(usize::try_from(ph.virtual_addr()).unwrap())
            .unwrap();

        // Translate ELF segment flags into our generic permission bits.
        let mut permissions = Permissions::empty();
        if ph.flags().is_read() {
            permissions |= Permissions::READ;
        }
        if ph.flags().is_write() {
            permissions |= Permissions::WRITE;
        }
        if ph.flags().is_execute() {
            permissions |= Permissions::EXECUTE;
        }

        // Enforce W^X for the kernel's own image.
        assert!(
            !permissions.contains(Permissions::WRITE | Permissions::EXECUTE),
            "elf segment (virtual range {:#x}..{:#x}) is marked as write-execute",
            ph.virtual_addr(),
            ph.virtual_addr() + ph.mem_size()
        );

        aspace
            .reserve(
                // Round the segment out to whole pages; reservations are
                // page-granular.
                Range {
                    start: virt.align_down(arch::PAGE_SIZE),
                    end: virt
                        .checked_add(usize::try_from(ph.mem_size()).unwrap())
                        .unwrap()
                        .checked_align_up(arch::PAGE_SIZE)
                        .unwrap(),
                },
                permissions,
                Some(format!("Kernel {permissions} Segment")),
                flush,
            )
            .unwrap();
    }
}
162
bitflags::bitflags! {
    /// Describes the kind of memory access that triggered a page fault.
    #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
    pub struct PageFaultFlags: u8 {
        /// The fault was caused by a memory load
        const LOAD = 1 << 0;
        /// The fault was caused by a memory store
        const STORE = 1 << 1;
        /// The fault was caused by an instruction fetch
        // NOTE(review): bit 2 is skipped here — confirm whether that is
        // intentional (e.g. to match a hardware encoding) or a leftover.
        const INSTRUCTION = 1 << 3;
    }
}
174
impl fmt::Display for PageFaultFlags {
    /// Formats the flags as a human-readable flag list (the `bitflags`
    /// text format, e.g. `LOAD | INSTRUCTION`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        bitflags::parser::to_writer(self, f)
    }
}
180
181impl PageFaultFlags {
182 pub fn is_valid(self) -> bool {
183 !self.contains(PageFaultFlags::LOAD | PageFaultFlags::STORE)
184 }
185
186 pub fn cause_is_read(self) -> bool {
187 self.contains(PageFaultFlags::LOAD)
188 }
189 pub fn cause_is_write(self) -> bool {
190 self.contains(PageFaultFlags::STORE)
191 }
192 pub fn cause_is_instr_fetch(self) -> bool {
193 self.contains(PageFaultFlags::INSTRUCTION)
194 }
195}
196
bitflags::bitflags! {
    /// Access permissions for a mapped or reserved memory region.
    #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
    pub struct Permissions: u8 {
        /// Allow reads from the memory region
        const READ = 1 << 0;
        /// Allow writes to the memory region
        const WRITE = 1 << 1;
        /// Allow code execution from the memory region
        const EXECUTE = 1 << 2;
        /// Allow userspace to access the memory region
        const USER = 1 << 3;
    }
}
210
impl fmt::Display for Permissions {
    /// Formats the permissions as a human-readable flag list (the `bitflags`
    /// text format, e.g. `READ | WRITE`). Used for region names in
    /// `reserve_wired_regions`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        bitflags::parser::to_writer(self, f)
    }
}
216
217impl Permissions {
218 /// Returns whether the set of permissions is `R^X` ie doesn't allow
219 /// write-execute at the same time.
220 pub fn is_valid(self) -> bool {
221 !self.contains(Permissions::WRITE | Permissions::EXECUTE)
222 }
223}
224
225impl From<PageFaultFlags> for Permissions {
226 fn from(value: PageFaultFlags) -> Self {
227 let mut out = Permissions::empty();
228 if value.contains(PageFaultFlags::STORE) {
229 out |= Permissions::WRITE;
230 } else {
231 out |= Permissions::READ;
232 }
233 if value.contains(PageFaultFlags::INSTRUCTION) {
234 out |= Permissions::EXECUTE;
235 }
236 out
237 }
238}
239
/// Architecture-specific page-table manipulation backing an [`AddressSpace`].
pub trait ArchAddressSpace {
    /// Hardware page-table entry flags, convertible from the generic
    /// [`Permissions`].
    type Flags: From<Permissions> + bitflags::Flags;

    /// Creates a fresh address space tagged with `asid`, allocating any
    /// page-table frames from `frame_alloc`.
    fn new(asid: u16, frame_alloc: &FrameAllocator) -> crate::Result<(Self, Flush)>
    where
        Self: Sized;
    /// Constructs a handle to the currently active hardware address space
    /// under the given `asid` (see its use in `init`).
    fn from_active(asid: u16) -> (Self, Flush)
    where
        Self: Sized;

    /// Maps the physically contiguous range starting at `phys` to `virt` for
    /// `len` bytes with the given `flags`, recording TLB maintenance in `flush`.
    ///
    /// # Safety
    ///
    /// Caller must uphold the implementation's mapping invariants (valid,
    /// correctly aligned addresses and an unused virtual range — exact
    /// contract is defined by the implementor; TODO confirm).
    unsafe fn map_contiguous(
        &mut self,
        frame_alloc: &FrameAllocator,
        virt: VirtualAddress,
        phys: PhysicalAddress,
        len: NonZeroUsize,
        flags: Self::Flags,
        flush: &mut Flush,
    ) -> crate::Result<()>;

    /// Replaces the flags of the existing mappings covering `virt..virt+len`.
    ///
    /// # Safety
    ///
    /// Caller must ensure the range is mapped and that the new flags do not
    /// break invariants the rest of the kernel relies on (implementor-defined;
    /// TODO confirm).
    unsafe fn update_flags(
        &mut self,
        virt: VirtualAddress,
        len: NonZeroUsize,
        new_flags: Self::Flags,
        flush: &mut Flush,
    ) -> crate::Result<()>;

    /// Removes the mappings covering `virt..virt+len`, recording TLB
    /// maintenance in `flush`.
    ///
    /// # Safety
    ///
    /// Caller must ensure nothing still accesses the unmapped range
    /// (implementor-defined contract; TODO confirm).
    unsafe fn unmap(
        &mut self,
        virt: VirtualAddress,
        len: NonZeroUsize,
        flush: &mut Flush,
    ) -> crate::Result<()>;

    /// Looks up the physical address and flags currently mapped at `virt`,
    /// or `None` if unmapped.
    ///
    /// # Safety
    ///
    /// Implementor-defined contract; TODO confirm.
    unsafe fn query(&mut self, virt: VirtualAddress) -> Option<(PhysicalAddress, Self::Flags)>;

    /// Switches the hardware to this address space.
    ///
    /// # Safety
    ///
    /// Caller must ensure the address space maps everything needed to keep
    /// executing (implementor-defined contract; TODO confirm).
    unsafe fn activate(&self);

    /// Creates an empty [`Flush`] accumulator for this address space.
    fn new_flush(&self) -> Flush;
}