//! Next Generation WASM Microkernel Operating System
1// Copyright 2025 Jonas Kruckenberg
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8use crate::arch;
9use crate::mem::address::AddressRangeExt;
10use crate::mem::{
11 AddressSpace, AddressSpaceRegion, ArchAddressSpace, Batch, Permissions, PhysicalAddress,
12 VirtualAddress,
13};
14use alloc::string::String;
15use alloc::sync::Arc;
16use core::alloc::Layout;
17use core::num::NonZeroUsize;
18use core::range::Range;
19use core::{ptr, slice};
20use spin::Mutex;
21
/// A memory mapping.
///
/// This is essentially a handle to an [`AddressSpaceRegion`] with convenience methods for userspace
/// specific needs such as copying from and to memory.
#[derive(Debug)]
pub struct Mmap {
    // The owning address space. `None` iff the mapping was created through `Mmap::new_empty`,
    // in which case `Drop` has nothing to unmap.
    aspace: Option<Arc<Mutex<AddressSpace>>>,
    // Virtual address range covered by this mapping; empty (start == end) for `new_empty`.
    range: Range<VirtualAddress>,
}
31
32// Safety: All mutations of the `*mut AddressSpaceRegion` are happening through a `&mut AddressSpace`
33unsafe impl Send for Mmap {}
34// Safety: All mutations of the `*mut AddressSpaceRegion` are happening through a `&mut AddressSpace`
35unsafe impl Sync for Mmap {}
36
37impl Mmap {
38 /// Creates a new empty `Mmap`.
39 ///
40 /// Note that the size of this cannot be changed after the fact, all accessors will return empty
41 /// slices and permission changing methods will always fail.
42 pub const fn new_empty() -> Self {
43 Self {
44 aspace: None,
45 range: Range {
46 start: VirtualAddress::ZERO,
47 end: VirtualAddress::ZERO,
48 },
49 }
50 }
51
52 /// Creates a new read-write (`RW`) memory mapping in the given address space.
53 pub fn new_zeroed(
54 aspace: Arc<Mutex<AddressSpace>>,
55 len: usize,
56 align: usize,
57 name: Option<String>,
58 ) -> crate::Result<Self> {
59 debug_assert!(
60 align >= arch::PAGE_SIZE,
61 "alignment must be at least a page"
62 );
63
64 let layout = Layout::from_size_align(len, align).unwrap();
65
66 let mut aspace_ = aspace.lock();
67 let range = aspace_
68 .map(
69 layout,
70 Permissions::READ | Permissions::WRITE | Permissions::USER,
71 |range, perms, batch| {
72 Ok(AddressSpaceRegion::new_zeroed(
73 batch.frame_alloc,
74 range,
75 perms,
76 name,
77 ))
78 },
79 )?
80 .range;
81 drop(aspace_);
82
83 tracing::trace!("new_zeroed: {len} {range:?}");
84
85 Ok(Self {
86 aspace: Some(aspace),
87 range,
88 })
89 }
90
91 pub fn new_phys(
92 aspace: Arc<Mutex<AddressSpace>>,
93 range_phys: Range<PhysicalAddress>,
94 len: usize,
95 align: usize,
96 name: Option<String>,
97 ) -> crate::Result<Self> {
98 // debug_assert!(
99 // matches!(aspace.kind(), AddressSpaceKind::User),
100 // "cannot create UserMmap in kernel address space"
101 // );
102 debug_assert!(
103 align >= arch::PAGE_SIZE,
104 "alignment must be at least a page"
105 );
106 debug_assert!(len >= arch::PAGE_SIZE, "len must be at least a page");
107 debug_assert_eq!(
108 len % arch::PAGE_SIZE,
109 0,
110 "len must be a multiple of page size"
111 );
112
113 let layout = Layout::from_size_align(len, align).unwrap();
114
115 let mut aspace_ = aspace.lock();
116 let range = aspace_
117 .map(
118 layout,
119 Permissions::READ | Permissions::WRITE,
120 |range_virt, perms, _batch| {
121 Ok(AddressSpaceRegion::new_phys(
122 range_virt, perms, range_phys, name,
123 ))
124 },
125 )?
126 .range;
127 drop(aspace_);
128
129 tracing::trace!("new_phys: {len} {range:?} => {range_phys:?}");
130
131 Ok(Self {
132 aspace: Some(aspace),
133 range,
134 })
135 }
136
137 pub fn range(&self) -> Range<VirtualAddress> {
138 self.range
139 }
140
141 pub fn copy_from_userspace(
142 &self,
143 aspace: &mut AddressSpace,
144 src_range: Range<usize>,
145 dst: &mut [u8],
146 ) -> crate::Result<()> {
147 self.with_user_slice(aspace, src_range, |src| dst.clone_from_slice(src))
148 }
149
150 pub fn copy_to_userspace(
151 &mut self,
152 aspace: &mut AddressSpace,
153 src: &[u8],
154 dst_range: Range<usize>,
155 ) -> crate::Result<()> {
156 self.with_user_slice_mut(aspace, dst_range, |dst| {
157 dst.copy_from_slice(src);
158 })
159 }
160
161 pub fn with_user_slice<F>(
162 &self,
163 aspace: &mut AddressSpace,
164 range: Range<usize>,
165 f: F,
166 ) -> crate::Result<()>
167 where
168 F: FnOnce(&[u8]),
169 {
170 self.commit(aspace, range, false)?;
171
172 // Safety: checked by caller
173 unsafe {
174 let slice = slice::from_raw_parts(self.range.start.as_ptr(), self.range().size());
175
176 f(&slice[range]);
177 }
178
179 Ok(())
180 }
181
182 pub fn with_user_slice_mut<F>(
183 &mut self,
184 aspace: &mut AddressSpace,
185 range: Range<usize>,
186 f: F,
187 ) -> crate::Result<()>
188 where
189 F: FnOnce(&mut [u8]),
190 {
191 self.commit(aspace, range, true)?;
192 // Safety: user aspace also includes kernel mappings in higher half
193 unsafe {
194 aspace.arch.activate();
195 }
196
197 // Safety: checked by caller
198 unsafe {
199 let slice =
200 slice::from_raw_parts_mut(self.range.start.as_mut_ptr(), self.range().size());
201 f(&mut slice[range]);
202 }
203
204 Ok(())
205 }
206
207 /// Returns a pointer to the start of the memory mapped by this `Mmap`.
208 #[inline]
209 pub fn as_ptr(&self) -> *const u8 {
210 if self.range.is_empty() {
211 return ptr::null();
212 }
213
214 let ptr = self.range.start.as_ptr();
215 debug_assert!(!ptr.is_null());
216 ptr
217 }
218
219 /// Returns a mutable pointer to the start of the memory mapped by this `Mmap`.
220 #[inline]
221 pub fn as_mut_ptr(&mut self) -> *mut u8 {
222 if self.range.is_empty() {
223 return ptr::null_mut();
224 }
225
226 let ptr = self.range.start.as_mut_ptr();
227 debug_assert!(!ptr.is_null());
228 ptr
229 }
230
231 /// Returns the size in bytes of this memory mapping.
232 #[inline]
233 pub fn len(&self) -> usize {
234 // Safety: the constructor ensures that the NonNull is valid.
235 self.range.size()
236 }
237
238 /// Whether this is a mapping of zero bytes
239 #[inline]
240 pub fn is_empty(&self) -> bool {
241 self.len() == 0
242 }
243
244 /// Mark this memory mapping as executable (`RX`) this will by-design make it not-writable too.
245 pub fn make_executable(
246 &mut self,
247 aspace: &mut AddressSpace,
248 _branch_protection: bool,
249 ) -> crate::Result<()> {
250 tracing::trace!("UserMmap::make_executable: {:?}", self.range);
251 self.protect(aspace, Permissions::READ | Permissions::EXECUTE)
252 }
253
254 /// Mark this memory mapping as read-only (`R`) essentially removing the write permission.
255 pub fn make_readonly(&mut self, aspace: &mut AddressSpace) -> crate::Result<()> {
256 tracing::trace!("UserMmap::make_readonly: {:?}", self.range);
257 self.protect(aspace, Permissions::READ)
258 }
259
260 fn protect(
261 &mut self,
262 aspace: &mut AddressSpace,
263 new_permissions: Permissions,
264 ) -> crate::Result<()> {
265 if !self.range.is_empty() {
266 let mut cursor = aspace.regions.find_mut(&self.range.start);
267 let mut region = cursor.get_mut().unwrap();
268
269 region.permissions = new_permissions;
270
271 let mut flush = aspace.arch.new_flush();
272 // Safety: constructors ensure invariants are maintained
273 unsafe {
274 aspace.arch.update_flags(
275 self.range.start,
276 NonZeroUsize::new(self.range.size()).unwrap(),
277 new_permissions.into(),
278 &mut flush,
279 )?;
280 };
281 flush.flush()?;
282 }
283
284 Ok(())
285 }
286
287 pub fn commit(
288 &self,
289 aspace: &mut AddressSpace,
290 range: Range<usize>,
291 will_write: bool,
292 ) -> crate::Result<()> {
293 if !self.range.is_empty() {
294 let mut cursor = aspace.regions.find_mut(&self.range.start);
295
296 let src_range = Range {
297 start: self.range.start.checked_add(range.start).unwrap(),
298 end: self.range.end.checked_add(range.start).unwrap(),
299 };
300
301 let mut batch = Batch::new(&mut aspace.arch, aspace.frame_alloc);
302 cursor
303 .get_mut()
304 .unwrap()
305 .commit(&mut batch, src_range, will_write)?;
306 batch.flush()?;
307 }
308
309 Ok(())
310 }
311}
312
313impl Drop for Mmap {
314 fn drop(&mut self) {
315 // A `None` means the Mmap got created through `Mmap::new_empty` so there is nothing to unmap
316 if let Some(aspace) = &self.aspace {
317 let mut aspace = aspace.lock();
318 aspace.unmap(self.range).unwrap();
319 }
320 }
321}