//! Next Generation WASM Microkernel Operating System
1// Copyright 2025 Jonas Kruckenberg
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
// Mutex
// A mutual exclusion primitive useful for protecting shared data.
// MutexGuard
// An RAII implementation of a "scoped lock" of a mutex. When this structure is dropped (falls out of scope), the lock will be unlocked.
// MappedMutexGuard
// An RAII mutex guard returned by MutexGuard::map, which can point to a subfield of the protected data.
// ArcMutexGuard
// An RAII mutex guard returned by the Arc locking operations on Mutex.
16
17use crate::backoff::Backoff;
18use crate::loom::Ordering;
19use crate::loom::{AtomicBool, UnsafeCell};
20use core::marker::PhantomData;
21use core::ops::{Deref, DerefMut};
22use core::{fmt, mem};
23use util::loom_const_fn;
24
25/// A mutual exclusion primitive useful for protecting shared data
26///
27/// This mutex will block threads waiting for the lock to become available. The
28/// mutex can also be statically initialized or created via a `new`
29/// constructor. Each mutex has a type parameter which represents the data that
30/// it is protecting. The data can only be accessed through the RAII guards
31/// returned from `lock` and `try_lock`, which guarantees that the data is only
32/// ever accessed when the mutex is locked.
pub struct Mutex<T: ?Sized> {
    // `true` while some thread holds the lock. Acquired with a
    // compare-exchange (Acquire) in `lock`/`try_lock` and released with a
    // Release store in `force_unlock`.
    lock: AtomicBool,
    // The protected value. Only accessed through guards (or `get_mut`/
    // `into_inner`, which statically prove exclusive access).
    data: UnsafeCell<T>,
}
37
38/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
39/// dropped (falls out of scope), the lock will be unlocked.
40///
41/// The data protected by the mutex can be accessed through this guard via its
42/// `Deref` and `DerefMut` implementations.
#[clippy::has_significant_drop]
#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, T: ?Sized> {
    // The mutex this guard unlocks when dropped.
    mutex: &'a Mutex<T>,
    // Marker making the guard behave like `&'a mut T` for variance and
    // auto-trait purposes: it represents exclusive access to the data.
    marker: PhantomData<&'a mut T>,
}
49
50#[expect(clippy::undocumented_unsafe_blocks, reason = "")]
51unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
52#[expect(clippy::undocumented_unsafe_blocks, reason = "")]
53unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
54
impl<T> Mutex<T> {
    loom_const_fn! {
        // Creates a new mutex in an unlocked state, wrapping `val`.
        //
        // NOTE(review): presumably `loom_const_fn!` strips `const` when
        // building under loom (whose cell types are not const-constructible)
        // — confirm in the `util` crate. Plain `//` comments are used here
        // because doc attributes may not be accepted inside the macro.
        pub const fn new(val: T) -> Mutex<T> {
            Mutex {
                lock: AtomicBool::new(false),
                data: UnsafeCell::new(val),
            }
        }
    }

    /// Consumes this mutex, returning the underlying data.
    ///
    /// Taking `self` by value statically proves no guards exist, so no
    /// locking is required.
    #[inline]
    pub fn into_inner(self) -> T {
        self.data.into_inner()
    }
}
71
72impl<T: ?Sized> Mutex<T> {
73 /// Creates a new `MutexGuard` without checking if the mutex is locked.
74 ///
75 /// # Safety
76 ///
77 /// This method must only be called if the thread logically holds the lock.
78 ///
79 /// Calling this function when a guard has already been produced is undefined behaviour unless
80 /// the guard was forgotten with `mem::forget`.
81 #[inline]
82 pub unsafe fn make_guard_unchecked(&self) -> MutexGuard<'_, T> {
83 MutexGuard {
84 mutex: self,
85 marker: PhantomData,
86 }
87 }
88
89 /// Acquires a mutex, blocking the current thread until it is able to do so.
90 ///
91 /// This function will block the local thread until it is available to acquire
92 /// the mutex. Upon returning, the thread is the only thread with the mutex
93 /// held. An RAII guard is returned to allow scoped unlock of the lock. When
94 /// the guard goes out of scope, the mutex will be unlocked.
95 ///
96 /// Attempts to lock a mutex in the thread which already holds the lock will
97 /// result in a deadlock.
98 #[inline]
99 pub fn lock(&self) -> MutexGuard<'_, T> {
100 let mut boff = Backoff::default();
101 while self
102 .lock
103 .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
104 .is_err()
105 {
106 while self.is_locked() {
107 boff.spin();
108 }
109 }
110
111 // Safety: The lock is held, as required.
112 unsafe { self.make_guard_unchecked() }
113 }
114
115 /// Try to lock this mutex, returning a lock guard if successful.
116 ///
117 /// Like [`Self::lock`] the lock will be unlocked when the guard is dropped, but *unlike*
118 /// [`Self::lock`] this method never blocks.
119 #[inline]
120 pub fn try_lock(&self) -> Option<MutexGuard<'_, T>> {
121 if self
122 .lock
123 .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
124 .is_ok()
125 {
126 // SAFETY: The lock is held, as required.
127 Some(unsafe { self.make_guard_unchecked() })
128 } else {
129 None
130 }
131 }
132
133 /// Try to lock this mutex, returning a lock guard if successful.
134 ///
135 /// Like [`Self::lock`] the lock will be unlocked when the guard is dropped, but *unlike*
136 /// [`Self::lock`] this method never blocks.
137 ///
138 /// Unlike [`Self::try_lock`] this method can spuriously fail even if the mutex is unlocked,
139 /// this makes this method only suitable to be called in loops or similar scenarios, but might
140 /// result in more efficient code on some platforms.
141 #[inline]
142 pub fn try_lock_weak(&self) -> Option<MutexGuard<'_, T>> {
143 if self
144 .lock
145 .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
146 .is_ok()
147 {
148 // SAFETY: The lock is held, as required.
149 Some(unsafe { self.make_guard_unchecked() })
150 } else {
151 None
152 }
153 }
154
155 /// Returns a mutable reference to the underlying data.
156 ///
157 /// Since this call borrows the `Mutex` mutably, no actual locking needs to
158 /// take place---the mutable borrow statically guarantees no locks exist.
159 #[inline]
160 pub fn get_mut(&mut self) -> &mut T {
161 // Safety: We hold a mutable reference to the Mutex so getting a mutable reference to the
162 // data is safe
163 self.data.with_mut(|data| unsafe { &mut *data })
164 }
165
166 /// Checks whether the mutex is currently locked.
167 #[inline]
168 pub fn is_locked(&self) -> bool {
169 self.lock.load(Ordering::Relaxed)
170 }
171
172 /// Forcibly unlocks the mutex.
173 ///
174 /// This is useful when combined with `mem::forget` to hold a lock without
175 /// the need to maintain a `MutexGuard` object alive, for example when
176 /// dealing with FFI.
177 ///
178 /// # Safety
179 ///
180 /// This method must only be called if the current thread logically owns a
181 /// `MutexGuard` but that guard has been discarded using `mem::forget`.
182 /// Behavior is undefined if a mutex is unlocked when not locked.
183 #[inline]
184 pub unsafe fn force_unlock(&self) {
185 self.lock.store(false, Ordering::Release);
186 }
187}
188
189impl<T: Default> Default for Mutex<T> {
190 #[inline]
191 fn default() -> Mutex<T> {
192 Mutex::new(Default::default())
193 }
194}
195
196impl<T> From<T> for Mutex<T> {
197 #[inline]
198 fn from(t: T) -> Mutex<T> {
199 Mutex::new(t)
200 }
201}
202
203impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
204 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
205 match self.try_lock() {
206 Some(guard) => f.debug_struct("Mutex").field("data", &&*guard).finish(),
207 None => {
208 struct LockedPlaceholder;
209 impl fmt::Debug for LockedPlaceholder {
210 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
211 f.write_str("<locked>")
212 }
213 }
214
215 f.debug_struct("Mutex")
216 .field("data", &LockedPlaceholder)
217 .finish()
218 }
219 }
220 }
221}
222
223#[expect(clippy::undocumented_unsafe_blocks, reason = "")]
224unsafe impl<'a, T: ?Sized + Sync + 'a> Sync for MutexGuard<'a, T> {}
225
impl<'a, T: ?Sized + 'a> MutexGuard<'a, T> {
    /// Returns a reference to the original `Mutex` object.
    ///
    /// An associated function (taking `s: &Self`) rather than a method, so
    /// it cannot shadow a method named `mutex` on `T` reachable via `Deref`.
    pub fn mutex(s: &Self) -> &'a Mutex<T> {
        s.mutex
    }

    /// Leaks the mutex guard and returns a mutable reference to the data
    /// protected by the mutex.
    ///
    /// This will leave the `Mutex` in a locked state.
    #[inline]
    pub fn leak(s: Self) -> &'a mut T {
        // Safety: MutexGuard always holds the lock, so it is safe to access the data
        let r = s.mutex.data.with_mut(|r| unsafe { &mut *r });
        // Skip the guard's Drop so the lock is never released; the lock stays
        // held for 'a, keeping `r` exclusive.
        mem::forget(s);
        r
    }

    /// Temporarily unlocks the mutex, runs `f`, then re-acquires the lock
    /// before returning `f`'s result.
    ///
    /// The guard is mutably borrowed for the whole call, so the protected
    /// data cannot be accessed while the lock is released.
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Re-locks the mutex when dropped — including during unwinding, so
        // the outer guard's invariant (lock held) is restored even if `f`
        // panics.
        struct DropGuard<'a, T: ?Sized> {
            mutex: &'a Mutex<T>,
        }
        impl<T: ?Sized> Drop for DropGuard<'_, T> {
            fn drop(&mut self) {
                // Forget the freshly acquired guard: ownership of the lock
                // passes back to the outer MutexGuard.
                mem::forget(self.mutex.lock());
            }
        }

        // Safety: A MutexGuard always holds the lock.
        unsafe {
            s.mutex.force_unlock();
        }
        let _guard = DropGuard { mutex: s.mutex };
        f()
    }
}
265
266impl<'a, T: ?Sized + 'a> Deref for MutexGuard<'a, T> {
267 type Target = T;
268 #[inline]
269 fn deref(&self) -> &T {
270 // Safety: MutexGuard always holds the lock, so it is safe to access the data
271 self.mutex.data.with(|data| unsafe { &*data })
272 }
273}
274
275impl<'a, T: ?Sized + 'a> DerefMut for MutexGuard<'a, T> {
276 #[inline]
277 fn deref_mut(&mut self) -> &mut T {
278 // Safety: MutexGuard always holds the lock, so it is safe to access the data
279 self.mutex.data.with_mut(|data| unsafe { &mut *data })
280 }
281}
282
283impl<'a, T: ?Sized + 'a> Drop for MutexGuard<'a, T> {
284 #[inline]
285 fn drop(&mut self) {
286 // Safety: A MutexGuard always holds the lock.
287 unsafe {
288 self.mutex.force_unlock();
289 }
290 }
291}
292
293impl<'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MutexGuard<'a, T> {
294 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
295 fmt::Debug::fmt(&**self, f)
296 }
297}
298
299impl<'a, T: fmt::Display + ?Sized + 'a> fmt::Display for MutexGuard<'a, T> {
300 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
301 (**self).fmt(f)
302 }
303}
304
#[cfg(feature = "lock_api")]
// SAFETY: `Mutex<()>` provides mutual exclusion: `lock`/`try_lock` acquire
// the flag with Acquire ordering and `unlock` releases it with Release
// ordering, satisfying the `RawMutex` contract. The guard handed out by
// `lock_api` may be sent between threads (`GuardSend`) because `unlock`
// does not depend on thread identity.
unsafe impl lock_api::RawMutex for Mutex<()> {
    #[allow(
        clippy::declare_interior_mutable_const,
        reason = "lock_api requires an `INIT` const; every use of it creates a fresh, unshared mutex"
    )]
    const INIT: Self = Mutex::new(());
    type GuardMarker = lock_api::GuardSend;

    fn lock(&self) {
        // Keep the lock held after returning by forgetting the guard; the
        // matching `unlock` call releases it.
        mem::forget(Mutex::lock(self));
    }

    fn try_lock(&self) -> bool {
        let guard = Mutex::try_lock(self);
        let acquired = guard.is_some();
        // Forget the guard (if any) so the lock stays held for the caller.
        mem::forget(guard);
        acquired
    }

    unsafe fn unlock(&self) {
        // Safety: the `RawMutex` contract guarantees the caller holds the lock.
        unsafe {
            Mutex::force_unlock(self);
        }
    }

    fn is_locked(&self) -> bool {
        Mutex::is_locked(self)
    }
}
335
#[cfg(test)]
mod tests {
    use super::*;
    use crate::loom::Arc;
    use crate::loom::AtomicUsize;
    use core::fmt::Debug;
    use std::{hint, mem};

    // A value without Copy or Drop, to exercise move semantics.
    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    // Like NonCopy but with a (side-effect-free) Drop impl.
    #[derive(Eq, PartialEq, Debug)]
    struct NonCopyNeedsDrop(i32);

    impl Drop for NonCopyNeedsDrop {
        fn drop(&mut self) {
            hint::black_box(());
        }
    }

    // Sanity-check the fixtures above: one must be trivially droppable,
    // the other must require Drop glue.
    #[test]
    fn test_needs_drop() {
        assert!(!mem::needs_drop::<NonCopy>());
        assert!(mem::needs_drop::<NonCopyNeedsDrop>());
    }

    // Locking and unlocking repeatedly on one thread must not deadlock.
    #[test]
    fn smoke() {
        let m = Mutex::new(());
        drop(m.lock());
        drop(m.lock());
    }

    // try_lock: succeeds when free, fails while held, succeeds again after
    // the guard is dropped.
    #[test]
    fn try_lock() {
        let mutex = Mutex::<_>::new(42);

        // First lock succeeds
        let a = mutex.try_lock();
        assert_eq!(a.as_ref().map(|r| **r), Some(42));

        // Additional lock fails
        let b = mutex.try_lock();
        assert!(b.is_none());

        // After dropping lock, it succeeds again
        drop(a);
        let c = mutex.try_lock();
        assert_eq!(c.as_ref().map(|r| **r), Some(42));
    }

    // into_inner moves the protected value out without locking.
    #[test]
    fn test_into_inner() {
        let m = Mutex::<_>::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    // into_inner must not run the value's destructor; only dropping the
    // returned value does.
    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = Mutex::<_>::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    // The mutex works with unsized (slice) payloads behind a reference.
    #[test]
    fn test_mutex_unsized() {
        let mutex: &Mutex<[i32]> = &Mutex::<_>::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    // A forgotten guard keeps the lock held until force_unlock releases it.
    #[test]
    fn test_mutex_force_lock() {
        let lock = Mutex::<_>::new(());
        mem::forget(lock.lock());
        unsafe {
            lock.force_unlock();
        }
        assert!(lock.try_lock().is_some());
    }

    // get_mut gives lock-free access through an exclusive borrow.
    #[test]
    fn test_get_mut() {
        let mut m = Mutex::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }

    // Loom model: two concurrent increments must both be observed — the
    // model checker explores all interleavings of the lock operations.
    #[test]
    fn basic_multi_threaded() {
        use crate::loom::{self, Arc, thread};

        #[allow(tail_expr_drop_order)]
        fn incr(lock: &Arc<Mutex<i32>>) -> thread::JoinHandle<()> {
            let lock = lock.clone();
            thread::spawn(move || {
                let mut lock = lock.lock();
                *lock += 1;
            })
        }

        loom::model(|| {
            let lock = Arc::new(Mutex::new(0));
            let t1 = incr(&lock);
            let t2 = incr(&lock);

            t1.join().unwrap();
            t2.join().unwrap();

            thread::spawn(move || {
                let lock = lock.lock();
                assert_eq!(*lock, 2)
            });
        });
    }
}
467}