// Next Generation WASM Microkernel Operating System
1// Copyright 2025 Jonas Kruckenberg
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8use crate::arch;
9use crate::mem::VirtualAddress;
10use crate::wasm::TrapKind;
11use crate::wasm::code_registry::lookup_code;
12use crate::wasm::store::StoreOpaque;
13use crate::wasm::vm::{VMContext, VMStoreContext};
14use alloc::boxed::Box;
15use alloc::vec;
16use alloc::vec::Vec;
17use core::cell::Cell;
18use core::num::NonZeroU32;
19use core::ops::ControlFlow;
20use core::panic::AssertUnwindSafe;
21use core::ptr::NonNull;
22use core::{fmt, ptr};
23use cpu_local::cpu_local;
24
/// Description about a fault that occurred in WebAssembly.
#[derive(Debug)]
pub struct WasmFault {
    /// The size of memory, in bytes, at the time of the fault.
    pub memory_size: usize,
    /// The WebAssembly address at which the fault occurred.
    ///
    /// NOTE(review): presumably an address within the Wasm linear-memory
    /// index space rather than a host virtual address (the `Display` impl
    /// reports it alongside the linear memory size) — confirm.
    pub wasm_address: u64,
}
33
34impl fmt::Display for WasmFault {
35 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
36 write!(
37 f,
38 "memory fault at wasm address 0x{:x} in linear memory of size 0x{:x}",
39 self.wasm_address, self.memory_size,
40 )
41 }
42}
43
/// A caught WebAssembly trap, as returned by `catch_traps`.
#[derive(Debug)]
pub struct Trap {
    /// Original reason from where this trap originated.
    pub reason: TrapReason,
    /// Wasm backtrace of the trap, if any.
    pub backtrace: Option<RawBacktrace>,
    // The Wasm Coredump, if any.
    // pub coredumpstack: Option<CoreDumpStack>,
}
53
/// The source a trap originated from.
#[derive(Debug)]
pub enum TrapReason {
    /// A user-raised trap through `raise_user_trap`.
    User(anyhow::Error),

    /// A trap raised from Cranelift-generated code.
    Jit {
        /// The program counter where this trap originated.
        ///
        /// This is later used with side tables from compilation to translate
        /// the trapping address to a trap code.
        pc: VirtualAddress,

        /// If the trap was a memory-related trap such as SIGSEGV then this
        /// field will contain the address of the inaccessible data.
        ///
        /// Note that wasm loads/stores are not guaranteed to fill in this
        /// information. Dynamically-bounds-checked memories, for example, will
        /// not access an invalid address but may instead load from NULL or may
        /// explicitly jump to a `ud2` instruction. This is only available for
        /// fault-based traps which are one of the main ways, but not the only
        /// way, to run wasm.
        faulting_addr: Option<VirtualAddress>,

        /// The trap code associated with this trap.
        trap: TrapKind,
    },

    /// A trap raised from a wasm builtin
    Wasm(TrapKind),
}
85
86impl From<anyhow::Error> for TrapReason {
87 fn from(err: anyhow::Error) -> Self {
88 TrapReason::User(err)
89 }
90}
91
92impl From<TrapKind> for TrapReason {
93 fn from(code: TrapKind) -> Self {
94 TrapReason::Wasm(code)
95 }
96}
97
/// Why execution is unwinding back across the host/Wasm boundary.
pub enum UnwindReason {
    /// A Rust panic crossed the boundary; the payload is later resumed via
    /// `panic_unwind2::resume_unwind` in `catch_traps`.
    Panic(Box<dyn core::any::Any + Send>),
    /// A trap that will be surfaced to the caller as a [`Trap`].
    Trap(TrapReason),
}
102
/// Unwinds to the innermost `catch_traps` frame on this CPU by longjmp-ing
/// through the current activation's jump buffer. Never returns.
///
/// # Safety
///
/// The caller must ensure we are currently inside a `catch_traps` invocation,
/// i.e. that `ACTIVATION` holds a non-null pointer to a live [`Activation`]
/// (the `unwrap` below panics otherwise), and that an unwind reason has
/// already been recorded for `catch_traps` to consume.
pub(in crate::wasm) unsafe fn raise_preexisting_trap() -> ! {
    // Safety: ensured by caller
    unsafe {
        let activation = ACTIVATION.get().as_ref().unwrap();
        activation.unwind()
    }
}
110
/// Invokes `f` with the store's default caller `VMContext`, catching any Wasm
/// trap (or host panic) raised while it runs.
///
/// A fresh [`Activation`] is installed as the head of the CPU-local
/// `ACTIVATION` list for the duration of the call. Trap handlers longjmp back
/// to the `call_with_setjmp` frame below, making it return non-zero.
///
/// Returns `Ok(())` if `f` completed normally and `Err(Trap)` if a trap was
/// recorded; a recorded Rust panic is resumed on the host side instead of
/// being returned.
pub fn catch_traps<F>(store: &mut StoreOpaque, f: F) -> Result<(), Trap>
where
    F: FnOnce(NonNull<VMContext>),
{
    let caller = store.default_caller();
    let mut prev_state = ptr::null_mut();
    let ret_code = arch::call_with_setjmp(|jmp_buf| {
        let mut activation = Activation::new(store, jmp_buf);

        // Install this activation as the new list head, remembering the old
        // head so it can be restored once the call finishes (either way).
        prev_state = ACTIVATION.replace(ptr::from_mut(&mut activation).cast());
        f(caller);

        // 0 signals a normal return; a longjmp from a trap handler makes
        // `call_with_setjmp` return a non-zero value instead.
        0_i32
    });

    if ret_code == 0 {
        ACTIVATION.set(prev_state);

        Ok(())
    } else {
        // A trap handler longjmp'd back here: read the unwind reason and
        // backtrace the handler stored in the activation (still present in
        // the not-yet-reused stack region) before restoring the old head.
        #[expect(clippy::undocumented_unsafe_blocks, reason = "")]
        let (unwind_reason, backtrace) = unsafe { ACTIVATION.get().as_ref() }
            .unwrap()
            .unwind
            .take()
            .unwrap();
        ACTIVATION.set(prev_state);

        match unwind_reason {
            UnwindReason::Trap(reason) => Err(Trap { reason, backtrace }),
            UnwindReason::Panic(payload) => panic_unwind2::resume_unwind(payload),
        }
    }
}
145
// Head of the per-CPU intrusive list of live `Activation`s (newest first,
// linked through `Activation::prev`); null when no Wasm call is in progress
// on this CPU.
cpu_local! {
    static ACTIVATION: Cell<*mut Activation> = Cell::new(ptr::null_mut())
}
149
/// Bookkeeping for one native-to-Wasm call currently on this CPU's stack.
///
/// Activations are stack-allocated and form an intrusive linked list (newest
/// first, linked through `prev`) rooted at the CPU-local `ACTIVATION`
/// pointer:
///
/// ```text
/// ┌─────────────────────┐◄───── highest, or oldest, stack address
/// │ native stack frames │
/// │ ... │
/// │ ┌───────────────┐◄─┼──┐
/// │ │ Activation │ │ │
/// │ └───────────────┘ │ p
/// ├─────────────────────┤ r
/// │ wasm stack frames │ e
/// │ ... │ v
/// ├─────────────────────┤ │
/// │ native stack frames │ │
/// │ ... │ │
/// │ ┌───────────────┐◄─┼──┼── TLS pointer
/// │ │ Activation ├──┼──┘
/// │ └───────────────┘ │
/// ├─────────────────────┤
/// │ wasm stack frames │
/// │ ... │
/// ├─────────────────────┤
/// │ native stack frames │
/// │ ... │
/// └─────────────────────┘◄───── smallest, or youngest, stack address
/// ```
pub struct Activation {
    /// Unwind reason plus optional backtrace, set by a trap/panic handler
    /// before longjmp-ing; consumed by `catch_traps` after the jump returns.
    unwind: Cell<Option<(UnwindReason, Option<RawBacktrace>)>>,
    /// The setjmp buffer to longjmp to when unwinding this activation.
    jmp_buf: arch::JmpBuf,
    /// Next-older activation in the per-CPU list, or null at the end.
    prev: Cell<*mut Activation>,
    /// The store context of the store this activation executes within.
    vm_store_context: NonNull<VMStoreContext>,

    // async_guard_range: Range<*mut u8>,

    // vmctx: *mut VMContext,
    // vmoffsets: VMOffsets,

    // The values of `VMRuntimeLimits::last_wasm_{exit_{pc,fp},entry_sp}`
    // for the *previous* `CallThreadState` for this same store/limits. Our
    // *current* last wasm PC/FP/SP are saved in `self.limits`. We save a
    // copy of the old registers here because the `VMRuntimeLimits`
    // typically doesn't change across nested calls into Wasm (i.e. they are
    // typically calls back into the same store and `self.limits ==
    // self.prev.limits`) and we must to maintain the list of
    // contiguous-Wasm-frames stack regions for backtracing purposes.
    old_last_wasm_exit_fp: Cell<VirtualAddress>,
    old_last_wasm_exit_pc: Cell<VirtualAddress>,
    old_last_wasm_entry_fp: Cell<VirtualAddress>,
}
197
impl Activation {
    /// Creates a new activation for a call out of `store`, snapshotting the
    /// store's current `last_wasm_{exit_fp,exit_pc,entry_fp}` values so they
    /// can be restored when this activation is dropped.
    ///
    /// Note that `prev` is initialized to the current `ACTIVATION` head here;
    /// `catch_traps` installs the new activation with `ACTIVATION.replace`
    /// rather than via [`Self::push`].
    pub fn new(store: &mut StoreOpaque, jmp_buf: &arch::JmpBufStruct) -> Self {
        Self {
            unwind: Cell::new(None),
            jmp_buf: ptr::from_ref(jmp_buf),
            prev: Cell::new(ACTIVATION.get()),

            vm_store_context: store.vm_store_context_ptr(),
            // Safety: we hold `&mut StoreOpaque`, so nothing else can be
            // concurrently accessing the store context's cells.
            old_last_wasm_exit_fp: Cell::new(unsafe {
                *store.vm_store_context().last_wasm_exit_fp.get()
            }),
            // Safety: see `old_last_wasm_exit_fp` above.
            old_last_wasm_exit_pc: Cell::new(unsafe {
                *store.vm_store_context().last_wasm_exit_pc.get()
            }),
            // Safety: see `old_last_wasm_exit_fp` above.
            old_last_wasm_entry_fp: Cell::new(unsafe {
                *store.vm_store_context().last_wasm_entry_fp.get()
            }),
        }
    }

    /// Iterates over this activation and all older ones on this CPU,
    /// newest first.
    fn iter(&self) -> impl Iterator<Item = &Self> {
        let mut state = Some(self);
        core::iter::from_fn(move || {
            let this = state?;
            // Safety: `prev` is always either a null ptr (indicating the end of the list) or a valid pointer to a `CallThreadState`.
            // This is ensured by the `push` method.
            state = unsafe { this.prev.get().as_ref() };
            Some(this)
        })
    }

    /// Installs `self` as the head of the CPU-local activation list.
    ///
    /// # Safety
    ///
    /// `self` must not already be linked (asserted below) and must be
    /// unlinked again via [`Self::pop`] before it goes out of scope.
    #[inline]
    pub(crate) unsafe fn push(&mut self) {
        assert!(self.prev.get().is_null());
        let prev = ACTIVATION.replace(ptr::from_mut(self));
        self.prev.set(prev);
    }

    /// Unlinks `self` from the head of the CPU-local activation list,
    /// asserting that it actually was the head.
    #[inline]
    pub(crate) unsafe fn pop(&self) {
        let prev = self.prev.replace(ptr::null_mut());
        let head = ACTIVATION.replace(prev);
        assert!(ptr::eq(head, self));
    }

    /// Takes the previously recorded unwind reason and backtrace, panicking
    /// if none was recorded.
    #[cold]
    fn read_unwind(&self) -> (UnwindReason, Option<RawBacktrace>) {
        self.unwind.replace(None).unwrap()
    }

    /// Records `reason` (plus, for traps, a freshly captured backtrace) so
    /// that `catch_traps` can pick it up after the longjmp.
    fn record_unwind(&self, reason: UnwindReason) {
        if cfg!(debug_assertions) {
            // Recording twice without consuming would silently drop the
            // first unwind; catch that in debug builds.
            let prev = self.unwind.replace(None);
            assert!(prev.is_none());
        }
        let backtrace = match &reason {
            // Panics don't need backtraces. There is nowhere to attach the
            // hypothetical backtrace to and it doesn't really make sense to try
            // in the first place since this is a Rust problem rather than a
            // Wasm problem.
            UnwindReason::Panic(_) => None,
            // // And if we are just propagating an existing trap that already has
            // // a backtrace attached to it, then there is no need to capture a
            // // new backtrace either.
            // UnwindReason::Trap(TrapReason::User(err))
            //     if err.downcast_ref::<RawBacktrace>().is_some() =>
            // {
            //     (None, None)
            // }
            UnwindReason::Trap(_) => {
                Some(self.capture_backtrace(self.vm_store_context.as_ptr(), None))
            } // self.capture_coredump(self.vm_store_context.as_ptr(), None),
        };
        self.unwind.set(Some((reason, backtrace)));
    }

    /// Longjmps back to the `call_with_setjmp` frame of the enclosing
    /// `catch_traps`. Never returns.
    ///
    /// # Safety
    ///
    /// The stack frame holding the jump buffer must still be live.
    unsafe fn unwind(&self) -> ! {
        // Safety: ensured by caller
        unsafe {
            debug_assert!(!self.jmp_buf.is_null());
            arch::longjmp(self.jmp_buf, 1);
        }
    }

    /// Captures a backtrace of the Wasm frames reachable from this
    /// activation; `trap_pc_and_fp` plumbs through the faulting pc/fp when
    /// the capture is triggered by a trap.
    fn capture_backtrace(
        &self,
        vm_store_context: *mut VMStoreContext,
        trap_pc_and_fp: Option<(VirtualAddress, VirtualAddress)>,
    ) -> RawBacktrace {
        RawBacktrace::new(vm_store_context, self, trap_pc_and_fp)
    }
}
293
impl Drop for Activation {
    /// Restores the store context's saved Wasm exit/entry registers to the
    /// values snapshotted in [`Activation::new`].
    fn drop(&mut self) {
        // Unwind information should not be present as it should have
        // already been processed.
        debug_assert!(self.unwind.replace(None).is_none());

        // Safety: `vm_store_context` was obtained from the store in
        // `Activation::new`; presumably the store outlives this activation —
        // TODO confirm and document the lifetime argument.
        unsafe {
            let cx = self.vm_store_context.as_ref();
            *cx.last_wasm_exit_fp.get() = self.old_last_wasm_exit_fp.get();
            *cx.last_wasm_exit_pc.get() = self.old_last_wasm_exit_pc.get();
            *cx.last_wasm_entry_fp.get() = self.old_last_wasm_entry_fp.get();
        }
    }
}
309
310pub fn catch_unwind_and_record_trap<R>(f: impl FnOnce() -> R) -> R::Abi
311where
312 R: HostResult,
313{
314 let (ret, unwind) = R::maybe_catch_unwind(f);
315 if let Some(unwind) = unwind {
316 // Safety: TODO
317 let activation = unsafe { ACTIVATION.get().as_ref().unwrap() };
318 activation.record_unwind(unwind);
319 }
320
321 ret
322}
323
/// A trait used in conjunction with `catch_unwind_and_record_trap` to convert a
/// Rust-based type to a specific ABI while handling traps/unwinds.
///
/// This trait is implemented for return values from host function calls and
/// builtins. The `Abi` value of this trait represents either a successful
/// execution with some payload state or that a failed execution happened.
/// Cranelift-compiled code is expected to test for this failure sentinel
/// and process it accordingly.
pub trait HostResult {
    /// The type of the value that's returned to Cranelift-compiled code. Needs
    /// to be ABI-safe to pass through an `extern "C"` return value.
    type Abi: Copy;
    /// Runs `f` and converts its result to the ABI value, returning any
    /// captured unwind reason alongside it (implementations that cannot
    /// unwind always return `None`).
    fn maybe_catch_unwind(f: impl FnOnce() -> Self) -> (Self::Abi, Option<UnwindReason>);
}
337
338// Base case implementations that do not catch unwinds. These are for libcalls
339// that neither trap nor execute user code. The raw value is the ABI itself.
340//
341// Panics in these libcalls will result in a process abort as unwinding is not
342// allowed via Rust through `extern "C"` function boundaries.
// Expands to a pass-through `HostResult` impl for each listed type: the value
// itself is the ABI and no unwind is ever reported.
macro_rules! host_result_no_catch {
    ($($t:ty,)*) => {
        $(
            #[allow(clippy::unused_unit, reason = "the empty tuple case generates an empty tuple as return value, which makes clippy mad but thats fine")]
            impl HostResult for $t {
                type Abi = $t;
                fn maybe_catch_unwind(f: impl FnOnce() -> $t) -> ($t, Option<UnwindReason>) {
                    (f(), None)
                }
            }
        )*
    }
}
356
// Primitive return types that are already ABI-safe and whose producers never
// trap or run user code.
host_result_no_catch! {
    (),
    bool,
    u32,
    *mut u8,
    u64,
}
364
365impl HostResult for NonNull<u8> {
366 type Abi = *mut u8;
367 fn maybe_catch_unwind(f: impl FnOnce() -> Self) -> (*mut u8, Option<UnwindReason>) {
368 (f().as_ptr(), None)
369 }
370}
371
impl<T, E> HostResult for Result<T, E>
where
    T: HostResultHasUnwindSentinel,
    E: Into<TrapReason>,
{
    type Abi = T::Abi;

    /// Runs `f`, mapping `Ok` through `into_abi` and converting `Err` into a
    /// recorded trap plus the sentinel ABI value. Rust panics raised inside
    /// `f` are caught here so they never unwind across the `extern "C"`
    /// boundary into Cranelift-generated code.
    fn maybe_catch_unwind(f: impl FnOnce() -> Self) -> (Self::Abi, Option<UnwindReason>) {
        let f = move || match f() {
            Ok(ret) => (ret.into_abi(), None),
            // Failure: hand back the sentinel and remember the trap reason.
            Err(reason) => (T::SENTINEL, Some(UnwindReason::Trap(reason.into()))),
        };

        // A caught panic also yields the sentinel, with the payload kept so
        // `catch_traps` can resume the panic on the host side.
        panic_unwind2::catch_unwind(AssertUnwindSafe(f))
            .unwrap_or_else(|payload| (T::SENTINEL, Some(UnwindReason::Panic(payload))))
    }
}
389
/// Trait used in conjunction with `HostResult for Result<T, E>` where this is
/// the trait bound on `T`.
///
/// This is for values in the "ok" position of a `Result` return value. Each
/// value can have a separate ABI from itself (e.g. `type Abi`) and must be
/// convertible to the ABI. Additionally all implementations of this trait have
/// a "sentinel value" which indicates that an unwind happened. This means that
/// no valid instance of `Self` should generate the `SENTINEL` via the
/// `into_abi` function.
///
/// # Safety
///
/// Implementations must guarantee that `into_abi` can never return the
/// `SENTINEL` value for any live value of `Self`: Cranelift-generated code
/// distinguishes "success" from "unwind" solely by comparing the returned ABI
/// value against `SENTINEL`, so a collision would make a successful call look
/// like a trap.
pub unsafe trait HostResultHasUnwindSentinel {
    /// The Cranelift-understood ABI of this value (should not be `Self`).
    type Abi: Copy;

    /// A value that indicates that an unwind should happen and is tested for in
    /// Cranelift-generated code.
    const SENTINEL: Self::Abi;

    /// Converts this value into the ABI representation. Must never return
    /// the `SENTINEL` value.
    fn into_abi(self) -> Self::Abi;
}
415
/// No return value from the host is represented as a `bool` in the ABI. Here
/// `true` means that execution succeeded while `false` is the sentinel used to
/// indicate an unwind.
// Safety: `into_abi` unconditionally returns `true`, which can never equal
// the `false` sentinel.
unsafe impl HostResultHasUnwindSentinel for () {
    type Abi = bool;
    const SENTINEL: bool = false;
    fn into_abi(self) -> bool {
        true
    }
}
427
// Safety: a `NonZeroU32` is never zero by construction, so `get()` can never
// produce the `0` sentinel.
unsafe impl HostResultHasUnwindSentinel for NonZeroU32 {
    type Abi = u32;
    const SENTINEL: Self::Abi = 0;
    fn into_abi(self) -> Self::Abi {
        self.get()
    }
}
436
/// A 32-bit return value can be inflated to a 64-bit return value in the ABI.
/// In this manner a successful result is a zero-extended 32-bit value and the
/// failure sentinel is `u64::MAX` or -1 as a signed integer.
// Safety: zero-extending a `u32` yields at most `u32::MAX as u64`
// (0xFFFF_FFFF), which is always strictly less than the `u64::MAX` sentinel.
unsafe impl HostResultHasUnwindSentinel for u32 {
    type Abi = u64;
    const SENTINEL: u64 = u64::MAX;
    fn into_abi(self) -> u64 {
        self.into()
    }
}
448
/// If there is not actual successful result (e.g. an empty enum) then the ABI
/// can be `()`, or nothing, because there's no successful result and it's
/// always a failure.
// Safety: `Infallible` is uninhabited, so `into_abi` can never even be
// called (the empty match below proves this), let alone return the sentinel.
unsafe impl HostResultHasUnwindSentinel for core::convert::Infallible {
    type Abi = ();
    const SENTINEL: () = ();
    fn into_abi(self) {
        match self {}
    }
}
460
/// A captured, unsymbolicated Wasm backtrace: the `(pc, fp)` of each Wasm
/// frame found while walking the stack, innermost frame first (see
/// `RawBacktrace::new`).
#[derive(Debug)]
pub struct RawBacktrace(Vec<Frame>);
463
/// A stack frame within a Wasm stack trace.
#[derive(Debug)]
pub struct Frame {
    /// The program counter within this frame.
    pub pc: VirtualAddress,
    /// This frame's frame pointer.
    pub fp: VirtualAddress,
}
470
impl RawBacktrace {
    /// Captures a new backtrace by walking all Wasm frame regions reachable
    /// from `activation` and collecting every visited frame.
    fn new(
        vm_store_context: *const VMStoreContext,
        activation: &Activation,
        trap_pc_and_fp: Option<(VirtualAddress, VirtualAddress)>,
    ) -> Self {
        let mut frames = vec![];
        Self::trace_with_trap_state(vm_store_context, activation, trap_pc_and_fp, |frame| {
            frames.push(frame);
            ControlFlow::Continue(())
        });
        Self(frames)
    }

    /// Walk the current Wasm stack, calling `f` for each frame we walk.
    ///
    /// `trap_pc_and_fp` carries the faulting pc/fp when this capture was
    /// triggered by a trap (the Wasm-to-host trampoline did not run in that
    /// case); `None` means the last-exit values saved in the store context
    /// are used instead.
    pub(crate) fn trace_with_trap_state(
        vm_store_context: *const VMStoreContext,
        activation: &Activation,
        trap_pc_and_fp: Option<(VirtualAddress, VirtualAddress)>,
        mut f: impl FnMut(Frame) -> ControlFlow<()>,
    ) {
        // Safety: `vm_store_context` must be valid for reads, and the frame
        // pointers walked in `trace_through_wasm` are Cranelift-maintained
        // (see the long comment there) — TODO tighten this justification.
        unsafe {
            tracing::trace!("====== Capturing Backtrace ======");

            let (last_wasm_exit_pc, last_wasm_exit_fp) = match trap_pc_and_fp {
                // If we exited Wasm by catching a trap, then the Wasm-to-host
                // trampoline did not get a chance to save the last Wasm PC and FP,
                // and we need to use the plumbed-through values instead.
                Some((pc, fp)) => {
                    assert!(ptr::eq(
                        vm_store_context,
                        activation.vm_store_context.as_ptr()
                    ));
                    (pc, fp)
                }
                // Either there is no Wasm currently on the stack, or we exited Wasm
                // through the Wasm-to-host trampoline.
                None => {
                    let pc = *(*vm_store_context).last_wasm_exit_pc.get();
                    let fp = *(*vm_store_context).last_wasm_exit_fp.get();
                    (pc, fp)
                }
            };

            // One (pc, fp, entry_fp) triple per contiguous region of Wasm
            // frames: the current region first, then the regions snapshotted
            // by each older activation. A zero pc terminates the walk.
            let activations = core::iter::once((
                last_wasm_exit_pc,
                last_wasm_exit_fp,
                *(*vm_store_context).last_wasm_entry_fp.get(),
            ))
            .chain(activation.iter().map(|state| {
                (
                    state.old_last_wasm_exit_pc.get(),
                    state.old_last_wasm_exit_fp.get(),
                    state.old_last_wasm_entry_fp.get(),
                )
            }))
            .take_while(|&(pc, fp, sp)| {
                if pc.get() == 0 {
                    debug_assert_eq!(fp.get(), 0);
                    debug_assert_eq!(sp.get(), 0);
                }
                pc.get() != 0
            });

            for (pc, fp, sp) in activations {
                if let ControlFlow::Break(()) = Self::trace_through_wasm(pc, fp, sp, &mut f) {
                    tracing::trace!("====== Done Capturing Backtrace (closure break) ======");
                    return;
                }
            }

            tracing::trace!("====== Done Capturing Backtrace (reached end of activations) ======");
        }
    }

    /// Walk through a contiguous sequence of Wasm frames starting with the
    /// frame at the given PC and FP and ending at `trampoline_fp`.
    fn trace_through_wasm(
        mut pc: VirtualAddress,
        mut fp: VirtualAddress,
        trampoline_fp: VirtualAddress,
        mut f: impl FnMut(Frame) -> ControlFlow<()>,
    ) -> ControlFlow<()> {
        // NOTE(review): when `fp != trampoline_fp` this initial frame appears
        // to be visited again by the first iteration of the loop below —
        // confirm the duplicate report is intended (it covers the
        // `fp == trampoline_fp` tail-call case mentioned further down).
        f(Frame { pc, fp })?;

        tracing::trace!("=== Tracing through contiguous sequence of Wasm frames ===");
        tracing::trace!("trampoline_fp = {trampoline_fp}");
        tracing::trace!(" initial pc = {pc}");
        tracing::trace!(" initial fp = {fp}");

        // We already checked for this case in the `trace_with_trap_state`
        // caller.
        assert_ne!(pc.get(), 0);
        assert!(pc.is_canonical());
        assert_ne!(fp.get(), 0);
        assert!(fp.is_canonical());
        assert_ne!(trampoline_fp.get(), 0);
        assert!(trampoline_fp.is_canonical());

        // This loop will walk the linked list of frame pointers starting at
        // `fp` and going up until `trampoline_fp`. We know that both `fp` and
        // `trampoline_fp` are "trusted values" aka generated and maintained by
        // Cranelift. This means that it should be safe to walk the linked list
        // of pointers and inspect wasm frames.
        //
        // Note, though, that any frames outside of this range are not
        // guaranteed to have valid frame pointers. For example native code
        // might be using the frame pointer as a general purpose register. Thus
        // we need to be careful to only walk frame pointers in this one
        // contiguous linked list.
        //
        // To know when to stop iteration all architectures' stacks currently
        // look something like this:
        //
        //     | ...               |
        //     | Native Frames     |
        //     | ...               |
        //     |-------------------|
        //     | ...               | <-- Trampoline FP |
        //     | Trampoline Frame  |                   |
        //     | ...               | <-- Trampoline SP |
        //     |-------------------|            Stack
        //     | Return Address    |            Grows
        //     | Previous FP       | <-- Wasm FP Down
        //     | ...               |                   |
        //     | Wasm Frames       |                   |
        //     | ...               |                   V
        //
        // The trampoline records its own frame pointer (`trampoline_fp`),
        // which is guaranteed to be above all Wasm. To check when we've
        // reached the trampoline frame, it is therefore sufficient to
        // check when the next frame pointer is equal to `trampoline_fp`. Once
        // that's hit then we know that the entire linked list has been
        // traversed.
        //
        // Note that it might be possible that this loop doesn't execute at all.
        // For example if the entry trampoline called wasm which `return_call`'d
        // an imported function which is an exit trampoline, then
        // `fp == trampoline_fp` on the entry of this function, meaning the loop
        // won't actually execute anything.
        while fp != trampoline_fp {
            // At the start of each iteration of the loop, we know that `fp` is
            // a frame pointer from Wasm code. Therefore, we know it is not
            // being used as an extra general-purpose register, and it is safe
            // dereference to get the PC and the next older frame pointer.
            //
            // The stack also grows down, and therefore any frame pointer we are
            // dealing with should be less than the frame pointer on entry to
            // Wasm. Finally also assert that it's aligned correctly as an
            // additional sanity check.
            assert!(trampoline_fp > fp, "{trampoline_fp} > {fp}");
            arch::assert_fp_is_aligned(fp);

            tracing::trace!("--- Tracing through one Wasm frame ---");
            tracing::trace!("pc = {pc}");
            tracing::trace!("fp = {fp}");

            f(Frame { pc, fp })?;

            #[expect(clippy::undocumented_unsafe_blocks, reason = "")]
            unsafe {
                pc = arch::get_next_older_pc_from_fp(fp);
            }

            // We rely on this offset being zero for all supported architectures
            // in `crates/cranelift/src/component/compiler.rs` when we set the
            // Wasm exit FP. If this ever changes, we will need to update that
            // code as well!
            assert_eq!(arch::NEXT_OLDER_FP_FROM_FP_OFFSET, 0);

            // Get the next older frame pointer from the current Wasm frame
            // pointer.
            #[expect(clippy::undocumented_unsafe_blocks, reason = "")]
            #[expect(clippy::cast_ptr_alignment, reason = "")]
            let next_older_fp = unsafe {
                *fp.as_mut_ptr()
                    .cast::<VirtualAddress>()
                    .add(arch::NEXT_OLDER_FP_FROM_FP_OFFSET)
            };

            // Because the stack always grows down, the older FP must be greater
            // than the current FP.
            assert!(next_older_fp > fp, "{next_older_fp} > {fp}");
            fp = next_older_fp;
        }

        tracing::trace!("=== Done tracing contiguous sequence of Wasm frames ===");
        ControlFlow::Continue(())
    }

    /// Iterate over the frames inside this backtrace.
    pub fn frames(&self) -> impl ExactSizeIterator<Item = &Frame> + DoubleEndedIterator {
        self.0.iter()
    }
}
667
/// Exception-handler hook: decides whether a CPU fault at `pc` belongs to
/// JIT-compiled Wasm code. If it does, the corresponding trap plus a
/// backtrace is recorded on the current activation and control longjmps back
/// into `catch_traps` — this function then never returns. In all other cases
/// `ControlFlow::Continue(())` is returned so the caller can treat the
/// exception as a non-Wasm fault.
pub fn handle_wasm_exception(
    pc: VirtualAddress,
    fp: VirtualAddress,
    faulting_addr: VirtualAddress,
) -> ControlFlow<()> {
    if let Some(activation) = NonNull::new(ACTIVATION.get()) {
        #[expect(clippy::undocumented_unsafe_blocks, reason = "")]
        let activation = unsafe { activation.as_ref() };

        // A pc outside any registered JIT code region cannot be a Wasm trap.
        let Some((code, text_offset)) = lookup_code(pc.get()) else {
            tracing::debug!("no JIT code registered for pc {pc}");
            return ControlFlow::Continue(());
        };

        // Even within JIT code, only instructions listed in the compiled
        // trap side table are legitimate trap sites.
        let Some(trap) = code.lookup_trap_code(text_offset) else {
            tracing::debug!("no JIT trap registered for pc {pc}");
            return ControlFlow::Continue(());
        };

        // record the unwind details
        let backtrace = RawBacktrace::new(
            activation.vm_store_context.as_ptr(),
            activation,
            Some((pc, fp)),
        );
        activation.unwind.set(Some((
            UnwindReason::Trap(TrapReason::Jit {
                pc,
                faulting_addr: Some(faulting_addr),
                trap,
            }),
            Some(backtrace),
        )));

        // longjmp back to Rust
        #[expect(clippy::undocumented_unsafe_blocks, reason = "")]
        unsafe {
            arch::longjmp(activation.jmp_buf, 1);
        }
    } else {
        // ACTIVATION is a nullptr
        // => means no activations on stack
        // => means exception cannot be a WASM trap
        ControlFlow::Continue(())
    }
}