//! Next Generation WASM Microkernel Operating System
1// Copyright 2025 Jonas Kruckenberg
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8use alloc::boxed::Box;
9use alloc::vec;
10use alloc::vec::Vec;
11use core::cell::Cell;
12use core::num::NonZeroU32;
13use core::ops::ControlFlow;
14use core::panic::AssertUnwindSafe;
15use core::ptr::NonNull;
16use core::{fmt, ptr};
17
18use k23_cpu_local::cpu_local;
19use kmem::VirtualAddress;
20
21use crate::arch;
22use crate::wasm::TrapKind;
23use crate::wasm::code_registry::lookup_code;
24use crate::wasm::store::StoreOpaque;
25use crate::wasm::vm::{VMContext, VMStoreContext};
26
/// Description about a fault that occurred in WebAssembly.
///
/// Produced when a memory access in JIT code faults; see [`fmt::Display`]
/// for the user-facing rendering.
#[derive(Debug)]
pub struct WasmFault {
    /// The size of memory, in bytes, at the time of the fault.
    pub memory_size: usize,
    /// The WebAssembly address at which the fault occurred.
    pub wasm_address: u64,
}
35
36impl fmt::Display for WasmFault {
37 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
38 write!(
39 f,
40 "memory fault at wasm address 0x{:x} in linear memory of size 0x{:x}",
41 self.wasm_address, self.memory_size,
42 )
43 }
44}
45
/// A caught WebAssembly trap: the reason it was raised plus an optional
/// captured Wasm backtrace. Returned in the `Err` arm of `catch_traps`.
#[derive(Debug)]
pub struct Trap {
    /// Original reason from where this trap originated.
    pub reason: TrapReason,
    /// Wasm backtrace of the trap, if any.
    pub backtrace: Option<RawBacktrace>,
    // The Wasm Coredump, if any.
    // pub coredumpstack: Option<CoreDumpStack>,
}
55
/// The origin of a trap: raised by the host, by Cranelift-generated JIT
/// code, or by a wasm builtin.
#[derive(Debug)]
pub enum TrapReason {
    /// A user-raised trap through `raise_user_trap`.
    User(anyhow::Error),

    /// A trap raised from Cranelift-generated code.
    Jit {
        /// The program counter where this trap originated.
        ///
        /// This is later used with side tables from compilation to translate
        /// the trapping address to a trap code.
        pc: VirtualAddress,

        /// If the trap was a memory-related trap such as SIGSEGV then this
        /// field will contain the address of the inaccessible data.
        ///
        /// Note that wasm loads/stores are not guaranteed to fill in this
        /// information. Dynamically-bounds-checked memories, for example, will
        /// not access an invalid address but may instead load from NULL or may
        /// explicitly jump to a `ud2` instruction. This is only available for
        /// fault-based traps which are one of the main ways, but not the only
        /// way, to run wasm.
        faulting_addr: Option<VirtualAddress>,

        /// The trap code associated with this trap.
        trap: TrapKind,
    },

    /// A trap raised from a wasm builtin
    Wasm(TrapKind),
}
87
88impl From<anyhow::Error> for TrapReason {
89 fn from(err: anyhow::Error) -> Self {
90 TrapReason::User(err)
91 }
92}
93
94impl From<TrapKind> for TrapReason {
95 fn from(code: TrapKind) -> Self {
96 TrapReason::Wasm(code)
97 }
98}
99
/// Why a host-to-wasm boundary is being unwound.
pub enum UnwindReason {
    /// A Rust panic crossed the boundary; the payload is re-raised via
    /// `k23_panic_unwind::resume_unwind` once back on the host side.
    Panic(Box<dyn core::any::Any + Send>),
    /// A WebAssembly trap (JIT, builtin, or user-raised) is propagating.
    Trap(TrapReason),
}
104
/// Longjmps back to the innermost `catch_traps` frame on this CPU to raise
/// a trap whose unwind state has already been recorded on the current
/// [`Activation`].
///
/// # Safety
///
/// The caller must ensure an activation is installed on this CPU's
/// `ACTIVATION` list (i.e. we are executing inside a `catch_traps` call)
/// and that an unwind reason has already been recorded on it; otherwise
/// the `unwrap` here dereferences a null list head.
pub(in crate::wasm) unsafe fn raise_preexisting_trap() -> ! {
    // Safety: ensured by caller
    unsafe {
        let activation = ACTIVATION.get().as_ref().unwrap();
        activation.unwind()
    }
}
112
/// Runs `f` (which is expected to call into Wasm) and catches any trap or
/// panic that unwinds out of it.
///
/// A fresh [`Activation`] is installed as the head of this CPU's activation
/// list for the duration of the call. If JIT code or a builtin traps, it
/// longjmps back to the `call_with_setjmp` frame below with a non-zero
/// code; the unwind state recorded on the activation is then converted
/// into a [`Trap`], or the panic payload is resumed on the host side.
pub fn catch_traps<F>(store: &mut StoreOpaque, f: F) -> Result<(), Trap>
where
    F: FnOnce(NonNull<VMContext>),
{
    let caller = store.default_caller();
    let mut prev_state = ptr::null_mut();
    let ret_code = arch::call_with_setjmp(|jmp_buf| {
        let mut activation = Activation::new(store, jmp_buf);

        // Install this activation as the new list head; the previous head
        // is restored on both the success and the trap path below.
        prev_state = ACTIVATION.replace(ptr::from_mut(&mut activation).cast());
        f(caller);

        // Zero means "returned normally"; longjmp passes a non-zero code.
        0_i32
    });

    if ret_code == 0 {
        ACTIVATION.set(prev_state);

        Ok(())
    } else {
        // We got here via longjmp, so the activation recorded its unwind
        // state before jumping.
        #[expect(clippy::undocumented_unsafe_blocks, reason = "")]
        let (unwind_reason, backtrace) = unsafe { ACTIVATION.get().as_ref() }
            .unwrap()
            .unwind
            .take()
            .unwrap();
        ACTIVATION.set(prev_state);

        match unwind_reason {
            UnwindReason::Trap(reason) => Err(Trap { reason, backtrace }),
            UnwindReason::Panic(payload) => k23_panic_unwind::resume_unwind(payload),
        }
    }
}
147
cpu_local! {
    // Head of this CPU's intrusive linked list of `Activation`s (see the
    // diagram on `Activation`); null when no wasm is executing on this CPU.
    static ACTIVATION: Cell<*mut Activation> = Cell::new(ptr::null_mut())
}
151
/// Bookkeeping for one host-to-wasm call on the current CPU's stack.
///
/// Activations form an intrusive, per-CPU linked list (via `prev`) whose
/// head is the `ACTIVATION` cpu-local; each entry delimits one contiguous
/// region of Wasm frames for backtracing:
///
/// ```text
/// ┌─────────────────────┐◄───── highest, or oldest, stack address
/// │ native stack frames │
/// │ ... │
/// │ ┌───────────────┐◄─┼──┐
/// │ │ Activation │ │ │
/// │ └───────────────┘ │ p
/// ├─────────────────────┤ r
/// │ wasm stack frames │ e
/// │ ... │ v
/// ├─────────────────────┤ │
/// │ native stack frames │ │
/// │ ... │ │
/// │ ┌───────────────┐◄─┼──┼── TLS pointer
/// │ │ Activation ├──┼──┘
/// │ └───────────────┘ │
/// ├─────────────────────┤
/// │ wasm stack frames │
/// │ ... │
/// ├─────────────────────┤
/// │ native stack frames │
/// │ ... │
/// └─────────────────────┘◄───── smallest, or youngest, stack address
/// ```
pub struct Activation {
    /// Unwind reason (and optional backtrace) recorded by a trap or panic;
    /// taken back out by `catch_traps` after the longjmp.
    unwind: Cell<Option<(UnwindReason, Option<RawBacktrace>)>>,
    /// Jump buffer used to longjmp back to the matching `catch_traps` frame.
    jmp_buf: arch::JmpBuf,
    /// Next-older activation on this CPU, or null at the end of the list.
    prev: Cell<*mut Activation>,
    /// The store's `VMStoreContext` whose `last_wasm_*` registers this
    /// activation snapshots and restores.
    vm_store_context: NonNull<VMStoreContext>,

    // async_guard_range: Range<*mut u8>,

    // vmctx: *mut VMContext,
    // vmoffsets: VMOffsets,

    // The values of `VMRuntimeLimits::last_wasm_{exit_{pc,fp},entry_sp}`
    // for the *previous* `CallThreadState` for this same store/limits. Our
    // *current* last wasm PC/FP/SP are saved in `self.limits`. We save a
    // copy of the old registers here because the `VMRuntimeLimits`
    // typically doesn't change across nested calls into Wasm (i.e. they are
    // typically calls back into the same store and `self.limits ==
    // self.prev.limits`) and we must maintain the list of
    // contiguous-Wasm-frames stack regions for backtracing purposes.
    old_last_wasm_exit_fp: Cell<VirtualAddress>,
    old_last_wasm_exit_pc: Cell<VirtualAddress>,
    old_last_wasm_entry_fp: Cell<VirtualAddress>,
}
199
impl Activation {
    /// Creates an activation for `store`, snapshotting the store's current
    /// `last_wasm_{exit_pc,exit_fp,entry_fp}` registers so they can be
    /// restored when this activation is dropped (see the `Drop` impl).
    pub fn new(store: &mut StoreOpaque, jmp_buf: &arch::JmpBufStruct) -> Self {
        Self {
            unwind: Cell::new(None),
            jmp_buf: ptr::from_ref(jmp_buf),
            prev: Cell::new(ACTIVATION.get()),

            vm_store_context: store.vm_store_context_ptr(),
            // Safety: plain read of the store's `UnsafeCell` field; we hold
            // `&mut StoreOpaque`, so presumably no concurrent writer exists
            // — TODO confirm and document the exact invariant.
            old_last_wasm_exit_fp: Cell::new(unsafe {
                *store.vm_store_context().last_wasm_exit_fp.get()
            }),
            // Safety: same argument as above — TODO confirm.
            old_last_wasm_exit_pc: Cell::new(unsafe {
                *store.vm_store_context().last_wasm_exit_pc.get()
            }),
            // Safety: same argument as above — TODO confirm.
            old_last_wasm_entry_fp: Cell::new(unsafe {
                *store.vm_store_context().last_wasm_entry_fp.get()
            }),
        }
    }

    /// Iterates over this activation and every older activation on this
    /// CPU by following the `prev` pointers until the list's null tail.
    fn iter(&self) -> impl Iterator<Item = &Self> {
        let mut state = Some(self);
        core::iter::from_fn(move || {
            let this = state?;
            // Safety: `prev` is always either a null ptr (indicating the end of the list) or a valid pointer to a `CallThreadState`.
            // This is ensured by the `push` method.
            state = unsafe { this.prev.get().as_ref() };
            Some(this)
        })
    }

    /// Installs `self` as the head of this CPU's activation list.
    ///
    /// # Safety
    ///
    /// `self` must not already be on a list (asserted below) and must be
    /// removed with a matching [`Self::pop`] before it is moved or dropped
    /// — TODO confirm the full contract.
    #[inline]
    pub(crate) unsafe fn push(&mut self) {
        assert!(self.prev.get().is_null());
        let prev = ACTIVATION.replace(ptr::from_mut(self));
        self.prev.set(prev);
    }

    /// Removes `self` from the head of this CPU's activation list,
    /// asserting it actually was the head (pushes/pops must nest).
    #[inline]
    pub(crate) unsafe fn pop(&self) {
        let prev = self.prev.replace(ptr::null_mut());
        let head = ACTIVATION.replace(prev);
        assert!(ptr::eq(head, self));
    }

    /// Takes the recorded unwind state out of this activation.
    ///
    /// Panics if no unwind has been recorded.
    #[cold]
    fn read_unwind(&self) -> (UnwindReason, Option<RawBacktrace>) {
        self.unwind.replace(None).unwrap()
    }

    /// Records `reason` — plus a freshly captured Wasm backtrace for traps
    /// — so `catch_traps` can pick it up after the longjmp.
    fn record_unwind(&self, reason: UnwindReason) {
        if cfg!(debug_assertions) {
            let prev = self.unwind.replace(None);
            assert!(prev.is_none());
        }
        let backtrace = match &reason {
            // Panics don't need backtraces. There is nowhere to attach the
            // hypothetical backtrace to and it doesn't really make sense to try
            // in the first place since this is a Rust problem rather than a
            // Wasm problem.
            UnwindReason::Panic(_) => None,
            // // And if we are just propagating an existing trap that already has
            // // a backtrace attached to it, then there is no need to capture a
            // // new backtrace either.
            // UnwindReason::Trap(TrapReason::User(err))
            //     if err.downcast_ref::<RawBacktrace>().is_some() =>
            // {
            //     (None, None)
            // }
            UnwindReason::Trap(_) => {
                Some(self.capture_backtrace(self.vm_store_context.as_ptr(), None))
            } // self.capture_coredump(self.vm_store_context.as_ptr(), None),
        };
        self.unwind.set(Some((reason, backtrace)));
    }

    /// Longjmps back to the `catch_traps` frame that created this
    /// activation. Never returns.
    ///
    /// # Safety
    ///
    /// The setjmp frame identified by `self.jmp_buf` must still be live on
    /// this stack.
    unsafe fn unwind(&self) -> ! {
        // Safety: ensured by caller
        unsafe {
            debug_assert!(!self.jmp_buf.is_null());
            arch::longjmp(self.jmp_buf, 1);
        }
    }

    /// Captures a Wasm backtrace starting from this activation, optionally
    /// seeded with the faulting PC/FP of a trap.
    fn capture_backtrace(
        &self,
        vm_store_context: *mut VMStoreContext,
        trap_pc_and_fp: Option<(VirtualAddress, VirtualAddress)>,
    ) -> RawBacktrace {
        RawBacktrace::new(vm_store_context, self, trap_pc_and_fp)
    }
}
295
impl Drop for Activation {
    /// Restores the store's saved `last_wasm_*` registers to their values
    /// from before this (possibly nested) activation was created.
    fn drop(&mut self) {
        // Unwind information should not be present as it should have
        // already been processed.
        debug_assert!(self.unwind.replace(None).is_none());

        // Safety: `vm_store_context` must still point to a live
        // `VMStoreContext` owned by the store — TODO confirm the lifetime
        // invariant tying the activation to its store.
        unsafe {
            let cx = self.vm_store_context.as_ref();
            *cx.last_wasm_exit_fp.get() = self.old_last_wasm_exit_fp.get();
            *cx.last_wasm_exit_pc.get() = self.old_last_wasm_exit_pc.get();
            *cx.last_wasm_entry_fp.get() = self.old_last_wasm_entry_fp.get();
        }
    }
}
311
/// Runs `f`, records any resulting trap or panic on this CPU's current
/// activation, and returns the ABI-level value that Cranelift-generated
/// code tests for success/failure.
pub fn catch_unwind_and_record_trap<R>(f: impl FnOnce() -> R) -> R::Abi
where
    R: HostResult,
{
    let (ret, unwind) = R::maybe_catch_unwind(f);
    if let Some(unwind) = unwind {
        // Safety: presumably an unwind can only be produced while running
        // inside a `catch_traps` frame, so the activation list head is
        // non-null — TODO confirm.
        let activation = unsafe { ACTIVATION.get().as_ref().unwrap() };
        activation.record_unwind(unwind);
    }

    ret
}
325
/// A trait used in conjunction with `catch_unwind_and_record_trap` to convert a
/// Rust-based type to a specific ABI while handling traps/unwinds.
///
/// This trait is implemented for return values from host function calls and
/// builtins.
pub trait HostResult {
    /// The type of the value that's returned to Cranelift-compiled code. Needs
    /// to be ABI-safe to pass through an `extern "C"` return value.
    type Abi: Copy;
    /// Runs `f` and converts its result to the ABI representation. The `Abi`
    /// value represents either a successful execution with some payload
    /// state or that a failed execution happened; in the latter case the
    /// unwind reason to record is also returned. Cranelift-compiled code is
    /// expected to test for the failure sentinel and process it accordingly.
    fn maybe_catch_unwind(f: impl FnOnce() -> Self) -> (Self::Abi, Option<UnwindReason>);
}
339
// Base case implementations that do not catch unwinds. These are for libcalls
// that neither trap nor execute user code. The raw value is the ABI itself.
//
// Panics in these libcalls will result in a process abort as unwinding is not
// allowed via Rust through `extern "C"` function boundaries.
macro_rules! host_result_no_catch {
    ($($t:ty,)*) => {
        $(
            #[allow(clippy::unused_unit, reason = "the empty tuple case generates an empty tuple as return value, which makes clippy mad but thats fine")]
            impl HostResult for $t {
                type Abi = $t;
                fn maybe_catch_unwind(f: impl FnOnce() -> $t) -> ($t, Option<UnwindReason>) {
                    (f(), None)
                }
            }
        )*
    }
}
358
// Primitive return types whose ABI is the value itself; a panic while
// producing one of these aborts rather than unwinding (see macro above).
host_result_no_catch! {
    (),
    bool,
    u32,
    *mut u8,
    u64,
}
366
367impl HostResult for NonNull<u8> {
368 type Abi = *mut u8;
369 fn maybe_catch_unwind(f: impl FnOnce() -> Self) -> (*mut u8, Option<UnwindReason>) {
370 (f().as_ptr(), None)
371 }
372}
373
/// Fallible host results: the `Ok` payload is converted through
/// [`HostResultHasUnwindSentinel`], while an `Err` — or any Rust panic
/// caught here — yields the sentinel ABI value plus the unwind reason to
/// record.
impl<T, E> HostResult for Result<T, E>
where
    T: HostResultHasUnwindSentinel,
    E: Into<TrapReason>,
{
    type Abi = T::Abi;

    fn maybe_catch_unwind(f: impl FnOnce() -> Self) -> (Self::Abi, Option<UnwindReason>) {
        let f = move || match f() {
            Ok(ret) => (ret.into_abi(), None),
            Err(reason) => (T::SENTINEL, Some(UnwindReason::Trap(reason.into()))),
        };

        // Catch panics so they unwind cleanly through the `extern "C"`
        // boundary instead of aborting.
        k23_panic_unwind::catch_unwind(AssertUnwindSafe(f))
            .unwrap_or_else(|payload| (T::SENTINEL, Some(UnwindReason::Panic(payload))))
    }
}
391
/// Trait used in conjunction with `HostResult for Result<T, E>` where this is
/// the trait bound on `T`.
///
/// This is for values in the "ok" position of a `Result` return value. Each
/// value can have a separate ABI from itself (e.g. `type Abi`) and must be
/// convertible to the ABI. Additionally all implementations of this trait have
/// a "sentinel value" which indicates that an unwind happened. This means that
/// no valid instance of `Self` should generate the `SENTINEL` via the
/// `into_abi` function.
///
/// # Safety
///
/// Implementations must guarantee that `into_abi` can never produce the
/// `SENTINEL` value, since Cranelift-generated code interprets the sentinel
/// as "an unwind happened".
pub unsafe trait HostResultHasUnwindSentinel {
    /// The Cranelift-understood ABI of this value (should not be `Self`).
    type Abi: Copy;

    /// A value that indicates that an unwind should happen and is tested for in
    /// Cranelift-generated code.
    const SENTINEL: Self::Abi;

    /// Converts this value into the ABI representation. Must never return
    /// the `SENTINEL` value.
    fn into_abi(self) -> Self::Abi;
}
417
/// No return value from the host is represented as a `bool` in the ABI. Here
/// `true` means that execution succeeded while `false` is the sentinel used to
/// indicate an unwind.
// Safety: `into_abi` always returns `true`, which can never be the `false`
// sentinel.
unsafe impl HostResultHasUnwindSentinel for () {
    type Abi = bool;
    const SENTINEL: bool = false;
    fn into_abi(self) -> bool {
        true
    }
}
429
/// A `NonZeroU32` travels as a plain `u32` with `0` as the unwind sentinel.
// Safety: `NonZeroU32::get` can never return 0, so `into_abi` can never
// produce the sentinel.
unsafe impl HostResultHasUnwindSentinel for NonZeroU32 {
    type Abi = u32;
    const SENTINEL: Self::Abi = 0;
    fn into_abi(self) -> Self::Abi {
        self.get()
    }
}
438
/// A 32-bit return value can be inflated to a 64-bit return value in the ABI.
/// In this manner a successful result is a zero-extended 32-bit value and the
/// failure sentinel is `u64::MAX` or -1 as a signed integer.
// Safety: a zero-extended `u32` is at most `u32::MAX as u64`, which is
// strictly less than the `u64::MAX` sentinel.
unsafe impl HostResultHasUnwindSentinel for u32 {
    type Abi = u64;
    const SENTINEL: u64 = u64::MAX;
    fn into_abi(self) -> u64 {
        self.into()
    }
}
450
/// If there is no actual successful result (e.g. an empty enum) then the ABI
/// can be `()`, or nothing, because there's no successful result and it's
/// always a failure.
// Safety: `Infallible` is uninhabited, so `into_abi` can never be called —
// let alone return the sentinel.
unsafe impl HostResultHasUnwindSentinel for core::convert::Infallible {
    type Abi = ();
    const SENTINEL: () = ();
    fn into_abi(self) {
        match self {}
    }
}
462
/// A raw (unsymbolicated) backtrace of Wasm frames, in the order they were
/// walked — youngest frame first.
#[derive(Debug)]
pub struct RawBacktrace(Vec<Frame>);
465
/// A stack frame within a Wasm stack trace.
#[derive(Debug)]
pub struct Frame {
    /// Program counter within this frame.
    pub pc: VirtualAddress,
    /// Frame pointer of this frame.
    pub fp: VirtualAddress,
}
472
impl RawBacktrace {
    /// Captures a backtrace by walking every contiguous Wasm frame region
    /// reachable from `activation` and collecting each frame.
    fn new(
        vm_store_context: *const VMStoreContext,
        activation: &Activation,
        trap_pc_and_fp: Option<(VirtualAddress, VirtualAddress)>,
    ) -> Self {
        let mut frames = vec![];
        Self::trace_with_trap_state(vm_store_context, activation, trap_pc_and_fp, |frame| {
            frames.push(frame);
            ControlFlow::Continue(())
        });
        Self(frames)
    }

    /// Walk the current Wasm stack, calling `f` for each frame we walk.
    ///
    /// `trap_pc_and_fp` carries the faulting PC/FP when unwinding from a
    /// trap; otherwise the last exit PC/FP saved in the store are used as
    /// the starting point.
    pub(crate) fn trace_with_trap_state(
        vm_store_context: *const VMStoreContext,
        activation: &Activation,
        trap_pc_and_fp: Option<(VirtualAddress, VirtualAddress)>,
        mut f: impl FnMut(Frame) -> ControlFlow<()>,
    ) {
        // Safety: `vm_store_context` must point to a live `VMStoreContext`
        // whose saved PC/FP values are Cranelift-maintained — TODO spell
        // out the exact invariant.
        unsafe {
            tracing::trace!("====== Capturing Backtrace ======");

            let (last_wasm_exit_pc, last_wasm_exit_fp) = match trap_pc_and_fp {
                // If we exited Wasm by catching a trap, then the Wasm-to-host
                // trampoline did not get a chance to save the last Wasm PC and FP,
                // and we need to use the plumbed-through values instead.
                Some((pc, fp)) => {
                    assert!(ptr::eq(
                        vm_store_context,
                        activation.vm_store_context.as_ptr()
                    ));
                    (pc, fp)
                }
                // Either there is no Wasm currently on the stack, or we exited Wasm
                // through the Wasm-to-host trampoline.
                None => {
                    let pc = *(*vm_store_context).last_wasm_exit_pc.get();
                    let fp = *(*vm_store_context).last_wasm_exit_fp.get();
                    (pc, fp)
                }
            };

            // One (pc, fp, entry_fp) triple per contiguous Wasm frame
            // region: the current region first, then one per saved snapshot
            // in each older activation. A zero PC marks the end of the
            // meaningful entries.
            let activations = core::iter::once((
                last_wasm_exit_pc,
                last_wasm_exit_fp,
                *(*vm_store_context).last_wasm_entry_fp.get(),
            ))
            .chain(activation.iter().map(|state| {
                (
                    state.old_last_wasm_exit_pc.get(),
                    state.old_last_wasm_exit_fp.get(),
                    state.old_last_wasm_entry_fp.get(),
                )
            }))
            .take_while(|&(pc, fp, sp)| {
                if pc.get() == 0 {
                    debug_assert_eq!(fp.get(), 0);
                    debug_assert_eq!(sp.get(), 0);
                }
                pc.get() != 0
            });

            for (pc, fp, sp) in activations {
                if let ControlFlow::Break(()) = Self::trace_through_wasm(pc, fp, sp, &mut f) {
                    tracing::trace!("====== Done Capturing Backtrace (closure break) ======");
                    return;
                }
            }

            tracing::trace!("====== Done Capturing Backtrace (reached end of activations) ======");
        }
    }

    /// Walk through a contiguous sequence of Wasm frames starting with the
    /// frame at the given PC and FP and ending at `trampoline_fp`.
    ///
    /// Panics (via asserts) if the frame-pointer chain is malformed: a zero
    /// or non-canonical address, a misaligned FP, or a chain that does not
    /// grow monotonically toward `trampoline_fp`.
    fn trace_through_wasm(
        mut pc: VirtualAddress,
        mut fp: VirtualAddress,
        trampoline_fp: VirtualAddress,
        mut f: impl FnMut(Frame) -> ControlFlow<()>,
    ) -> ControlFlow<()> {
        f(Frame { pc, fp })?;

        tracing::trace!("=== Tracing through contiguous sequence of Wasm frames ===");
        tracing::trace!("trampoline_fp = {trampoline_fp}");
        tracing::trace!("   initial pc = {pc}");
        tracing::trace!("   initial fp = {fp}");

        // We already checked for this case in the `trace_with_trap_state`
        // caller.
        assert_ne!(pc.get(), 0);
        assert!(arch::is_canonical(pc));
        assert_ne!(fp.get(), 0);
        assert!(arch::is_canonical(fp));
        assert_ne!(trampoline_fp.get(), 0);
        assert!(arch::is_canonical(trampoline_fp));

        // This loop will walk the linked list of frame pointers starting at
        // `fp` and going up until `trampoline_fp`. We know that both `fp` and
        // `trampoline_fp` are "trusted values" aka generated and maintained by
        // Cranelift. This means that it should be safe to walk the linked list
        // of pointers and inspect wasm frames.
        //
        // Note, though, that any frames outside of this range are not
        // guaranteed to have valid frame pointers. For example native code
        // might be using the frame pointer as a general purpose register. Thus
        // we need to be careful to only walk frame pointers in this one
        // contiguous linked list.
        //
        // To know when to stop iteration all architectures' stacks currently
        // look something like this:
        //
        //     | ...               |
        //     | Native Frames     |
        //     | ...               |
        //     |-------------------|
        //     | ...               | <-- Trampoline FP    |
        //     | Trampoline Frame  |                      |
        //     | ...               | <-- Trampoline SP    |
        //     |-------------------|                    Stack
        //     | Return Address    |                    Grows
        //     | Previous FP       | <-- Wasm FP        Down
        //     | ...               |                      |
        //     | Wasm Frames       |                      |
        //     | ...               |                      V
        //
        // The trampoline records its own frame pointer (`trampoline_fp`),
        // which is guaranteed to be above all Wasm. To check when we've
        // reached the trampoline frame, it is therefore sufficient to
        // check when the next frame pointer is equal to `trampoline_fp`. Once
        // that's hit then we know that the entire linked list has been
        // traversed.
        //
        // Note that it might be possible that this loop doesn't execute at all.
        // For example if the entry trampoline called wasm which `return_call`'d
        // an imported function which is an exit trampoline, then
        // `fp == trampoline_fp` on the entry of this function, meaning the loop
        // won't actually execute anything.
        while fp != trampoline_fp {
            // At the start of each iteration of the loop, we know that `fp` is
            // a frame pointer from Wasm code. Therefore, we know it is not
            // being used as an extra general-purpose register, and it is safe
            // dereference to get the PC and the next older frame pointer.
            //
            // The stack also grows down, and therefore any frame pointer we are
            // dealing with should be less than the frame pointer on entry to
            // Wasm. Finally also assert that it's aligned correctly as an
            // additional sanity check.
            assert!(trampoline_fp > fp, "{trampoline_fp} > {fp}");
            arch::assert_fp_is_aligned(fp);

            tracing::trace!("--- Tracing through one Wasm frame ---");
            tracing::trace!("pc = {pc}");
            tracing::trace!("fp = {fp}");

            f(Frame { pc, fp })?;

            #[expect(clippy::undocumented_unsafe_blocks, reason = "")]
            unsafe {
                pc = arch::get_next_older_pc_from_fp(fp);
            }

            // We rely on this offset being zero for all supported architectures
            // in `crates/cranelift/src/component/compiler.rs` when we set the
            // Wasm exit FP. If this ever changes, we will need to update that
            // code as well!
            assert_eq!(arch::NEXT_OLDER_FP_FROM_FP_OFFSET, 0);

            // Get the next older frame pointer from the current Wasm frame
            // pointer.
            #[expect(clippy::undocumented_unsafe_blocks, reason = "")]
            #[expect(clippy::cast_ptr_alignment, reason = "")]
            let next_older_fp = unsafe {
                *fp.as_mut_ptr()
                    .cast::<VirtualAddress>()
                    .add(arch::NEXT_OLDER_FP_FROM_FP_OFFSET)
            };

            // Because the stack always grows down, the older FP must be greater
            // than the current FP.
            assert!(next_older_fp > fp, "{next_older_fp} > {fp}");
            fp = next_older_fp;
        }

        tracing::trace!("=== Done tracing contiguous sequence of Wasm frames ===");
        ControlFlow::Continue(())
    }

    /// Iterate over the frames inside this backtrace.
    pub fn frames(&self) -> impl ExactSizeIterator<Item = &Frame> + DoubleEndedIterator {
        self.0.iter()
    }
}
669
/// Attempts to handle a CPU exception as a WebAssembly trap.
///
/// Returns `ControlFlow::Continue(())` when the fault did not originate
/// from registered JIT code (there is no activation on this CPU, the PC is
/// not in a registered code region, or no trap is recorded for that PC) —
/// the caller should then keep treating it as a native exception. If the
/// fault *is* a Wasm trap, this function records the unwind state on the
/// current activation and longjmps back into `catch_traps`, never
/// returning. Note that `ControlFlow::Break` is never produced here.
pub fn handle_wasm_exception(
    pc: VirtualAddress,
    fp: VirtualAddress,
    faulting_addr: VirtualAddress,
) -> ControlFlow<()> {
    if let Some(activation) = NonNull::new(ACTIVATION.get()) {
        #[expect(clippy::undocumented_unsafe_blocks, reason = "")]
        let activation = unsafe { activation.as_ref() };

        let Some((code, text_offset)) = lookup_code(pc.get()) else {
            tracing::debug!("no JIT code registered for pc {pc}");
            return ControlFlow::Continue(());
        };

        let Some(trap) = code.lookup_trap_code(text_offset) else {
            tracing::debug!("no JIT trap registered for pc {pc}");
            return ControlFlow::Continue(());
        };

        // record the unwind details
        let backtrace = RawBacktrace::new(
            activation.vm_store_context.as_ptr(),
            activation,
            Some((pc, fp)),
        );
        activation.unwind.set(Some((
            UnwindReason::Trap(TrapReason::Jit {
                pc,
                faulting_addr: Some(faulting_addr),
                trap,
            }),
            Some(backtrace),
        )));

        // longjmp back to Rust
        #[expect(clippy::undocumented_unsafe_blocks, reason = "")]
        unsafe {
            arch::longjmp(activation.jmp_buf, 1);
        }
    } else {
        // ACTIVATION is a nullptr
        // => means no activations on stack
        // => means exception cannot be a WASM trap
        ControlFlow::Continue(())
    }
}
715}