//! Next Generation WASM Microkernel Operating System
1// Copyright 2025 Jonas Kruckenberg
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8use core::alloc::Layout;
9use core::ops::Range;
10use core::{cmp, ptr, slice};
11
12use bitflags::bitflags;
13use fallible_iterator::FallibleIterator;
14use kmem::{AddressRangeExt, PhysicalAddress, VirtualAddress};
15use loader_api::TlsTemplate;
16use xmas_elf::P64;
17use xmas_elf::dynamic::Tag;
18use xmas_elf::program::{SegmentData, Type};
19
20use crate::error::Error;
21use crate::frame_alloc::FrameAllocator;
22use crate::kernel::Kernel;
23use crate::machine_info::MachineInfo;
24use crate::page_alloc::PageAllocator;
25use crate::{SelfRegions, arch};
26
bitflags! {
    /// Page-permission flags passed to the arch mapping routines.
    #[derive(Debug, Copy, Clone, PartialEq)]
    pub struct Flags: u8 {
        /// Mapping is readable.
        const READ = 1 << 0;
        /// Mapping is writable.
        const WRITE = 1 << 1;
        /// Mapping is executable.
        const EXECUTE = 1 << 2;
    }
}
35
36pub fn identity_map_self(
37 root_pgtable: PhysicalAddress,
38 frame_alloc: &mut FrameAllocator,
39 self_regions: &SelfRegions,
40) -> crate::Result<()> {
41 log::trace!(
42 "Identity mapping loader executable region {:#x?}...",
43 self_regions.executable
44 );
45 identity_map_range(
46 root_pgtable,
47 frame_alloc,
48 self_regions.executable.clone(),
49 Flags::READ | Flags::EXECUTE,
50 )?;
51
52 log::trace!(
53 "Identity mapping loader read-only region {:#x?}...",
54 self_regions.read_only
55 );
56 identity_map_range(
57 root_pgtable,
58 frame_alloc,
59 self_regions.read_only.clone(),
60 Flags::READ,
61 )?;
62
63 log::trace!(
64 "Identity mapping loader read-write region {:#x?}...",
65 self_regions.read_write
66 );
67 identity_map_range(
68 root_pgtable,
69 frame_alloc,
70 self_regions.read_write.clone(),
71 Flags::READ | Flags::WRITE,
72 )?;
73
74 Ok(())
75}
76
/// Identity-map (virtual address numerically equal to physical address) the
/// given physical range into `root_pgtable` with the given permission `flags`.
#[inline]
fn identity_map_range(
    root_pgtable: PhysicalAddress,
    frame_alloc: &mut FrameAllocator,
    phys: Range<PhysicalAddress>,
    flags: Flags,
) -> crate::Result<()> {
    // Identity mapping: reuse the physical start as the virtual start.
    let virt_start = VirtualAddress::new(phys.start.get());

    // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
    // abort startup anyway
    unsafe {
        arch::map_contiguous(
            root_pgtable,
            frame_alloc,
            virt_start,
            phys.start,
            phys.len(),
            flags,
            VirtualAddress::MIN, // called before translation into higher half
        )
    }
}
100
101pub fn map_physical_memory(
102 root_pgtable: PhysicalAddress,
103 frame_alloc: &mut FrameAllocator,
104 page_alloc: &mut PageAllocator,
105 minfo: &MachineInfo,
106) -> crate::Result<(VirtualAddress, Range<VirtualAddress>)> {
107 let alignment = arch::page_size_for_level(2);
108
109 let phys = minfo.memory_hull().align_out(alignment);
110 let virt = Range {
111 start: arch::KERNEL_ASPACE_BASE.add(phys.start.get()),
112 end: arch::KERNEL_ASPACE_BASE.add(phys.end.get()),
113 };
114
115 debug_assert!(phys.start.is_aligned_to(alignment) && phys.end.is_aligned_to(alignment));
116 debug_assert!(virt.start.is_aligned_to(alignment) && virt.end.is_aligned_to(alignment));
117
118 log::trace!("Mapping physical memory {phys:?} => {virt:?}...");
119 // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
120 // abort startup anyway
121 unsafe {
122 arch::map_contiguous(
123 root_pgtable,
124 frame_alloc,
125 virt.start,
126 phys.start,
127 phys.len(),
128 Flags::READ | Flags::WRITE,
129 VirtualAddress::MIN, // called before translation into higher half
130 )?;
131 }
132
133 // exclude the physical memory map region from page allocation
134 page_alloc.reserve(virt.start, phys.len());
135
136 Ok((arch::KERNEL_ASPACE_BASE, virt))
137}
138
/// Map the kernel ELF image into virtual memory.
///
/// Loads all `LOAD` segments (plus the `TLS` segment, if present) and then
/// applies the relocations described by the `DYNAMIC` segment.
///
/// Returns the virtual range reserved for the kernel and the TLS allocation,
/// if the ELF had a TLS segment.
pub fn map_kernel(
    root_pgtable: PhysicalAddress,
    frame_alloc: &mut FrameAllocator,
    page_alloc: &mut PageAllocator,
    kernel: &Kernel,
    minfo: &MachineInfo,
    phys_off: VirtualAddress,
) -> crate::Result<(Range<VirtualAddress>, Option<TlsAllocation>)> {
    // Reserve a virtual region large enough (and aligned enough) for the
    // kernel's whole in-memory image.
    let kernel_virt = page_alloc.allocate(
        Layout::from_size_align(
            usize::try_from(kernel.mem_size())?,
            usize::try_from(kernel.max_align())?,
        )
        .unwrap(),
    );

    // The ELF file is currently accessed through the higher-half physical
    // memory map; subtract the physmap base to recover its physical address.
    let phys_base = PhysicalAddress::new(
        kernel.elf_file.input.as_ptr() as usize - arch::KERNEL_ASPACE_BASE.get(),
    );
    assert!(
        phys_base.is_aligned_to(arch::PAGE_SIZE),
        "Loaded ELF file is not sufficiently aligned"
    );

    let mut maybe_tls_allocation = None;

    // Load the segments into virtual memory.
    for ph in kernel.elf_file.program_iter() {
        match ph.get_type().unwrap() {
            Type::Load => handle_load_segment(
                root_pgtable,
                frame_alloc,
                &ProgramHeader::try_from(ph)?,
                phys_base,
                kernel_virt.start,
                phys_off,
            )?,
            Type::Tls => {
                let ph = ProgramHeader::try_from(ph)?;
                let old = maybe_tls_allocation.replace(handle_tls_segment(
                    root_pgtable,
                    frame_alloc,
                    page_alloc,
                    &ph,
                    kernel_virt.start,
                    minfo,
                    phys_off,
                )?);
                log::trace!("{maybe_tls_allocation:?}");
                // `replace` returning a previous value means a second TLS
                // segment was encountered, which we don't support.
                assert!(old.is_none(), "multiple TLS segments not supported");
            }
            _ => {}
        }
    }

    // Apply relocations in virtual memory. This must run after all LOAD
    // segments above are mapped, since relocations write into them.
    for ph in kernel.elf_file.program_iter() {
        if ph.get_type().unwrap() == Type::Dynamic {
            handle_dynamic_segment(
                &ProgramHeader::try_from(ph).unwrap(),
                &kernel.elf_file,
                kernel_virt.start,
            )?;
        }
    }

    // // Mark some memory regions as read-only after relocations have been
    // // applied.
    // for ph in kernel.elf_file.program_iter() {
    //     if ph.get_type().unwrap() == Type::GnuRelro {
    //         handle_relro_segment(
    //             aspace,
    //             &ProgramHeader::try_from(ph).unwrap(),
    //             kernel_virt.start,
    //             flush,
    //         )?;
    //     }
    // }

    Ok((kernel_virt, maybe_tls_allocation))
}
220
/// Map an ELF LOAD segment.
///
/// The file-backed part of the segment is mapped directly out of the
/// in-memory ELF image; a zero-initialized tail (`file_size < mem_size`)
/// is delegated to [`handle_bss_section`].
fn handle_load_segment(
    root_pgtable: PhysicalAddress,
    frame_alloc: &mut FrameAllocator,
    ph: &ProgramHeader,
    phys_base: PhysicalAddress,
    virt_base: VirtualAddress,
    phys_off: VirtualAddress,
) -> crate::Result<()> {
    let flags = flags_for_segment(ph);

    log::trace!(
        "Handling Segment: LOAD off {offset:#016x} vaddr {vaddr:#016x} align {align} filesz {filesz:#016x} memsz {memsz:#016x} flags {flags:?}",
        offset = ph.offset,
        vaddr = ph.virtual_address,
        align = ph.align,
        filesz = ph.file_size,
        memsz = ph.mem_size
    );

    // Physical frames backing the segment's bytes inside the ELF image.
    let phys = Range::from_start_len(phys_base.add(ph.offset), ph.file_size).align_out(ph.align);

    // Destination: the segment's virtual address relative to the kernel base.
    let virt =
        Range::from_start_len(virt_base.add(ph.virtual_address), ph.file_size).align_out(ph.align);

    log::trace!("mapping {virt:#x?} => {phys:#x?}");
    // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
    // abort startup anyway
    unsafe {
        arch::map_contiguous(
            root_pgtable,
            frame_alloc,
            virt.start,
            phys.start,
            phys.len(),
            flags,
            arch::KERNEL_ASPACE_BASE,
        )?;
    }

    // Zero-initialized (BSS-style) tail that isn't backed by file data.
    if ph.file_size < ph.mem_size {
        handle_bss_section(
            root_pgtable,
            frame_alloc,
            ph,
            flags,
            phys_base,
            virt_base,
            phys_off,
        )?;
    }

    Ok(())
}
275
276/// BSS sections are special, since they take up virtual memory that is not present in the "physical" elf file.
277///
278/// Usually, this means just allocating zeroed frames and mapping them "in between" the pages
279/// backed by the elf file. However, quite often the boundary between DATA and BSS sections is
280/// *not* page aligned (since that would unnecessarily bloat the elf file) which means for us
281/// that we need special handling for the last DATA page that is only partially filled with data
282/// and partially filled with zeroes. Here's how we do this:
283///
284/// 1. We calculate the size of the segments zero initialized part.
285/// 2. We then figure out whether the boundary is page-aligned or if there are DATA bytes we need to account for.
286/// 2.1. IF there are data bytes to account for, we allocate a zeroed frame,
287/// 2.2. we then copy over the relevant data from the DATA section into the new frame
288/// 2.3. and lastly replace last page previously mapped by `handle_load_segment` to stitch things up.
289/// 3. If the BSS section is larger than that one page, we allocate additional zeroed frames and map them in.
290fn handle_bss_section(
291 root_pgtable: PhysicalAddress,
292 frame_alloc: &mut FrameAllocator,
293 ph: &ProgramHeader,
294 flags: Flags,
295 phys_base: PhysicalAddress,
296 virt_base: VirtualAddress,
297 phys_off: VirtualAddress,
298) -> crate::Result<()> {
299 let virt_start = virt_base.add(ph.virtual_address);
300 let zero_start = virt_start.add(ph.file_size);
301 let zero_end = virt_start.add(ph.mem_size);
302
303 let data_bytes_before_zero = zero_start.get() & 0xfff;
304
305 log::trace!(
306 "handling BSS {:#x?}, data bytes before {data_bytes_before_zero}",
307 zero_start..zero_end
308 );
309
310 if data_bytes_before_zero != 0 {
311 let last_page = virt_start
312 .add(ph.file_size.saturating_sub(1))
313 .align_down(ph.align);
314 let last_frame = phys_base
315 .add(ph.offset + ph.file_size - 1)
316 .align_down(ph.align);
317
318 let new_frame = frame_alloc.allocate_one_zeroed(arch::KERNEL_ASPACE_BASE)?;
319
320 // Safety: we just allocated the frame
321 unsafe {
322 let src = slice::from_raw_parts(
323 arch::KERNEL_ASPACE_BASE.add(last_frame.get()).as_mut_ptr(),
324 data_bytes_before_zero,
325 );
326
327 let dst = slice::from_raw_parts_mut(
328 arch::KERNEL_ASPACE_BASE.add(new_frame.get()).as_mut_ptr(),
329 data_bytes_before_zero,
330 );
331
332 log::trace!("copying {data_bytes_before_zero} bytes from {src:p} to {dst:p}...");
333 ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), dst.len());
334 }
335
336 // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
337 // abort startup anyway
338 unsafe {
339 arch::remap_contiguous(
340 root_pgtable,
341 last_page,
342 new_frame,
343 arch::PAGE_SIZE,
344 phys_off,
345 );
346 }
347 }
348
349 log::trace!("zero_start {zero_start:?} zero_end {zero_end:?}");
350 // zero_start either lies at a page boundary OR somewhere within the first page
351 // by aligning up, we move it to the beginning of the *next* page.
352 let mut virt = Range {
353 start: zero_start.align_up(ph.align),
354 end: zero_end.align_up(ph.align),
355 };
356
357 if !virt.is_empty() {
358 let mut frame_iter = frame_alloc.allocate_zeroed(
359 Layout::from_size_align(virt.len(), arch::PAGE_SIZE).unwrap(),
360 arch::KERNEL_ASPACE_BASE,
361 );
362
363 while let Some(chunk) = frame_iter.next()? {
364 log::trace!("mapping additional zeros {virt:?}",);
365
366 // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
367 // abort startup anyway
368 unsafe {
369 arch::map_contiguous(
370 root_pgtable,
371 frame_iter.alloc(),
372 virt.start,
373 chunk.start,
374 chunk.len(),
375 flags,
376 arch::KERNEL_ASPACE_BASE,
377 )?;
378 }
379
380 virt.start = virt.start.add(chunk.len());
381 }
382 }
383
384 Ok(())
385}
386
/// Process an ELF DYNAMIC segment: locate the `Rela` relocation table inside
/// the ELF image and apply each entry to the kernel's virtual image.
fn handle_dynamic_segment(
    ph: &ProgramHeader,
    elf_file: &xmas_elf::ElfFile,
    virt_base: VirtualAddress,
) -> crate::Result<()> {
    log::trace!("parsing RELA info...");

    if let Some(rela_info) = ph.parse_rela(elf_file)? {
        // Reinterpret the raw bytes at the table's file offset as Rela entries.
        // Safety: we have to trust the ELF data
        let relas = unsafe {
            #[expect(clippy::cast_ptr_alignment, reason = "this is fine")]
            let ptr = elf_file
                .input
                .as_ptr()
                .byte_add(usize::try_from(rela_info.offset)?)
                .cast::<xmas_elf::sections::Rela<P64>>();

            slice::from_raw_parts(ptr, usize::try_from(rela_info.count)?)
        };

        // TODO memory fence here

        log::trace!("applying relocations in virtual memory...");
        for rela in relas {
            apply_relocation(rela, virt_base);
        }
    }

    Ok(())
}
417
418fn apply_relocation(rela: &xmas_elf::sections::Rela<P64>, virt_base: VirtualAddress) {
419 assert_eq!(
420 rela.get_symbol_table_index(),
421 0,
422 "relocations using the symbol table are not supported"
423 );
424
425 const R_RISCV_RELATIVE: u32 = 3;
426
427 match rela.get_type() {
428 R_RISCV_RELATIVE => {
429 // Calculate address at which to apply the relocation.
430 // dynamic relocations offsets are relative to the virtual layout of the elf,
431 // not the physical file
432 let target = virt_base.add(usize::try_from(rela.get_offset()).unwrap());
433
434 // Calculate the value to store at the relocation target.
435 let value = virt_base.offset(isize::try_from(rela.get_addend()).unwrap());
436
437 // log::trace!("reloc R_RISCV_RELATIVE offset: {:#x}; addend: {:#x} => target {target:?} value {value:?}", rela.get_offset(), rela.get_addend());
438 // Safety: we have to trust the ELF data here
439 unsafe {
440 target
441 .as_mut_ptr()
442 .cast::<usize>()
443 .write_unaligned(value.get());
444 }
445 }
446 _ => unimplemented!("unsupported relocation type {}", rela.get_type()),
447 }
448}
449
/// Map the kernel thread-local storage (TLS) memory regions.
///
/// Allocates one TLS block per hart in `minfo.hart_mask` and maps all of
/// them, zero-initialized, into one contiguous virtual region. The returned
/// [`TlsAllocation`] records that region along with the template
/// (address/size/alignment of the ELF TLS segment) used later to initialize
/// each hart's block.
fn handle_tls_segment(
    root_pgtable: PhysicalAddress,
    frame_alloc: &mut FrameAllocator,
    page_alloc: &mut PageAllocator,
    ph: &ProgramHeader,
    virt_base: VirtualAddress,
    minfo: &MachineInfo,
    phys_off: VirtualAddress,
) -> crate::Result<TlsAllocation> {
    // One block per hart; each block is padded to at least page alignment.
    // This padding must match `TlsAllocation::region_for_hart`.
    let layout = Layout::from_size_align(ph.mem_size, cmp::max(ph.align, arch::PAGE_SIZE))
        .unwrap()
        .repeat(minfo.hart_mask.count_ones() as usize)
        .unwrap()
        .0
        .pad_to_align();
    log::trace!("allocating TLS segment {layout:?}...");

    let virt = page_alloc.allocate(layout);
    let mut virt_start = virt.start;

    // Back the whole region with zeroed frames, mapping each
    // physically-contiguous chunk as the allocator hands it out.
    let mut frame_iter = frame_alloc.allocate_zeroed(layout, phys_off);
    while let Some(chunk) = frame_iter.next()? {
        log::trace!(
            "Mapping TLS region {virt_start:?}..{:?} => {chunk:?} ...",
            virt_start.add(chunk.len())
        );

        // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
        // abort startup anyway
        unsafe {
            arch::map_contiguous(
                root_pgtable,
                frame_iter.alloc(),
                virt_start,
                chunk.start,
                chunk.len(),
                Flags::READ | Flags::WRITE,
                phys_off,
            )?;
        }

        virt_start = virt_start.add(chunk.len());
    }

    Ok(TlsAllocation {
        virt,
        template: TlsTemplate {
            // The TLS initialization image lives at the segment's virtual
            // address within the mapped kernel image.
            start_addr: virt_base.add(ph.virtual_address),
            mem_size: ph.mem_size,
            file_size: ph.file_size,
            align: ph.align,
        },
    })
}
505
/// The result of allocating and mapping the kernel's TLS regions
/// (one block per hart, laid out back to back).
#[derive(Debug)]
pub struct TlsAllocation {
    /// The TLS region in virtual memory
    virt: Range<VirtualAddress>,
    /// The template we allocated for
    pub template: TlsTemplate,
}
513
impl TlsAllocation {
    /// Returns the sub-range of the TLS region belonging to `hartid`.
    pub fn region_for_hart(&self, hartid: usize) -> Range<VirtualAddress> {
        // Per-hart stride: the template size padded to the same alignment used
        // by the `Layout::repeat` in `handle_tls_segment`.
        let aligned_size = checked_align_up(
            self.template.mem_size,
            cmp::max(self.template.align, arch::PAGE_SIZE),
        )
        .unwrap();
        let start = self.virt.start.add(aligned_size * hartid);

        Range::from_start_len(start, self.template.mem_size)
    }

    /// Copies the TLS initialization image (the segment's `file_size`
    /// file-backed bytes) from the template into `hartid`'s block. The
    /// remaining `mem_size - file_size` bytes stay zero, which is correct
    /// since the backing frames were allocated zeroed.
    pub fn initialize_for_hart(&self, hartid: usize) {
        if self.template.file_size != 0 {
            // Safety: We have to trust the loaders BootInfo here
            unsafe {
                let src: &[u8] = slice::from_raw_parts(
                    self.template.start_addr.as_mut_ptr(),
                    self.template.file_size,
                );
                let dst: &mut [u8] = slice::from_raw_parts_mut(
                    self.region_for_hart(hartid).start.as_mut_ptr(),
                    self.template.file_size,
                );

                // sanity check to ensure our destination allocated memory is actually zeroed.
                // if it's not, that likely means we're about to override something important
                debug_assert!(dst.iter().all(|&x| x == 0));

                dst.copy_from_slice(src);
            }
        }
    }
}
548
/// Allocate and map one kernel stack per hart, each `per_cpu_size_pages`
/// pages large, with a one-page unmapped guard gap per hart slot.
pub fn map_kernel_stacks(
    root_pgtable: PhysicalAddress,
    frame_alloc: &mut FrameAllocator,
    page_alloc: &mut PageAllocator,
    minfo: &MachineInfo,
    per_cpu_size_pages: usize,
    phys_off: VirtualAddress,
) -> crate::Result<StacksAllocation> {
    let per_cpu_size = per_cpu_size_pages * arch::PAGE_SIZE;
    // Each per-hart slot gets one extra page that is never mapped and thus
    // acts as a guard page.
    let per_cpu_size_with_guard = per_cpu_size + arch::PAGE_SIZE;

    // One slot per hart.
    let layout_with_guard = Layout::from_size_align(per_cpu_size_with_guard, arch::PAGE_SIZE)
        .unwrap()
        .repeat(minfo.hart_mask.count_ones() as usize)
        .unwrap()
        .0;

    let virt = page_alloc.allocate(layout_with_guard);
    log::trace!("Mapping stacks region {virt:#x?}...");

    for hart in 0..minfo.hart_mask.count_ones() {
        let layout = Layout::from_size_align(per_cpu_size, arch::PAGE_SIZE).unwrap();

        // NOTE(review): this offsets *upward from `virt.end`*
        // (`virt.end + hart * per_cpu_size_with_guard - per_cpu_size`), so for
        // hart > 0 the mapped stack lies beyond the region reserved from
        // `page_alloc` above. It does agree with
        // `StacksAllocation::region_for_cpu`, which uses the same arithmetic —
        // confirm that offsetting from `virt.start` wasn't intended instead.
        let mut virt = virt
            .end
            .add(per_cpu_size_with_guard * hart as usize)
            .sub(per_cpu_size);

        log::trace!("Allocating stack {layout:?}...");
        // The stacks region doesn't need to be zeroed, since we will be filling it with
        // the canary pattern anyway
        let mut frame_iter = frame_alloc.allocate(layout);

        while let Some(chunk) = frame_iter.next()? {
            log::trace!(
                "mapping stack for hart {hart} {virt:?}..{:?} => {chunk:?}",
                virt.add(chunk.len())
            );

            // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
            // abort startup anyway
            unsafe {
                arch::map_contiguous(
                    root_pgtable,
                    frame_iter.alloc(),
                    virt,
                    chunk.start,
                    chunk.len(),
                    Flags::READ | Flags::WRITE,
                    phys_off,
                )?;
            }

            virt = virt.add(chunk.len());
        }
    }

    Ok(StacksAllocation {
        virt,
        per_cpu_size,
        per_cpu_size_with_guard,
    })
}
612
/// The result of allocating and mapping the per-hart kernel stacks.
pub struct StacksAllocation {
    /// The stacks region in virtual memory
    virt: Range<VirtualAddress>,
    /// Usable stack size per CPU in bytes (excluding the guard page)
    per_cpu_size: usize,
    /// Per-CPU slot size in bytes including the one-page guard gap
    per_cpu_size_with_guard: usize,
}
619
impl StacksAllocation {
    /// Returns the stack range for the given CPU.
    pub fn region_for_cpu(&self, cpuid: usize) -> Range<VirtualAddress> {
        // NOTE(review): mirrors the arithmetic in `map_kernel_stacks` (offsets
        // are added to `virt.end`, not `virt.start`), so for cpuid > 0 the
        // returned range lies past `virt.end` — confirm this is intended.
        let end = self.virt.end.add(self.per_cpu_size_with_guard * cpuid);

        end.sub(self.per_cpu_size)..end
    }
}
627
/// A pre-parsed view of an ELF program header with the `u64` fields narrowed
/// to `usize` once, up front (see the `TryFrom` impl below).
struct ProgramHeader<'a> {
    /// Segment permission flags (read/write/execute)
    pub p_flags: xmas_elf::program::Flags,
    /// Required alignment of the segment
    pub align: usize,
    /// Byte offset of the segment's data within the ELF file
    pub offset: usize,
    /// Virtual address of the segment (used relative to the image base)
    pub virtual_address: usize,
    /// Size of the segment's data in the file
    pub file_size: usize,
    /// Size of the segment in memory; any excess over `file_size` is
    /// zero-initialized
    pub mem_size: usize,
    // Underlying xmas_elf header, kept for `get_data` in `parse_rela`.
    ph: xmas_elf::program::ProgramHeader<'a>,
}
637
impl ProgramHeader<'_> {
    /// Parse the `Rela` relocation table metadata out of this segment's
    /// DYNAMIC data.
    ///
    /// Returns `Ok(None)` if the segment carries no 64-bit DYNAMIC data or no
    /// `Rela` entry.
    ///
    /// # Panics
    ///
    /// Panics on malformed or unsupported dynamic sections: duplicate
    /// `Rela`/`RelaSize`/`RelaEnt` entries, `RelaSize`/`RelaEnt` present
    /// without `Rela` (or vice versa), or any `Rel`/`Relr`-family entry.
    pub fn parse_rela(&self, elf_file: &xmas_elf::ElfFile) -> crate::Result<Option<RelaInfo>> {
        let data = self.ph.get_data(elf_file).map_err(Error::Elf)?;
        let fields = match data {
            SegmentData::Dynamic32(_) => unimplemented!("32-bit elf files are not supported"),
            SegmentData::Dynamic64(fields) => fields,
            _ => return Ok(None),
        };

        let mut rela = None; // Address of Rela relocs
        let mut rela_size = None; // Total size of Rela relocs
        let mut rela_ent = None; // Size of one Rela reloc

        // Walk the dynamic entries, recording each Rela-related tag exactly once.
        for field in fields {
            let tag = field.get_tag().map_err(Error::Elf)?;
            match tag {
                Tag::Rela => {
                    let ptr = field.get_ptr().map_err(Error::Elf)?;
                    let prev = rela.replace(ptr);
                    assert!(
                        prev.is_none(),
                        "Dynamic section contains more than one Rela entry"
                    );
                }
                Tag::RelaSize => {
                    let val = field.get_val().map_err(Error::Elf)?;
                    let prev = rela_size.replace(val);
                    assert!(
                        prev.is_none(),
                        "Dynamic section contains more than one RelaSize entry"
                    );
                }
                Tag::RelaEnt => {
                    let val = field.get_val().map_err(Error::Elf)?;
                    let prev = rela_ent.replace(val);
                    assert!(
                        prev.is_none(),
                        "Dynamic section contains more than one RelaEnt entry"
                    );
                }

                Tag::Rel | Tag::RelSize | Tag::RelEnt => {
                    panic!("REL relocations are not supported")
                }
                Tag::RelrSize | Tag::Relr | Tag::RelrEnt => {
                    panic!("RELR relocations are not supported")
                }
                _ => {}
            }
        }

        // Size/entry tags without a table address indicate a malformed file.
        #[expect(clippy::manual_assert, reason = "cleaner this way")]
        if rela.is_none() && (rela_size.is_some() || rela_ent.is_some()) {
            panic!("Rela entry is missing but RelaSize or RelaEnt have been provided");
        }

        let Some(offset) = rela else {
            return Ok(None);
        };

        let total_size = rela_size.expect("RelaSize entry is missing");
        let entry_size = rela_ent.expect("RelaEnt entry is missing");

        Ok(Some(RelaInfo {
            offset,
            count: total_size / entry_size,
        }))
    }
}
707
/// Location of the `Rela` relocation table within the ELF file.
struct RelaInfo {
    /// Byte offset of the table from the start of the file
    pub offset: u64,
    /// Number of `Rela` entries (`RelaSize / RelaEnt`)
    pub count: u64,
}
712
impl<'a> TryFrom<xmas_elf::program::ProgramHeader<'a>> for ProgramHeader<'a> {
    type Error = Error;

    /// Converts the raw `xmas_elf` header, narrowing every `u64` field to
    /// `usize`; fails if any value doesn't fit the platform's `usize`.
    fn try_from(ph: xmas_elf::program::ProgramHeader<'a>) -> Result<Self, Self::Error> {
        Ok(Self {
            p_flags: ph.flags(),
            align: usize::try_from(ph.align())?,
            offset: usize::try_from(ph.offset())?,
            virtual_address: usize::try_from(ph.virtual_addr())?,
            file_size: usize::try_from(ph.file_size())?,
            mem_size: usize::try_from(ph.mem_size())?,
            ph,
        })
    }
}
728
729fn flags_for_segment(ph: &ProgramHeader) -> Flags {
730 let mut out = Flags::empty();
731
732 if ph.p_flags.is_read() {
733 out |= Flags::READ;
734 }
735
736 if ph.p_flags.is_write() {
737 out |= Flags::WRITE;
738 }
739
740 if ph.p_flags.is_execute() {
741 out |= Flags::EXECUTE;
742 }
743
744 assert!(
745 !out.contains(Flags::WRITE | Flags::EXECUTE),
746 "elf segment (virtual range {:#x}..{:#x}) is marked as write-execute",
747 ph.virtual_address,
748 ph.virtual_address + ph.mem_size
749 );
750
751 out
752}
753
/// Aligns `this` up to the next multiple of `align`, returning `None` if the
/// computation would overflow `usize`.
///
/// # Panics
///
/// Panics if `align` is not a power of two.
#[must_use]
#[inline]
pub const fn checked_align_up(this: usize, align: usize) -> Option<usize> {
    assert!(
        align.is_power_of_two(),
        "checked_align_up: align is not a power-of-two"
    );

    // A power of two is always >= 1, so this subtraction cannot underflow;
    // no `unsafe { unchecked_sub }` needed (safe `-` const-evaluates equally).
    let align_minus_one = align - 1;

    // `this + align_minus_one` rounded down to the alignment; the addition is
    // checked so overflow yields `None` instead of wrapping. For a power of
    // two, `!align_minus_one == 0usize.wrapping_sub(align)`.
    if let Some(addr_plus_align) = this.checked_add(align_minus_one) {
        let aligned = addr_plus_align & !align_minus_one;
        debug_assert!(aligned % align == 0);
        debug_assert!(aligned >= this);
        Some(aligned)
    } else {
        None
    }
}