//! Next Generation WASM Microkernel Operating System
1// Copyright 2025 Jonas Kruckenberg
2//
3// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5// http://opensource.org/licenses/MIT>, at your option. This file may not be
6// copied, modified, or distributed except according to those terms.
7
8use crate::error::Error;
9use crate::frame_alloc::FrameAllocator;
10use crate::kernel::Kernel;
11use crate::machine_info::MachineInfo;
12use crate::page_alloc::PageAllocator;
13use crate::{SelfRegions, arch};
14use bitflags::bitflags;
15use core::alloc::Layout;
16use core::num::NonZeroUsize;
17use core::range::Range;
18use core::{cmp, ptr, slice};
19use fallible_iterator::FallibleIterator;
20use loader_api::TlsTemplate;
21use xmas_elf::P64;
22use xmas_elf::dynamic::Tag;
23use xmas_elf::program::{SegmentData, Type};
24
bitflags! {
    /// Page-permission flags for memory mappings; passed through to
    /// `arch::map_contiguous` when installing page-table entries.
    #[derive(Debug, Copy, Clone, PartialEq)]
    pub struct Flags: u8 {
        /// Mapping is readable.
        const READ = 1 << 0;
        /// Mapping is writable.
        const WRITE = 1 << 1;
        /// Mapping is executable.
        const EXECUTE = 1 << 2;
    }
}
33
34pub fn identity_map_self(
35 root_pgtable: usize,
36 frame_alloc: &mut FrameAllocator,
37 self_regions: &SelfRegions,
38) -> crate::Result<()> {
39 log::trace!(
40 "Identity mapping loader executable region {:#x?}...",
41 self_regions.executable
42 );
43 identity_map_range(
44 root_pgtable,
45 frame_alloc,
46 self_regions.executable,
47 Flags::READ | Flags::EXECUTE,
48 )?;
49
50 log::trace!(
51 "Identity mapping loader read-only region {:#x?}...",
52 self_regions.read_only
53 );
54 identity_map_range(
55 root_pgtable,
56 frame_alloc,
57 self_regions.read_only,
58 Flags::READ,
59 )?;
60
61 log::trace!(
62 "Identity mapping loader read-write region {:#x?}...",
63 self_regions.read_write
64 );
65 identity_map_range(
66 root_pgtable,
67 frame_alloc,
68 self_regions.read_write,
69 Flags::READ | Flags::WRITE,
70 )?;
71
72 Ok(())
73}
74
75#[inline]
76fn identity_map_range(
77 root_pgtable: usize,
78 frame_alloc: &mut FrameAllocator,
79 phys: Range<usize>,
80 flags: Flags,
81) -> crate::Result<()> {
82 let len = NonZeroUsize::new(phys.end.checked_sub(phys.start).unwrap()).unwrap();
83
84 // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
85 // abort startup anyway
86 unsafe {
87 arch::map_contiguous(
88 root_pgtable,
89 frame_alloc,
90 phys.start,
91 phys.start,
92 len,
93 flags,
94 0, // called before translation into higher half
95 )
96 }
97}
98
/// Maps the machine's entire physical memory range into the kernel address
/// space at a fixed offset (`arch::KERNEL_ASPACE_BASE`), creating the
/// physical-memory map ("physmap") region.
///
/// Returns the physmap base offset (`arch::KERNEL_ASPACE_BASE`) and the
/// virtual range that was mapped.
pub fn map_physical_memory(
    root_pgtable: usize,
    frame_alloc: &mut FrameAllocator,
    page_alloc: &mut PageAllocator,
    minfo: &MachineInfo,
) -> crate::Result<(usize, Range<usize>)> {
    // Align to level-2 page-table granularity — presumably so larger
    // page-table entries can cover the physmap. TODO confirm against arch.
    let alignment = arch::page_size_for_level(2);

    // Widen the hull of all memory regions outwards to the chosen alignment.
    let phys = minfo.memory_hull();
    let phys = Range::from(
        align_down(phys.start, alignment)..checked_align_up(phys.end, alignment).unwrap(),
    );
    // virt = phys + KERNEL_ASPACE_BASE, i.e. a plain offset mapping.
    let virt = Range::from(
        arch::KERNEL_ASPACE_BASE.checked_add(phys.start).unwrap()
            ..arch::KERNEL_ASPACE_BASE.checked_add(phys.end).unwrap(),
    );
    let size = NonZeroUsize::new(phys.end.checked_sub(phys.start).unwrap()).unwrap();

    debug_assert!(phys.start % alignment == 0 && phys.end % alignment == 0);
    debug_assert!(virt.start % alignment == 0 && virt.end % alignment == 0);

    log::trace!("Mapping physical memory {phys:#x?} => {virt:#x?}...");
    // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
    // abort startup anyway
    unsafe {
        arch::map_contiguous(
            root_pgtable,
            frame_alloc,
            virt.start,
            phys.start,
            size,
            Flags::READ | Flags::WRITE,
            0, // called before translation into higher half
        )?;
    }

    // exclude the physical memory map region from page allocation
    page_alloc.reserve(virt.start, size.get());

    Ok((arch::KERNEL_ASPACE_BASE, virt))
}
140
/// Loads the kernel ELF image into virtual memory.
///
/// Allocates one contiguous virtual region for the whole image, then makes
/// two passes over the program headers: first mapping all `LOAD` segments
/// (and allocating per-hart TLS memory for a `TLS` segment, if present),
/// then applying dynamic (`RELA`) relocations from the `DYNAMIC` segment.
///
/// Returns the kernel's virtual range and the TLS allocation, if any.
pub fn map_kernel(
    root_pgtable: usize,
    frame_alloc: &mut FrameAllocator,
    page_alloc: &mut PageAllocator,
    kernel: &Kernel,
    minfo: &MachineInfo,
    phys_off: usize,
) -> crate::Result<(Range<usize>, Option<TlsAllocation>)> {
    // Reserve one contiguous virtual region big enough for the entire image.
    let kernel_virt = page_alloc.allocate(
        Layout::from_size_align(
            usize::try_from(kernel.mem_size())?,
            usize::try_from(kernel.max_align())?,
        )
        .unwrap(),
    );

    // The ELF bytes are accessed through the physmap, so subtracting the
    // physmap base yields their physical address.
    let phys_base = kernel.elf_file.input.as_ptr() as usize - arch::KERNEL_ASPACE_BASE;
    assert!(
        phys_base % arch::PAGE_SIZE == 0,
        "Loaded ELF file is not sufficiently aligned"
    );

    let mut maybe_tls_allocation = None;

    // Load the segments into virtual memory.
    for ph in kernel.elf_file.program_iter() {
        match ph.get_type().unwrap() {
            Type::Load => handle_load_segment(
                root_pgtable,
                frame_alloc,
                &ProgramHeader::try_from(ph)?,
                phys_base,
                kernel_virt.start,
                phys_off,
            )?,
            Type::Tls => {
                let ph = ProgramHeader::try_from(ph)?;
                let old = maybe_tls_allocation.replace(handle_tls_segment(
                    root_pgtable,
                    frame_alloc,
                    page_alloc,
                    &ph,
                    kernel_virt.start,
                    minfo,
                    phys_off,
                )?);
                log::trace!("{maybe_tls_allocation:?}");
                // ELF permits at most one PT_TLS; we rely on that here.
                assert!(old.is_none(), "multiple TLS segments not supported");
            }
            _ => {}
        }
    }

    // Apply relocations in virtual memory.
    for ph in kernel.elf_file.program_iter() {
        if ph.get_type().unwrap() == Type::Dynamic {
            handle_dynamic_segment(
                &ProgramHeader::try_from(ph).unwrap(),
                &kernel.elf_file,
                kernel_virt.start,
            )?;
        }
    }

    // // Mark some memory regions as read-only after relocations have been
    // // applied.
    // for ph in kernel.elf_file.program_iter() {
    //     if ph.get_type().unwrap() == Type::GnuRelro {
    //         handle_relro_segment(
    //             aspace,
    //             &ProgramHeader::try_from(ph).unwrap(),
    //             kernel_virt.start,
    //             flush,
    //         )?;
    //     }
    // }

    Ok((kernel_virt, maybe_tls_allocation))
}
220
/// Map an ELF LOAD segment.
///
/// The file-backed part is mapped directly from the (already page-aligned)
/// ELF image in physical memory into the kernel's virtual region. If
/// `mem_size > file_size`, the zero-initialized tail (BSS) is handled
/// separately by [`handle_bss_section`].
fn handle_load_segment(
    root_pgtable: usize,
    frame_alloc: &mut FrameAllocator,
    ph: &ProgramHeader,
    phys_base: usize,
    virt_base: usize,
    phys_off: usize,
) -> crate::Result<()> {
    let flags = flags_for_segment(ph);

    log::trace!(
        "Handling Segment: LOAD off {offset:#016x} vaddr {vaddr:#016x} align {align} filesz {filesz:#016x} memsz {memsz:#016x} flags {flags:?}",
        offset = ph.offset,
        vaddr = ph.virtual_address,
        align = ph.align,
        filesz = ph.file_size,
        memsz = ph.mem_size
    );

    // Physical location of the segment's file-backed bytes inside the ELF
    // image, widened outwards to the segment alignment.
    let phys = {
        let start = phys_base.checked_add(ph.offset).unwrap();
        let end = start.checked_add(ph.file_size).unwrap();

        Range::from(align_down(start, ph.align)..checked_align_up(end, ph.align).unwrap())
    };

    // Destination in the kernel's virtual address space, widened identically
    // so both ranges have the same length.
    let virt = {
        let start = virt_base.checked_add(ph.virtual_address).unwrap();
        let end = start.checked_add(ph.file_size).unwrap();

        Range::from(align_down(start, ph.align)..checked_align_up(end, ph.align).unwrap())
    };

    log::trace!("mapping {virt:#x?} => {phys:#x?}");
    // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
    // abort startup anyway
    unsafe {
        arch::map_contiguous(
            root_pgtable,
            frame_alloc,
            virt.start,
            phys.start,
            NonZeroUsize::new(phys.end.checked_sub(phys.start).unwrap()).unwrap(),
            flags,
            arch::KERNEL_ASPACE_BASE,
        )?;
    }

    // Segments larger in memory than in the file have a zero-initialized
    // tail (.bss) that needs separate backing frames.
    if ph.file_size < ph.mem_size {
        handle_bss_section(
            root_pgtable,
            frame_alloc,
            ph,
            flags,
            phys_base,
            virt_base,
            phys_off,
        )?;
    }

    Ok(())
}
284
285/// BSS sections are special, since they take up virtual memory that is not present in the "physical" elf file.
286///
287/// Usually, this means just allocating zeroed frames and mapping them "in between" the pages
288/// backed by the elf file. However, quite often the boundary between DATA and BSS sections is
289/// *not* page aligned (since that would unnecessarily bloat the elf file) which means for us
290/// that we need special handling for the last DATA page that is only partially filled with data
291/// and partially filled with zeroes. Here's how we do this:
292///
293/// 1. We calculate the size of the segments zero initialized part.
294/// 2. We then figure out whether the boundary is page-aligned or if there are DATA bytes we need to account for.
295/// 2.1. IF there are data bytes to account for, we allocate a zeroed frame,
296/// 2.2. we then copy over the relevant data from the DATA section into the new frame
297/// 2.3. and lastly replace last page previously mapped by `handle_load_segment` to stitch things up.
298/// 3. If the BSS section is larger than that one page, we allocate additional zeroed frames and map them in.
299fn handle_bss_section(
300 root_pgtable: usize,
301 frame_alloc: &mut FrameAllocator,
302 ph: &ProgramHeader,
303 flags: Flags,
304 phys_base: usize,
305 virt_base: usize,
306 phys_off: usize,
307) -> crate::Result<()> {
308 let virt_start = virt_base.checked_add(ph.virtual_address).unwrap();
309 let zero_start = virt_start.checked_add(ph.file_size).unwrap();
310 let zero_end = virt_start.checked_add(ph.mem_size).unwrap();
311
312 let data_bytes_before_zero = zero_start & 0xfff;
313
314 log::trace!(
315 "handling BSS {:#x?}, data bytes before {data_bytes_before_zero}",
316 zero_start..zero_end
317 );
318
319 if data_bytes_before_zero != 0 {
320 let last_page = align_down(
321 virt_start
322 .checked_add(ph.file_size.saturating_sub(1))
323 .unwrap(),
324 ph.align,
325 );
326 let last_frame = align_down(
327 phys_base.checked_add(ph.offset + ph.file_size - 1).unwrap(),
328 ph.align,
329 );
330
331 let new_frame = frame_alloc.allocate_one_zeroed(arch::KERNEL_ASPACE_BASE)?;
332
333 // Safety: we just allocated the frame
334 unsafe {
335 let src = slice::from_raw_parts(
336 arch::KERNEL_ASPACE_BASE.checked_add(last_frame).unwrap() as *mut u8,
337 data_bytes_before_zero,
338 );
339
340 let dst = slice::from_raw_parts_mut(
341 arch::KERNEL_ASPACE_BASE.checked_add(new_frame).unwrap() as *mut u8,
342 data_bytes_before_zero,
343 );
344
345 log::trace!("copying {data_bytes_before_zero} bytes from {src:p} to {dst:p}...");
346 ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), dst.len());
347 }
348
349 // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
350 // abort startup anyway
351 unsafe {
352 arch::remap_contiguous(
353 root_pgtable,
354 last_page,
355 new_frame,
356 NonZeroUsize::new(arch::PAGE_SIZE).unwrap(),
357 phys_off,
358 );
359 }
360 }
361
362 log::trace!("zero_start {zero_start:#x} zero_end {zero_end:#x}");
363 let (mut virt, len) = {
364 // zero_start either lies at a page boundary OR somewhere within the first page
365 // by aligning up, we move it to the beginning of the *next* page.
366 let start = checked_align_up(zero_start, ph.align).unwrap();
367 let end = checked_align_up(zero_end, ph.align).unwrap();
368 (start, end.checked_sub(start).unwrap())
369 };
370
371 if len > 0 {
372 let mut phys_iter = frame_alloc.allocate_zeroed(
373 Layout::from_size_align(len, arch::PAGE_SIZE).unwrap(),
374 arch::KERNEL_ASPACE_BASE,
375 );
376
377 while let Some((phys, len)) = phys_iter.next()? {
378 log::trace!(
379 "mapping additional zeros {virt:#x}..{:#x}",
380 virt.checked_add(len.get()).unwrap()
381 );
382
383 // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
384 // abort startup anyway
385 unsafe {
386 arch::map_contiguous(
387 root_pgtable,
388 phys_iter.alloc(),
389 virt,
390 phys,
391 len,
392 flags,
393 arch::KERNEL_ASPACE_BASE,
394 )?;
395 }
396
397 virt += len.get();
398 }
399 }
400
401 Ok(())
402}
403
/// Parses the DYNAMIC segment for a RELA relocation table and, if one is
/// present, applies every entry to the kernel image in virtual memory.
fn handle_dynamic_segment(
    ph: &ProgramHeader,
    elf_file: &xmas_elf::ElfFile,
    virt_base: usize,
) -> crate::Result<()> {
    log::trace!("parsing RELA info...");

    if let Some(rela_info) = ph.parse_rela(elf_file)? {
        // Build a slice over the RELA table at `offset` within the raw ELF bytes.
        // Safety: we have to trust the ELF data
        let relas = unsafe {
            #[expect(clippy::cast_ptr_alignment, reason = "this is fine")]
            let ptr = elf_file
                .input
                .as_ptr()
                .byte_add(usize::try_from(rela_info.offset)?)
                .cast::<xmas_elf::sections::Rela<P64>>();

            slice::from_raw_parts(ptr, usize::try_from(rela_info.count)?)
        };

        // TODO memory fence here

        log::trace!("applying relocations in virtual memory...");
        for rela in relas {
            apply_relocation(rela, virt_base);
        }
    }

    Ok(())
}
434
435fn apply_relocation(rela: &xmas_elf::sections::Rela<P64>, virt_base: usize) {
436 assert_eq!(
437 rela.get_symbol_table_index(),
438 0,
439 "relocations using the symbol table are not supported"
440 );
441
442 const R_RISCV_RELATIVE: u32 = 3;
443
444 match rela.get_type() {
445 R_RISCV_RELATIVE => {
446 // Calculate address at which to apply the relocation.
447 // dynamic relocations offsets are relative to the virtual layout of the elf,
448 // not the physical file
449 let target = virt_base
450 .checked_add(usize::try_from(rela.get_offset()).unwrap())
451 .unwrap();
452
453 // Calculate the value to store at the relocation target.
454 let value = virt_base
455 .checked_add_signed(isize::try_from(rela.get_addend()).unwrap())
456 .unwrap();
457
458 // log::trace!("reloc R_RISCV_RELATIVE offset: {:#x}; addend: {:#x} => target {target:?} value {value:?}", rela.get_offset(), rela.get_addend());
459 // Safety: we have to trust the ELF data here
460 unsafe {
461 (target as *mut usize).write_unaligned(value);
462 }
463 }
464 _ => unimplemented!("unsupported relocation type {}", rela.get_type()),
465 }
466}
467
/// Map the kernel thread-local storage (TLS) memory regions.
///
/// One copy of the TLS area is allocated per hart: the per-hart layout
/// (`mem_size` bytes at page alignment or stricter) is repeated
/// `hart_mask.count_ones()` times. The returned [`TlsAllocation`] records
/// the mapped region and the template needed to later initialize each
/// hart's copy.
fn handle_tls_segment(
    root_pgtable: usize,
    frame_alloc: &mut FrameAllocator,
    page_alloc: &mut PageAllocator,
    ph: &ProgramHeader,
    virt_base: usize,
    minfo: &MachineInfo,
    phys_off: usize,
) -> crate::Result<TlsAllocation> {
    // Layout of all per-hart copies back to back, each aligned to at least a page.
    let layout = Layout::from_size_align(ph.mem_size, cmp::max(ph.align, arch::PAGE_SIZE))
        .unwrap()
        .repeat(minfo.hart_mask.count_ones() as usize)
        .unwrap()
        .0
        .pad_to_align();
    log::trace!("allocating TLS segment {layout:?}...");

    let virt = page_alloc.allocate(layout);
    let mut virt_start = virt.start;

    // Frames may come back in multiple non-contiguous chunks; map each at the
    // next virtual address in turn.
    let mut phys_iter = frame_alloc.allocate_zeroed(layout, phys_off);
    while let Some((phys, len)) = phys_iter.next()? {
        log::trace!(
            "Mapping TLS region {virt_start:#x}..{:#x} {len} ...",
            virt_start.checked_add(len.get()).unwrap()
        );

        // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
        // abort startup anyway
        unsafe {
            arch::map_contiguous(
                root_pgtable,
                phys_iter.alloc(),
                virt_start,
                phys,
                len,
                Flags::READ | Flags::WRITE,
                phys_off,
            )?;
        }

        virt_start += len.get();
    }

    Ok(TlsAllocation {
        virt,
        template: TlsTemplate {
            // The TLS initialization image lives inside the mapped kernel
            // image at its link-time virtual address.
            start_addr: virt_base + ph.virtual_address,
            mem_size: ph.mem_size,
            file_size: ph.file_size,
            align: ph.align,
        },
    })
}
523
/// The virtual-memory allocation backing the per-hart TLS copies, together
/// with the template used to initialize each copy.
#[derive(Debug)]
pub struct TlsAllocation {
    /// The TLS region in virtual memory (covers the copies for all harts)
    virt: Range<usize>,
    /// The template we allocated for
    pub template: TlsTemplate,
}
531
impl TlsAllocation {
    /// Returns the virtual region holding the TLS copy for `hartid`.
    pub fn region_for_hart(&self, hartid: usize) -> Range<usize> {
        // Copies are spaced `mem_size` aligned up to at least a page apart,
        // matching the repeated layout built in `handle_tls_segment`.
        let aligned_size = checked_align_up(
            self.template.mem_size,
            cmp::max(self.template.align, arch::PAGE_SIZE),
        )
        .unwrap();
        let start = self.virt.start + (aligned_size * hartid);

        Range::from(start..start + self.template.mem_size)
    }

    /// Copies the file-backed part of the TLS template into the given hart's
    /// TLS region. The remainder of the region stays zeroed (BSS part).
    pub fn initialize_for_hart(&self, hartid: usize) {
        if self.template.file_size != 0 {
            // Safety: We have to trust the loaders BootInfo here
            unsafe {
                let src: &[u8] = slice::from_raw_parts(
                    self.template.start_addr as *const u8,
                    self.template.file_size,
                );
                let dst: &mut [u8] = slice::from_raw_parts_mut(
                    self.region_for_hart(hartid).start as *mut u8,
                    self.template.file_size,
                );

                // sanity check to ensure our destination allocated memory is actually zeroed.
                // if it's not, that likely means we're about to override something important
                debug_assert!(dst.iter().all(|&x| x == 0));

                dst.copy_from_slice(src);
            }
        }
    }
}
566
/// Allocates and maps the per-CPU kernel stacks.
///
/// Each CPU gets `per_cpu_size_pages` pages of usable stack plus one extra
/// page of address space that is left unmapped to act as a guard page
/// below the stack. Stack slots are carved from the top of the allocated
/// region downwards.
pub fn map_kernel_stacks(
    root_pgtable: usize,
    frame_alloc: &mut FrameAllocator,
    page_alloc: &mut PageAllocator,
    minfo: &MachineInfo,
    per_cpu_size_pages: usize,
    phys_off: usize,
) -> crate::Result<StacksAllocation> {
    let per_cpu_size = per_cpu_size_pages * arch::PAGE_SIZE;
    let per_cpu_size_with_guard = per_cpu_size + arch::PAGE_SIZE;

    // One guard-padded slot per hart.
    let layout_with_guard = Layout::from_size_align(per_cpu_size_with_guard, arch::PAGE_SIZE)
        .unwrap()
        .repeat(minfo.hart_mask.count_ones() as usize)
        .unwrap()
        .0;

    let virt = page_alloc.allocate(layout_with_guard);
    log::trace!("Mapping stacks region {virt:#x?}...");

    for hart in 0..minfo.hart_mask.count_ones() {
        let layout = Layout::from_size_align(per_cpu_size, arch::PAGE_SIZE).unwrap();

        // Start of the usable stack for this hart. Only the top
        // `per_cpu_size` bytes of the slot are mapped; the bottom page of
        // each slot stays unmapped as the guard page.
        let mut virt = virt
            .end
            .checked_sub(per_cpu_size_with_guard * hart as usize)
            .and_then(|a| a.checked_sub(per_cpu_size))
            .unwrap();

        log::trace!("Allocating stack {layout:?}...");
        // The stacks region doesn't need to be zeroed, since we will be filling it with
        // the canary pattern anyway
        let mut phys_iter = frame_alloc.allocate(layout);

        while let Some((phys, len)) = phys_iter.next()? {
            log::trace!(
                "mapping stack for hart {hart} {virt:#x}..{:#x} => {phys:#x}..{:#x}",
                virt.checked_add(len.get()).unwrap(),
                phys.checked_add(len.get()).unwrap()
            );

            // Safety: Leaving the address space in an invalid state here is fine since on panic we'll
            // abort startup anyway
            unsafe {
                arch::map_contiguous(
                    root_pgtable,
                    phys_iter.alloc(),
                    virt,
                    phys,
                    len,
                    Flags::READ | Flags::WRITE,
                    phys_off,
                )?;
            }

            virt += len.get();
        }
    }

    Ok(StacksAllocation {
        virt,
        per_cpu_size,
        per_cpu_size_with_guard,
    })
}
632
/// The virtual-memory allocation backing the per-CPU kernel stacks
/// (including one guard page per CPU).
pub struct StacksAllocation {
    /// The stacks region in virtual memory (all CPUs, guard pages included)
    virt: Range<usize>,
    /// Usable stack size per CPU in bytes
    per_cpu_size: usize,
    /// Per-CPU slot size including the guard page
    per_cpu_size_with_guard: usize,
}
639
640impl StacksAllocation {
641 pub fn region_for_cpu(&self, cpuid: usize) -> Range<usize> {
642 let end = self.virt.end - (self.per_cpu_size_with_guard * cpuid);
643
644 Range::from((end - self.per_cpu_size)..end)
645 }
646}
647
/// An ELF program header with its 64-bit fields narrowed to native `usize`
/// for convenient address arithmetic; keeps the raw header around for data
/// access (see `parse_rela`).
struct ProgramHeader<'a> {
    /// Segment permission flags (read/write/execute).
    pub p_flags: xmas_elf::program::Flags,
    /// Required alignment of the segment.
    pub align: usize,
    /// Byte offset of the segment data within the ELF file.
    pub offset: usize,
    /// Load address of the segment (treated as relative to the image base by callers).
    pub virtual_address: usize,
    /// Number of bytes backed by data in the file.
    pub file_size: usize,
    /// Total size in memory; any excess over `file_size` is zero-initialized (BSS).
    pub mem_size: usize,
    /// The underlying raw program header.
    ph: xmas_elf::program::ProgramHeader<'a>,
}
657
impl ProgramHeader<'_> {
    /// Scans this segment's DYNAMIC entries for the location of the RELA
    /// relocation table.
    ///
    /// Returns `Ok(None)` if the segment is not a 64-bit DYNAMIC segment or
    /// contains no `Rela` entry.
    ///
    /// # Panics
    ///
    /// Panics on unsupported or malformed dynamic sections: 32-bit ELF,
    /// duplicate `Rela`/`RelaSize`/`RelaEnt` entries, REL- or RELR-style
    /// relocations, or a `Rela` entry without its size/entry-size companions
    /// (and vice versa).
    pub fn parse_rela(&self, elf_file: &xmas_elf::ElfFile) -> crate::Result<Option<RelaInfo>> {
        let data = self.ph.get_data(elf_file).map_err(Error::Elf)?;
        let fields = match data {
            SegmentData::Dynamic32(_) => unimplemented!("32-bit elf files are not supported"),
            SegmentData::Dynamic64(fields) => fields,
            _ => return Ok(None),
        };

        let mut rela = None; // Address of Rela relocs
        let mut rela_size = None; // Total size of Rela relocs
        let mut rela_ent = None; // Size of one Rela reloc

        for field in fields {
            let tag = field.get_tag().map_err(Error::Elf)?;
            match tag {
                Tag::Rela => {
                    let ptr = field.get_ptr().map_err(Error::Elf)?;
                    let prev = rela.replace(ptr);
                    assert!(
                        prev.is_none(),
                        "Dynamic section contains more than one Rela entry"
                    );
                }
                Tag::RelaSize => {
                    let val = field.get_val().map_err(Error::Elf)?;
                    let prev = rela_size.replace(val);
                    assert!(
                        prev.is_none(),
                        "Dynamic section contains more than one RelaSize entry"
                    );
                }
                Tag::RelaEnt => {
                    let val = field.get_val().map_err(Error::Elf)?;
                    let prev = rela_ent.replace(val);
                    assert!(
                        prev.is_none(),
                        "Dynamic section contains more than one RelaEnt entry"
                    );
                }

                Tag::Rel | Tag::RelSize | Tag::RelEnt => {
                    panic!("REL relocations are not supported")
                }
                Tag::RelrSize | Tag::Relr | Tag::RelrEnt => {
                    panic!("RELR relocations are not supported")
                }
                _ => {}
            }
        }

        #[expect(clippy::manual_assert, reason = "cleaner this way")]
        if rela.is_none() && (rela_size.is_some() || rela_ent.is_some()) {
            panic!("Rela entry is missing but RelaSize or RelaEnt have been provided");
        }

        let Some(offset) = rela else {
            return Ok(None);
        };

        let total_size = rela_size.expect("RelaSize entry is missing");
        let entry_size = rela_ent.expect("RelaEnt entry is missing");

        Ok(Some(RelaInfo {
            offset,
            count: total_size / entry_size,
        }))
    }
}
727
/// Location of the RELA relocation table inside the ELF file, as described
/// by the DYNAMIC segment.
struct RelaInfo {
    /// Byte offset of the table within the file.
    pub offset: u64,
    /// Number of relocation entries in the table.
    pub count: u64,
}
732
733impl<'a> TryFrom<xmas_elf::program::ProgramHeader<'a>> for ProgramHeader<'a> {
734 type Error = Error;
735
736 fn try_from(ph: xmas_elf::program::ProgramHeader<'a>) -> Result<Self, Self::Error> {
737 Ok(Self {
738 p_flags: ph.flags(),
739 align: usize::try_from(ph.align())?,
740 offset: usize::try_from(ph.offset())?,
741 virtual_address: usize::try_from(ph.virtual_addr())?,
742 file_size: usize::try_from(ph.file_size())?,
743 mem_size: usize::try_from(ph.mem_size())?,
744 ph,
745 })
746 }
747}
748
749fn flags_for_segment(ph: &ProgramHeader) -> Flags {
750 let mut out = Flags::empty();
751
752 if ph.p_flags.is_read() {
753 out |= Flags::READ;
754 }
755
756 if ph.p_flags.is_write() {
757 out |= Flags::WRITE;
758 }
759
760 if ph.p_flags.is_execute() {
761 out |= Flags::EXECUTE;
762 }
763
764 assert!(
765 !out.contains(Flags::WRITE | Flags::EXECUTE),
766 "elf segment (virtual range {:#x}..{:#x}) is marked as write-execute",
767 ph.virtual_address,
768 ph.virtual_address + ph.mem_size
769 );
770
771 out
772}
773
/// Rounds `this` up to the next multiple of `align`, returning `None` on
/// overflow.
///
/// # Panics
///
/// Panics if `align` is not a power of two.
#[must_use]
#[inline]
pub const fn checked_align_up(this: usize, align: usize) -> Option<usize> {
    assert!(
        align.is_power_of_two(),
        "checked_align_up: align is not a power-of-two"
    );

    // `align >= 1` is guaranteed by the power-of-two check, so this cannot
    // underflow.
    let mask = align - 1;

    match this.checked_add(mask) {
        Some(bumped) => {
            // Clearing the low bits rounds the bumped value back down onto
            // the alignment boundary.
            let aligned = bumped & !mask;
            debug_assert!(aligned % align == 0);
            debug_assert!(aligned >= this);
            Some(aligned)
        }
        None => None,
    }
}
795
/// Rounds `this` down to the previous multiple of `align` (never overflows).
///
/// # Panics
///
/// Panics if `align` is not a power of two.
#[must_use]
#[inline]
pub const fn align_down(this: usize, align: usize) -> usize {
    assert!(
        align.is_power_of_two(),
        "align_down: align is not a power-of-two"
    );

    // Clearing the low bits truncates to the alignment boundary; `align - 1`
    // cannot underflow because power-of-two implies `align >= 1`.
    let aligned = this & !(align - 1);
    debug_assert!(aligned % align == 0);
    debug_assert!(aligned <= this);
    aligned
}
808}