Microkernel thing OS experiment (Zig ⚡)

overhaul paging API
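Mapping calls now thread an explicit paging context (a CR3 snapshot plus the 5-level flag) instead of re-reading CR3 through a global root_table(), and the permission fields are renamed to the shorthand .w/.x/.u. A minimal sketch of the new call shape, with made-up address values; the real call sites are in the boot.zig hunk below:

    var ctx = arch.mm.paging.Context.get_current();
    try common.mm.paging.map(.{
        .vaddr = 0x7fff_0000_0000, // hypothetical user-space address
        .size = 0x1000,
        .memory_type = .MemoryWriteBack,
        .perms = .{ .x = false, .w = true, .u = true },
        .context = &ctx,
    });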

pci.express fce9655d 3590420c

verified
Changed files
+250 -183
+13 -7
components/ukernel/arch/amd64/boot.zig
···
  const StandardGdt = arch.structures.gdt.StandardGdt;
  const Tss = arch.structures.tss.Tss;

+ var pg_ctx: arch.mm.paging.Context = undefined;
+
  pub const limine_requests = struct {
      export var start_marker: limine.RequestsStartMarker linksection(".limine_reqs_start") = .{};
      export var end_marker: limine.RequestsEndMarker linksection(".limine_reqs_end") = .{};
···
  log.info("Setting up scheduling...", .{});

+ pg_ctx = arch.mm.paging.Context.get_current();
+
  initApic() catch |err| {
      log.err("Failed to set up APIC! {}", .{err});
      @panic("apic");
···
      .size = 0x1000,
      .memory_type = .MemoryWriteBack,
      .perms = .{
-         .executable = false,
-         .userspace_accessible = true,
-         .writable = true,
+         .x = false,
+         .u = true,
+         .w = true,
      },
+     .context = &pg_ctx,
  }) catch @panic("couldn't map user stack");

- const entry = common.loadRootTask() catch |err| {
+ const entry = common.loadRootTask(&pg_ctx) catch |err| {
      log.err("Couldn't load the root task! {}", .{err});
      @panic("ggz");
  };
···
      .size = 0x1000,
      .memory_type = .DeviceUncacheable,
      .perms = .{
-         .executable = false,
-         .userspace_accessible = false,
-         .writable = true,
+         .x = false,
+         .u = false,
+         .w = true,
      },
+     .context = &pg_ctx,
  });
  break :blk .{ .xapic = apic_base };
},
+207 -155
components/ukernel/arch/amd64/mm/paging.zig
···
- const common = @import("common");
  const arch = @import("../root.zig");
+ const common = @import("common");
  const std = @import("std");
- const physToVirt = common.mm.physToHHDM;
+ const Cr3 = arch.registers.ControlRegisters.Cr3;
+ const Cr4 = arch.registers.ControlRegisters.Cr4;
  const Perms = common.mm.paging.Perms;
-
- pub const page_sizes = [_]usize{
-     0x1000, // 4K
-     0x200000, // 2M
-     0x40000000, // 1G
-     0x8000000000, // 512G
-     0x1000000000000, // 256T
- };

  pub const PageTable = extern struct {
      entries: [512]Entry,
···
      };
  };

- fn extract_index_from_vaddr(vaddr: u64, level: u6) u9 {
-     const shamt = 12 + level * 9;
-     return @truncate(vaddr >> shamt);
+ pub const MemoryType = enum {
+     DeviceUncacheable,
+     DeviceWriteCombining,
+     MemoryWritethrough,
+     MemoryWriteBack,
+ };
+
+ pub fn detect_5level() bool {
+     const bits: u64 = 1 << 12;
+     return Cr4.read() & bits != 0;
  }

- pub const TypedPTE = union(common.mm.paging.PTEType) {
-     Mapping: MappingHandle,
-     Table: TableHandle,
-     Empty,
+ pub const Context = struct {
+     cr3_val: u64,
+     level5: bool,

      const Self = @This();
+     pub fn apply(self: *Self) void {
+         // NX Enable
+         const IA32_EFER = arch.registers.MSR(u64, 0xC0000080);
+         const efer_val = IA32_EFER.read() | (0b1 << 11);
+         IA32_EFER.write(efer_val);
+
+         // Set the level 5 bit accordingly
+         const cr4 = Cr4.read();
+         const level5mask: u64 = 1 << 12;
+         Cr4.write(if (self.level5) cr4 | level5mask else cr4 & ~level5mask);
+
+         Cr3.write(self.cr3_val);
+     }

-     pub fn decode(pte: *PageTable.Entry, level: u3) Self {
+     pub fn get_current() Context {
+         return .{
+             .cr3_val = Cr3.read(),
+             .level5 = detect_5level(),
+         };
+     }
+
+     pub fn can_map_at(_: *const Self, level: u3) bool {
+         return level < 2;
+     }
+
+     // We need the parameter because aarch64 has 2 root page tables
+     pub fn root_table(self: *Self, _: u64) TableHandle {
+         return .{
+             .paddr = self.cr3_val,
+             .level = if (self.level5) 5 else 4,
+             .context = self,
+             .perms = .{
+                 .x = true,
+                 .w = true,
+                 .u = true,
+             },
+             .underlying = null,
+         };
+     }
+
+     pub fn decode(self: *Self, pte: *PageTable.Entry, level: u3) SomePteHandle {
          if (!pte.present) {
              return .Empty;
          }
          if (!pte.huge and level != 0) {
-             return .{ .Table = decode_table(pte, level) };
+             return .{ .Table = self.parse_table(pte, level) };
          }
-         return .{ .Mapping = decode_mapping(pte, level) };
+         return .{ .Mapping = self.parse_mapping(pte, level) };
      }

-     pub fn decode_table(pte: *PageTable.Entry, level: u3) TableHandle {
+     pub fn parse_mapping(self: *Self, pte: *PageTable.Entry, level: u3) MappingHandle {
+         const memory_type = self.decode_memory_type(pte, level);
          return .{
-             .phys_addr = pte.getAddr(),
+             .context = self,
+             .paddr = pte.getAddr(),
              .level = level,
+             .memory_type = memory_type,
              .underlying = pte,
              .perms = .{
-                 .writable = pte.writable,
-                 .executable = !pte.nx,
-                 .userspace_accessible = pte.user_accessible,
+                 .w = pte.writable,
+                 .x = !pte.nx,
+                 .u = pte.user_accessible,
+             },
+         };
+     }
+
+     pub fn decode_memory_type(_: *Self, pte: *PageTable.Entry, _: u3) ?MemoryType {
+         return switch (pte.disable_cache) {
+             true => .DeviceUncacheable,
+             false => switch (pte.write_through) {
+                 true => .MemoryWritethrough,
+                 false => .MemoryWriteBack,
              },
          };
      }

-     pub fn decode_mapping(pte: *PageTable.Entry, level: u3) MappingHandle {
+     pub fn encode_memory_type(_: *Self, pte: *PageTable.Entry, mapping_handle: MappingHandle) void {
+         switch (mapping_handle.memory_type.?) {
+             .MemoryWritethrough => pte.write_through = true,
+             .DeviceUncacheable => pte.disable_cache = true,
+             .MemoryWriteBack => {},
+             else => @panic("bad memory type"),
+         }
+     }
+
+     pub fn parse_table(self: *Self, pte: *PageTable.Entry, level: u3) TableHandle {
          return .{
-             .phys_addr = pte.getAddr(),
+             .context = self,
+             .paddr = pte.getAddr(),
              .level = level,
-             // TODO: memory types
-             .memory_type = null,
              .underlying = pte,
              .perms = .{
-                 .writable = pte.writable,
-                 .executable = !pte.nx,
-                 .userspace_accessible = pte.user_accessible,
+                 .w = pte.writable,
+                 .x = !pte.nx,
+                 .u = pte.user_accessible,
              },
          };
      }
+
+     pub fn encode_mapping(self: *Self, mapping_handle: MappingHandle) PageTable.Entry {
+         var pte = std.mem.zeroes(PageTable.Entry);
+         pte.setAddr(mapping_handle.paddr);
+         pte.present = true;
+         if (mapping_handle.level != 0) {
+             pte.huge = true;
+         }
+
+         pte.writable = mapping_handle.perms.w;
+         pte.user_accessible = mapping_handle.perms.u;
+         pte.nx = !mapping_handle.perms.x;
+
+         self.encode_memory_type(&pte, mapping_handle);
+         return pte;
+     }
+
+     pub fn encode_table(_: *Self, table_handle: TableHandle) PageTable.Entry {
+         var pte = std.mem.zeroes(PageTable.Entry);
+         pte.writable = table_handle.perms.w;
+         pte.user_accessible = table_handle.perms.u;
+         pte.nx = !table_handle.perms.x;
+         pte.setAddr(table_handle.paddr);
+
+         pte.present = true;
+         pte.huge = false;
+
+         return pte;
+     }
+
+     pub fn invalidate(_: *const Self, vaddr: u64) void {
+         asm volatile (
+             \\ invlpg (%[vaddr])
+             :
+             : [vaddr] "r" (vaddr),
+             : .{ .memory = true });
+     }
+
+     pub fn domain(_: *const Self, level: u3, vaddr: u64) StupidSlice {
+         return .{
+             .ptr = vaddr & ~(page_sizes[level] - 1),
+             .len = page_sizes[level],
+         };
+     }
+
+     pub fn virt_to_phys(context: *Context, vaddr: usize) ?usize {
+         const root = context.root_table(0).get_children();
+         const indexes = [_]usize{
+             (vaddr >> 39) & 0x1FF,
+             (vaddr >> 30) & 0x1FF,
+             (vaddr >> 21) & 0x1FF,
+             (vaddr >> 12) & 0x1FF,
+         };
+         var pte_ptr = &root[indexes[0]];
+         std.log.warn("{*}: {any}, addr 0x{x}", .{ pte_ptr, pte_ptr, pte_ptr.getAddr() });
+         for (0..3) |i| {
+             if (!pte_ptr.present) {
+                 return null;
+             }
+             const next_page_table = common.mm.physToHHDM(*PageTable, pte_ptr.getAddr());
+             pte_ptr = &next_page_table.entries[indexes[i + 1]];
+             std.log.warn("{*}: {any}, addr 0x{x}", .{ pte_ptr, pte_ptr, pte_ptr.getAddr() });
+         }
+         return pte_ptr.getAddr() + (vaddr & 0xFFF);
+     }
  };

- pub const MappingHandle = struct {
-     phys_addr: usize,
+ fn idx_from_level(vaddr: u64, level: u6) u9 {
+     const shamt = 12 + level * 9;
+     return @truncate(vaddr >> shamt);
+ }
+
+ pub fn make_page_table() !usize {
+     const page_size = std.heap.pageSize();
+     const paddr = try common.init_data.bootmem.allocPhys(page_size);
+     const pt_ptr = common.mm.physToHHDM([*]u8, paddr);
+     @memset(pt_ptr[0..page_size], 0);
+     return paddr;
+ }
+
+ pub const page_sizes = [_]usize{
+     0x1000, // 4K
+     0x200000, // 2M
+     0x40000000, // 1G
+     0x8000000000, // 512G
+     0x1000000000000, // 256T
+ };
+
+ const MappingHandle = struct {
+     paddr: u64,
      level: u3,
      memory_type: ?MemoryType,
+     context: *Context,
      perms: Perms,
      underlying: *PageTable.Entry,
  };

  pub const TableHandle = struct {
-     phys_addr: usize,
+     paddr: u64,
      level: u3,
+     context: *Context,
      perms: Perms,
      underlying: ?*PageTable.Entry,

      const Self = @This();
-
-     // Get the child entries of this page table
      pub fn get_children(self: *const Self) []PageTable.Entry {
-         const page_table = physToVirt(*PageTable, self.phys_addr);
-         return page_table.entries[0..];
+         const pt = common.mm.physToHHDM(*PageTable, self.paddr);
+         return pt.entries[0..];
      }

-     // Get children from the position holding the table and on
-     pub fn skip_to(self: *const Self, vaddr: usize) []PageTable.Entry {
-         return self.get_children()[extract_index_from_vaddr(vaddr, self.level - 1)..];
+     pub fn skip_to(self: *const Self, vaddr: u64) []PageTable.Entry {
+         return self.get_children()[idx_from_level(vaddr, self.level - 1)..];
      }

-     // Decode child table given an entry
-     pub fn decode_child(self: *const Self, pte: *PageTable.Entry) TypedPTE {
-         return TypedPTE.decode(pte, self.level - 1);
+     pub fn decode_child(self: *const Self, pte: *PageTable.Entry) SomePteHandle {
+         return self.context.decode(pte, self.level - 1);
      }

      pub fn addPerms(self: *const Self, perms: Perms) void {
-         if (perms.executable) {
+         if (perms.x) {
              self.underlying.?.nx = false;
          }
-         if (perms.writable) {
+         if (perms.w) {
              self.underlying.?.writable = true;
          }
-         if (perms.userspace_accessible) {
+         if (perms.u) {
              self.underlying.?.user_accessible = true;
          }
      }

-     pub fn child_domain(self: *const Self, vaddr: usize) UntypedSlice {
-         return domain(vaddr, self.level - 1);
-     }
-
      pub fn make_child_table(self: *const Self, pte: *PageTable.Entry, perms: Perms) !TableHandle {
          const pmem = try make_page_table();

          const result: TableHandle = .{
-             .phys_addr = pmem,
+             .paddr = pmem,
+             .context = self.context,
              .level = self.level - 1,
              .perms = perms,
              .underlying = pte,
          };
-         pte.* = encode_table(result);
+         pte.* = self.context.encode_table(result);

          return result;
      }

-     pub fn make_child_mapping(
-         self: *const Self,
-         pte: *PageTable.Entry,
-         paddr: ?usize,
-         perms: Perms,
-         memory_type: MemoryType,
-     ) !MappingHandle {
+     pub fn make_child_mapping(self: *const Self, pte: *PageTable.Entry, paddr: ?u64, perms: Perms, memory_type: MemoryType) !MappingHandle {
          const page_size = page_sizes[self.level - 1];
          const pmem = paddr orelse try common.init_data.bootmem.allocPhys(page_size);

          const result: MappingHandle = .{
              .level = self.level - 1,
              .memory_type = memory_type,
+             .context = self.context,
              .perms = perms,
              .underlying = pte,
-             .phys_addr = pmem,
+             .paddr = pmem,
          };

-         pte.* = encode_mapping(result);
+         pte.* = self.context.encode_mapping(result);

          return result;
      }
- };

- pub fn root_table(vaddr: usize) TableHandle {
-     _ = vaddr;
-     const cr3_val = arch.registers.ControlRegisters.Cr3.read() & 0xFFFF_FFFF_FFFF_F000;
-     return .{
-         .phys_addr = cr3_val,
-         // TODO: detect and support 5 level paging!
-         .level = 4,
-         .perms = .{
-             .executable = true,
-             .writable = true,
-         },
-         .underlying = null,
-     };
- }
-
- fn encode_table(pte_handle: TableHandle) PageTable.Entry {
-     var pte = std.mem.zeroes(PageTable.Entry);
-
-     pte.setAddr(pte_handle.phys_addr);
-     pte.writable = pte_handle.perms.writable;
-     pte.user_accessible = pte_handle.perms.userspace_accessible;
-     pte.nx = !pte_handle.perms.executable;
-     pte.present = true;
-     pte.huge = false;
-
-     return pte;
- }
-
- fn encode_mapping(pte_handle: MappingHandle) PageTable.Entry {
-     var pte = std.mem.zeroes(PageTable.Entry);
-
-     pte.setAddr(pte_handle.phys_addr);
-     pte.present = true;
-
-     if (pte_handle.level != 0) {
-         pte.huge = true;
+     pub fn child_domain(self: *const Self, vaddr: u64) StupidSlice {
+         return self.context.domain(self.level - 1, vaddr);
      }
-
-     pte.writable = pte_handle.perms.writable;
-     pte.user_accessible = pte_handle.perms.userspace_accessible;
-     pte.nx = !pte_handle.perms.executable;
-
-     encode_memory_type(&pte, pte_handle);
-
-     return pte;
- }
-
- fn encode_memory_type(pte: *PageTable.Entry, pte_handle: MappingHandle) void {
-     const mt = pte_handle.memory_type orelse @panic("Unknown memory type");
-
-     // TODO: Page Attribute Table
-     switch (mt) {
-         .MemoryWritethrough => pte.write_through = true,
-         .DeviceUncacheable => pte.disable_cache = true,
-         .MemoryWriteBack => {},
-         else => @panic("Cannot set memory type"),
-     }
- }
+ };

- /// Returns physical address
- fn make_page_table() !usize {
-     const pt_phys = try common.init_data.bootmem.allocPhys(std.heap.pageSize());
-     const pt = physToVirt([*]u8, pt_phys);
-     @memset(pt[0..std.heap.pageSize()], 0x00);
-     return pt_phys;
- }
+ pub const SomePteHandle = union(common.mm.paging.PTEType) {
+     Mapping: MappingHandle,
+     Table: TableHandle,
+     Empty,
+ };

- pub fn invalidate(vaddr: u64) void {
-     asm volatile (
-         \\ invlpg (%[vaddr])
-         :
-         : [vaddr] "r" (vaddr),
-         : .{ .memory = true });
- }
-
- const UntypedSlice = struct {
+ pub const StupidSlice = struct {
      len: usize,
      ptr: usize,
  };
-
- pub fn domain(vaddr: usize, level: u3) UntypedSlice {
-     return .{
-         .len = page_sizes[level],
-         .ptr = vaddr & ~(page_sizes[level] - 1),
-     };
- }
-
- pub const MemoryType = enum {
-     DeviceUncacheable,
-     DeviceWriteCombining,
-     MemoryWritethrough,
-     MemoryWriteBack,
- };
-
- pub fn can_map_at(level: u3) bool {
-     return level < 2;
- }
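Context is the new seam between the generic mapper and amd64: get_current() snapshots CR3 and the CR4.LA57 state, and apply() reinstalls both along with EFER.NXE. A sketch of round-tripping a context and resolving an address with the new virt_to_phys helper (the address is illustrative):

    var ctx = Context.get_current(); // snapshot CR3 + 5-level flag
    ctx.apply(); // reinstall EFER.NXE, the CR4 level-5 bit, and CR3

    const vaddr: usize = 0xffff_8000_0010_0000; // hypothetical higher-half address
    if (ctx.virt_to_phys(vaddr)) |paddr| {
        std.log.info("0x{x} -> 0x{x}", .{ vaddr, paddr });
    }

Note that virt_to_phys as committed hard-codes a 4-level walk (four indexes starting at bit 39), so a level5 context would need a fifth step.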
+6 -4
components/ukernel/common/loader.zig
···
  const common = @import("root.zig");
+ const arch = @import("arch");
  const paging = common.mm.paging;
  const std = @import("std");
  const elf = std.elf;
  const log = std.log.scoped(.elf_loader);

  // Load root task, return the entry point
- pub fn loadRootTask() !usize {
+ pub fn loadRootTask(context: *arch.mm.paging.Context) !usize {
      const root_task = common.init_data.root_task;
      const hdr = blk: {
          const hdr: *elf.Elf64_Ehdr = @ptrCast(root_task);
···
          .size = memsz_pages,
          .memory_type = .MemoryWriteBack,
          .perms = .{
-             .executable = entry.p_flags & elf.PF_X > 0,
-             .writable = entry.p_flags & elf.PF_W > 0,
-             .userspace_accessible = true,
+             .x = entry.p_flags & elf.PF_X > 0,
+             .w = entry.p_flags & elf.PF_W > 0,
+             .u = true,
          },
+         .context = context,
      });

      // 2. Copy filesz bytes from offset to this new page
+24 -17
components/ukernel/common/mm/paging.zig
···
  const std = @import("std");
  const TableHandle = arch.mm.paging.TableHandle;
  const MemoryType = arch.mm.paging.MemoryType;
+ const Context = arch.mm.paging.Context;

  pub const Perms = struct {
-     writable: bool,
-     executable: bool,
-     userspace_accessible: bool = false,
+     /// Writable
+     w: bool,
+     /// Executable
+     x: bool,
+     /// Userspace Accessible
+     u: bool = false,

      const Self = @This();

      /// Verify that the current permissions are a superset of the provided ones
      pub fn allows(self: Self, other: Self) bool {
-         if (!self.writable and other.writable) {
+         if (!self.w and other.w) {
              return false;
          }
-         if (!self.executable and other.executable) {
+         if (!self.x and other.x) {
              return false;
          }
-         if (!self.userspace_accessible and other.userspace_accessible) {
+         if (!self.u and other.u) {
              return false;
          }
          return true;
···
      /// OR two permissions
      pub fn addPerms(self: Self, other: Self) Self {
          return .{
-             .writable = self.writable or other.writable,
-             .executable = self.executable or other.executable,
-             .userspace = self.userspace_accessible or other.userspace_accessible,
+             .w = self.w or other.w,
+             .x = self.x or other.x,
+             .u = self.u or other.u,
          };
      }
  };
···
      size: usize,
      perms: Perms,
      memory_type: MemoryType,
+     context: *Context,
  }) !void {
-     const root = arch.mm.paging.root_table(args.vaddr);
+     const root = args.context.root_table(args.vaddr);
      var vaddr = args.vaddr;
      var paddr = args.paddr;
      var size = args.size;
-     try mapPageImpl(&vaddr, &paddr, &size, root, args.perms, args.memory_type);
+     try mapPageImpl(&vaddr, &paddr, &size, root, args.perms, args.memory_type, args.context);
  }

  pub fn map(args: struct {
      vaddr: usize,
      size: usize,
      perms: Perms,
      memory_type: MemoryType,
+     context: *Context,
  }) !void {
-     const root = arch.mm.paging.root_table(args.vaddr);
+     const root = args.context.root_table(args.vaddr);
      var vaddr = args.vaddr;
      var size = args.size;
-     try mapPageImpl(&vaddr, null, &size, root, args.perms, args.memory_type);
+     try mapPageImpl(&vaddr, null, &size, root, args.perms, args.memory_type, args.context);
  }

  fn mapPageImpl(
···
      table: TableHandle,
      perms: Perms,
      memory_type: MemoryType,
+     context: *Context,
  ) !void {
      // 1. Get slice of every child from the target forwards
      const children = table.skip_to(vaddr.*);
···
      switch (table.decode_child(child)) {
          .Mapping => return error.AlreadyPresent,
          .Table => |*tbl| {
-             try mapPageImpl(vaddr, paddr, size, tbl.*, perms, memory_type);
+             try mapPageImpl(vaddr, paddr, size, tbl.*, perms, memory_type, context);
              if (!tbl.perms.allows(perms)) {
                  tbl.addPerms(perms);
-                 arch.mm.paging.invalidate(vaddr.*);
+                 context.invalidate(vaddr.*);
              }
          },
          .Empty => {
              const domain = table.child_domain(vaddr.*);
-             if (domain.ptr == vaddr.* and domain.len <= size.* and arch.mm.paging.can_map_at(table.level - 1) and is_aligned(vaddr.*, paddr, table.level - 1)) {
+             if (domain.ptr == vaddr.* and domain.len <= size.* and context.can_map_at(table.level - 1) and is_aligned(vaddr.*, paddr, table.level - 1)) {
                  // Make child mapping etc
                  _ = try table.make_child_mapping(child, if (paddr) |p| p.* else null, perms, memory_type);
                  const step = domain.len;
···
              } else {
                  const tbl = try table.make_child_table(child, perms);
-                 try mapPageImpl(vaddr, paddr, size, tbl, perms, memory_type);
+                 try mapPageImpl(vaddr, paddr, size, tbl, perms, memory_type, context);
              }
          },
      }
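Since can_map_at only admits levels 0 and 1, the recursive mapper emits a 2 MiB huge-page PTE whenever a request lines up with a whole level-1 domain. A sketch of that path under the new API, with a made-up aligned address:

    // assuming `ctx: *Context` obtained from Context.get_current()
    try map(.{
        .vaddr = 0x4020_0000, // 2 MiB aligned (illustrative)
        .size = 0x20_0000, // exactly one level-1 domain
        .memory_type = .MemoryWriteBack,
        .perms = .{ .x = false, .w = true },
        .context = ctx,
    });
    // mapPageImpl descends until table.level == 2; child_domain() then yields
    // .{ .ptr = vaddr, .len = 0x20_0000 }, can_map_at(1) holds, and
    // make_child_mapping encodes a single PTE with huge = true.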