assets/limine.conf (+2 -1)
build.zig (+12 -5)
···
     const ukernel_inst = b.addInstallFile(ukernel_artifact.getEmittedBin(), arch.kernelExeName());
     b.getInstallStep().dependOn(&ukernel_inst.step);

-    const root_dep = b.dependency("root_server", .{
+    const root_69 = b.dependency("root_server", .{
+        .arch = arch,
+        .number = 0x69,
+    }).artifact("root_server");
+    const root_69_inst = b.addInstallFile(root_69.getEmittedBin(), "root-69.elf");
+    b.getInstallStep().dependOn(&root_69_inst.step);
+
+    const root_420 = b.dependency("root_server", .{
         .arch = arch,
-    });
-    const root_artifact = root_dep.artifact("root_server");
-    const root_inst = b.addInstallFile(root_artifact.getEmittedBin(), arch.rootTaskName());
-    b.getInstallStep().dependOn(&root_inst.step);
+        .number = 0x420,
+    }).artifact("root_server");
+    const root_420_inst = b.addInstallFile(root_420.getEmittedBin(), "root-420.elf");
+    b.getInstallStep().dependOn(&root_420_inst.step);

     // Run in QEMU
     run_blk: {
components/root_server/build.zig (+2)
···

 pub fn build(b: *std.Build) void {
     const arch = b.option(build_helpers.Architecture, "arch", "The target root_server architecture") orelse .amd64;
+    const number = b.option(usize, "number", "The syscall number to use") orelse 0x69;

     // set CPU features based on the architecture
     const target = b.resolveTargetQuery(.{
···

     const config = b.addOptions();
     config.addOption(build_helpers.Architecture, "arch", arch);
+    config.addOption(usize, "number", number);

     const build_helpers_dep = b.dependency("build_helpers", .{});
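
The -Dnumber option above only becomes visible to @import("config") in src/main.zig once the options step is attached to the executable's root module; that wiring sits in a hunk not expanded here. A minimal sketch of the usual pattern, written as a standalone build script with an illustrative exe artifact (names and structure are assumptions, not this project's actual code):

const std = @import("std");

pub fn build(b: *std.Build) void {
    // Hypothetical executable standing in for the root_server artifact.
    const exe = b.addExecutable(.{
        .name = "root_server",
        .root_module = b.createModule(.{
            .root_source_file = b.path("src/main.zig"),
            .target = b.standardTargetOptions(.{}),
            .optimize = b.standardOptimizeOption(.{}),
        }),
    });

    // `zig build -Dnumber=0x420` lands here; defaults to 0x69 otherwise.
    const number = b.option(usize, "number", "The syscall number to use") orelse 0x69;

    // addOptions generates a small Zig module at build time...
    const config = b.addOptions();
    config.addOption(usize, "number", number);

    // ...and exposing it under the name "config" is what lets src/main.zig
    // resolve `@import("config").number` at compile time.
    exe.root_module.addOptions("config", config);

    b.installArtifact(exe);
}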
components/root_server/src/main.zig (+6 -2)
 const std = @import("std");
 const os = @import("os.zig");
+const config = @import("config");

 export fn _start() callconv(.c) noreturn {
     _ = os.syscall1(SYS_poke, 0xB16B00B5BADBABE);
     _ = os.syscall1(SYS_exit, 0x69696969);
     asm volatile ("int3");
     asm volatile (
-        \\ mov $0x69696969, %%rdi
+        \\ mov %[number], %%rdi
         \\ xor %%rsi, %%rsi
         \\ xor %%rbx, %%rbx
         \\ mainloop:
         \\ xor %%rax, %%rax
         \\ delayloop:
         \\ inc %%rax
-        \\ cmp $0x4000000, %%rax
+        \\ cmp $0x1000000, %%rax
         \\ jnz delayloop
         \\ inc %%rbx
+        \\ mov %%rsp, %%rsi
         \\ syscall
         \\ jmp mainloop
+        :
+        : [number] "r" (config.number),
     );

     die();
components/ukernel/arch/aarch64/boot.zig (+1 -1)
···
             const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
             const mod_size = mod.size;
             log.info("Loading root task with {s} @ {*}", .{ mod.path, mod.address });
-            common.init_data.root_task = mod_addr[0..mod_size];
+            common.init_data.root_task_elf = mod_addr[0..mod_size];
         }
     } else {
         @branchHint(.unlikely);
components/ukernel/arch/amd64/boot.zig (+21 -5)
···
     const gdt = &arch.per_cpu_init_data.gdt_buf[0];
     gdt.* = .{};
     const tss = &arch.per_cpu_init_data.tss_buf[0];
-    // TSS rsp 0x3800
+    // TODO: create a fixed mapping for the pages maybe?
     tss.* = .{
-        .rsp0 = 0x7ffe_0000_8000,
-        .rsp1 = 0x7ffe_0000_8000,
-        .rsp2 = 0x7ffe_0000_8000,
+        .rsp0 = common.init_data.hhdm_slide + arch.per_cpu_init_data.getStackPhys(0),
     };

     gdt.tss_desc.set_tss_addr(tss);
···
             const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
             const mod_size = mod.size;
             log.info("Loading root task with {s} @ {*}", .{ mod.path, mod.address });
-            common.init_data.root_task = mod_addr[0..mod_size];
+            common.init_data.root_task_elf = mod_addr[0..mod_size];
         }
     } else {
         @branchHint(.unlikely);
···
     }

     bootstrapAPs();
+}
+
+pub fn loadTasks() void {
+    const tasks_buf: [*]arch.structures.Task = @ptrFromInt(common.init_data.bootmem.allocMem(std.heap.pageSize()) catch {
+        std.log.err("Couldn't allocate tasks!", .{});
+        @panic("allocPhys");
+    });
+    const tasks_scratch: []arch.structures.Task = tasks_buf[0 .. std.heap.pageSize() / @sizeOf(arch.structures.Task)];
+
+    if (limine_requests.modules.response) |module_response| {
+        if (module_response.module_count > 0) {
+            for (module_response.modules.?[0..module_response.module_count], 0..) |mod, i| {
+                const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
+                const mod_size = mod.size;
+                common.loadTask(&tasks_scratch[i], mod_addr[0..mod_size]);
+            }
+        }
+    }
 }

 fn initHwDesc() void {
components/ukernel/arch/amd64/interrupts/apic.zig (+17 -4)
···
 }

 pub fn timer_handler(stack_trace: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
-    log.warn("Got an APIC timer interrupt, incrementing user's rsi...", .{});
-    stack_trace.regs.rsi += 1;
-    singleton.setRegister(.eoi, 0);
-    armTimer(1000);
+    defer {
+        singleton.setRegister(.eoi, 0);
+        armTimer(20);
+    }
+    // 1. Get the next task. If there is no next task, just keep scheduling.
+    const task = common.scheduler.getNextTask() orelse return;
+    // 2. Swap the next task state with the current interrupt trace
+    std.mem.swap(arch.interrupts.idt.SavedRegisters, &task.regs, &stack_trace.regs);
+    std.mem.swap(u64, &task.rip, &stack_trace.rip);
+    std.mem.swap(u64, &task.rsp, &stack_trace.rsp);
+    // If task has a new cr3, swap current CR3 and task cr3 too
+    if (task.cr3_val != stack_trace.cr3) {
+        arch.registers.ControlRegisters.Cr3.write(task.cr3_val);
+        task.cr3_val = stack_trace.cr3;
+    }
+    // 3. Now, `task` has our current state, so enqueue it.
+    common.scheduler.pushTask(task);
 }
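
A note on the handler above: state is never copied one way. Swapping the interrupt frame with the dequeued task's saved state simultaneously restores the next task (the frame is what the interrupt return reloads) and captures the preempted one, which then goes back on the run queue. A toy, hosted sketch of that exchange, using a made-up two-field State in place of the kernel's SavedRegisters:

const std = @import("std");

// Stand-in for the saved CPU state; the kernel swaps SavedRegisters,
// rip, and rsp the same way inside timer_handler.
const State = struct { rip: u64, rsp: u64 };

pub fn main() void {
    // What the interrupt pushed: the task that was just running.
    var frame = State{ .rip = 0x1111, .rsp = 0x7ffe_0001_0000 };
    // The task dequeued from the scheduler.
    var next = State{ .rip = 0x2222, .rsp = 0x7ffe_0002_0000 };

    // One swap: `frame` now resumes `next`, while `next` holds the
    // preempted state, ready to be re-enqueued.
    std.mem.swap(State, &frame, &next);

    std.debug.print(
        "frame resumes rip=0x{x}; preempted rip=0x{x} goes back on the queue\n",
        .{ frame.rip, next.rip },
    );
}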
components/ukernel/arch/amd64/interrupts/idt.zig (+2)
components/ukernel/arch/amd64/interrupts/root.zig (+15)
···
 const std = @import("std");
 const log = std.log.scoped(.interrupts);
 const arch = @import("../root.zig");
+const common = @import("common");

 pub inline fn enable() void {
     asm volatile ("sti");
···
     }
     print_regs(stack_frame.normalize());
     arch.instructions.die();
+}
+
+// Start scheduling
+pub fn startScheduling() noreturn {
+    // 1. Pop off the task to run
+    const task = common.scheduler.getNextTask() orelse {
+        std.log.scoped(.startScheduling).err("No root task!", .{});
+        @panic("startScheduling");
+    };
+    // 2. Apply the paging context
+    task.getPagingContext().apply();
+    // 3. Hand out the first time slice and fire away
+    apic.armTimer(20);
+    enter_userspace(task.rip, 0x69, task.rsp);
 }

 // Set up the IDT, PIC, TSC, and APIC
components/ukernel/arch/amd64/mm/paging.zig (+21 -7)
···
     level5: bool,

     const Self = @This();
-    pub fn apply(self: *Self) void {
+    pub fn apply(self: *const Self) void {
         // NX Enable
         const IA32_EFER = arch.registers.MSR(u64, 0xC0000080);
         const efer_val = IA32_EFER.read() | (0b1 << 11);
···
         };
     }

+    pub fn make_user() !Context {
+        // Make a new root page table
+        const user_root_paddr = try make_page_table();
+        const user_root = common.mm.physToHHDM(*PageTable, user_root_paddr);
+        // Copy the entire higher half entries
+        const higher_half = common.init_data.kernel_paging_ctx.root_table(0).get_children();
+        @memcpy(user_root.entries[256..], higher_half[256..]);
+        return .{
+            .cr3_val = user_root_paddr,
+            .level5 = common.init_data.kernel_paging_ctx.level5,
+        };
+    }
+
     pub fn can_map_at(_: *const Self, level: u3) bool {
         return level < 2;
     }
···
     // We need the parameter because aarch64 has 2 root page tables
     pub fn root_table(self: *Self, _: u64) TableHandle {
         return .{
-            .paddr = self.cr3_val,
+            // Mask out the cr3 value
+            .paddr = self.cr3_val & 0xFFFFFFFF_FFFFF000,
             .level = if (self.level5) 5 else 4,
             .context = self,
             .perms = .{
···
 }

 pub const page_sizes = [_]usize{
-    0x1000, // 4K
-    0x200000, // 2M
-    0x40000000, // 1G
-    0x8000000000, // 512G
-    0x1000000000000, // 256T
+    0x1000,
+    0x200000,
+    0x40000000,
+    0x8000000000,
+    0x1000000000000,
 };

 const MappingHandle = struct {
components/ukernel/arch/amd64/root.zig (+18 -3)
···

     gdt_buf: []StandardGdt = undefined,
     tss_buf: []Tss = undefined,
+    // Physical ptr
+    stack_buf: usize = undefined,
+
+    const stack_size = std.heap.page_size_max;

     const Self = @This();
     pub fn init(self: *Self, cpu_count: u64) void {
-        // 1. Allocate space for GDT and TSS data
+        // 1. Allocate stack space for every core
+        self.stack_buf = common.init_data.bootmem.allocPhys(stack_size * cpu_count) catch |err| {
+            std.log.err("init PerCpuInitData: failed to allocate stack! {}", .{err});
+            @panic("stack_buf");
+        };
+
+        // 2. Allocate space for GDT and TSS data
         const gdt_size = @sizeOf(StandardGdt);
         const tss_size = @sizeOf(Tss);

         const total_required_size = gdt_size * cpu_count + tss_size * cpu_count;
         const buf: [*]u8 = @ptrFromInt(common.init_data.bootmem.allocMem(total_required_size) catch |err| {
             std.log.err("init PerCpuInitData: GDT/TSS alloc failed: {}", .{err});
-            @panic("rip bozo");
+            @panic("gdt_tss_buf");
         });

-        // 2. Transmute and fill out the structure
+        // 3. Transmute and fill out the structure
         const gdt_buf: [*]StandardGdt = @ptrCast(@alignCast(buf[0 .. gdt_size * cpu_count]));
         const tss_buf: [*]Tss = @ptrCast(@alignCast(buf[gdt_size * cpu_count ..][0 .. tss_size * cpu_count]));
         self.gdt_buf = gdt_buf[0..cpu_count];
         self.tss_buf = tss_buf[0..cpu_count];
+    }
+
+    // returns a pointer to the TOP of the stack!
+    pub fn getStackPhys(self: *Self, core_num: usize) usize {
+        return self.stack_buf + (core_num + 1) * stack_size;
     }
 };
components/ukernel/arch/amd64/structures/root.zig (+24)
 pub const gdt = @import("gdt.zig");
 pub const tss = @import("tss.zig");
+const arch = @import("../root.zig");
+const common = @import("common");
+const Queue = @import("Queue");
+
+// Uses an intrusive queue
+pub const Task = struct {
+    // Saved Registers
+    regs: arch.interrupts.idt.SavedRegisters align(8),
+    // Address Space context
+    cr3_val: u64,
+    // Instruction Pointer
+    rip: u64,
+    // Stack Pointer
+    rsp: u64,
+    // Next task basically
+    node: Queue.Node = .{},
+
+    pub fn getPagingContext(self: Task) arch.mm.paging.Context {
+        return .{
+            .cr3_val = self.cr3_val,
+            .level5 = common.init_data.kernel_paging_ctx.level5,
+        };
+    }
+};
components/ukernel/build.zig (+3)
···
     const spinlock_mod = b.dependency("spinlock", .{}).module("spinlock");
     const limine_mod = b.dependency("limine", .{ .api_revision = 3 }).module("limine");
     const console_mod = b.dependency("console", .{}).module("console");
+    const queue_mod = b.dependency("Queue", .{}).module("Queue");

     arch_module.addImport("limine", limine_mod);
     arch_module.addImport("console", console_mod);
     arch_module.addImport("common", common_mod);
+    arch_module.addImport("Queue", queue_mod);

     common_mod.addImport("arch", arch_module);
     common_mod.addImport("spinlock", spinlock_mod);
     common_mod.addImport("console", console_mod);
+    common_mod.addImport("Queue", queue_mod);

     const kernel = b.addExecutable(.{
         .name = "ukernel",
components/ukernel/build.zig.zon (+4)
···
         .spinlock = .{ .path = "deps/spinlock" },
         .build_helpers = .{ .path = "../build_helpers" },
         .console = .{ .path = "deps/console" },
+        .Queue = .{
+            .url = "git+https://tangled.sh/@sydney.blue/Queue.zig?ref=dev#6c0760e8a233c1d59554a40a87f0ef293a9697f3",
+            .hash = "Queue-0.0.0-upnEfhEPAADNV4Dvs3DVCRSnOh-BrhgsRR6scaE2qTIa",
+        },
     },
     .paths = .{
         "build.zig",
components/ukernel/common/aux.zig (+1 -1)
···
     console: ?console.Console = null,
     framebuffer: ?console.Framebuffer = null,
     hardware_description: HardwareDescription = .none,
-    root_task: []align(4096) u8 = undefined,
+    root_task_elf: []align(4096) u8 = undefined,
     hhdm_slide: usize = 0,
     kernel_paging_ctx: arch.mm.paging.Context = undefined,
 };
components/ukernel/common/loader.zig (+4 -5)
···
 const log = std.log.scoped(.elf_loader);

 // Load root task, return the entry point
-pub fn loadRootTask(context: *arch.mm.paging.Context) !usize {
-    const root_task = common.init_data.root_task;
+pub fn loadElf(context: *arch.mm.paging.Context, task_slice: []align(4096) u8) !usize {
     const hdr = blk: {
-        const hdr: *elf.Elf64_Ehdr = @ptrCast(root_task);
+        const hdr: *elf.Elf64_Ehdr = @ptrCast(task_slice);
         break :blk elf.Header.init(hdr.*, .little);
     };
-    var iter = hdr.iterateProgramHeadersBuffer(root_task);
+    var iter = hdr.iterateProgramHeadersBuffer(task_slice);
     while (try iter.next()) |entry| {
         if ((entry.p_type != elf.PT_LOAD) or (entry.p_memsz == 0)) continue;

···
         const dst = common.mm.physToHHDM([*]u8, page_backing + vaddr_shift);
         const dst_slice = dst[0..entry.p_filesz];

-        const src_slice = root_task[entry.p_offset..][0..entry.p_filesz];
+        const src_slice = task_slice[entry.p_offset..][0..entry.p_filesz];
         @memcpy(dst_slice, src_slice);

         // 3. Add memsz - filesz zeroes
components/ukernel/common/root.zig (+29 -13)
 pub const aux = @import("aux.zig");
 pub const mm = @import("mm/root.zig");
-pub const loadRootTask = loader.loadRootTask;
+pub const scheduler = @import("scheduler.zig");
+pub const loadElf = loader.loadElf;
 const arch = @import("arch");
 const std = @import("std");
 const loader = @import("loader.zig");
···

     // Now, set up interrupts
     arch.interrupts.init();
+    arch.interrupts.init_syscalls();

-    log.info("Loading root task...", .{});
+    log.info("Loading attached tasks...", .{});
+    arch.boot.loadTasks();

-    // The following needs to be genericized and unshittified
+    log.info("Dropping to userspace!", .{});

-    // Allocate a stack
+    arch.interrupts.startScheduling();
+}
+
+pub fn loadTask(scratch: *arch.structures.Task, task_slice: []align(4096) u8) void {
+    // 1. Create a user address space
+    var user_ctx = arch.mm.paging.Context.make_user() catch |err| {
+        std.log.err("Failed to make user context! {}", .{err});
+        @panic("make_user_ctx");
+    };
+
+    // 2. Allocate a user stack
     mm.paging.map(.{
         .vaddr = 0x7ffe_0000_0000,
         .size = 65536,
···
             .u = true,
             .w = true,
         },
+        .context = &user_ctx,
     }) catch @panic("couldn't map user stack");

-    // TODO: make user page tables!
-    const entry = loadRootTask(&init_data.kernel_paging_ctx) catch |err| {
-        log.err("Couldn't load the root task! {}", .{err});
+    // 3. Map ELF into address space
+    const entry = loadElf(&user_ctx, task_slice) catch |err| {
+        std.log.err("Couldn't load the root task! {}", .{err});
         @panic("ggz");
     };
-    log.info("Dropping to userspace entry 0x{x:0>16}", .{entry});
-
-    arch.interrupts.init_syscalls();
-
-    arch.interrupts.apic.armTimer(1000);
-    arch.interrupts.enter_userspace(entry, 0x69, 0x7ffe_0001_0000);
+    // 4. Add task to scheduler
+    scratch.* = .{
+        .cr3_val = user_ctx.cr3_val,
+        .regs = .default,
+        .rip = entry,
+        .rsp = 0x7ffe_0001_0000,
+    };
+    scheduler.pushTask(scratch);
 }

 // std options etc.
components/ukernel/common/scheduler.zig (+16)
+const std = @import("std");
+const arch = @import("arch");
+const Queue = @import("Queue");
+const Task = arch.structures.Task;
+
+var task_queue: Queue = .{};
+
+pub fn pushTask(task: *Task) void {
+    task_queue.enqueue(&task.node);
+}
+
+pub fn getNextTask() ?*Task {
+    const node = task_queue.dequeue() orelse return null;
+    const task: *Task = @fieldParentPtr("node", node);
+    return task;
+}
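
scheduler.zig never stores Task pointers directly: only the embedded node is linked into the queue, and @fieldParentPtr recovers the owning Task on the way out. A self-contained sketch of that round trip, with a minimal FIFO standing in for the external Queue.zig dependency (whose real API is only assumed to match the enqueue/dequeue/Node surface used above):

const std = @import("std");

// Minimal intrusive FIFO with the surface the scheduler relies on
// (Node, enqueue, dequeue); a stand-in, not the actual Queue.zig code.
const Queue = struct {
    pub const Node = struct { next: ?*Node = null };

    head: ?*Node = null,
    tail: ?*Node = null,

    pub fn enqueue(q: *Queue, node: *Node) void {
        node.next = null;
        if (q.tail) |t| {
            t.next = node;
        } else {
            q.head = node;
        }
        q.tail = node;
    }

    pub fn dequeue(q: *Queue) ?*Node {
        const node = q.head orelse return null;
        q.head = node.next;
        if (q.head == null) q.tail = null;
        return node;
    }
};

// Simplified task: only what the queue round trip needs.
const Task = struct {
    rip: u64,
    node: Queue.Node = .{},
};

pub fn main() void {
    var queue: Queue = .{};
    var first = Task{ .rip = 0x1000 };
    var second = Task{ .rip = 0x2000 };

    // pushTask: only the embedded node enters the queue.
    queue.enqueue(&first.node);
    queue.enqueue(&second.node);

    // getNextTask: recover the owning Task from its node field.
    while (queue.dequeue()) |node| {
        const task: *Task = @fieldParentPtr("node", node);
        std.debug.print("next task rip=0x{x}\n", .{task.rip});
    }
}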