// Serenity Operating System — Prekernel entry point
1/*
2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
3 * Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
4 * Copyright (c) 2021, Liav A. <liavalb@hotmail.co.il>
5 *
6 * SPDX-License-Identifier: BSD-2-Clause
7 */
8
9#include <AK/Types.h>
10#include <Kernel/Multiboot.h>
11#include <Kernel/PhysicalAddress.h>
12#include <Kernel/Prekernel/Prekernel.h>
13#include <Kernel/VirtualAddress.h>
14#include <LibC/elf.h>
15#include <LibELF/Relocation.h>
16
17#if ARCH(X86_64)
18# include <Kernel/Arch/x86_64/ASM_wrapper.h>
19# include <Kernel/Arch/x86_64/CPUID.h>
20#endif
21
// Stack-smashing protector guard. Declared extern then defined here so the
// symbol exists even without a runtime to initialize it.
// NOTE(review): the original comment said "Defined in the linker script" —
// the definition is actually right below; confirm which symbol the comment
// was meant for.
extern uintptr_t __stack_chk_guard;
uintptr_t __stack_chk_guard __attribute__((used));
extern "C" [[noreturn]] void __stack_chk_fail();

// Physical extent of the pre-kernel image itself (symbols from the linker script).
extern "C" u8 start_of_prekernel_image[];
extern "C" u8 end_of_prekernel_image[];

// Symbols provided by boot.S: the 64-bit GDT pointer, the code segment
// selector, and the boot page-table hierarchy (PML4 -> PDPT -> PDs -> PTs).
extern "C" u8 gdt64ptr[];
extern "C" u16 code64_sel;
extern "C" u64 boot_pml4t[512];
extern "C" u64 boot_pdpt[512];
extern "C" u64 boot_pd0[512];
extern "C" u64 boot_pd0_pts[512 * (MAX_KERNEL_SIZE >> 21 & 0x1ff)];
extern "C" u64 boot_pd_kernel[512];
extern "C" u64 boot_pd_kernel_pt0[512];
extern "C" u64 boot_pd_kernel_image_pts[512 * (MAX_KERNEL_SIZE >> 21 & 0x1ff)];
extern "C" u64 boot_pd_kernel_pt1023[512];
// Kernel command line as passed by the bootloader (NUL-terminated, max 4 KiB).
extern "C" char const kernel_cmdline[4096];

// Reloads CR3 to flush the TLB after page-table edits (implemented in boot.S).
extern "C" void reload_cr3();

// Pointer to the multiboot information structure, filled in by boot.S from
// the value the bootloader left in a register.
extern "C" {
multiboot_info_t* multiboot_info_ptr;
}
47
// Halt the CPU forever.
//
// "hlt" only stops the core until the next interrupt/NMI; if one arrives,
// execution resumes after the instruction. The original body fell through
// into __builtin_unreachable() in that case, which is undefined behavior —
// so loop the hlt instead. The infinite loop also satisfies [[noreturn]]
// without needing an unreachable marker.
[[noreturn]] static void halt()
{
    for (;;)
        asm volatile("hlt");
}
53
// Called by compiler-generated stack-protector code when the canary is
// corrupted. There is no recovery this early in boot: just halt.
void __stack_chk_fail()
{
    halt();
}
58
// VERIFY() failure hook. The pre-kernel has no console or debug output
// facility visible here, so the failure details (expression, file, line,
// function) are discarded and the machine is halted.
void __assertion_failed(char const*, char const*, unsigned int, char const*)
{
    halt();
}
63
64namespace Kernel {
65
66// boot.S expects these functions to exactly have the following signatures.
67// We declare them here to ensure their signatures don't accidentally change.
68extern "C" [[noreturn]] void init();
69
70// SerenityOS Pre-Kernel Environment C++ entry point :^)
71//
72// This is where C++ execution begins, after boot.S transfers control here.
73//
74
75u64 generate_secure_seed();
76
// Pre-kernel C++ entry point, reached from boot.S.
//
// Takes the kernel ELF image (passed as the first multiboot module), picks a
// virtual load base (randomized for KASLR unless disabled), builds the
// kernel's page tables, copies the ELF segments into place, applies relative
// relocations, and finally jumps to the kernel's entry point with a populated
// BootInfo. Never returns. Statement order here is load-bearing: mappings
// must exist before the copy, and the stack must be shifted into the kernel
// mapping before the low identity mapping is torn down.
extern "C" [[noreturn]] void init()
{
    // The kernel ELF image must be handed to us as multiboot module 0.
    if (multiboot_info_ptr->mods_count < 1)
        halt();

    multiboot_module_entry_t* kernel_module = (multiboot_module_entry_t*)(FlatPtr)multiboot_info_ptr->mods_addr;

    u8* kernel_image = (u8*)(FlatPtr)kernel_module->start;
    // copy the ELF header and program headers because we might end up overwriting them
    ElfW(Ehdr) kernel_elf_header = *(ElfW(Ehdr)*)kernel_image;
    ElfW(Phdr) kernel_program_headers[16];
    // We only have room for 16 program headers; bail if the kernel has more.
    if (kernel_elf_header.e_phnum > array_size(kernel_program_headers))
        halt();
    __builtin_memcpy(kernel_program_headers, kernel_image + kernel_elf_header.e_phoff, sizeof(ElfW(Phdr)) * kernel_elf_header.e_phnum);

    // Kernel physical placement and its default virtual base (both offset by
    // 2 MiB; the first 2 MiB are occupied by the pre-kernel mapping).
    FlatPtr kernel_physical_base = 0x200000;
    FlatPtr default_kernel_load_base = KERNEL_MAPPING_BASE + 0x200000;

    FlatPtr kernel_load_base = default_kernel_load_base;

    // KASLR: slide the virtual load base by a random amount, rounded down to
    // a 2 MiB boundary, unless "disable_kaslr" appears on the command line.
    if (__builtin_strstr(kernel_cmdline, "disable_kaslr") == nullptr) {
        FlatPtr maximum_offset = (FlatPtr)KERNEL_PD_SIZE - MAX_KERNEL_SIZE - 2 * MiB; // The first 2 MiB are used for mapping the pre-kernel
        kernel_load_base += (generate_secure_seed() % maximum_offset);
        kernel_load_base &= ~(2 * MiB - 1);
    }

    // Compute the highest virtual address any PT_LOAD segment reaches, and
    // verify no segment (virtually or physically) overlaps the pre-kernel.
    FlatPtr kernel_load_end = 0;
    for (size_t i = 0; i < kernel_elf_header.e_phnum; i++) {
        auto& kernel_program_header = kernel_program_headers[i];
        if (kernel_program_header.p_type != PT_LOAD)
            continue;
        auto start = kernel_load_base + kernel_program_header.p_vaddr;
        auto end = start + kernel_program_header.p_memsz;
        if (start < (FlatPtr)end_of_prekernel_image)
            halt();
        if (kernel_physical_base + kernel_program_header.p_paddr < (FlatPtr)end_of_prekernel_image)
            halt();
        if (end > kernel_load_end)
            kernel_load_end = end;
    }

    // align to 1GB
    FlatPtr kernel_mapping_base = kernel_load_base & ~(FlatPtr)0x3fffffff;

    VERIFY(kernel_load_base % 0x1000 == 0);
    VERIFY(kernel_load_base >= kernel_mapping_base + 0x200000);

    int pdpt_flags = 0x3; // present | writable

    // Point the PDPT entry for the kernel's 1 GiB region at the kernel page directory.
    boot_pdpt[(kernel_mapping_base >> 30) & 0x1ffu] = (FlatPtr)boot_pd_kernel | pdpt_flags;

    // PD entry 0 covers the first 2 MiB of the region: the pre-kernel's
    // pseudo-identity mapping (the page table itself is filled in below).
    boot_pd_kernel[0] = (FlatPtr)boot_pd_kernel_pt0 | 0x3;

    // Hook one page table into the PD for every 2 MiB of the kernel image's
    // virtual range [kernel_load_base, kernel_load_end].
    for (FlatPtr vaddr = kernel_load_base; vaddr <= kernel_load_end; vaddr += PAGE_SIZE * 512)
        boot_pd_kernel[(vaddr - kernel_mapping_base) >> 21] = (FlatPtr)(&boot_pd_kernel_image_pts[(vaddr - kernel_load_base) >> 12]) | 0x3;

    __builtin_memset(boot_pd_kernel_pt0, 0, sizeof(boot_pd_kernel_pt0));

    // The pre-kernel image must fit inside what pt0 can map.
    VERIFY((size_t)end_of_prekernel_image < array_size(boot_pd_kernel_pt0) * PAGE_SIZE);

    /* pseudo-identity map 0M - end_of_prekernel_image */
    for (size_t i = 0; i < (FlatPtr)end_of_prekernel_image / PAGE_SIZE; i++)
        boot_pd_kernel_pt0[i] = i * PAGE_SIZE | 0x3;

    __builtin_memset(boot_pd_kernel_image_pts, 0, sizeof(boot_pd_kernel_image_pts));

    // Map every page of every PT_LOAD segment to its physical destination.
    for (size_t i = 0; i < kernel_elf_header.e_phnum; i++) {
        auto& kernel_program_header = kernel_program_headers[i];
        if (kernel_program_header.p_type != PT_LOAD)
            continue;
        for (FlatPtr offset = 0; offset < kernel_program_header.p_memsz; offset += PAGE_SIZE) {
            auto pte_index = ((kernel_load_base & 0x1fffff) + kernel_program_header.p_vaddr + offset) >> 12;
            boot_pd_kernel_image_pts[pte_index] = (kernel_physical_base + kernel_program_header.p_paddr + offset) | 0x3;
        }
    }

    // Last PD entry of the region gets its own page table.
    // NOTE(review): presumably reserved for the kernel's temporary-mapping
    // ("quickmap") region — confirm against the kernel's memory manager.
    boot_pd_kernel[511] = (FlatPtr)boot_pd_kernel_pt1023 | 0x3;

    reload_cr3();

    // Copy each PT_LOAD segment's file data to its mapped virtual address.
    // NOTE(review): iterates headers in reverse and uses memmove — presumably
    // because the module image and the load target can overlap; confirm.
    for (ssize_t i = kernel_elf_header.e_phnum - 1; i >= 0; i--) {
        auto& kernel_program_header = kernel_program_headers[i];
        if (kernel_program_header.p_type != PT_LOAD)
            continue;
        __builtin_memmove((u8*)kernel_load_base + kernel_program_header.p_vaddr, kernel_image + kernel_program_header.p_offset, kernel_program_header.p_filesz);
    }

    // Zero the trailing p_memsz - p_filesz bytes of each segment (the BSS).
    for (ssize_t i = kernel_elf_header.e_phnum - 1; i >= 0; i--) {
        auto& kernel_program_header = kernel_program_headers[i];
        if (kernel_program_header.p_type != PT_LOAD)
            continue;
        __builtin_memset((u8*)kernel_load_base + kernel_program_header.p_vaddr + kernel_program_header.p_filesz, 0, kernel_program_header.p_memsz - kernel_program_header.p_filesz);
    }

    // Consume the kernel module so the kernel only sees the remaining ones.
    multiboot_info_ptr->mods_count--;
    multiboot_info_ptr->mods_addr += sizeof(multiboot_module_entry_t);

    // Translate a physical/low pointer into the kernel's virtual mapping.
    auto adjust_by_mapping_base = [kernel_mapping_base](auto ptr) {
        return (decltype(ptr))((FlatPtr)ptr + kernel_mapping_base);
    };

    // Assemble the BootInfo handed to the kernel's entry point. Pointers the
    // kernel dereferences after the low mappings go away are adjusted into
    // the kernel's virtual address space.
    BootInfo info {};
    info.start_of_prekernel_image = (PhysicalPtr)start_of_prekernel_image;
    info.end_of_prekernel_image = (PhysicalPtr)end_of_prekernel_image;
    info.physical_to_virtual_offset = kernel_load_base - kernel_physical_base;
    info.kernel_mapping_base = kernel_mapping_base;
    info.kernel_load_base = kernel_load_base;
#if ARCH(X86_64)
    info.gdt64ptr = (PhysicalPtr)gdt64ptr;
    info.code64_sel = code64_sel;
    info.boot_pml4t = (PhysicalPtr)boot_pml4t;
#endif
    info.boot_pdpt = (PhysicalPtr)boot_pdpt;
    info.boot_pd0 = (PhysicalPtr)boot_pd0;
    info.boot_pd_kernel = (PhysicalPtr)boot_pd_kernel;
    info.boot_pd_kernel_pt1023 = (FlatPtr)adjust_by_mapping_base(boot_pd_kernel_pt1023);
    info.kernel_cmdline = (FlatPtr)adjust_by_mapping_base(kernel_cmdline);
    info.multiboot_flags = multiboot_info_ptr->flags;
    info.multiboot_memory_map = adjust_by_mapping_base((FlatPtr)multiboot_info_ptr->mmap_addr);
    info.multiboot_memory_map_count = multiboot_info_ptr->mmap_length / sizeof(multiboot_memory_map_t);
    info.multiboot_modules = adjust_by_mapping_base((FlatPtr)multiboot_info_ptr->mods_addr);
    info.multiboot_modules_count = multiboot_info_ptr->mods_count;
    // Framebuffer details are only valid if the bootloader set the flag.
    if ((multiboot_info_ptr->flags & MULTIBOOT_INFO_FRAMEBUFFER_INFO) != 0) {
        info.multiboot_framebuffer_addr = multiboot_info_ptr->framebuffer_addr;
        info.multiboot_framebuffer_pitch = multiboot_info_ptr->framebuffer_pitch;
        info.multiboot_framebuffer_width = multiboot_info_ptr->framebuffer_width;
        info.multiboot_framebuffer_height = multiboot_info_ptr->framebuffer_height;
        info.multiboot_framebuffer_bpp = multiboot_info_ptr->framebuffer_bpp;
        info.multiboot_framebuffer_type = multiboot_info_ptr->framebuffer_type;
    }

    // Shift the stack pointer up into the kernel's virtual mapping so the
    // stack remains valid once the low mappings below are torn down.
    asm(
        "mov %0, %%rax\n"
        "add %%rax, %%rsp" ::"g"(kernel_mapping_base)
        : "ax");

    // unmap the 0-1MB region
    for (size_t i = 0; i < 256; i++)
        boot_pd0_pts[i] = 0;

    // unmap the end_of_prekernel_image - MAX_KERNEL_SIZE region
    for (FlatPtr vaddr = (FlatPtr)end_of_prekernel_image; vaddr < MAX_KERNEL_SIZE; vaddr += PAGE_SIZE)
        boot_pd0_pts[vaddr >> 12] = 0;

    reload_cr3();

    // Apply the kernel's relative relocations for the chosen load base.
    ELF::perform_relative_relocations(kernel_load_base);

    // Jump to the kernel's ELF entry point, passing BootInfo at its
    // kernel-virtual address. The kernel never returns here.
    void (*entry)(BootInfo const&) = (void (*)(BootInfo const&))(kernel_load_base + kernel_elf_header.e_entry);
    entry(*adjust_by_mapping_base(&info));

    __builtin_unreachable();
}
230
231u64 generate_secure_seed()
232{
233 u32 seed = 0xFEEBDAED;
234
235#if ARCH(X86_64)
236 CPUID processor_info(0x1);
237 if (processor_info.edx() & (1 << 4)) // TSC
238 seed ^= read_tsc();
239
240 if (processor_info.ecx() & (1 << 30)) // RDRAND
241 seed ^= read_rdrand();
242
243 CPUID extended_features(0x7);
244 if (extended_features.ebx() & (1 << 18)) // RDSEED
245 seed ^= read_rdseed();
246#else
247# warning No native randomness source available for this architecture
248#endif
249
250 seed ^= multiboot_info_ptr->mods_addr;
251 seed ^= multiboot_info_ptr->framebuffer_addr;
252
253 return seed;
254}
255
// Define some Itanium C++ ABI methods to stop the linker from complaining.
// If we actually call these something has gone horribly wrong.
// __dso_handle identifies this "shared object" for __cxa_atexit-style
// destructor registration; hidden visibility keeps it out of the dynamic
// symbol table.
void* __dso_handle __attribute__((visibility("hidden")));
259
260}