Homebrew OS kernel experiment ("I'm making my own OS").
Tags: os, osdev, asm, assembly
1// |$|*file*kernel.c*| |$|*noclean*|
2// |$|*artifact*kernel.o*kernel.c*"gcc -ffreestanding -nostdlib -c {artifact0} -o {file}"*|
3// |$|*artifact*kernel.bin*kernel.ld&kernel.o*"ld -flto -T {artifact0} {artifact1} -S -o {file}"*|
4#include <stdint.h>
5#include <stddef.h>
6#define true 1
7#define false 0
8
// Bootloader-supplied disk-read callback: reads the block at `lba` into
// `buffer`.  (Transfer size is fixed by the bootloader — TODO confirm.)
typedef void (*diskread_fn)(void *buffer, uint32_t lba);

// One BIOS E820 ("SMAP") memory-map entry as returned by INT 15h, AX=E820h.
// Base and Length are 64-bit quantities split into low/high 32-bit halves;
// packed so the layout matches the raw buffer the bootloader filled in.
typedef struct SMAP_entry {

    uint32_t BaseL; // base address uint64_t (low half)
    uint32_t BaseH; // base address (high half)
    uint32_t LengthL; // length uint64_t (low half)
    uint32_t LengthH; // length (high half)
    uint32_t Type; // entry Type (1 = usable RAM)
    uint32_t ACPI; // extended ACPI 3.0 attributes

}__attribute__((packed)) SMAP_entry_t;
// A 64-bit x86-64 paging-structure entry (any level of the hierarchy).
typedef uint64_t page_table_entry_t;
// Forward declaration; defined at the bottom of this file.
void KernelMain(SMAP_entry_t* e820_buffer, uint16_t e820_entry_count);

// Disk-read callback handed over by the bootloader; stashed in a global so
// later kernel code can read from disk.
diskread_fn diskRead;

// Kernel entry point.  The bootloader jumps here with the disk-read
// callback and the E820 map it collected.  Never returns — KernelMain
// ends in an infinite loop.
void _start(diskread_fn a1, void* e820_buffer, uint16_t e820_entry_count)
{
    diskRead = a1;
    KernelMain(e820_buffer, e820_entry_count);
}
28
29
// --- Constants ---
// x86-64 paging-entry flag bits (low 12 bits of an entry).
#define PAGE_PRESENT (1 << 0)  // entry is valid
#define PAGE_WRITE (1 << 1)    // writable
#define PAGE_USER (1 << 2)     // accessible from ring 3
#define PAGE_SIZE_2MB (1 << 7) // PS bit: 2MiB page when set in a PD entry
#define ALIGN_4K __attribute__((aligned(4096)))
// Higher-half base where physical memory is (to be) linearly mapped.
#define PHYS_MEM_OFFSET 0xFFFF800000000000
#define BOOTSTRAP_MEM_END 0x200000 // 2MiB
38
39// --- Hardcoded Page Tables ---
40page_table_entry_t *pml4 = (page_table_entry_t*)0x1000;
41page_table_entry_t *pdp = (page_table_entry_t*)0x2000;
42page_table_entry_t *pd = (page_table_entry_t*)0x3000;
43page_table_entry_t *pt = (page_table_entry_t*)0x4000;
44page_table_entry_t *pt2 = (page_table_entry_t*)0x4000;
45
46
// --- Free-List Allocator ---

// Header stored at the start of every free region; the allocator's whole
// state is a singly-linked list of these.
typedef struct free_block {
    size_t size;              // bytes in this free region (header included)
    struct free_block* next;  // next free region, or NULL
} free_block_t;

// Memory-mapped debug word the kernel pokes status codes into.
// BUG FIX: the integer address now carries an explicit pointer cast
// (initializing a pointer from a plain int is a constraint violation that
// GCC only warns about), and `static` is placed before the qualifiers.
static volatile uint64_t *debug = (volatile uint64_t *)0x7200;

// Head of the free list; empty until memory is donated via free().
static free_block_t* free_list = NULL;
56
57void* alloc(size_t size) {
58 free_block_t* prev = NULL;
59 free_block_t* curr = free_list;
60
61 while (curr) {
62 if (curr->size >= size) {
63 if (curr->size > size + sizeof(free_block_t)) {
64 // Split block
65 free_block_t* new_block = (free_block_t*)((uint8_t*)curr + size);
66 new_block->size = curr->size - size;
67 new_block->next = curr->next;
68 curr->size = size;
69 if (prev) prev->next = new_block;
70 else free_list = new_block;
71 } else {
72 // Use whole block
73 if (prev) prev->next = curr->next;
74 else free_list = curr->next;
75 }
76 return (void*)curr;
77 }
78 prev = curr;
79 curr = curr->next;
80 }
81 *debug = 0xEEEE0002;
82 while(1){}
83 return NULL; // No suitable block
84}
85
86void* alloc_aligned_4k(size_t size) {
87 // Allocate extra space to ensure alignment
88 size_t total_size = size + 4096;
89
90 // Allocate using the free-list allocator
91 void* ptr = alloc(total_size);
92 if (!ptr) {
93 *debug = 0xEEEE0001;
94 while(1){}
95 return NULL; // Allocation failed
96 }
97
98 // Calculate the aligned address
99 uintptr_t aligned_addr = ((uintptr_t)ptr + 4095) & ~4095;
100
101 // If the aligned address is the same as the original, return it
102 if (aligned_addr == (uintptr_t)ptr) {
103 return ptr;
104 }
105
106 // Otherwise, adjust the free list to account for the unused space
107 size_t unused_space = aligned_addr - (uintptr_t)ptr;
108
109 // If there's enough unused space to split into a new free block
110 if (unused_space >= sizeof(free_block_t)) {
111 free_block_t* new_free_block = (free_block_t*)ptr;
112 new_free_block->size = unused_space;
113 new_free_block->next = free_list;
114 free_list = new_free_block;
115 }
116
117 // Return the aligned address
118 return (void*)aligned_addr;
119}
120
121
122void free(void* ptr, size_t size) {
123 free_block_t* block = (free_block_t*)ptr;
124 block->size = size;
125 block->next = free_list;
126 free_list = block;
127}
128
129void merge_adjacent_blocks() {
130 if (!free_list || !free_list->next) {
131 // No merging possible if there are 0 or 1 blocks
132 return;
133 }
134
135 free_block_t *curr = free_list;
136 free_block_t *prev = NULL;
137
138 while (curr && curr->next) {
139 // Check if the current block is adjacent to the next block
140 if ((uint8_t *)curr + curr->size == (uint8_t *)curr->next) {
141 // Merge current and next block
142 curr->size += curr->next->size;
143 curr->next = curr->next->next;
144 // Do not advance 'curr' yet, as the next block might also be adjacent
145 } else {
146 // Move to the next block
147 prev = curr;
148 curr = curr->next;
149 }
150 }
151}
152
// Load CR3 with the physical address of a PML4 table, switching the active
// address space and flushing non-global TLB entries.  Requires ring 0 in
// long mode; the target tables must be reachable once the switch happens.
static inline void reload_cr3(uint64_t pml4_phys_addr) {
    asm volatile(
        "mov %0, %%cr3"
        :
        : "r"(pml4_phys_addr)
        : "memory" // mappings change under the compiler — block reordering
    );
}
161
162// --- Paging ---
163void map_page(uint64_t phys_addr, uint64_t virt_addr) {
164 uint64_t pml4_idx = (virt_addr >> 39) & 0x1FF;
165 uint64_t pdp_idx = (virt_addr >> 30) & 0x1FF;
166 uint64_t pd_idx = (virt_addr >> 21) & 0x1FF;
167 uint64_t pt_idx = (virt_addr >> 12) & 0x1FF;
168
169 // Allocate PDP if not present
170 if (!(pml4[pml4_idx] & PAGE_PRESENT)) {
171 page_table_entry_t* new_pdp = alloc_aligned_4k(4096);
172 for (int i = 0; i < 512; i++) new_pdp[i] = 0;
173 pml4[pml4_idx] = (uint64_t)new_pdp | PAGE_PRESENT | PAGE_WRITE;
174 }
175 page_table_entry_t* pdp_ptr = (page_table_entry_t*)(pml4[pml4_idx] & ~0xFFF);
176
177 // Allocate PD if not present
178 if (!(pdp_ptr[pdp_idx] & PAGE_PRESENT)) {
179 page_table_entry_t* new_pd = alloc_aligned_4k(4096);
180 for (int i = 0; i < 512; i++) new_pd[i] = 0;
181 pdp_ptr[pdp_idx] = (uint64_t)new_pd | PAGE_PRESENT | PAGE_WRITE;
182 }
183 page_table_entry_t* pd_ptr = (page_table_entry_t*)(pdp_ptr[pdp_idx] & ~0xFFF);
184
185 // Allocate PT if not present
186 if (!(pd_ptr[pd_idx] & PAGE_PRESENT)) {
187 page_table_entry_t* new_pt = alloc_aligned_4k(4096);
188 for (int i = 0; i < 512; i++) new_pt[i] = 0;
189 pd_ptr[pd_idx] = (uint64_t)new_pt | PAGE_PRESENT | PAGE_WRITE;
190 }
191 page_table_entry_t* pt_ptr = (page_table_entry_t*)(pd_ptr[pd_idx] & ~0xFFF);
192
193 // Map the physical page
194 pt_ptr[pt_idx] = phys_addr | PAGE_PRESENT | PAGE_WRITE;
195}
196
197
// --- Kernel Main ---
// Called from _start.  Builds the bootstrap paging hierarchy, identity-maps
// the first 2MiB, mirrors that range at PHYS_MEM_OFFSET, reloads CR3, then
// begins walking the BIOS E820 map.
// NOTE(review): the E820 loop below contains literal `...` placeholders
// left by the author, so this translation unit does not compile as written,
// and everything after the first `while(1){}` is unreachable scaffolding.
void KernelMain(SMAP_entry_t* e820_buffer, uint16_t e820_entry_count) {
    // 1. Initialize hardcoded page tables
    //    (clear all five statically-placed tables: 512 entries each).
    for (int i = 0; i < 512; i++) {
        pml4[i] = 0;
        pdp[i] = 0;
        pd[i] = 0;
        pt[i] = 0;
        pt2[i]= 0;
    }

    // 2. Identity map first 2MiB: pml4[0] -> pdp, pdp[0] -> pd, pd[0] -> pt.
    //    pd[1] -> pt2 covers virtual 2-4MiB.
    pml4[0] = (uint64_t)pdp | PAGE_PRESENT | PAGE_WRITE;
    pdp[0] = (uint64_t)pd | PAGE_PRESENT | PAGE_WRITE;
    pd[0] = (uint64_t)pt | PAGE_PRESENT | PAGE_WRITE;
    pd[1] = (uint64_t)pt2| PAGE_PRESENT | PAGE_WRITE;
    for (int i = 0; i < 512; i++) {
        pt[i] = (i * 4096) | PAGE_PRESENT | PAGE_WRITE;
        //pt2[i]= (PHYS_MEM_OFFSET + (i * 4096)) | PAGE_PRESENT | PAGE_WRITE;
    }
    // --- Map first 2MiB at PHYS_MEM_OFFSET ---
    // We'll use pt2 for the offset mapping
    // Set up the paging hierarchy for PHYS_MEM_OFFSET
    // PHYS_MEM_OFFSET = 0xFFFF800000000000
    // Indices for PHYS_MEM_OFFSET
    uint64_t pml4_off_idx = (PHYS_MEM_OFFSET >> 39) & 0x1FF;
    uint64_t pdp_off_idx = (PHYS_MEM_OFFSET >> 30) & 0x1FF;
    uint64_t pd_off_idx = (PHYS_MEM_OFFSET >> 21) & 0x1FF;
    // Allocate new tables for offset mapping if needed
    // For this hardcoded setup, reuse pdp, pd, pt2
    // NOTE(review): reusing pdp/pd for the higher half means both virtual
    // ranges share the same intermediate tables — confirm this sharing is
    // intended, since writes through one view alter the other.
    pml4[pml4_off_idx] = (uint64_t)pdp | PAGE_PRESENT | PAGE_WRITE;
    pdp[pdp_off_idx] = (uint64_t)pd | PAGE_PRESENT | PAGE_WRITE;
    pd[pd_off_idx] = (uint64_t)pt2 | PAGE_PRESENT | PAGE_WRITE;
    for (int i = 0; i < 512; i++) {
        pt2[i] = (i * 4096) | PAGE_PRESENT | PAGE_WRITE;
    }
    // Activate the new hierarchy (flushes non-global TLB entries).
    reload_cr3((uint64_t)pml4);

    // Walk the E820 map.  Entry 0 is skipped — presumably the low-memory
    // region holding these tables; verify against the bootloader.
    for (int i = 1; i < e820_entry_count; i++)
    {
        if (e820_buffer[i].Type!=1) continue; // 1 = usable RAM
        uint64_t base = ((uint64_t)e820_buffer[i].BaseH << 32) | e820_buffer[i].BaseL;
        uint64_t length = ((uint64_t)e820_buffer[i].LengthH << 32) | e820_buffer[i].LengthL;
        if (base > 0x200000) continue; // only regions starting at/below 2MiB
        if (base+length > 0x200000)
        {
            // TODO: placeholder left by the author — region straddles the
            // 2MiB boundary (only the mapped portion is usable here).
            ...
        }
        else
        {
            // TODO: placeholder left by the author — region lies entirely
            // within the identity-mapped first 2MiB.
            ...
        }
    }

    // Hard stop: nothing below this line ever executes.
    while(1){}
    // 3. Map all physical memory (dead code — kept as a sketch of the plan)
    for (int i = 1; i < e820_entry_count; i++) {
        if (e820_buffer[i].Type != 1) continue; // Skip non-usable memory
        uint64_t base = ((uint64_t)e820_buffer[i].BaseH << 32) | e820_buffer[i].BaseL;
        uint64_t length = ((uint64_t)e820_buffer[i].LengthH << 32) | e820_buffer[i].LengthL;
        for (uint64_t addr = base; addr < base + length; addr += 4096) {
            uint64_t virt_addr = PHYS_MEM_OFFSET + addr;
            //map_page(addr, virt_addr);
            /*reload_cr3((uint64_t)pml4);

            free_block_t* block = (free_block_t*)virt_addr;
            block->size=4096;
            block->next = 0;
            free_block_t* last_block = free_list;
            while(last_block->next){
                last_block=last_block->next;
            }
            last_block->next = block;*/
            merge_adjacent_blocks();

        }
    }
    *debug = 0xbeef1337;
    while(1){}
    // 4. Initialize free_list with mapped memory
    // (Add code here to populate free_list with mapped memory)
}