// Serenity Operating System — kernel heap allocator (kmalloc / kfree).
1/*
2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this
9 * list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27/*
28 * Really really *really* Q&D malloc() and free() implementations
29 * just to get going. Don't ever let anyone see this shit. :^)
30 */
31
32#include <AK/Assertions.h>
33#include <AK/Types.h>
34#include <Kernel/Arch/i386/CPU.h>
35#include <Kernel/Heap/kmalloc.h>
36#include <Kernel/KSyms.h>
37#include <Kernel/Process.h>
38#include <Kernel/Scheduler.h>
39#include <LibBareMetal/StdLib.h>
40
41#define SANITIZE_KMALLOC
42
// Bookkeeping prefix placed at the start of every block handed out by
// kmalloc(). The pointer returned to the caller is `data`, i.e. the first
// byte after this header; kfree()/krealloc() step back over it to recover
// the block's size.
struct AllocationHeader
{
    size_t allocation_size_in_chunks; // Total block length in CHUNK_SIZE units, header included.
    u8 data[0];                       // Zero-length array (GNU extension): start of the caller's payload.
};
48
// The general-purpose pool: a fixed region carved into CHUNK_SIZE-byte
// chunks, tracked by a one-bit-per-chunk bitmap.
#define BASE_PHYSICAL (0xc0000000 + (4 * MB))
#define CHUNK_SIZE 32
#define POOL_SIZE (3 * MB)

// Separate bump-allocated region for kmalloc_eternal() (never freed).
#define ETERNAL_BASE_PHYSICAL (0xc0000000 + (2 * MB))
#define ETERNAL_RANGE_SIZE (2 * MB)

// One bit per chunk; a set bit means the chunk is in use.
static u8 alloc_map[POOL_SIZE / CHUNK_SIZE / 8];

// Running byte totals, updated by kmalloc_impl()/kfree() under InterruptDisabler.
volatile size_t sum_alloc = 0;
volatile size_t sum_free = POOL_SIZE;
volatile size_t kmalloc_sum_eternal = 0;

// Call counters and the debug flag that dumps a backtrace per allocation.
u32 g_kmalloc_call_count;
u32 g_kfree_call_count;
bool g_dump_kmalloc_stacks;

// Bump pointer and end-of-range limit for kmalloc_eternal().
static u8* s_next_eternal_ptr;
static u8* s_end_of_eternal_range;
68
69void kmalloc_init()
70{
71 memset(&alloc_map, 0, sizeof(alloc_map));
72 memset((void*)BASE_PHYSICAL, 0, POOL_SIZE);
73
74 kmalloc_sum_eternal = 0;
75 sum_alloc = 0;
76 sum_free = POOL_SIZE;
77
78 s_next_eternal_ptr = (u8*)ETERNAL_BASE_PHYSICAL;
79 s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
80}
81
82void* kmalloc_eternal(size_t size)
83{
84 void* ptr = s_next_eternal_ptr;
85 s_next_eternal_ptr += size;
86 ASSERT(s_next_eternal_ptr < s_end_of_eternal_range);
87 kmalloc_sum_eternal += size;
88 return ptr;
89}
90
91void* kmalloc_aligned(size_t size, size_t alignment)
92{
93 void* ptr = kmalloc(size + alignment + sizeof(void*));
94 size_t max_addr = (size_t)ptr + alignment;
95 void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
96 ((void**)aligned_ptr)[-1] = ptr;
97 return aligned_ptr;
98}
99
100void kfree_aligned(void* ptr)
101{
102 kfree(((void**)ptr)[-1]);
103}
104
105void* kmalloc_page_aligned(size_t size)
106{
107 void* ptr = kmalloc_aligned(size, PAGE_SIZE);
108 size_t d = (size_t)ptr;
109 ASSERT((d & PAGE_MASK) == d);
110 return ptr;
111}
112
// First-fit allocator over the chunk pool. Finds room for `size` bytes plus
// the AllocationHeader, marks the chunks as used in alloc_map, and returns a
// pointer just past the header. On failure it panics (prints and hangs) —
// this function never returns null.
void* kmalloc_impl(size_t size)
{
    // Heap state (alloc_map, counters) is shared with kfree(); keep
    // interrupts off for the whole scan-and-commit sequence.
    Kernel::InterruptDisabler disabler;
    ++g_kmalloc_call_count;

    // Optional debugging aid: log every allocation with a backtrace.
    if (g_dump_kmalloc_stacks && Kernel::ksyms_ready) {
        dbgprintf("kmalloc(%u)\n", size);
        Kernel::dump_backtrace();
    }

    // We need space for the AllocationHeader at the head of the block.
    size_t real_size = size + sizeof(AllocationHeader);

    // Fast-fail when total free space is insufficient (even before
    // considering fragmentation).
    if (sum_free < real_size) {
        Kernel::dump_backtrace();
        kprintf("%s(%u) kmalloc(): PANIC! Out of memory (sucks, dude)\nsum_free=%u, real_size=%u\n", Kernel::Process::current->name().characters(), Kernel::Process::current->pid(), sum_free, real_size);
        Kernel::hang();
    }

    // Round the byte count up to a whole number of chunks.
    size_t chunks_needed = real_size / CHUNK_SIZE;
    if (real_size % CHUNK_SIZE)
        ++chunks_needed;

    size_t chunks_here = 0;  // Length of the current run of free chunks.
    size_t first_chunk = 0;  // Chunk index where that run begins.

    // Scan the bitmap one byte (8 chunks) at a time, looking for a
    // contiguous run of chunks_needed free bits.
    for (size_t i = 0; i < (POOL_SIZE / CHUNK_SIZE / 8); ++i) {
        if (alloc_map[i] == 0xff) {
            // Skip over completely full bucket.
            chunks_here = 0;
            continue;
        }
        // FIXME: This scan can be optimized further with LZCNT.
        for (size_t j = 0; j < 8; ++j) {
            if (!(alloc_map[i] & (1 << j))) {
                if (chunks_here == 0) {
                    // Mark where potential allocation starts.
                    first_chunk = i * 8 + j;
                }

                ++chunks_here;

                if (chunks_here == chunks_needed) {
                    // Found a big enough run: place the header at its start
                    // and hand the caller the bytes right after it.
                    auto* a = (AllocationHeader*)(BASE_PHYSICAL + (first_chunk * CHUNK_SIZE));
                    u8* ptr = a->data;
                    a->allocation_size_in_chunks = chunks_needed;

                    // Mark every chunk of the run as in use.
                    for (size_t k = first_chunk; k < (first_chunk + chunks_needed); ++k) {
                        alloc_map[k / 8] |= 1 << (k % 8);
                    }

                    sum_alloc += a->allocation_size_in_chunks * CHUNK_SIZE;
                    sum_free -= a->allocation_size_in_chunks * CHUNK_SIZE;
#ifdef SANITIZE_KMALLOC
                    // Fill the fresh payload with a scrub byte so reads of
                    // uninitialized heap memory show a recognizable pattern.
                    memset(ptr, KMALLOC_SCRUB_BYTE, (a->allocation_size_in_chunks * CHUNK_SIZE) - sizeof(AllocationHeader));
#endif
                    return ptr;
                }
            } else {
                // This is in use, so restart chunks_here counter.
                chunks_here = 0;
            }
        }
    }

    // Enough bytes were free in total but no run was contiguous enough:
    // the pool is too fragmented to satisfy this request.
    kprintf("%s(%u) kmalloc(): PANIC! Out of memory (no suitable block for size %u)\n", Kernel::Process::current->name().characters(), Kernel::Process::current->pid(), size);
    Kernel::dump_backtrace();
    Kernel::hang();
}
182
// Returns a block previously obtained from kmalloc() to the pool.
// kfree(nullptr) is a no-op. Not for kmalloc_eternal() memory (never freed)
// nor for kmalloc_aligned() pointers (use kfree_aligned() for those).
void kfree(void* ptr)
{
    if (!ptr)
        return;

    // alloc_map and the counters are shared with kmalloc_impl(); keep
    // interrupts off while we mutate them.
    Kernel::InterruptDisabler disabler;
    ++g_kfree_call_count;

    // Step back over the AllocationHeader to find the block start, then
    // convert its address into a chunk index within the pool.
    auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
    uintptr_t start = ((uintptr_t)a - (uintptr_t)BASE_PHYSICAL) / CHUNK_SIZE;

    // Clear the "in use" bit for every chunk of this block.
    for (size_t k = start; k < (start + a->allocation_size_in_chunks); ++k)
        alloc_map[k / 8] &= ~(1 << (k % 8));

    sum_alloc -= a->allocation_size_in_chunks * CHUNK_SIZE;
    sum_free += a->allocation_size_in_chunks * CHUNK_SIZE;

#ifdef SANITIZE_KMALLOC
    // Scrub the entire block (header included) so use-after-free reads a
    // recognizable pattern. Must happen after the size was read above.
    memset(a, KFREE_SCRUB_BYTE, a->allocation_size_in_chunks * CHUNK_SIZE);
#endif
}
204
205void* krealloc(void* ptr, size_t new_size)
206{
207 if (!ptr)
208 return kmalloc(new_size);
209
210 Kernel::InterruptDisabler disabler;
211
212 auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
213 size_t old_size = a->allocation_size_in_chunks * CHUNK_SIZE;
214
215 if (old_size == new_size)
216 return ptr;
217
218 auto* new_ptr = kmalloc(new_size);
219 memcpy(new_ptr, ptr, min(old_size, new_size));
220 kfree(ptr);
221 return new_ptr;
222}
223
// Global operator new for the kernel: routes all C++ heap allocation
// through kmalloc(). No null check is needed by callers — the allocator
// panics rather than return null on out-of-memory.
void* operator new(size_t size)
{
    return kmalloc(size);
}
228
// Global array operator new: same routing through kmalloc() as the
// scalar form above; the allocator panics instead of returning null.
void* operator new[](size_t size)
{
    return kmalloc(size);
}