// Serenity Operating System
1/*
2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this
9 * list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <AK/Assertions.h>
28#include <AK/Memory.h>
29#include <Kernel/Heap/SlabAllocator.h>
30#include <Kernel/Heap/kmalloc.h>
31#include <Kernel/VM/Region.h>
32
33#define SANITIZE_SLABS
34
35namespace Kernel {
36
37template<size_t templated_slab_size>
38class SlabAllocator {
39public:
40 SlabAllocator() {}
41
42 void init(size_t size)
43 {
44 m_base = kmalloc_eternal(size);
45 m_end = (u8*)m_base + size;
46 FreeSlab* slabs = (FreeSlab*)m_base;
47 size_t slab_count = size / templated_slab_size;
48 for (size_t i = 1; i < slab_count; ++i) {
49 slabs[i].next = &slabs[i - 1];
50 }
51 slabs[0].next = nullptr;
52 m_freelist = &slabs[slab_count - 1];
53 m_num_allocated = 0;
54 m_num_free = slab_count;
55 }
56
57 constexpr size_t slab_size() const { return templated_slab_size; }
58
59 void* alloc()
60 {
61 InterruptDisabler disabler;
62 if (!m_freelist)
63 return kmalloc(slab_size());
64 ASSERT(m_freelist);
65 void* ptr = m_freelist;
66 m_freelist = m_freelist->next;
67 ++m_num_allocated;
68 --m_num_free;
69#ifdef SANITIZE_SLABS
70 memset(ptr, SLAB_ALLOC_SCRUB_BYTE, slab_size());
71#endif
72 return ptr;
73 }
74
75 void dealloc(void* ptr)
76 {
77 InterruptDisabler disabler;
78 ASSERT(ptr);
79 if (ptr < m_base || ptr >= m_end) {
80 kfree(ptr);
81 return;
82 }
83 ((FreeSlab*)ptr)->next = m_freelist;
84#ifdef SANITIZE_SLABS
85 if (slab_size() > sizeof(FreeSlab*))
86 memset(((FreeSlab*)ptr)->padding, SLAB_DEALLOC_SCRUB_BYTE, sizeof(FreeSlab::padding));
87#endif
88 m_freelist = (FreeSlab*)ptr;
89 ++m_num_allocated;
90 --m_num_free;
91 }
92
93 size_t num_allocated() const { return m_num_allocated; }
94 size_t num_free() const { return m_num_free; }
95
96private:
97 struct FreeSlab {
98 FreeSlab* next { nullptr };
99 char padding[templated_slab_size - sizeof(FreeSlab*)];
100 };
101
102 // NOTE: These are not default-initialized to prevent an init-time constructor from overwriting them
103 FreeSlab* m_freelist;
104 size_t m_num_allocated;
105 size_t m_num_free;
106 void* m_base;
107 void* m_end;
108
109 static_assert(sizeof(FreeSlab) == templated_slab_size);
110};
111
// One allocator per supported slab size class; backing memory is handed
// out eternally in slab_alloc_init().
static SlabAllocator<16> s_slab_allocator_16;
static SlabAllocator<32> s_slab_allocator_32;
static SlabAllocator<64> s_slab_allocator_64;

// Region objects are slab-allocated; they must fit in the largest size class.
static_assert(sizeof(Region) <= s_slab_allocator_64.slab_size());
117
// Invokes callback(allocator) on each slab allocator, smallest size class
// first (the order is visible through slab_alloc_stats()).
template<typename Callback>
void for_each_allocator(Callback callback)
{
    callback(s_slab_allocator_16);
    callback(s_slab_allocator_32);
    callback(s_slab_allocator_64);
}
125
// Sets up the backing pools for all slab allocators. Called once at boot;
// the memory comes from kmalloc_eternal() and is never returned.
void slab_alloc_init()
{
    s_slab_allocator_16.init(128 * KB);
    s_slab_allocator_32.init(128 * KB);
    s_slab_allocator_64.init(512 * KB);
}
132
133void* slab_alloc(size_t slab_size)
134{
135 if (slab_size <= 16)
136 return s_slab_allocator_16.alloc();
137 if (slab_size <= 32)
138 return s_slab_allocator_32.alloc();
139 if (slab_size <= 64)
140 return s_slab_allocator_64.alloc();
141 ASSERT_NOT_REACHED();
142}
143
144void slab_dealloc(void* ptr, size_t slab_size)
145{
146 if (slab_size <= 16)
147 return s_slab_allocator_16.dealloc(ptr);
148 if (slab_size <= 32)
149 return s_slab_allocator_32.dealloc(ptr);
150 if (slab_size <= 64)
151 return s_slab_allocator_64.dealloc(ptr);
152 ASSERT_NOT_REACHED();
153}
154
155void slab_alloc_stats(Function<void(size_t slab_size, size_t allocated, size_t free)> callback)
156{
157 for_each_allocator([&](auto& allocator) {
158 callback(allocator.slab_size(), allocator.num_allocated(), allocator.num_free());
159 });
160}
161
162}