/*
 * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Badge.h>
#include <AK/HashTable.h>
#include <LibJS/Heap/Handle.h>
#include <LibJS/Heap/Heap.h>
#include <LibJS/Heap/HeapBlock.h>
#include <LibJS/Interpreter.h>
#include <LibJS/Runtime/Object.h>
#include <setjmp.h>
#include <stdio.h>
#include <string.h>

#ifdef __serenity__
# include <serenity.h>
#else
# include <pthread.h>
#endif

#ifdef __serenity__
#define HEAP_DEBUG
#endif

namespace JS {

// A Heap belongs to exactly one Interpreter; the interpreter is kept by
// reference so gather_roots() can ask it for its root set.
Heap::Heap(Interpreter& interpreter)
    : m_interpreter(interpreter)
{
}

// On teardown, reclaim everything: CollectEverything skips the mark phase
// (see collect_garbage), so the sweep frees every cell and block.
Heap::~Heap()
{
    collect_garbage(CollectionType::CollectEverything);
}

59Cell* Heap::allocate_cell(size_t size)
60{
61 if (should_collect_on_every_allocation()) {
62 collect_garbage();
63 } else if (m_allocations_since_last_gc > m_max_allocations_between_gc) {
64 m_allocations_since_last_gc = 0;
65 collect_garbage();
66 } else {
67 ++m_allocations_since_last_gc;
68 }
69
70 for (auto& block : m_blocks) {
71 if (size > block->cell_size())
72 continue;
73 if (auto* cell = block->allocate())
74 return cell;
75 }
76
77 size_t cell_size = round_up_to_power_of_two(size, 16);
78 auto block = HeapBlock::create_with_cell_size(*this, cell_size);
79 auto* cell = block->allocate();
80 m_blocks.append(move(block));
81 return cell;
82}
83
84void Heap::collect_garbage(CollectionType collection_type)
85{
86 if (collection_type == CollectionType::CollectGarbage) {
87 HashTable<Cell*> roots;
88 gather_roots(roots);
89 mark_live_cells(roots);
90 }
91 sweep_dead_cells();
92}
93
94void Heap::gather_roots(HashTable<Cell*>& roots)
95{
96 m_interpreter.gather_roots({}, roots);
97
98 gather_conservative_roots(roots);
99
100 for (auto* handle : m_handles)
101 roots.set(handle->cell());
102
103#ifdef HEAP_DEBUG
104 dbg() << "gather_roots:";
105 for (auto* root : roots) {
106 dbg() << " + " << root;
107 }
108#endif
109}
110
111void Heap::gather_conservative_roots(HashTable<Cell*>& roots)
112{
113#ifdef __serenity__
114 FlatPtr dummy;
115
116#ifdef HEAP_DEBUG
117 dbg() << "gather_conservative_roots:";
118#endif
119
120 jmp_buf buf;
121 setjmp(buf);
122
123 HashTable<FlatPtr> possible_pointers;
124
125 const FlatPtr* raw_jmp_buf = reinterpret_cast<const FlatPtr*>(buf);
126
127 for (size_t i = 0; i < sizeof(buf) / sizeof(FlatPtr); i += sizeof(FlatPtr))
128 possible_pointers.set(raw_jmp_buf[i]);
129
130 FlatPtr stack_base;
131 size_t stack_size;
132
133#ifdef __serenity__
134 if (get_stack_bounds(&stack_base, &stack_size) < 0) {
135 perror("get_stack_bounds");
136 ASSERT_NOT_REACHED();
137 }
138#elif __linux__
139 pthread_attr_t attr = {};
140 if (int rc = pthread_getattr_np(pthread_self(), &attr) != 0) {
141 fprintf(stderr, "pthread_getattr_np: %s\n", strerror(-rc));
142 ASSERT_NOT_REACHED();
143 }
144 if (int rc = pthread_attr_getstack(&attr, (void**)&stack_base, &stack_size) != 0) {
145 fprintf(stderr, "pthread_attr_getstack: %s\n", strerror(-rc));
146 ASSERT_NOT_REACHED();
147 }
148 pthread_attr_destroy(&attr);
149#endif
150
151 FlatPtr stack_reference = reinterpret_cast<FlatPtr>(&dummy);
152 FlatPtr stack_top = stack_base + stack_size;
153
154 for (FlatPtr stack_address = stack_reference; stack_address < stack_top; stack_address += sizeof(FlatPtr)) {
155 auto data = *reinterpret_cast<FlatPtr*>(stack_address);
156 possible_pointers.set(data);
157 }
158
159 for (auto possible_pointer : possible_pointers) {
160 if (!possible_pointer)
161 continue;
162#ifdef HEAP_DEBUG
163 dbg() << " ? " << (const void*)possible_pointer;
164#endif
165 if (auto* cell = cell_from_possible_pointer(possible_pointer)) {
166 if (cell->is_live()) {
167#ifdef HEAP_DEBUG
168 dbg() << " ?-> " << (const void*)cell;
169#endif
170 roots.set(cell);
171 } else {
172#ifdef HEAP_DEBUG
173 dbg() << " #-> " << (const void*)cell;
174#endif
175 }
176 }
177 }
178#else
179 (void)roots;
180#endif
181}
182
183Cell* Heap::cell_from_possible_pointer(FlatPtr pointer)
184{
185 auto* possible_heap_block = HeapBlock::from_cell(reinterpret_cast<const Cell*>(pointer));
186 if (m_blocks.find([possible_heap_block](auto& block) { return block.ptr() == possible_heap_block; }) == m_blocks.end())
187 return nullptr;
188 return possible_heap_block->cell_from_possible_pointer(pointer);
189}
190
191class MarkingVisitor final : public Cell::Visitor {
192public:
193 MarkingVisitor() {}
194
195 virtual void visit(Cell* cell)
196 {
197 if (cell->is_marked())
198 return;
199#ifdef HEAP_DEBUG
200 dbg() << " ! " << cell;
201#endif
202 cell->set_marked(true);
203 cell->visit_children(*this);
204 }
205};
206
207void Heap::mark_live_cells(const HashTable<Cell*>& roots)
208{
209#ifdef HEAP_DEBUG
210 dbg() << "mark_live_cells:";
211#endif
212 MarkingVisitor visitor;
213 for (auto* root : roots) {
214 if (!root)
215 continue;
216 visitor.visit(root);
217 }
218}
219
220void Heap::sweep_dead_cells()
221{
222#ifdef HEAP_DEBUG
223 dbg() << "sweep_dead_cells:";
224#endif
225 Vector<HeapBlock*, 32> empty_blocks;
226
227 for (auto& block : m_blocks) {
228 bool block_has_live_cells = false;
229 block->for_each_cell([&](Cell* cell) {
230 if (cell->is_live()) {
231 if (!cell->is_marked()) {
232#ifdef HEAP_DEBUG
233 dbg() << " ~ " << cell;
234#endif
235 block->deallocate(cell);
236 } else {
237 cell->set_marked(false);
238 block_has_live_cells = true;
239 }
240 }
241 });
242 if (!block_has_live_cells)
243 empty_blocks.append(block);
244 }
245
246 for (auto* block : empty_blocks) {
247#ifdef HEAP_DEBUG
248 dbg() << " - Reclaim HeapBlock @ " << block << ": cell_size=" << block->cell_size();
249#endif
250 m_blocks.remove_first_matching([block](auto& entry) { return entry == block; });
251 }
252
253#ifdef HEAP_DEBUG
254 for (auto& block : m_blocks) {
255 dbg() << " > Live HeapBlock @ " << block << ": cell_size=" << block->cell_size();
256 }
257#endif
258}
259
// Register a newly created Handle; its cell will be treated as a GC root in
// gather_roots(). The Badge restricts callers to HandleImpl itself.
void Heap::did_create_handle(Badge<HandleImpl>, HandleImpl& impl)
{
    ASSERT(!m_handles.contains(&impl));
    m_handles.set(&impl);
}

// Unregister a Handle on destruction so its cell no longer counts as a root.
// Badge-restricted counterpart of did_create_handle().
void Heap::did_destroy_handle(Badge<HandleImpl>, HandleImpl& impl)
{
    ASSERT(m_handles.contains(&impl));
    m_handles.remove(&impl);
}

}