// Serenity Operating System — UserspaceEmulator malloc tracer implementation.
1/*
2 * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
3 * Copyright (c) 2021, Tobias Christiansen <tobyase@serenityos.org>
4 *
5 * SPDX-License-Identifier: BSD-2-Clause
6 */
7
8#include "MallocTracer.h"
9#include "Emulator.h"
10#include "MmapRegion.h"
11#include <AK/Debug.h>
12#include <AK/TemporaryChange.h>
13#include <mallocdefs.h>
14#include <string.h>
15#include <unistd.h>
16
17namespace UserspaceEmulator {
18
// Construct a tracer bound to the emulator whose malloc()/free()/realloc()
// activity it will audit; only a reference is stored, no other setup happens.
MallocTracer::MallocTracer(Emulator& emulator)
    : m_emulator(emulator)
{
}
23
24template<typename Callback>
25inline void MallocTracer::for_each_mallocation(Callback callback) const
26{
27 m_emulator.mmu().for_each_region([&](auto& region) {
28 if (is<MmapRegion>(region) && static_cast<const MmapRegion&>(region).is_malloc_block()) {
29 auto* malloc_data = static_cast<MmapRegion&>(region).malloc_metadata();
30 for (auto& mallocation : malloc_data->mallocations) {
31 if (mallocation.used && callback(mallocation) == IterationDecision::Break)
32 return IterationDecision::Break;
33 }
34 }
35 return IterationDecision::Continue;
36 });
37}
38
39void MallocTracer::update_metadata(MmapRegion& mmap_region, size_t chunk_size)
40{
41 mmap_region.set_malloc_metadata({},
42 adopt_own(*new MallocRegionMetadata {
43 .region = mmap_region,
44 .address = mmap_region.base(),
45 .chunk_size = chunk_size,
46 .mallocations = {},
47 }));
48 auto& malloc_data = *mmap_region.malloc_metadata();
49
50 bool is_chunked_block = malloc_data.chunk_size <= size_classes[num_size_classes - 1];
51 if (is_chunked_block)
52 malloc_data.mallocations.resize((ChunkedBlock::block_size - sizeof(ChunkedBlock)) / malloc_data.chunk_size);
53 else
54 malloc_data.mallocations.resize(1);
55
56 // Mark the containing mmap region as a malloc block!
57 mmap_region.set_malloc(true);
58}
59
60void MallocTracer::target_did_malloc(Badge<Emulator>, FlatPtr address, size_t size)
61{
62 if (m_emulator.is_in_loader_code())
63 return;
64 auto* region = m_emulator.mmu().find_region({ 0x23, address });
65 VERIFY(region);
66 auto& mmap_region = verify_cast<MmapRegion>(*region);
67
68 auto* shadow_bits = mmap_region.shadow_data() + address - mmap_region.base();
69 memset(shadow_bits, 0, size);
70
71 if (auto* existing_mallocation = find_mallocation(address)) {
72 VERIFY(existing_mallocation->freed);
73 existing_mallocation->size = size;
74 existing_mallocation->freed = false;
75 existing_mallocation->malloc_backtrace = m_emulator.raw_backtrace();
76 existing_mallocation->free_backtrace.clear();
77 return;
78 }
79
80 if (!mmap_region.is_malloc_block()) {
81 auto chunk_size = mmap_region.read32(offsetof(CommonHeader, m_size)).value();
82 update_metadata(mmap_region, chunk_size);
83 }
84 auto* mallocation = mmap_region.malloc_metadata()->mallocation_for_address(address);
85 VERIFY(mallocation);
86 *mallocation = { address, size, true, false, m_emulator.raw_backtrace(), Vector<FlatPtr>() };
87}
88
89void MallocTracer::target_did_change_chunk_size(Badge<Emulator>, FlatPtr block, size_t chunk_size)
90{
91 if (m_emulator.is_in_loader_code())
92 return;
93 auto* region = m_emulator.mmu().find_region({ 0x23, block });
94 VERIFY(region);
95 auto& mmap_region = verify_cast<MmapRegion>(*region);
96 update_metadata(mmap_region, chunk_size);
97}
98
99ALWAYS_INLINE Mallocation* MallocRegionMetadata::mallocation_for_address(FlatPtr address) const
100{
101 auto index = chunk_index_for_address(address);
102 if (!index.has_value())
103 return nullptr;
104 return &const_cast<Mallocation&>(this->mallocations[index.value()]);
105}
106
107ALWAYS_INLINE Optional<size_t> MallocRegionMetadata::chunk_index_for_address(FlatPtr address) const
108{
109 bool is_chunked_block = chunk_size <= size_classes[num_size_classes - 1];
110 if (!is_chunked_block) {
111 // This is a BigAllocationBlock
112 return 0;
113 }
114 auto offset_into_block = address - this->address;
115 if (offset_into_block < sizeof(ChunkedBlock))
116 return 0;
117 auto chunk_offset = offset_into_block - sizeof(ChunkedBlock);
118 auto chunk_index = chunk_offset / this->chunk_size;
119 if (chunk_index >= mallocations.size())
120 return {};
121 return chunk_index;
122}
123
124void MallocTracer::target_did_free(Badge<Emulator>, FlatPtr address)
125{
126 if (!address)
127 return;
128 if (m_emulator.is_in_loader_code())
129 return;
130
131 if (auto* mallocation = find_mallocation(address)) {
132 if (mallocation->freed) {
133 reportln("\n=={}== \033[31;1mDouble free()\033[0m, {:p}"sv, getpid(), address);
134 reportln("=={}== Address {} has already been passed to free()"sv, getpid(), address);
135 m_emulator.dump_backtrace();
136 } else {
137 mallocation->freed = true;
138 mallocation->free_backtrace = m_emulator.raw_backtrace();
139 }
140 return;
141 }
142
143 reportln("\n=={}== \033[31;1mInvalid free()\033[0m, {:p}"sv, getpid(), address);
144 reportln("=={}== Address {} has never been returned by malloc()"sv, getpid(), address);
145 m_emulator.dump_backtrace();
146}
147
148void MallocTracer::target_did_realloc(Badge<Emulator>, FlatPtr address, size_t size)
149{
150 if (m_emulator.is_in_loader_code())
151 return;
152 auto* region = m_emulator.mmu().find_region({ 0x23, address });
153 VERIFY(region);
154 auto& mmap_region = verify_cast<MmapRegion>(*region);
155
156 VERIFY(mmap_region.is_malloc_block());
157
158 auto* existing_mallocation = find_mallocation(address);
159 VERIFY(existing_mallocation);
160 VERIFY(!existing_mallocation->freed);
161
162 size_t old_size = existing_mallocation->size;
163
164 auto* shadow_bits = mmap_region.shadow_data() + address - mmap_region.base();
165
166 if (size > old_size) {
167 memset(shadow_bits + old_size, 1, size - old_size);
168 } else {
169 memset(shadow_bits + size, 1, old_size - size);
170 }
171
172 existing_mallocation->size = size;
173 // FIXME: Should we track malloc/realloc backtrace separately perhaps?
174 existing_mallocation->malloc_backtrace = m_emulator.raw_backtrace();
175}
176
177Mallocation* MallocTracer::find_mallocation(FlatPtr address)
178{
179 auto* region = m_emulator.mmu().find_region({ 0x23, address });
180 if (!region)
181 return nullptr;
182 return find_mallocation(*region, address);
183}
184
185Mallocation* MallocTracer::find_mallocation_before(FlatPtr address)
186{
187 Mallocation* found_mallocation = nullptr;
188 for_each_mallocation([&](auto& mallocation) {
189 if (mallocation.address >= address)
190 return IterationDecision::Continue;
191 if (!found_mallocation || (mallocation.address > found_mallocation->address))
192 found_mallocation = const_cast<Mallocation*>(&mallocation);
193 return IterationDecision::Continue;
194 });
195 return found_mallocation;
196}
197
198Mallocation* MallocTracer::find_mallocation_after(FlatPtr address)
199{
200 Mallocation* found_mallocation = nullptr;
201 for_each_mallocation([&](auto& mallocation) {
202 if (mallocation.address <= address)
203 return IterationDecision::Continue;
204 if (!found_mallocation || (mallocation.address < found_mallocation->address))
205 found_mallocation = const_cast<Mallocation*>(&mallocation);
206 return IterationDecision::Continue;
207 });
208 return found_mallocation;
209}
210
211void MallocTracer::audit_read(Region const& region, FlatPtr address, size_t size)
212{
213 if (!m_auditing_enabled)
214 return;
215
216 if (m_emulator.is_memory_auditing_suppressed()) {
217 return;
218 }
219
220 if (m_emulator.is_in_libsystem()) {
221 return;
222 }
223
224 if (m_emulator.is_in_loader_code()) {
225 return;
226 }
227
228 auto* mallocation = find_mallocation(region, address);
229
230 if (!mallocation) {
231 reportln("\n=={}== \033[31;1mHeap buffer overflow\033[0m, invalid {}-byte read at address {:p}"sv, getpid(), size, address);
232 m_emulator.dump_backtrace();
233 auto* mallocation_before = find_mallocation_before(address);
234 auto* mallocation_after = find_mallocation_after(address);
235 size_t distance_to_mallocation_before = mallocation_before ? (address - mallocation_before->address - mallocation_before->size) : 0;
236 size_t distance_to_mallocation_after = mallocation_after ? (mallocation_after->address - address) : 0;
237 if (mallocation_before && (!mallocation_after || distance_to_mallocation_before < distance_to_mallocation_after)) {
238 reportln("=={}== Address is {} byte(s) after block of size {}, identity {:p}, allocated at:"sv, getpid(), distance_to_mallocation_before, mallocation_before->size, mallocation_before->address);
239 m_emulator.dump_backtrace(mallocation_before->malloc_backtrace);
240 return;
241 }
242 if (mallocation_after && (!mallocation_before || distance_to_mallocation_after < distance_to_mallocation_before)) {
243 reportln("=={}== Address is {} byte(s) before block of size {}, identity {:p}, allocated at:"sv, getpid(), distance_to_mallocation_after, mallocation_after->size, mallocation_after->address);
244 m_emulator.dump_backtrace(mallocation_after->malloc_backtrace);
245 }
246 return;
247 }
248
249 size_t offset_into_mallocation = address - mallocation->address;
250
251 if (mallocation->freed) {
252 reportln("\n=={}== \033[31;1mUse-after-free\033[0m, invalid {}-byte read at address {:p}"sv, getpid(), size, address);
253 m_emulator.dump_backtrace();
254 reportln("=={}== Address is {} byte(s) into block of size {}, allocated at:"sv, getpid(), offset_into_mallocation, mallocation->size);
255 m_emulator.dump_backtrace(mallocation->malloc_backtrace);
256 reportln("=={}== Later freed at:"sv, getpid());
257 m_emulator.dump_backtrace(mallocation->free_backtrace);
258 return;
259 }
260}
261
262void MallocTracer::audit_write(Region const& region, FlatPtr address, size_t size)
263{
264 if (!m_auditing_enabled)
265 return;
266
267 if (m_emulator.is_memory_auditing_suppressed()) {
268 return;
269 }
270
271 if (m_emulator.is_in_loader_code()) {
272 return;
273 }
274
275 auto* mallocation = find_mallocation(region, address);
276 if (!mallocation) {
277 reportln("\n=={}== \033[31;1mHeap buffer overflow\033[0m, invalid {}-byte write at address {:p}"sv, getpid(), size, address);
278 m_emulator.dump_backtrace();
279 auto* mallocation_before = find_mallocation_before(address);
280 auto* mallocation_after = find_mallocation_after(address);
281 size_t distance_to_mallocation_before = mallocation_before ? (address - mallocation_before->address - mallocation_before->size) : 0;
282 size_t distance_to_mallocation_after = mallocation_after ? (mallocation_after->address - address) : 0;
283 if (mallocation_before && (!mallocation_after || distance_to_mallocation_before < distance_to_mallocation_after)) {
284 reportln("=={}== Address is {} byte(s) after block of size {}, identity {:p}, allocated at:"sv, getpid(), distance_to_mallocation_before, mallocation_before->size, mallocation_before->address);
285 m_emulator.dump_backtrace(mallocation_before->malloc_backtrace);
286 return;
287 }
288 if (mallocation_after && (!mallocation_before || distance_to_mallocation_after < distance_to_mallocation_before)) {
289 reportln("=={}== Address is {} byte(s) before block of size {}, identity {:p}, allocated at:"sv, getpid(), distance_to_mallocation_after, mallocation_after->size, mallocation_after->address);
290 m_emulator.dump_backtrace(mallocation_after->malloc_backtrace);
291 }
292 return;
293 }
294
295 size_t offset_into_mallocation = address - mallocation->address;
296
297 if (mallocation->freed) {
298 reportln("\n=={}== \033[31;1mUse-after-free\033[0m, invalid {}-byte write at address {:p}"sv, getpid(), size, address);
299 m_emulator.dump_backtrace();
300 reportln("=={}== Address is {} byte(s) into block of size {}, allocated at:"sv, getpid(), offset_into_mallocation, mallocation->size);
301 m_emulator.dump_backtrace(mallocation->malloc_backtrace);
302 reportln("=={}== Later freed at:"sv, getpid());
303 m_emulator.dump_backtrace(mallocation->free_backtrace);
304 return;
305 }
306}
307
// Build m_memory_graph for leak detection: one node per live (non-freed)
// mallocation, an edge for every word inside a mallocation that holds the
// address of another live mallocation, and a reachability flag for nodes
// referenced from non-heap memory. dump_leak_report() later treats any
// unreachable node as a leak.
//
// NOTE(review): scanning reads 32-bit words from segment 0x23, so this
// assumes a 32-bit emulated target — confirm before reusing for 64-bit.
void MallocTracer::populate_memory_graph()
{
    // Create Node for each live Mallocation
    for_each_mallocation([&](auto& mallocation) {
        if (mallocation.freed)
            return IterationDecision::Continue;
        m_memory_graph.set(mallocation.address, {});
        return IterationDecision::Continue;
    });

    // Find pointers from each memory region to another
    for_each_mallocation([&](auto& mallocation) {
        if (mallocation.freed)
            return IterationDecision::Continue;

        size_t pointers_in_mallocation = mallocation.size / sizeof(u32);

        auto& edges_from_mallocation = m_memory_graph.find(mallocation.address)->value;

        // Treat every aligned word in the allocation as a potential pointer;
        // uninitialized words are ignored via the shadow-tracked read.
        for (size_t i = 0; i < pointers_in_mallocation; ++i) {
            auto value = m_emulator.mmu().read32({ 0x23, mallocation.address + i * sizeof(u32) });
            auto other_address = value.value();
            if (!value.is_uninitialized() && m_memory_graph.contains(value.value())) {
                if constexpr (REACHABLE_DEBUG)
                    reportln("region/mallocation {:p} is reachable from other mallocation {:p}"sv, other_address, mallocation.address);
                edges_from_mallocation.edges_from_node.append(other_address);
            }
        }
        return IterationDecision::Continue;
    });

    // Find mallocations that are pointed to by other regions
    Vector<FlatPtr> reachable_mallocations = {};
    m_emulator.mmu().for_each_region([&](auto& region) {
        // Skip the stack
        if (region.is_stack())
            return IterationDecision::Continue;
        if (region.is_text())
            return IterationDecision::Continue;
        if (!region.is_readable())
            return IterationDecision::Continue;
        // Skip malloc blocks
        if (is<MmapRegion>(region) && static_cast<const MmapRegion&>(region).is_malloc_block())
            return IterationDecision::Continue;

        size_t pointers_in_region = region.size() / sizeof(u32);

        // Any word in a non-heap region that holds a live mallocation's
        // address makes that mallocation a reachability root.
        for (size_t i = 0; i < pointers_in_region; ++i) {
            auto value = region.read32(i * sizeof(u32));
            auto other_address = value.value();
            if (!value.is_uninitialized() && m_memory_graph.contains(value.value())) {
                if constexpr (REACHABLE_DEBUG)
                    reportln("region/mallocation {:p} is reachable from region {:p}-{:p}"sv, other_address, region.base(), region.end() - 1);
                m_memory_graph.find(other_address)->value.is_reachable = true;
                reachable_mallocations.append(other_address);
            }
        }
        return IterationDecision::Continue;
    });

    // Propagate reachability
    // There are probably better ways to do that
    // Worklist traversal: the vector grows while we index through it, and
    // `visited` guards against re-processing nodes (and cycles).
    Vector<FlatPtr> visited = {};
    for (size_t i = 0; i < reachable_mallocations.size(); ++i) {
        auto reachable = reachable_mallocations.at(i);
        if (visited.contains_slow(reachable))
            continue;
        visited.append(reachable);
        auto& mallocation_node = m_memory_graph.find(reachable)->value;

        if (!mallocation_node.is_reachable)
            mallocation_node.is_reachable = true;

        for (auto& edge : mallocation_node.edges_from_node) {
            reachable_mallocations.append(edge);
        }
    }
}
386
387void MallocTracer::dump_memory_graph()
388{
389 for (auto& key : m_memory_graph.keys()) {
390 auto value = m_memory_graph.find(key)->value;
391 dbgln("Block {:p} [{}reachable] ({} edges)", key, !value.is_reachable ? "not " : "", value.edges_from_node.size());
392 for (auto& edge : value.edges_from_node) {
393 dbgln(" -> {:p}", edge);
394 }
395 }
396}
397
398void MallocTracer::dump_leak_report()
399{
400 TemporaryChange change(m_auditing_enabled, false);
401
402 size_t bytes_leaked = 0;
403 size_t leaks_found = 0;
404
405 populate_memory_graph();
406
407 if constexpr (REACHABLE_DEBUG)
408 dump_memory_graph();
409
410 for_each_mallocation([&](auto& mallocation) {
411 if (mallocation.freed)
412 return IterationDecision::Continue;
413
414 auto& value = m_memory_graph.find(mallocation.address)->value;
415
416 if (value.is_reachable)
417 return IterationDecision::Continue;
418 ++leaks_found;
419 bytes_leaked += mallocation.size;
420 reportln("\n=={}== \033[31;1mLeak\033[0m, {}-byte allocation at address {:p}"sv, getpid(), mallocation.size, mallocation.address);
421 m_emulator.dump_backtrace(mallocation.malloc_backtrace);
422 return IterationDecision::Continue;
423 });
424
425 if (!leaks_found)
426 reportln("\n=={}== \033[32;1mNo leaks found!\033[0m"sv, getpid());
427 else
428 reportln("\n=={}== \033[31;1m{} leak(s) found: {} byte(s) leaked\033[0m"sv, getpid(), leaks_found, bytes_leaked);
429}
430}