// Serenity Operating System
1/*
2 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
3 * Copyright (c) 2022, Peter Elliott <pelliott@serenityos.org>
4 *
5 * SPDX-License-Identifier: BSD-2-Clause
6 */
7
8#include <AK/BuiltinWrappers.h>
9#include <AK/Debug.h>
10#include <AK/ScopedValueRollback.h>
11#include <AK/Vector.h>
12#include <assert.h>
13#include <errno.h>
14#include <mallocdefs.h>
15#include <pthread.h>
16#include <serenity.h>
17#include <stdio.h>
18#include <stdlib.h>
19#include <string.h>
20#include <sys/internals.h>
21#include <sys/mman.h>
22#include <syscall.h>
23
// RAII guard for the global malloc mutex. While the lock is held, the heap is
// flagged as unstable, i.e. allocator metadata may be mid-update.
// NOTE(review): consumers of __heap_is_stable live outside this file (libc
// internals) — confirm there before changing the flag protocol.
class PthreadMutexLocker {
public:
    ALWAYS_INLINE explicit PthreadMutexLocker(pthread_mutex_t& mutex)
        : m_mutex(mutex)
    {
        lock();
        // Only flip the flag once the lock is ours.
        __heap_is_stable = false;
    }
    ALWAYS_INLINE ~PthreadMutexLocker()
    {
        // Declare the heap stable again before anyone else can take the lock.
        __heap_is_stable = true;
        unlock();
    }
    ALWAYS_INLINE void lock() { pthread_mutex_lock(&m_mutex); }
    ALWAYS_INLINE void unlock() { pthread_mutex_unlock(&m_mutex); }

private:
    pthread_mutex_t& m_mutex;
};
43
// Cache and reuse freed BigAllocationBlocks of select sizes (see big_allocator_for_size()).
#define RECYCLE_BIG_ALLOCATIONS

// Serializes all access to the allocator state below.
static pthread_mutex_t s_malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
// False while s_malloc_mutex is held (see PthreadMutexLocker); presumably
// declared in <sys/internals.h> for other libc internals — confirm.
bool __heap_is_stable = true;

// Capacity limits for the empty-block caches and the big-allocation recycler.
constexpr size_t number_of_hot_chunked_blocks_to_keep_around = 16;
constexpr size_t number_of_cold_chunked_blocks_to_keep_around = 16;
constexpr size_t number_of_big_blocks_to_keep_around_per_size_class = 8;

// Runtime switches, configured in __malloc_init() from the environment.
static bool s_log_malloc = false;
static bool s_scrub_malloc = true;
static bool s_scrub_free = true;
static bool s_profiling = false;
static bool s_in_userspace_emulator = false;
58
// Notifies UserspaceEmulator (emuctl command 1) of a new allocation of `size`
// bytes at `ptr`. No-op outside UE.
ALWAYS_INLINE static void ue_notify_malloc(void const* ptr, size_t size)
{
    if (s_in_userspace_emulator)
        syscall(SC_emuctl, 1, size, (FlatPtr)ptr);
}
64
// Notifies UserspaceEmulator (emuctl command 2) that `ptr` is being freed.
// No-op outside UE.
ALWAYS_INLINE static void ue_notify_free(void const* ptr)
{
    if (s_in_userspace_emulator)
        syscall(SC_emuctl, 2, (FlatPtr)ptr, 0);
}
70
// Notifies UserspaceEmulator (emuctl command 3) that the allocation at `ptr`
// now has logical size `size` (used by the in-place realloc shrink path).
ALWAYS_INLINE static void ue_notify_realloc(void const* ptr, size_t size)
{
    if (s_in_userspace_emulator)
        syscall(SC_emuctl, 3, size, (FlatPtr)ptr);
}
76
// Notifies UserspaceEmulator (emuctl command 4) that a recycled ChunkedBlock
// at `block` was repurposed for a different chunk size.
ALWAYS_INLINE static void ue_notify_chunk_size_changed(void const* block, size_t chunk_size)
{
    if (s_in_userspace_emulator)
        syscall(SC_emuctl, 4, chunk_size, (FlatPtr)block);
}
82
// RAII helper bracketing a scope with emuctl commands 7/8 — presumably
// "suspend"/"resume" memory auditing on the UE side, matching the struct's
// name (confirm against UserspaceEmulator). No-op outside UE.
struct MemoryAuditingSuppressor {
    ALWAYS_INLINE MemoryAuditingSuppressor()
    {
        if (s_in_userspace_emulator)
            syscall(SC_emuctl, 7);
    }
    ALWAYS_INLINE ~MemoryAuditingSuppressor()
    {
        if (s_in_userspace_emulator)
            syscall(SC_emuctl, 8);
    }
};
95
// Counters reported by serenity_dump_malloc_stats(). Updated under
// s_malloc_mutex by malloc_impl()/free_impl().
struct MallocStats {
    size_t number_of_malloc_calls;

    // Big (page-granular) allocation path.
    size_t number_of_big_allocator_hits;
    size_t number_of_big_allocator_purge_hits;
    size_t number_of_big_allocs;

    // Chunked (size-classed) allocation path.
    size_t number_of_hot_empty_block_hits;
    size_t number_of_cold_empty_block_hits;
    size_t number_of_cold_empty_block_purge_hits;
    size_t number_of_block_allocs;
    size_t number_of_blocks_full;

    size_t number_of_free_calls;

    // Big-allocation free path: recycled vs. returned to the kernel.
    size_t number_of_big_allocator_keeps;
    size_t number_of_big_allocator_frees;

    // Chunked free path.
    size_t number_of_freed_full_blocks;
    size_t number_of_hot_keeps;
    size_t number_of_cold_keeps;
    size_t number_of_frees;
};
static MallocStats g_malloc_stats = {};
120
// Caches of completely empty ChunkedBlocks, in two tiers: "hot" blocks stay
// mapped read/write for the cheapest possible reuse; "cold" blocks are made
// PROT_NONE and volatile (see free_impl()), so the kernel may purge them.
static size_t s_hot_empty_block_count { 0 };
static ChunkedBlock* s_hot_empty_blocks[number_of_hot_chunked_blocks_to_keep_around] { nullptr };
static size_t s_cold_empty_block_count { 0 };
static ChunkedBlock* s_cold_empty_blocks[number_of_cold_chunked_blocks_to_keep_around] { nullptr };
125
// Per-size-class allocator state: the class's chunk size, how many blocks it
// currently owns, and intrusive lists separating blocks that still have free
// chunks from completely full ones.
struct Allocator {
    size_t size { 0 };
    size_t block_count { 0 };
    ChunkedBlock::List usable_blocks;
    ChunkedBlock::List full_blocks;
};

// Cache of recently freed BigAllocationBlocks kept around for reuse.
struct BigAllocator {
    Vector<BigAllocationBlock*, number_of_big_blocks_to_keep_around_per_size_class> blocks;
};
136
// Allocators will be initialized in __malloc_init.
// We can not rely on global constructors to initialize them,
// because they must be initialized before other global constructors
// are run. Similarly, we can not allow global destructors to destruct
// them. We could have used AK::NeverDestroyed to prevent the latter,
// but it would have not helped with the former.
// Hence: raw, suitably-aligned storage, placement-new'ed in __malloc_init().
alignas(Allocator) static u8 g_allocators_storage[sizeof(Allocator) * num_size_classes];
alignas(BigAllocator) static u8 g_big_allocators_storage[sizeof(BigAllocator)];
145
// Views the raw storage buffers as arrays of their proper types. The objects
// themselves are constructed in __malloc_init() before any use.
static inline Allocator (&allocators())[num_size_classes]
{
    return reinterpret_cast<Allocator(&)[num_size_classes]>(g_allocators_storage);
}

static inline BigAllocator (&big_allocators())[1]
{
    return reinterpret_cast<BigAllocator(&)[1]>(g_big_allocators_storage);
}
155
156// --- BEGIN MATH ---
157// This stuff is only used for checking if there exists an aligned block in a
158// chunk. It has no bearing on the rest of the allocator, especially for
159// regular malloc.
160
// Mathematical modulo: maps `a` into [0, b) for positive `b`. C's `%`
// truncates toward zero, so a negative `a` would otherwise yield a negative
// remainder; adding `b` back and reducing again fixes that up.
static inline unsigned long modulo(long a, long b)
{
    long remainder = a % b;
    return (remainder + b) % b;
}
165
// Result of the extended Euclidean algorithm: gcd == x*a + y*b.
struct EuclideanResult {
    long x;
    long y;
    long gcd;
};

// Iterative extended Euclidean algorithm: computes gcd(a, b) together with
// Bezout coefficients x, y such that x*a + y*b == gcd(a, b).
static inline EuclideanResult extended_euclid(long a, long b)
{
    EuclideanResult previous = { 1, 0, a };
    EuclideanResult current = { 0, 1, b };

    while (current.gcd != 0) {
        long quotient = previous.gcd / current.gcd;

        EuclideanResult next = {
            previous.x - quotient * current.x,
            previous.y - quotient * current.y,
            previous.gcd - quotient * current.gcd,
        };

        previous = current;
        current = next;
    }

    return previous;
}
193
// Decides whether a ChunkedBlock whose chunks are `bytes_per_chunk` apart can
// contain at least one `align`-aligned chunk. Assumes chunk 0 starts at
// offset sizeof(ChunkedBlock) from the (block_size-aligned) block base and
// chunk i at sizeof(ChunkedBlock) + i*bytes_per_chunk — TODO confirm against
// the ChunkedBlock layout in mallocdefs.h.
static inline bool block_has_aligned_chunk(long align, long bytes_per_chunk, long chunk_capacity)
{
    // Never do math on a normal malloc.
    if ((size_t)align <= sizeof(ChunkedBlock))
        return true;

    // Solve the linear congruence n*bytes_per_chunk = -sizeof(ChunkedBlock) (mod align).
    auto [x, y, gcd] = extended_euclid(bytes_per_chunk % align, align);
    long constant = modulo(-sizeof(ChunkedBlock), align);
    if (constant % gcd != 0)
        // No solution. Chunk size is probably a multiple of align.
        return false;

    // n is (intended to be) the index of the first aligned chunk; the block
    // qualifies iff that index exists within its capacity.
    long n = modulo(x * (constant / gcd), align);
    if (x < 0)
        n = (n + align / gcd) % align;

    // Don't ask me to prove this.
    VERIFY(n > 0);
    return n < chunk_capacity;
}
215
216// --- END MATH ---
217
// Picks the size-class allocator for a request of `size` bytes with `align`
// alignment. On a hit, sets `good_size` to the class's rounded-up size and
// returns its allocator. Returns nullptr (and a page-rounded `good_size`)
// when the request must be served as a big allocation instead.
static Allocator* allocator_for_size(size_t size, size_t& good_size, size_t align = 1)
{
    // NOTE(review): assumes size_classes[] is 0-terminated — confirm in mallocdefs.h.
    for (size_t i = 0; size_classes[i]; ++i) {
        auto& allocator = allocators()[i];
        // A class only qualifies if its block layout can hold a chunk at the
        // requested alignment (always true for align <= sizeof(ChunkedBlock)).
        if (size <= size_classes[i] && block_has_aligned_chunk(align, allocator.size, (ChunkedBlock::block_size - sizeof(ChunkedBlock)) / allocator.size)) {
            good_size = size_classes[i];
            return &allocator;
        }
    }
    good_size = PAGE_ROUND_UP(size);
    return nullptr;
}
230
#ifdef RECYCLE_BIG_ALLOCATIONS
// Maps a big-allocation size to its recycling cache, or nullptr when blocks
// of that size are not recycled. Only the 64 KiB class is cached for now.
static BigAllocator* big_allocator_for_size(size_t size)
{
    if (size != 65536)
        return nullptr;
    return &big_allocators()[0];
}
#endif
239
240extern "C" {
241
// mmaps a fresh anonymous, purgeable region of `size` bytes, named `name` for
// introspection. Returns ENOMEM when the kernel cannot satisfy the mapping.
static ErrorOr<void*> os_alloc(size_t size, char const* name)
{
    int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_PURGEABLE;
#if ARCH(X86_64)
    flags |= MAP_RANDOMIZED;
#endif
    // block_size is passed as the mapping alignment so a chunk pointer can be
    // masked back to its owning block (see free_impl()/malloc_size()).
    auto* ptr = serenity_mmap(nullptr, size, PROT_READ | PROT_WRITE, flags, 0, 0, ChunkedBlock::block_size, name);
    // serenity_mmap reports failure via MAP_FAILED, never nullptr.
    VERIFY(ptr != nullptr);
    if (ptr == MAP_FAILED) {
        return ENOMEM;
    }
    return ptr;
}
255
// Returns a region previously obtained from os_alloc() to the kernel.
static void os_free(void* ptr, size_t size)
{
    int result = munmap(ptr, size);
    assert(result == 0);
}
261
// Carves one `align`-aligned chunk out of `block`: first scans the explicit
// freelist, then the not-yet-handed-out ("lazy") tail of the block. Chunks
// skipped in the lazy region are pushed onto the freelist so they are not
// lost. Returns nullptr if no aligned chunk is available in this block.
static void* try_allocate_chunk_aligned(size_t align, ChunkedBlock& block)
{
    // These loops are guaranteed to run only once for a standard-aligned malloc.
    for (FreelistEntry** entry = &(block.m_freelist); *entry != nullptr; entry = &((*entry)->next)) {
        if ((reinterpret_cast<uintptr_t>(*entry) & (align - 1)) == 0) {
            --block.m_free_chunks;
            void* ptr = *entry;
            *entry = (*entry)->next; // Delete the entry.
            return ptr;
        }
    }
    for (; block.m_next_lazy_freelist_index < block.chunk_capacity(); block.m_next_lazy_freelist_index++) {
        void* ptr = block.m_slot + block.m_next_lazy_freelist_index * block.m_size;
        if ((reinterpret_cast<uintptr_t>(ptr) & (align - 1)) == 0) {
            --block.m_free_chunks;
            block.m_next_lazy_freelist_index++;
            return ptr;
        }
        // Unaligned: record this chunk on the freelist and keep scanning.
        auto* entry = (FreelistEntry*)ptr;
        entry->next = block.m_freelist;
        block.m_freelist = entry;
    }
    return nullptr;
}
286
// Passed by calloc() (which zero-fills anyway) to let malloc_impl() skip the
// scrub-byte fill.
enum class CallerWillInitializeMemory {
    No,
    Yes,
};

#ifndef NO_TLS
// Per-thread switch VERIFYed by malloc_impl()/free_impl(). NOTE(review): the
// code that toggles this lives outside this file — confirm before relying on it.
__thread bool s_allocation_enabled = true;
#endif
295
296static ErrorOr<void*> malloc_impl(size_t size, size_t align, CallerWillInitializeMemory caller_will_initialize_memory)
297{
298#ifndef NO_TLS
299 VERIFY(s_allocation_enabled);
300#endif
301
302 // Align must be a power of 2.
303 if (popcount(align) != 1)
304 return EINVAL;
305
306 // FIXME: Support larger than 32KiB alignments (if you dare).
307 if (sizeof(BigAllocationBlock) + align >= ChunkedBlock::block_size)
308 return EINVAL;
309
310 if (s_log_malloc)
311 dbgln("LibC: malloc({})", size);
312
313 if (!size) {
314 // Legally we could just return a null pointer here, but this is more
315 // compatible with existing software.
316 size = 1;
317 }
318
319 g_malloc_stats.number_of_malloc_calls++;
320
321 size_t good_size;
322 auto* allocator = allocator_for_size(size, good_size, align);
323
324 PthreadMutexLocker locker(s_malloc_mutex);
325
326 if (!allocator) {
327 size_t real_size = round_up_to_power_of_two(sizeof(BigAllocationBlock) + size + ((align > 16) ? align : 0), ChunkedBlock::block_size);
328 if (real_size < size) {
329 dbgln_if(MALLOC_DEBUG, "LibC: Detected overflow trying to do big allocation of size {} for {}", real_size, size);
330 return ENOMEM;
331 }
332#ifdef RECYCLE_BIG_ALLOCATIONS
333 if (auto* allocator = big_allocator_for_size(real_size)) {
334 if (!allocator->blocks.is_empty()) {
335 g_malloc_stats.number_of_big_allocator_hits++;
336 auto* block = allocator->blocks.take_last();
337 int rc = madvise(block, real_size, MADV_SET_NONVOLATILE);
338 bool this_block_was_purged = rc == 1;
339 if (rc < 0) {
340 perror("madvise");
341 VERIFY_NOT_REACHED();
342 }
343 if (mprotect(block, real_size, PROT_READ | PROT_WRITE) < 0) {
344 perror("mprotect");
345 VERIFY_NOT_REACHED();
346 }
347 if (this_block_was_purged) {
348 g_malloc_stats.number_of_big_allocator_purge_hits++;
349 new (block) BigAllocationBlock(real_size);
350 }
351
352 void* ptr = reinterpret_cast<void*>(round_up_to_power_of_two(reinterpret_cast<uintptr_t>(&block->m_slot[0]), align));
353
354 ue_notify_malloc(ptr, size);
355 return ptr;
356 }
357 }
358#endif
359 auto* block = (BigAllocationBlock*)TRY(os_alloc(real_size, "malloc: BigAllocationBlock"));
360 g_malloc_stats.number_of_big_allocs++;
361 new (block) BigAllocationBlock(real_size);
362
363 void* ptr = reinterpret_cast<void*>(round_up_to_power_of_two(reinterpret_cast<uintptr_t>(&block->m_slot[0]), align));
364 ue_notify_malloc(ptr, size);
365 return ptr;
366 }
367
368 ChunkedBlock* block = nullptr;
369 void* ptr = nullptr;
370 for (auto& current : allocator->usable_blocks) {
371 if (current.free_chunks()) {
372 ptr = try_allocate_chunk_aligned(align, current);
373 if (ptr) {
374 block = ¤t;
375 break;
376 }
377 }
378 }
379
380 if (!block && s_hot_empty_block_count) {
381 g_malloc_stats.number_of_hot_empty_block_hits++;
382 block = s_hot_empty_blocks[--s_hot_empty_block_count];
383 if (block->m_size != good_size) {
384 new (block) ChunkedBlock(good_size);
385 ue_notify_chunk_size_changed(block, good_size);
386 char buffer[64];
387 snprintf(buffer, sizeof(buffer), "malloc: ChunkedBlock(%zu)", good_size);
388 set_mmap_name(block, ChunkedBlock::block_size, buffer);
389 }
390 allocator->usable_blocks.append(*block);
391 }
392
393 if (!block && s_cold_empty_block_count) {
394 g_malloc_stats.number_of_cold_empty_block_hits++;
395 block = s_cold_empty_blocks[--s_cold_empty_block_count];
396 int rc = madvise(block, ChunkedBlock::block_size, MADV_SET_NONVOLATILE);
397 bool this_block_was_purged = rc == 1;
398 if (rc < 0) {
399 perror("madvise");
400 VERIFY_NOT_REACHED();
401 }
402 rc = mprotect(block, ChunkedBlock::block_size, PROT_READ | PROT_WRITE);
403 if (rc < 0) {
404 perror("mprotect");
405 VERIFY_NOT_REACHED();
406 }
407 if (this_block_was_purged || block->m_size != good_size) {
408 if (this_block_was_purged)
409 g_malloc_stats.number_of_cold_empty_block_purge_hits++;
410 new (block) ChunkedBlock(good_size);
411 ue_notify_chunk_size_changed(block, good_size);
412 }
413 allocator->usable_blocks.append(*block);
414 }
415
416 if (!block) {
417 g_malloc_stats.number_of_block_allocs++;
418 char buffer[64];
419 snprintf(buffer, sizeof(buffer), "malloc: ChunkedBlock(%zu)", good_size);
420 block = (ChunkedBlock*)TRY(os_alloc(ChunkedBlock::block_size, buffer));
421 new (block) ChunkedBlock(good_size);
422 allocator->usable_blocks.append(*block);
423 ++allocator->block_count;
424 }
425
426 if (!ptr) {
427 ptr = try_allocate_chunk_aligned(align, *block);
428 }
429
430 VERIFY(ptr);
431 if (block->is_full()) {
432 g_malloc_stats.number_of_blocks_full++;
433 dbgln_if(MALLOC_DEBUG, "Block {:p} is now full in size class {}", block, good_size);
434 allocator->usable_blocks.remove(*block);
435 allocator->full_blocks.append(*block);
436 }
437 dbgln_if(MALLOC_DEBUG, "LibC: allocated {:p} (chunk in block {:p}, size {})", ptr, block, block->bytes_per_chunk());
438
439 if (s_scrub_malloc && caller_will_initialize_memory == CallerWillInitializeMemory::No)
440 memset(ptr, MALLOC_SCRUB_BYTE, block->m_size);
441
442 ue_notify_malloc(ptr, size);
443 return ptr;
444}
445
// Core deallocation routine behind free(). Masks the pointer down to its
// owning block and dispatches on the block header's magic value.
static void free_impl(void* ptr)
{
#ifndef NO_TLS
    VERIFY(s_allocation_enabled);
#endif

    // free() must not clobber errno; the syscalls below may set it.
    ScopedValueRollback rollback(errno);

    if (!ptr)
        return;

    g_malloc_stats.number_of_free_calls++;

    // NOTE(review): the doubled ChunkedBlock:: qualification is redundant but
    // legal (injected-class-name).
    void* block_base = (void*)((FlatPtr)ptr & ChunkedBlock::ChunkedBlock::block_mask);
    size_t magic = *(size_t*)block_base;

    PthreadMutexLocker locker(s_malloc_mutex);

    if (magic == MAGIC_BIGALLOC_HEADER) {
        auto* block = (BigAllocationBlock*)block_base;
#ifdef RECYCLE_BIG_ALLOCATIONS
        // Keep a few big blocks cached: inaccessible and volatile, so the
        // kernel may purge them under memory pressure.
        if (auto* allocator = big_allocator_for_size(block->m_size)) {
            if (allocator->blocks.size() < number_of_big_blocks_to_keep_around_per_size_class) {
                g_malloc_stats.number_of_big_allocator_keeps++;
                allocator->blocks.append(block);
                // Read m_size before mprotect() makes the header unreadable.
                size_t this_block_size = block->m_size;
                if (mprotect(block, this_block_size, PROT_NONE) < 0) {
                    perror("mprotect");
                    VERIFY_NOT_REACHED();
                }
                if (madvise(block, this_block_size, MADV_SET_VOLATILE) != 0) {
                    perror("madvise");
                    VERIFY_NOT_REACHED();
                }
                return;
            }
        }
#endif
        g_malloc_stats.number_of_big_allocator_frees++;
        os_free(block, block->m_size);
        return;
    }

    assert(magic == MAGIC_PAGE_HEADER);
    auto* block = (ChunkedBlock*)block_base;

    dbgln_if(MALLOC_DEBUG, "LibC: freeing {:p} in allocator {:p} (size={}, used={})", ptr, block, block->bytes_per_chunk(), block->used_chunks());

    if (s_scrub_free)
        memset(ptr, FREE_SCRUB_BYTE, block->bytes_per_chunk());

    // Push the chunk back onto its block's freelist.
    auto* entry = (FreelistEntry*)ptr;
    entry->next = block->m_freelist;
    block->m_freelist = entry;

    if (block->is_full()) {
        // The block was full; this free makes it usable again.
        size_t good_size;
        auto* allocator = allocator_for_size(block->m_size, good_size);
        dbgln_if(MALLOC_DEBUG, "Block {:p} no longer full in size class {}", block, good_size);
        g_malloc_stats.number_of_freed_full_blocks++;
        allocator->full_blocks.remove(*block);
        allocator->usable_blocks.prepend(*block);
    }

    ++block->m_free_chunks;

    if (!block->used_chunks()) {
        // Block is now completely empty: cache it (hot, then cold) or unmap it.
        size_t good_size;
        auto* allocator = allocator_for_size(block->m_size, good_size);
        if (s_hot_empty_block_count < number_of_hot_chunked_blocks_to_keep_around) {
            dbgln_if(MALLOC_DEBUG, "Keeping hot block {:p} around", block);
            g_malloc_stats.number_of_hot_keeps++;
            allocator->usable_blocks.remove(*block);
            s_hot_empty_blocks[s_hot_empty_block_count++] = block;
            return;
        }
        if (s_cold_empty_block_count < number_of_cold_chunked_blocks_to_keep_around) {
            dbgln_if(MALLOC_DEBUG, "Keeping cold block {:p} around", block);
            g_malloc_stats.number_of_cold_keeps++;
            allocator->usable_blocks.remove(*block);
            s_cold_empty_blocks[s_cold_empty_block_count++] = block;
            // Cold blocks become inaccessible and purgeable until reused.
            mprotect(block, ChunkedBlock::block_size, PROT_NONE);
            madvise(block, ChunkedBlock::block_size, MADV_SET_VOLATILE);
            return;
        }
        dbgln_if(MALLOC_DEBUG, "Releasing block {:p} for size class {}", block, good_size);
        g_malloc_stats.number_of_frees++;
        allocator->usable_blocks.remove(*block);
        --allocator->block_count;
        os_free(block, ChunkedBlock::block_size);
    }
}
538
539// https://pubs.opengroup.org/onlinepubs/9699919799/functions/malloc.html
540void* malloc(size_t size)
541{
542 MemoryAuditingSuppressor suppressor;
543 auto ptr_or_error = malloc_impl(size, 16, CallerWillInitializeMemory::No);
544
545 if (ptr_or_error.is_error()) {
546 errno = ptr_or_error.error().code();
547 return nullptr;
548 }
549
550 if (s_profiling)
551 perf_event(PERF_EVENT_MALLOC, size, reinterpret_cast<FlatPtr>(ptr_or_error.value()));
552
553 return ptr_or_error.value();
554}
555
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/free.html
// Reports to the profiler and UE before the chunk is scrubbed/recycled.
void free(void* ptr)
{
    MemoryAuditingSuppressor suppressor;
    if (s_profiling)
        perf_event(PERF_EVENT_FREE, reinterpret_cast<FlatPtr>(ptr), 0);
    ue_notify_free(ptr);
    free_impl(ptr);
}
565
566// https://pubs.opengroup.org/onlinepubs/9699919799/functions/calloc.html
567void* calloc(size_t count, size_t size)
568{
569 MemoryAuditingSuppressor suppressor;
570 if (Checked<size_t>::multiplication_would_overflow(count, size)) {
571 errno = ENOMEM;
572 return nullptr;
573 }
574 size_t new_size = count * size;
575 auto ptr_or_error = malloc_impl(new_size, 16, CallerWillInitializeMemory::Yes);
576
577 if (ptr_or_error.is_error()) {
578 errno = ptr_or_error.error().code();
579 return nullptr;
580 }
581
582 memset(ptr_or_error.value(), 0, new_size);
583 return ptr_or_error.value();
584}
585
586// https://pubs.opengroup.org/onlinepubs/9699919799/functions/posix_memalign.html
587int posix_memalign(void** memptr, size_t alignment, size_t size)
588{
589 MemoryAuditingSuppressor suppressor;
590 auto ptr_or_error = malloc_impl(size, alignment, CallerWillInitializeMemory::No);
591
592 if (ptr_or_error.is_error())
593 return ptr_or_error.error().code();
594
595 *memptr = ptr_or_error.value();
596 return 0;
597}
598
599void* aligned_alloc(size_t alignment, size_t size)
600{
601 MemoryAuditingSuppressor suppressor;
602 auto ptr_or_error = malloc_impl(size, alignment, CallerWillInitializeMemory::No);
603
604 if (ptr_or_error.is_error()) {
605 errno = ptr_or_error.error().code();
606 return nullptr;
607 }
608
609 return ptr_or_error.value();
610}
611
612size_t malloc_size(void const* ptr)
613{
614 MemoryAuditingSuppressor suppressor;
615 if (!ptr)
616 return 0;
617 void* page_base = (void*)((FlatPtr)ptr & ChunkedBlock::block_mask);
618 auto* header = (CommonHeader const*)page_base;
619 auto size = header->m_size;
620 if (header->m_magic == MAGIC_BIGALLOC_HEADER)
621 size -= sizeof(BigAllocationBlock);
622 else
623 VERIFY(header->m_magic == MAGIC_PAGE_HEADER);
624 return size;
625}
626
627size_t malloc_good_size(size_t size)
628{
629 size_t good_size;
630 allocator_for_size(size, good_size);
631 return good_size;
632}
633
634void* realloc(void* ptr, size_t size)
635{
636 MemoryAuditingSuppressor suppressor;
637 if (!ptr)
638 return malloc(size);
639 if (!size) {
640 free(ptr);
641 return nullptr;
642 }
643
644 auto existing_allocation_size = malloc_size(ptr);
645
646 if (size <= existing_allocation_size) {
647 ue_notify_realloc(ptr, size);
648 return ptr;
649 }
650 auto* new_ptr = malloc(size);
651 if (new_ptr) {
652 memcpy(new_ptr, ptr, min(existing_allocation_size, size));
653 free(ptr);
654 }
655 return new_ptr;
656}
657
658void __malloc_init()
659{
660 s_in_userspace_emulator = (int)syscall(SC_emuctl, 0) != -ENOSYS;
661 if (s_in_userspace_emulator) {
662 // Don't bother scrubbing memory if we're running in UE since it
663 // keeps track of heap memory anyway.
664 s_scrub_malloc = false;
665 s_scrub_free = false;
666 }
667
668 if (secure_getenv("LIBC_NOSCRUB_MALLOC"))
669 s_scrub_malloc = false;
670 if (secure_getenv("LIBC_NOSCRUB_FREE"))
671 s_scrub_free = false;
672 if (secure_getenv("LIBC_LOG_MALLOC"))
673 s_log_malloc = true;
674 if (secure_getenv("LIBC_PROFILE_MALLOC"))
675 s_profiling = true;
676
677 for (size_t i = 0; i < num_size_classes; ++i) {
678 new (&allocators()[i]) Allocator();
679 allocators()[i].size = size_classes[i];
680 }
681
682 new (&big_allocators()[0])(BigAllocator);
683}
684
// Dumps the g_malloc_stats counters to the debug log. Reads are intentionally
// unlocked; numbers may be slightly stale if other threads are allocating.
void serenity_dump_malloc_stats()
{
    dbgln("# malloc() calls: {}", g_malloc_stats.number_of_malloc_calls);
    dbgln();
    dbgln("big alloc hits: {}", g_malloc_stats.number_of_big_allocator_hits);
    dbgln("big alloc hits that were purged: {}", g_malloc_stats.number_of_big_allocator_purge_hits);
    dbgln("big allocs: {}", g_malloc_stats.number_of_big_allocs);
    dbgln();
    dbgln("empty hot block hits: {}", g_malloc_stats.number_of_hot_empty_block_hits);
    dbgln("empty cold block hits: {}", g_malloc_stats.number_of_cold_empty_block_hits);
    dbgln("empty cold block hits that were purged: {}", g_malloc_stats.number_of_cold_empty_block_purge_hits);
    dbgln("block allocs: {}", g_malloc_stats.number_of_block_allocs);
    dbgln("filled blocks: {}", g_malloc_stats.number_of_blocks_full);
    dbgln();
    dbgln("# free() calls: {}", g_malloc_stats.number_of_free_calls);
    dbgln();
    dbgln("big alloc keeps: {}", g_malloc_stats.number_of_big_allocator_keeps);
    dbgln("big alloc frees: {}", g_malloc_stats.number_of_big_allocator_frees);
    dbgln();
    dbgln("full block frees: {}", g_malloc_stats.number_of_freed_full_blocks);
    dbgln("number of hot keeps: {}", g_malloc_stats.number_of_hot_keeps);
    dbgln("number of cold keeps: {}", g_malloc_stats.number_of_cold_keeps);
    dbgln("number of frees: {}", g_malloc_stats.number_of_frees);
}
709}