// Serenity Operating System
1/*
2 * Copyright (c) 2019-2021, Andrew Kaster <akaster@serenityos.org>
3 *
4 * SPDX-License-Identifier: BSD-2-Clause
5 */
6
7#include <AK/Bitmap.h>
8#include <AK/Checked.h>
9#include <AK/Debug.h>
10#include <AK/Format.h>
11#include <AK/NeverDestroyed.h>
12#include <assert.h>
13#include <bits/pthread_integration.h>
14#include <mallocdefs.h>
15#include <pthread.h>
16#include <stdio.h>
17#include <stdlib.h>
18#include <string.h>
19#include <sys/internals.h>
20#include <sys/mman.h>
21#include <unistd.h>
22
23extern "C" {
24
// One registered exit handler, as recorded by __cxa_atexit().
// The layout is append-only storage in an mmap'ed region, so keep this POD.
struct AtExitEntry {
    AtExitFunction method { nullptr }; // Handler to invoke at finalization time.
    void* parameter { nullptr };       // Opaque argument passed to `method`.
    void* dso_handle { nullptr };      // Owning DSO; matched against __cxa_finalize(d).
};
30
// We'll re-allocate the region if it ends up being too small at runtime.
// Invariant: atexit_entry_region_capacity * sizeof(AtExitEntry) does not overflow.
// Starts at one page worth of entries; grows one page at a time (see atexit_next_capacity).
static size_t atexit_entry_region_capacity = PAGE_SIZE / sizeof(AtExitEntry);
34
35static size_t atexit_region_bytes(size_t capacity = atexit_entry_region_capacity)
36{
37 return PAGE_ROUND_UP(capacity * sizeof(AtExitEntry));
38}
39
40static size_t atexit_next_capacity()
41{
42 size_t original_num_bytes = atexit_region_bytes();
43 VERIFY(!Checked<size_t>::addition_would_overflow(original_num_bytes, PAGE_SIZE));
44 return (original_num_bytes + PAGE_SIZE) / sizeof(AtExitEntry);
45}
46
// Backing storage for registered handlers; mmap'ed lazily on the first __cxa_atexit() call.
static AtExitEntry* atexit_entries;
// Number of initialized entries in atexit_entries (always <= atexit_entry_region_capacity).
static size_t atexit_entry_count = 0;
// Serializes access to the entry region, the count, and the called-entries bitmap.
static pthread_mutex_t atexit_mutex = __PTHREAD_MUTEX_INITIALIZER;

// The C++ compiler automagically registers the destructor of this object with __cxa_atexit.
// However, we can't control the order in which these destructors are run, so we might still want to access this data after the registered entry.
// Hence, we will call the destructor manually, when we know it is safe to do so.
// Bit i is set once entry i has been invoked, so repeated __cxa_finalize calls
// never run the same handler twice (as the Itanium ABI requires).
static NeverDestroyed<Bitmap> atexit_called_entries;

// During startup, it is sufficiently unlikely that the attacker can exploit any write primitive.
// We use this to avoid unnecessary syscalls to mprotect.
static bool atexit_region_should_lock = false;
59
60static void lock_atexit_handlers()
61{
62 if (atexit_region_should_lock && mprotect(atexit_entries, atexit_region_bytes(), PROT_READ) < 0) {
63 perror("lock_atexit_handlers");
64 _exit(1);
65 }
66}
67
68static void unlock_atexit_handlers()
69{
70 if (atexit_region_should_lock && mprotect(atexit_entries, atexit_region_bytes(), PROT_READ | PROT_WRITE) < 0) {
71 perror("unlock_atexit_handlers");
72 _exit(1);
73 }
74}
75
// Called once startup is far enough along that write-protecting the handler
// region is worthwhile; from here on, every append re-locks it (see
// lock/unlock_atexit_handlers). Locks the region immediately.
void __begin_atexit_locking()
{
    atexit_region_should_lock = true;
    lock_atexit_handlers();
}
81
82int __cxa_atexit(AtExitFunction exit_function, void* parameter, void* dso_handle)
83{
84 pthread_mutex_lock(&atexit_mutex);
85
86 // allocate initial atexit region
87 if (!atexit_entries) {
88 atexit_entries = (AtExitEntry*)mmap(nullptr, atexit_region_bytes(), PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
89 if (atexit_entries == MAP_FAILED) {
90 pthread_mutex_unlock(&atexit_mutex);
91 perror("__cxa_atexit mmap");
92 _exit(1);
93 }
94 }
95
96 // reallocate atexit region, increasing size by PAGE_SIZE
97 if (atexit_entry_count >= atexit_entry_region_capacity) {
98 size_t new_capacity = atexit_next_capacity();
99 size_t new_atexit_region_size = atexit_region_bytes(new_capacity);
100 dbgln_if(GLOBAL_DTORS_DEBUG, "__cxa_atexit: Growing exit handler region from {} entries to {} entries", atexit_entry_region_capacity, new_capacity);
101
102 auto* new_atexit_entries = (AtExitEntry*)mmap(nullptr, new_atexit_region_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
103 if (new_atexit_entries == MAP_FAILED) {
104 pthread_mutex_unlock(&atexit_mutex);
105 perror("__cxa_atexit mmap (new size)");
106 return -1;
107 }
108 // Note: We must make sure to only copy initialized entries, as even touching uninitialized bytes will trigger UBSan.
109 memcpy(new_atexit_entries, atexit_entries, atexit_entry_count * sizeof(AtExitEntry));
110 if (munmap(atexit_entries, atexit_region_bytes()) < 0) {
111 perror("__cxa_atexit munmap old region");
112 // leak the old region on failure
113 }
114 atexit_entries = new_atexit_entries;
115 atexit_entry_region_capacity = new_capacity;
116 }
117
118 unlock_atexit_handlers();
119 atexit_entries[atexit_entry_count++] = { exit_function, parameter, dso_handle };
120 lock_atexit_handlers();
121
122 pthread_mutex_unlock(&atexit_mutex);
123
124 return 0;
125}
126
127void __cxa_finalize(void* dso_handle)
128{
129 // From the itanium abi, https://itanium-cxx-abi.github.io/cxx-abi/abi.html#dso-dtor-runtime-api
130 //
131 // When __cxa_finalize(d) is called, it should walk the termination function list, calling each in turn
132 // if d matches __dso_handle for the termination function entry. If d == NULL, it should call all of them.
133 // Multiple calls to __cxa_finalize shall not result in calling termination function entries multiple times;
134 // the implementation may either remove entries or mark them finished.
135
136 pthread_mutex_lock(&atexit_mutex);
137
138 if (atexit_entry_count > atexit_called_entries->size())
139 atexit_called_entries->grow(atexit_entry_count, false);
140
141 ssize_t entry_index = atexit_entry_count;
142
143 dbgln_if(GLOBAL_DTORS_DEBUG, "__cxa_finalize: {} entries in the finalizer list", entry_index);
144
145 while (--entry_index >= 0) {
146 auto& exit_entry = atexit_entries[entry_index];
147 bool needs_calling = !atexit_called_entries->get(entry_index) && (!dso_handle || dso_handle == exit_entry.dso_handle);
148 if (needs_calling) {
149 dbgln_if(GLOBAL_DTORS_DEBUG, "__cxa_finalize: calling entry[{}] {:p}({:p}) dso: {:p}", entry_index, exit_entry.method, exit_entry.parameter, exit_entry.dso_handle);
150 atexit_called_entries->set(entry_index, true);
151 pthread_mutex_unlock(&atexit_mutex);
152 exit_entry.method(exit_entry.parameter);
153 pthread_mutex_lock(&atexit_mutex);
154 }
155 }
156
157 pthread_mutex_unlock(&atexit_mutex);
158}
159
// Called by compiler-generated vtable stubs when a pure virtual function is
// invoked (e.g. from a base-class constructor/destructor). This is always a
// program bug, so crash loudly rather than continue.
__attribute__((noreturn)) void __cxa_pure_virtual()
{
    VERIFY_NOT_REACHED();
}
164
165} // extern "C"