// Serenity Operating System
1/*
2 * Copyright (c) 2021, the SerenityOS developers.
3 *
4 * SPDX-License-Identifier: BSD-2-Clause
5 */
6
7#include <AK/Atomic.h>
8#include <AK/NeverDestroyed.h>
9#include <AK/Types.h>
10#include <AK/Vector.h>
11#include <bits/pthread_integration.h>
12#include <errno.h>
13#include <pthread.h>
14#include <sched.h>
15#include <serenity.h>
16#include <unistd.h>
17
namespace {

// File-local state backing the pthread_atfork() machinery below.
// Most programs don't need this, no need to incur an extra mutex lock/unlock on them
static Atomic<bool> g_did_touch_atfork { false }; // set once any handler is registered; checked by the fork hooks
static pthread_mutex_t g_atfork_list_mutex __PTHREAD_MUTEX_INITIALIZER; // guards all three handler lists
// Handler lists (inline capacity 4). NeverDestroyed avoids running their
// destructors at exit, so the lists stay usable during late teardown.
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_prepare_list;
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_child_list;
static NeverDestroyed<Vector<void (*)(void), 4>> g_atfork_parent_list;

}
28
29extern "C" {
30void __pthread_fork_prepare(void)
31{
32 if (!g_did_touch_atfork.load())
33 return;
34
35 pthread_mutex_lock(&g_atfork_list_mutex);
36 for (auto entry : g_atfork_prepare_list.get())
37 entry();
38 pthread_mutex_unlock(&g_atfork_list_mutex);
39}
40
41void __pthread_fork_child(void)
42{
43 if (!g_did_touch_atfork.load())
44 return;
45
46 pthread_mutex_lock(&g_atfork_list_mutex);
47 for (auto entry : g_atfork_child_list.get())
48 entry();
49 pthread_mutex_unlock(&g_atfork_list_mutex);
50}
51
52void __pthread_fork_parent(void)
53{
54 if (!g_did_touch_atfork.load())
55 return;
56
57 pthread_mutex_lock(&g_atfork_list_mutex);
58 for (auto entry : g_atfork_parent_list.get())
59 entry();
60 pthread_mutex_unlock(&g_atfork_list_mutex);
61}
62
63void __pthread_fork_atfork_register_prepare(void (*func)(void))
64{
65 g_did_touch_atfork.store(true);
66
67 pthread_mutex_lock(&g_atfork_list_mutex);
68 g_atfork_prepare_list->append(func);
69 pthread_mutex_unlock(&g_atfork_list_mutex);
70}
71
72void __pthread_fork_atfork_register_parent(void (*func)(void))
73{
74 g_did_touch_atfork.store(true);
75
76 pthread_mutex_lock(&g_atfork_list_mutex);
77 g_atfork_parent_list->append(func);
78 pthread_mutex_unlock(&g_atfork_list_mutex);
79}
80
81void __pthread_fork_atfork_register_child(void (*func)(void))
82{
83 g_did_touch_atfork.store(true);
84
85 pthread_mutex_lock(&g_atfork_list_mutex);
86 g_atfork_child_list->append(func);
87 pthread_mutex_unlock(&g_atfork_list_mutex);
88}
89
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_self.html
// On Serenity a pthread_t is simply the kernel thread id.
int pthread_self()
{
    auto thread_id = gettid();
    return thread_id;
}
95
// States of the futex word (mutex->lock) used by the mutex implementation below.
static constexpr u32 MUTEX_UNLOCKED = 0;                // free; can be taken without a syscall
static constexpr u32 MUTEX_LOCKED_NO_NEED_TO_WAKE = 1;  // held, no waiter recorded; unlock skips futex_wake
static constexpr u32 MUTEX_LOCKED_NEED_TO_WAKE = 2;     // held, a waiter may be blocked; unlock must futex_wake
99
100// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_init.html
101int pthread_mutex_init(pthread_mutex_t* mutex, pthread_mutexattr_t const* attributes)
102{
103 mutex->lock = 0;
104 mutex->owner = 0;
105 mutex->level = 0;
106 mutex->type = attributes ? attributes->type : __PTHREAD_MUTEX_NORMAL;
107 return 0;
108}
109
110// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_trylock.html
111int pthread_mutex_trylock(pthread_mutex_t* mutex)
112{
113 u32 expected = MUTEX_UNLOCKED;
114 bool exchanged = AK::atomic_compare_exchange_strong(&mutex->lock, expected, MUTEX_LOCKED_NO_NEED_TO_WAKE, AK::memory_order_acquire);
115
116 if (exchanged) [[likely]] {
117 if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
118 AK::atomic_store(&mutex->owner, pthread_self(), AK::memory_order_relaxed);
119 mutex->level = 0;
120 return 0;
121 } else if (mutex->type == __PTHREAD_MUTEX_RECURSIVE) {
122 pthread_t owner = AK::atomic_load(&mutex->owner, AK::memory_order_relaxed);
123 if (owner == pthread_self()) {
124 // We already own the mutex!
125 mutex->level++;
126 return 0;
127 }
128 }
129 return EBUSY;
130}
131
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_lock.html
// Acquires the mutex, blocking on the futex word if necessary. Always returns 0.
// For recursive mutexes, re-entry by the owning thread just bumps the nesting level.
int pthread_mutex_lock(pthread_mutex_t* mutex)
{
    // Fast path: attempt to claim the mutex without waiting.
    u32 value = MUTEX_UNLOCKED;
    bool exchanged = AK::atomic_compare_exchange_strong(&mutex->lock, value, MUTEX_LOCKED_NO_NEED_TO_WAKE, AK::memory_order_acquire);
    if (exchanged) [[likely]] {
        if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
            AK::atomic_store(&mutex->owner, pthread_self(), AK::memory_order_relaxed);
        mutex->level = 0;
        return 0;
    } else if (mutex->type == __PTHREAD_MUTEX_RECURSIVE) {
        // On CAS failure, `value` now holds the observed lock state.
        pthread_t owner = AK::atomic_load(&mutex->owner, AK::memory_order_relaxed);
        if (owner == pthread_self()) {
            // We already own the mutex!
            mutex->level++;
            return 0;
        }
    }

    // Slow path: wait, record the fact that we're going to wait, and always
    // remember to wake the next thread up once we release the mutex.
    // Force the state to NEED_TO_WAKE so the eventual unlocker issues a futex_wake.
    if (value != MUTEX_LOCKED_NEED_TO_WAKE)
        value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);

    // If the exchange observed UNLOCKED we actually acquired the lock (and left
    // it marked NEED_TO_WAKE, which is merely conservative). Otherwise sleep;
    // futex_wait only blocks if the word still equals `value`, avoiding lost wakeups.
    while (value != MUTEX_UNLOCKED) {
        futex_wait(&mutex->lock, value, nullptr, 0, false);
        value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
    }

    if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
        AK::atomic_store(&mutex->owner, pthread_self(), AK::memory_order_relaxed);
    mutex->level = 0;
    return 0;
}
167
// Acquires the mutex, unconditionally marking it NEED_TO_WAKE. Always returns 0.
int __pthread_mutex_lock_pessimistic_np(pthread_mutex_t* mutex)
{
    // Same as pthread_mutex_lock(), but always set MUTEX_LOCKED_NEED_TO_WAKE,
    // and also don't bother checking for already owning the mutex recursively,
    // because we know we don't. Used in the condition variable implementation.
    u32 value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
    // An observed UNLOCKED means we took the lock; anything else means sleep
    // until woken, then re-attempt the exchange.
    while (value != MUTEX_UNLOCKED) {
        futex_wait(&mutex->lock, value, nullptr, 0, false);
        value = AK::atomic_exchange(&mutex->lock, MUTEX_LOCKED_NEED_TO_WAKE, AK::memory_order_acquire);
    }

    if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
        AK::atomic_store(&mutex->owner, pthread_self(), AK::memory_order_relaxed);
    mutex->level = 0;
    return 0;
}
184
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_unlock.html
// Releases the mutex, waking one futex waiter if any were recorded. Always returns 0.
// NOTE(review): assumes the caller actually holds the mutex — no owner check is
// performed for normal mutexes (undefined behavior per POSIX otherwise).
int pthread_mutex_unlock(pthread_mutex_t* mutex)
{
    // Recursive mutex with nested locks: just unwind one level, keep holding it.
    if (mutex->type == __PTHREAD_MUTEX_RECURSIVE && mutex->level > 0) {
        mutex->level--;
        return 0;
    }

    // Clear the owner before releasing the lock word, so a thread that acquires
    // it immediately afterwards never sees a stale owner.
    if (mutex->type == __PTHREAD_MUTEX_RECURSIVE)
        AK::atomic_store(&mutex->owner, 0, AK::memory_order_relaxed);

    // Release the lock; the previous state tells us whether anyone may be waiting.
    u32 value = AK::atomic_exchange(&mutex->lock, MUTEX_UNLOCKED, AK::memory_order_release);
    if (value == MUTEX_LOCKED_NEED_TO_WAKE) [[unlikely]] {
        int rc = futex_wake(&mutex->lock, 1, false);
        VERIFY(rc >= 0);
    }

    return 0;
}
204}