// Serenity Operating System
1/*
2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
3 *
4 * SPDX-License-Identifier: BSD-2-Clause
5 */
6
7#include <AK/Assertions.h>
8#include <AK/Atomic.h>
9#include <AK/Debug.h>
10#include <AK/Format.h>
11#include <AK/SinglyLinkedList.h>
12#include <Kernel/API/Syscall.h>
13#include <LibSystem/syscall.h>
14#include <bits/pthread_cancel.h>
15#include <bits/pthread_integration.h>
16#include <errno.h>
17#include <limits.h>
18#include <mallocdefs.h>
19#include <pthread.h>
20#include <serenity.h>
21#include <signal.h>
22#include <stdio.h>
23#include <string.h>
24#include <sys/mman.h>
25#include <syscall.h>
26#include <time.h>
27#include <unistd.h>
28
namespace {
// The user-facing pthread_attr_t is an opaque pointer to this kernel
// parameter struct, so attribute setters write straight into the params
// that SC_create_thread will consume.
using PthreadAttrImpl = Syscall::SC_create_thread_params;

} // end anonymous namespace

// Thread stacks are rounded up to this alignment before mapping.
static constexpr size_t required_stack_alignment = 4 * MiB;
// Sanity cap for pthread_attr_setguardsize(); larger requests are rejected.
static constexpr size_t highest_reasonable_guard_size = 32 * PAGE_SIZE;

// Per-thread record of the stack mapping, so exit_thread() can hand it
// back to the kernel for unmapping.
__thread void* s_stack_location;
__thread size_t s_stack_size;

// Per-thread cancellation configuration (see pthread_setcancelstate/type).
__thread int s_thread_cancel_state = PTHREAD_CANCEL_ENABLE;
__thread int s_thread_cancel_type = PTHREAD_CANCEL_DEFERRED;

// Serenity syscalls return negative errno values; pthread APIs return
// positive error codes (or 0), so negate on the way out.
#define __RETURN_PTHREAD_ERROR(rc) \
    return ((rc) < 0 ? -(rc) : 0)

// One registered pthread_cleanup_push() entry.
struct CleanupHandler {
    void (*routine)(void*);
    void* argument;
};

// LIFO stack of cleanup handlers for the current thread.
static thread_local SinglyLinkedList<CleanupHandler> cleanup_handlers;

// Set by the SIGCANCEL handler; consumed at cancellation points.
static __thread bool pending_cancellation = false;
54
55extern "C" {
56
// Terminates the calling thread: runs TLS key destructors, then asks the
// kernel to reap the thread and unmap its stack. Never returns.
[[noreturn]] static void exit_thread(void* code, void* stack_location, size_t stack_size)
{
    // Run pthread_key destructors before the stack disappears.
    __pthread_key_destroy_for_current_thread();
    syscall(SC_exit_thread, code, stack_location, stack_size);
    VERIFY_NOT_REACHED();
}
63
// Like pthread_exit(), but skips the cleanup-handler stack. Used after the
// handlers have already run (or when the start routine returned normally).
[[noreturn]] static void pthread_exit_without_cleanup_handlers(void* value_ptr)
{
    exit_thread(value_ptr, s_stack_location, s_stack_size);
}
68
// First function executed on a new thread's own stack. Records the stack
// mapping for later teardown, runs the user routine, and exits the thread
// with its return value. Never actually returns (pthread_exit_without_
// cleanup_handlers is [[noreturn]]).
static void* pthread_create_helper(void* (*routine)(void*), void* argument, void* stack_location, size_t stack_size)
{
    s_stack_location = stack_location;
    s_stack_size = stack_size;
    void* ret_val = routine(argument);
    pthread_exit_without_cleanup_handlers(ret_val);
}
76
// Prepares the initial stack and register state for a new thread and issues
// SC_create_thread. On success, stores the new tid into *thread and returns 0;
// otherwise returns a positive errno.
static int create_thread(pthread_t* thread, void* (*entry)(void*), void* argument, PthreadAttrImpl* thread_params)
{
    // Stacks grow downwards, so start at the top of the mapping.
    void** stack = (void**)((uintptr_t)thread_params->stack_location + thread_params->stack_size);

    auto push_on_stack = [&](void* data) {
        stack--;
        *stack = data;
        // Shrink the recorded size so the kernel sees the stack pointer offset.
        thread_params->stack_size -= sizeof(void*);
    };

    // We set up the stack for pthread_create_helper.
    // Note that we need to align the stack to 16B, accounting for
    // the fact that we also push 16 bytes.
    // NOTE(review): only the 8-byte fake return address is pushed below on
    // x86_64 — confirm the 16-byte accounting in this comment.
    while (((uintptr_t)stack - 16) % 16 != 0)
        push_on_stack(nullptr);

#if ARCH(X86_64)
    // pthread_create_helper's four arguments travel in the SysV argument
    // registers; the kernel installs these into the new thread's context.
    thread_params->rdi = (FlatPtr)entry;
    thread_params->rsi = (FlatPtr)argument;
    thread_params->rdx = (FlatPtr)thread_params->stack_location;
    thread_params->rcx = thread_params->stack_size;
#elif ARCH(AARCH64)
    (void)entry;
    (void)argument;
    TODO_AARCH64();
#else
#    error Unknown architecture
#endif
    VERIFY((uintptr_t)stack % 16 == 0);

    // Push a fake return address
    push_on_stack(nullptr);

    int rc = syscall(SC_create_thread, pthread_create_helper, thread_params);
    if (rc >= 0)
        *thread = rc;
    __RETURN_PTHREAD_ERROR(rc);
}
115
116// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_create.html
117int pthread_create(pthread_t* thread, pthread_attr_t const* attributes, void* (*start_routine)(void*), void* argument_to_start_routine)
118{
119 if (!thread)
120 return -EINVAL;
121
122 PthreadAttrImpl default_attributes {};
123 PthreadAttrImpl* const* arg_attributes = reinterpret_cast<PthreadAttrImpl* const*>(attributes);
124
125 PthreadAttrImpl* used_attributes = arg_attributes ? *arg_attributes : &default_attributes;
126
127 if (!used_attributes->stack_location) {
128 // adjust stack size, user might have called setstacksize, which has no restrictions on size/alignment
129 if (0 != (used_attributes->stack_size % required_stack_alignment))
130 used_attributes->stack_size += required_stack_alignment - (used_attributes->stack_size % required_stack_alignment);
131
132 used_attributes->stack_location = mmap_with_name(nullptr, used_attributes->stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, 0, 0, "Thread stack");
133 if (!used_attributes->stack_location)
134 return -1;
135 }
136
137 dbgln_if(PTHREAD_DEBUG, "pthread_create: Creating thread with attributes at {}, detach state {}, priority {}, guard page size {}, stack size {}, stack location {}",
138 used_attributes,
139 (PTHREAD_CREATE_JOINABLE == used_attributes->detach_state) ? "joinable" : "detached",
140 used_attributes->schedule_priority,
141 used_attributes->guard_page_size,
142 used_attributes->stack_size,
143 used_attributes->stack_location);
144
145 return create_thread(thread, start_routine, argument_to_start_routine, used_attributes);
146}
147
148// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_exit.html
// Terminates the calling thread with value_ptr as its exit value, first
// running every registered cleanup handler in LIFO (push) order.
void pthread_exit(void* value_ptr)
{
    while (!cleanup_handlers.is_empty()) {
        auto handler = cleanup_handlers.take_first();
        handler.routine(handler.argument);
    }

    pthread_exit_without_cleanup_handlers(value_ptr);
}
158
159void __pthread_maybe_cancel()
160{
161 // Check if we have cancellations enabled.
162 if (s_thread_cancel_state != PTHREAD_CANCEL_ENABLE)
163 return;
164
165 // Check if a cancellation request is pending.
166 if (!pending_cancellation)
167 return;
168
169 // Exit the thread via `pthread_exit`. This handles passing the
170 // return value and calling the cleanup handlers for us.
171 pthread_exit(PTHREAD_CANCELED);
172}
173
174// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cleanup_push.html
175void pthread_cleanup_push(void (*routine)(void*), void* arg)
176{
177 cleanup_handlers.prepend({ routine, arg });
178}
179
180// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cleanup_pop.html
181void pthread_cleanup_pop(int execute)
182{
183 VERIFY(!cleanup_handlers.is_empty());
184
185 auto handler = cleanup_handlers.take_first();
186
187 if (execute)
188 handler.routine(handler.argument);
189}
190
191// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_join.html
192int pthread_join(pthread_t thread, void** exit_value_ptr)
193{
194 __pthread_maybe_cancel();
195
196 int rc = syscall(SC_join_thread, thread, exit_value_ptr);
197 __RETURN_PTHREAD_ERROR(rc);
198}
199
200// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_kill.html
201int pthread_kill(pthread_t thread, int sig)
202{
203 int rc = syscall(SC_kill_thread, thread, sig);
204 __RETURN_PTHREAD_ERROR(rc);
205}
206
207// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_detach.html
208int pthread_detach(pthread_t thread)
209{
210 int rc = syscall(SC_detach_thread, thread);
211 __RETURN_PTHREAD_ERROR(rc);
212}
213
214// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_sigmask.html
215int pthread_sigmask(int how, sigset_t const* set, sigset_t* old_set)
216{
217 if (sigprocmask(how, set, old_set))
218 return errno;
219 return 0;
220}
221
222// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_destroy.html
223int pthread_mutex_destroy(pthread_mutex_t*)
224{
225 return 0;
226}
227
228// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutexattr_init.html
229int pthread_mutexattr_init(pthread_mutexattr_t* attr)
230{
231 attr->type = PTHREAD_MUTEX_NORMAL;
232 return 0;
233}
234
235// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutexattr_destroy.html
236int pthread_mutexattr_destroy(pthread_mutexattr_t*)
237{
238 return 0;
239}
240
241int pthread_mutexattr_settype(pthread_mutexattr_t* attr, int type)
242{
243 if (!attr)
244 return EINVAL;
245 if (type != PTHREAD_MUTEX_NORMAL && type != PTHREAD_MUTEX_RECURSIVE)
246 return EINVAL;
247 attr->type = type;
248 return 0;
249}
250
251// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutexattr_gettype.html
252int pthread_mutexattr_gettype(pthread_mutexattr_t* attr, int* type)
253{
254 *type = attr->type;
255 return 0;
256}
257
258// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_init.html
// Allocates a fresh attribute impl (with kernel-default values) and stores
// its pointer into the caller's opaque pthread_attr_t.
// NOTE(review): neither 'attributes' nor the allocation result is checked
// for null here — confirm whether callers rely on that.
int pthread_attr_init(pthread_attr_t* attributes)
{
    auto* impl = new PthreadAttrImpl {};
    *attributes = impl;

    dbgln_if(PTHREAD_DEBUG, "pthread_attr_init: New thread attributes at {}, detach state {}, priority {}, guard page size {}, stack size {}, stack location {}",
        impl,
        (PTHREAD_CREATE_JOINABLE == impl->detach_state) ? "joinable" : "detached",
        impl->schedule_priority,
        impl->guard_page_size,
        impl->stack_size,
        impl->stack_location);

    return 0;
}
274
275// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_destroy.html
276int pthread_attr_destroy(pthread_attr_t* attributes)
277{
278 auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
279 delete attributes_impl;
280 return 0;
281}
282
283// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_getdetachstate.html
284int pthread_attr_getdetachstate(pthread_attr_t const* attributes, int* p_detach_state)
285{
286 auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl const* const*>(attributes));
287
288 if (!attributes_impl || !p_detach_state)
289 return EINVAL;
290
291 *p_detach_state = attributes_impl->detach_state;
292 return 0;
293}
294
295// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_setdetachstate.html
// Stores the requested detach state into an attribute object after
// validating it is one of the two legal values.
int pthread_attr_setdetachstate(pthread_attr_t* attributes, int detach_state)
{
    // The opaque pthread_attr_t holds a pointer to our impl struct.
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));

    if (!attributes_impl)
        return EINVAL;

    if (detach_state != PTHREAD_CREATE_JOINABLE && detach_state != PTHREAD_CREATE_DETACHED)
        return EINVAL;

    attributes_impl->detach_state = detach_state;

    dbgln_if(PTHREAD_DEBUG, "pthread_attr_setdetachstate: Thread attributes at {}, detach state {}, priority {}, guard page size {}, stack size {}, stack location {}",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->detach_state) ? "joinable" : "detached",
        attributes_impl->schedule_priority,
        attributes_impl->guard_page_size,
        attributes_impl->stack_size,
        attributes_impl->stack_location);

    return 0;
}
318
319// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_getguardsize.html
320int pthread_attr_getguardsize(pthread_attr_t const* attributes, size_t* p_guard_size)
321{
322 auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl const* const*>(attributes));
323
324 if (!attributes_impl || !p_guard_size)
325 return EINVAL;
326
327 *p_guard_size = attributes_impl->reported_guard_page_size;
328 return 0;
329}
330
331// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_setguardsize.html
// Stores the requested guard size, rounded up to a whole number of pages.
// The unrounded value is kept separately because getguardsize() must
// report exactly what the caller asked for.
int pthread_attr_setguardsize(pthread_attr_t* attributes, size_t guard_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));

    if (!attributes_impl)
        return EINVAL;

    size_t actual_guard_size = guard_size;
    // round up
    if (0 != (guard_size % PAGE_SIZE))
        actual_guard_size += PAGE_SIZE - (guard_size % PAGE_SIZE);

    // what is the user even doing?
    if (actual_guard_size > highest_reasonable_guard_size) {
        return EINVAL;
    }

    attributes_impl->guard_page_size = actual_guard_size;
    attributes_impl->reported_guard_page_size = guard_size; // POSIX, why?

    dbgln_if(PTHREAD_DEBUG, "pthread_attr_setguardsize: Thread attributes at {}, detach state {}, priority {}, guard page size {}, stack size {}, stack location {}",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->detach_state) ? "joinable" : "detached",
        attributes_impl->schedule_priority,
        attributes_impl->guard_page_size,
        attributes_impl->stack_size,
        attributes_impl->stack_location);

    return 0;
}
362
363// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_getschedparam.html
364int pthread_attr_getschedparam(pthread_attr_t const* attributes, struct sched_param* p_sched_param)
365{
366 auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl const* const*>(attributes));
367
368 if (!attributes_impl || !p_sched_param)
369 return EINVAL;
370
371 p_sched_param->sched_priority = attributes_impl->schedule_priority;
372 return 0;
373}
374
375// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_setschedparam.html
// Validates the requested priority against the supported range and stores
// it into an attribute object.
int pthread_attr_setschedparam(pthread_attr_t* attributes, const struct sched_param* p_sched_param)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));
    if (!attributes_impl || !p_sched_param)
        return EINVAL;

    // Out-of-range priorities are "not supported" rather than invalid.
    if (p_sched_param->sched_priority < THREAD_PRIORITY_MIN || p_sched_param->sched_priority > THREAD_PRIORITY_MAX)
        return ENOTSUP;

    attributes_impl->schedule_priority = p_sched_param->sched_priority;

    dbgln_if(PTHREAD_DEBUG, "pthread_attr_setschedparam: Thread attributes at {}, detach state {}, priority {}, guard page size {}, stack size {}, stack location {}",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->detach_state) ? "joinable" : "detached",
        attributes_impl->schedule_priority,
        attributes_impl->guard_page_size,
        attributes_impl->stack_size,
        attributes_impl->stack_location);

    return 0;
}
397
398// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_getstack.html
399int pthread_attr_getstack(pthread_attr_t const* attributes, void** p_stack_ptr, size_t* p_stack_size)
400{
401 auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl const* const*>(attributes));
402
403 if (!attributes_impl || !p_stack_ptr || !p_stack_size)
404 return EINVAL;
405
406 *p_stack_ptr = attributes_impl->stack_location;
407 *p_stack_size = attributes_impl->stack_size;
408
409 return 0;
410}
411
412// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_setstack.html
// Stores a caller-provided stack (base + size) into an attribute object,
// after checking the size meets the required alignment.
int pthread_attr_setstack(pthread_attr_t* attributes, void* p_stack, size_t stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));

    if (!attributes_impl || !p_stack)
        return EINVAL;

    // Check for required alignment on size
    if (0 != (stack_size % required_stack_alignment))
        return EINVAL;

    // FIXME: Check for required alignment on pointer?

    // FIXME: "[EACCES] The stack page(s) described by stackaddr and stacksize are not both readable and writable by the thread."
    // Have to check that the whole range is mapped to this process/thread? Can we defer this to create_thread?

    attributes_impl->stack_size = stack_size;
    attributes_impl->stack_location = p_stack;

    dbgln_if(PTHREAD_DEBUG, "pthread_attr_setstack: Thread attributes at {}, detach state {}, priority {}, guard page size {}, stack size {}, stack location {}",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->detach_state) ? "joinable" : "detached",
        attributes_impl->schedule_priority,
        attributes_impl->guard_page_size,
        attributes_impl->stack_size,
        attributes_impl->stack_location);

    return 0;
}
442
443// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_getstacksize.html
444int pthread_attr_getstacksize(pthread_attr_t const* attributes, size_t* p_stack_size)
445{
446 auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl const* const*>(attributes));
447
448 if (!attributes_impl || !p_stack_size)
449 return EINVAL;
450
451 *p_stack_size = attributes_impl->stack_size;
452 return 0;
453}
454
455// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_setstacksize.html
// Stores a requested stack size into an attribute object, bounded by
// PTHREAD_STACK_MIN/MAX. The size may later be rounded up by
// pthread_create() if no stack pointer was supplied.
int pthread_attr_setstacksize(pthread_attr_t* attributes, size_t stack_size)
{
    auto* attributes_impl = *(reinterpret_cast<PthreadAttrImpl**>(attributes));

    if (!attributes_impl)
        return EINVAL;

    if (stack_size < PTHREAD_STACK_MIN || stack_size > PTHREAD_STACK_MAX)
        return EINVAL;

    attributes_impl->stack_size = stack_size;

    dbgln_if(PTHREAD_DEBUG, "pthread_attr_setstacksize: Thread attributes at {}, detach state {}, priority {}, guard page size {}, stack size {}, stack location {}",
        attributes_impl,
        (PTHREAD_CREATE_JOINABLE == attributes_impl->detach_state) ? "joinable" : "detached",
        attributes_impl->schedule_priority,
        attributes_impl->guard_page_size,
        attributes_impl->stack_size,
        attributes_impl->stack_location);

    return 0;
}
478
479// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_getscope.html
// Stub: contention scope is not implemented. The output parameter is left
// untouched and success is reported unconditionally.
int pthread_attr_getscope([[maybe_unused]] pthread_attr_t const* attributes, [[maybe_unused]] int* contention_scope)
{
    return 0;
}
484
485// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_attr_setscope.html
// Stub: contention scope is not implemented; the request is ignored and
// success is reported unconditionally.
int pthread_attr_setscope([[maybe_unused]] pthread_attr_t* attributes, [[maybe_unused]] int contention_scope)
{
    return 0;
}
490
491// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_getschedparam.html
492int pthread_getschedparam(pthread_t thread, [[maybe_unused]] int* policy, struct sched_param* param)
493{
494 Syscall::SC_scheduler_parameters_params parameters {
495 .pid_or_tid = thread,
496 .mode = Syscall::SchedulerParametersMode::Thread,
497 .parameters = *param,
498 };
499 int rc = syscall(Syscall::SC_scheduler_get_parameters, ¶meters);
500 if (rc == 0)
501 *param = parameters.parameters;
502
503 __RETURN_PTHREAD_ERROR(rc);
504}
505
506// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_setschedparam.html
507int pthread_setschedparam(pthread_t thread, [[maybe_unused]] int policy, struct sched_param const* param)
508{
509 Syscall::SC_scheduler_parameters_params parameters {
510 .pid_or_tid = thread,
511 .mode = Syscall::SchedulerParametersMode::Thread,
512 .parameters = *param,
513 };
514 int rc = syscall(Syscall::SC_scheduler_set_parameters, ¶meters);
515 __RETURN_PTHREAD_ERROR(rc);
516}
517
// Signal handler installed by pthread_cancel(): records that a cancellation
// was requested so the next cancellation point can act on it.
static void pthread_cancel_signal_handler(int signal)
{
    // SIGCANCEL is a custom signal that is beyond the usual range of signal numbers.
    // Let's make sure we know about it in case we still end up in here, but the signal
    // number is being mangled.
    VERIFY(signal == SIGCANCEL);

    // Note: We don't handle PTHREAD_CANCEL_ASYNCHRONOUS any different from PTHREAD_CANCEL_DEFERRED,
    // since ASYNCHRONOUS just means that the thread can be cancelled at any time (instead of just
    // at the next cancellation point) and it seems to be generally discouraged to use it at all.
    pending_cancellation = true;
}
530
531// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cancel.html
532// NOTE: libgcc expects this function to exist in libpthread, even if it is not implemented.
// Requests cancellation of 'thread' by sending it SIGCANCEL; the target acts
// on it at its next cancellation point.
int pthread_cancel(pthread_t thread)
{
    // Set up our signal handler, which listens on SIGCANCEL and flips the cancellation indicator.
    // Note that signal handlers are shared across the whole process, so we can just set that up at any time.
    static bool set_up_cancel_handler = false;

    // NOTE(review): this flag is not synchronized; two threads cancelling
    // concurrently could both call sigaction(). Presumably benign since the
    // installed handler is identical — confirm.
    if (!set_up_cancel_handler) {
        struct sigaction act = {};
        act.sa_handler = pthread_cancel_signal_handler;
        sigaction(SIGCANCEL, &act, nullptr);
        set_up_cancel_handler = true;
    }

    return pthread_kill(thread, SIGCANCEL);
}
548
549// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_testcancel.html
550void pthread_testcancel(void)
551{
552 __pthread_maybe_cancel();
553}
554
555int pthread_setname_np(pthread_t thread, char const* name)
556{
557 if (!name)
558 return EFAULT;
559 int rc = syscall(SC_set_thread_name, thread, name, strlen(name));
560 __RETURN_PTHREAD_ERROR(rc);
561}
562
563int pthread_getname_np(pthread_t thread, char* buffer, size_t buffer_size)
564{
565 int rc = syscall(SC_get_thread_name, thread, buffer, buffer_size);
566 __RETURN_PTHREAD_ERROR(rc);
567}
568
569// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_setcancelstate.html
570int pthread_setcancelstate(int state, int* oldstate)
571{
572 if (state != PTHREAD_CANCEL_ENABLE && state != PTHREAD_CANCEL_DISABLE)
573 return EINVAL;
574 if (oldstate)
575 *oldstate = s_thread_cancel_state;
576 s_thread_cancel_state = state;
577 return 0;
578}
579
580// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_setcanceltype.html
581int pthread_setcanceltype(int type, int* oldtype)
582{
583 if (type != PTHREAD_CANCEL_DEFERRED && type != PTHREAD_CANCEL_ASYNCHRONOUS)
584 return EINVAL;
585 if (oldtype)
586 *oldtype = s_thread_cancel_type;
587 s_thread_cancel_type = type;
588 return 0;
589}
590
// A spinlock stores the holder's tid; this sentinel value means "unlocked".
constexpr static pid_t spinlock_unlock_sentinel = 0;
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_spin_destroy.html
// Refuses to destroy a lock that is currently held; otherwise a no-op.
int pthread_spin_destroy(pthread_spinlock_t* lock)
{
    auto current = AK::atomic_load(&lock->m_lock);

    if (current != spinlock_unlock_sentinel)
        return EBUSY;

    return 0;
}
602
603// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_spin_init.html
604int pthread_spin_init(pthread_spinlock_t* lock, [[maybe_unused]] int shared)
605{
606 lock->m_lock = spinlock_unlock_sentinel;
607 return 0;
608}
609
610// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_spin_lock.html
611int pthread_spin_lock(pthread_spinlock_t* lock)
612{
613 auto const desired = gettid();
614 while (true) {
615 auto current = AK::atomic_load(&lock->m_lock);
616
617 if (current == desired)
618 return EDEADLK;
619
620 if (AK::atomic_compare_exchange_strong(&lock->m_lock, current, desired, AK::MemoryOrder::memory_order_acquire))
621 break;
622 }
623
624 return 0;
625}
626
627// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_spin_trylock.html
628int pthread_spin_trylock(pthread_spinlock_t* lock)
629{
630 // We expect the current value to be unlocked, as the specification
631 // states that trylock should lock only if it is not held by ANY thread.
632 auto current = spinlock_unlock_sentinel;
633 auto desired = gettid();
634
635 if (AK::atomic_compare_exchange_strong(&lock->m_lock, current, desired, AK::MemoryOrder::memory_order_acquire)) {
636 return 0;
637 } else {
638 return EBUSY;
639 }
640}
641
642// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_spin_unlock.html
643int pthread_spin_unlock(pthread_spinlock_t* lock)
644{
645 auto current = AK::atomic_load(&lock->m_lock);
646
647 if (gettid() != current)
648 return EPERM;
649
650 AK::atomic_store(&lock->m_lock, spinlock_unlock_sentinel);
651 return 0;
652}
653
654// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_equal.html
655int pthread_equal(pthread_t t1, pthread_t t2)
656{
657 return t1 == t2;
658}
659
660// FIXME: Use the fancy futex mechanism above to write an rw lock.
661// For the time being, let's just use a less-than-good lock to get things working.
662// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_destroy.html
663int pthread_rwlock_destroy(pthread_rwlock_t* rl)
664{
665 if (!rl)
666 return 0;
667 return 0;
668}
669
670// In a very non-straightforward way, this value is composed of two 32-bit integers
671// the top 32 bits are reserved for the ID of write-locking thread (if any)
672// and the bottom 32 bits are:
673// top 2 bits (30,31): reader wake mask, writer wake mask
674// middle 16 bits: information
675// bit 16: someone is waiting to write
676// bit 17: locked for write
677// bottom 16 bits (0..15): reader count
// Masks into the low 32 bits of the rwlock word, per the layout above.
// NOTE(review): `1 << 31` shifts into the sign bit of int before the
// conversion to u32; `1u << 31` would be cleaner — confirm intent.
constexpr static u32 reader_wake_mask = 1 << 30;
constexpr static u32 writer_wake_mask = 1 << 31;
constexpr static u32 writer_locked_mask = 1 << 17;
constexpr static u32 writer_intent_mask = 1 << 16;
// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_init.html
int pthread_rwlock_init(pthread_rwlock_t* __restrict lockp, pthread_rwlockattr_t const* __restrict attr)
{
    // Just ignore the attributes. use defaults for now.
    (void)attr;

    // No readers, no writer, not locked at all.
    *lockp = 0;
    return 0;
}
692
693// Note that this function does not care about the top 32 bits at all.
694static int rwlock_rdlock_maybe_timed(u32* lockp, const struct timespec* timeout = nullptr, bool only_once = false, int value_if_timeout = -1, int value_if_okay = -2)
695{
696 auto current = AK::atomic_load(lockp);
697 for (; !only_once;) {
698 // First, see if this is locked for writing
699 // if it's not, try to add to the counter.
700 // If someone is waiting to write, and there is one or no other readers, let them have the lock.
701 if (!(current & writer_locked_mask)) {
702 auto count = (u16)current;
703 if (!(current & writer_intent_mask) || count > 1) {
704 ++count;
705 auto desired = (current & 0xffff0000u) | count;
706 auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_acquire);
707 if (!did_exchange)
708 continue; // tough luck, try again.
709 return value_if_okay;
710 }
711 }
712
713 // If no one else is waiting for the read wake bit, set it.
714 if (!(current & reader_wake_mask)) {
715 auto desired = current | reader_wake_mask;
716 auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_acquire);
717 if (!did_exchange)
718 continue; // Something interesting happened!
719
720 current = desired;
721 }
722
723 // Seems like someone is writing (or is interested in writing and we let them have the lock)
724 // wait until they're done.
725 auto rc = futex(lockp, FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, current, timeout, nullptr, reader_wake_mask);
726 if (rc < 0 && errno == ETIMEDOUT && timeout) {
727 return value_if_timeout;
728 }
729 if (rc < 0 && errno != EAGAIN) {
730 // Something broke. let's just bail out.
731 return errno;
732 }
733 errno = 0;
734 // Reload the 'current' value
735 current = AK::atomic_load(lockp);
736 }
737 return value_if_timeout;
738}
739
740static int rwlock_wrlock_maybe_timed(pthread_rwlock_t* lockval_p, const struct timespec* timeout = nullptr, bool only_once = false, int value_if_timeout = -1, int value_if_okay = -2)
741{
742 u32* lockp = reinterpret_cast<u32*>(lockval_p);
743 auto current = AK::atomic_load(lockp);
744 for (; !only_once;) {
745 // First, see if this is locked for writing, and if there are any readers.
746 // if not, lock it.
747 // If someone is waiting to write, let them have the lock.
748 if (!(current & writer_locked_mask) && ((u16)current) == 0) {
749 if (!(current & writer_intent_mask)) {
750 auto desired = current | writer_locked_mask | writer_intent_mask;
751 auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_acquire);
752 if (!did_exchange)
753 continue;
754
755 // Now that we've locked the value, it's safe to set our thread ID.
756 AK::atomic_store(reinterpret_cast<i32*>(lockval_p) + 1, pthread_self());
757 return value_if_okay;
758 }
759 }
760
761 // That didn't work, if no one else is waiting for the write bit, set it.
762 if (!(current & writer_wake_mask)) {
763 auto desired = current | writer_wake_mask | writer_intent_mask;
764 auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_acquire);
765 if (!did_exchange)
766 continue; // Something interesting happened!
767
768 current = desired;
769 }
770
771 // Seems like someone is writing (or is interested in writing and we let them have the lock)
772 // wait until they're done.
773 auto rc = futex(lockp, FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, current, timeout, nullptr, writer_wake_mask);
774 if (rc < 0 && errno == ETIMEDOUT && timeout) {
775 return value_if_timeout;
776 }
777 if (rc < 0 && errno != EAGAIN) {
778 // Something broke. let's just bail out.
779 return errno;
780 }
781 errno = 0;
782 // Reload the 'current' value
783 current = AK::atomic_load(lockp);
784 }
785
786 return value_if_timeout;
787}
788
789// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_rdlock.html
790int pthread_rwlock_rdlock(pthread_rwlock_t* lockp)
791{
792 if (!lockp)
793 return EINVAL;
794
795 return rwlock_rdlock_maybe_timed(reinterpret_cast<u32*>(lockp), nullptr, false, 0, 0);
796}
797
798// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_timedrdlock.html
799int pthread_rwlock_timedrdlock(pthread_rwlock_t* __restrict lockp, const struct timespec* __restrict timespec)
800{
801 if (!lockp)
802 return EINVAL;
803
804 auto rc = rwlock_rdlock_maybe_timed(reinterpret_cast<u32*>(lockp), timespec);
805 if (rc == -1) // "ok"
806 return 0;
807 if (rc == -2) // "timed out"
808 return 1;
809 return rc;
810}
811
812// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_timedwrlock.html
813int pthread_rwlock_timedwrlock(pthread_rwlock_t* __restrict lockp, const struct timespec* __restrict timespec)
814{
815 if (!lockp)
816 return EINVAL;
817
818 auto rc = rwlock_wrlock_maybe_timed(lockp, timespec);
819 if (rc == -1) // "ok"
820 return 0;
821 if (rc == -2) // "timed out"
822 return 1;
823 return rc;
824}
825
826// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_tryrdlock.html
827int pthread_rwlock_tryrdlock(pthread_rwlock_t* lockp)
828{
829 if (!lockp)
830 return EINVAL;
831
832 return rwlock_rdlock_maybe_timed(reinterpret_cast<u32*>(lockp), nullptr, true, EBUSY, 0);
833}
834
835// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_trywrlock.html
836int pthread_rwlock_trywrlock(pthread_rwlock_t* lockp)
837{
838 if (!lockp)
839 return EINVAL;
840
841 return rwlock_wrlock_maybe_timed(lockp, nullptr, true, EBUSY, 0);
842}
843
844// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_unlock.html
// Releases either a write hold (if the writer bit is set and we own it) or
// one read hold (decrementing the reader count) — POSIX does not tell us
// which kind the caller holds.
int pthread_rwlock_unlock(pthread_rwlock_t* lockval_p)
{
    if (!lockval_p)
        return EINVAL;

    // This is a weird API, we don't really know whether we're unlocking write or read...
    auto lockp = reinterpret_cast<u32*>(lockval_p);
    auto current = AK::atomic_load(lockp, AK::MemoryOrder::memory_order_relaxed);
    if (current & writer_locked_mask) {
        // If this lock is locked for writing, its owner better be us!
        // The owner tid lives in the top 32 bits of the rwlock word.
        auto owner_id = AK::atomic_load(reinterpret_cast<i32*>(lockval_p) + 1);
        auto my_id = pthread_self();
        if (owner_id != my_id)
            return EINVAL; // you don't own this lock, silly.

        // Now just unlock it.
        auto desired = current & ~(writer_locked_mask | writer_intent_mask);
        AK::atomic_store(lockp, desired, AK::MemoryOrder::memory_order_release);
        // Then wake both readers and writers, if any.
        // NOTE(review): 'current' is passed as the futex wake count here —
        // presumably intended as "wake plenty"; confirm against the futex API.
        auto rc = futex(lockp, FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, current, nullptr, nullptr, (current & writer_wake_mask) | reader_wake_mask);
        if (rc < 0)
            return errno;
        return 0;
    }

    // Otherwise we must hold a read lock: CAS the reader count down by one.
    for (;;) {
        auto count = (u16)current;
        if (!count) {
            // Are you crazy? this isn't even locked!
            return EINVAL;
        }
        --count;
        auto desired = (current & 0xffff0000u) | count;
        auto did_exchange = AK::atomic_compare_exchange_strong(lockp, current, desired, AK::MemoryOrder::memory_order_release);
        if (did_exchange)
            break;
        // tough luck, try again.
    }

    // Finally, unlocked at last!
    return 0;
}
887
888// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlock_wrlock.html
889int pthread_rwlock_wrlock(pthread_rwlock_t* lockp)
890{
891 if (!lockp)
892 return EINVAL;
893
894 return rwlock_wrlock_maybe_timed(lockp, nullptr, false, 0, 0);
895}
896
897// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlockattr_destroy.html
898int pthread_rwlockattr_destroy(pthread_rwlockattr_t*)
899{
900 return 0;
901}
902
903// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlockattr_getpshared.html
// rwlock attributes are entirely unimplemented; calling this aborts.
int pthread_rwlockattr_getpshared(pthread_rwlockattr_t const* __restrict, int* __restrict)
{
    VERIFY_NOT_REACHED();
}
908
909// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlockattr_init.html
// rwlock attributes are entirely unimplemented; calling this aborts.
int pthread_rwlockattr_init(pthread_rwlockattr_t*)
{
    VERIFY_NOT_REACHED();
}
914
915// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_rwlockattr_setpshared.html
// rwlock attributes are entirely unimplemented; calling this aborts.
int pthread_rwlockattr_setpshared(pthread_rwlockattr_t*, int)
{
    VERIFY_NOT_REACHED();
}
920
921// https://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_atfork.html
922int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
923{
924 if (prepare)
925 __pthread_fork_atfork_register_prepare(prepare);
926 if (parent)
927 __pthread_fork_atfork_register_parent(parent);
928 if (child)
929 __pthread_fork_atfork_register_child(child);
930 return 0;
931}
932
933} // extern "C"