/* jcs's OpenBSD hacks */
/* $OpenBSD: rthread.c,v 1.101 2025/07/17 02:21:44 deraadt Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * The heart of rthreads.  Basic functions like creating and joining
 * threads.
 */
22
23#include <sys/types.h>
24#ifndef NO_PIC
25#include <elf.h>
26#pragma weak _DYNAMIC
27#endif
28#ifdef __PROFIL_SRC__
29#include <sys/gmon.h>
30#endif
31
32#include <stdlib.h>
33#include <unistd.h>
34#include <signal.h>
35#include <stdio.h>
36#include <string.h>
37#include <errno.h>
38#include <dlfcn.h>
39#include <tib.h>
40
41#include <pthread.h>
42
43#include "cancel.h" /* in libc/include */
44#include "rthread.h"
45#include "rthread_cb.h"
46
/*
 * Call nonstandard functions via names in the reserved namespace:
 *	dlctl()   -> _dlctl()
 *	getthrid  -> _thread_sys_getthrid
 */
/*
 * Give dlctl() the reserved-namespace symbol _dlctl(), weak so that
 * ld.so's real implementation overrides the stub below when the
 * program is dynamically linked.
 */
typeof(dlctl) dlctl asm("_dlctl") __attribute__((weak));
REDIRECT_SYSCALL(getthrid);

/* weak stub to be overridden by ld.so */
int dlctl(void *handle, int cmd, void *data) { return 0; }
57
/*
 * libc's signal wrappers hide SIGTHR; we need to call the real syscall
 * stubs _thread_sys_* directly.
 */
REDIRECT_SYSCALL(sigaction);
REDIRECT_SYSCALL(sigprocmask);
REDIRECT_SYSCALL(thrkill);

/* pthread_{get,set}concurrency() storage; has no scheduling effect */
static int concurrency_level;

int _threads_ready;		/* nonzero once _rthread_init() has run */
int _post_threaded;		/* set elsewhere; pthread_join() aborts when nonzero */
size_t _thread_pagesize;	/* cached sysconf(_SC_PAGESIZE) */
/* all live threads, guarded by _thread_lock */
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
_atomic_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
/* dead/detached threads awaiting _rthread_reaper(), guarded by _thread_gc_lock */
static struct pthread_queue _thread_gc_list
    = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
static _atomic_lock_t _thread_gc_lock = _SPINLOCK_UNLOCKED;
static struct pthread _initial_thread;

/* attributes used when pthread_create() is passed a NULL attr */
struct pthread_attr _rthread_attr_default = {
	.stack_addr			= NULL,
	.stack_size			= RTHREAD_STACK_SIZE_DEF,
/*	.guard_size		set in _rthread_init */
	.detach_state			= PTHREAD_CREATE_JOINABLE,
	.contention_scope		= PTHREAD_SCOPE_SYSTEM,
	.sched_policy			= SCHED_OTHER,
	.sched_param			= { .sched_priority = 0 },
	.sched_inherit			= PTHREAD_INHERIT_SCHED,
};
88
/*
 * internal support functions
 */
92
93static void
94_rthread_start(void *v)
95{
96 pthread_t thread = v;
97 void *retval;
98
99 retval = thread->fn(thread->arg);
100 pthread_exit(retval);
101}
102
/*
 * Handler for SIGTHR, the reserved signal used to deliver cancelation
 * (and suspension) requests to a thread.  Runs in async-signal context,
 * so it only inspects/updates flags in the current thread's TIB.
 */
static void
sigthr_handler(__unused int sig)
{
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;

	/*
	 * Do nothing unless
	 * 1) pthread_cancel() has been called on this thread,
	 * 2) cancelation is enabled for it, and
	 * 3) we're not already in cancelation processing
	 */
	if (!tib->tib_canceled || tib->tib_cantcancel)
		return;

	/*
	 * If delaying cancels inside complex ops (pthread_cond_wait,
	 * pthread_join, etc), just mark that this has happened to
	 * prevent a race with going to sleep
	 */
	if (tib->tib_cancel_point & CANCEL_POINT_DELAYED) {
		self->delayed_cancel = 1;
		return;
	}

	/*
	 * otherwise, if in a cancel point or async cancels are
	 * enabled, then exit
	 */
	if (tib->tib_cancel_point ||
	    (tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL))
		pthread_exit(PTHREAD_CANCELED);
}
136
137
/*
 * A few basic callbacks for libc.  The first couple are only used
 * on archs where there isn't a fast TCB_GET()
 */
142#ifndef TCB_HAVE_MD_GET
143static int *
144multi_threaded_errnoptr(void)
145{
146 return (&TIB_GET()->tib_errno);
147}
148
/* report the current thread's TCB to libc */
static void *
multi_threaded_tcb(void)
{
	void *tcb = TCB_GET();

	return tcb;
}
154#endif /* TCB_HAVE_MD_GET */
155
/*
 * Queue a finished (or detached) thread for _rthread_reaper(), which
 * frees its stack and TIB once it is safe to do so.
 */
static void
_rthread_free(pthread_t thread)
{
	_spinlock(&_thread_gc_lock);
	TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
	_spinunlock(&_thread_gc_lock);
}
163
/*
 * libc callback invoked when a thread terminates: unlink it from the
 * global thread list, then either queue it for reaping (if detached)
 * or mark it THREAD_DONE and wake any pthread_join()er via donesem.
 */
static void
_thread_release(pthread_t thread)
{
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		/* nobody will join it: reclaim directly */
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DONE;
		_spinunlock(&thread->flags_lock);
		_sem_post(&thread->donesem);	/* release the joiner */
	}
}
181
182static void
183_thread_key_zero(int key)
184{
185 pthread_t thread;
186 struct rthread_storage *rs;
187
188 LIST_FOREACH(thread, &_thread_list, threads) {
189 for (rs = thread->local_storage; rs; rs = rs->next) {
190 if (rs->keyid == key)
191 rs->data = NULL;
192 }
193 }
194}
195
/*
 * One-time bring-up of the threading machinery, run lazily from the
 * first threading operation.  Ordering matters: libc callbacks are
 * installed and the SIGTHR handler is live before _threads_ready is
 * set, and _malloc_init() runs only after that.
 */
void
_rthread_init(void)
{
	pthread_t thread = pthread_self();
	struct sigaction sa;

	if (_threads_ready)
		return;

	/* the initial thread joins the global list first */
	LIST_INSERT_HEAD(&_thread_list, thread, threads);

	_thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
	_rthread_attr_default.guard_size = _thread_pagesize;
	thread->attr = _rthread_attr_default;

	/* get libc to start using our callbacks */
	{
		struct thread_callbacks cb = { 0 };

#ifndef TCB_HAVE_MD_GET
		cb.tc_errnoptr = multi_threaded_errnoptr;
		cb.tc_tcb = multi_threaded_tcb;
#endif
		cb.tc_fork = _thread_fork;
		cb.tc_vfork = _thread_vfork;
		cb.tc_thread_release = _thread_release;
		cb.tc_thread_key_zero = _thread_key_zero;
		_thread_set_callbacks(&cb, sizeof(cb));
	}

#ifndef NO_PIC
	/* when dynamically linked, hand ld.so our recursive dl lock */
	if (_DYNAMIC) {
		dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
	}
#endif

	/*
	 * Set the handler on the signal used for cancelation and
	 * suspension, and make sure it's unblocked
	 */
	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = sigthr_handler;
	sigaction(SIGTHR, &sa, NULL);
	sigaddset(&sa.sa_mask, SIGTHR);
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);

	_threads_ready = 1;

	_malloc_init(1);

	_rthread_debug(1, "rthread init\n");
}
249
/*
 * Free stacks and TIBs of threads queued by _rthread_free().  A thread
 * is reclaimed only after its tib_tid has become 0 (cleared when the
 * thread is fully gone — presumably by the kernel per __tfork(2);
 * confirm).  The GC lock is dropped around each free, so the scan
 * restarts from the head after every removal.
 */
static void
_rthread_reaper(void)
{
	pthread_t thread;

restart:
	_spinlock(&_thread_gc_lock);
	TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
		if (thread->tib->tib_tid != 0)
			continue;	/* still exiting; try again later */
		TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
		if (thread != &_initial_thread) {
			_rthread_debug(3, "rthread reaping %p stack %p\n",
			    (void *)thread, (void *)thread->stack);
			_rthread_free_stack(thread->stack);
			_dl_free_tib(thread->tib, sizeof(*thread));
		} else {
			/* initial thread isn't part of TIB allocation */
			_rthread_debug(3, "rthread reaping %p (initial)\n",
			    (void *)thread);
			_dl_free_tib(thread->tib, 0);
		}
		goto restart;
	}
	_spinunlock(&_thread_gc_lock);
}
277
/*
 * real pthread functions
 */
281
/*
 * Wait for `thread' to terminate and optionally collect its exit value
 * in *retval.  Acts as a delayed cancelation point.  Returns 0 or an
 * errno value (EINVAL, EDEADLK, or whatever _sem_wait() reports).
 */
int
pthread_join(pthread_t thread, void **retval)
{
	int e;
	struct tib *tib = TIB_GET();
	pthread_t self;
	PREP_CANCEL_POINT(tib);

	/* joining after threading was torn down can never succeed */
	if (_post_threaded) {
#define GREATSCOTT "great scott! serious repercussions on future events!\n"
		write(2, GREATSCOTT, sizeof(GREATSCOTT) - 1);
		abort();
	}
	if (!_threads_ready)
		_rthread_init();
	self = tib->tib_thread;

	e = 0;
	ENTER_DELAYED_CANCEL_POINT(tib, self);
	if (thread == NULL)
		e = EINVAL;
	else if (thread == self)
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else if ((e = _sem_wait(&thread->donesem, 0, NULL,
	    &self->delayed_cancel)) == 0) {
		if (retval)
			*retval = thread->retval;

		/*
		 * We should be the last having a ref to this thread,
		 * but someone stupid or evil might have detached it;
		 * in that case the thread will clean up itself
		 */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	LEAVE_CANCEL_POINT_INNER(tib, e);
	_rthread_reaper();
	return (e);
}
325
/*
 * Mark `thread' detached so its resources are reclaimed without a
 * pthread_join().  Returns EINVAL if it is already detached.
 */
int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;		/* double detach */
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		/* already exited: queue for the reaper right away */
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		/* still running: _thread_release() frees it on exit */
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}
345
346int
347pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
348 void *(*start_routine)(void *), void *arg)
349{
350 extern int __isthreaded;
351 struct tib *tib;
352 pthread_t thread;
353 struct __tfork param;
354 int rc;
355
356 if (!_threads_ready)
357 _rthread_init();
358
359 _rthread_reaper();
360
361 tib = _dl_allocate_tib(sizeof(*thread));
362 if (tib == NULL)
363 return (ENOMEM);
364 thread = tib->tib_thread;
365 memset(thread, 0, sizeof(*thread));
366 thread->tib = tib;
367 thread->donesem.lock = _SPINLOCK_UNLOCKED;
368 thread->flags_lock = _SPINLOCK_UNLOCKED;
369 thread->fn = start_routine;
370 thread->arg = arg;
371 tib->tib_tid = -1;
372
373 thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
374 if (thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
375 pthread_t self = pthread_self();
376
377 thread->attr.sched_policy = self->attr.sched_policy;
378 thread->attr.sched_param = self->attr.sched_param;
379 }
380 if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
381 thread->flags |= THREAD_DETACHED;
382
383 thread->stack = _rthread_alloc_stack(thread);
384 if (!thread->stack) {
385 rc = errno;
386 goto fail1;
387 }
388
389 param.tf_tcb = TIB_TO_TCB(tib);
390 param.tf_tid = &tib->tib_tid;
391 param.tf_stack = thread->stack->sp;
392
393 _spinlock(&_thread_lock);
394 LIST_INSERT_HEAD(&_thread_list, thread, threads);
395 _spinunlock(&_thread_lock);
396
397 /* we're going to be multi-threaded real soon now */
398 __isthreaded = 1;
399
400#ifdef __PROFIL_SRC__
401 /* Ignore errors. NULL is OK for a non-profiling case. */
402 thread->gmonparam = _gmon_alloc();
403#endif
404
405 rc = __tfork_thread(¶m, sizeof(param), _rthread_start, thread);
406 if (rc != -1) {
407 /* success */
408 *threadp = thread;
409 return (0);
410 }
411
412 rc = errno;
413
414 _spinlock(&_thread_lock);
415 LIST_REMOVE(thread, threads);
416 _spinunlock(&_thread_lock);
417 _rthread_free_stack(thread->stack);
418fail1:
419 _dl_free_tib(tib, sizeof(*thread));
420
421 return (rc);
422}
423
424int
425pthread_kill(pthread_t thread, int sig)
426{
427 struct tib *tib = thread->tib;
428
429 if (sig == SIGTHR)
430 return (EINVAL);
431 if (thrkill(tib->tib_tid, sig, TIB_TO_TCB(tib)))
432 return (errno);
433 return (0);
434}
435
436int
437pthread_cancel(pthread_t thread)
438{
439 struct tib *tib = thread->tib;
440 pid_t tid = tib->tib_tid;
441
442 if (tib->tib_canceled == 0 && tid != 0 &&
443 (tib->tib_cantcancel & CANCEL_DYING) == 0) {
444 tib->tib_canceled = 1;
445
446 if ((tib->tib_cantcancel & CANCEL_DISABLED) == 0) {
447 thrkill(tid, SIGTHR, TIB_TO_TCB(tib));
448 return (0);
449 }
450 }
451 return (0);
452}
453
454void
455pthread_testcancel(void)
456{
457 struct tib *tib = TIB_GET();
458
459 if (tib->tib_canceled && (tib->tib_cantcancel & CANCEL_DISABLED) == 0)
460 pthread_exit(PTHREAD_CANCELED);
461}
462
463int
464pthread_setcancelstate(int state, int *oldstatep)
465{
466 struct tib *tib = TIB_GET();
467 int oldstate;
468
469 oldstate = tib->tib_cantcancel & CANCEL_DISABLED ?
470 PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE;
471 if (state == PTHREAD_CANCEL_ENABLE) {
472 tib->tib_cantcancel &= ~CANCEL_DISABLED;
473 } else if (state == PTHREAD_CANCEL_DISABLE) {
474 tib->tib_cantcancel |= CANCEL_DISABLED;
475 } else {
476 return (EINVAL);
477 }
478 if (oldstatep)
479 *oldstatep = oldstate;
480
481 return (0);
482}
483DEF_STD(pthread_setcancelstate);
484
485int
486pthread_setcanceltype(int type, int *oldtypep)
487{
488 struct tib *tib = TIB_GET();
489 int oldtype;
490
491 oldtype = tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL ?
492 PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED;
493 if (type == PTHREAD_CANCEL_DEFERRED) {
494 tib->tib_thread_flags &=~ TIB_THREAD_ASYNC_CANCEL;
495 } else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
496 tib->tib_thread_flags |= TIB_THREAD_ASYNC_CANCEL;
497 } else {
498 return (EINVAL);
499 }
500 if (oldtypep)
501 *oldtypep = oldtype;
502
503 return (0);
504}
505
506void
507pthread_cleanup_push(void (*fn)(void *), void *arg)
508{
509 struct rthread_cleanup_fn *clfn;
510 pthread_t self = pthread_self();
511
512 clfn = calloc(1, sizeof(*clfn));
513 if (!clfn)
514 return;
515 clfn->fn = fn;
516 clfn->arg = arg;
517 clfn->next = self->cleanup_fns;
518 self->cleanup_fns = clfn;
519}
520
521void
522pthread_cleanup_pop(int execute)
523{
524 struct rthread_cleanup_fn *clfn;
525 pthread_t self = pthread_self();
526
527 clfn = self->cleanup_fns;
528 if (clfn) {
529 self->cleanup_fns = clfn->next;
530 if (execute)
531 clfn->fn(clfn->arg);
532 free(clfn);
533 }
534}
535
/*
 * Report the level stored by pthread_setconcurrency(); purely
 * informational — it never affects scheduling here.
 */
int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}
541
542int
543pthread_setconcurrency(int new_level)
544{
545 if (new_level < 0)
546 return (EINVAL);
547 concurrency_level = new_level;
548 return (0);
549}
550
/*
 * compat debug stuff
 */
/*
 * Print one line per live thread (tid, TIB flags, name) to stdout.
 * Note: printf() runs while _thread_lock is held.
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags 0x%x name %s\n", thread->tib->tib_tid,
		    thread->tib->tib_thread_flags, thread->name);
	_spinunlock(&_thread_lock);
}
565
566#ifndef NO_PIC
/*
 * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
 * the function called via atexit() to invoke all destructors.  The latter
 * two call shared-object destructors, which may need to call dlclose(),
 * so this lock needs to permit recursive locking.
 * The specific code here was extracted from _rthread_mutex_lock() and
 * pthread_mutex_unlock() and simplified to use the static variables.
 *
 * what == 0: lock, what == 1: unlock, anything else: post-fork reinit.
 */
void
_rthread_dl_lock(int what)
{
	static _atomic_lock_t lock = _SPINLOCK_UNLOCKED;
	static pthread_t owner = NULL;		/* current recursive holder */
	static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
	static int count = 0;			/* recursion depth */

	if (what == 0) {
		pthread_t self = pthread_self();

		/* lock, possibly recursive */
		_spinlock(&lock);
		if (owner == NULL) {
			owner = self;
		} else if (owner != self) {
			/* queue up; unlocker hands ownership to the head */
			TAILQ_INSERT_TAIL(&lockers, self, waiting);
			while (owner != self) {
				/* __thrsleep() drops `lock'; retake on wake */
				__thrsleep(self, 0, NULL, &lock, NULL);
				_spinlock(&lock);
			}
		}
		count++;
		_spinunlock(&lock);
	} else if (what == 1) {
		/* unlock, possibly recursive */
		if (--count == 0) {
			pthread_t next;

			_spinlock(&lock);
			owner = next = TAILQ_FIRST(&lockers);
			if (next != NULL)
				TAILQ_REMOVE(&lockers, next, waiting);
			_spinunlock(&lock);
			if (next != NULL)
				__thrwakeup(next, 1);
		}
	} else {
		/* reinit: used in child after fork to clear the queue */
		lock = _SPINLOCK_UNLOCKED;
		if (--count == 0)
			owner = NULL;
		TAILQ_INIT(&lockers);
	}
}
620#endif