jcs's OpenBSD hacks
OpenBSD: local copy of librthread rthread.c (see CVS ID below)
1/* $OpenBSD: rthread.c,v 1.12 2025/10/07 16:37:37 deraadt Exp $ */
2/*
3 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18/*
19 * The infrastructure of rthreads
20 */
21
22#include <sys/types.h>
23#include <sys/time.h>
24#include <sys/futex.h>
25#include <sys/atomic.h>
26#include <sys/gmon.h>
27
28#include <pthread.h>
29#include <stdlib.h>
30#include <tib.h>
31#include <unistd.h>
32#include <assert.h>
33
34#include "rthread.h"
35
36#define RTHREAD_ENV_DEBUG "RTHREAD_DEBUG"
37
38int _rthread_debug_level;
39
40static int _threads_inited;
41
/*
 * Static pthread representing the program's original (main) thread;
 * it is cross-linked with the TIB in _rthread_init().
 */
struct pthread _initial_thread = {
	.flags_lock = _SPINLOCK_UNLOCKED,
	.name = "Original thread",
};
46
47/*
48 * internal support functions
49 */
50void
51_spinlock(volatile _atomic_lock_t *lock)
52{
53 while (_atomic_lock(lock))
54 sched_yield();
55 membar_enter_after_atomic();
56}
57DEF_STRONG(_spinlock);
58
59int
60_spinlocktry(volatile _atomic_lock_t *lock)
61{
62 if (_atomic_lock(lock) == 0) {
63 membar_enter_after_atomic();
64 return 1;
65 }
66 return 0;
67}
68
/*
 * Release a spinlock taken with _spinlock()/_spinlocktry().
 * The membar must come before the store so that all writes made
 * inside the critical section are visible before the lock is
 * observed as free.
 */
void
_spinunlock(volatile _atomic_lock_t *lock)
{
	membar_exit();
	*lock = _ATOMIC_LOCK_UNLOCKED;
}
DEF_STRONG(_spinunlock);
76
77#ifdef __CMTX_CAS
78
79/*
80 * CAS+futex locks
81 */
82
/* Initialize a CAS+futex mutex to the unlocked state. */
void
__cmtx_init(struct __cmtx *cmtx)
{
	cmtx->lock = __CMTX_UNLOCKED;
}
88
89int
90__cmtx_enter_try(struct __cmtx *cmtx)
91{
92 if (atomic_cas_uint(&cmtx->lock,
93 __CMTX_UNLOCKED, __CMTX_LOCKED) == __CMTX_UNLOCKED) {
94 membar_enter_after_atomic();
95 return (1);
96 }
97
98 return (0);
99}
100
/*
 * Take a CAS+futex mutex, sleeping in the kernel while it is held.
 *
 * The lock word moves between three states: UNLOCKED, LOCKED (held,
 * no waiters) and CONTENDED (held, somebody may be asleep on the
 * futex).  Any acquisition from the slow path installs CONTENDED so
 * the eventual __cmtx_leave() knows a wakeup may be required.
 */
void
__cmtx_enter(struct __cmtx *cmtx)
{
	unsigned int locked;

	/* fast path: uncontended UNLOCKED -> LOCKED */
	locked = atomic_cas_uint(&cmtx->lock,
	    __CMTX_UNLOCKED, __CMTX_LOCKED);
	if (locked == __CMTX_UNLOCKED) {
		membar_enter_after_atomic();
		return;
	}

	/* add adaptive spin here */

	do {
		switch (locked) {
		case __CMTX_LOCKED:
			/* advertise a waiter before going to sleep */
			locked = atomic_cas_uint(&cmtx->lock,
			    __CMTX_LOCKED, __CMTX_CONTENDED);
			if (locked == __CMTX_UNLOCKED)
				break;	/* raced free; retry the CAS below */

			/* lock is LOCKED -> CONTENDED or was CONTENDED */
			/* FALLTHROUGH */
		case __CMTX_CONTENDED:
			/*
			 * Sleep only while the word is still CONTENDED;
			 * spurious wakeups are fine, the loop re-checks.
			 */
			futex(&cmtx->lock, FUTEX_WAIT_PRIVATE,
			    __CMTX_CONTENDED, NULL, NULL);
			break;
		}

		/* take it as CONTENDED: we cannot know if others wait */
		locked = atomic_cas_uint(&cmtx->lock,
		    __CMTX_UNLOCKED, __CMTX_CONTENDED);
	} while (locked != __CMTX_UNLOCKED);

	membar_enter_after_atomic();
}
137
/*
 * Release a CAS+futex mutex, waking one waiter if the lock word
 * was marked contended.
 */
void
__cmtx_leave(struct __cmtx *cmtx)
{
	unsigned int locked;

	/* order critical-section stores before releasing the lock */
	membar_exit_before_atomic();

	/* fast path: LOCKED -> UNLOCKED means nobody to wake */
	locked = atomic_cas_uint(&cmtx->lock,
	    __CMTX_LOCKED, __CMTX_UNLOCKED);
	if (locked != __CMTX_LOCKED) {
		/* unlocking an unlocked mutex is a caller bug */
		assert(locked != __CMTX_UNLOCKED);
		/* was CONTENDED: clear the word, then wake one sleeper */
		cmtx->lock = __CMTX_UNLOCKED;
		futex(&cmtx->lock, FUTEX_WAKE_PRIVATE, 1, NULL, NULL);
	}
}
152
153#else /* __CMTX_CAS */
154
155/*
156 * spinlock+futex locks
157 */
158
/* Initialize a spinlock+futex mutex: both words start unlocked. */
void
__cmtx_init(struct __cmtx *cmtx)
{
	cmtx->spin = _SPINLOCK_UNLOCKED;
	cmtx->lock = __CMTX_UNLOCKED;
}
165
166int
167__cmtx_enter_try(struct __cmtx *cmtx)
168{
169 unsigned int locked;
170
171 _spinlock(&cmtx->spin);
172 locked = cmtx->lock;
173 if (locked == __CMTX_UNLOCKED)
174 cmtx->lock = __CMTX_LOCKED;
175 _spinunlock(&cmtx->spin);
176
177 /* spinlocks provide enough membars */
178
179 return (locked == __CMTX_UNLOCKED);
180}
181
/*
 * Take a spinlock+futex mutex.  The lock word is only ever changed
 * while holding cmtx->spin; the futex is used to sleep while the
 * lock is held by another thread.
 */
void
__cmtx_enter(struct __cmtx *cmtx)
{
	unsigned int locked;

	_spinlock(&cmtx->spin);
	locked = cmtx->lock;
	switch (locked) {
	case __CMTX_UNLOCKED:
		/* it is ours */
		cmtx->lock = __CMTX_LOCKED;
		break;
	case __CMTX_LOCKED:
		/* held: advertise a waiter before sleeping */
		cmtx->lock = __CMTX_CONTENDED;
		break;
	}
	_spinunlock(&cmtx->spin);

	while (locked != __CMTX_UNLOCKED) {
		/* only blocks if the word is still CONTENDED */
		futex(&cmtx->lock, FUTEX_WAIT_PRIVATE,
		    __CMTX_CONTENDED, NULL, NULL);

		_spinlock(&cmtx->spin);
		locked = cmtx->lock;
		switch (locked) {
		/* on UNLOCKED we take it; either way mark CONTENDED,
		 * since other waiters may still be asleep */
		case __CMTX_UNLOCKED:
		case __CMTX_LOCKED:
			cmtx->lock = __CMTX_CONTENDED;
			break;
		}
		_spinunlock(&cmtx->spin);
	}

	/* spinlocks provide enough membars */
}
216
217void
218__cmtx_leave(struct __cmtx *cmtx)
219{
220 unsigned int locked;
221
222 /* spinlocks provide enough membars */
223
224 _spinlock(&cmtx->spin);
225 locked = cmtx->lock;
226 cmtx->lock = __CMTX_UNLOCKED;
227 _spinunlock(&cmtx->spin);
228
229 if (locked != __CMTX_LOCKED) {
230 assert(locked != __CMTX_UNLOCKED);
231 futex(&cmtx->lock, FUTEX_WAKE_PRIVATE, 1, NULL, NULL);
232 }
233}
234
235#endif /* __CMTX_CAS */
236
237/*
238 * recursive mutex
239 */
240
241void
242__rcmtx_init(struct __rcmtx *rcmtx)
243{
244 __cmtx_init(&rcmtx->mtx);
245 rcmtx->owner = NULL;
246 rcmtx->depth = 0;
247}
248
249int
250__rcmtx_enter_try(struct __rcmtx *rcmtx)
251{
252 pthread_t self = pthread_self();
253
254 if (rcmtx->owner != self) {
255 if (__cmtx_enter_try(&rcmtx->mtx) == 0)
256 return (0);
257 assert(rcmtx->owner == NULL);
258 rcmtx->owner = self;
259 assert(rcmtx->depth == 0);
260 }
261
262 rcmtx->depth++;
263
264 return (1);
265}
266
267void
268__rcmtx_enter(struct __rcmtx *rcmtx)
269{
270 pthread_t self = pthread_self();
271
272 if (rcmtx->owner != self) {
273 __cmtx_enter(&rcmtx->mtx);
274 assert(rcmtx->owner == NULL);
275 rcmtx->owner = self;
276 assert(rcmtx->depth == 0);
277 }
278
279 rcmtx->depth++;
280}
281
282void
283__rcmtx_leave(struct __rcmtx *rcmtx)
284{
285 assert(rcmtx->owner == pthread_self());
286 if (--rcmtx->depth == 0) {
287 rcmtx->owner = NULL;
288 __cmtx_leave(&rcmtx->mtx);
289 }
290}
291
/*
 * One-time setup for the initial thread: cross-link the static
 * thread structure with the TIB, pick up the debug level from the
 * environment, and set up profiling state.
 */
static void
_rthread_init(void)
{
	pthread_t thread = &_initial_thread;
	struct tib *tib;

	if (_threads_inited)
		return;

	/* cross-link the initial thread and its TIB */
	tib = TIB_GET();
	tib->tib_thread = thread;
	thread->tib = tib;

	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	tib->tib_thread_flags = TIB_THREAD_INITIAL_STACK;

	/*
	 * Set the debug level from an environment string.
	 * Bogus values are silently ignored.
	 * Skipped for setuid/setgid programs (issetugid()).
	 */
	if (!issetugid()) {
		char *envp = getenv(RTHREAD_ENV_DEBUG);

		if (envp != NULL) {
			char *rem;

			_rthread_debug_level = (int) strtol(envp, &rem, 0);
			if (*rem != '\0' || _rthread_debug_level < 0)
				_rthread_debug_level = 0;
		}
	}

	/*
	 * Mark initialization done before calling _gmon_alloc() —
	 * NOTE(review): presumably so any reentry into pthread_self()
	 * sees an initialized state; _gmon_alloc() is not visible here.
	 */
	_threads_inited = 1;

	/* Ignore errors.  NULL is OK for a non-profiling case. */
	thread->gmonparam = _gmon_alloc();
}
329
330/*
331 * real pthread functions
332 */
/*
 * Return the calling thread's pthread_t, lazily initializing the
 * thread infrastructure on first use.
 */
pthread_t
pthread_self(void)
{
	if (__predict_false(!_threads_inited))
		_rthread_init();

	return TIB_GET()->tib_thread;
}
DEF_STRONG(pthread_self);
342
/*
 * Terminate the calling thread: run queued cleanup handlers and TLS
 * destructors, invoke the release callback if one is registered,
 * then exit via the kernel.  Never returns.
 */
void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	struct tib *tib;
	pthread_t thread = pthread_self();

	tib = thread->tib;

	if (tib->tib_cantcancel & CANCEL_DYING) {
		/*
		 * Called pthread_exit() from destructor or cancelation
		 * handler: blow up. XXX write something to stderr?
		 */
		abort();
		//_exit(42);
	}

	/* from here on the thread cannot be canceled again */
	tib->tib_cantcancel |= CANCEL_DYING;

	thread->retval = retval;

	/* walk the cleanup list, calling and freeing each entry */
	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_thread_finalize();
	_rthread_tls_destructors(thread);

	/* optional release hook, set when the full thread library is in */
	if (_thread_cb.tc_thread_release != NULL)
		_thread_cb.tc_thread_release(thread);

	/* exit this thread in the kernel; loop in case it returns */
	__threxit(&tib->tib_tid);
	for(;;);
}
DEF_STRONG(pthread_exit);
381
/*
 * Compare two thread handles.
 * Returns nonzero when they name the same thread, 0 otherwise.
 */
int
pthread_equal(pthread_t t1, pthread_t t2)
{
	return (t1 == t2 ? 1 : 0);
}
387