/* jcs's openbsd hax — local copy of OpenBSD's librthread rthread_sync.c */
/*	$OpenBSD: rthread_sync.c,v 1.7 2026/03/27 12:26:58 claudio Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Mutexes and conditions - synchronization functions.
 */

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "rthread.h"
#include "cancel.h"		/* in libc/include */

/*
 * Serializes the one-time dynamic initialization of statically-initialized
 * mutexes and condvars (see _rthread_mutex_lock() and the cond waiters).
 */
static _atomic_lock_t static_init_lock = _SPINLOCK_UNLOCKED;

35/*
36 * mutexen
37 */
38int
39pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr)
40{
41 struct pthread_mutex *mutex;
42
43 mutex = calloc(1, sizeof(*mutex));
44 if (!mutex)
45 return (errno);
46 mutex->lock = _SPINLOCK_UNLOCKED;
47 TAILQ_INIT(&mutex->lockers);
48 if (attr == NULL) {
49 mutex->type = PTHREAD_MUTEX_DEFAULT;
50 mutex->prioceiling = -1;
51 } else {
52 mutex->type = (*attr)->ma_type;
53 mutex->prioceiling = (*attr)->ma_protocol ==
54 PTHREAD_PRIO_PROTECT ? (*attr)->ma_prioceiling : -1;
55 }
56 *mutexp = mutex;
57
58 return (0);
59}
60DEF_STRONG(pthread_mutex_init);
61
62int
63pthread_mutex_destroy(pthread_mutex_t *mutexp)
64{
65 struct pthread_mutex *mutex;
66
67 if (mutexp == NULL)
68 return (EINVAL);
69
70 mutex = (struct pthread_mutex *)*mutexp;
71 if (mutex) {
72 if (mutex->count || mutex->owner != NULL ||
73 !TAILQ_EMPTY(&mutex->lockers)) {
74#define MSG "pthread_mutex_destroy on mutex with waiters!\n"
75 write(2, MSG, sizeof(MSG) - 1);
76#undef MSG
77 return (EBUSY);
78 }
79 free(mutex);
80 *mutexp = NULL;
81 }
82 return (0);
83}
84DEF_STRONG(pthread_mutex_destroy);
85
/*
 * Common back end for pthread_mutex_lock(), _trylock(), and _timedlock().
 *
 * trywait != 0 means fail with EBUSY instead of blocking; abstime, if
 * non-NULL, is an absolute CLOCK_REALTIME deadline for blocking paths.
 * The mutex's internal spinlock protects owner, count, and the lockers
 * queue; __thrsleep() atomically releases that spinlock while sleeping.
 */
static int
_rthread_mutex_lock(pthread_mutex_t *mutexp, int trywait,
    const struct timespec *abstime)
{
	struct pthread_mutex *mutex;
	pthread_t self = pthread_self();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization. Note: _thread_mutex_lock() in libc requires
	 * _rthread_mutex_lock() to perform the mutex init when *mutexp
	 * is NULL.
	 */
	if (*mutexp == NULL) {
		_spinlock(&static_init_lock);
		if (*mutexp == NULL)
			ret = pthread_mutex_init(mutexp, NULL);
		_spinunlock(&static_init_lock);
		if (ret != 0)
			return (EINVAL);
	}
	mutex = (struct pthread_mutex *)*mutexp;

	_rthread_debug(5, "%p: mutex_lock %p\n", (void *)self, (void *)mutex);
	_spinlock(&mutex->lock);
	if (mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers)) {
		/* uncontended: take ownership immediately */
		assert(mutex->count == 0);
		mutex->owner = self;
	} else if (mutex->owner == self) {
		assert(mutex->count > 0);

		/* already owner? handle recursive behavior */
		if (mutex->type != PTHREAD_MUTEX_RECURSIVE)
		{
			if (trywait ||
			    mutex->type == PTHREAD_MUTEX_ERRORCHECK) {
				_spinunlock(&mutex->lock);
				return (trywait ? EBUSY : EDEADLK);
			}

			/* self-deadlock is disallowed by strict */
			if (mutex->type == PTHREAD_MUTEX_STRICT_NP &&
			    abstime == NULL)
				abort();

			/*
			 * self-deadlock, possibly until timeout.
			 * __thrsleep() drops mutex->lock, so on exit from
			 * this loop the spinlock is NOT held.
			 */
			while (__thrsleep(self, CLOCK_REALTIME, abstime,
			    &mutex->lock, NULL) != EWOULDBLOCK)
				_spinlock(&mutex->lock);
			return (ETIMEDOUT);
		}
		/* recursive mutex: refuse to overflow the hold count */
		if (mutex->count == INT_MAX) {
			_spinunlock(&mutex->lock);
			return (EAGAIN);
		}
	} else if (trywait) {
		/* try failed */
		_spinunlock(&mutex->lock);
		return (EBUSY);
	} else {
		/* add to the wait queue and block until at the head */
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
		while (mutex->owner != self) {
			ret = __thrsleep(self, CLOCK_REALTIME, abstime,
			    &mutex->lock, NULL);
			_spinlock(&mutex->lock);
			assert(mutex->owner != NULL);
			if (ret == EWOULDBLOCK) {
				/*
				 * Timed out, but the unlocker may have
				 * handed us ownership concurrently; if so,
				 * keep the mutex instead of timing out.
				 */
				if (mutex->owner == self)
					break;
				TAILQ_REMOVE(&mutex->lockers, self, waiting);
				_spinunlock(&mutex->lock);
				return (ETIMEDOUT);
			}
		}
	}

	/* we own the mutex now; bump the (recursion) hold count */
	mutex->count++;
	_spinunlock(&mutex->lock);

	return (0);
}

/* Blocking lock: no timeout, no try semantics. */
int
pthread_mutex_lock(pthread_mutex_t *p)
{
	return (_rthread_mutex_lock(p, 0, NULL));
}
DEF_STRONG(pthread_mutex_lock);

/* Non-blocking lock: returns EBUSY instead of waiting. */
int
pthread_mutex_trylock(pthread_mutex_t *p)
{
	return (_rthread_mutex_lock(p, 1, NULL));
}

/* Blocking lock with an absolute CLOCK_REALTIME deadline. */
int
pthread_mutex_timedlock(pthread_mutex_t *p, const struct timespec *abstime)
{
	return (_rthread_mutex_lock(p, 0, abstime));
}

/*
 * Release a mutex held by the calling thread.
 *
 * Only the owner modifies count, so it is decremented without taking the
 * internal spinlock; the spinlock is needed only when handing the mutex
 * off to the next queued waiter.
 */
int
pthread_mutex_unlock(pthread_mutex_t *mutexp)
{
	pthread_t self = pthread_self();
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;

	_rthread_debug(5, "%p: mutex_unlock %p\n", (void *)self,
	    (void *)mutex);

	/*
	 * Unlocking a statically-initialized, never-locked mutex: the
	 * result tracks the default mutex type chosen at build time.
	 */
	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#elif PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
		return(0);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK ||
		    mutex->type == PTHREAD_MUTEX_RECURSIVE)
			return (EPERM);
		else {
			/*
			 * For mutex type NORMAL our undefined behavior for
			 * unlocking an unlocked mutex is to succeed without
			 * error. All other undefined behaviors are to
			 * abort() immediately.
			 */
			if (mutex->owner == NULL &&
			    mutex->type == PTHREAD_MUTEX_NORMAL)
				return (0);
			else
				abort();
		}
	}

	/* drop one recursion level; on the last one, hand the mutex off */
	if (--mutex->count == 0) {
		pthread_t next;

		_spinlock(&mutex->lock);
		/* new owner (possibly NULL) is the head of the wait queue */
		mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
		if (next != NULL)
			TAILQ_REMOVE(&mutex->lockers, next, waiting);
		_spinunlock(&mutex->lock);
		if (next != NULL)
			__thrwakeup(next, 1);
	}

	return (0);
}
DEF_STRONG(pthread_mutex_unlock);

242/*
243 * condition variables
244 */
245int
246pthread_cond_init(pthread_cond_t *condp, const pthread_condattr_t *attr)
247{
248 pthread_cond_t cond;
249
250 cond = calloc(1, sizeof(*cond));
251 if (!cond)
252 return (errno);
253 cond->lock = _SPINLOCK_UNLOCKED;
254 TAILQ_INIT(&cond->waiters);
255 if (attr == NULL)
256 cond->clock = CLOCK_REALTIME;
257 else
258 cond->clock = (*attr)->ca_clock;
259 *condp = cond;
260
261 return (0);
262}
263DEF_STRONG(pthread_cond_init);
264
265int
266pthread_cond_destroy(pthread_cond_t *condp)
267{
268 pthread_cond_t cond;
269
270 assert(condp);
271 cond = *condp;
272 if (cond) {
273 if (!TAILQ_EMPTY(&cond->waiters)) {
274#define MSG "pthread_cond_destroy on condvar with waiters!\n"
275 write(2, MSG, sizeof(MSG) - 1);
276#undef MSG
277 return (EBUSY);
278 }
279 free(cond);
280 }
281 *condp = NULL;
282
283 return (0);
284}
285
/*
 * Wait on a condition variable until signaled, canceled, or an absolute
 * timeout (on the condvar's clock) expires.
 *
 * The caller must hold mutexp.  The thread moves itself from the mutex
 * to the condvar's wait queue, hands the mutex to the next queued locker,
 * sleeps, and on wakeup re-acquires the mutex (restoring its recursion
 * count) before returning.  Lock ordering throughout is cond->lock
 * before mutex->lock.  Returns 0, ETIMEDOUT, or EINVAL; acts as a
 * delayed cancellation point.
 */
int
pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
    const struct timespec *abstime)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;
	pthread_t next;
	int mutex_count;
	int canceled = 0;
	int rv = 0;
	int error;
	PREP_CANCEL_POINT(tib);

	/* statically-initialized condvar: do the dynamic init now */
	if (!*condp)
		if ((error = pthread_cond_init(condp, NULL)))
			return (error);
	cond = *condp;
	_rthread_debug(5, "%p: cond_timed %p,%p\n", (void *)self,
	    (void *)cond, (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
			return (EPERM);
		else
			abort();
	}

	if (abstime == NULL || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	ENTER_DELAYED_CANCEL_POINT(tib, self);

	_spinlock(&cond->lock);

	/* mark the condvar as being associated with this mutex */
	if (cond->mutex == NULL) {
		cond->mutex = mutex;
		assert(TAILQ_EMPTY(&cond->waiters));
	} else if (cond->mutex != mutex) {
		/* diagnostic assert: fires in debug builds before EINVAL */
		assert(cond->mutex == mutex);
		_spinunlock(&cond->lock);
		LEAVE_CANCEL_POINT_INNER(tib, 1);
		return (EINVAL);
	} else
		assert(! TAILQ_EMPTY(&cond->waiters));

	/* snag the count in case this is a recursive mutex */
	mutex_count = mutex->count;

	/* transfer from the mutex queue to the condvar queue */
	_spinlock(&mutex->lock);
	self->blocking_cond = cond;
	TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
	_spinunlock(&cond->lock);

	/* wake the next guy blocked on the mutex */
	mutex->count = 0;
	mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
	if (next != NULL) {
		TAILQ_REMOVE(&mutex->lockers, next, waiting);
		__thrwakeup(next, 1);
	}

	/* wait until we're the owner of the mutex again */
	while (mutex->owner != self) {
		/* __thrsleep() atomically drops mutex->lock while asleep */
		error = __thrsleep(self, cond->clock, abstime,
		    &mutex->lock, &self->delayed_cancel);

		/*
		 * If abstime == NULL, then we're definitely waiting
		 * on the mutex instead of the condvar, and are
		 * just waiting for mutex ownership, regardless of
		 * why we woke up.
		 */
		if (abstime == NULL) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * If we took a normal signal (not from
		 * cancellation) then we should just go back to
		 * sleep without changing state (timeouts, etc).
		 */
		if ((error == EINTR || error == ECANCELED) &&
		    (tib->tib_canceled == 0 ||
		    (tib->tib_cantcancel & CANCEL_DISABLED))) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * The remaining reasons for waking up (normal
		 * wakeup, timeout, and cancellation) all mean that
		 * we won't be staying in the condvar queue and
		 * we'll no longer time out or be cancelable.
		 */
		abstime = NULL;
		LEAVE_CANCEL_POINT_INNER(tib, 0);

		/*
		 * If we're no longer in the condvar's queue then
		 * we're just waiting for mutex ownership. Need
		 * cond->lock here to prevent race with cond_signal().
		 */
		_spinlock(&cond->lock);
		if (self->blocking_cond == NULL) {
			_spinunlock(&cond->lock);
			_spinlock(&mutex->lock);
			continue;
		}
		assert(self->blocking_cond == cond);

		/* if timeout or canceled, make note of that */
		if (error == EWOULDBLOCK)
			rv = ETIMEDOUT;
		else if (error == EINTR)
			canceled = 1;

		/* transfer between the queues */
		TAILQ_REMOVE(&cond->waiters, self, waiting);
		assert(mutex == cond->mutex);
		if (TAILQ_EMPTY(&cond->waiters))
			cond->mutex = NULL;
		self->blocking_cond = NULL;
		_spinunlock(&cond->lock);
		_spinlock(&mutex->lock);

		/* mutex unlocked right now? */
		if (mutex->owner == NULL &&
		    TAILQ_EMPTY(&mutex->lockers)) {
			assert(mutex->count == 0);
			mutex->owner = self;
			break;
		}
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
	}

	/* restore the mutex's count */
	mutex->count = mutex_count;
	_spinunlock(&mutex->lock);

	LEAVE_CANCEL_POINT_INNER(tib, canceled);

	return (rv);
}

/*
 * Wait on a condition variable until signaled or canceled (no timeout).
 *
 * Untimed sibling of pthread_cond_timedwait(): same queue-transfer and
 * re-acquisition protocol, but sleeps with no deadline, so EWOULDBLOCK
 * never occurs and the only noteworthy wakeup cause is cancellation.
 * Returns 0 (or an error from the lazy condvar init); acts as a delayed
 * cancellation point.
 */
int
pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;
	pthread_t next;
	int mutex_count;
	int canceled = 0;
	int error;
	PREP_CANCEL_POINT(tib);

	/* statically-initialized condvar: do the dynamic init now */
	if (!*condp)
		if ((error = pthread_cond_init(condp, NULL)))
			return (error);
	cond = *condp;
	_rthread_debug(5, "%p: cond_wait %p,%p\n", (void *)self,
	    (void *)cond, (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
			return (EPERM);
		else
			abort();
	}

	ENTER_DELAYED_CANCEL_POINT(tib, self);

	_spinlock(&cond->lock);

	/* mark the condvar as being associated with this mutex */
	if (cond->mutex == NULL) {
		cond->mutex = mutex;
		assert(TAILQ_EMPTY(&cond->waiters));
	} else if (cond->mutex != mutex) {
		/* diagnostic assert: fires in debug builds before EINVAL */
		assert(cond->mutex == mutex);
		_spinunlock(&cond->lock);
		LEAVE_CANCEL_POINT_INNER(tib, 1);
		return (EINVAL);
	} else
		assert(! TAILQ_EMPTY(&cond->waiters));

	/* snag the count in case this is a recursive mutex */
	mutex_count = mutex->count;

	/* transfer from the mutex queue to the condvar queue */
	_spinlock(&mutex->lock);
	self->blocking_cond = cond;
	TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
	_spinunlock(&cond->lock);

	/* wake the next guy blocked on the mutex */
	mutex->count = 0;
	mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
	if (next != NULL) {
		TAILQ_REMOVE(&mutex->lockers, next, waiting);
		__thrwakeup(next, 1);
	}

	/* wait until we're the owner of the mutex again */
	while (mutex->owner != self) {
		/* __thrsleep() atomically drops mutex->lock while asleep */
		error = __thrsleep(self, 0, NULL, &mutex->lock,
		    &self->delayed_cancel);

		/*
		 * If we took a normal signal (not from
		 * cancellation) then we should just go back to
		 * sleep without changing state (timeouts, etc).
		 */
		if ((error == EINTR || error == ECANCELED) &&
		    (tib->tib_canceled == 0 ||
		    (tib->tib_cantcancel & CANCEL_DISABLED))) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * The remaining reasons for waking up (normal
		 * wakeup and cancellation) all mean that we won't
		 * be staying in the condvar queue and we'll no
		 * longer be cancelable.
		 */
		LEAVE_CANCEL_POINT_INNER(tib, 0);

		/*
		 * If we're no longer in the condvar's queue then
		 * we're just waiting for mutex ownership. Need
		 * cond->lock here to prevent race with cond_signal().
		 */
		_spinlock(&cond->lock);
		if (self->blocking_cond == NULL) {
			_spinunlock(&cond->lock);
			_spinlock(&mutex->lock);
			continue;
		}
		assert(self->blocking_cond == cond);

		/* if canceled, make note of that */
		if (error == EINTR)
			canceled = 1;

		/* transfer between the queues */
		TAILQ_REMOVE(&cond->waiters, self, waiting);
		assert(mutex == cond->mutex);
		if (TAILQ_EMPTY(&cond->waiters))
			cond->mutex = NULL;
		self->blocking_cond = NULL;
		_spinunlock(&cond->lock);
		_spinlock(&mutex->lock);

		/* mutex unlocked right now? */
		if (mutex->owner == NULL &&
		    TAILQ_EMPTY(&mutex->lockers)) {
			assert(mutex->count == 0);
			mutex->owner = self;
			break;
		}
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
	}

	/* restore the mutex's count */
	mutex->count = mutex_count;
	_spinunlock(&mutex->lock);

	LEAVE_CANCEL_POINT_INNER(tib, canceled);

	return (0);
}


/*
 * Wake one thread waiting on the condvar.
 *
 * Rather than waking the thread to re-contend for the mutex, the waiter
 * is moved directly onto the mutex's locker queue (or handed the mutex
 * and woken, if the mutex is free).  cond->lock and mutex->lock are held
 * simultaneously for the hand-off to prevent a race with a timing-out
 * waiter in pthread_cond_timedwait().
 */
int
pthread_cond_signal(pthread_cond_t *condp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex;
	pthread_t thread;
	int wakeup;

	/* uninitialized? Then there's obviously no one waiting! */
	if (!*condp)
		return 0;

	cond = *condp;
	_rthread_debug(5, "%p: cond_signal %p,%p\n", (void *)pthread_self(),
	    (void *)cond, (void *)cond->mutex);
	_spinlock(&cond->lock);
	thread = TAILQ_FIRST(&cond->waiters);
	if (thread == NULL) {
		assert(cond->mutex == NULL);
		_spinunlock(&cond->lock);
		return (0);
	}

	assert(thread->blocking_cond == cond);
	TAILQ_REMOVE(&cond->waiters, thread, waiting);
	thread->blocking_cond = NULL;

	mutex = cond->mutex;
	assert(mutex != NULL);
	/* last waiter gone: dissociate the condvar from the mutex */
	if (TAILQ_EMPTY(&cond->waiters))
		cond->mutex = NULL;

	/* link locks to prevent race with timedwait */
	_spinlock(&mutex->lock);
	_spinunlock(&cond->lock);

	/* free mutex: hand it to the waiter and wake it; else queue it */
	wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
	if (wakeup)
		mutex->owner = thread;
	else
		TAILQ_INSERT_TAIL(&mutex->lockers, thread, waiting);
	_spinunlock(&mutex->lock);
	if (wakeup)
		__thrwakeup(thread, 1);

	return (0);
}

/*
 * Wake all threads waiting on the condvar.
 *
 * All waiters are spliced onto the mutex's locker queue in one shot
 * (direct TAILQ surgery); only the first waiter is actually woken, and
 * only if the mutex happens to be free — the rest acquire the mutex in
 * turn as it is unlocked, avoiding a thundering herd.
 */
int
pthread_cond_broadcast(pthread_cond_t *condp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex;
	pthread_t thread;
	pthread_t p;
	int wakeup;

	/* uninitialized? Then there's obviously no one waiting! */
	if (!*condp)
		return 0;

	cond = *condp;
	_rthread_debug(5, "%p: cond_broadcast %p,%p\n", (void *)pthread_self(),
	    (void *)cond, (void *)cond->mutex);
	_spinlock(&cond->lock);
	thread = TAILQ_FIRST(&cond->waiters);
	if (thread == NULL) {
		assert(cond->mutex == NULL);
		_spinunlock(&cond->lock);
		return (0);
	}

	mutex = cond->mutex;
	assert(mutex != NULL);

	/* walk the list, clearing the "blocked on condvar" pointer */
	p = thread;
	do
		p->blocking_cond = NULL;
	while ((p = TAILQ_NEXT(p, waiting)) != NULL);

	/*
	 * We want to transfer all the threads from the condvar's list
	 * to the mutex's list. The TAILQ_* macros don't let us do that
	 * efficiently, so this is direct list surgery. Pay attention!
	 */

	/* 1) attach the first thread to the end of the mutex's list */
	_spinlock(&mutex->lock);
	wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
	thread->waiting.tqe_prev = mutex->lockers.tqh_last;
	*(mutex->lockers.tqh_last) = thread;

	/* 2) fix up the end pointer for the mutex's list */
	mutex->lockers.tqh_last = cond->waiters.tqh_last;

	if (wakeup) {
		/* mutex was free: pull the first waiter back out, wake it */
		TAILQ_REMOVE(&mutex->lockers, thread, waiting);
		mutex->owner = thread;
		_spinunlock(&mutex->lock);
		__thrwakeup(thread, 1);
	} else
		_spinunlock(&mutex->lock);

	/* 3) reset the condvar's list and mutex pointer */
	TAILQ_INIT(&cond->waiters);
	assert(cond->mutex != NULL);
	cond->mutex = NULL;
	_spinunlock(&cond->lock);

	return (0);
}