1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2002 by Ulf Ralberg
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21#include "config.h"
22
23#ifdef HAVE_SIGALTSTACK_THREADS
24/*
25 * The sp check in glibc __longjmp_chk() will cause
26 * a fatal error when switching threads via longjmp().
27 */
28#undef _FORTIFY_SOURCE
29#endif
30
31#include "thread-internal.h"
32#include "kernel.h"
33#include "cpu.h"
34#include "string.h"
35#ifdef RB_PROFILE
36#include <profile.h>
37#endif
38#include "core_alloc.h"
39
40#if (CONFIG_PLATFORM & PLATFORM_HOSTED)
41#include <errno.h>
42#endif
43/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
44#ifdef DEBUG
45#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
46#else
47#define THREAD_EXTRA_CHECKS 0
48#endif
49
50/****************************************************************************
51 * ATTENTION!! *
52 * See notes below on implementing processor-specific portions! *
53 ****************************************************************************
54 *
55 * General locking order to guarantee progress. Order must be observed but
56 * all stages are not necessarily obligatory. Going from 1) to 3) is
57 * perfectly legal.
58 *
59 * 1) IRQ
60 * This is first because of the likelihood of having an interrupt occur that
61 * also accesses one of the objects farther down the list. Any non-blocking
62 * synchronization done may already have a lock on something during normal
63 * execution and if an interrupt handler running on the same processor as
64 * the one that has the resource locked were to attempt to access the
65 * resource, the interrupt handler would spin forever waiting for an unlock
66 * that will never happen. There is no danger if the interrupt occurs on
67 * a different processor because the one that has the lock will eventually
68 * unlock and the other processor's handler may proceed at that time. Not
69 * necessary when the resource in question is definitely not available to
70 * interrupt handlers.
71 *
72 * 2) Kernel Object
73 * 1) May be needed beforehand if the kernel object allows dual-use such as
74 * event queues. The kernel object must have a scheme to protect itself from
75 * access by another processor and is responsible for serializing the calls
76 * to block_thread and wakeup_thread both to themselves and to each other.
77 * Objects' queues are also protected here.
78 *
79 * 3) Thread Slot
80 * This locks access to the thread's slot such that its state cannot be
81 * altered by another processor when a state change is in progress such as
82 * when it is in the process of going on a blocked list. An attempt to wake
83 * a thread while it is still blocking will likely desync its state with
84 * the other resources used for that state.
85 *
86 * 4) Core Lists
87 * These lists are specific to a particular processor core and are accessible
88 * by all processor cores and interrupt handlers. The running (rtr) list is
89 * the prime example where a thread may be added by any means.
90 */
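/* Illustrative sketch (kept out of the build) of how the four stages above
 * line up in a hypothetical wakeup path.  The object corelock below stands in
 * for whatever lock a particular kernel object uses; it is an assumption for
 * the example, not a real object from this file. */
#if 0
static void example_wakeup_path(struct corelock *object_cl,
                                struct thread_entry *thread)
{
    int oldlevel = disable_irq_save(); /* 1) IRQ */
    corelock_lock(object_cl);          /* 2) kernel object */
    LOCK_THREAD(thread);               /* 3) thread slot */
    core_schedule_wakeup(thread);      /* 4) core run list (locked internally) */
    UNLOCK_THREAD(thread);
    corelock_unlock(object_cl);
    restore_irq(oldlevel);
}
#endif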
91
92/*---------------------------------------------------------------------------
93 * Processor specific: core_sleep/core_wake/misc. notes
94 *
95 * ARM notes:
96 * FIQ is not dealt with by the scheduler code and is simply restored if it
97 * must be masked for some reason - because threading modifies a register
98 * that FIQ may also modify and there's no way to accomplish it atomically.
99 * s3c2440 is such a case.
100 *
101 * Audio interrupts are generally treated at a higher priority than others;
102 * usage of scheduler code from interrupts above HIGHEST_IRQ_LEVEL is not
103 * in general safe. Special cases may be constructed on a per-
104 * source basis and blocking operations are not available.
105 *
106 * The core_sleep procedure, implemented per CPU, must ensure an asynchronous
107 * wakeup never results in requiring a wait until the next tick (up to
108 * 10000uS!). May require assembly and careful instruction ordering.
109 *
110 * 1) On multicore, stay awake if directed to do so by another. If so, goto
111 * step 4.
112 * 2) If processor requires, atomically reenable interrupts and perform step
113 * 3.
114 * 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
115 * on Coldfire) goto step 5.
116 * 4) Enable interrupts.
117 * 5) Exit procedure.
118 *
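 * An illustrative sketch of the steps above for a hypothetical single-core
 * port; cpu_wait_for_interrupt() is a placeholder name, not a function from
 * this codebase, and the real implementations live in the per-CPU
 * asm/thread.c files:
 *
 *     static inline void core_sleep(void)
 *     {
 *         cpu_wait_for_interrupt();  (step 3: halt; a pending IRQ still wakes us)
 *         enable_irq();              (step 4: unmask so the IRQ gets serviced)
 *     }
 *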
119 * core_wake and multiprocessor notes for sleep/wake coordination:
120 * If possible, to wake up another processor, the forcing of an interrupt on
121 * the woken core by the waker core is the easiest way to ensure a non-
122 * delayed wake and immediate execution of any woken threads. If that isn't
123 * available then some careful non-blocking synchronization is needed (as on
124 * PP targets at the moment).
125 *---------------------------------------------------------------------------
126 *
127 *
128 *---------------------------------------------------------------------------
129 * Priority distribution structure (one category for each possible priority):
130 *
131 * +----+----+----+ ... +------+
132 * hist: | F0 | F1 | F2 | | Fn-1 |
133 * +----+----+----+ ... +------+
134 * mask: | b0 | b1 | b2 | | bn-1 |
135 * +----+----+----+ ... +------+
136 *
137 * F = count of threads at priority category n (frequency)
138 * b = bitmask of non-zero priority categories (occupancy)
139 *
140 * / if hist[n] != 0 : 1
141 * b[n] = |
142 * \ else : 0
143 *
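 * Worked example (illustrative): with runnable threads at priorities 1, 1
 * and 3, hist[1] = 2, hist[3] = 1 and every other entry is 0, so exactly
 * bits b1 and b3 of the mask are set; priobit_ffs() on the mask then yields
 * 1, the numerically lowest - and therefore highest - occupied priority.
 *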
144 *---------------------------------------------------------------------------
145 * Basic priority inheritance protocol (PIP):
146 *
147 * Mn = mutex n, Tn = thread n
148 *
149 * A lower priority thread inherits the priority of the highest priority
150 * thread blocked waiting for it to complete an action (such as release a
151 * mutex or respond to a message via queue_send):
152 *
153 * 1) T2->M1->T1
154 *
155 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
156 * priority than T1 then T1 inherits the priority of T2.
157 *
158 * 2) T3
159 * \/
160 * T2->M1->T1
161 *
162 * Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
163 * T1 inherits the higher of T2 and T3.
164 *
165 * 3) T3->M2->T2->M1->T1
166 *
167 * T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
168 * then T1 inherits the priority of T3 through T2.
169 *
170 * Blocking chains can grow arbitrarily complex (though it's best that they
171 * not form at all very often :) and build up from these units.
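 *
 * Worked example (illustrative; a numerically lower value means a higher
 * priority): if T1 has base priority 20 and owns M1, and T2 with priority 10
 * blocks on M1, then T1 runs at effective priority 10 until it releases M1,
 * at which point it drops back to its base priority of 20.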
172 *---------------------------------------------------------------------------
173 */
174static FORCE_INLINE void core_sleep(IF_COP_VOID(unsigned int core));
175static FORCE_INLINE void store_context(void* addr);
176static FORCE_INLINE void load_context(const void* addr);
177
178/****************************************************************************
179 * Processor/OS-specific section - include necessary core support
180 */
181
182#include "asm/thread.c"
183
184#if defined (CPU_PP)
185#include "thread-pp.c"
186#endif /* CPU_PP */
187
188/*
189 * End Processor-specific section
190 ***************************************************************************/
191
192static NO_INLINE NORETURN_ATTR
193 void thread_panicf(const char *msg, struct thread_entry *thread)
194{
195 IF_COP( const unsigned int core = thread->core; )
196 static char name[sizeof (((struct thread_debug_info *)0)->name)];
197 format_thread_name(name, sizeof (name), thread);
198 panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
199}
200
201static NO_INLINE void thread_stkov(struct thread_entry *thread)
202{
203 thread_panicf("Stkov", thread);
204}
205
206#if THREAD_EXTRA_CHECKS
207#define THREAD_PANICF(msg, thread) \
208 thread_panicf(msg, thread)
209#define THREAD_ASSERT(exp, msg, thread) \
210 ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
211#else
212#define THREAD_PANICF(msg, thread) \
213 do {} while (1)
214#define THREAD_ASSERT(exp, msg, thread) \
215 do {} while (0)
216#endif /* THREAD_EXTRA_CHECKS */
217
218/* Thread locking */
219#if NUM_CORES > 1
220#define LOCK_THREAD(thread) \
221 ({ corelock_lock(&(thread)->slot_cl); })
222#define TRY_LOCK_THREAD(thread) \
223 ({ corelock_try_lock(&(thread)->slot_cl); })
224#define UNLOCK_THREAD(thread) \
225 ({ corelock_unlock(&(thread)->slot_cl); })
226#else /* NUM_CORES == 1*/
227#define LOCK_THREAD(thread) \
228 ({ (void)(thread); })
229#define TRY_LOCK_THREAD(thread) \
230 ({ (void)(thread); })
231#define UNLOCK_THREAD(thread) \
232 ({ (void)(thread); })
233#endif /* NUM_CORES */
234
235/* RTR list */
236#define RTR_LOCK(corep) \
237 corelock_lock(&(corep)->rtr_cl)
238#define RTR_UNLOCK(corep) \
239 corelock_unlock(&(corep)->rtr_cl)
240
241#ifdef HAVE_PRIORITY_SCHEDULING
242#define rtr_add_entry(corep, priority) \
243 prio_add_entry(&(corep)->rtr_dist, (priority))
244#define rtr_subtract_entry(corep, priority) \
245 prio_subtract_entry(&(corep)->rtr_dist, (priority))
246#define rtr_move_entry(corep, from, to) \
247 prio_move_entry(&(corep)->rtr_dist, (from), (to))
248#else /* !HAVE_PRIORITY_SCHEDULING */
249#define rtr_add_entry(corep, priority) \
250 do {} while (0)
251#define rtr_subtract_entry(corep, priority) \
252 do {} while (0)
253#define rtr_move_entry(corep, from, to) \
254 do {} while (0)
255#endif /* HAVE_PRIORITY_SCHEDULING */
256
257static FORCE_INLINE void thread_store_context(struct thread_entry *thread)
258{
259 store_context(&thread->context);
260#if (CONFIG_PLATFORM & PLATFORM_HOSTED)
261 thread->__errno = errno;
262#endif
263}
264
265static FORCE_INLINE void thread_load_context(struct thread_entry *thread)
266{
267#if (CONFIG_PLATFORM & PLATFORM_HOSTED)
268 errno = thread->__errno;
269#endif
270 load_context(&thread->context);
271}
272
273static FORCE_INLINE unsigned int
274should_switch_tasks(struct thread_entry *thread)
275{
276#ifdef HAVE_PRIORITY_SCHEDULING
277 const unsigned int core = CURRENT_CORE;
278#if NUM_CORES > 1
279 /* Forget about it if different CPU */
280 if (thread->core != core)
281 return THREAD_OK;
282#endif
283 /* Just woke something therefore a thread is on the run queue */
284 struct thread_entry *current =
285 RTR_THREAD_FIRST(&__core_id_entry(core)->rtr);
286 if (LIKELY(thread->priority >= current->priority))
287 return THREAD_OK;
288
289 /* There is a thread ready to run of higher priority on the same
290 * core as the current one; recommend a task switch. */
291 return THREAD_OK | THREAD_SWITCH;
292#else
293 return THREAD_OK;
294 (void)thread;
295#endif /* HAVE_PRIORITY_SCHEDULING */
296}
297
298#ifdef HAVE_PRIORITY_SCHEDULING
299
300/*---------------------------------------------------------------------------
301 * Increment frequency at category "priority"
302 *---------------------------------------------------------------------------
303 */
304static inline unsigned int prio_add_entry(
305 struct priority_distribution *pd, int priority)
306{
307 unsigned int count = ++pd->hist[priority];
308 if (count == 1)
309 priobit_set_bit(&pd->mask, priority);
310 return count;
311}
312
313/*---------------------------------------------------------------------------
314 * Decrement frequency at category "priority"
315 *---------------------------------------------------------------------------
316 */
317static inline unsigned int prio_subtract_entry(
318 struct priority_distribution *pd, int priority)
319{
320 unsigned int count = --pd->hist[priority];
321 if (count == 0)
322 priobit_clear_bit(&pd->mask, priority);
323 return count;
324}
325
326/*---------------------------------------------------------------------------
327 * Remove from one category and add to another
328 *---------------------------------------------------------------------------
329 */
330static inline void prio_move_entry(
331 struct priority_distribution *pd, int from, int to)
332{
333 if (--pd->hist[from] == 0)
334 priobit_clear_bit(&pd->mask, from);
335
336 if (++pd->hist[to] == 1)
337 priobit_set_bit(&pd->mask, to);
338}
339
340#endif /* HAVE_PRIORITY_SCHEDULING */
341
342/*---------------------------------------------------------------------------
343 * Common init for new thread basic info
344 *---------------------------------------------------------------------------
345 */
346static void new_thread_base_init(struct thread_entry *thread,
347 void **stackp, size_t *stack_sizep,
348 const char *name IF_PRIO(, int priority)
349 IF_COP(, unsigned int core))
350{
351 ALIGN_BUFFER(*stackp, *stack_sizep, MIN_STACK_ALIGN);
352 thread->stack = *stackp;
353 thread->stack_size = *stack_sizep;
354
355 thread->name = name;
356 wait_queue_init(&thread->queue);
357 thread->wqp = NULL;
358 tmo_set_dequeued(thread);
359#ifdef HAVE_PRIORITY_SCHEDULING
360 thread->skip_count = 0;
361 thread->blocker = NULL;
362 thread->base_priority = priority;
363 thread->priority = priority;
364 memset(&thread->pdist, 0, sizeof(thread->pdist));
365 prio_add_entry(&thread->pdist, priority);
366#endif
367#if NUM_CORES > 1
368 thread->core = core;
369#endif
370#ifdef HAVE_SCHEDULER_BOOSTCTRL
371 thread->cpu_boost = 0;
372#endif
373}
374
375/*---------------------------------------------------------------------------
376 * Move a thread onto the core's run queue and promote it
377 *---------------------------------------------------------------------------
378 */
379static inline void core_rtr_add(struct core_entry *corep,
380 struct thread_entry *thread)
381{
382 RTR_LOCK(corep);
383 rtr_queue_add(&corep->rtr, thread);
384 rtr_add_entry(corep, thread->priority);
385#ifdef HAVE_PRIORITY_SCHEDULING
386 thread->skip_count = thread->base_priority;
387#endif
388 thread->state = STATE_RUNNING;
389 RTR_UNLOCK(corep);
390}
391
392/*---------------------------------------------------------------------------
393 * Remove a thread from the core's run queue
394 *---------------------------------------------------------------------------
395 */
396static inline void core_rtr_remove(struct core_entry *corep,
397 struct thread_entry *thread)
398{
399 RTR_LOCK(corep);
400 rtr_queue_remove(&corep->rtr, thread);
401 rtr_subtract_entry(corep, thread->priority);
402 /* Does not demote state */
403 RTR_UNLOCK(corep);
404}
405
406/*---------------------------------------------------------------------------
407 * Move a thread back to a running state on its core
408 *---------------------------------------------------------------------------
409 */
410static NO_INLINE void core_schedule_wakeup(struct thread_entry *thread)
411{
412 const unsigned int core = IF_COP_CORE(thread->core);
413 struct core_entry *corep = __core_id_entry(core);
414 core_rtr_add(corep, thread);
415#if NUM_CORES > 1
416 if (core != CURRENT_CORE)
417 core_wake(core);
418#endif
419}
420
421#ifdef HAVE_PRIORITY_SCHEDULING
422/*---------------------------------------------------------------------------
423 * Locks the thread registered as the owner of the blocker and makes sure it
424 * didn't change in the meantime
425 *---------------------------------------------------------------------------
426 */
427#if NUM_CORES == 1
428static inline struct thread_entry * lock_blocker_thread(struct blocker *bl)
429{
430 return bl->thread;
431}
432#else /* NUM_CORES > 1 */
433static struct thread_entry * lock_blocker_thread(struct blocker *bl)
434{
435 /* The blocker thread may change during the process of trying to
436 capture it */
437 while (1)
438 {
439 struct thread_entry *t = bl->thread;
440
441 /* TRY, or else deadlocks are possible */
442 if (!t)
443 {
444 struct blocker_splay *blsplay = (struct blocker_splay *)bl;
445 if (corelock_try_lock(&blsplay->cl))
446 {
447 if (!bl->thread)
448 return NULL; /* Still multi */
449
450 corelock_unlock(&blsplay->cl);
451 }
452 }
453 else
454 {
455 if (TRY_LOCK_THREAD(t))
456 {
457 if (bl->thread == t)
458 return t;
459
460 UNLOCK_THREAD(t);
461 }
462 }
463 }
464}
465#endif /* NUM_CORES */
466
467static inline void unlock_blocker_thread(struct blocker *bl)
468{
469#if NUM_CORES > 1
470 struct thread_entry *blt = bl->thread;
471 if (blt)
472 UNLOCK_THREAD(blt);
473 else
474 corelock_unlock(&((struct blocker_splay *)bl)->cl);
475#endif /* NUM_CORES > 1*/
476 (void)bl;
477}
478
479/*---------------------------------------------------------------------------
480 * Change the priority and rtr entry for a running thread
481 *---------------------------------------------------------------------------
482 */
483static inline void set_rtr_thread_priority(
484 struct thread_entry *thread, int priority)
485{
486 const unsigned int core = IF_COP_CORE(thread->core);
487 struct core_entry *corep = __core_id_entry(core);
488 RTR_LOCK(corep);
489 rtr_move_entry(corep, thread->priority, priority);
490 thread->priority = priority;
491 RTR_UNLOCK(corep);
492}
493
494/*---------------------------------------------------------------------------
495 * Finds the highest priority thread in a list of threads. If the list is
496 * empty, PRIORITY_IDLE is returned.
497 *
498 * It is possible to use the struct priority_distribution within an object
499 * instead of scanning the remaining threads in the list but as a compromise,
500 * the resulting per-object memory overhead is saved at a slight speed
501 * penalty under high contention.
502 *---------------------------------------------------------------------------
503 */
504static int wait_queue_find_priority(struct __wait_queue *wqp)
505{
506 int highest_priority = PRIORITY_IDLE;
507 struct thread_entry *thread = WQ_THREAD_FIRST(wqp);
508
509 while (thread != NULL)
510 {
511 int priority = thread->priority;
512 if (priority < highest_priority)
513 highest_priority = priority;
514
515 thread = WQ_THREAD_NEXT(thread);
516 }
517
518 return highest_priority;
519}
520
521/*---------------------------------------------------------------------------
522 * Register priority with the blocking system and bubble it down the chain,
523 * if any, until we reach the end or something is already equal or higher.
524 *
525 * NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
526 * targets but that same action also guarantees a circular block anyway and
527 * those are prevented, right? :-)
528 *---------------------------------------------------------------------------
529 */
530static void inherit_priority(
531 struct blocker * const blocker0, struct blocker *bl,
532 struct thread_entry *blt, int newblpr)
533{
534 int oldblpr = bl->priority;
535
536 while (1)
537 {
538 if (blt == NULL)
539 {
540 /* Multiple owners */
541 struct blocker_splay *blsplay = (struct blocker_splay *)bl;
542
543 /* Recurse down all the branches of this; it's the only way.
544 We might meet the same queue several times if more than one of
545 these threads is waiting on the same queue. That isn't a problem
546 for us since we early-terminate, just notable. */
547 FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum)
548 {
549 bl->priority = oldblpr; /* To see the change each time */
550 blt = __thread_slot_entry(slotnum);
551 LOCK_THREAD(blt);
552 inherit_priority(blocker0, bl, blt, newblpr);
553 }
554
555 corelock_unlock(&blsplay->cl);
556 return;
557 }
558
559 bl->priority = newblpr;
560
561 /* Update blocker thread inheritance record */
562 if (newblpr < PRIORITY_IDLE)
563 prio_add_entry(&blt->pdist, newblpr);
564
565 if (oldblpr < PRIORITY_IDLE)
566 prio_subtract_entry(&blt->pdist, oldblpr);
567
568 int oldpr = blt->priority;
569 int newpr = priobit_ffs(&blt->pdist.mask);
570 if (newpr == oldpr)
571 break; /* No blocker thread priority change */
572
573 if (blt->state == STATE_RUNNING)
574 {
575 set_rtr_thread_priority(blt, newpr);
576 break; /* Running: last in chain */
577 }
578
579 /* Blocker is blocked */
580 blt->priority = newpr;
581
582 bl = blt->blocker;
583 if (LIKELY(bl == NULL))
584 break; /* Block doesn't support PIP */
585
586 if (UNLIKELY(bl == blocker0))
587 break; /* Full circle - deadlock! */
588
589 /* Blocker becomes current thread and the process repeats */
590 struct __wait_queue *wqp = wait_queue_ptr(blt);
591 struct thread_entry *t = blt;
592 blt = lock_blocker_thread(bl);
593
594 UNLOCK_THREAD(t);
595
596 /* Adjust this wait queue */
597 oldblpr = bl->priority;
598 if (newpr <= oldblpr)
599 newblpr = newpr;
600 else if (oldpr <= oldblpr)
601 newblpr = wait_queue_find_priority(wqp);
602
603 if (newblpr == oldblpr)
604 break; /* Queue priority not changing */
605 }
606
607 UNLOCK_THREAD(blt);
608}
609
610/*---------------------------------------------------------------------------
611 * Quick-inherit of priority elevation. 'thread' must not be runnable
612 *---------------------------------------------------------------------------
613 */
614static void priority_inherit_internal_inner(struct thread_entry *thread,
615 int blpr)
616{
617 if (prio_add_entry(&thread->pdist, blpr) == 1 && blpr < thread->priority)
618 thread->priority = blpr;
619}
620
621static inline void priority_inherit_internal(struct thread_entry *thread,
622 int blpr)
623{
624 if (blpr < PRIORITY_IDLE)
625 priority_inherit_internal_inner(thread, blpr);
626}
627
628/*---------------------------------------------------------------------------
629 * Quick-disinherit of priority elevation. 'thread' must be the current thread
630 *---------------------------------------------------------------------------
631 */
632static void priority_disinherit_internal_inner(struct thread_entry *thread,
633 int blpr)
634{
635 if (prio_subtract_entry(&thread->pdist, blpr) == 0 &&
636 blpr <= thread->priority)
637 {
638 int priority = priobit_ffs(&thread->pdist.mask);
639 if (priority != thread->priority)
640 set_rtr_thread_priority(thread, priority);
641 }
642}
643
644static inline void priority_disinherit_internal(struct thread_entry *thread,
645 int blpr)
646{
647 if (blpr < PRIORITY_IDLE)
648 priority_disinherit_internal_inner(thread, blpr);
649}
650
651void priority_disinherit(struct thread_entry *thread, struct blocker *bl)
652{
653 LOCK_THREAD(thread);
654 priority_disinherit_internal(thread, bl->priority);
655 UNLOCK_THREAD(thread);
656}
657
658/*---------------------------------------------------------------------------
659 * Transfer ownership from a single owner to a multi-owner splay from a wait
660 * queue
661 *---------------------------------------------------------------------------
662 */
663static void wakeup_thread_queue_multi_transfer(struct thread_entry *thread)
664{
665 /* All threads will have the same blocker and queue; only we are changing
666 it now */
667 struct __wait_queue *wqp = wait_queue_ptr(thread);
668 struct blocker *bl = thread->blocker;
669 struct blocker_splay *blsplay = (struct blocker_splay *)bl;
670 struct thread_entry *blt = bl->thread;
671
672 /* The first thread is already locked and is assumed tagged "multi" */
673 int count = 1;
674
675 /* Multiple versions of the wait queue may be seen if doing more than
676 one thread; queue removal isn't destructive to the pointers of the node
677 being removed; this may lead to the blocker priority being wrong for a
678 time but it gets fixed up below after getting exclusive access to the
679 queue */
680 while (1)
681 {
682 thread->blocker = NULL;
683 wait_queue_remove(thread);
684
685 unsigned int slotnum = THREAD_ID_SLOT(thread->id);
686 threadbit_set_bit(&blsplay->mask, slotnum);
687
688 struct thread_entry *tnext = WQ_THREAD_NEXT(thread);
689 if (tnext == NULL || tnext->retval == 0)
690 break;
691
692 UNLOCK_THREAD(thread);
693
694 count++;
695 thread = tnext;
696
697 LOCK_THREAD(thread);
698 }
699
700 /* Locking order reverses here since the threads are no longer on the
701 queued side */
702 if (count > 1)
703 corelock_lock(&blsplay->cl);
704
705 LOCK_THREAD(blt);
706
707 int blpr = bl->priority;
708 priority_disinherit_internal(blt, blpr);
709
710 if (count > 1)
711 {
712 blsplay->blocker.thread = NULL;
713
714 blpr = wait_queue_find_priority(wqp);
715
716 FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum)
717 {
718 UNLOCK_THREAD(thread);
719 thread = __thread_slot_entry(slotnum);
720 LOCK_THREAD(thread);
721 priority_inherit_internal(thread, blpr);
722 core_schedule_wakeup(thread);
723 }
724 }
725 else
726 {
727 /* Becomes a simple, direct transfer */
728 blsplay->blocker.thread = thread;
729
730 if (thread->priority <= blpr)
731 blpr = wait_queue_find_priority(wqp);
732
733 priority_inherit_internal(thread, blpr);
734 core_schedule_wakeup(thread);
735 }
736
737 UNLOCK_THREAD(thread);
738
739 bl->priority = blpr;
740
741 UNLOCK_THREAD(blt);
742
743 if (count > 1)
744 corelock_unlock(&blsplay->cl);
745
746 blt->retval = count;
747}
748
749/*---------------------------------------------------------------------------
750 * Transfer ownership to a thread waiting for an object and transfer
751 * inherited priority boost from other waiters. This algorithm knows that
752 * blocking chains may only unblock from the very end.
753 *
754 * Only the owning thread itself may call this and so the assumption that
755 * it is the running thread is made.
756 *---------------------------------------------------------------------------
757 */
758static void wakeup_thread_transfer(struct thread_entry *thread)
759{
760 /* Waking thread inherits priority boost from object owner (blt) */
761 struct blocker *bl = thread->blocker;
762 struct thread_entry *blt = bl->thread;
763
764 THREAD_ASSERT(__running_self_entry() == blt,
765 "UPPT->wrong thread", __running_self_entry());
766
767 LOCK_THREAD(blt);
768
769 thread->blocker = NULL;
770 struct __wait_queue *wqp = wait_queue_remove(thread);
771
772 int blpr = bl->priority;
773
774 /* Remove the object's boost from the owning thread */
775 priority_disinherit_internal_inner(blt, blpr);
776
777 struct thread_entry *tnext = WQ_THREAD_FIRST(wqp);
778 if (LIKELY(tnext == NULL))
779 {
780 /* Expected shortcut - no more waiters */
781 blpr = PRIORITY_IDLE;
782 }
783 else
784 {
785 /* If thread is at the blocker priority, its removal may drop it */
786 if (thread->priority <= blpr)
787 blpr = wait_queue_find_priority(wqp);
788
789 priority_inherit_internal_inner(thread, blpr);
790 }
791
792 bl->thread = thread; /* This thread pwns */
793
794 core_schedule_wakeup(thread);
795 UNLOCK_THREAD(thread);
796
797 bl->priority = blpr; /* Save highest blocked priority */
798
799 UNLOCK_THREAD(blt);
800}
801
802/*---------------------------------------------------------------------------
803 * Readjust priorities when waking a thread blocked waiting for another,
804 * in essence "releasing" the thread's effect on the object owner. Can be
805 * performed from any context.
806 *---------------------------------------------------------------------------
807 */
808static void wakeup_thread_release(struct thread_entry *thread)
809{
810 struct blocker *bl = thread->blocker;
811 struct thread_entry *blt = lock_blocker_thread(bl);
812
813 thread->blocker = NULL;
814 struct __wait_queue *wqp = wait_queue_remove(thread);
815
816 /* Off to see the wizard... */
817 core_schedule_wakeup(thread);
818
819 if (thread->priority > bl->priority)
820 {
821 /* Queue priority won't change */
822 UNLOCK_THREAD(thread);
823 unlock_blocker_thread(bl);
824 return;
825 }
826
827 UNLOCK_THREAD(thread);
828
829 int newblpr = wait_queue_find_priority(wqp);
830 if (newblpr == bl->priority)
831 {
832 /* Blocker priority won't change */
833 unlock_blocker_thread(bl);
834 return;
835 }
836
837 inherit_priority(bl, bl, blt, newblpr);
838}
839
840#endif /* HAVE_PRIORITY_SCHEDULING */
841
842
843/*---------------------------------------------------------------------------
844 * Explicitly wake up a thread on a blocking queue. Only affects threads in
845 * STATE_BLOCKED and STATE_BLOCKED_W_TMO.
846 *
847 * INTERNAL: Intended for use by kernel and not programs.
848 *---------------------------------------------------------------------------
849 */
850unsigned int wakeup_thread_(struct thread_entry *thread
851 IF_PRIO(, enum wakeup_thread_protocol proto))
852{
853 LOCK_THREAD(thread);
854
855 /* Determine thread's current state. */
856 switch (thread->state)
857 {
858 case STATE_BLOCKED:
859 case STATE_BLOCKED_W_TMO:
860#ifdef HAVE_PRIORITY_SCHEDULING
861 /* Threads with PIP blockers cannot specify "WAKEUP_DEFAULT" */
862 if (thread->blocker != NULL)
863 {
864 static void (* const funcs[])(struct thread_entry *thread)
865 ICONST_ATTR =
866 {
867 [WAKEUP_DEFAULT] = NULL,
868 [WAKEUP_TRANSFER] = wakeup_thread_transfer,
869 [WAKEUP_RELEASE] = wakeup_thread_release,
870 [WAKEUP_TRANSFER_MULTI] = wakeup_thread_queue_multi_transfer,
871 };
872
873 /* Call the specified unblocking PIP (does the rest) */
874 funcs[proto](thread);
875 }
876 else
877#endif /* HAVE_PRIORITY_SCHEDULING */
878 {
879 wait_queue_remove(thread);
880 core_schedule_wakeup(thread);
881 UNLOCK_THREAD(thread);
882 }
883
884 return should_switch_tasks(thread);
885
886 case STATE_RUNNING:
887 if (wait_queue_try_remove(thread))
888 {
889 UNLOCK_THREAD(thread);
890 return THREAD_OK; /* timed out */
891 }
892 /* fallthrough */
893 default:
894 UNLOCK_THREAD(thread);
895 return THREAD_NONE;
896 }
897}
898
899/*---------------------------------------------------------------------------
900 * Check the core's timeout list when at least one thread is due to wake.
901 * Filtering for the condition is done before making the call. Resets the
902 * tick at which the next check will occur.
903 *---------------------------------------------------------------------------
904 */
905static NO_INLINE void check_tmo_expired_inner(struct core_entry *corep)
906{
907 const long tick = current_tick; /* snapshot the current tick */
908 long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
909 struct thread_entry *prev = NULL;
910 struct thread_entry *thread = TMO_THREAD_FIRST(&corep->tmo);
911
912 /* If there are no processes waiting for a timeout, just keep the check
913 tick from falling into the past. */
914
915 /* Break the loop once we have walked through the list of all
916 * sleeping processes or have removed them all. */
917 while (thread != NULL)
918 {
919 /* Check sleeping threads. Allow interrupts between checks. */
920 enable_irq();
921
922 struct thread_entry *next = TMO_THREAD_NEXT(thread);
923
924 /* Lock thread slot against explicit wakeup */
925 disable_irq();
926 LOCK_THREAD(thread);
927
928 unsigned int state = thread->state;
929
930 if (LIKELY(state >= TIMEOUT_STATE_FIRST &&
931 TIME_BEFORE(tick, thread->tmo_tick)))
932 {
933 /* Timeout still pending - this will be the usual case */
934 if (TIME_BEFORE(thread->tmo_tick, next_tmo_check))
935 {
936 /* Move the next check up to its time */
937 next_tmo_check = thread->tmo_tick;
938 }
939
940 prev = thread;
941 }
942 else
943 {
944 /* TODO: there are no priority-inheriting timeout blocks
945 right now but the procedure should be established */
946
947 /* Sleep timeout has been reached / garbage collect stale list
948 items */
949 tmo_queue_expire(&corep->tmo, prev, thread);
950
951 if (state >= TIMEOUT_STATE_FIRST)
952 core_rtr_add(corep, thread);
953
954 /* removed this one - prev doesn't change */
955 }
956
957 UNLOCK_THREAD(thread);
958
959 thread = next;
960 }
961
962 corep->next_tmo_check = next_tmo_check;
963}
964
965static FORCE_INLINE void check_tmo_expired(struct core_entry *corep)
966{
967 if (!TIME_BEFORE(current_tick, corep->next_tmo_check))
968 check_tmo_expired_inner(corep);
969}
970
971/*---------------------------------------------------------------------------
972 * Prepares the current thread to sleep forever or for the given duration.
973 *---------------------------------------------------------------------------
974 */
975static FORCE_INLINE void prepare_block(struct thread_entry *current,
976 unsigned int state, int timeout)
977{
978 const unsigned int core = IF_COP_CORE(current->core);
979
980 /* Remove the thread from the list of running threads. */
981 struct core_entry *corep = __core_id_entry(core);
982 core_rtr_remove(corep, current);
983
984 if (timeout >= 0)
985 {
986 /* Sleep may expire. */
987 long tmo_tick = current_tick + timeout;
988 current->tmo_tick = tmo_tick;
989
990 if (TIME_BEFORE(tmo_tick, corep->next_tmo_check))
991 corep->next_tmo_check = tmo_tick;
992
993 tmo_queue_register(&corep->tmo, current);
994
995 if (state == STATE_BLOCKED)
996 state = STATE_BLOCKED_W_TMO;
997 }
998
999 /* Report new state. */
1000 current->state = state;
1001}
1002
1003/*---------------------------------------------------------------------------
1004 * Switch thread in round robin fashion for any given priority. Any thread
1005 * that removed itself from the running list first must specify itself in
1006 * the parameter.
1007 *
1008 * INTERNAL: Intended for use by kernel and not programs.
1009 *---------------------------------------------------------------------------
1010 */
1011void switch_thread(void)
1012{
1013 const unsigned int core = CURRENT_CORE;
1014 struct core_entry *corep = __core_id_entry(core);
1015 struct thread_entry *thread = corep->running;
1016
1017 if (thread)
1018 {
1019#ifdef RB_PROFILE
1020 profile_thread_stopped(THREAD_ID_SLOT(thread->id));
1021#endif
1022#ifdef BUFLIB_DEBUG_CHECK_VALID
1023 /* Check core_ctx buflib integrity */
1024 core_check_valid();
1025#endif
1026 thread_store_context(thread);
1027
1028 /* Check if the current thread stack is overflown */
1029 if (UNLIKELY(thread->stack[0] != DEADBEEF) && thread->stack_size > 0)
1030 thread_stkov(thread);
1031 }
1032
1033 /* TODO: make a real idle task */
1034 for (;;)
1035 {
1036 disable_irq();
1037
1038 /* Check for expired timeouts */
1039 check_tmo_expired(corep);
1040
1041 RTR_LOCK(corep);
1042
1043 if (!RTR_EMPTY(&corep->rtr))
1044 break;
1045
1046 thread = NULL;
1047
1048 /* Enter sleep mode to reduce power usage */
1049 RTR_UNLOCK(corep);
1050 core_sleep(IF_COP(core));
1051
1052 /* Awakened by interrupt or other CPU */
1053 }
1054
1055 thread = (thread && thread->state == STATE_RUNNING) ?
1056 RTR_THREAD_NEXT(thread) : RTR_THREAD_FIRST(&corep->rtr);
1057
1058#ifdef HAVE_PRIORITY_SCHEDULING
1059 /* Select the new task based on priorities and the last time a
1060 * process got CPU time relative to the highest priority runnable
1061 * task. If priority is not a feature, then FCFS is used (above). */
1062 int max = priobit_ffs(&corep->rtr_dist.mask);
1063
1064 for (;;)
1065 {
1066 int priority = thread->priority;
1067 int diff;
1068
1069 /* This ridiculously simple method of aging seems to work
1070 * suspiciously well. It does tend to reward CPU hogs (under-
1071 * yielding) but that's generally not desirable at all. On
1072 * the plus side, relative to other threads, it penalizes
1073 * excess yielding, which is good if some high priority thread
1074 * is performing no useful work such as polling for a device
1075 * to be ready. Of course, aging is only employed when higher
1076 * and lower priority threads are runnable. The highest
1077 * priority runnable thread(s) are never skipped unless a
1078 * lower-priority process has aged sufficiently. Priorities
1079 * of REALTIME class are run strictly according to priority
1080 * thus are not subject to switchout due to lower-priority
1081 * processes aging; they must give up the processor by going
1082 * off the run list. */
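        /* Worked example (illustrative): with max = 3, a thread of priority 5
         * has diff = 2, so it is only chosen ahead of higher-priority
         * candidates once ++skip_count exceeds diff*diff = 4. */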
1083 if (LIKELY(priority <= max) ||
1084 (priority > PRIORITY_REALTIME &&
1085 (diff = priority - max, ++thread->skip_count > diff*diff)))
1086 {
1087 break;
1088 }
1089
1090 thread = RTR_THREAD_NEXT(thread);
1091 }
1092
1093 thread->skip_count = 0; /* Reset aging counter */
1094#endif /* HAVE_PRIORITY_SCHEDULING */
1095
1096 rtr_queue_make_first(&corep->rtr, thread);
1097 corep->running = thread;
1098
1099 RTR_UNLOCK(corep);
1100 enable_irq();
1101
1102#ifdef RB_PROFILE
1103 profile_thread_started(THREAD_ID_SLOT(thread->id));
1104#endif
1105
1106 /* And finally, give control to the next thread. */
1107 thread_load_context(thread);
1108}
1109
1110/*---------------------------------------------------------------------------
1111 * Sleeps a thread for at least a specified number of ticks with zero being
1112 * a wait until the next tick.
1113 *
1114 * INTERNAL: Intended for use by kernel and not programs.
1115 *---------------------------------------------------------------------------
1116 */
1117void sleep_thread(int ticks)
1118{
1119 struct thread_entry *current = __running_self_entry();
1120 LOCK_THREAD(current);
1121 prepare_block(current, STATE_SLEEPING, MAX(ticks, 0) + 1);
1122 UNLOCK_THREAD(current);
1123}
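/* Typical call pattern (illustrative sketch only; the exact public wrapper
 * lives in the kernel, not in this file):
 *
 *     disable_irq();
 *     sleep_thread(ticks);
 *     switch_thread();
 */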
1124
1125/*---------------------------------------------------------------------------
1126 * Block a thread on a blocking queue for explicit wakeup. If timeout is
1127 * negative, the block is infinite.
1128 *
1129 * INTERNAL: Intended for use by kernel and not programs.
1130 *---------------------------------------------------------------------------
1131 */
1132void block_thread_(struct thread_entry *current, int timeout)
1133{
1134 LOCK_THREAD(current);
1135
1136#ifdef HAVE_PRIORITY_SCHEDULING
1137 struct blocker *bl = current->blocker;
1138 struct thread_entry *blt = NULL;
1139 if (bl != NULL)
1140 {
1141 current->blocker = bl;
1142 blt = lock_blocker_thread(bl);
1143 }
1144#endif /* HAVE_PRIORITY_SCHEDULING */
1145
1146 wait_queue_register(current);
1147 prepare_block(current, STATE_BLOCKED, timeout);
1148
1149#ifdef HAVE_PRIORITY_SCHEDULING
1150 if (bl != NULL)
1151 {
1152 int newblpr = current->priority;
1153 UNLOCK_THREAD(current);
1154
1155 if (newblpr < bl->priority)
1156 inherit_priority(bl, bl, blt, newblpr);
1157 else
1158 unlock_blocker_thread(bl); /* Queue priority won't change */
1159 }
1160 else
1161#endif /* HAVE_PRIORITY_SCHEDULING */
1162 {
1163 UNLOCK_THREAD(current);
1164 }
1165}
1166
1167/*---------------------------------------------------------------------------
1168 * Place the current core in idle mode - woken up on interrupt or wake
1169 * request from another core.
1170 *---------------------------------------------------------------------------
1171 */
1172void core_idle(void)
1173{
1174 disable_irq();
1175 core_sleep(IF_COP(CURRENT_CORE));
1176}
1177
1178/*---------------------------------------------------------------------------
1179 * Create a thread. If using a dual core architecture, specify which core to
1180 * start the thread on.
1181 *
1182 * Return ID if context area could be allocated, else 0.
1183 *---------------------------------------------------------------------------
1184 */
1185unsigned int create_thread(void (*function)(void),
1186 void* stack, size_t stack_size,
1187 unsigned flags, const char *name
1188 IF_PRIO(, int priority)
1189 IF_COP(, unsigned int core))
1190{
1191 struct thread_entry *thread = thread_alloc();
1192 if (thread == NULL)
1193 return 0;
1194
1195 new_thread_base_init(thread, &stack, &stack_size, name
1196 IF_PRIO(, priority) IF_COP(, core));
1197
1198 unsigned int stack_words = stack_size / sizeof (uintptr_t);
1199 if (stack_words == 0)
1200 return 0;
1201
1202 /* Munge the stack to make it easy to spot stack overflows */
1203 for (unsigned int i = 0; i < stack_words; i++)
1204 ((uintptr_t *)stack)[i] = DEADBEEF;
1205
1206#if NUM_CORES > 1
1207 /* Writeback stack munging or anything else before starting */
1208 if (core != CURRENT_CORE)
1209 commit_dcache();
1210#endif
1211
1212 thread->context.sp = (typeof (thread->context.sp))(stack + stack_size);
1213 THREAD_STARTUP_INIT(core, thread, function);
1214
1215 int oldlevel = disable_irq_save();
1216 LOCK_THREAD(thread);
1217
1218 thread->state = STATE_FROZEN;
1219
1220 if (!(flags & CREATE_THREAD_FROZEN))
1221 core_schedule_wakeup(thread);
1222
1223 unsigned int id = thread->id; /* Snapshot while locked */
1224
1225 UNLOCK_THREAD(thread);
1226 restore_irq(oldlevel);
1227
1228 return id;
1229}
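/* Minimal usage sketch (illustrative only, kept out of the build): creating a
 * worker thread and waiting for it to finish.  The stack sizing, thread body
 * and PRIORITY_BACKGROUND constant are assumptions for the example, not taken
 * from this file. */
#if 0
static uintptr_t example_stack[DEFAULT_STACK_SIZE / sizeof(uintptr_t)];

static void example_worker(void)
{
    /* ... do some work ... */
    thread_exit();
}

static void example_spawn_and_join(void)
{
    unsigned int id = create_thread(example_worker,
                                    example_stack, sizeof(example_stack),
                                    0, "example"
                                    IF_PRIO(, PRIORITY_BACKGROUND)
                                    IF_COP(, CPU));
    if (id != 0)
        thread_wait(id); /* blocks until example_worker calls thread_exit() */
}
#endif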
1230
1231/*---------------------------------------------------------------------------
1232 * Block the current thread until another thread terminates. A thread may
1233 * wait on itself to terminate but that will deadlock.
1234 *
1235 * Parameter is the ID as returned from create_thread().
1236 *---------------------------------------------------------------------------
1237 */
1238void thread_wait(unsigned int thread_id)
1239{
1240 ASSERT_CPU_MODE(CPU_MODE_THREAD_CONTEXT);
1241
1242 struct thread_entry *current = __running_self_entry();
1243 struct thread_entry *thread = __thread_id_entry(thread_id);
1244
1245 corelock_lock(&thread->waiter_cl);
1246
1247 if (thread->id == thread_id && thread->state != STATE_KILLED)
1248 {
1249 disable_irq();
1250 block_thread(current, TIMEOUT_BLOCK, &thread->queue, NULL);
1251
1252 corelock_unlock(&thread->waiter_cl);
1253
1254 switch_thread();
1255 return;
1256 }
1257
1258 corelock_unlock(&thread->waiter_cl);
1259}
1260
1261/*---------------------------------------------------------------------------
1262 * Exit the current thread
1263 *---------------------------------------------------------------------------
1264 */
1265static USED_ATTR NORETURN_ATTR
1266void thread_exit_final(struct thread_entry *current)
1267{
1268 /* Slot is no longer this thread */
1269 new_thread_id(current);
1270 current->name = NULL;
1271
1272 /* No longer using resources from creator */
1273 wait_queue_wake(&current->queue);
1274
1275 UNLOCK_THREAD(current);
1276 corelock_unlock(&current->waiter_cl);
1277
1278 thread_free(current);
1279
1280 switch_thread();
1281
1282 /* This should never and must never be reached - if it is, the
1283 * state is corrupted */
1284 THREAD_PANICF("thread_exit->K:*R", current);
1285}
1286
1287void thread_exit(void)
1288{
1289 struct core_entry *corep = __core_id_entry(CURRENT_CORE);
1290 register struct thread_entry *current = corep->running;
1291
1292 /* Cancel CPU boost if any */
1293 cancel_cpu_boost();
1294
1295 disable_irq();
1296
1297 corelock_lock(&current->waiter_cl);
1298 LOCK_THREAD(current);
1299
1300#ifdef HAVE_PRIORITY_SCHEDULING
1301 /* Only one bit in the mask should be set, with a frequency of 1, which
1302 * represents the thread's own base priority; otherwise threads are waiting
1303 * on an abandoned object */
1304 if (priobit_popcount(&current->pdist.mask) != 1 ||
1305 current->pdist.hist[priobit_ffs(&current->pdist.mask)] > 1)
1306 thread_panicf("abandon ship!", current);
1307#endif /* HAVE_PRIORITY_SCHEDULING */
1308
1309 /* Remove from scheduler lists */
1310 tmo_queue_remove(&corep->tmo, current);
1311 prepare_block(current, STATE_KILLED, -1);
1312 corep->running = NULL; /* No switch_thread context save */
1313
1314#ifdef RB_PROFILE
1315 profile_thread_stopped(THREAD_ID_SLOT(current->id));
1316#endif
1317
1318 /* Do final release of resources and remove the thread */
1319#if NUM_CORES > 1
1320 thread_exit_finalize(current->core, current);
1321#else
1322 thread_exit_final(current);
1323#endif
1324}
1325
1326#ifdef HAVE_PRIORITY_SCHEDULING
1327/*---------------------------------------------------------------------------
1328 * Sets the thread's relative base priority for the core it runs on. Any
1329 * needed inheritance changes may also happen.
1330 *---------------------------------------------------------------------------
1331 */
1332int thread_set_priority(unsigned int thread_id, int priority)
1333{
1334 if (priority < HIGHEST_PRIORITY || priority > LOWEST_PRIORITY)
1335 return -1; /* Invalid priority argument */
1336
1337 int old_base_priority = -1;
1338 struct thread_entry *thread = __thread_id_entry(thread_id);
1339
1340 const int oldlevel = disable_irq_save();
1341 LOCK_THREAD(thread);
1342
1343 if (thread->id != thread_id || thread->state == STATE_KILLED)
1344 goto done; /* Invalid thread */
1345
1346 old_base_priority = thread->base_priority;
1347 if (priority == old_base_priority)
1348 goto done; /* No base priority change */
1349
1350 thread->base_priority = priority;
1351
1352 /* Adjust the thread's priority influence on itself */
1353 prio_move_entry(&thread->pdist, old_base_priority, priority);
1354
1355 int old_priority = thread->priority;
1356 int new_priority = priobit_ffs(&thread->pdist.mask);
1357
1358 if (old_priority == new_priority)
1359 goto done; /* No running priority change */
1360
1361 if (thread->state == STATE_RUNNING)
1362 {
1363 /* This thread is running - just change location on the run queue.
1364 Also sets thread->priority. */
1365 set_rtr_thread_priority(thread, new_priority);
1366 goto done;
1367 }
1368
1369 /* Thread is blocked */
1370 struct blocker *bl = thread->blocker;
1371 if (bl == NULL)
1372 {
1373 thread->priority = new_priority;
1374 goto done; /* End of transitive blocks */
1375 }
1376
1377 struct thread_entry *blt = lock_blocker_thread(bl);
1378 struct __wait_queue *wqp = wait_queue_ptr(thread);
1379
1380 thread->priority = new_priority;
1381
1382 UNLOCK_THREAD(thread);
1383 thread = NULL;
1384
1385 int oldblpr = bl->priority;
1386 int newblpr = oldblpr;
1387 if (new_priority < oldblpr)
1388 newblpr = new_priority;
1389 else if (old_priority <= oldblpr)
1390 newblpr = wait_queue_find_priority(wqp);
1391
1392 if (newblpr == oldblpr)
1393 {
1394 unlock_blocker_thread(bl);
1395 goto done;
1396 }
1397
1398 inherit_priority(bl, bl, blt, newblpr);
1399done:
1400 if (thread)
1401 UNLOCK_THREAD(thread);
1402 restore_irq(oldlevel);
1403 return old_base_priority;
1404}
1405
1406/*---------------------------------------------------------------------------
1407 * Returns the current base priority for a thread.
1408 *---------------------------------------------------------------------------
1409 */
1410int thread_get_priority(unsigned int thread_id)
1411{
1412 struct thread_entry *thread = __thread_id_entry(thread_id);
1413 int base_priority = thread->base_priority;
1414
1415 /* Simply check without locking slot. It may or may not be valid by the
1416 * time the function returns anyway. If all tests pass, it is the
1417 * correct value for when it was valid. */
1418 if (thread->id != thread_id || thread->state == STATE_KILLED)
1419 base_priority = -1;
1420
1421 return base_priority;
1422}
1423#endif /* HAVE_PRIORITY_SCHEDULING */
1424
1425/*---------------------------------------------------------------------------
1426 * Starts a frozen thread - similar semantics to wakeup_thread except that
1427 * the thread is on no scheduler or wakeup queue at all. It exists simply by
1428 * virtue of the slot having a state of STATE_FROZEN.
1429 *---------------------------------------------------------------------------
1430 */
1431void thread_thaw(unsigned int thread_id)
1432{
1433 struct thread_entry *thread = __thread_id_entry(thread_id);
1434 int oldlevel = disable_irq_save();
1435
1436 LOCK_THREAD(thread);
1437
1438 /* If thread is the current one, it cannot be frozen, therefore
1439 * there is no need to check that. */
1440 if (thread->id == thread_id && thread->state == STATE_FROZEN)
1441 core_schedule_wakeup(thread);
1442
1443 UNLOCK_THREAD(thread);
1444 restore_irq(oldlevel);
1445}
1446
1447#if NUM_CORES > 1
1448/*---------------------------------------------------------------------------
1449 * Switch the processor that the currently executing thread runs on.
1450 *---------------------------------------------------------------------------
1451 */
1452static USED_ATTR NORETURN_ATTR
1453void switch_core_final(unsigned int old_core, struct thread_entry *current)
1454{
1455 /* Old core won't be using slot resources at this point */
1456 core_schedule_wakeup(current);
1457 UNLOCK_THREAD(current);
1458#ifdef RB_PROFILE
1459 profile_thread_stopped(THREAD_ID_SLOT(current->id));
1460#endif
1461 switch_thread();
1462 /* not reached */
1463 THREAD_PANICF("switch_core_final->same core!", current);
1464 (void)old_core;
1465}
1466
1467unsigned int switch_core(unsigned int new_core)
1468{
1469 const unsigned int old_core = CURRENT_CORE;
1470 if (old_core == new_core)
1471 return old_core; /* No change */
1472
1473 struct core_entry *corep = __core_id_entry(old_core);
1474 struct thread_entry *current = corep->running;
1475
1476 disable_irq();
1477 LOCK_THREAD(current);
1478
1479 /* Remove us from old core lists */
1480 tmo_queue_remove(&corep->tmo, current);
1481 core_rtr_remove(corep, current);
1482 corep->running = NULL; /* No switch_thread context save */
1483
1484 /* Do the actual migration */
1485 current->core = new_core;
1486 switch_thread_core(old_core, current);
1487
1488 /* Executing on new core */
1489 return old_core;
1490}
1491#endif /* NUM_CORES > 1 */
1492
1493#ifdef HAVE_SCHEDULER_BOOSTCTRL
1494/*---------------------------------------------------------------------------
1495 * Change the boost state of a thread boosting or unboosting the CPU
1496 * as required.
1497 *---------------------------------------------------------------------------
1498 */
1499static inline void boost_thread(struct thread_entry *thread, bool boost)
1500{
1501 if ((thread->cpu_boost != 0) != boost)
1502 {
1503 thread->cpu_boost = boost;
1504#ifdef CPU_BOOST_LOGGING
1505 const char fmt[] = __FILE__" thread[%s]";
1506 char pathbuf[sizeof(fmt) + 32]; /* thread name 32 */
1507 snprintf(pathbuf, sizeof(pathbuf), fmt, thread->name);
1508 cpu_boost_(boost, pathbuf, __LINE__);
1509#else
1510 cpu_boost(boost);
1511#endif
1512 }
1513}
1514
1515void trigger_cpu_boost(void)
1516{
1517 boost_thread(__running_self_entry(), true);
1518}
1519
1520void cancel_cpu_boost(void)
1521{
1522 boost_thread(__running_self_entry(), false);
1523}
1524#endif /* HAVE_SCHEDULER_BOOSTCTRL */
1525
1526/*---------------------------------------------------------------------------
1527 * Initialize threading API. This assumes interrupts are not yet enabled. On
1528 * multicore setups, no core is allowed to proceed until create_thread calls
1529 * are safe to perform.
1530 *---------------------------------------------------------------------------
1531 */
1532void INIT_ATTR init_threads(void)
1533{
1534 const unsigned int core = CURRENT_CORE;
1535
1536 if (core == CPU)
1537 {
1538 thread_alloc_init(); /* before using cores! */
1539
1540 /* Create main thread */
1541 struct thread_entry *thread = thread_alloc();
1542 if (thread == NULL)
1543 {
1544 /* WTF? There really must be a slot available at this stage.
1545 * This can fail if, for example, .bss isn't zero'ed out by the
1546 * loader or threads is in the wrong section. */
1547 THREAD_PANICF("init_threads->no slot", NULL);
1548 }
1549
1550 size_t stack_size;
1551 void *stack = __get_main_stack(&stack_size);
1552 new_thread_base_init(thread, &stack, &stack_size, __main_thread_name
1553 IF_PRIO(, PRIORITY_MAIN_THREAD) IF_COP(, core));
1554
1555 struct core_entry *corep = __core_id_entry(core);
1556 core_rtr_add(corep, thread);
1557 corep->running = thread;
1558
1559#ifdef INIT_MAIN_THREAD
1560 init_main_thread(&thread->context);
1561#endif
1562 }
1563
1564#if NUM_CORES > 1
1565 /* Boot CPU:
1566 * Wait for other processors to finish their inits since create_thread
1567 * isn't safe to call until the kernel inits are done. The first
1568 * threads created in the system must of course be created by CPU.
1569 * Another possible approach is to initialize all cores and slots
1570 * for each core by CPU, let the remainder proceed in parallel and
1571 * signal CPU when all are finished.
1572 *
1573 * Other:
1574 * After last processor completes, it should signal all others to
1575 * proceed or may signal the next and call thread_exit(). The last one
1576 * to finish will signal CPU.
1577 */
1578 core_thread_init(core);
1579
1580 if (core != CPU)
1581 {
1582 /* No main thread on coprocessors - go idle and wait */
1583 switch_thread();
1584 THREAD_PANICF("init_threads() - coprocessor returned", NULL);
1585 }
1586#endif /* NUM_CORES */
1587}