/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                      \/            \/     \/    \/           \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#ifndef THREAD_INTERNAL_H
#define THREAD_INTERNAL_H

#include "thread.h"
#include <stdio.h>
#include "panic.h"
#include "debug.h"

/*
 * We need more stack when running under a host OS;
 * maybe the C library functions are more expensive?
 *
 * The simulator (possibly) doesn't simulate stack usage anyway, but still... */

#if defined(HAVE_SDL_THREADS) || defined(__PCTOOL__) || defined(CTRU)
struct regs
{
    void *t;             /* OS thread */
    void *told;          /* Last thread in slot (explained in thread-sdl.c) */
    void *s;             /* Semaphore for blocking and wakeup */
    void (*start)(void); /* Start function */
};

#define DEFAULT_STACK_SIZE 0x100 /* tiny, ignored anyway */
#else
#include "asm/thread.h"
#endif /* HAVE_SDL_THREADS */

/* NOTE: The use of the word "queue" may also refer to a linked list of
   threads being maintained that are normally dealt with in FIFO order,
   and not necessarily a kernel event_queue */
enum
{
    /* States without a timeout must be first */
    STATE_KILLED = 0,    /* Thread is killed (default) */
    STATE_RUNNING,       /* Thread is currently running */
    STATE_BLOCKED,       /* Thread is indefinitely blocked on a queue */
    /* These states involve adding the thread to the tmo list */
    STATE_SLEEPING,      /* Thread is sleeping with a timeout */
    STATE_BLOCKED_W_TMO, /* Thread is blocked on a queue with a timeout */
    /* Miscellaneous states */
    STATE_FROZEN,        /* Thread is suspended and will not run until
                            thread_thaw is called with its ID */
    THREAD_NUM_STATES,
    TIMEOUT_STATE_FIRST = STATE_SLEEPING,
};
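
/* Illustrative aside (not part of the original header): because the enum is
 * ordered so that the timeout-bearing states start at TIMEOUT_STATE_FIRST
 * and STATE_FROZEN sits outside that pair, a tmo-list membership test can
 * be written as below. The helper name is hypothetical. */
#if 0
static inline bool state_has_timeout(unsigned int state)
{
    /* Only STATE_SLEEPING and STATE_BLOCKED_W_TMO put a thread on the tmo
       list; equivalently TIMEOUT_STATE_FIRST <= state <= STATE_BLOCKED_W_TMO */
    return state == STATE_SLEEPING || state == STATE_BLOCKED_W_TMO;
}
#endif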

#ifdef HAVE_PRIORITY_SCHEDULING

/* Quick-disinherit of priority elevation. Must be a running thread. */
void priority_disinherit(struct thread_entry *thread, struct blocker *bl);

struct priority_distribution
{
    uint8_t   hist[NUM_PRIORITIES]; /* Histogram: Frequency for each priority */
    priobit_t mask;                 /* Bitmask of hist entries that are not zero */
};

#endif /* HAVE_PRIORITY_SCHEDULING */
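
/* Illustrative aside (not part of the original header; the helper names are
 * hypothetical and the priobit_* bit operations are assumed): the histogram
 * counts threads per priority while the mask caches which buckets are
 * non-empty, so the highest pending priority falls out of a single
 * find-first-set over the mask. */
#if 0
static inline void prio_dist_add(struct priority_distribution *pd,
                                 unsigned int priority)
{
    if (pd->hist[priority]++ == 0)       /* bucket went 0 -> 1 */
        priobit_set_bit(&pd->mask, priority);
}

static inline void prio_dist_remove(struct priority_distribution *pd,
                                    unsigned int priority)
{
    if (--pd->hist[priority] == 0)       /* bucket went 1 -> 0 */
        priobit_clear_bit(&pd->mask, priority);
}
#endif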

#define __rtr_queue      lldc_head
#define __rtr_queue_node lldc_node

#define __tmo_queue      ll_head
#define __tmo_queue_node ll_node

/* Information kept in each thread slot
 * members are arranged according to size - largest first - in order
 * to ensure both alignment and packing at the same time.
 */
struct thread_entry
{
    struct regs context;            /* Register context at switch -
                                       _must_ be first member */
#ifndef HAVE_SDL_THREADS
    uintptr_t *stack;               /* Pointer to top of stack */
#endif
    const char *name;               /* Thread name */
    long tmo_tick;                  /* Tick when thread should be woken */
    struct __rtr_queue_node rtr;    /* Node for run queue */
    struct __tmo_queue_node tmo;    /* Links for timeout list */
    struct __wait_queue_node wq;    /* Node for wait queue */
    struct __wait_queue *volatile wqp; /* Pointer to registered wait queue */
#if NUM_CORES > 1
    struct corelock waiter_cl;      /* Corelock for thread_wait */
    struct corelock slot_cl;        /* Corelock to lock thread slot */
    unsigned char core;             /* The core to which thread belongs */
#endif
    struct __wait_queue queue;      /* List of threads waiting for thread to be
                                       removed */
    volatile intptr_t retval;       /* Return value from a blocked operation/
                                       misc. use */
    uint32_t id;                    /* Current slot id */
    int __errno;                    /* Thread error number (errno tls) */
#ifdef HAVE_PRIORITY_SCHEDULING
    /* Priority summary of owned objects that support inheritance */
    struct blocker *blocker;        /* Pointer to blocker when this thread is blocked
                                       on an object that supports PIP -
                                       states: STATE_BLOCKED/STATE_BLOCKED_W_TMO */
    struct priority_distribution pdist; /* Priority summary of owned objects
                                       that have blocked threads and thread's own
                                       base priority */
    int skip_count;                 /* Number of times skipped if higher priority
                                       thread was running */
    unsigned char base_priority;    /* Base priority (set explicitly during
                                       creation or thread_set_priority) */
    unsigned char priority;         /* Scheduled priority (higher of base or
                                       all threads blocked by this one) */
#endif
    unsigned char state;            /* Thread slot state (STATE_*) */
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    unsigned char cpu_boost;        /* CPU frequency boost flag */
#endif
#ifndef HAVE_SDL_THREADS
    size_t stack_size;              /* Size of stack in bytes */
#endif
};

/* Thread ID, 32 bits = |VVVVVVVV|VVVVVVVV|VVVVVVVV|SSSSSSSS| */
#define THREAD_ID_VERSION_SHIFT 8
#define THREAD_ID_VERSION_MASK  0xffffff00
#define THREAD_ID_SLOT_MASK     0x000000ff
#define THREAD_ID_INIT(n)       ((1u << THREAD_ID_VERSION_SHIFT) | (n))
#define THREAD_ID_SLOT(id)      ((id) & THREAD_ID_SLOT_MASK)
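
/* Illustrative aside (not part of the original header): the version field is
 * bumped whenever a slot is reused, so a stale ID held after its thread dies
 * no longer matches the slot's current ID. A hypothetical regeneration step
 * might look like this: */
#if 0
static uint32_t bump_thread_id(uint32_t id)
{
    /* increment the version, dropping any carry out of the version field */
    uint32_t version = (id + (1u << THREAD_ID_VERSION_SHIFT))
                           & THREAD_ID_VERSION_MASK;
    if (version == 0)                    /* wrapped; skip version 0, which
                                            THREAD_ID_INIT never produces */
        version = 1u << THREAD_ID_VERSION_SHIFT;
    return version | THREAD_ID_SLOT(id);
}
#endif
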
#define DEADBEEF ((uintptr_t)0xdeadbeefdeadbeefull)

/* Information kept for each core
 * Members are arranged for the same reason as in thread_entry
 */
struct core_entry
{
    /* "Active" lists - the core operates on these constantly; they are
       never locked and interrupts do not access them */
    struct __rtr_queue rtr;         /* Threads that are runnable */
    struct __tmo_queue tmo;         /* Threads on a bounded wait */
    struct thread_entry *running;   /* Currently running thread */
#ifdef HAVE_PRIORITY_SCHEDULING
    struct priority_distribution rtr_dist; /* Summary of runnables */
#endif
    long next_tmo_check;            /* Next due timeout check */
#if NUM_CORES > 1
    struct corelock rtr_cl;         /* Lock for rtr list */
#endif /* NUM_CORES */
};

/* Hide a few scheduler details from itself to make allocation more flexible */
#define __main_thread_name \
    ({ extern const char __main_thread_name_str[]; \
       __main_thread_name_str; })

static FORCE_INLINE
    void * __get_main_stack(size_t *stacksize)
{
#if (CONFIG_PLATFORM & PLATFORM_NATIVE)
    extern uintptr_t stackbegin[];
    extern uintptr_t stackend[];
#else
    extern uintptr_t *stackbegin;
    extern uintptr_t *stackend;
#endif
    *stacksize = (uintptr_t)stackend - (uintptr_t)stackbegin;
    return stackbegin;
}

void format_thread_name(char *buf, size_t bufsize,
                        const struct thread_entry *thread);

static FORCE_INLINE
    struct core_entry * __core_id_entry(unsigned int core)
{
#if NUM_CORES > 1
    extern struct core_entry * __cores[NUM_CORES];
    return __cores[core];
#else
    extern struct core_entry __cores[NUM_CORES];
    return &__cores[core];
#endif
}

#define __running_self_entry() \
    __core_id_entry(CURRENT_CORE)->running

static FORCE_INLINE
    struct thread_entry * __thread_slot_entry(unsigned int slotnum)
{
    extern struct thread_entry * __threads[MAXTHREADS];
    return __threads[slotnum];
}

#define __thread_id_entry(id) \
    __thread_slot_entry(THREAD_ID_SLOT(id))

#define THREAD_FROM(p, member) \
    container_of(p, struct thread_entry, member)

#define RTR_EMPTY(rtrp) \
    ({ (rtrp)->head == NULL; })

#define RTR_THREAD_FIRST(rtrp) \
    ({ THREAD_FROM((rtrp)->head, rtr); })

#define RTR_THREAD_NEXT(thread) \
    ({ THREAD_FROM((thread)->rtr.next, rtr); })

#define TMO_THREAD_FIRST(tmop) \
    ({ struct __tmo_queue *__tmop = (tmop); \
       __tmop->head ? THREAD_FROM(__tmop->head, tmo) : NULL; })

#define TMO_THREAD_NEXT(thread) \
    ({ struct __tmo_queue_node *__next = (thread)->tmo.next; \
       __next ? THREAD_FROM(__next, tmo) : NULL; })

#define WQ_THREAD_FIRST(wqp) \
    ({ struct __wait_queue *__wqp = (wqp); \
       __wqp->head ? THREAD_FROM(__wqp->head, wq) : NULL; })

#define WQ_THREAD_NEXT(thread) \
    ({ struct __wait_queue_node *__next = (thread)->wq.next; \
       __next ? THREAD_FROM(__next, wq) : NULL; })
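
/* Illustrative aside (not part of the original header): the tmo and wq
 * accessors return NULL at the end of their lists, so they can drive a
 * plain for-loop; the rtr queue's lldc_* operations imply a circular list,
 * which is why RTR_THREAD_NEXT never yields NULL and a full scan must
 * instead stop upon returning to its starting thread. The function name
 * below is hypothetical. */
#if 0
static void visit_waiters(struct __wait_queue *wqp)
{
    for (struct thread_entry *t = WQ_THREAD_FIRST(wqp);
         t != NULL; t = WQ_THREAD_NEXT(t))
    {
        /* inspect t->name, t->state, ... */
    }
}
#endif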

void thread_alloc_init(void) INIT_ATTR;
struct thread_entry * thread_alloc(void);
void thread_free(struct thread_entry *thread);
void new_thread_id(struct thread_entry *thread);

/* Switch to next runnable thread */
void switch_thread(void);
/* Blocks a thread for at least the specified number of ticks (0 = wait until
 * next tick) */
void sleep_thread(int ticks);
/* Blocks the current thread on a thread queue (< 0 == infinite) */
void block_thread_(struct thread_entry *current, int timeout);

#ifdef HAVE_PRIORITY_SCHEDULING
#define block_thread(thread, timeout, __wqp, bl) \
    ({ struct thread_entry *__t = (thread); \
       __t->wqp = (__wqp); \
       if (!__builtin_constant_p(bl) || (bl)) \
           __t->blocker = (bl); \
       block_thread_(__t, (timeout)); })
#else
#define block_thread(thread, timeout, __wqp, bl...) \
    ({ struct thread_entry *__t = (thread); \
       __t->wqp = (__wqp); \
       block_thread_(__t, (timeout)); })
#endif
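
/* Illustrative caller sketch (not part of the original header; locking and
 * interrupt masking are omitted, and the function name is hypothetical): a
 * blocking primitive typically registers the running thread on the object's
 * wait queue, blocks it, then yields. */
#if 0
static void object_wait(struct __wait_queue *wqp, int timeout)
{
    struct thread_entry *current = __running_self_entry();
    block_thread(current, timeout, wqp, NULL);
    switch_thread();            /* resumes here after wakeup or timeout */
    /* current->retval now carries the blocked operation's result */
}
#endif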

/* Return bit flags for thread wakeup */
#define THREAD_NONE   0x0 /* No thread woken up (exclusive) */
#define THREAD_OK     0x1 /* A thread was woken up */
#define THREAD_SWITCH 0x2 /* Task switch recommended (one or more of
                             higher priority than current were woken) */

/* A convenience function for waking an entire queue of threads. */
unsigned int wait_queue_wake(struct __wait_queue *wqp);

/* Wakeup a thread at the head of a list */
enum wakeup_thread_protocol
{
    WAKEUP_DEFAULT,
    WAKEUP_TRANSFER,
    WAKEUP_RELEASE,
    WAKEUP_TRANSFER_MULTI,
};

unsigned int wakeup_thread_(struct thread_entry *thread
                            IF_PRIO(, enum wakeup_thread_protocol proto));

#ifdef HAVE_PRIORITY_SCHEDULING
#define wakeup_thread(thread, proto) \
    wakeup_thread_((thread), (proto))
#else
/* (No trailing semicolon: one would break 'if/else' call sites.) */
#define wakeup_thread(thread, proto...) \
    wakeup_thread_((thread))
#endif
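
/* Illustrative sketch (not part of the original header; the function name is
 * hypothetical): wake the head waiter and honor the THREAD_SWITCH hint when
 * a higher-priority thread just became runnable. */
#if 0
static void object_release_one(struct __wait_queue *wqp)
{
    struct thread_entry *thread = WQ_THREAD_FIRST(wqp);
    if (thread == NULL)
        return;                          /* THREAD_NONE: nobody waiting */

    unsigned int result = wakeup_thread(thread, WAKEUP_DEFAULT);
    if (result & THREAD_SWITCH)
        switch_thread();                 /* yield to the woken thread */
}
#endif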

#ifdef RB_PROFILE
void profile_thread(void);
#endif

static inline void rtr_queue_init(struct __rtr_queue *rtrp)
{
    lldc_init(rtrp);
}

static inline void rtr_queue_make_first(struct __rtr_queue *rtrp,
                                        struct thread_entry *thread)
{
    rtrp->head = &thread->rtr;
}

static inline void rtr_queue_add(struct __rtr_queue *rtrp,
                                 struct thread_entry *thread)
{
    lldc_insert_last(rtrp, &thread->rtr);
}

static inline void rtr_queue_remove(struct __rtr_queue *rtrp,
                                    struct thread_entry *thread)
{
    lldc_remove(rtrp, &thread->rtr);
}

/* Sentinel: a non-NULL "next" value no real node can carry, marking a
   thread as not being on the tmo list */
#define TMO_NOT_QUEUED (NULL + 1)

static inline bool tmo_is_queued(struct thread_entry *thread)
{
    return thread->tmo.next != TMO_NOT_QUEUED;
}

static inline void tmo_set_dequeued(struct thread_entry *thread)
{
    thread->tmo.next = TMO_NOT_QUEUED;
}

static inline void tmo_queue_init(struct __tmo_queue *tmop)
{
    ll_init(tmop);
}

static inline void tmo_queue_expire(struct __tmo_queue *tmop,
                                    struct thread_entry *prev,
                                    struct thread_entry *thread)
{
    ll_remove_next(tmop, prev ? &prev->tmo : NULL);
    tmo_set_dequeued(thread);
}

static inline void tmo_queue_remove(struct __tmo_queue *tmop,
                                    struct thread_entry *thread)
{
    if (tmo_is_queued(thread))
    {
        ll_remove(tmop, &thread->tmo);
        tmo_set_dequeued(thread);
    }
}

static inline void tmo_queue_register(struct __tmo_queue *tmop,
                                      struct thread_entry *thread)
{
    if (!tmo_is_queued(thread))
        ll_insert_last(tmop, &thread->tmo);
}
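
/* Illustrative sketch (not part of the original header): a tick handler can
 * walk the NULL-terminated tmo list with a trailing 'prev' pointer, which is
 * exactly the shape tmo_queue_expire's ll_remove_next call expects. The
 * function name is hypothetical; TIME_AFTER is assumed to be the usual
 * tick-comparison macro from kernel.h. */
#if 0
static void expire_timeouts(struct __tmo_queue *tmop, long tick)
{
    struct thread_entry *prev = NULL;
    struct thread_entry *thread = TMO_THREAD_FIRST(tmop);
    while (thread != NULL)
    {
        struct thread_entry *next = TMO_THREAD_NEXT(thread);
        if (TIME_AFTER(tick, thread->tmo_tick))
            tmo_queue_expire(tmop, prev, thread); /* unlink; 'prev' stays */
        else
            prev = thread;                        /* keep; advance 'prev' */
        thread = next;
    }
}
#endif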

static inline void wait_queue_init(struct __wait_queue *wqp)
{
    lld_init(wqp);
}

static inline void wait_queue_register(struct thread_entry *thread)
{
    lld_insert_last(thread->wqp, &thread->wq);
}

static inline struct __wait_queue *
    wait_queue_ptr(struct thread_entry *thread)
{
    return thread->wqp;
}

static inline struct __wait_queue *
    wait_queue_remove(struct thread_entry *thread)
{
    struct __wait_queue *wqp = thread->wqp;
    thread->wqp = NULL;
    lld_remove(wqp, &thread->wq);
    return wqp;
}

static inline struct __wait_queue *
    wait_queue_try_remove(struct thread_entry *thread)
{
    struct __wait_queue *wqp = thread->wqp;
    if (wqp)
    {
        thread->wqp = NULL;
        lld_remove(wqp, &thread->wq);
    }

    return wqp;
}
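
/* Illustrative sketch (not part of the original header): a timeout path uses
 * the "try" variant because a concurrent waker may already have detached the
 * thread; whichever side still observes a non-NULL wqp wins the race. The
 * function name and status value are hypothetical. */
#if 0
static void handle_wait_timeout(struct thread_entry *thread)
{
    if (wait_queue_try_remove(thread) != NULL)
        thread->retval = -1;    /* e.g. a "timed out" status code */
}
#endif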

static inline void blocker_init(struct blocker *bl)
{
    bl->thread = NULL;
#ifdef HAVE_PRIORITY_SCHEDULING
    bl->priority = PRIORITY_IDLE;
#endif
}

static inline void blocker_splay_init(struct blocker_splay *blsplay)
{
    blocker_init(&blsplay->blocker);
#ifdef HAVE_PRIORITY_SCHEDULING
    threadbit_clear(&blsplay->mask);
#endif
    corelock_init(&blsplay->cl);
}

static inline long get_tmo_tick(struct thread_entry *thread)
{
    return thread->tmo_tick;
}

#endif /* THREAD_INTERNAL_H */