A modern Music Player Daemon based on Rockbox, the open-source, high-quality audio player.
libadwaita audio rust zig deno mpris rockbox mpd
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2002 by Ulf Ralberg
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "kernel-internal.h"
#include "system.h"

/* Unless otherwise defined, do nothing */
#ifndef YIELD_KERNEL_HOOK
#define YIELD_KERNEL_HOOK() false
#endif
#ifndef SLEEP_KERNEL_HOOK
#define SLEEP_KERNEL_HOOK(ticks) false
#endif
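
#if 0 /* Illustrative sketch only -- not part of the original file. */
/* A build that must not reschedule (a minimal bootloader, say) would supply
 * its own hooks from a header included above, so the defaults never kick in.
 * The only contract is that a hook returning true means the call has been
 * handled and sleep()/yield() below should return immediately.  Hypothetical
 * example: spin on the tick counter instead of suspending the thread. */
static inline bool example_sleep_hook(unsigned ticks)
{
    long end = current_tick + ticks;
    while (TIME_BEFORE(current_tick, end))
        ;                           /* busy-wait; no scheduler involved */
    return true;                    /* report the sleep as handled */
}
#define SLEEP_KERNEL_HOOK(ticks) example_sleep_hook(ticks)
#endif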

const char __main_thread_name_str[] = "main";

/* Array indexing is more efficient in inlines if the elements are a native
   word size (100s of bytes fewer instructions) */

#if NUM_CORES > 1
static struct core_entry __core_entries[NUM_CORES] IBSS_ATTR;
struct core_entry *__cores[NUM_CORES] IBSS_ATTR;
#else
struct core_entry __cores[NUM_CORES] IBSS_ATTR;
#endif

static struct thread_entry __thread_entries[MAXTHREADS] IBSS_ATTR;
struct thread_entry *__threads[MAXTHREADS] IBSS_ATTR;


/** Internal functions **/

/*---------------------------------------------------------------------------
 * Find an empty thread slot or NULL if none found. The slot returned will
 * be locked on multicore.
 *---------------------------------------------------------------------------
 */
static struct threadalloc
{
    threadbit_t avail;
#if NUM_CORES > 1
    struct corelock cl;
#endif
} threadalloc SHAREDBSS_ATTR;

/*---------------------------------------------------------------------------
 * Initialize the thread allocator
 *---------------------------------------------------------------------------
 */
void thread_alloc_init(void)
{
    corelock_init(&threadalloc.cl);

    for (unsigned int core = 0; core < NUM_CORES; core++)
    {
    #if NUM_CORES > 1
        struct core_entry *c = &__core_entries[core];
        __cores[core] = c;
    #else
        struct core_entry *c = &__cores[core];
    #endif
        rtr_queue_init(&c->rtr);
        corelock_init(&c->rtr_cl);
        tmo_queue_init(&c->tmo);
        c->next_tmo_check = current_tick; /* Something not in the past */
    }

    for (unsigned int slotnum = 0; slotnum < MAXTHREADS; slotnum++)
    {
        struct thread_entry *t = &__thread_entries[slotnum];
        __threads[slotnum] = t;
        corelock_init(&t->waiter_cl);
        corelock_init(&t->slot_cl);
        t->id = THREAD_ID_INIT(slotnum);
        threadbit_set_bit(&threadalloc.avail, slotnum);
    }
}

/*---------------------------------------------------------------------------
 * Allocate a thread slot
 *---------------------------------------------------------------------------
 */
struct thread_entry * thread_alloc(void)
{
    struct thread_entry *thread = NULL;

    corelock_lock(&threadalloc.cl);

    unsigned int slotnum = threadbit_ffs(&threadalloc.avail);
    if (slotnum < MAXTHREADS)
    {
        threadbit_clear_bit(&threadalloc.avail, slotnum);
        thread = __threads[slotnum];
    }

    corelock_unlock(&threadalloc.cl);

    return thread;
}

/*---------------------------------------------------------------------------
 * Free the thread slot of 'thread'
 *---------------------------------------------------------------------------
 */
void thread_free(struct thread_entry *thread)
{
    corelock_lock(&threadalloc.cl);
    threadbit_set_bit(&threadalloc.avail, THREAD_ID_SLOT(thread->id));
    corelock_unlock(&threadalloc.cl);
}

/*---------------------------------------------------------------------------
 * Assign the thread slot a new ID. Version is 0x00000100..0xffffff00.
 *---------------------------------------------------------------------------
 */
void new_thread_id(struct thread_entry *thread)
{
    uint32_t id = thread->id + (1u << THREAD_ID_VERSION_SHIFT);

    /* If wrapped to 0, make it 1 */
    if ((id & THREAD_ID_VERSION_MASK) == 0)
        id |= (1u << THREAD_ID_VERSION_SHIFT);

    thread->id = id;
}
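
#if 0 /* Illustrative sketch only -- not part of the original file. */
/* Roughly how the scheduler proper is expected to drive the slot allocator
 * above (names here are hypothetical): take a free slot when creating a
 * thread, and on removal bump the ID version before releasing the slot so
 * that stale thread IDs held elsewhere no longer match. */
static struct thread_entry * example_create(void)
{
    struct thread_entry *thread = thread_alloc();
    if (thread == NULL)
        return NULL;                /* all MAXTHREADS slots in use */

    /* ...set up stack, context and initial state here... */
    return thread;
}

static void example_remove(struct thread_entry *thread)
{
    new_thread_id(thread);          /* invalidate outstanding IDs */
    thread_free(thread);            /* slot becomes available again */
}
#endif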

/*---------------------------------------------------------------------------
 * Wake up an entire queue of threads - returns bitwise-or of return bitmask
 * from each operation or THREAD_NONE if nothing was awakened.
 *---------------------------------------------------------------------------
 */
unsigned int wait_queue_wake(struct __wait_queue *wqp)
{
    unsigned result = THREAD_NONE;
    struct thread_entry *thread;

    while ((thread = WQ_THREAD_FIRST(wqp)))
        result |= wakeup_thread(thread, WAKEUP_DEFAULT);

    return result;
}


/** Public functions **/

#ifdef RB_PROFILE
void profile_thread(void)
{
    profstart(THREAD_ID_SLOT(__running_self_entry()->id));
}
#endif

/*---------------------------------------------------------------------------
 * Return the thread id of the calling thread
 * --------------------------------------------------------------------------
 */
unsigned int thread_self(void)
{
    return __running_self_entry()->id;
}

/*---------------------------------------------------------------------------
 * Suspends a thread's execution for at least the specified number of ticks.
 *
 * May result in CPU core entering wait-for-interrupt mode if no other thread
 * may be scheduled.
 *
 * NOTE: sleep(0) sleeps until the end of the current tick
 *       sleep(n) that doesn't result in rescheduling:
 *                      n <= ticks suspended < n + 1
 *       n to n+1 is a lower bound. Other factors may affect the actual time
 *       a thread is suspended before it runs again.
 *---------------------------------------------------------------------------
 */
unsigned sleep(unsigned ticks)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (SLEEP_KERNEL_HOOK(ticks))
        return 0; /* Handled */

    disable_irq();
    sleep_thread(ticks);
    switch_thread();
    return 0;
}

/*---------------------------------------------------------------------------
 * Elects another thread to run or, if no other thread may be made ready to
 * run, immediately returns control back to the calling thread.
 *---------------------------------------------------------------------------
 */
void yield(void)
{
    /* In certain situations, certain bootloaders in particular, a normal
     * threading call is inappropriate. */
    if (YIELD_KERNEL_HOOK())
        return; /* Handled */

    switch_thread();
}
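
#if 0 /* Illustrative sketch only -- not part of the original file. */
/* A typical cooperative thread body (hypothetical): yield() lets other
 * ready threads run between chunks of work without suspending the caller,
 * while sleep(n) suspends for at least n ticks (HZ ticks make one second). */
static void example_thread(void)
{
    while (1)
    {
        /* ...do one bounded chunk of work... */
        yield();                    /* give other ready threads a turn */

        sleep(HZ / 10);             /* idle for roughly 100 ms */
    }
}
#endif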


/** Debug screen stuff **/

void format_thread_name(char *buf, size_t bufsize,
                        const struct thread_entry *thread)
{
    const char *name = thread->name;
    if (!name)
        name = "";

    const char *fmt = *name ? "%s" : "%s%08lX";
    snprintf(buf, bufsize, fmt, name, thread->id);
}

#ifndef HAVE_SDL_THREADS
/*---------------------------------------------------------------------------
 * Returns the maximum percentage of the stack ever used during runtime.
 *---------------------------------------------------------------------------
 */
static unsigned int stack_usage(uintptr_t *stackptr, size_t stack_size)
{
    unsigned int usage = 0;
    unsigned int stack_words = stack_size / sizeof (uintptr_t);

    for (unsigned int i = 0; i < stack_words; i++)
    {
        if (stackptr[i] != DEADBEEF)
        {
            usage = (stack_words - i) * 100 / stack_words;
            break;
        }
    }

    return usage;
}
#endif /* HAVE_SDL_THREADS */

#if NUM_CORES > 1
int core_get_debug_info(unsigned int core, struct core_debug_info *infop)
{
    extern uintptr_t * const idle_stacks[NUM_CORES];

    if (core >= NUM_CORES || !infop)
        return -1;

    infop->idle_stack_usage = stack_usage(idle_stacks[core], IDLE_STACK_SIZE);
    return 1;
}
#endif /* NUM_CORES > 1 */

int thread_get_debug_info(unsigned int thread_id,
                          struct thread_debug_info *infop)
{
    static const char status_chars[THREAD_NUM_STATES+1] =
    {
        [0 ... THREAD_NUM_STATES] = '?',
        [STATE_RUNNING]           = 'R',
        [STATE_BLOCKED]           = 'B',
        [STATE_SLEEPING]          = 'S',
        [STATE_BLOCKED_W_TMO]     = 'T',
        [STATE_FROZEN]            = 'F',
        [STATE_KILLED]            = 'K',
    };

    if (!infop)
        return -1;

    unsigned int slotnum = THREAD_ID_SLOT(thread_id);
    if (slotnum >= MAXTHREADS)
        return -1;

    struct thread_entry *thread = __thread_slot_entry(slotnum);

    int oldlevel = disable_irq_save();
    corelock_lock(&threadalloc.cl);
    corelock_lock(&thread->slot_cl);

    unsigned int state = thread->state;

    int ret = 0;

    if (threadbit_test_bit(&threadalloc.avail, slotnum) == 0)
    {
        bool cpu_boost = false;
#ifdef HAVE_SCHEDULER_BOOSTCTRL
        cpu_boost = thread->cpu_boost;
#endif
#if !defined(HAVE_SDL_THREADS) && !defined(CTRU)
        infop->stack_usage = stack_usage(thread->stack, thread->stack_size);

        size_t stack_used_current =
            thread->stack_size - (thread->context.sp - (uintptr_t)thread->stack);

        infop->stack_usage_cur = stack_used_current * 100 / thread->stack_size;
#endif
#if NUM_CORES > 1
        infop->core = thread->core;
#endif
#ifdef HAVE_PRIORITY_SCHEDULING
        infop->base_priority = thread->base_priority;
        infop->current_priority = thread->priority;
#endif

        snprintf(infop->statusstr, sizeof (infop->statusstr), "%c%c",
                 cpu_boost ? '+' : (state == STATE_RUNNING ? '*' : ' '),
                 status_chars[state]);

        format_thread_name(infop->name, sizeof (infop->name), thread);
        ret = 1;
    }

    corelock_unlock(&thread->slot_cl);
    corelock_unlock(&threadalloc.cl);
    restore_irq(oldlevel);

    return ret;
}
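
#if 0 /* Illustrative sketch only -- not part of the original file. */
/* Roughly how a debug screen might list threads (hypothetical helper):
 * thread_get_debug_info() only looks at the slot bits of the ID it is given,
 * so iterating raw slot numbers is enough for a full listing.  It returns 1
 * and fills *infop for occupied slots, 0 for free ones and -1 on bad input. */
static void example_dump_threads(void)
{
    struct thread_debug_info info;

    for (unsigned int slot = 0; slot < MAXTHREADS; slot++)
    {
        if (thread_get_debug_info(slot, &info) > 0)
            DEBUGF("%s %s\n", info.statusstr, info.name);
    }
}
#endif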