// Serenity Operating System
1/*
2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this
9 * list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <AK/Demangle.h>
28#include <AK/StringBuilder.h>
29#include <Kernel/Arch/i386/CPU.h>
30#include <Kernel/FileSystem/FileDescription.h>
31#include <Kernel/KSyms.h>
32#include <Kernel/Process.h>
33#include <Kernel/Profiling.h>
34#include <Kernel/Scheduler.h>
35#include <Kernel/Thread.h>
36#include <Kernel/VM/MemoryManager.h>
37#include <Kernel/VM/PageDirectory.h>
38#include <LibC/signal_numbers.h>
39#include <LibELF/ELFLoader.h>
40
41//#define SIGNAL_DEBUG
42//#define THREAD_DEBUG
43
44namespace Kernel {
45
46Thread* Thread::current;
47
48static FPUState s_clean_fpu_state;
49
50u16 thread_specific_selector()
51{
52 static u16 selector;
53 if (!selector) {
54 selector = gdt_alloc_entry();
55 auto& descriptor = get_gdt_entry(selector);
56 descriptor.dpl = 3;
57 descriptor.segment_present = 1;
58 descriptor.granularity = 0;
59 descriptor.zero = 0;
60 descriptor.operation_size = 1;
61 descriptor.descriptor_type = 1;
62 descriptor.type = 2;
63 }
64 return selector;
65}
66
67Descriptor& thread_specific_descriptor()
68{
69 return get_gdt_entry(thread_specific_selector());
70}
71
72HashTable<Thread*>& thread_table()
73{
74 ASSERT_INTERRUPTS_DISABLED();
75 static HashTable<Thread*>* table;
76 if (!table)
77 table = new HashTable<Thread*>;
78 return *table;
79}
80
// Constructs a new thread inside `process`: assigns a TID, sets up a clean
// FPU image, initializes the TSS with the proper ring-0/ring-3 selectors,
// allocates a kernel stack, and (for non-colonel processes) registers the
// thread with the global table and the scheduler.
Thread::Thread(Process& process)
    : m_process(process)
    , m_name(process.name())
{
    if (m_process.m_thread_count == 0) {
        // First thread gets TID == PID
        m_tid = process.pid();
    } else {
        m_tid = Process::allocate_pid();
    }
    process.m_thread_count++;
#ifdef THREAD_DEBUG
    dbg() << "Created new thread " << process.name() << "(" << process.pid() << ":" << m_tid << ")";
#endif
    set_default_signal_dispositions();
    // FPU state buffer must be 16-byte aligned for fxsave/fxrstor.
    m_fpu_state = (FPUState*)kmalloc_aligned(sizeof(FPUState), 16);
    reset_fpu_state();
    memset(&m_tss, 0, sizeof(m_tss));
    // An I/O map base at/after the TSS limit means "no I/O permission bitmap".
    m_tss.iomapbase = sizeof(TSS32);

    // Only IF is set when a process boots.
    m_tss.eflags = 0x0202;
    u16 cs, ds, ss, gs;

    if (m_process.is_ring0()) {
        // Kernel code/data selectors; ring-0 threads have no TLS segment.
        cs = 0x08;
        ds = 0x10;
        ss = 0x10;
        gs = 0;
    } else {
        // Userspace selectors with RPL=3; %gs points at thread-specific data.
        cs = 0x1b;
        ds = 0x23;
        ss = 0x23;
        gs = thread_specific_selector() | 3;
    }

    m_tss.ds = ds;
    m_tss.es = ds;
    m_tss.fs = ds;
    m_tss.gs = gs;
    m_tss.ss = ss;
    m_tss.cs = cs;

    m_tss.cr3 = m_process.page_directory().cr3();

    m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), Region::Access::Read | Region::Access::Write, false, true);
    m_kernel_stack_region->set_stack(true);
    m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
    // Round the stack top down to keep it 8-byte aligned.
    m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;

    if (m_process.is_ring0()) {
        m_tss.esp = m_kernel_stack_top;
    } else {
        // Ring 3 processes get a separate stack for ring 0.
        // The ring 3 stack will be assigned by exec().
        m_tss.ss0 = 0x10;
        m_tss.esp0 = m_kernel_stack_top;
    }

    // The colonel process (pid 0) is scheduled specially and is not
    // registered in the global thread table.
    if (m_process.pid() != 0) {
        InterruptDisabler disabler;
        thread_table().set(this);
        Scheduler::init_thread(*this);
    }
}
146
// Tears down a thread: frees the FPU state buffer, unregisters from the
// global thread table, releases the per-thread GDT selector (if any), and
// drops the owning process's thread count.
Thread::~Thread()
{
    kfree_aligned(m_fpu_state);
    {
        // Remove from the global table atomically w.r.t. interrupts.
        InterruptDisabler disabler;
        thread_table().remove(this);
    }

    if (selector())
        gdt_free_entry(selector());

    ASSERT(m_process.m_thread_count);
    m_process.m_thread_count--;
}
161
162void Thread::unblock()
163{
164 if (current == this) {
165 set_state(Thread::Running);
166 return;
167 }
168 ASSERT(m_state != Thread::Runnable && m_state != Thread::Running);
169 set_state(Thread::Runnable);
170}
171
// Marks this thread for death. The thread is not destroyed here; it either
// gets unblocked so it can unwind out of the kernel, or — if it's running
// purely in userspace — is moved to Dying immediately so it is never
// scheduled again.
void Thread::set_should_die()
{
    if (m_should_die) {
#ifdef THREAD_DEBUG
        dbg() << *this << " Should already die";
#endif
        return;
    }
    InterruptDisabler disabler;

    // Remember that we should die instead of returning to
    // the userspace.
    m_should_die = true;

    if (is_blocked()) {
        ASSERT(in_kernel());
        ASSERT(m_blocker != nullptr);
        // We're blocked in the kernel.
        m_blocker->set_interrupted_by_death();
        unblock();
    } else if (!in_kernel()) {
        // We're executing in userspace (and we're clearly
        // not the current thread). No need to unwind, so
        // set the state to dying right away. This also
        // makes sure we won't be scheduled anymore.
        set_state(Thread::State::Dying);
    }
}
200
// Called by a thread on itself to honor a pending set_should_die():
// releases the process big lock if held, enters the Dying state, and
// switches away so the finalizer can reap us. Must never resume normally.
void Thread::die_if_needed()
{
    ASSERT(current == this);

    if (!m_should_die)
        return;

    unlock_process_if_locked();

    InterruptDisabler disabler;
    set_state(Thread::State::Dying);

    // If the scheduler is already mid-switch, it will move away from us on
    // its own; otherwise force a context switch now.
    if (!Scheduler::is_active())
        Scheduler::pick_next_and_switch_now();
}
216
217void Thread::yield_without_holding_big_lock()
218{
219 bool did_unlock = unlock_process_if_locked();
220 Scheduler::yield();
221 if (did_unlock)
222 relock_process();
223}
224
225bool Thread::unlock_process_if_locked()
226{
227 return process().big_lock().force_unlock_if_locked();
228}
229
230void Thread::relock_process()
231{
232 process().big_lock().lock();
233}
234
235u64 Thread::sleep(u32 ticks)
236{
237 ASSERT(state() == Thread::Running);
238 u64 wakeup_time = g_uptime + ticks;
239 auto ret = Thread::current->block<Thread::SleepBlocker>(wakeup_time);
240 if (wakeup_time > g_uptime) {
241 ASSERT(ret != Thread::BlockResult::WokeNormally);
242 }
243 return wakeup_time;
244}
245
246u64 Thread::sleep_until(u64 wakeup_time)
247{
248 ASSERT(state() == Thread::Running);
249 auto ret = Thread::current->block<Thread::SleepBlocker>(wakeup_time);
250 if (wakeup_time > g_uptime)
251 ASSERT(ret != Thread::BlockResult::WokeNormally);
252 return wakeup_time;
253}
254
255const char* Thread::state_string() const
256{
257 switch (state()) {
258 case Thread::Invalid:
259 return "Invalid";
260 case Thread::Runnable:
261 return "Runnable";
262 case Thread::Running:
263 return "Running";
264 case Thread::Dying:
265 return "Dying";
266 case Thread::Dead:
267 return "Dead";
268 case Thread::Stopped:
269 return "Stopped";
270 case Thread::Skip1SchedulerPass:
271 return "Skip1";
272 case Thread::Skip0SchedulerPasses:
273 return "Skip0";
274 case Thread::Queued:
275 return "Queued";
276 case Thread::Blocked:
277 ASSERT(m_blocker != nullptr);
278 return m_blocker->state_string();
279 }
280 kprintf("Thread::state_string(): Invalid state: %u\n", state());
281 ASSERT_NOT_REACHED();
282 return nullptr;
283}
284
// Runs on the finalizer thread: marks this thread Dead, hands the exit
// value to any joining thread, and optionally dumps a backtrace (set for
// crash signals via set_dump_backtrace_on_finalization()).
void Thread::finalize()
{
    ASSERT(current == g_finalizer);

#ifdef THREAD_DEBUG
    dbg() << "Finalizing thread " << *this;
#endif
    set_state(Thread::State::Dead);

    if (m_joiner) {
        ASSERT(m_joiner->m_joinee == this);
        static_cast<JoinBlocker*>(m_joiner->m_blocker)->set_joinee_exit_value(m_exit_value);
        m_joiner->m_joinee = nullptr;
        // NOTE: We clear the joiner pointer here as well, to be tidy.
        m_joiner = nullptr;
    }

    if (m_dump_backtrace_on_finalization)
        dbg() << backtrace_impl();
}
305
// Runs on the finalizer thread: snapshots all Dying threads with interrupts
// disabled, then finalizes and deletes each. ~Thread decrements the owning
// process's m_thread_count, so checking it after `delete` correctly detects
// when the last thread is gone and the process itself can be finalized.
void Thread::finalize_dying_threads()
{
    ASSERT(current == g_finalizer);
    Vector<Thread*, 32> dying_threads;
    {
        InterruptDisabler disabler;
        for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
            dying_threads.append(&thread);
            return IterationDecision::Continue;
        });
    }
    for (auto* thread : dying_threads) {
        auto& process = thread->process();
        thread->finalize();
        delete thread;
        if (process.m_thread_count == 0)
            process.finalize();
    }
}
325
326bool Thread::tick()
327{
328 ++m_ticks;
329 if (tss().cs & 3)
330 ++m_process.m_ticks_in_user;
331 else
332 ++m_process.m_ticks_in_kernel;
333 return --m_ticks_left;
334}
335
// Queues `signal` for delivery to this thread. Signals whose effective
// disposition is Ignore are dropped immediately; everything else only sets
// the pending bit here — actual dispatch happens later via dispatch_signal().
// `sender` is used solely for debug logging.
void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
{
    ASSERT(signal < 32);
    InterruptDisabler disabler;

    // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
    if (should_ignore_signal(signal)) {
#ifdef SIGNAL_DEBUG
        dbg() << "signal " << signal << " was ignored by " << process();
#endif
        return;
    }

#ifdef SIGNAL_DEBUG
    if (sender)
        dbgprintf("signal: %s(%u) sent %d to %s(%u)\n", sender->name().characters(), sender->pid(), signal, process().name().characters(), pid());
    else
        dbgprintf("signal: kernel sent %d to %s(%u)\n", signal, process().name().characters(), pid());
#endif

    // Pending signals are a bitmask; bit (signal - 1) marks signal number `signal`.
    m_pending_signals |= 1 << (signal - 1);
}
358
359// Certain exceptions, such as SIGSEGV and SIGILL, put a
360// thread into a state where the signal handler must be
361// invoked immediately, otherwise it will continue to fault.
362// This function should be used in an exception handler to
363// ensure that when the thread resumes, it's executing in
364// the appropriate signal handler.
365void Thread::send_urgent_signal_to_self(u8 signal)
366{
367 // FIXME: because of a bug in dispatch_signal we can't
368 // setup a signal while we are the current thread. Because of
369 // this we use a work-around where we send the signal and then
370 // block, allowing the scheduler to properly dispatch the signal
371 // before the thread is next run.
372 send_signal(signal, &process());
373 (void)block<SemiPermanentBlocker>(SemiPermanentBlocker::Reason::Signal);
374}
375
376bool Thread::has_unmasked_pending_signals() const
377{
378 return m_pending_signals & ~m_signal_mask;
379}
380
381ShouldUnblockThread Thread::dispatch_one_pending_signal()
382{
383 ASSERT_INTERRUPTS_DISABLED();
384 u32 signal_candidates = m_pending_signals & ~m_signal_mask;
385 ASSERT(signal_candidates);
386
387 u8 signal = 1;
388 for (; signal < 32; ++signal) {
389 if (signal_candidates & (1 << (signal - 1))) {
390 break;
391 }
392 }
393 return dispatch_signal(signal);
394}
395
// What the kernel does with a signal when no userspace handler is installed.
enum class DefaultSignalAction {
    Terminate,
    Ignore,
    DumpCore,
    Stop,
    Continue,
};
403
// Maps a signal number to its default disposition, following the
// conventional POSIX default-action table (see signal(7)).
DefaultSignalAction default_signal_action(u8 signal)
{
    ASSERT(signal && signal < NSIG);

    switch (signal) {
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
    case SIGPWR:
        return DefaultSignalAction::Terminate;
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
        return DefaultSignalAction::Ignore;
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    case SIGCONT:
        return DefaultSignalAction::Continue;
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    }
    ASSERT_NOT_REACHED();
}
448
449bool Thread::should_ignore_signal(u8 signal) const
450{
451 ASSERT(signal < 32);
452 auto& action = m_signal_action_data[signal];
453 if (action.handler_or_sigaction.is_null())
454 return default_signal_action(signal) == DefaultSignalAction::Ignore;
455 if (action.handler_or_sigaction.as_ptr() == SIG_IGN)
456 return true;
457 return false;
458}
459
460bool Thread::has_signal_handler(u8 signal) const
461{
462 ASSERT(signal < 32);
463 auto& action = m_signal_action_data[signal];
464 return !action.handler_or_sigaction.is_null();
465}
466
467static void push_value_on_user_stack(u32* stack, u32 data)
468{
469 *stack -= 4;
470 copy_to_user((u32*)*stack, &data);
471}
472
473ShouldUnblockThread Thread::dispatch_signal(u8 signal)
474{
475 ASSERT_INTERRUPTS_DISABLED();
476 ASSERT(signal > 0 && signal <= 32);
477 ASSERT(!process().is_ring0());
478
479#ifdef SIGNAL_DEBUG
480 kprintf("dispatch_signal %s(%u) <- %u\n", process().name().characters(), pid(), signal);
481#endif
482
483 auto& action = m_signal_action_data[signal];
484 // FIXME: Implement SA_SIGINFO signal handlers.
485 ASSERT(!(action.flags & SA_SIGINFO));
486
487 // Mark this signal as handled.
488 m_pending_signals &= ~(1 << (signal - 1));
489
490 if (signal == SIGSTOP) {
491 m_stop_signal = SIGSTOP;
492 set_state(Stopped);
493 return ShouldUnblockThread::No;
494 }
495
496 if (signal == SIGCONT && state() == Stopped)
497 set_state(Runnable);
498
499 auto handler_vaddr = action.handler_or_sigaction;
500 if (handler_vaddr.is_null()) {
501 switch (default_signal_action(signal)) {
502 case DefaultSignalAction::Stop:
503 m_stop_signal = signal;
504 set_state(Stopped);
505 return ShouldUnblockThread::No;
506 case DefaultSignalAction::DumpCore:
507 process().for_each_thread([](auto& thread) {
508 thread.set_dump_backtrace_on_finalization();
509 return IterationDecision::Continue;
510 });
511 [[fallthrough]];
512 case DefaultSignalAction::Terminate:
513 m_process.terminate_due_to_signal(signal);
514 return ShouldUnblockThread::No;
515 case DefaultSignalAction::Ignore:
516 ASSERT_NOT_REACHED();
517 case DefaultSignalAction::Continue:
518 return ShouldUnblockThread::Yes;
519 }
520 ASSERT_NOT_REACHED();
521 }
522
523 if (handler_vaddr.as_ptr() == SIG_IGN) {
524#ifdef SIGNAL_DEBUG
525 kprintf("%s(%u) ignored signal %u\n", process().name().characters(), pid(), signal);
526#endif
527 return ShouldUnblockThread::Yes;
528 }
529
530 ProcessPagingScope paging_scope(m_process);
531
532 u32 old_signal_mask = m_signal_mask;
533 u32 new_signal_mask = action.mask;
534 if (action.flags & SA_NODEFER)
535 new_signal_mask &= ~(1 << (signal - 1));
536 else
537 new_signal_mask |= 1 << (signal - 1);
538
539 m_signal_mask |= new_signal_mask;
540
541 auto setup_stack = [&]<typename ThreadState>(ThreadState state, u32 * stack)
542 {
543 u32 old_esp = *stack;
544 u32 ret_eip = state.eip;
545 u32 ret_eflags = state.eflags;
546
547 // Align the stack to 16 bytes.
548 // Note that we push 56 bytes (4 * 14) on to the stack,
549 // so we need to account for this here.
550 u32 stack_alignment = (*stack - 56) % 16;
551 *stack -= stack_alignment;
552
553 push_value_on_user_stack(stack, ret_eflags);
554
555 push_value_on_user_stack(stack, ret_eip);
556 push_value_on_user_stack(stack, state.eax);
557 push_value_on_user_stack(stack, state.ecx);
558 push_value_on_user_stack(stack, state.edx);
559 push_value_on_user_stack(stack, state.ebx);
560 push_value_on_user_stack(stack, old_esp);
561 push_value_on_user_stack(stack, state.ebp);
562 push_value_on_user_stack(stack, state.esi);
563 push_value_on_user_stack(stack, state.edi);
564
565 // PUSH old_signal_mask
566 push_value_on_user_stack(stack, old_signal_mask);
567
568 push_value_on_user_stack(stack, signal);
569 push_value_on_user_stack(stack, handler_vaddr.get());
570 push_value_on_user_stack(stack, 0); //push fake return address
571
572 ASSERT((*stack % 16) == 0);
573 };
574
575 // We now place the thread state on the userspace stack.
576 // Note that when we are in the kernel (ie. blocking) we cannot use the
577 // tss, as that will contain kernel state; instead, we use a RegisterState.
578 // Conversely, when the thread isn't blocking the RegisterState may not be
579 // valid (fork, exec etc) but the tss will, so we use that instead.
580 if (!in_kernel()) {
581 u32* stack = &m_tss.esp;
582 setup_stack(m_tss, stack);
583
584 Scheduler::prepare_to_modify_tss(*this);
585 m_tss.cs = 0x1b;
586 m_tss.ds = 0x23;
587 m_tss.es = 0x23;
588 m_tss.fs = 0x23;
589 m_tss.gs = thread_specific_selector() | 3;
590 m_tss.eip = g_return_to_ring3_from_signal_trampoline.get();
591 // FIXME: This state is such a hack. It avoids trouble if 'current' is the process receiving a signal.
592 set_state(Skip1SchedulerPass);
593 } else {
594 auto& regs = get_register_dump_from_stack();
595 u32* stack = ®s.userspace_esp;
596 setup_stack(regs, stack);
597 regs.eip = g_return_to_ring3_from_signal_trampoline.get();
598 }
599
600#ifdef SIGNAL_DEBUG
601 kprintf("signal: Okay, %s(%u) {%s} has been primed with signal handler %w:%x\n", process().name().characters(), pid(), state_string(), m_tss.cs, m_tss.eip);
602#endif
603 return ShouldUnblockThread::Yes;
604}
605
// Resets all signal dispositions to "no handler installed" (null), then
// explicitly ignores SIGCHLD and SIGWINCH.
void Thread::set_default_signal_dispositions()
{
    // FIXME: Set up all the right default actions. See signal(7).
    memset(&m_signal_action_data, 0, sizeof(m_signal_action_data));
    m_signal_action_data[SIGCHLD].handler_or_sigaction = VirtualAddress(SIG_IGN);
    m_signal_action_data[SIGWINCH].handler_or_sigaction = VirtualAddress(SIG_IGN);
}
613
614void Thread::push_value_on_stack(uintptr_t value)
615{
616 m_tss.esp -= 4;
617 uintptr_t* stack_ptr = (uintptr_t*)m_tss.esp;
618 copy_to_user(stack_ptr, &value);
619}
620
RegisterState& Thread::get_register_dump_from_stack()
{
    // The RegisterState saved on kernel entry sits at the very top of this
    // thread's kernel stack, immediately below kernel_stack_top().
    return *(RegisterState*)(kernel_stack_top() - sizeof(RegisterState));
}
628
// Builds the initial userspace stack for a program's main thread:
// copies argv/envp string contents and pointer arrays into the new stack
// region, then pushes envp, argv, argc and a fake return address.
// Returns the resulting esp.
u32 Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment)
{
    auto* region = m_process.allocate_region(VirtualAddress(), default_userspace_stack_size, "Stack (Main thread)", PROT_READ | PROT_WRITE, false);
    ASSERT(region);
    region->set_stack(true);

    u32 new_esp = region->vaddr().offset(default_userspace_stack_size).get();

    // FIXME: This is weird, we put the argument contents at the base of the stack,
    // and the argument pointers at the top? Why?
    char* stack_base = (char*)region->vaddr().get();
    int argc = arguments.size();
    char** argv = (char**)stack_base;
    char** env = argv + arguments.size() + 1;
    // String data starts right after the two null-terminated pointer arrays.
    char* bufptr = stack_base + (sizeof(char*) * (arguments.size() + 1)) + (sizeof(char*) * (environment.size() + 1));

    // The region belongs to userspace, so disable SMAP while writing to it.
    SmapDisabler disabler;

    for (size_t i = 0; i < arguments.size(); ++i) {
        argv[i] = bufptr;
        memcpy(bufptr, arguments[i].characters(), arguments[i].length());
        bufptr += arguments[i].length();
        *(bufptr++) = '\0';
    }
    argv[arguments.size()] = nullptr;

    for (size_t i = 0; i < environment.size(); ++i) {
        env[i] = bufptr;
        memcpy(bufptr, environment[i].characters(), environment[i].length());
        bufptr += environment[i].length();
        *(bufptr++) = '\0';
    }
    env[environment.size()] = nullptr;

    auto push_on_new_stack = [&new_esp](u32 value) {
        new_esp -= 4;
        u32* stack_ptr = (u32*)new_esp;
        *stack_ptr = value;
    };

    // NOTE: The stack needs to be 16-byte aligned.
    push_on_new_stack((uintptr_t)env);
    push_on_new_stack((uintptr_t)argv);
    push_on_new_stack((uintptr_t)argc);
    push_on_new_stack(0); // fake return address for _start
    return new_esp;
}
676
// Creates a copy of this thread inside `process` (used by fork()).
// Signal dispositions, signal mask, FPU state and the TLS pointer are
// inherited; the new thread gets its own TID, TSS and kernel stack via
// the Thread constructor.
Thread* Thread::clone(Process& process)
{
    auto* clone = new Thread(process);
    memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
    clone->m_signal_mask = m_signal_mask;
    memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
    clone->m_thread_specific_data = m_thread_specific_data;
    return clone;
}
686
// One-time global initialization: brings up the scheduler and captures a
// pristine post-fninit FPU image that every new thread starts from
// (see reset_fpu_state()).
void Thread::initialize()
{
    Scheduler::initialize();
    asm volatile("fninit");
    asm volatile("fxsave %0"
                 : "=m"(s_clean_fpu_state));
}
694
695Vector<Thread*> Thread::all_threads()
696{
697 Vector<Thread*> threads;
698 InterruptDisabler disabler;
699 threads.ensure_capacity(thread_table().size());
700 for (auto* thread : thread_table())
701 threads.unchecked_append(thread);
702 return threads;
703}
704
705bool Thread::is_thread(void* ptr)
706{
707 ASSERT_INTERRUPTS_DISABLED();
708 return thread_table().contains((Thread*)ptr);
709}
710
// Transitions this thread to `new_state`, updating the scheduler's
// bookkeeping and waking the finalizer when a thread starts dying.
void Thread::set_state(State new_state)
{
    InterruptDisabler disabler;
    if (new_state == m_state)
        return;

    if (new_state == Blocked) {
        // we should always have a Blocker while blocked
        ASSERT(m_blocker != nullptr);
    }

    m_state = new_state;
    // The colonel process (pid 0) is not tracked in the scheduler queues.
    if (m_process.pid() != 0) {
        Scheduler::update_state_for_thread(*this);
    }

    if (new_state == Dying) {
        g_finalizer_has_work = true;
        g_finalizer_wait_queue->wake_all();
    }
}
732
// Public backtrace entry point; the inspection handle argument ensures the
// caller holds the process open while the stack is walked.
String Thread::backtrace(ProcessInspectionHandle&) const
{
    return backtrace_impl();
}
737
// A raw return address paired with the kernel symbol it resolves to
// (ksym is null when the address is not within a kernel symbol).
struct RecognizedSymbol {
    u32 address;
    const KSym* ksym;
};
742
// Appends one backtrace line for `symbol` to `builder`.
// Kernel addresses are masked as 0xdeadc0de for non-superuser processes.
// Returns false only for a null address, which terminates the backtrace.
static bool symbolicate(const RecognizedSymbol& symbol, const Process& process, StringBuilder& builder)
{
    if (!symbol.address)
        return false;

    bool mask_kernel_addresses = !process.is_superuser();
    if (!symbol.ksym) {
        // Not a kernel symbol: either a bogus kernel-range address, or a
        // userspace address we may be able to resolve via the ELF loader.
        if (!is_user_address(VirtualAddress(symbol.address))) {
            builder.append("0xdeadc0de\n");
        } else {
            if (!Scheduler::is_active() && process.elf_loader() && process.elf_loader()->has_symbols())
                builder.appendf("%p %s\n", symbol.address, process.elf_loader()->symbolicate(symbol.address).characters());
            else
                builder.appendf("%p\n", symbol.address);
        }
        return true;
    }
    unsigned offset = symbol.address - symbol.ksym->address;
    // An address far past the last kernel symbol is treated as unresolvable.
    if (symbol.ksym->address == ksym_highest_address && offset > 4096) {
        builder.appendf("%p\n", mask_kernel_addresses ? 0xdeadc0de : symbol.address);
    } else {
        builder.appendf("%p %s +%u\n", mask_kernel_addresses ? 0xdeadc0de : symbol.address, demangle(symbol.ksym->name).characters(), offset);
    }
    return true;
}
768
// Walks this thread's EBP frame chain and renders each return address via
// symbolicate(). For the current thread we start from our own live EBP;
// for another thread we start from its saved frame pointer and also record
// its saved EIP.
String Thread::backtrace_impl() const
{
    Vector<RecognizedSymbol, 128> recognized_symbols;

    u32 start_frame;
    if (current == this) {
        asm volatile("movl %%ebp, %%eax"
                     : "=a"(start_frame));
    } else {
        start_frame = frame_ptr();
        recognized_symbols.append({ tss().eip, ksymbolicate(tss().eip) });
    }

    // Switch into the target process's address space to read its stack.
    auto& process = const_cast<Process&>(this->process());
    ProcessPagingScope paging_scope(process);

    uintptr_t stack_ptr = start_frame;
    for (;;) {
        if (!process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(void*) * 2))
            break;
        uintptr_t retaddr;

        // Each frame is [saved ebp][return address]; user memory must be
        // read through copy_from_user, kernel memory via plain memcpy.
        if (is_user_range(VirtualAddress(stack_ptr), sizeof(uintptr_t) * 2)) {
            copy_from_user(&retaddr, &((uintptr_t*)stack_ptr)[1]);
            recognized_symbols.append({ retaddr, ksymbolicate(retaddr) });
            copy_from_user(&stack_ptr, (uintptr_t*)stack_ptr);
        } else {
            memcpy(&retaddr, &((uintptr_t*)stack_ptr)[1], sizeof(uintptr_t));
            recognized_symbols.append({ retaddr, ksymbolicate(retaddr) });
            memcpy(&stack_ptr, (uintptr_t*)stack_ptr, sizeof(uintptr_t));
        }
    }

    StringBuilder builder;
    for (auto& symbol : recognized_symbols) {
        if (!symbolicate(symbol, process, builder))
            break;
    }
    return builder.to_string();
}
809
// Profiler fast path: walks the EBP chain starting at `ebp` and collects
// raw return addresses (no symbolication), capped at the profiler's
// maximum frame count.
Vector<uintptr_t> Thread::raw_backtrace(uintptr_t ebp) const
{
    auto& process = const_cast<Process&>(this->process());
    ProcessPagingScope paging_scope(process);
    Vector<uintptr_t, Profiling::max_stack_frame_count> backtrace;
    backtrace.append(ebp);
    // Stop as soon as a frame pointer can't be safely dereferenced.
    for (uintptr_t* stack_ptr = (uintptr_t*)ebp; process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(uintptr_t) * 2) && MM.can_read_without_faulting(process, VirtualAddress(stack_ptr), sizeof(uintptr_t) * 2); stack_ptr = (uintptr_t*)*stack_ptr) {
        uintptr_t retaddr = stack_ptr[1];
        backtrace.append(retaddr);
        if (backtrace.size() == Profiling::max_stack_frame_count)
            break;
    }
    return backtrace;
}
824
// Allocates and initializes this thread's TLS region. Layout (low to high):
// [TLS data copied from the process's master template][ThreadSpecificData],
// where ThreadSpecificData::self points at itself — presumably so %gs:0
// yields the thread pointer per the i386 TLS convention (TODO confirm
// against the loader/crt setup).
void Thread::make_thread_specific_region(Badge<Process>)
{
    size_t thread_specific_region_alignment = max(process().m_master_tls_alignment, alignof(ThreadSpecificData));
    size_t thread_specific_region_size = align_up_to(process().m_master_tls_size, thread_specific_region_alignment) + sizeof(ThreadSpecificData);
    auto* region = process().allocate_region({}, thread_specific_region_size, "Thread-specific", PROT_READ | PROT_WRITE, true);
    SmapDisabler disabler;
    auto* thread_specific_data = (ThreadSpecificData*)region->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment)).as_ptr();
    auto* thread_local_storage = (u8*)((u8*)thread_specific_data) - align_up_to(process().m_master_tls_size, process().m_master_tls_alignment);
    m_thread_specific_data = VirtualAddress(thread_specific_data);
    thread_specific_data->self = thread_specific_data;
    if (process().m_master_tls_size)
        memcpy(thread_local_storage, process().m_master_tls_region->vaddr().as_ptr(), process().m_master_tls_size);
}
838
839const LogStream& operator<<(const LogStream& stream, const Thread& value)
840{
841 return stream << value.process().name() << "(" << value.pid() << ":" << value.tid() << ")";
842}
843
// Blocks the current thread on `queue`. Interrupts are disabled (cli)
// so that dropping the big lock, clearing the caller's flag `lock`, and
// enqueueing happen atomically with respect to wakeups; the scheduler
// switch re-enables interrupts. `beneficiary` optionally receives this
// thread's remaining timeslice.
void Thread::wait_on(WaitQueue& queue, Atomic<bool>* lock, Thread* beneficiary, const char* reason)
{
    cli();
    bool did_unlock = unlock_process_if_locked();
    if (lock)
        *lock = false;
    set_state(State::Queued);
    queue.enqueue(*current);
    // Yield and wait for the queue to wake us up again.
    if (beneficiary)
        Scheduler::donate_to(beneficiary, reason);
    else
        Scheduler::yield();
    // We've unblocked, relock the process if needed and carry on.
    if (did_unlock)
        relock_process();
}
861
// Called when a WaitQueue dequeues this thread; makes it schedulable again.
void Thread::wake_from_queue()
{
    ASSERT(state() == State::Queued);
    set_state(State::Runnable);
}
867
868Thread* Thread::from_tid(int tid)
869{
870 InterruptDisabler disabler;
871 Thread* found_thread = nullptr;
872 Thread::for_each([&](auto& thread) {
873 if (thread.tid() == tid) {
874 found_thread = &thread;
875 return IterationDecision::Break;
876 }
877 return IterationDecision::Continue;
878 });
879 return found_thread;
880}
881
// Restores this thread's FPU state to the pristine post-fninit image
// captured once in Thread::initialize().
void Thread::reset_fpu_state()
{
    memcpy(m_fpu_state, &s_clean_fpu_state, sizeof(FPUState));
}
886
887}