// Serenity Operating System
1/*
2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this
9 * list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#pragma once
28
29#include <AK/Function.h>
30#include <AK/IntrusiveList.h>
31#include <AK/Optional.h>
32#include <AK/OwnPtr.h>
33#include <AK/String.h>
34#include <AK/Vector.h>
35#include <Kernel/Arch/i386/CPU.h>
36#include <Kernel/Forward.h>
37#include <Kernel/KResult.h>
38#include <Kernel/Scheduler.h>
39#include <Kernel/UnixTypes.h>
40#include <LibC/fd_set.h>
41
42namespace Kernel {
43
// Result of dispatching a signal to a thread (see Thread::dispatch_signal()
// and Thread::dispatch_one_pending_signal()): tells the caller whether the
// target thread should be taken out of its blocked state.
enum class ShouldUnblockThread {
    No = 0,
    Yes
};
48
// Per-signal disposition for one thread: the userspace handler (or sigaction)
// address, plus the associated signal mask and flags. Thread keeps one of
// these per signal (see Thread::m_signal_action_data).
struct SignalActionData {
    VirtualAddress handler_or_sigaction;
    u32 mask { 0 };
    int flags { 0 };
};
54
// Header of a thread's userspace thread-specific (TLS) region.
// NOTE(review): 'self' looks like the conventional TLS self-pointer, set up in
// Thread::make_thread_specific_region() — confirm there.
struct ThreadSpecificData {
    ThreadSpecificData* self;
};
58
// Scheduling priority levels for threads. New threads default to
// THREAD_PRIORITY_NORMAL (see Thread::m_priority).
#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99
64
// A kernel thread: the schedulable unit of execution. Every Thread belongs to
// exactly one Process and carries its own register state (TSS), kernel stack,
// signal state and scheduling bookkeeping. Blocking is modeled by the nested
// Blocker class hierarchy below.
class Thread {
    friend class Process;
    friend class Scheduler;

public:
    // The thread currently executing on the CPU.
    static Thread* current;

    explicit Thread(Process&);
    ~Thread();

    // Looks up a thread by thread ID; returns nullptr if no such thread.
    static Thread* from_tid(int);
    static void initialize();
    // Reaps threads that have entered the Dying state.
    static void finalize_dying_threads();

    static Vector<Thread*> all_threads();
    // Returns true if the given pointer refers to a known Thread.
    static bool is_thread(void*);

    int tid() const { return m_tid; }
    int pid() const;

    // Base scheduling priority; combined with the boost by
    // effective_priority() into the value the scheduler actually uses.
    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void set_priority_boost(u32 boost) { m_priority_boost = boost; }
    u32 priority_boost() const { return m_priority_boost; }

    u32 effective_priority() const;

    // A joinable thread can be waited on via JoinBlocker and keeps its exit
    // value for the joiner; a non-joinable (detached) thread cannot.
    void set_joinable(bool j) { m_is_joinable = j; }
    bool is_joinable() const { return m_is_joinable; }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    // Produces a human-readable backtrace of this thread.
    String backtrace(ProcessInspectionHandle&) const;
    // Walks saved frame pointers starting at 'ebp' and returns raw addresses.
    Vector<FlatPtr> raw_backtrace(FlatPtr ebp) const;

    const String& name() const { return m_name; }
    void set_name(const StringView& s) { m_name = s; }

    // Releases this thread's resources; runs when the thread is being reaped.
    void finalize();

    // Lifecycle / scheduling states of a thread.
    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        // NOTE(review): the Skip* states appear to make the scheduler pass
        // over the thread for the given number of passes — confirm against
        // the Scheduler implementation.
        Skip1SchedulerPass,
        Skip0SchedulerPasses,
        Dying,   // marked for death; awaiting finalization
        Dead,    // finalized
        Stopped, // see m_stop_signal / m_stop_state
        Blocked, // waiting on a Blocker (m_blocker is set)
        Queued,  // parked on a WaitQueue (see wait_on())
    };

    // Base class for everything a thread can block on. should_unblock() is
    // polled (see consider_unblock()) to decide when a Blocked thread may run
    // again; the interrupted-by flags record why a block ended early.
    class Blocker {
    public:
        virtual ~Blocker() {}
        virtual bool should_unblock(Thread&, time_t now_s, long us) = 0;
        virtual const char* state_string() const = 0;
        virtual bool is_reason_signal() const { return false; }
        void set_interrupted_by_death() { m_was_interrupted_by_death = true; }
        bool was_interrupted_by_death() const { return m_was_interrupted_by_death; }
        void set_interrupted_by_signal() { m_was_interrupted_while_blocked = true; }
        bool was_interrupted_by_signal() const { return m_was_interrupted_while_blocked; }

    private:
        bool m_was_interrupted_while_blocked { false };
        bool m_was_interrupted_by_death { false };
        friend class Thread;
    };

    // Blocks until the joinee exits; the joinee's exit value is written
    // through the reference handed to the constructor.
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, void*& joinee_exit_value);
        virtual bool should_unblock(Thread&, time_t now_s, long us) override;
        virtual const char* state_string() const override { return "Joining"; }
        void set_joinee_exit_value(void* value) { m_joinee_exit_value = value; }

    private:
        Thread& m_joinee;
        void*& m_joinee_exit_value;
    };

    // Common base for blockers tied to a FileDescription; keeps the
    // description alive for the duration of the block.
    class FileDescriptionBlocker : public Blocker {
    public:
        const FileDescription& blocked_description() const;

    protected:
        explicit FileDescriptionBlocker(const FileDescription&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
    };

    // Blocks until the description has an incoming connection to accept.
    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Accepting"; }
    };

    // Blocks until a pending connect() on the description completes.
    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Connecting"; }
    };

    // Blocks until the description becomes writable (or the optional
    // deadline passes).
    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Writing"; }

    private:
        Optional<timeval> m_deadline;
    };

    // Blocks until the description becomes readable (or the optional
    // deadline passes).
    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Reading"; }

    private:
        Optional<timeval> m_deadline;
    };

    // Blocks until an arbitrary caller-supplied predicate returns true.
    class ConditionBlocker final : public Blocker {
    public:
        ConditionBlocker(const char* state_string, Function<bool()>&& condition);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return m_state_string; }

    private:
        Function<bool()> m_block_until_condition;
        const char* m_state_string { nullptr };
    };

    // Blocks until the given absolute wakeup time is reached.
    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(u64 wakeup_time);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Sleeping"; }

    private:
        u64 m_wakeup_time { 0 };
    };

    // Backs the select() syscall: blocks until any of the watched fds is
    // ready (or the optional timeout expires). Note: the fd vectors are held
    // by reference and must outlive the blocker.
    class SelectBlocker final : public Blocker {
    public:
        typedef Vector<int, FD_SETSIZE> FDVector;
        SelectBlocker(const timeval& tv, bool select_has_timeout, const FDVector& read_fds, const FDVector& write_fds, const FDVector& except_fds);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Selecting"; }

    private:
        timeval m_select_timeout;
        bool m_select_has_timeout { false };
        const FDVector& m_select_read_fds;
        const FDVector& m_select_write_fds;
        const FDVector& m_select_exceptional_fds;
    };

    // Backs the waitpid() family: blocks until a child matching the wait
    // options changes state; the waitee's pid is reported via the reference.
    class WaitBlocker final : public Blocker {
    public:
        WaitBlocker(int wait_options, pid_t& waitee_pid);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Waiting"; }

    private:
        int m_wait_options { 0 };
        pid_t& m_waitee_pid;
    };

    // A block that is never released by should_unblock(); the thread stays
    // blocked until something external (e.g. signal delivery) unblocks it.
    class SemiPermanentBlocker final : public Blocker {
    public:
        enum class Reason {
            Signal,
        };

        SemiPermanentBlocker(Reason reason);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override
        {
            switch (m_reason) {
            case Reason::Signal:
                return "Signal";
            }
            ASSERT_NOT_REACHED();
        }
        virtual bool is_reason_signal() const override { return m_reason == Reason::Signal; }

    private:
        Reason m_reason;
    };

    // Scheduling bookkeeping: how often this thread has been picked to run.
    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    bool is_stopped() const { return m_state == Stopped; }
    bool is_blocked() const { return m_state == Blocked; }
    // The saved code segment's privilege level (low two bits of CS) is 0
    // when the thread was executing in the kernel.
    bool in_kernel() const { return (m_tss.cs & 0x03) == 0; }

    u32 frame_ptr() const { return m_tss.ebp; }
    u32 stack_ptr() const { return m_tss.esp; }

    RegisterState& get_register_dump_from_stack();

    u16 selector() const { return m_far_ptr.selector; }
    TSS32& tss() { return m_tss; }
    const TSS32& tss() const { return m_tss; }
    State state() const { return m_state; }
    const char* state_string() const;
    u32 ticks() const { return m_ticks; }

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }

    // Sleep for a tick count / until an absolute wakeup time.
    u64 sleep(u32 ticks);
    u64 sleep_until(u64 wakeup_time);

    // How a call to block() ended.
    enum class BlockResult {
        WokeNormally,
        InterruptedBySignal,
        InterruptedByDeath,
    };

    // Blocks the current thread on a stack-constructed Blocker of type T,
    // yielding to the scheduler until the blocker is released. Returns how
    // the block ended (normal wakeup, signal, or impending death).
    template<typename T, class... Args>
    [[nodiscard]] BlockResult block(Args&&... args)
    {
        // We should never be blocking a blocked (or otherwise non-active) thread.
        ASSERT(state() == Thread::Running);
        ASSERT(m_blocker == nullptr);

        T t(forward<Args>(args)...);
        m_blocker = &t;
        set_state(Thread::Blocked);

        // Yield to the scheduler, and wait for us to resume unblocked.
        yield_without_holding_big_lock();

        // We should no longer be blocked once we woke up
        ASSERT(state() != Thread::Blocked);

        // Remove ourselves...
        m_blocker = nullptr;

        if (t.was_interrupted_by_signal())
            return BlockResult::InterruptedBySignal;

        if (t.was_interrupted_by_death())
            return BlockResult::InterruptedByDeath;

        return BlockResult::WokeNormally;
    }

    // Convenience wrapper: block on an arbitrary predicate.
    [[nodiscard]] BlockResult block_until(const char* state_string, Function<bool()>&& condition)
    {
        return block<ConditionBlocker>(state_string, move(condition));
    }

    // Parks this thread on a WaitQueue (state Queued) until woken via
    // wake_from_queue(). NOTE(review): 'lock' and 'beneficiary' presumably
    // release a spinlock and donate the timeslice respectively — confirm in
    // the implementation.
    void wait_on(WaitQueue& queue, Atomic<bool>* lock = nullptr, Thread* beneficiary = nullptr, const char* reason = nullptr);
    void wake_from_queue();

    void unblock();

    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    void die_if_needed();

    const FarPtr& far_ptr() const { return m_far_ptr; }

    // Called on each timer tick while running; returns whether the thread
    // still has time left in its slice.
    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    u32 kernel_stack_base() const { return m_kernel_stack_base; }
    u32 kernel_stack_top() const { return m_kernel_stack_top; }

    void set_selector(u16 s) { m_far_ptr.selector = s; }
    void set_state(State);

    // Signal delivery.
    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);
    // Asks this thread's blocker whether it should be released now.
    void consider_unblock(time_t now_sec, long now_usec);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    ShouldUnblockThread dispatch_one_pending_signal();
    ShouldUnblockThread dispatch_signal(u8 signal);
    bool has_unmasked_pending_signals() const;
    void terminate_due_to_signal(u8 signal);
    bool should_ignore_signal(u8 signal) const;
    bool has_signal_handler(u8 signal) const;
    // m_pending_signals is a bitmask with bit (signal - 1) per signal.
    bool has_pending_signal(u8 signal) const { return m_pending_signals & (1 << (signal - 1)); }

    FPUState& fpu_state() { return *m_fpu_state; }

    void set_default_signal_dispositions();
    // Pushes a value onto this thread's (saved) stack.
    void push_value_on_stack(FlatPtr);

    // Builds the initial userspace stack (argv/envp) for a main thread and
    // returns the resulting stack pointer.
    u32 make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment);

    void make_thread_specific_region(Badge<Process>);

    // Per-thread statistics, bumped from the relevant code paths.
    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    // Creates a copy of this thread owned by the given process (fork path).
    Thread* clone(Process&);

    // Iteration helpers; see the inline definitions below the class.
    template<typename Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<typename Callback>
    static IterationDecision for_each_living(Callback);
    template<typename Callback>
    static IterationDecision for_each(Callback);

    // A thread in one of these states lives on the scheduler's runnable list.
    static bool is_runnable_state(Thread::State state)
    {
        return state == Thread::State::Running || state == Thread::State::Runnable;
    }

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 4 * MB;

    // ptrace support.
    ThreadTracer* tracer() { return m_tracer.ptr(); }
    void start_tracing_from(pid_t tracer);
    void stop_tracing();
    void tracer_trap(const RegisterState&);

private:
    // Intrusive list hooks: scheduler run lists and WaitQueue membership.
    IntrusiveListNode m_runnable_list_node;
    IntrusiveListNode m_wait_queue_node;

private:
    friend class SchedulerData;
    friend class WaitQueue;
    bool unlock_process_if_locked();
    void relock_process();
    String backtrace_impl() const;
    void reset_fpu_state();

    Process& m_process;
    int m_tid { -1 };
    TSS32 m_tss; // saved register/task state used for context switching
    FarPtr m_far_ptr;
    u32 m_ticks { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_pending_signals { 0 }; // bitmask; bit (n - 1) set => signal n pending
    u32 m_signal_mask { 0 };
    u32 m_kernel_stack_base { 0 };
    u32 m_kernel_stack_top { 0 };
    OwnPtr<Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    SignalActionData m_signal_action_data[32];
    // Non-null only while Blocked; points at a stack-allocated Blocker
    // owned by block().
    Blocker* m_blocker { nullptr };

    // Join state (see JoinBlocker).
    bool m_is_joinable { true };
    Thread* m_joiner { nullptr };
    Thread* m_joinee { nullptr };
    void* m_exit_value { nullptr };

    // Statistics exposed via the accessors above.
    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState* m_fpu_state { nullptr };
    State m_state { Invalid };
    String m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };
    u32 m_extra_priority { 0 };
    u32 m_priority_boost { 0 };

    // NOTE(review): m_stop_state presumably records the state to restore
    // when the thread is continued after Stopped — confirm in set_state().
    u8 m_stop_signal { 0 };
    State m_stop_state { Invalid };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };

    OwnPtr<ThreadTracer> m_tracer;

    void yield_without_holding_big_lock();
};
506
507HashTable<Thread*>& thread_table();
508
509template<typename Callback>
510inline IterationDecision Thread::for_each_living(Callback callback)
511{
512 ASSERT_INTERRUPTS_DISABLED();
513 return Thread::for_each([callback](Thread& thread) -> IterationDecision {
514 if (thread.state() != Thread::State::Dead && thread.state() != Thread::State::Dying)
515 return callback(thread);
516 return IterationDecision::Continue;
517 });
518}
519
520template<typename Callback>
521inline IterationDecision Thread::for_each(Callback callback)
522{
523 ASSERT_INTERRUPTS_DISABLED();
524 auto ret = Scheduler::for_each_runnable(callback);
525 if (ret == IterationDecision::Break)
526 return ret;
527 return Scheduler::for_each_nonrunnable(callback);
528}
529
530template<typename Callback>
531inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
532{
533 ASSERT_INTERRUPTS_DISABLED();
534 auto new_callback = [=](Thread& thread) -> IterationDecision {
535 if (thread.state() == state)
536 return callback(thread);
537 return IterationDecision::Continue;
538 };
539 if (is_runnable_state(state))
540 return Scheduler::for_each_runnable(new_callback);
541 return Scheduler::for_each_nonrunnable(new_callback);
542}
543
544const LogStream& operator<<(const LogStream&, const Thread&);
545
546struct SchedulerData {
547 typedef IntrusiveList<Thread, &Thread::m_runnable_list_node> ThreadList;
548
549 ThreadList m_runnable_threads;
550 ThreadList m_nonrunnable_threads;
551
552 ThreadList& thread_list_for_state(Thread::State state)
553 {
554 if (Thread::is_runnable_state(state))
555 return m_runnable_threads;
556 return m_nonrunnable_threads;
557 }
558};
559
560template<typename Callback>
561inline IterationDecision Scheduler::for_each_runnable(Callback callback)
562{
563 ASSERT_INTERRUPTS_DISABLED();
564 auto& tl = g_scheduler_data->m_runnable_threads;
565 for (auto it = tl.begin(); it != tl.end();) {
566 auto& thread = *it;
567 it = ++it;
568 if (callback(thread) == IterationDecision::Break)
569 return IterationDecision::Break;
570 }
571
572 return IterationDecision::Continue;
573}
574
575template<typename Callback>
576inline IterationDecision Scheduler::for_each_nonrunnable(Callback callback)
577{
578 ASSERT_INTERRUPTS_DISABLED();
579 auto& tl = g_scheduler_data->m_nonrunnable_threads;
580 for (auto it = tl.begin(); it != tl.end();) {
581 auto& thread = *it;
582 it = ++it;
583 if (callback(thread) == IterationDecision::Break)
584 return IterationDecision::Break;
585 }
586
587 return IterationDecision::Continue;
588}
589
590u16 thread_specific_selector();
591Descriptor& thread_specific_descriptor();
592
593}