// Serenity Operating System
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
27#pragma once
28
29#include <AK/Function.h>
30#include <AK/IntrusiveList.h>
31#include <AK/Optional.h>
32#include <AK/OwnPtr.h>
33#include <AK/String.h>
34#include <AK/Vector.h>
35#include <Kernel/Arch/i386/CPU.h>
36#include <Kernel/Forward.h>
37#include <Kernel/KResult.h>
38#include <Kernel/Scheduler.h>
39#include <Kernel/UnixTypes.h>
40#include <LibC/fd_set.h>
41
42namespace Kernel {
43
// Result of signal dispatch (see Thread::dispatch_signal): tells the
// caller whether the target thread should be woken from its block so it
// can handle the signal.
enum class ShouldUnblockThread {
    No = 0,
    Yes
};
48
// Per-signal disposition stored in Thread::m_signal_action_data.
// Presumably mirrors the userspace sigaction fields — TODO confirm
// against the sigaction syscall implementation.
struct SignalActionData {
    VirtualAddress handler_or_sigaction; // handler entry point (or special disposition value)
    u32 mask { 0 };                      // signals blocked while the handler runs
    int flags { 0 };                     // SA_*-style flags
};
54
// Header of the per-thread TLS region created by
// Thread::make_thread_specific_region(). The self pointer refers back to
// this structure itself.
struct ThreadSpecificData {
    ThreadSpecificData* self;
};
58
// Scheduling priority bounds and common presets (Thread::m_priority
// defaults to THREAD_PRIORITY_NORMAL).
#define THREAD_PRIORITY_MIN 1
#define THREAD_PRIORITY_LOW 10
#define THREAD_PRIORITY_NORMAL 30
#define THREAD_PRIORITY_HIGH 50
#define THREAD_PRIORITY_MAX 99
64
// A kernel thread: the schedulable unit of execution. Every Thread
// belongs to exactly one Process (m_process) and carries its own TSS,
// kernel stack, FPU state, signal bookkeeping and scheduling counters.
class Thread {
    friend class Process;
    friend class Scheduler;

public:
    // The thread currently executing on the CPU.
    static Thread* current;

    explicit Thread(Process&);
    ~Thread();

    // Look up a thread by its thread id (see thread_table()).
    static Thread* from_tid(int);
    static void initialize();
    // Tear down threads that have reached the Dying state.
    static void finalize_dying_threads();

    static Vector<Thread*> all_threads();
    // Returns whether the given pointer refers to a known Thread.
    static bool is_thread(void*);

    int tid() const { return m_tid; }
    int pid() const;

    // NOTE: no clamping against THREAD_PRIORITY_MIN/MAX is done here.
    void set_priority(u32 p) { m_priority = p; }
    u32 priority() const { return m_priority; }

    void set_priority_boost(u32 boost) { m_priority_boost = boost; }
    u32 priority_boost() const { return m_priority_boost; }

    // Priority as seen by the scheduler; presumably combines m_priority
    // with m_extra_priority/m_priority_boost — defined out of line.
    u32 effective_priority() const;

    void set_joinable(bool j) { m_is_joinable = j; }
    bool is_joinable() const { return m_is_joinable; }

    Process& process() { return m_process; }
    const Process& process() const { return m_process; }

    String backtrace(ProcessInspectionHandle&) const;
    Vector<uintptr_t> raw_backtrace(uintptr_t ebp) const;

    const String& name() const { return m_name; }
    void set_name(StringView s) { m_name = s; }

    void finalize();

    // Thread lifecycle / scheduling states.
    enum State : u8 {
        Invalid = 0,
        Runnable,
        Running,
        Skip1SchedulerPass,   // scheduler skips the thread for one pass — TODO confirm in Scheduler
        Skip0SchedulerPasses,
        Dying,
        Dead,
        Stopped,
        Blocked, // blocked on an m_blocker (see block() below)
        Queued,  // parked on a WaitQueue (see wait_on())
    };

    // Base class for everything a thread can block on. While a thread is
    // Blocked, should_unblock() is consulted with the current time to
    // decide when to wake it (see consider_unblock()).
    class Blocker {
    public:
        virtual ~Blocker() {}
        virtual bool should_unblock(Thread&, time_t now_s, long us) = 0;
        virtual const char* state_string() const = 0;
        void set_interrupted_by_death() { m_was_interrupted_by_death = true; }
        bool was_interrupted_by_death() const { return m_was_interrupted_by_death; }
        void set_interrupted_by_signal() { m_was_interrupted_while_blocked = true; }
        bool was_interrupted_by_signal() const { return m_was_interrupted_while_blocked; }

    private:
        bool m_was_interrupted_while_blocked { false };
        bool m_was_interrupted_by_death { false };
        friend class Thread;
    };

    // Blocks on another thread; the joinee's exit value is written
    // through the reference passed to the constructor.
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, void*& joinee_exit_value);
        virtual bool should_unblock(Thread&, time_t now_s, long us) override;
        virtual const char* state_string() const override { return "Joining"; }
        void set_joinee_exit_value(void* value) { m_joinee_exit_value = value; }

    private:
        Thread& m_joinee;
        void*& m_joinee_exit_value;
    };

    // Common base for blockers tied to a FileDescription; holds a
    // NonnullRefPtr so the description stays alive while blocked.
    class FileDescriptionBlocker : public Blocker {
    public:
        const FileDescription& blocked_description() const;

    protected:
        explicit FileDescriptionBlocker(const FileDescription&);

    private:
        NonnullRefPtr<FileDescription> m_blocked_description;
    };

    // Blocks until the description has a connection to accept().
    class AcceptBlocker final : public FileDescriptionBlocker {
    public:
        explicit AcceptBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Accepting"; }
    };

    // Blocks until a connect() on the description completes.
    class ConnectBlocker final : public FileDescriptionBlocker {
    public:
        explicit ConnectBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Connecting"; }
    };

    // Blocks until the description is writable, with an optional deadline.
    class WriteBlocker final : public FileDescriptionBlocker {
    public:
        explicit WriteBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Writing"; }

    private:
        Optional<timeval> m_deadline;
    };

    // Blocks until the description is readable, with an optional deadline.
    class ReadBlocker final : public FileDescriptionBlocker {
    public:
        explicit ReadBlocker(const FileDescription&);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Reading"; }

    private:
        Optional<timeval> m_deadline;
    };

    // Blocks until an arbitrary caller-supplied condition returns true.
    class ConditionBlocker final : public Blocker {
    public:
        ConditionBlocker(const char* state_string, Function<bool()>&& condition);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return m_state_string; }

    private:
        Function<bool()> m_block_until_condition;
        const char* m_state_string { nullptr };
    };

    // Blocks until an absolute wakeup time (see sleep()/sleep_until()).
    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(u64 wakeup_time);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Sleeping"; }

    private:
        u64 m_wakeup_time { 0 };
    };

    // Backs select(): blocks until a watched fd is ready or the optional
    // timeout expires. The fd vectors are borrowed by reference and must
    // outlive the blocker.
    class SelectBlocker final : public Blocker {
    public:
        typedef Vector<int, FD_SETSIZE> FDVector;
        SelectBlocker(const timeval& tv, bool select_has_timeout, const FDVector& read_fds, const FDVector& write_fds, const FDVector& except_fds);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Selecting"; }

    private:
        timeval m_select_timeout;
        bool m_select_has_timeout { false };
        const FDVector& m_select_read_fds;
        const FDVector& m_select_write_fds;
        const FDVector& m_select_exceptional_fds;
    };

    // Backs waitpid()-style waiting; the waitee's pid is reported through
    // the reference passed to the constructor.
    class WaitBlocker final : public Blocker {
    public:
        WaitBlocker(int wait_options, pid_t& waitee_pid);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override { return "Waiting"; }

    private:
        int m_wait_options { 0 };
        pid_t& m_waitee_pid;
    };

    // Parks the thread for a specific Reason (currently only signal
    // delivery); the wake condition lives in the out-of-line
    // should_unblock() — TODO confirm semantics in the implementation.
    class SemiPermanentBlocker final : public Blocker {
    public:
        enum class Reason {
            Signal,
        };

        SemiPermanentBlocker(Reason reason);
        virtual bool should_unblock(Thread&, time_t, long) override;
        virtual const char* state_string() const override
        {
            switch (m_reason) {
            case Reason::Signal:
                return "Signal";
            }
            ASSERT_NOT_REACHED();
        }

    private:
        Reason m_reason;
    };

    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    bool is_stopped() const { return m_state == Stopped; }
    bool is_blocked() const { return m_state == Blocked; }
    // The low two bits of CS hold the current privilege level; 0 == ring 0.
    bool in_kernel() const { return (m_tss.cs & 0x03) == 0; }

    u32 frame_ptr() const { return m_tss.ebp; }
    u32 stack_ptr() const { return m_tss.esp; }

    RegisterState& get_register_dump_from_stack();

    u16 selector() const { return m_far_ptr.selector; }
    TSS32& tss() { return m_tss; }
    const TSS32& tss() const { return m_tss; }
    State state() const { return m_state; }
    const char* state_string() const;
    u32 ticks() const { return m_ticks; }

    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }

    u64 sleep(u32 ticks);
    u64 sleep_until(u64 wakeup_time);

    // How a call to block() was resolved.
    enum class BlockResult {
        WokeNormally,
        InterruptedBySignal,
        InterruptedByDeath,
    };

    // Block this thread on a stack-allocated Blocker of type T
    // (constructed from args), yield to the scheduler, and report how
    // the thread was eventually woken. Must be called on the currently
    // Running thread, and never while already holding a blocker.
    template<typename T, class... Args>
    [[nodiscard]] BlockResult block(Args&&... args)
    {
        // We should never be blocking a blocked (or otherwise non-active) thread.
        ASSERT(state() == Thread::Running);
        ASSERT(m_blocker == nullptr);

        T t(forward<Args>(args)...);
        m_blocker = &t;
        set_state(Thread::Blocked);

        // Yield to the scheduler, and wait for us to resume unblocked.
        yield_without_holding_big_lock();

        // We should no longer be blocked once we woke up
        ASSERT(state() != Thread::Blocked);

        // Remove ourselves...
        m_blocker = nullptr;

        if (t.was_interrupted_by_signal())
            return BlockResult::InterruptedBySignal;

        if (t.was_interrupted_by_death())
            return BlockResult::InterruptedByDeath;

        return BlockResult::WokeNormally;
    }

    // Convenience wrapper: block until the given condition returns true.
    [[nodiscard]] BlockResult block_until(const char* state_string, Function<bool()>&& condition)
    {
        return block<ConditionBlocker>(state_string, move(condition));
    }

    void wait_on(WaitQueue& queue, Atomic<bool>* lock = nullptr, Thread* beneficiary = nullptr, const char* reason = nullptr);
    void wake_from_queue();

    void unblock();

    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    void die_if_needed();

    const FarPtr& far_ptr() const { return m_far_ptr; }

    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    u32 kernel_stack_base() const { return m_kernel_stack_base; }
    u32 kernel_stack_top() const { return m_kernel_stack_top; }

    void set_selector(u16 s) { m_far_ptr.selector = s; }
    void set_state(State);

    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);
    // Wake the thread if its current blocker says it should unblock now.
    void consider_unblock(time_t now_sec, long now_usec);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    // Signal handling machinery; dispatch_* return whether the thread
    // should be unblocked to run the handler.
    ShouldUnblockThread dispatch_one_pending_signal();
    ShouldUnblockThread dispatch_signal(u8 signal);
    bool has_unmasked_pending_signals() const;
    void terminate_due_to_signal(u8 signal);
    bool should_ignore_signal(u8 signal) const;
    bool has_signal_handler(u8 signal) const;

    FPUState& fpu_state() { return *m_fpu_state; }

    void set_default_signal_dispositions();
    void push_value_on_stack(uintptr_t);

    // Builds argv/envp etc. on the userspace stack; returns the new
    // stack pointer value.
    u32 make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment);

    // Only Process may create the TLS region (enforced via Badge).
    void make_thread_specific_region(Badge<Process>);

    // Per-thread accounting counters.
    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }

    unsigned file_read_bytes() const { return m_file_read_bytes; }
    unsigned file_write_bytes() const { return m_file_write_bytes; }

    void did_file_read(unsigned bytes)
    {
        m_file_read_bytes += bytes;
    }

    void did_file_write(unsigned bytes)
    {
        m_file_write_bytes += bytes;
    }

    unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
    unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }

    void did_unix_socket_read(unsigned bytes)
    {
        m_unix_socket_read_bytes += bytes;
    }

    void did_unix_socket_write(unsigned bytes)
    {
        m_unix_socket_write_bytes += bytes;
    }

    unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
    unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }

    void did_ipv4_socket_read(unsigned bytes)
    {
        m_ipv4_socket_read_bytes += bytes;
    }

    void did_ipv4_socket_write(unsigned bytes)
    {
        m_ipv4_socket_write_bytes += bytes;
    }

    Thread* clone(Process&);

    // Iteration helpers over all threads (defined below, outside the class).
    template<typename Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<typename Callback>
    static IterationDecision for_each_living(Callback);
    template<typename Callback>
    static IterationDecision for_each(Callback);

    // A thread in one of these states lives on the scheduler's runnable list.
    static bool is_runnable_state(Thread::State state)
    {
        return state == Thread::State::Running || state == Thread::State::Runnable;
    }

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 4 * MB;

private:
    // Hooks for the scheduler's and wait queues' intrusive lists.
    IntrusiveListNode m_runnable_list_node;
    IntrusiveListNode m_wait_queue_node;

private:
    friend class SchedulerData;
    friend class WaitQueue;
    bool unlock_process_if_locked();
    void relock_process();
    String backtrace_impl() const;
    void reset_fpu_state();

    Process& m_process;
    int m_tid { -1 };
    TSS32 m_tss;
    FarPtr m_far_ptr;
    u32 m_ticks { 0 };
    u32 m_ticks_left { 0 };
    u32 m_times_scheduled { 0 };
    u32 m_pending_signals { 0 };
    u32 m_signal_mask { 0 };
    u32 m_kernel_stack_base { 0 };
    u32 m_kernel_stack_top { 0 };
    OwnPtr<Region> m_kernel_stack_region;
    VirtualAddress m_thread_specific_data;
    SignalActionData m_signal_action_data[32];
    Blocker* m_blocker { nullptr }; // non-owning; points at a stack-allocated blocker inside block()

    bool m_is_joinable { true };
    Thread* m_joiner { nullptr };
    Thread* m_joinee { nullptr };
    void* m_exit_value { nullptr };

    unsigned m_syscall_count { 0 };
    unsigned m_inode_faults { 0 };
    unsigned m_zero_faults { 0 };
    unsigned m_cow_faults { 0 };

    unsigned m_file_read_bytes { 0 };
    unsigned m_file_write_bytes { 0 };

    unsigned m_unix_socket_read_bytes { 0 };
    unsigned m_unix_socket_write_bytes { 0 };

    unsigned m_ipv4_socket_read_bytes { 0 };
    unsigned m_ipv4_socket_write_bytes { 0 };

    FPUState* m_fpu_state { nullptr };
    State m_state { Invalid };
    String m_name;
    u32 m_priority { THREAD_PRIORITY_NORMAL };
    u32 m_extra_priority { 0 };
    u32 m_priority_boost { 0 };

    u8 m_stop_signal { 0 };

    bool m_dump_backtrace_on_finalization { false };
    bool m_should_die { false };

    void yield_without_holding_big_lock();
};
495
// Global registry of Thread pointers (used by Thread::from_tid/is_thread).
HashTable<Thread*>& thread_table();
497
498template<typename Callback>
499inline IterationDecision Thread::for_each_living(Callback callback)
500{
501 ASSERT_INTERRUPTS_DISABLED();
502 return Thread::for_each([callback](Thread& thread) -> IterationDecision {
503 if (thread.state() != Thread::State::Dead && thread.state() != Thread::State::Dying)
504 return callback(thread);
505 return IterationDecision::Continue;
506 });
507}
508
509template<typename Callback>
510inline IterationDecision Thread::for_each(Callback callback)
511{
512 ASSERT_INTERRUPTS_DISABLED();
513 auto ret = Scheduler::for_each_runnable(callback);
514 if (ret == IterationDecision::Break)
515 return ret;
516 return Scheduler::for_each_nonrunnable(callback);
517}
518
519template<typename Callback>
520inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
521{
522 ASSERT_INTERRUPTS_DISABLED();
523 auto new_callback = [=](Thread& thread) -> IterationDecision {
524 if (thread.state() == state)
525 return callback(thread);
526 return IterationDecision::Continue;
527 };
528 if (is_runnable_state(state))
529 return Scheduler::for_each_runnable(new_callback);
530 return Scheduler::for_each_nonrunnable(new_callback);
531}
532
// Debug-log formatter for a Thread.
const LogStream& operator<<(const LogStream&, const Thread&);
534
535struct SchedulerData {
536 typedef IntrusiveList<Thread, &Thread::m_runnable_list_node> ThreadList;
537
538 ThreadList m_runnable_threads;
539 ThreadList m_nonrunnable_threads;
540
541 ThreadList& thread_list_for_state(Thread::State state)
542 {
543 if (Thread::is_runnable_state(state))
544 return m_runnable_threads;
545 return m_nonrunnable_threads;
546 }
547};
548
549template<typename Callback>
550inline IterationDecision Scheduler::for_each_runnable(Callback callback)
551{
552 ASSERT_INTERRUPTS_DISABLED();
553 auto& tl = g_scheduler_data->m_runnable_threads;
554 for (auto it = tl.begin(); it != tl.end();) {
555 auto& thread = *it;
556 it = ++it;
557 if (callback(thread) == IterationDecision::Break)
558 return IterationDecision::Break;
559 }
560
561 return IterationDecision::Continue;
562}
563
564template<typename Callback>
565inline IterationDecision Scheduler::for_each_nonrunnable(Callback callback)
566{
567 ASSERT_INTERRUPTS_DISABLED();
568 auto& tl = g_scheduler_data->m_nonrunnable_threads;
569 for (auto it = tl.begin(); it != tl.end();) {
570 auto& thread = *it;
571 it = ++it;
572 if (callback(thread) == IterationDecision::Break)
573 return IterationDecision::Break;
574 }
575
576 return IterationDecision::Continue;
577}
578
// GDT selector/descriptor for the thread-specific-data (TLS) segment.
u16 thread_specific_selector();
Descriptor& thread_specific_descriptor();
581
582}