// Serenity Operating System
1/*
2 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
3 *
4 * SPDX-License-Identifier: BSD-2-Clause
5 */
6
7#pragma once
8
9#include <AK/Concepts.h>
10#include <AK/EnumBits.h>
11#include <AK/Error.h>
12#include <AK/IntrusiveList.h>
13#include <AK/Optional.h>
14#include <AK/OwnPtr.h>
15#include <AK/Time.h>
16#include <AK/Variant.h>
17#include <AK/Vector.h>
18#include <Kernel/API/POSIX/sched.h>
19#include <Kernel/API/POSIX/select.h>
20#include <Kernel/API/POSIX/signal_numbers.h>
21#include <Kernel/Arch/RegisterState.h>
22#include <Kernel/Arch/ThreadRegisters.h>
23#include <Kernel/Debug.h>
24#include <Kernel/Forward.h>
25#include <Kernel/KString.h>
26#include <Kernel/Library/ListedRefCounted.h>
27#include <Kernel/Library/LockWeakPtr.h>
28#include <Kernel/Library/LockWeakable.h>
29#include <Kernel/Locking/LockLocation.h>
30#include <Kernel/Locking/LockMode.h>
31#include <Kernel/Locking/LockRank.h>
32#include <Kernel/Locking/SpinlockProtected.h>
33#include <Kernel/Memory/VirtualRange.h>
34#include <Kernel/UnixTypes.h>
35
36namespace Kernel {
37
38class Timer;
39
// Result of attempting to dispatch a signal to a thread (see
// Thread::dispatch_signal() and friends below).
// NOTE(review): per-value semantics inferred from names — confirm in Thread.cpp.
enum class DispatchSignalResult {
    Deferred = 0,
    Yield,
    Terminate,
    Continue
};
46
// Header of a thread's thread-specific (TLS) region. The first slot points
// back at itself — assumed to be the TLS self-pointer required by the
// userspace TLS ABI; confirm against make_thread_specific_region().
struct ThreadSpecificData {
    ThreadSpecificData* self;
};

// Default CPU affinity mask: all bits set, i.e. eligible to run on any processor.
#define THREAD_AFFINITY_DEFAULT 0xffffffff
52
53class Thread
54 : public ListedRefCounted<Thread, LockType::Spinlock>
55 , public LockWeakable<Thread> {
56 AK_MAKE_NONCOPYABLE(Thread);
57 AK_MAKE_NONMOVABLE(Thread);
58
59 friend class Mutex;
60 friend class Process;
61 friend class Scheduler;
62 friend struct ThreadReadyQueue;
63
64public:
    // Returns the thread currently executing on this processor.
    static Thread* current()
    {
        return Processor::current_thread();
    }
69
70 static ErrorOr<NonnullLockRefPtr<Thread>> try_create(NonnullLockRefPtr<Process>);
71 ~Thread();
72
73 static LockRefPtr<Thread> from_tid(ThreadID);
74 static void finalize_dying_threads();
75
76 ThreadID tid() const { return m_tid; }
77 ProcessID pid() const;
78
79 void set_priority(u32 p) { m_priority = p; }
80 u32 priority() const { return m_priority; }
81
    // Marks this thread as detached: no one may join it anymore, and once it
    // dies it becomes finalizable immediately (see is_finalizable()).
    void detach()
    {
        SpinlockLocker lock(m_lock);
        m_is_joinable = false;
    }

    // True while the thread can still be joined (i.e. not detached and no
    // join has begun — try_join() clears this flag).
    [[nodiscard]] bool is_joinable() const
    {
        SpinlockLocker lock(m_lock);
        return m_is_joinable;
    }
93
94 Process& process() { return m_process; }
95 Process const& process() const { return m_process; }
96
    // Thread name, guarded by its own spinlock via SpinlockProtected;
    // callers go through the protected wrapper to read it safely.
    SpinlockProtected<NonnullOwnPtr<KString>, LockRank::None> const& name() const
    {
        return m_name;
    }
    void set_name(NonnullOwnPtr<KString> name);
102
103 void finalize();
104
105 enum class State : u8 {
106 Invalid = 0,
107 Runnable,
108 Running,
109 Dying,
110 Dead,
111 Stopped,
112 Blocked,
113 };
114
115 class [[nodiscard]] BlockResult {
116 public:
117 enum Type {
118 WokeNormally,
119 NotBlocked,
120 InterruptedBySignal,
121 InterruptedByDeath,
122 InterruptedByTimeout,
123 };
124
125 BlockResult() = delete;
126
127 BlockResult(Type type)
128 : m_type(type)
129 {
130 }
131
132 bool operator==(Type type) const
133 {
134 return m_type == type;
135 }
136 bool operator!=(Type type) const
137 {
138 return m_type != type;
139 }
140
141 [[nodiscard]] bool was_interrupted() const
142 {
143 switch (m_type) {
144 case InterruptedBySignal:
145 case InterruptedByDeath:
146 return true;
147 default:
148 return false;
149 }
150 }
151
152 private:
153 Type m_type;
154 };
155
    // When a blocking operation should give up: either infinite (default
    // constructor) or a deadline on a specific clock.
    class BlockTimeout {
    public:
        // Infinite timeout: block until explicitly unblocked.
        BlockTimeout()
            : m_infinite(true)
        {
        }
        // Finite timeout; defined out-of-line. If is_absolute is false, `time`
        // is presumably relative to `start_time` — confirm in Thread.cpp.
        explicit BlockTimeout(bool is_absolute, Time const* time, Time const* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE);

        Time const& absolute_time() const { return m_time; }
        // Null when the timeout is infinite.
        Time const* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
        clockid_t clock_id() const { return m_clock_id; }
        bool is_infinite() const { return m_infinite; }

    private:
        Time m_time {};
        Time m_start_time {};
        clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
        bool m_infinite { false };
    };
175
176 class BlockerSet;
177
    // Base class for everything a thread can block on (files, futexes,
    // joins, signals, ...). A Blocker records why it was woken: normally,
    // by timeout, by a signal, or because the thread is dying.
    class Blocker {
        AK_MAKE_NONMOVABLE(Blocker);
        AK_MAKE_NONCOPYABLE(Blocker);

    public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
            Routing,
            Sleep,
            Signal,
            Wait,
            Flock
        };
        virtual ~Blocker();
        virtual StringView state_string() const = 0;
        virtual Type blocker_type() const = 0;
        // Lets a subclass substitute its own timeout for the caller-supplied one
        // (e.g. Read/WriteBlocker use a socket-derived timeout).
        virtual BlockTimeout const& override_timeout(BlockTimeout const& timeout) { return timeout; }
        virtual bool can_be_interrupted() const { return true; }
        // Called before actually blocking; out-of-line. Presumably returns false
        // when blocking should not proceed — confirm in Thread.cpp.
        virtual bool setup_blocker();
        virtual void finalize();

        // The thread that constructed (and would block on) this blocker.
        Thread& thread() { return m_thread; }

        enum class UnblockImmediatelyReason {
            UnblockConditionAlreadyMet,
            TimeoutInThePast,
        };

        // Invoked instead of blocking when the block would be a no-op.
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) = 0;

        // Records a timeout-triggered wakeup; subclasses may extend.
        virtual void was_unblocked(bool did_timeout)
        {
            if (did_timeout) {
                SpinlockLocker lock(m_lock);
                m_did_timeout = true;
            }
        }
        void set_interrupted_by_death()
        {
            SpinlockLocker lock(m_lock);
            do_set_interrupted_by_death();
        }
        void set_interrupted_by_signal(u8 signal)
        {
            SpinlockLocker lock(m_lock);
            do_set_interrupted_by_signal(signal);
        }
        // Returns the interrupting signal number, or 0 if none.
        u8 was_interrupted_by_signal() const
        {
            SpinlockLocker lock(m_lock);
            return do_get_interrupted_by_signal();
        }
        // Collapses the recorded flags into a BlockResult; death takes
        // precedence over signal, which takes precedence over timeout.
        virtual Thread::BlockResult block_result()
        {
            SpinlockLocker lock(m_lock);
            if (m_was_interrupted_by_death)
                return Thread::BlockResult::InterruptedByDeath;
            if (m_was_interrupted_by_signal != 0)
                return Thread::BlockResult::InterruptedBySignal;
            if (m_did_timeout)
                return Thread::BlockResult::InterruptedByTimeout;
            return Thread::BlockResult::WokeNormally;
        }

        void begin_blocking(Badge<Thread>);
        BlockResult end_blocking(Badge<Thread>, bool);

    protected:
        // Blockers are always created by the thread that is about to block.
        Blocker()
            : m_thread(*Thread::current())
        {
        }

        // do_* helpers assume m_lock is already held by the caller.
        void do_set_interrupted_by_death()
        {
            m_was_interrupted_by_death = true;
        }
        void do_set_interrupted_by_signal(u8 signal)
        {
            VERIFY(signal != 0);
            m_was_interrupted_by_signal = signal;
        }
        void do_clear_interrupted_by_signal()
        {
            m_was_interrupted_by_signal = 0;
        }
        u8 do_get_interrupted_by_signal() const
        {
            return m_was_interrupted_by_signal;
        }
        [[nodiscard]] bool was_interrupted() const
        {
            return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
        }
        // Idempotent: only the transition from blocking -> not blocking
        // notifies the thread; m_lock is released before doing so.
        void unblock_from_blocker()
        {
            {
                SpinlockLocker lock(m_lock);
                if (!m_is_blocking)
                    return;
                m_is_blocking = false;
            }

            m_thread->unblock_from_blocker(*this);
        }

        bool add_to_blocker_set(BlockerSet&, void* = nullptr);
        // Caller must hold the relevant lock ("raw_locked").
        void set_blocker_set_raw_locked(BlockerSet* blocker_set) { m_blocker_set = blocker_set; }

        // FIXME: Figure out whether this can be Thread.
        mutable RecursiveSpinlock<LockRank::None> m_lock {};

    private:
        BlockerSet* m_blocker_set { nullptr };
        NonnullLockRefPtr<Thread> m_thread;
        u8 m_was_interrupted_by_signal { 0 };
        bool m_is_blocking { false };
        bool m_was_interrupted_by_death { false };
        bool m_did_timeout { false };
    };
303
304 class BlockerSet {
305 AK_MAKE_NONCOPYABLE(BlockerSet);
306 AK_MAKE_NONMOVABLE(BlockerSet);
307
308 public:
309 BlockerSet() = default;
310
311 virtual ~BlockerSet()
312 {
313 VERIFY(!m_lock.is_locked());
314 VERIFY(m_blockers.is_empty());
315 }
316
317 bool add_blocker(Blocker& blocker, void* data)
318 {
319 SpinlockLocker lock(m_lock);
320 if (!should_add_blocker(blocker, data))
321 return false;
322 m_blockers.append({ &blocker, data });
323 return true;
324 }
325
326 void remove_blocker(Blocker& blocker)
327 {
328 SpinlockLocker lock(m_lock);
329 // NOTE: it's possible that the blocker is no longer present
330 m_blockers.remove_all_matching([&](auto& info) {
331 return info.blocker == &blocker;
332 });
333 }
334
335 bool is_empty() const
336 {
337 SpinlockLocker lock(m_lock);
338 return is_empty_locked();
339 }
340
341 protected:
342 template<typename Callback>
343 bool unblock_all_blockers_whose_conditions_are_met(Callback try_to_unblock_one)
344 {
345 SpinlockLocker lock(m_lock);
346 return unblock_all_blockers_whose_conditions_are_met_locked(try_to_unblock_one);
347 }
348
349 template<typename Callback>
350 bool unblock_all_blockers_whose_conditions_are_met_locked(Callback try_to_unblock_one)
351 {
352 VERIFY(m_lock.is_locked());
353 bool stop_iterating = false;
354 bool did_unblock_any = false;
355 for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
356 auto& info = m_blockers[i];
357 if (bool did_unblock = try_to_unblock_one(*info.blocker, info.data, stop_iterating)) {
358 m_blockers.remove(i);
359 did_unblock_any = true;
360 continue;
361 }
362
363 i++;
364 }
365 return did_unblock_any;
366 }
367
368 bool is_empty_locked() const
369 {
370 VERIFY(m_lock.is_locked());
371 return m_blockers.is_empty();
372 }
373
374 virtual bool should_add_blocker(Blocker&, void*) { return true; }
375
376 struct BlockerInfo {
377 Blocker* blocker;
378 void* data;
379 };
380
381 Vector<BlockerInfo, 4> do_take_blockers(size_t count)
382 {
383 if (m_blockers.size() <= count)
384 return move(m_blockers);
385
386 size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
387 VERIFY(move_count > 0);
388
389 Vector<BlockerInfo, 4> taken_blockers;
390 taken_blockers.ensure_capacity(move_count);
391 for (size_t i = 0; i < move_count; i++)
392 taken_blockers.append(m_blockers.take(i));
393 m_blockers.remove(0, move_count);
394 return taken_blockers;
395 }
396
397 void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
398 {
399 if (blockers_to_append.is_empty())
400 return;
401 if (m_blockers.is_empty()) {
402 m_blockers = move(blockers_to_append);
403 return;
404 }
405 m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
406 for (size_t i = 0; i < blockers_to_append.size(); i++)
407 m_blockers.append(blockers_to_append.take(i));
408 blockers_to_append.clear();
409 }
410
411 // FIXME: Check whether this can be Thread.
412 mutable Spinlock<LockRank::None> m_lock {};
413
414 private:
415 Vector<BlockerInfo, 4> m_blockers;
416 };
417
418 friend class JoinBlocker;
    // Blocks a thread joining another thread until the joinee exits.
    // Not interruptible by signals (can_be_interrupted() == false).
    class JoinBlocker final : public Blocker {
    public:
        explicit JoinBlocker(Thread& joinee, ErrorOr<void>& try_join_result, void*& joinee_exit_value);
        virtual Type blocker_type() const override { return Type::Join; }
        virtual StringView state_string() const override { return "Joining"sv; }
        virtual bool can_be_interrupted() const override { return false; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;

        virtual bool setup_blocker() override;

        bool unblock(void*, bool);

    private:
        NonnullLockRefPtr<Thread> m_joinee;
        // Out-parameters owned by the caller: receive the joinee's exit value
        // and the result of the join attempt.
        void*& m_joinee_exit_value;
        ErrorOr<void>& m_try_join_result;
        bool m_did_unblock { false };
    };
437
438 class WaitQueueBlocker final : public Blocker {
439 public:
440 explicit WaitQueueBlocker(WaitQueue&, StringView block_reason = {});
441 virtual ~WaitQueueBlocker();
442
443 virtual Type blocker_type() const override { return Type::Queue; }
444 virtual StringView state_string() const override { return m_block_reason.is_null() ? m_block_reason : "Queue"sv; }
445 virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
446 virtual bool setup_blocker() override;
447
448 bool unblock();
449
450 protected:
451 WaitQueue& m_wait_queue;
452 StringView m_block_reason;
453 bool m_did_unblock { false };
454 };
455
    // Blocks a thread on a FutexQueue, optionally restricted to a wake bitset.
    class FutexBlocker final : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual StringView state_string() const override { return "Futex"sv; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
        virtual bool setup_blocker() override;

        u32 bitset() const { return m_bitset; }

        // NOTE: begin_requeue() leaves m_lock held (interrupts state saved in
        // m_previous_interrupts_state); finish_requeue() must be called to
        // release it after the blocker has been moved to the other queue.
        void begin_requeue()
        {
            // We need to hold the lock until we moved it over
            m_previous_interrupts_state = m_lock.lock();
        }
        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        FutexQueue& m_futex_queue;
        u32 m_bitset { 0 };
        InterruptsState m_previous_interrupts_state { InterruptsState::Disabled };
        bool m_did_unblock { false };
    };
484
    // Common base for blockers waiting on file/socket readiness conditions,
    // expressed as a bitmask of BlockFlags.
    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u16 {
            None = 0,

            Read = 1 << 0,
            Write = 1 << 1,
            ReadPriority = 1 << 2,
            WritePriority = 1 << 3,

            Accept = 1 << 4,
            Connect = 1 << 5,
            SocketFlags = Accept | Connect,

            WriteError = 1 << 6,
            WriteHangUp = 1 << 7,
            ReadHangUp = 1 << 8,
            Exception = WriteError | WriteHangUp | ReadHangUp,
        };

        virtual Type blocker_type() const override { return Type::File; }

        // Re-evaluates the readiness condition; defined by subclasses.
        virtual bool unblock_if_conditions_are_met(bool, void*) = 0;
    };
509
    // FileBlocker bound to a single OpenFileDescription; reports which of the
    // requested flags became ready through the caller-owned m_unblocked_flags.
    class OpenFileDescriptionBlocker : public FileBlocker {
    public:
        OpenFileDescription const& blocked_description() const;

        virtual bool unblock_if_conditions_are_met(bool, void*) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;

    protected:
        explicit OpenFileDescriptionBlocker(OpenFileDescription&, BlockFlags, BlockFlags&);

    private:
        NonnullRefPtr<OpenFileDescription> m_blocked_description;
        // Conditions we are waiting for (fixed at construction).
        const BlockFlags m_flags;
        // Out-parameter: receives the subset of flags that became ready.
        BlockFlags& m_unblocked_flags;
        bool m_did_unblock { false };
    };
527
    // Blocks until a socket has a pending incoming connection.
    class AcceptBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit AcceptBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Accepting"sv; }
    };

    // Blocks until a socket connect() completes.
    class ConnectBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit ConnectBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Connecting"sv; }
    };

    // Blocks until a description becomes writable; may impose its own timeout
    // (see override_timeout), presumably from a socket send timeout.
    class WriteBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit WriteBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Writing"sv; }
        virtual BlockTimeout const& override_timeout(BlockTimeout const&) override;

    private:
        BlockTimeout m_timeout;
    };

    // Blocks until a description becomes readable; may impose its own timeout
    // (see override_timeout), presumably from a socket receive timeout.
    class ReadBlocker final : public OpenFileDescriptionBlocker {
    public:
        explicit ReadBlocker(OpenFileDescription&, BlockFlags&);
        virtual StringView state_string() const override { return "Reading"sv; }
        virtual BlockTimeout const& override_timeout(BlockTimeout const&) override;

    private:
        BlockTimeout m_timeout;
    };
559
    // Blocks a sleeping thread until its deadline; if a remaining-time pointer
    // is provided, the unslept time is written back on wakeup.
    class SleepBlocker final : public Blocker {
    public:
        explicit SleepBlocker(BlockTimeout const&, Time* = nullptr);
        virtual StringView state_string() const override { return "Sleeping"sv; }
        virtual Type blocker_type() const override { return Type::Sleep; }
        virtual BlockTimeout const& override_timeout(BlockTimeout const&) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual Thread::BlockResult block_result() override;

    private:
        void calculate_remaining();

        BlockTimeout m_deadline;
        // Optional out-parameter: remaining sleep time (nanosleep-style).
        Time* m_remaining;
    };
576
    // Blocks on multiple file descriptions at once (select/poll support);
    // each FDInfo pairs a description with the flags waited for and, on
    // wakeup, the flags that became ready.
    class SelectBlocker final : public FileBlocker {
    public:
        struct FDInfo {
            RefPtr<OpenFileDescription> description;
            BlockFlags block_flags { BlockFlags::None };
            BlockFlags unblocked_flags { BlockFlags::None };
        };

        using FDVector = Vector<FDInfo, FD_SETSIZE>;
        explicit SelectBlocker(FDVector&);
        virtual ~SelectBlocker();

        virtual bool unblock_if_conditions_are_met(bool, void*) override;
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual StringView state_string() const override { return "Selecting"sv; }
        virtual bool setup_blocker() override;
        virtual void finalize() override;

    private:
        size_t collect_unblocked_flags();

        // Caller-owned; results are written into the entries' unblocked_flags.
        FDVector& m_fds;
        bool m_did_unblock { false };
    };
602
    // Blocks until one of the signals in m_pending_set becomes pending
    // (sigtimedwait-style); the matching signal's info is written to m_result.
    class SignalBlocker final : public Blocker {
    public:
        explicit SignalBlocker(sigset_t pending_set, siginfo_t& result);
        virtual StringView state_string() const override { return "Pending Signal"sv; }
        virtual Type blocker_type() const override { return Type::Signal; }
        void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;
        // Returns true if a matching pending signal was consumed/unblocked.
        bool check_pending_signals(bool from_add_blocker);

    private:
        sigset_t m_pending_set { 0 };
        siginfo_t& m_result;
        bool m_did_unblock { false };
    };
617
    // BlockerSet holding SignalBlockers; wakes any whose awaited signal
    // became pending, and rejects adding blockers that can complete at once.
    class SignalBlockerSet final : public BlockerSet {
    public:
        void unblock_all_blockers_whose_conditions_are_met()
        {
            BlockerSet::unblock_all_blockers_whose_conditions_are_met([&](auto& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Signal);
                auto& blocker = static_cast<Thread::SignalBlocker&>(b);
                return blocker.check_pending_signals(false);
            });
        }

    private:
        bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Signal);
            auto& blocker = static_cast<Thread::SignalBlocker&>(b);
            // Don't add the blocker if its signal is already pending.
            return !blocker.check_pending_signals(true);
        }
    };
637
    // Implements waitpid/waitid-style blocking: waits for a child process (or
    // any member of a process group) to terminate, stop, or continue.
    class WaitBlocker final : public Blocker {
    public:
        enum class UnblockFlags {
            Terminated,
            Stopped,
            Continued,
            Disowned
        };

        WaitBlocker(int wait_options, Variant<Empty, NonnullLockRefPtr<Process>, NonnullLockRefPtr<ProcessGroup>> waitee, ErrorOr<siginfo_t>& result);
        virtual StringView state_string() const override { return "Waiting"sv; }
        virtual Type blocker_type() const override { return Type::Wait; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual void was_unblocked(bool) override;
        virtual bool setup_blocker() override;

        bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
        // A "real" wait consumes the child's state; WNOWAIT leaves it intact.
        bool is_wait() const { return (m_wait_options & WNOWAIT) != WNOWAIT; }

    private:
        void do_was_disowned();
        void do_set_result(siginfo_t const&);

        int const m_wait_options;
        // Out-parameter owned by the caller; receives the wait result.
        ErrorOr<siginfo_t>& m_result;
        // Empty = wait for any child; otherwise a specific process or group.
        Variant<Empty, NonnullLockRefPtr<Process>, NonnullLockRefPtr<ProcessGroup>> m_waitee;
        bool m_did_unblock { false };
        bool m_got_sigchild { false };
    };
667
    // Per-process set of WaitBlockers, plus bookkeeping of child state
    // changes (ProcessBlockInfo) that arrived before anyone waited for them.
    class WaitBlockerSet final : public BlockerSet {
        friend class WaitBlocker;

    public:
        explicit WaitBlockerSet(Process& process)
            : m_process(process)
        {
        }

        void disowned_by_waiter(Process&);
        bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
        void try_unblock(WaitBlocker&);
        void finalize();

    protected:
        virtual bool should_add_blocker(Blocker&, void*) override;

    private:
        // Records a child's pending state change until it is waited on.
        struct ProcessBlockInfo {
            NonnullLockRefPtr<Process> process;
            WaitBlocker::UnblockFlags flags;
            u8 signal;
            bool was_waited { false };

            explicit ProcessBlockInfo(NonnullLockRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
            ~ProcessBlockInfo();
        };

        Process& m_process;
        Vector<ProcessBlockInfo, 2> m_processes;
        bool m_finalized { false };
    };
700
    // Blocks a thread attempting to take a contended flock() on an inode.
    class FlockBlocker final : public Blocker {
    public:
        FlockBlocker(NonnullRefPtr<Inode>, flock const&);
        virtual StringView state_string() const override { return "Locking File"sv; }
        virtual Type blocker_type() const override { return Type::Flock; }
        virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
        virtual bool setup_blocker() override;
        // Returns true if the lock could now be acquired.
        bool try_unblock(bool from_add_blocker);

    private:
        NonnullRefPtr<Inode> m_inode;
        // Caller-owned flock request; must outlive this blocker.
        flock const& m_flock;
        bool m_did_unblock { false };
    };

    // BlockerSet of FlockBlockers; retries each pending lock when the file's
    // lock state changes, and rejects blockers that can lock immediately.
    class FlockBlockerSet final : public BlockerSet {
    public:
        void unblock_all_blockers_whose_conditions_are_met()
        {
            BlockerSet::unblock_all_blockers_whose_conditions_are_met([&](auto& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Flock);
                auto& blocker = static_cast<Thread::FlockBlocker&>(b);
                return blocker.try_unblock(false);
            });
        }

    private:
        bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Flock);
            auto& blocker = static_cast<Thread::FlockBlocker&>(b);
            // Don't add the blocker if the lock is immediately available.
            return !blocker.try_unblock(true);
        }
    };
735
    // Attempts to join this thread. add_blocker is invoked under m_lock to
    // register the caller's JoinBlocker; returns EDEADLK for self-join and
    // EINVAL if the thread is detached or invalid. On success the thread is
    // no longer joinable by anyone else.
    template<typename AddBlockerHandler>
    ErrorOr<void> try_join(AddBlockerHandler add_blocker)
    {
        if (Thread::current() == this)
            return EDEADLK;

        SpinlockLocker lock(m_lock);

        // Joining dead threads is allowed for two main reasons:
        // - Thread join behavior should not be racy when a thread is joined and exiting at roughly the same time.
        //   This is common behavior when threads are given a signal to end (meaning they are going to exit ASAP) and then joined.
        // - POSIX requires that exited threads are joinable (at least, there is no language in the specification forbidding it).
        if (!m_is_joinable || state() == Thread::State::Invalid)
            return EINVAL;

        add_blocker();

        // From this point on the thread is no longer joinable by anyone
        // else. It also means that if the join is timed, it becomes
        // detached when a timeout happens.
        m_is_joinable = false;
        return {};
    }
759
    // Scheduling statistics.
    void did_schedule() { ++m_times_scheduled; }
    u32 times_scheduled() const { return m_times_scheduled; }

    void resume_from_stopped();

    [[nodiscard]] bool should_be_stopped() const;
    [[nodiscard]] bool is_stopped() const { return m_state == Thread::State::Stopped; }
    [[nodiscard]] bool is_blocked() const { return m_state == Thread::State::Blocked; }

    // CPU the thread last ran on (atomic: read with consume, written with
    // release by the scheduler) and its affinity mask.
    u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
    void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
    u32 affinity() const { return m_cpu_affinity; }
    void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

    // Saved register state, reconstructed from the kernel stack.
    RegisterState& get_register_dump_from_stack();
    RegisterState const& get_register_dump_from_stack() const { return const_cast<Thread*>(this)->get_register_dump_from_stack(); }

    DebugRegisterState& debug_register_state() { return m_debug_register_state; }
    DebugRegisterState const& debug_register_state() const { return m_debug_register_state; }

    ThreadRegisters& regs() { return m_regs; }
    ThreadRegisters const& regs() const { return m_regs; }

    State state() const { return m_state; }
    StringView state_string() const;

    // Userspace TLS region for this thread.
    VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
    size_t thread_specific_region_size() const;
    size_t thread_specific_region_alignment() const;
789
    // Yields repeatedly while this thread is in the Stopped state.
    // NOTE: m_lock is dropped around each yield (we must not hold a spinlock
    // while scheduling away) and re-acquired before re-checking the state.
    ALWAYS_INLINE void yield_if_stopped()
    {
        // If some thread stopped us, we need to yield to someone else
        // We check this when entering/exiting a system call. A thread
        // may continue to execute in user land until the next timer
        // tick or entering the next system call, or if it's in kernel
        // mode then we will intercept prior to returning back to user
        // mode.
        SpinlockLocker lock(m_lock);
        while (state() == Thread::State::Stopped) {
            lock.unlock();
            // We shouldn't be holding the big lock here
            yield_without_releasing_big_lock();
            lock.lock();
        }
    }
806
    // Blocks on a kernel Mutex (out-of-line; used by Mutex itself).
    void block(Kernel::Mutex&, SpinlockLocker<Spinlock<LockRank::None>>&, u32);

    // Constructs a BlockerType on the stack from args and blocks on it until
    // it unblocks, the timeout fires, a signal arrives, or the thread dies.
    template<typename BlockerType, class... Args>
    BlockResult block(BlockTimeout const& timeout, Args&&... args)
    {
        BlockerType blocker(forward<Args>(args)...);
        return block_impl(timeout, blocker);
    }

    u32 unblock_from_mutex(Kernel::Mutex&);
    void unblock_from_blocker(Blocker&);
    void unblock(u8 signal = 0);

    // Convenience wrapper: block on a WaitQueue. Must be called on the
    // current thread (VERIFYed).
    template<class... Args>
    Thread::BlockResult wait_on(WaitQueue& wait_queue, Thread::BlockTimeout const& timeout, Args&&... args)
    {
        VERIFY(this == Thread::current());
        return block<Thread::WaitQueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
    }

    // Sleep for a duration / until a deadline; the clockless overloads
    // default to CLOCK_MONOTONIC_COARSE.
    BlockResult sleep(clockid_t, Time const&, Time* = nullptr);
    BlockResult sleep(Time const& duration, Time* remaining_time = nullptr)
    {
        return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
    }
    BlockResult sleep_until(clockid_t, Time const&);
    BlockResult sleep_until(Time const& duration)
    {
        return sleep_until(CLOCK_MONOTONIC_COARSE, duration);
    }
837
    // Tell this thread to unblock if needed,
    // gracefully unwind the stack and die.
    void set_should_die();
    [[nodiscard]] bool should_die() const { return m_should_die; }
    void die_if_needed();

    void exit(void* = nullptr);

    // Scheduler bookkeeping: time accounting and the per-quantum tick budget.
    void update_time_scheduled(u64, bool, bool);
    bool tick();
    void set_ticks_left(u32 t) { m_ticks_left = t; }
    u32 ticks_left() const { return m_ticks_left; }

    FlatPtr kernel_stack_base() const { return m_kernel_stack_base; }
    FlatPtr kernel_stack_top() const { return m_kernel_stack_top; }

    void set_state(State, u8 = 0);

    [[nodiscard]] bool is_initialized() const { return m_initialized; }
    void set_initialized(bool initialized) { m_initialized = initialized; }

    // Signal delivery and signal-mask manipulation (sigprocmask-style).
    void send_urgent_signal_to_self(u8 signal);
    void send_signal(u8 signal, Process* sender);

    u32 update_signal_mask(u32 signal_mask);
    u32 signal_mask_block(sigset_t signal_set, bool block);
    u32 signal_mask() const;
    void reset_signals_for_exec();
866
    // Hardware debug register access (ptrace support).
    ErrorOr<FlatPtr> peek_debug_register(u32 register_index);
    ErrorOr<void> poke_debug_register(u32 register_index, FlatPtr data);

    void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

    // Signal dispatch machinery; see DispatchSignalResult above.
    DispatchSignalResult dispatch_one_pending_signal();
    DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
    DispatchSignalResult dispatch_signal(u8 signal);
    void check_dispatch_pending_signal();
    [[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
    [[nodiscard]] bool should_ignore_signal(u8 signal) const;
    [[nodiscard]] bool has_signal_handler(u8 signal) const;
    [[nodiscard]] bool is_signal_masked(u8 signal) const;
    u32 pending_signals() const;
    u32 pending_signals_for_state() const;

    // sigaltstack support.
    [[nodiscard]] bool has_alternative_signal_stack() const;
    [[nodiscard]] bool is_in_alternative_signal_stack() const;

    FPUState& fpu_state() { return m_fpu_state; }

    ErrorOr<void> make_thread_specific_region(Badge<Process>);

    // Per-thread statistics counters (not atomic; updated by this thread).
    unsigned syscall_count() const { return m_syscall_count; }
    void did_syscall() { ++m_syscall_count; }
    unsigned inode_faults() const { return m_inode_faults; }
    void did_inode_fault() { ++m_inode_faults; }
    unsigned zero_faults() const { return m_zero_faults; }
    void did_zero_fault() { ++m_zero_faults; }
    unsigned cow_faults() const { return m_cow_faults; }
    void did_cow_fault() { ++m_cow_faults; }
901
902 void did_file_read(unsigned bytes)
903 {
904 m_file_read_bytes += bytes;
905 }
906
907 void did_file_write(unsigned bytes)
908 {
909 m_file_write_bytes += bytes;
910 }
911
912 unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
913 unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }
914
915 void did_unix_socket_read(unsigned bytes)
916 {
917 m_unix_socket_read_bytes += bytes;
918 }
919
920 void did_unix_socket_write(unsigned bytes)
921 {
922 m_unix_socket_write_bytes += bytes;
923 }
924
925 unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
926 unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }
927
928 void did_ipv4_socket_read(unsigned bytes)
929 {
930 m_ipv4_socket_read_bytes += bytes;
931 }
932
933 void did_ipv4_socket_write(unsigned bytes)
934 {
935 m_ipv4_socket_write_bytes += bytes;
936 }
937
    // Whether this thread is currently executing on some processor.
    void set_active(bool active) { m_is_active = active; }

    // Critical-section nesting level saved across context switches.
    u32 saved_critical() const { return m_saved_critical; }
    void save_critical(u32 critical) { m_saved_critical = critical; }

    // Lock-rank tracking for deadlock-order validation.
    void track_lock_acquire(LockRank rank);
    void track_lock_release(LockRank rank);

    [[nodiscard]] bool is_active() const { return m_is_active; }

    [[nodiscard]] bool is_finalizable() const
    {
        // We can't finalize as long as this thread is still running
        // Note that checking for Running state here isn't sufficient
        // as the thread may not be in Running state but switching out.
        // m_is_active is set to false once the context switch is
        // complete and the thread is not executing on any processor.
        if (m_is_active.load(AK::memory_order_acquire))
            return false;
        // We can't finalize until the thread is either detached or
        // a join has started. We can't make m_is_joinable atomic
        // because that would introduce a race in try_join.
        SpinlockLocker lock(m_lock);
        return !m_is_joinable;
    }
963
    ErrorOr<NonnullLockRefPtr<Thread>> try_clone(Process&);

    // Iterate over all threads (optionally filtered by state). The
    // IteratorFunction overloads allow early exit via IterationDecision.
    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<IteratorFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each_in_state(State, Callback);
    template<VoidFunction<Thread&> Callback>
    static IterationDecision for_each(Callback);

    static constexpr u32 default_kernel_stack_size = 65536;
    static constexpr u32 default_userspace_stack_size = 1 * MiB;

    // Cumulative scheduled time, split by user/kernel mode.
    u64 time_in_user() const { return m_total_time_scheduled_user.load(AK::MemoryOrder::memory_order_relaxed); }
    u64 time_in_kernel() const { return m_total_time_scheduled_kernel.load(AK::MemoryOrder::memory_order_relaxed); }

    // Execution mode the thread was in before entering the kernel; returns
    // true only when the mode actually changed.
    ExecutionMode previous_mode() const { return m_previous_mode; }
    bool set_previous_mode(ExecutionMode mode)
    {
        if (m_previous_mode == mode)
            return false;
        m_previous_mode = mode;
        return true;
    }

    TrapFrame*& current_trap() { return m_current_trap; }
    TrapFrame const* const& current_trap() const { return m_current_trap; }

    RecursiveSpinlock<LockRank::Thread>& get_lock() const { return m_lock; }
995
996#if LOCK_DEBUG
    // Debug-only bookkeeping (compiled under LOCK_DEBUG): records that this
    // thread acquired (refs_delta > 0) or released (refs_delta < 0) `lock`,
    // keeping m_holding_locks_list consistent with the aggregate counter.
    void holding_lock(Mutex& lock, int refs_delta, LockLocation const& location)
    {
        VERIFY(refs_delta != 0);
        // Relaxed ordering is sufficient; the counter is purely diagnostic.
        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
        SpinlockLocker list_lock(m_holding_locks_lock);
        if (refs_delta > 0) {
            // Acquire path: bump the count on an existing entry, or append one.
            bool have_existing = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    have_existing = true;
                    info.count += refs_delta;
                    break;
                }
            }
            if (!have_existing)
                // NOTE(review): a fresh entry starts at count 1 even if
                // refs_delta > 1 — confirm callers only pass 1 on first acquire.
                m_holding_locks_list.append({ &lock, location, 1 });
        } else {
            // Release path: decrement, dropping the entry at zero.
            VERIFY(refs_delta < 0);
            bool found = false;
            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                auto& info = m_holding_locks_list[i];
                if (info.lock == &lock) {
                    VERIFY(info.count >= (unsigned)-refs_delta);
                    info.count -= (unsigned)-refs_delta;
                    if (info.count == 0)
                        m_holding_locks_list.remove(i);
                    found = true;
                    break;
                }
            }
            // Releasing a lock that was never recorded is a bookkeeping bug.
            VERIFY(found);
        }
    }
    // Number of mutex references this thread currently holds (LOCK_DEBUG only).
    u32 lock_count() const
    {
        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
    }
1035#endif
1036
    // True while this thread is servicing a page fault; see set_handling_page_fault().
    bool is_handling_page_fault() const
    {
        return m_handling_page_fault;
    }
1041 void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
1042 void set_idle_thread() { m_is_idle_thread = true; }
1043 bool is_idle_thread() const { return m_is_idle_thread; }
1044
1045 void set_crashing() { m_is_crashing = true; }
1046 [[nodiscard]] bool is_crashing() const { return m_is_crashing; }
1047
    // Increments the nested-profiler call counter; returns the previous depth.
    ALWAYS_INLINE u32 enter_profiler()
    {
        return m_nested_profiler_calls.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
    }
1052
    // Decrements the nested-profiler call counter; returns the previous depth.
    // NOTE(review): uses acquire while enter_profiler() uses acq_rel —
    // confirm the asymmetric memory order is intentional.
    ALWAYS_INLINE u32 leave_profiler()
    {
        return m_nested_profiler_calls.fetch_sub(1, AK::MemoryOrder::memory_order_acquire);
    }
1057
1058 bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
1059 void set_profiling_suppressed() { m_is_profiling_suppressed = true; }
1060
1061 bool is_promise_violation_pending() const { return m_is_promise_violation_pending; }
1062 void set_promise_violation_pending(bool value) { m_is_promise_violation_pending = value; }
1063
1064 bool is_allocation_enabled() const { return m_allocation_enabled; }
1065 void set_allocation_enabled(bool value) { m_allocation_enabled = value; }
1066
1067 ErrorOr<NonnullOwnPtr<KString>> backtrace();
1068
1069 Blocker const* blocker() const { return m_blocker; };
1070 Kernel::Mutex const* blocking_mutex() const { return m_blocking_mutex; }
1071
1072#if LOCK_DEBUG
1073 struct HoldingLockInfo {
1074 Mutex* lock;
1075 LockLocation lock_location;
1076 unsigned count;
1077 };
1078
1079 template<IteratorFunction<HoldingLockInfo const&> Callback>
1080 void for_each_held_lock(Callback);
1081 template<VoidFunction<HoldingLockInfo const&> Callback>
1082 void for_each_held_lock(Callback);
1083#endif
1084
1085private:
1086 Thread(NonnullLockRefPtr<Process>, NonnullOwnPtr<Memory::Region>, NonnullLockRefPtr<Timer>, NonnullOwnPtr<KString>);
1087
1088 BlockResult block_impl(BlockTimeout const&, Blocker&);
1089
1090 IntrusiveListNode<Thread> m_process_thread_list_node;
1091 int m_runnable_priority { -1 };
1092
1093 friend class WaitQueue;
1094
    // Coordinates join()-style waiters with this thread's exit: the dying
    // thread publishes its exit value exactly once and wakes any JoinBlocker
    // currently blocked on it.
    class JoinBlockerSet final : public BlockerSet {
    public:
        // Called by the exiting thread. May only happen once per thread.
        void thread_did_exit(void* exit_value)
        {
            SpinlockLocker lock(m_lock);
            VERIFY(!m_thread_did_exit);
            m_thread_did_exit = true;
            // Release pairs with the acquire load in exit_value().
            m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
            do_unblock_joiner();
        }
        // Called during finalization to wake any remaining joiner.
        void thread_finalizing()
        {
            SpinlockLocker lock(m_lock);
            do_unblock_joiner();
        }
        // Only valid after thread_did_exit() has published a value.
        void* exit_value() const
        {
            VERIFY(m_thread_did_exit);
            return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
        }

        // Unblocks `blocker` immediately if the thread has already exited.
        void try_unblock(JoinBlocker& blocker)
        {
            SpinlockLocker lock(m_lock);
            if (m_thread_did_exit)
                blocker.unblock(exit_value(), false);
        }

    protected:
        // Rejects adding a blocker (and unblocks it on the spot) if the
        // thread already exited, so the joiner never blocks needlessly.
        virtual bool should_add_blocker(Blocker& b, void*) override
        {
            VERIFY(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);

            // NOTE: m_lock is held already!
            if (m_thread_did_exit) {
                blocker.unblock(exit_value(), true);
                return false;
            }
            return true;
        }

    private:
        // Wakes every Join blocker whose condition is now met.
        void do_unblock_joiner()
        {
            unblock_all_blockers_whose_conditions_are_met_locked([&](Blocker& b, void*, bool&) {
                VERIFY(b.blocker_type() == Blocker::Type::Join);
                auto& blocker = static_cast<JoinBlocker&>(b);
                return blocker.unblock(exit_value(), false);
            });
        }

        // Exit value published by thread_did_exit(); read via exit_value().
        Atomic<void*> m_exit_value { nullptr };
        // Guarded by m_lock (from BlockerSet).
        bool m_thread_did_exit { false };
    };
1150
1151 LockMode unlock_process_if_locked(u32&);
1152 void relock_process(LockMode, u32);
1153 void reset_fpu_state();
1154
1155 mutable RecursiveSpinlock<LockRank::Thread> m_lock {};
1156 mutable RecursiveSpinlock<LockRank::None> m_block_lock {};
1157 NonnullLockRefPtr<Process> m_process;
1158 ThreadID m_tid { -1 };
1159 ThreadRegisters m_regs {};
1160 DebugRegisterState m_debug_register_state {};
1161 TrapFrame* m_current_trap { nullptr };
1162 u32 m_saved_critical { 1 };
1163 IntrusiveListNode<Thread> m_ready_queue_node;
1164 Atomic<u32> m_cpu { 0 };
1165 u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
1166 Optional<u64> m_last_time_scheduled;
1167 Atomic<u64> m_total_time_scheduled_user { 0 };
1168 Atomic<u64> m_total_time_scheduled_kernel { 0 };
1169 u32 m_ticks_left { 0 };
1170 u32 m_times_scheduled { 0 };
1171 u32 m_ticks_in_user { 0 };
1172 u32 m_ticks_in_kernel { 0 };
1173 u32 m_pending_signals { 0 };
1174 u8 m_currently_handled_signal { 0 };
1175 u32 m_signal_mask { 0 };
1176 FlatPtr m_alternative_signal_stack { 0 };
1177 FlatPtr m_alternative_signal_stack_size { 0 };
1178 SignalBlockerSet m_signal_blocker_set;
1179 FlatPtr m_kernel_stack_base { 0 };
1180 FlatPtr m_kernel_stack_top { 0 };
1181 NonnullOwnPtr<Memory::Region> m_kernel_stack_region;
1182 VirtualAddress m_thread_specific_data;
1183 Optional<Memory::VirtualRange> m_thread_specific_range;
1184 Array<Optional<u32>, NSIG> m_signal_action_masks;
1185 Array<ProcessID, NSIG> m_signal_senders;
1186 Blocker* m_blocker { nullptr };
1187 Kernel::Mutex* m_blocking_mutex { nullptr };
1188 u32 m_lock_requested_count { 0 };
1189 IntrusiveListNode<Thread> m_blocked_threads_list_node;
1190 LockRank m_lock_rank_mask {};
1191 bool m_allocation_enabled { true };
1192
1193 // FIXME: remove this after annihilating Process::m_big_lock
1194 IntrusiveListNode<Thread> m_big_lock_blocked_threads_list_node;
1195
1196#if LOCK_DEBUG
1197 Atomic<u32> m_holding_locks { 0 };
1198 Spinlock<LockRank::None> m_holding_locks_lock {};
1199 Vector<HoldingLockInfo> m_holding_locks_list;
1200#endif
1201
1202 JoinBlockerSet m_join_blocker_set;
1203 Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
1204 bool m_is_joinable { true };
1205 bool m_handling_page_fault { false };
1206 ExecutionMode m_previous_mode { ExecutionMode::Kernel }; // We always start out in kernel mode
1207
1208 unsigned m_syscall_count { 0 };
1209 unsigned m_inode_faults { 0 };
1210 unsigned m_zero_faults { 0 };
1211 unsigned m_cow_faults { 0 };
1212
1213 unsigned m_file_read_bytes { 0 };
1214 unsigned m_file_write_bytes { 0 };
1215
1216 unsigned m_unix_socket_read_bytes { 0 };
1217 unsigned m_unix_socket_write_bytes { 0 };
1218
1219 unsigned m_ipv4_socket_read_bytes { 0 };
1220 unsigned m_ipv4_socket_write_bytes { 0 };
1221
1222 FPUState m_fpu_state {};
1223 State m_state { Thread::State::Invalid };
1224 SpinlockProtected<NonnullOwnPtr<KString>, LockRank::None> m_name;
1225 u32 m_priority { THREAD_PRIORITY_NORMAL };
1226
1227 State m_stop_state { Thread::State::Invalid };
1228
1229 bool m_dump_backtrace_on_finalization { false };
1230 bool m_should_die { false };
1231 bool m_initialized { false };
1232 bool m_is_idle_thread { false };
1233 bool m_is_crashing { false };
1234 bool m_is_promise_violation_pending { false };
1235 Atomic<bool> m_have_any_unmasked_pending_signals { false };
1236 Atomic<u32> m_nested_profiler_calls { 0 };
1237
1238 NonnullLockRefPtr<Timer> m_block_timer;
1239
1240 bool m_is_profiling_suppressed { false };
1241
1242 void yield_and_release_relock_big_lock();
1243
1244 enum class VerifyLockNotHeld {
1245 Yes,
1246 No
1247 };
1248
1249 void yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes);
1250 void drop_thread_count();
1251
1252 mutable IntrusiveListNode<Thread> m_global_thread_list_node;
1253
1254public:
1255 using ListInProcess = IntrusiveList<&Thread::m_process_thread_list_node>;
1256 using GlobalList = IntrusiveList<&Thread::m_global_thread_list_node>;
1257
1258 static SpinlockProtected<GlobalList, LockRank::None>& all_instances();
1259};
1260
1261AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
1262
1263template<IteratorFunction<Thread&> Callback>
1264inline IterationDecision Thread::for_each(Callback callback)
1265{
1266 return Thread::all_instances().with([&](auto& list) -> IterationDecision {
1267 for (auto& thread : list) {
1268 IterationDecision decision = callback(thread);
1269 if (decision != IterationDecision::Continue)
1270 return decision;
1271 }
1272 return IterationDecision::Continue;
1273 });
1274}
1275
1276template<IteratorFunction<Thread&> Callback>
1277inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
1278{
1279 return Thread::all_instances().with([&](auto& list) -> IterationDecision {
1280 for (auto& thread : list) {
1281 if (thread.state() != state)
1282 continue;
1283 IterationDecision decision = callback(thread);
1284 if (decision != IterationDecision::Continue)
1285 return decision;
1286 }
1287 return IterationDecision::Continue;
1288 });
1289}
1290
1291template<VoidFunction<Thread&> Callback>
1292inline IterationDecision Thread::for_each(Callback callback)
1293{
1294 return Thread::all_instances().with([&](auto& list) {
1295 for (auto& thread : list) {
1296 if (callback(thread) == IterationDecision::Break)
1297 return IterationDecision::Break;
1298 }
1299 return IterationDecision::Continue;
1300 });
1301}
1302
1303template<VoidFunction<Thread&> Callback>
1304inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
1305{
1306 return for_each_in_state(state, [&](auto& thread) {
1307 callback(thread);
1308 return IterationDecision::Continue;
1309 });
1310}
1311
1312#if LOCK_DEBUG
1313template<IteratorFunction<Thread::HoldingLockInfo const&> Callback>
1314inline void Thread::for_each_held_lock(Callback callback)
1315{
1316 SpinlockLocker list_lock(m_holding_locks_lock);
1317
1318 for (auto const& lock_info : m_holding_locks_list) {
1319 if (callback(lock_info) == IterationDecision::Break)
1320 break;
1321 }
1322}
1323
1324template<VoidFunction<Thread::HoldingLockInfo const&> Callback>
1325inline void Thread::for_each_held_lock(Callback callback)
1326{
1327 for_each_held_lock([&](auto const& lock_info) {
1328 callback(lock_info);
1329 return IterationDecision::Continue;
1330 });
1331}
1332#endif
1333
1334}
1335
// Lets Kernel::Thread be passed directly to AK's format()/dbgln() machinery;
// the format() member is defined out of line.
template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    ErrorOr<void> format(FormatBuilder&, Kernel::Thread const&);
};