/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2022, Idan Horowitz <idan.horowitz@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
7
#include <Kernel/Debug.h>
#include <Kernel/KSyms.h>
#include <Kernel/Locking/LockLocation.h>
#include <Kernel/Locking/Mutex.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Thread.h>

// Defined elsewhere in the kernel; presumably true until early boot has
// finished. Consulted by the LOCK_IN_CRITICAL_DEBUG checks below, since
// interrupts are not enabled during early boot.
extern bool g_in_early_boot;

namespace Kernel {
void Mutex::lock(Mode mode, [[maybe_unused]] LockLocation const& location)
{
    // Acquires this mutex in the requested mode (Exclusive or Shared),
    // blocking the calling thread until the acquisition can be granted.
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    VERIFY(!Processor::current_in_irq());
    if constexpr (LOCK_IN_CRITICAL_DEBUG) {
        // There are no interrupts enabled in early boot.
        if (!g_in_early_boot)
            VERIFY_INTERRUPTS_ENABLED();
    }
    VERIFY(mode != Mode::Unlocked);
    auto* current_thread = Thread::current();

    SpinlockLocker lock(m_lock);
    bool did_block = false;
    Mode current_mode = m_mode;
    switch (current_mode) {
    case Mode::Unlocked: {
        // Fast path: nobody holds the mutex, take it immediately.
        dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ ({}) {}: acquire {}, currently unlocked", this, m_name, mode_to_string(mode));
        m_mode = mode;
        VERIFY(!m_holder);
        VERIFY(m_shared_holders == 0);
        if (mode == Mode::Exclusive) {
            m_holder = current_thread;
        } else {
            VERIFY(mode == Mode::Shared);
            ++m_shared_holders;
#if LOCK_SHARED_UPGRADE_DEBUG
            m_shared_holders_map.set(current_thread, 1);
#endif
        }
        VERIFY(m_times_locked == 0);
        m_times_locked++;

#if LOCK_DEBUG
        if (current_thread) {
            current_thread->holding_lock(*this, 1, location);
        }
#endif
        return;
    }
    case Mode::Exclusive: {
        VERIFY(m_holder);
        if (m_holder != current_thread) {
            // Another thread holds it exclusively; wait until it is handed to us.
            block(*current_thread, mode, lock, 1);
            did_block = true;
            // If we blocked then m_mode should have been updated to what we requested
            VERIFY(m_mode == mode);
        }

        if (m_mode == Mode::Exclusive) {
            VERIFY(m_holder == current_thread);
            VERIFY(m_shared_holders == 0);
        } else if (did_block && mode == Mode::Shared) {
            // Only if we blocked trying to acquire a shared lock the lock would have been converted
            VERIFY(!m_holder);
            VERIFY(m_shared_holders > 0);
        }

        if constexpr (LOCK_TRACE_DEBUG) {
            if (mode == Mode::Exclusive)
                dbgln("Mutex::lock @ {} ({}): acquire {}, currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
            else
                dbgln("Mutex::lock @ {} ({}): acquire exclusive (requested {}), currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
        }

        VERIFY(m_times_locked > 0);
        if (!did_block) {
            // if we didn't block we must still be an exclusive lock
            VERIFY(m_mode == Mode::Exclusive);
            // Recursive acquisition by the current holder: just bump the count.
            m_times_locked++;
        }

#if LOCK_DEBUG
        current_thread->holding_lock(*this, 1, location);
#endif
        return;
    }
    case Mode::Shared: {
        // Shared state is only reachable for Regular mutexes, never BigLock.
        VERIFY(m_behavior == MutexBehavior::Regular);
        VERIFY(!m_holder);
        if (mode == Mode::Exclusive) {
            dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}): blocking for exclusive access, currently shared, locks held {}", this, m_name, m_times_locked);
#if LOCK_SHARED_UPGRADE_DEBUG
            VERIFY(m_shared_holders_map.size() != 1 || m_shared_holders_map.begin()->key != current_thread);
#endif
            // WARNING: The following block will deadlock if the current thread is the only shared locker of this Mutex
            // and is asking to upgrade the lock to be exclusive without first releasing the shared lock. We have no
            // allocation-free way to detect such a scenario, so if you suspect that this is the cause of your deadlock,
            // try turning on LOCK_SHARED_UPGRADE_DEBUG.
            block(*current_thread, mode, lock, 1);
            did_block = true;
            VERIFY(m_mode == mode);
        }

        dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}): acquire {}, currently shared, locks held {}", this, m_name, mode_to_string(mode), m_times_locked);

        VERIFY(m_times_locked > 0);
        if (m_mode == Mode::Shared) {
            VERIFY(!m_holder);
            VERIFY(!did_block);
        } else if (did_block) {
            VERIFY(mode == Mode::Exclusive);
            VERIFY(m_holder == current_thread);
            VERIFY(m_shared_holders == 0);
        }

        if (!did_block) {
            // if we didn't block we must still be a shared lock
            VERIFY(m_mode == Mode::Shared);
            m_times_locked++;
            VERIFY(m_shared_holders > 0);
            ++m_shared_holders;
#if LOCK_SHARED_UPGRADE_DEBUG
            // Track per-thread shared acquisition counts so the upgrade
            // deadlock VERIFY above can catch a sole-holder upgrade attempt.
            auto it = m_shared_holders_map.find(current_thread);
            if (it != m_shared_holders_map.end())
                it->value++;
            else
                m_shared_holders_map.set(current_thread, 1);
#endif
        }

#if LOCK_DEBUG
        current_thread->holding_lock(*this, 1, location);
#endif
        return;
    }
    default:
        VERIFY_NOT_REACHED();
    }
}
150
void Mutex::unlock()
{
    // Releases one acquisition of this mutex. When the last acquisition is
    // dropped the mutex becomes Unlocked and any blocked waiters are woken.
    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
    // and also from within critical sections!
    VERIFY(!Processor::current_in_irq());
    if constexpr (LOCK_IN_CRITICAL_DEBUG) {
        // There are no interrupts enabled in early boot.
        if (!g_in_early_boot)
            VERIFY_INTERRUPTS_ENABLED();
    }
    auto* current_thread = Thread::current();
    SpinlockLocker lock(m_lock);
    Mode current_mode = m_mode;
    if constexpr (LOCK_TRACE_DEBUG) {
        if (current_mode == Mode::Shared)
            dbgln("Mutex::unlock @ {} ({}): release {}, locks held: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
        else
            dbgln("Mutex::unlock @ {} ({}): release {}, holding: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
    }

    VERIFY(current_mode != Mode::Unlocked);

    VERIFY(m_times_locked > 0);
    m_times_locked--;

    switch (current_mode) {
    case Mode::Exclusive:
        // Only the exclusive holder itself may unlock.
        VERIFY(m_holder == current_thread);
        VERIFY(m_shared_holders == 0);
        if (m_times_locked == 0)
            m_holder = nullptr;
        break;
    case Mode::Shared: {
        VERIFY(!m_holder);
        VERIFY(m_shared_holders > 0);
        --m_shared_holders;
#if LOCK_SHARED_UPGRADE_DEBUG
        // Drop one shared acquisition for this thread; remove its entry
        // entirely once the per-thread count reaches zero.
        auto it = m_shared_holders_map.find(current_thread);
        if (it->value > 1)
            it->value--;
        else
            m_shared_holders_map.remove(it);
#endif
        break;
    }
    default:
        VERIFY_NOT_REACHED();
    }

#if LOCK_DEBUG
    if (current_thread) {
        current_thread->holding_lock(*this, -1, {});
    }
#endif

    if (m_times_locked == 0) {
        VERIFY(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders == 0);

        // Fully released: hand the mutex over to blocked waiters, if any.
        m_mode = Mode::Unlocked;
        unblock_waiters(current_mode);
    }
}
213
214void Mutex::block(Thread& current_thread, Mode mode, SpinlockLocker<Spinlock<LockRank::None>>& lock, u32 requested_locks)
215{
216 if constexpr (LOCK_IN_CRITICAL_DEBUG) {
217 // There are no interrupts enabled in early boot.
218 if (!g_in_early_boot)
219 VERIFY_INTERRUPTS_ENABLED();
220 }
221 m_blocked_thread_lists.with([&](auto& lists) {
222 auto append_to_list = [&]<typename L>(L& list) {
223 VERIFY(!list.contains(current_thread));
224 list.append(current_thread);
225 };
226
227 if (m_behavior == MutexBehavior::BigLock)
228 append_to_list(lists.exclusive_big_lock);
229 else
230 append_to_list(lists.list_for_mode(mode));
231 });
232
233 dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}) waiting...", this, m_name);
234 current_thread.block(*this, lock, requested_locks);
235 dbgln_if(LOCK_TRACE_DEBUG, "Mutex::lock @ {} ({}) waited", this, m_name);
236
237 m_blocked_thread_lists.with([&](auto& lists) {
238 auto remove_from_list = [&]<typename L>(L& list) {
239 VERIFY(list.contains(current_thread));
240 list.remove(current_thread);
241 };
242
243 if (m_behavior == MutexBehavior::BigLock)
244 remove_from_list(lists.exclusive_big_lock);
245 else
246 remove_from_list(lists.list_for_mode(mode));
247 });
248}
249
void Mutex::unblock_waiters(Mode previous_mode)
{
    // Called (with m_lock held) after the mutex was fully released; wakes
    // blocked waiters and re-locks the mutex on their behalf. If the mutex
    // was previously exclusive, shared waiters get first chance, and vice
    // versa, so one kind of waiter cannot indefinitely crowd out the other.
    VERIFY(m_times_locked == 0);
    VERIFY(m_mode == Mode::Unlocked);

    m_blocked_thread_lists.with([&](auto& lists) {
        // Wake *all* shared waiters at once; each becomes a shared holder
        // with however many acquisitions it originally requested.
        auto unblock_shared = [&]() {
            if (lists.shared.is_empty())
                return false;
            VERIFY(m_behavior == MutexBehavior::Regular);
            m_mode = Mode::Shared;
            for (auto& thread : lists.shared) {
                auto requested_locks = thread.unblock_from_mutex(*this);
                m_shared_holders += requested_locks;
#if LOCK_SHARED_UPGRADE_DEBUG
                auto set_result = m_shared_holders_map.set(&thread, requested_locks);
                VERIFY(set_result == AK::HashSetResult::InsertedNewEntry);
#endif
                m_times_locked += requested_locks;
            }
            return true;
        };
        // Wake a single exclusive waiter and make it the new holder.
        auto unblock_exclusive = [&]<typename L>(L& list) {
            if (auto* next_exclusive_thread = list.first()) {
                m_mode = Mode::Exclusive;
                m_times_locked = next_exclusive_thread->unblock_from_mutex(*this);
                m_holder = next_exclusive_thread;
                return true;
            }
            return false;
        };

        if (m_behavior == MutexBehavior::BigLock) {
            // Big locks only ever have exclusive waiters.
            unblock_exclusive(lists.exclusive_big_lock);
        } else if (previous_mode == Mode::Exclusive) {
            if (!unblock_shared())
                unblock_exclusive(lists.exclusive);
        } else {
            if (!unblock_exclusive(lists.exclusive))
                unblock_shared();
        }
    });
}
293
294auto Mutex::force_unlock_exclusive_if_locked(u32& lock_count_to_restore) -> Mode
295{
296 VERIFY(m_behavior == MutexBehavior::BigLock);
297 // NOTE: This may be called from an interrupt handler (not an IRQ handler)
298 // and also from within critical sections!
299 VERIFY(!Processor::current_in_irq());
300
301 auto* current_thread = Thread::current();
302 SpinlockLocker lock(m_lock);
303 auto current_mode = m_mode;
304 switch (current_mode) {
305 case Mode::Exclusive: {
306 if (m_holder != current_thread) {
307 lock_count_to_restore = 0;
308 return Mode::Unlocked;
309 }
310
311 dbgln_if(LOCK_RESTORE_DEBUG, "Mutex::force_unlock_exclusive_if_locked @ {}: unlocking exclusive with lock count: {}", this, m_times_locked);
312#if LOCK_DEBUG
313 m_holder->holding_lock(*this, -(int)m_times_locked, {});
314#endif
315 m_holder = nullptr;
316 VERIFY(m_times_locked > 0);
317 lock_count_to_restore = m_times_locked;
318 m_times_locked = 0;
319 m_mode = Mode::Unlocked;
320 unblock_waiters(Mode::Exclusive);
321 break;
322 }
323 case Mode::Unlocked: {
324 lock_count_to_restore = 0;
325 break;
326 }
327 default:
328 VERIFY_NOT_REACHED();
329 }
330 return current_mode;
331}
332
void Mutex::restore_exclusive_lock(u32 lock_count, [[maybe_unused]] LockLocation const& location)
{
    // Re-acquires a big lock exclusively with the acquisition count that a
    // prior force_unlock_exclusive_if_locked() reported, blocking first if
    // another thread took the lock in the meantime.
    VERIFY(m_behavior == MutexBehavior::BigLock);
    VERIFY(lock_count > 0);
    VERIFY(!Processor::current_in_irq());

    auto* current_thread = Thread::current();
    bool did_block = false;
    SpinlockLocker lock(m_lock);
    [[maybe_unused]] auto previous_mode = m_mode;
    if (m_mode == Mode::Exclusive && m_holder != current_thread) {
        block(*current_thread, Mode::Exclusive, lock, lock_count);
        did_block = true;
        // If we blocked then m_mode should have been updated to what we requested
        VERIFY(m_mode == Mode::Exclusive);
    }

    dbgln_if(LOCK_RESTORE_DEBUG, "Mutex::restore_exclusive_lock @ {}: restoring exclusive with lock count {}, was {}", this, lock_count, mode_to_string(previous_mode));

    VERIFY(m_mode != Mode::Shared);
    VERIFY(m_shared_holders == 0);
    if (did_block) {
        // unblock_waiters() already installed us as holder with our
        // requested count, so nothing further to do.
        VERIFY(m_times_locked > 0);
        VERIFY(m_holder == current_thread);
    } else {
        if (m_mode == Mode::Unlocked) {
            // Uncontended: take the lock and restore the saved count directly.
            m_mode = Mode::Exclusive;
            VERIFY(m_times_locked == 0);
            m_times_locked = lock_count;
            VERIFY(!m_holder);
            m_holder = current_thread;
        } else {
            // We already hold it exclusively: stack the restored count on top.
            VERIFY(m_mode == Mode::Exclusive);
            VERIFY(m_holder == current_thread);
            VERIFY(m_times_locked > 0);
            m_times_locked += lock_count;
        }
    }

#if LOCK_DEBUG
    m_holder->holding_lock(*this, (int)lock_count, location);
#endif
}

}