// Serenity Operating System
1/*
2 * Copyright (c) 2020, the SerenityOS developers.
3 *
4 * SPDX-License-Identifier: BSD-2-Clause
5 */
6
7#include <Kernel/Devices/AsyncDeviceRequest.h>
8#include <Kernel/Devices/Device.h>
9
10namespace Kernel {
11
// Constructs an async request bound to the device that will service it.
// The current process is captured so that any user buffers associated with
// the request can later be accessed in the correct address space.
AsyncDeviceRequest::AsyncDeviceRequest(Device& device)
    : m_device(device)
    , m_process(Process::current())
{
}
17
AsyncDeviceRequest::~AsyncDeviceRequest()
{
    {
        // Briefly take the lock to check our invariants: the request must have
        // reached a completed state and no sub-requests may still be pending.
        SpinlockLocker lock(m_lock);
        VERIFY(is_completed_result(m_result));
        VERIFY(m_sub_requests_pending.is_empty());
    }

    // We should not need any locking here anymore. The destructor should
    // only be called after either wait() or cancel() (once implemented) returned.
    // At that point no sub-request should be adding more requests and all
    // sub-requests should be completed (either succeeded, failed, or cancelled).
    // Which means there should be no more pending sub-requests and the
    // entire AsyncDeviceRequest hierarchy should be immutable.
    while (!m_sub_requests_complete.is_empty()) {
        // Note: sub_request is ref-counted, and we use this specific pattern
        // to make sure the refcount is dropped properly: take_first() transfers
        // the list's reference into the local, which releases it at scope end.
        auto sub_request = m_sub_requests_complete.take_first();
        VERIFY(is_completed_result(sub_request->m_result)); // Shouldn't need any locking anymore
        VERIFY(sub_request->m_parent_request == this);
        sub_request->m_parent_request = nullptr;
    }
}
41
// Invoked once this request has reached a final result. The order here
// matters: first propagate completion up to a parent (if this is a
// sub-request), then let the device start its next queued request, and
// only then wake any threads blocked in wait().
void AsyncDeviceRequest::request_finished()
{
    if (m_parent_request)
        m_parent_request->sub_request_finished(*this);

    // Trigger processing the next request
    m_device.process_next_queued_request({}, *this);

    // Wake anyone who may be waiting
    m_queue.wake_all();
}
53
54auto AsyncDeviceRequest::wait(Time* timeout) -> RequestWaitResult
55{
56 VERIFY(!m_parent_request);
57 auto request_result = get_request_result();
58 if (is_completed_result(request_result))
59 return { request_result, Thread::BlockResult::NotBlocked };
60 auto wait_result = m_queue.wait_on(Thread::BlockTimeout(false, timeout), name());
61 return { get_request_result(), wait_result };
62}
63
64auto AsyncDeviceRequest::get_request_result() const -> RequestResult
65{
66 SpinlockLocker lock(m_lock);
67 return m_result;
68}
69
// Attaches a sub-request to this request. The parent pointer is set before
// taking the lock; once appended under the lock, the sub-request is started
// immediately if the parent has already been started.
void AsyncDeviceRequest::add_sub_request(NonnullLockRefPtr<AsyncDeviceRequest> sub_request)
{
    // Sub-requests cannot be for the same device
    VERIFY(&m_device != &sub_request->m_device);
    VERIFY(sub_request->m_parent_request == nullptr);
    sub_request->m_parent_request = this;

    SpinlockLocker lock(m_lock);
    VERIFY(!is_completed_result(m_result));
    m_sub_requests_pending.append(sub_request);
    // Note: the lock is handed off (moved) into do_start(), which is
    // responsible for releasing it at the appropriate point.
    if (m_result == Started)
        sub_request->do_start(move(lock));
}
83
// Called by a sub-request (via request_finished()) when it reaches a final
// state. Moves it from the pending to the complete list, and when the last
// pending sub-request finishes, aggregates the sub-results into this
// request's own result and completes it.
void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
{
    bool all_completed;
    {
        SpinlockLocker lock(m_lock);
        VERIFY(m_result == Started);

        if (m_sub_requests_pending.contains(sub_request)) {
            // Note: append handles removing from any previous intrusive list internally.
            m_sub_requests_complete.append(sub_request);
        }

        all_completed = m_sub_requests_pending.is_empty();
        if (all_completed) {
            // Aggregate any errors. Failure takes precedence over MemoryFault;
            // if no sub-request failed, the whole request succeeded.
            bool any_failures = false;
            bool any_memory_faults = false;
            for (auto& com_sub_request : m_sub_requests_complete) {
                auto sub_result = com_sub_request.get_request_result();
                VERIFY(is_completed_result(sub_result));
                switch (sub_result) {
                case Failure:
                    any_failures = true;
                    break;
                case MemoryFault:
                    any_memory_faults = true;
                    break;
                default:
                    break;
                }
                if (any_failures && any_memory_faults)
                    break; // Stop checking if all error conditions were found
            }
            if (any_failures)
                m_result = Failure;
            else if (any_memory_faults)
                m_result = MemoryFault;
            else
                m_result = Success;
        }
    }
    // Deliberately run the completion callback outside the lock: it notifies
    // our own parent, kicks the device queue, and wakes waiters.
    if (all_completed)
        request_finished();
}
128
// Marks this request as finished with the given final result and triggers
// the completion chain. Safe to call from an IRQ handler: in that case the
// notification work is deferred to a later, non-IRQ context.
void AsyncDeviceRequest::complete(RequestResult result)
{
    // Only terminal results are valid here.
    VERIFY(result == Success || result == Failure || result == MemoryFault);
    ScopedCritical critical;
    {
        SpinlockLocker lock(m_lock);
        VERIFY(m_result == Started);
        m_result = result;
    }
    if (Processor::current_in_irq()) {
        // request_finished() must not run in IRQ context (it may block or
        // start new work), so queue it as a deferred call. Take an extra
        // reference so the request stays alive until the deferred call runs.
        ref(); // Make sure we don't get freed
        Processor::deferred_call_queue([this]() {
            request_finished();
            unref();
        });
    } else {
        request_finished();
    }
}
148
149}