/*
 * Serenity Operating System
 *
 * Copyright (c) 2020, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
#pragma once

#include <AK/IntrusiveList.h>
#include <Kernel/Library/NonnullLockRefPtr.h>
#include <Kernel/Memory/ScopedAddressSpaceSwitcher.h>
#include <Kernel/Process.h>
#include <Kernel/Thread.h>
#include <Kernel/UserOrKernelBuffer.h>
#include <Kernel/WaitQueue.h>
17namespace Kernel {
18
19class Device;
20
21extern WorkQueue* g_io_work;
22
23class AsyncDeviceRequest : public AtomicRefCounted<AsyncDeviceRequest> {
24 AK_MAKE_NONCOPYABLE(AsyncDeviceRequest);
25 AK_MAKE_NONMOVABLE(AsyncDeviceRequest);
26
27public:
28 enum [[nodiscard]] RequestResult {
29 Pending = 0,
30 Started,
31 Success,
32 Failure,
33 MemoryFault,
34 OutOfMemory,
35 Cancelled
36 };
37
38 class RequestWaitResult {
39 friend class AsyncDeviceRequest;
40
41 public:
42 RequestResult request_result() const { return m_request_result; }
43 Thread::BlockResult wait_result() const { return m_wait_result; }
44
45 private:
46 RequestWaitResult(RequestResult request_result, Thread::BlockResult wait_result)
47 : m_request_result(request_result)
48 , m_wait_result(wait_result)
49 {
50 }
51
52 RequestResult m_request_result;
53 Thread::BlockResult m_wait_result;
54 };
55
56 virtual ~AsyncDeviceRequest();
57
58 virtual StringView name() const = 0;
59 virtual void start() = 0;
60
61 void add_sub_request(NonnullLockRefPtr<AsyncDeviceRequest>);
62
63 [[nodiscard]] RequestWaitResult wait(Time* = nullptr);
64
65 void do_start(SpinlockLocker<Spinlock<LockRank::None>>&& requests_lock)
66 {
67 if (is_completed_result(m_result))
68 return;
69 m_result = Started;
70 requests_lock.unlock();
71
72 start();
73 }
74
75 void complete(RequestResult result);
76
77 void set_private(void* priv)
78 {
79 VERIFY(!m_private || !priv);
80 m_private = priv;
81 }
82 void* get_private() const { return m_private; }
83
84 template<typename... Args>
85 ErrorOr<void> write_to_buffer(UserOrKernelBuffer& buffer, Args... args)
86 {
87 if (in_target_context(buffer))
88 return buffer.write(forward<Args>(args)...);
89 ScopedAddressSpaceSwitcher switcher(m_process);
90 return buffer.write(forward<Args>(args)...);
91 }
92
93 template<size_t BUFFER_BYTES, typename... Args>
94 ErrorOr<size_t> write_to_buffer_buffered(UserOrKernelBuffer& buffer, Args... args)
95 {
96 if (in_target_context(buffer))
97 return buffer.write_buffered<BUFFER_BYTES>(forward<Args>(args)...);
98 ScopedAddressSpaceSwitcher switcher(m_process);
99 return buffer.write_buffered<BUFFER_BYTES>(forward<Args>(args)...);
100 }
101
102 template<typename... Args>
103 ErrorOr<void> read_from_buffer(UserOrKernelBuffer const& buffer, Args... args)
104 {
105 if (in_target_context(buffer))
106 return buffer.read(forward<Args>(args)...);
107 ScopedAddressSpaceSwitcher switcher(m_process);
108 return buffer.read(forward<Args>(args)...);
109 }
110
111 template<size_t BUFFER_BYTES, typename... Args>
112 ErrorOr<size_t> read_from_buffer_buffered(UserOrKernelBuffer const& buffer, Args... args)
113 {
114 if (in_target_context(buffer))
115 return buffer.read_buffered<BUFFER_BYTES>(forward<Args>(args)...);
116 ScopedAddressSpaceSwitcher switcher(m_process);
117 return buffer.read_buffered<BUFFER_BYTES>(forward<Args>(args)...);
118 }
119
120protected:
121 AsyncDeviceRequest(Device&);
122
123 RequestResult get_request_result() const;
124
125private:
126 void sub_request_finished(AsyncDeviceRequest&);
127 void request_finished();
128
129 [[nodiscard]] bool in_target_context(UserOrKernelBuffer const& buffer) const
130 {
131 if (buffer.is_kernel_buffer())
132 return true;
133 return m_process == &Process::current();
134 }
135
136 [[nodiscard]] static bool is_completed_result(RequestResult result)
137 {
138 return result > Started;
139 }
140
141 Device& m_device;
142
143 AsyncDeviceRequest* m_parent_request { nullptr };
144 RequestResult m_result { Pending };
145 IntrusiveListNode<AsyncDeviceRequest, LockRefPtr<AsyncDeviceRequest>> m_list_node;
146
147 using AsyncDeviceSubRequestList = IntrusiveList<&AsyncDeviceRequest::m_list_node>;
148
149 AsyncDeviceSubRequestList m_sub_requests_pending;
150 AsyncDeviceSubRequestList m_sub_requests_complete;
151 WaitQueue m_queue;
152 NonnullLockRefPtr<Process> m_process;
153 void* m_private { nullptr };
154 mutable Spinlock<LockRank::None> m_lock {};
155};
156
157}