// Serenity Operating System
1/*
2 * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
3 * Copyright (c) 2022, the SerenityOS developers.
4 *
5 * SPDX-License-Identifier: BSD-2-Clause
6 */
7
8#pragma once
9
10#include <AK/EnumBits.h>
11#include <AK/IntrusiveList.h>
12#include <AK/IntrusiveRedBlackTree.h>
13#include <Kernel/Forward.h>
14#include <Kernel/KString.h>
15#include <Kernel/Library/LockWeakable.h>
16#include <Kernel/Locking/LockRank.h>
17#include <Kernel/Memory/PageFaultResponse.h>
18#include <Kernel/Memory/VirtualRange.h>
19#include <Kernel/Sections.h>
20#include <Kernel/UnixTypes.h>
21
// Forward-declared so Region::handle_fault() below can take a PageFault const&
// without pulling in the full page-fault machinery.
namespace Kernel {
class PageFault;
}
25
26namespace Kernel::Memory {
27
// Passed to Region::map()/unmap() to control whether the TLB is flushed
// after the page-table change takes effect.
enum class ShouldFlushTLB {
    No,
    Yes,
};
32
// A Region is a contiguous range of virtual addresses mapped (or mappable)
// into a page directory, backed by some slice of a VMObject starting at
// m_offset_in_vmobject.
class Region final
    : public LockWeakable<Region> {
    friend class AddressSpace;
    friend class MemoryManager;
    friend class RegionTree;

public:
    // Protection bits for the region. The low bits are the *current*
    // protection; the HasBeen* bits are the current bit shifted left by 4
    // and record that a protection was ever granted (see set_access_bit(),
    // which sets `access | (access << 4)`).
    enum Access : u8 {
        None = 0,
        Read = 1,
        Write = 2,
        Execute = 4,
        HasBeenReadable = 16,
        HasBeenWritable = 32,
        HasBeenExecutable = 64,
        ReadOnly = Read,
        ReadWrite = Read | Write,
        ReadWriteExecute = Read | Write | Execute,
    };

    // Whether the region's mappings should be cacheable by the CPU.
    enum class Cacheable {
        No = 0,
        Yes,
    };

    static ErrorOr<NonnullOwnPtr<Region>> try_create_user_accessible(VirtualRange const&, NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
    static ErrorOr<NonnullOwnPtr<Region>> create_unbacked();
    static ErrorOr<NonnullOwnPtr<Region>> create_unplaced(NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes, bool shared = false);

    ~Region();

    // Basic geometry of the region in virtual address space.
    [[nodiscard]] VirtualRange const& range() const { return m_range; }
    [[nodiscard]] VirtualAddress vaddr() const { return m_range.base(); }
    [[nodiscard]] size_t size() const { return m_range.size(); }

    // Current protection queries (low Access bits).
    [[nodiscard]] bool is_readable() const { return (m_access & Access::Read) == Access::Read; }
    [[nodiscard]] bool is_writable() const { return (m_access & Access::Write) == Access::Write; }
    [[nodiscard]] bool is_executable() const { return (m_access & Access::Execute) == Access::Execute; }

    // Historical protection queries: true if the protection was ever granted,
    // even if it has since been revoked (HasBeen* bits).
    [[nodiscard]] bool has_been_readable() const { return (m_access & Access::HasBeenReadable) == Access::HasBeenReadable; }
    [[nodiscard]] bool has_been_writable() const { return (m_access & Access::HasBeenWritable) == Access::HasBeenWritable; }
    [[nodiscard]] bool has_been_executable() const { return (m_access & Access::HasBeenExecutable) == Access::HasBeenExecutable; }

    [[nodiscard]] bool is_cacheable() const { return m_cacheable; }
    // name() returns an empty view when the region is unnamed.
    [[nodiscard]] StringView name() const { return m_name ? m_name->view() : StringView {}; }
    // Transfers ownership of the name; the region is left unnamed.
    [[nodiscard]] OwnPtr<KString> take_name() { return move(m_name); }
    [[nodiscard]] Region::Access access() const { return static_cast<Region::Access>(m_access); }

    void set_name(OwnPtr<KString> name) { m_name = move(name); }

    // The VMObject providing the physical pages behind this region.
    [[nodiscard]] VMObject const& vmobject() const { return *m_vmobject; }
    [[nodiscard]] VMObject& vmobject() { return *m_vmobject; }
    void set_vmobject(NonnullLockRefPtr<VMObject>&&);

    [[nodiscard]] bool is_shared() const { return m_shared; }
    void set_shared(bool shared) { m_shared = shared; }

    [[nodiscard]] bool is_stack() const { return m_stack; }
    void set_stack(bool stack) { m_stack = stack; }

    // Immutability is one-way: once set, it cannot be cleared.
    [[nodiscard]] bool is_immutable() const { return m_immutable; }
    void set_immutable() { m_immutable = true; }

    [[nodiscard]] bool is_mmap() const { return m_mmap; }

    // Marks the region as created via mmap and remembers whether the backing
    // file description was readable/writable at mmap time.
    void set_mmap(bool mmap, bool description_was_readable, bool description_was_writable)
    {
        m_mmap = mmap;
        m_mmapped_from_readable = description_was_readable;
        m_mmapped_from_writable = description_was_writable;
    }

    [[nodiscard]] bool is_write_combine() const { return m_write_combine; }
    ErrorOr<void> set_write_combine(bool);

    // A region is "kernel" if it lies below the userspace base or at/above
    // the kernel mapping base; everything else is a user region.
    [[nodiscard]] bool is_user() const { return !is_kernel(); }
    [[nodiscard]] bool is_kernel() const { return vaddr().get() < USER_RANGE_BASE || vaddr().get() >= kernel_mapping_base; }

    PageFaultResponse handle_fault(PageFault const&);

    ErrorOr<NonnullOwnPtr<Region>> try_clone();

    [[nodiscard]] bool contains(VirtualAddress vaddr) const
    {
        return m_range.contains(vaddr);
    }

    [[nodiscard]] bool contains(VirtualRange const& range) const
    {
        return m_range.contains(range);
    }

    // Region-relative page index for a virtual address inside the region.
    [[nodiscard]] unsigned page_index_from_address(VirtualAddress vaddr) const
    {
        return (vaddr - m_range.base()).get() / PAGE_SIZE;
    }

    [[nodiscard]] VirtualAddress vaddr_from_page_index(size_t page_index) const
    {
        return vaddr().offset(page_index * PAGE_SIZE);
    }

    // Converts a VMObject-relative page index (in/out parameter) into a
    // region-relative one. Returns false when the index falls outside this
    // region's slice of the VMObject, clamping `index` to the nearest bound.
    // NOTE(review): on the over-range path the clamped value stays
    // VMObject-relative while the success path yields a region-relative
    // index — confirm callers expect this asymmetry.
    [[nodiscard]] bool translate_vmobject_page(size_t& index) const
    {
        auto first_index = first_page_index();
        if (index < first_index) {
            index = first_index;
            return false;
        }
        index -= first_index;
        auto total_page_count = this->page_count();
        if (index >= total_page_count) {
            index = first_index + total_page_count - 1;
            return false;
        }
        return true;
    }

    // Inverse direction: region-relative page index -> VMObject-relative.
    [[nodiscard]] ALWAYS_INLINE size_t translate_to_vmobject_page(size_t page_index) const
    {
        return first_page_index() + page_index;
    }

    // Index of this region's first page within its VMObject.
    [[nodiscard]] size_t first_page_index() const
    {
        return m_offset_in_vmobject / PAGE_SIZE;
    }

    [[nodiscard]] size_t page_count() const
    {
        return size() / PAGE_SIZE;
    }

    RefPtr<PhysicalPage> physical_page(size_t index) const;
    RefPtr<PhysicalPage>& physical_page_slot(size_t index);

    // Byte offset of the region's start within its VMObject.
    [[nodiscard]] size_t offset_in_vmobject() const
    {
        return m_offset_in_vmobject;
    }

    [[nodiscard]] size_t offset_in_vmobject_from_vaddr(VirtualAddress vaddr) const
    {
        return m_offset_in_vmobject + vaddr.get() - this->vaddr().get();
    }

    // Accounting helpers (implemented out-of-line).
    [[nodiscard]] size_t amount_resident() const;
    [[nodiscard]] size_t amount_shared() const;
    [[nodiscard]] size_t amount_dirty() const;

    // Copy-on-write bookkeeping, per region-relative page index.
    [[nodiscard]] bool should_cow(size_t page_index) const;
    ErrorOr<void> set_should_cow(size_t page_index, bool);

    [[nodiscard]] size_t cow_pages() const;

    // Protection setters; each also records the matching HasBeen* bit when
    // enabling (see set_access_bit()).
    void set_readable(bool b) { set_access_bit(Access::Read, b); }
    void set_writable(bool b) { set_access_bit(Access::Write, b); }
    void set_executable(bool b) { set_access_bit(Access::Execute, b); }

    // Clears *all* access bits, including the HasBeen* history — hence "unsafe".
    void unsafe_clear_access() { m_access = Region::None; }

    void set_page_directory(PageDirectory&);
    ErrorOr<void> map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes);
    void unmap(ShouldFlushTLB = ShouldFlushTLB::Yes);
    // Variant for callers that already hold the relevant page-directory lock.
    void unmap_with_locks_held(ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock<LockRank::None>>& pd_locker);

    void remap();

    // Mapped means the region has been attached to a page directory.
    [[nodiscard]] bool is_mapped() const { return m_page_directory != nullptr; }

    void clear_to_zero();

    [[nodiscard]] bool is_syscall_region() const { return m_syscall_region; }
    void set_syscall_region(bool b) { m_syscall_region = b; }

    // Whether the file description this region was mmapped from was
    // readable/writable (recorded by set_mmap()).
    [[nodiscard]] bool mmapped_from_readable() const { return m_mmapped_from_readable; }
    [[nodiscard]] bool mmapped_from_writable() const { return m_mmapped_from_writable; }

private:
    Region();
    Region(NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
    Region(VirtualRange const&, NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);

    [[nodiscard]] bool remap_vmobject_page(size_t page_index, NonnullRefPtr<PhysicalPage>);

    // Enabling a bit also latches the corresponding HasBeen* history bit
    // (Read<<4 == HasBeenReadable, etc.); disabling clears only the current bit.
    void set_access_bit(Access access, bool b)
    {
        if (b)
            m_access |= access | (access << 4);
        else
            m_access &= ~access;
    }

    // Page-fault handlers, dispatched from handle_fault().
    [[nodiscard]] PageFaultResponse handle_cow_fault(size_t page_index);
    [[nodiscard]] PageFaultResponse handle_inode_fault(size_t page_index);
    [[nodiscard]] PageFaultResponse handle_zero_fault(size_t page_index, PhysicalPage& page_in_slot_at_time_of_fault);

    [[nodiscard]] bool map_individual_page_impl(size_t page_index);
    [[nodiscard]] bool map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage>);

    LockRefPtr<PageDirectory> m_page_directory;
    VirtualRange m_range;
    size_t m_offset_in_vmobject { 0 };
    LockRefPtr<VMObject> m_vmobject;
    OwnPtr<KString> m_name;
    u8 m_access { Region::None };
    // Flags packed as single-bit bitfields.
    bool m_shared : 1 { false };
    bool m_cacheable : 1 { false };
    bool m_stack : 1 { false };
    bool m_mmap : 1 { false };
    bool m_immutable : 1 { false };
    bool m_syscall_region : 1 { false };
    bool m_write_combine : 1 { false };
    bool m_mmapped_from_readable : 1 { false };
    bool m_mmapped_from_writable : 1 { false };

    // Intrusive hooks: keyed red-black tree node (used by RegionTree) and
    // list node for the per-VMObject region list below.
    IntrusiveRedBlackTreeNode<FlatPtr, Region, RawPtr<Region>> m_tree_node;
    IntrusiveListNode<Region> m_vmobject_list_node;

public:
    using ListInVMObject = IntrusiveList<&Region::m_vmobject_list_node>;
};
254
// Enables |, &, |=, &=, ~ etc. on Region::Access (from AK/EnumBits.h).
AK_ENUM_BITWISE_OPERATORS(Region::Access)
256
257constexpr Region::Access prot_to_region_access_flags(int prot)
258{
259 Region::Access access = Region::Access::None;
260 if ((prot & PROT_READ) == PROT_READ)
261 access |= Region::Access::Read;
262 if ((prot & PROT_WRITE) == PROT_WRITE)
263 access |= Region::Access::Write;
264 if ((prot & PROT_EXEC) == PROT_EXEC)
265 access |= Region::Access::Execute;
266 return access;
267}
268
269constexpr int region_access_flags_to_prot(Region::Access access)
270{
271 int prot = 0;
272 if ((access & Region::Access::Read) == Region::Access::Read)
273 prot |= PROT_READ;
274 if ((access & Region::Access::Write) == Region::Access::Write)
275 prot |= PROT_WRITE;
276 if ((access & Region::Access::Execute) == Region::Access::Execute)
277 prot |= PROT_EXEC;
278 return prot;
279}
280
281}