// Serenity Operating System
1/*
2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this
9 * list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#pragma once
28
29#include <AK/InlineLinkedList.h>
30#include <AK/String.h>
31#include <AK/Weakable.h>
32#include <Kernel/Heap/SlabAllocator.h>
33#include <Kernel/VM/RangeAllocator.h>
34
35namespace Kernel {
36
37class Inode;
38class VMObject;
39
// Outcome of handling a page fault: either the faulting process should
// be crashed (unresolvable fault) or execution may continue because the
// fault was successfully resolved (e.g. by mapping in a page).
enum class PageFaultResponse {
    ShouldCrash,
    Continue,
};
44
// A Region describes a contiguous range of virtual addresses (m_range)
// that is backed by a window into a VMObject, starting at
// m_offset_in_vmobject. Regions are mapped into a PageDirectory and are
// chained together via InlineLinkedListNode (m_next/m_prev).
// MemoryManager is a friend and may poke at private state directly.
class Region final
    : public InlineLinkedListNode<Region>
    , public Weakable<Region> {
    friend class MemoryManager;

    MAKE_SLAB_ALLOCATED(Region)
public:
    // Memory protection flags; combined as a bitmask in m_access.
    enum Access {
        Read = 1,
        Write = 2,
        Execute = 4,
    };

    // Factory functions for the two kinds of region. The only visible
    // difference is the user-accessibility flag; both take the virtual
    // range, the backing VMObject and the byte offset into it, a name,
    // an Access bitmask, and whether the mapping is cacheable.
    static NonnullOwnPtr<Region> create_user_accessible(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable = true);
    static NonnullOwnPtr<Region> create_kernel_only(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable = true);

    ~Region();

    // Basic geometry accessors.
    const Range& range() const { return m_range; }
    VirtualAddress vaddr() const { return m_range.base(); }
    size_t size() const { return m_range.size(); }

    // Protection / attribute queries (bits of m_access and flags below).
    bool is_readable() const { return m_access & Access::Read; }
    bool is_writable() const { return m_access & Access::Write; }
    bool is_executable() const { return m_access & Access::Execute; }
    bool is_cacheable() const { return m_cacheable; }
    const String& name() const { return m_name; }
    unsigned access() const { return m_access; }

    void set_name(const String& name) { m_name = name; }

    // The backing VMObject (never null; held by NonnullRefPtr).
    const VMObject& vmobject() const { return *m_vmobject; }
    VMObject& vmobject() { return *m_vmobject; }

    // Shared mappings are not copy-on-write cloned; see clone().
    bool is_shared() const { return m_shared; }
    void set_shared(bool shared) { m_shared = shared; }

    // Marks this region as a thread stack.
    bool is_stack() const { return m_stack; }
    void set_stack(bool stack) { m_stack = stack; }

    // Marks this region as created via mmap().
    bool is_mmap() const { return m_mmap; }
    void set_mmap(bool mmap) { m_mmap = mmap; }

    // Whether userspace may touch this mapping at all.
    bool is_user_accessible() const { return m_user_accessible; }
    void set_user_accessible(bool b) { m_user_accessible = b; }

    // Entry point for page-fault resolution on this region; presumably
    // dispatches to handle_cow_fault/handle_inode_fault/handle_zero_fault
    // below — body lives in the .cpp, confirm there.
    PageFaultResponse handle_fault(const PageFault&);

    // Produces a copy of this region (e.g. for fork()); CoW vs shared
    // semantics are decided by the implementation in the .cpp.
    NonnullOwnPtr<Region> clone();

    // True if the given address lies within this region's range.
    bool contains(VirtualAddress vaddr) const
    {
        return m_range.contains(vaddr);
    }

    // True if the given range lies entirely within this region's range.
    bool contains(const Range& range) const
    {
        return m_range.contains(range);
    }

    // Region-relative page index (0 for the first page of the region).
    unsigned page_index_from_address(VirtualAddress vaddr) const
    {
        return (vaddr - m_range.base()).get() / PAGE_SIZE;
    }

    // Index of the first backing page within the VMObject.
    size_t first_page_index() const
    {
        return m_offset_in_vmobject / PAGE_SIZE;
    }

    // Index of the last backing page within the VMObject (inclusive).
    size_t last_page_index() const
    {
        return (first_page_index() + page_count()) - 1;
    }

    // Number of pages spanned by this region.
    // NOTE(review): integer division truncates if size() is not
    // page-aligned — presumably Range guarantees page alignment; confirm.
    size_t page_count() const
    {
        return size() / PAGE_SIZE;
    }

    // Byte offset into the backing VMObject where this region starts.
    size_t offset_in_vmobject() const
    {
        return m_offset_in_vmobject;
    }

    // Ensure physical pages are present for the whole region / for one
    // page. Returns false on failure (declaration only; see .cpp).
    bool commit();
    bool commit(size_t page_index);

    // Accounting queries (declarations only; see .cpp for definitions).
    size_t amount_resident() const;
    size_t amount_shared() const;
    size_t amount_dirty() const;

    // Per-page copy-on-write state, tracked in m_cow_map.
    bool should_cow(size_t page_index) const;
    void set_should_cow(size_t page_index, bool);

    // Number of pages currently marked copy-on-write.
    u32 cow_pages() const;

    // Protection mutators; update the corresponding bit in m_access.
    void set_readable(bool b) { set_access_bit(Access::Read, b); }
    void set_writable(bool b) { set_access_bit(Access::Write, b); }
    void set_executable(bool b) { set_access_bit(Access::Execute, b); }

    void set_page_directory(PageDirectory&);
    void map(PageDirectory&);
    // Whether unmap() should also give the virtual range back to the
    // address-space allocator (default) or leave it reserved.
    enum class ShouldDeallocateVirtualMemoryRange {
        No,
        Yes,
    };
    void unmap(ShouldDeallocateVirtualMemoryRange = ShouldDeallocateVirtualMemoryRange::Yes);

    // Re-apply the current mappings/protection for the whole region or
    // a single page.
    void remap();
    void remap_page(size_t index);

    // For InlineLinkedListNode
    Region* m_next { nullptr };
    Region* m_prev { nullptr };

    // NOTE: These are public so we can make<> them.
    Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String&, u8 access, bool cacheable);

private:
    // Lazily creates the CoW bitmap on first use (hence mutable member
    // and const qualifier) — presumably; definition is in the .cpp.
    Bitmap& ensure_cow_map() const;

    // Set or clear a single Access bit in m_access.
    void set_access_bit(Access access, bool b)
    {
        if (b)
            m_access |= access;
        else
            m_access &= ~access;
    }

    // Fault handlers for the three fault flavors; see .cpp.
    PageFaultResponse handle_cow_fault(size_t page_index);
    PageFaultResponse handle_inode_fault(size_t page_index);
    PageFaultResponse handle_zero_fault(size_t page_index);

    // Writes the page-table entry for one page of this region.
    void map_individual_page_impl(size_t page_index);

    RefPtr<PageDirectory> m_page_directory; // Where this region is currently mapped (may be null if unmapped).
    Range m_range;                          // Virtual address range covered by this region.
    size_t m_offset_in_vmobject { 0 };      // Byte offset of this region's window into m_vmobject.
    NonnullRefPtr<VMObject> m_vmobject;     // Backing memory object.
    String m_name;                          // Human-readable name (e.g. for /proc).
    u8 m_access { 0 };                      // Bitmask of Access flags.
    bool m_shared : 1 { false };
    bool m_user_accessible : 1 { false };
    bool m_cacheable : 1 { false };
    bool m_stack : 1 { false };
    bool m_mmap : 1 { false };
    // One bit per page: should a write fault trigger copy-on-write?
    // mutable so ensure_cow_map() can create it from const methods.
    mutable OwnPtr<Bitmap> m_cow_map;
};
193
194}