/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include <AK/HashTable.h>
#include <AK/NonnullRefPtrVector.h>
#include <AK/String.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Forward.h>
#include <Kernel/VM/PhysicalPage.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/VMObject.h>

namespace Kernel {

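// Rounds a size or address up to the next page boundary; with 4 KiB pages,
// PAGE_ROUND_UP(5000) == 8192 and PAGE_ROUND_UP(4096) == 4096.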
#define PAGE_ROUND_UP(x) ((((u32)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))

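// Low physical memory is also mapped at a fixed +0xc0000000 offset in kernel
// address space, so translating between the two views of that range is plain
// pointer arithmetic.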
template<typename T>
inline T* low_physical_to_virtual(T* physical)
{
    return (T*)(((u8*)physical) + 0xc0000000);
}

inline u32 low_physical_to_virtual(u32 physical)
{
    return physical + 0xc0000000;
}

template<typename T>
inline T* virtual_to_low_physical(T* vaddr)
{
    return (T*)(((u8*)vaddr) - 0xc0000000);
}

inline u32 virtual_to_low_physical(u32 vaddr)
{
    return vaddr - 0xc0000000;
}

class KBuffer;
class SynthFSInode;

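// Shorthand for the MemoryManager singleton, e.g. MM.allocate_user_physical_page().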
#define MM Kernel::MemoryManager::the()

class MemoryManager {
    AK_MAKE_ETERNAL
    friend class PageDirectory;
    friend class PhysicalPage;
    friend class PhysicalRegion;
    friend class Region;
    friend class VMObject;
    friend Optional<KBuffer> procfs$mm(InodeIdentifier);
    friend Optional<KBuffer> procfs$memstat(InodeIdentifier);

public:
    static MemoryManager& the();

    static void initialize();

    PageFaultResponse handle_page_fault(const PageFault&);

    void enter_process_paging_scope(Process&);

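    // Validation helpers for addresses handed to the kernel: each checks that
    // the whole range is mapped for the given process with the required access.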
    bool validate_user_stack(const Process&, VirtualAddress) const;
    bool validate_user_read(const Process&, VirtualAddress, size_t) const;
    bool validate_user_write(const Process&, VirtualAddress, size_t) const;

    bool validate_kernel_read(const Process&, VirtualAddress, size_t) const;

    bool can_read_without_faulting(const Process&, VirtualAddress, size_t) const;

    enum class ShouldZeroFill {
        No,
        Yes
    };

    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
    RefPtr<PhysicalPage> allocate_supervisor_physical_page();
    void deallocate_user_physical_page(PhysicalPage&&);
    void deallocate_supervisor_physical_page(PhysicalPage&&);

    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access, bool cacheable = false);
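    // Example (sketch): carving out a hypothetical read/write kernel-only region,
    // using the Region::Access flags from Kernel/VM/Region.h:
    //   auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), "Scratch", Region::Access::Read | Region::Access::Write);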

    unsigned user_physical_pages() const { return m_user_physical_pages; }
    unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }
    unsigned super_physical_pages() const { return m_super_physical_pages; }
    unsigned super_physical_pages_used() const { return m_super_physical_pages_used; }

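    // Visits every live VMObject; the callback can stop the walk early by
    // returning IterationDecision::Break.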
    template<typename Callback>
    static void for_each_vmobject(Callback callback)
    {
        for (auto& vmobject : MM.m_vmobjects) {
            if (callback(vmobject) == IterationDecision::Break)
                break;
        }
    }

    static Region* region_from_vaddr(Process&, VirtualAddress);
    static const Region* region_from_vaddr(const Process&, VirtualAddress);

    void dump_kernel_regions();

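    // A single physical page of zeroes that zero-filled pages can share until
    // first write; see PhysicalPage::is_shared_zero_page() at the bottom of this file.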
    PhysicalPage& shared_zero_page() { return *m_shared_zero_page; }

private:
    MemoryManager();
    ~MemoryManager();

    enum class AccessSpace { Kernel, User };
    enum class AccessType { Read, Write };
    template<AccessSpace, AccessType>
    bool validate_range(const Process&, VirtualAddress, size_t) const;

    void register_vmobject(VMObject&);
    void unregister_vmobject(VMObject&);
    void register_region(Region&);
    void unregister_region(Region&);

    void detect_cpu_features();
    void setup_low_identity_mapping();
    void protect_kernel_image();
    void parse_memory_map();
    void flush_entire_tlb();
    void flush_tlb(VirtualAddress);

    static Region* user_region_from_vaddr(Process&, VirtualAddress);
    static Region* kernel_region_from_vaddr(VirtualAddress);

    static Region* region_from_vaddr(VirtualAddress);

    RefPtr<PhysicalPage> find_free_user_physical_page();
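    // Quickmapping: temporarily map one physical page into kernel address space
    // so its contents can be accessed; m_quickmap_in_use guards the single slot.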
    u8* quickmap_page(PhysicalPage&);
    void unquickmap_page();

    PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
    PageTableEntry* quickmap_pt(PhysicalAddress);

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

    const PageTableEntry* pte(const PageDirectory&, VirtualAddress);
    PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);

    RefPtr<PageDirectory> m_kernel_page_directory;
    RefPtr<PhysicalPage> m_low_page_table;

    RefPtr<PhysicalPage> m_shared_zero_page;

    unsigned m_user_physical_pages { 0 };
    unsigned m_user_physical_pages_used { 0 };
    unsigned m_super_physical_pages { 0 };
    unsigned m_super_physical_pages_used { 0 };

    NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
    NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;

    InlineLinkedList<Region> m_user_regions;
    InlineLinkedList<Region> m_kernel_regions;

    InlineLinkedList<VMObject> m_vmobjects;

    bool m_quickmap_in_use { false };
};

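// RAII scope that switches to the given process's page tables on construction
// and restores the previously active CR3 on destruction.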
class ProcessPagingScope {
public:
    explicit ProcessPagingScope(Process&);
    ~ProcessPagingScope();

private:
    u32 m_previous_cr3 { 0 };
};

template<typename Callback>
void VMObject::for_each_region(Callback callback)
{
    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
    //        Perhaps VMObject could have a Vector<Region*> of all its mappers?
    for (auto& region : MM.m_user_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
    for (auto& region : MM.m_kernel_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
}

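// Userspace lives strictly below the 3 GiB kernel base (0xc0000000); a range
// only counts as "user" if it does not wrap around and both ends are below that base.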
inline bool is_user_address(VirtualAddress vaddr)
{
    return vaddr.get() < 0xc0000000;
}

inline bool is_user_range(VirtualAddress vaddr, size_t size)
{
    if (vaddr.offset(size) < vaddr)
        return false;
    return is_user_address(vaddr) && is_user_address(vaddr.offset(size));
}

inline bool PhysicalPage::is_shared_zero_page() const
{
    return this == &MM.shared_zero_page();
}

}