// SerenityOS kernel — Kernel/VM/MemoryManager.h (228 lines, 8.1 kB)
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include <AK/HashTable.h>
#include <AK/NonnullRefPtrVector.h>
#include <AK/String.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Forward.h>
#include <Kernel/VM/PhysicalPage.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/VMObject.h>

namespace Kernel {

// Rounds x up to the next multiple of PAGE_SIZE.
// NOTE: The mask trick relies on PAGE_SIZE being a power of two.
#define PAGE_ROUND_UP(x) ((((u32)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))

// Low physical memory is also reachable through a fixed virtual window at
// +0xc0000000. These helpers translate addresses between the two views by
// adding/subtracting that constant; they do not consult the page tables.
template<typename T>
inline T* low_physical_to_virtual(T* physical)
{
    return (T*)(((u8*)physical) + 0xc0000000);
}

inline u32 low_physical_to_virtual(u32 physical)
{
    return physical + 0xc0000000;
}

template<typename T>
inline T* virtual_to_low_physical(T* physical)
{
    return (T*)(((u8*)physical) - 0xc0000000);
}

inline u32 virtual_to_low_physical(u32 physical)
{
    return physical - 0xc0000000;
}

class KBuffer;
class SynthFSInode;

// Shorthand for the MemoryManager singleton, used throughout the kernel.
#define MM Kernel::MemoryManager::the()

// The kernel-wide memory manager singleton. It owns the kernel page
// directory, allocates/frees user and supervisor physical pages, creates
// kernel Regions, and keeps registries of all Regions and VMObjects.
class MemoryManager {
    AK_MAKE_ETERNAL
    friend class PageDirectory;
    friend class PhysicalPage;
    friend class PhysicalRegion;
    friend class Region;
    friend class VMObject;
    friend Optional<KBuffer> procfs$mm(InodeIdentifier);
    friend Optional<KBuffer> procfs$memstat(InodeIdentifier);

public:
    // Returns the singleton instance (see the MM macro above).
    static MemoryManager& the();

    static void initialize();

    PageFaultResponse handle_page_fault(const PageFault&);

    void enter_process_paging_scope(Process&);

    // Address-range validation helpers for syscall argument checking.
    bool validate_user_stack(const Process&, VirtualAddress) const;
    bool validate_user_read(const Process&, VirtualAddress, size_t) const;
    bool validate_user_write(const Process&, VirtualAddress, size_t) const;

    bool validate_kernel_read(const Process&, VirtualAddress, size_t) const;

    bool can_read_without_faulting(const Process&, VirtualAddress, size_t) const;

    enum class ShouldZeroFill {
        No,
        Yes
    };

    // Physical page allocation. User pages are zero-filled by default;
    // returns a null RefPtr on failure (callers must check).
    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
    RefPtr<PhysicalPage> allocate_supervisor_physical_page();
    Vector<RefPtr<PhysicalPage>> allocate_contiguous_supervisor_physical_pages(size_t size);
    void deallocate_user_physical_page(PhysicalPage&&);
    void deallocate_supervisor_physical_page(PhysicalPage&&);

    // Kernel region factories. `access` is a Region access bitmask; regions
    // are kernel-only unless `user_accessible` is set. Return null on failure.
    OwnPtr<Region> allocate_contiguous_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access, bool cacheable = true);

    // Page accounting, e.g. for /proc memory statistics.
    unsigned user_physical_pages() const { return m_user_physical_pages; }
    unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }
    unsigned super_physical_pages() const { return m_super_physical_pages; }
    unsigned super_physical_pages_used() const { return m_super_physical_pages_used; }

    // Invokes `callback` for every registered VMObject until it returns
    // IterationDecision::Break.
    template<typename Callback>
    static void for_each_vmobject(Callback callback)
    {
        for (auto& vmobject : MM.m_vmobjects) {
            if (callback(vmobject) == IterationDecision::Break)
                break;
        }
    }

    static Region* region_from_vaddr(Process&, VirtualAddress);
    static const Region* region_from_vaddr(const Process&, VirtualAddress);

    void dump_kernel_regions();

    // The single physical page of zeroes shared by all zero-fill mappings.
    PhysicalPage& shared_zero_page() { return *m_shared_zero_page; }

private:
    MemoryManager();
    ~MemoryManager();

    enum class AccessSpace { Kernel,
        User };
    enum class AccessType { Read,
        Write };
    // Shared implementation behind the validate_* public helpers.
    template<AccessSpace, AccessType>
    bool validate_range(const Process&, VirtualAddress, size_t) const;

    // Registry maintenance, called by VMObject/Region (friends above).
    void register_vmobject(VMObject&);
    void unregister_vmobject(VMObject&);
    void register_region(Region&);
    void unregister_region(Region&);

    // One-time boot/setup steps.
    void detect_cpu_features();
    void setup_low_identity_mapping();
    void protect_kernel_image();
    void parse_memory_map();
    void flush_entire_tlb();
    void flush_tlb(VirtualAddress);

    static Region* user_region_from_vaddr(Process&, VirtualAddress);
    static Region* kernel_region_from_vaddr(VirtualAddress);

    static Region* region_from_vaddr(VirtualAddress);

    RefPtr<PhysicalPage> find_free_user_physical_page();

    // Quickmap: temporarily map one physical page so the kernel can touch it.
    // m_quickmap_in_use suggests only one quickmap may be active at a time;
    // callers pair quickmap_page() with unquickmap_page().
    u8* quickmap_page(PhysicalPage&);
    void unquickmap_page();

    PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
    PageTableEntry* quickmap_pt(PhysicalAddress);

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

    // Page-table entry lookup; ensure_pte() creates the PTE if missing.
    const PageTableEntry* pte(const PageDirectory&, VirtualAddress);
    PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);

    RefPtr<PageDirectory> m_kernel_page_directory;
    // NOTE(review): presumably backs setup_low_identity_mapping() — confirm.
    RefPtr<PhysicalPage> m_low_page_table;

    RefPtr<PhysicalPage> m_shared_zero_page;

    unsigned m_user_physical_pages { 0 };
    unsigned m_user_physical_pages_used { 0 };
    unsigned m_super_physical_pages { 0 };
    unsigned m_super_physical_pages_used { 0 };

    NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
    NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;

    InlineLinkedList<Region> m_user_regions;
    InlineLinkedList<Region> m_kernel_regions;

    InlineLinkedList<VMObject> m_vmobjects;

    bool m_quickmap_in_use { false };
};

// Invokes `callback` for every Region currently mapping this VMObject, by
// scanning the global user and kernel region lists.
template<typename Callback>
void VMObject::for_each_region(Callback callback)
{
    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
    //        Perhaps VMObject could have a Vector<Region*> with all of its mappers?
    for (auto& region : MM.m_user_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
    for (auto& region : MM.m_kernel_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
}

// Userspace occupies virtual addresses below 0xc0000000 (matching the
// physical/virtual translation offset above).
inline bool is_user_address(VirtualAddress vaddr)
{
    return vaddr.get() < 0xc0000000;
}

inline bool is_user_range(VirtualAddress vaddr, size_t size)
{
    // Reject ranges whose end wraps around the address space.
    if (vaddr.offset(size) < vaddr)
        return false;
    return is_user_address(vaddr) && is_user_address(vaddr.offset(size));
}

inline bool PhysicalPage::is_shared_zero_page() const
{
    return this == &MM.shared_zero_page();
}

}