SerenityOS kernel memory-manager header (branch: master, 348 lines, 11 kB).
1/* 2 * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org> 3 * 4 * SPDX-License-Identifier: BSD-2-Clause 5 */ 6 7#pragma once 8 9#include <AK/Badge.h> 10#include <AK/Concepts.h> 11#include <AK/HashTable.h> 12#include <AK/IntrusiveRedBlackTree.h> 13#include <Kernel/Forward.h> 14#include <Kernel/Locking/Spinlock.h> 15#include <Kernel/Memory/AllocationStrategy.h> 16#include <Kernel/Memory/PhysicalPage.h> 17#include <Kernel/Memory/PhysicalRegion.h> 18#include <Kernel/Memory/Region.h> 19#include <Kernel/Memory/RegionTree.h> 20#include <Kernel/Memory/VMObject.h> 21 22struct KmallocGlobalData; 23 24namespace Kernel::Memory { 25 26class PageDirectoryEntry; 27class PageTableEntry; 28 29ErrorOr<FlatPtr> page_round_up(FlatPtr x); 30 31constexpr FlatPtr page_round_down(FlatPtr x) 32{ 33 return ((FlatPtr)(x)) & ~(PAGE_SIZE - 1); 34} 35 36inline FlatPtr virtual_to_low_physical(FlatPtr virtual_) 37{ 38 return virtual_ - physical_to_virtual_offset; 39} 40 41enum class UsedMemoryRangeType { 42 LowMemory = 0, 43 Kernel, 44 BootModule, 45 PhysicalPages, 46 __Count 47}; 48 49static constexpr StringView UserMemoryRangeTypeNames[] { 50 "Low memory"sv, 51 "Kernel"sv, 52 "Boot module"sv, 53 "Physical Pages"sv 54}; 55static_assert(array_size(UserMemoryRangeTypeNames) == to_underlying(UsedMemoryRangeType::__Count)); 56 57struct UsedMemoryRange { 58 UsedMemoryRangeType type {}; 59 PhysicalAddress start; 60 PhysicalAddress end; 61}; 62 63struct ContiguousReservedMemoryRange { 64 PhysicalAddress start; 65 PhysicalSize length {}; 66}; 67 68enum class PhysicalMemoryRangeType { 69 Usable = 0, 70 Reserved, 71 ACPI_Reclaimable, 72 ACPI_NVS, 73 BadMemory, 74 Unknown, 75}; 76 77struct PhysicalMemoryRange { 78 PhysicalMemoryRangeType type { PhysicalMemoryRangeType::Unknown }; 79 PhysicalAddress start; 80 PhysicalSize length {}; 81}; 82 83#define MM Kernel::Memory::MemoryManager::the() 84 85struct MemoryManagerData { 86 static ProcessorSpecificDataID processor_specific_data_id() { return 
ProcessorSpecificDataID::MemoryManager; } 87 88 Spinlock<LockRank::None> m_quickmap_in_use {}; 89 InterruptsState m_quickmap_previous_interrupts_state; 90}; 91 92// This class represents a set of committed physical pages. 93// When you ask MemoryManager to commit pages for you, you get one of these in return. 94// You can allocate pages from it via `take_one()` 95// It will uncommit any (unallocated) remaining pages when destroyed. 96class CommittedPhysicalPageSet { 97 AK_MAKE_NONCOPYABLE(CommittedPhysicalPageSet); 98 99public: 100 CommittedPhysicalPageSet(Badge<MemoryManager>, size_t page_count) 101 : m_page_count(page_count) 102 { 103 } 104 105 CommittedPhysicalPageSet(CommittedPhysicalPageSet&& other) 106 : m_page_count(exchange(other.m_page_count, 0)) 107 { 108 } 109 110 ~CommittedPhysicalPageSet(); 111 112 bool is_empty() const { return m_page_count == 0; } 113 size_t page_count() const { return m_page_count; } 114 115 [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one(); 116 void uncommit_one(); 117 118 void operator=(CommittedPhysicalPageSet&&) = delete; 119 120private: 121 size_t m_page_count { 0 }; 122}; 123 124class MemoryManager { 125 friend class PageDirectory; 126 friend class AnonymousVMObject; 127 friend class Region; 128 friend class RegionTree; 129 friend class VMObject; 130 friend struct ::KmallocGlobalData; 131 132public: 133 static MemoryManager& the(); 134 static bool is_initialized(); 135 136 static void initialize(u32 cpu); 137 138 static inline MemoryManagerData& get_data() 139 { 140 return ProcessorSpecific<MemoryManagerData>::get(); 141 } 142 143 PageFaultResponse handle_page_fault(PageFault const&); 144 145 void set_page_writable_direct(VirtualAddress, bool); 146 147 void protect_readonly_after_init_memory(); 148 void unmap_prekernel(); 149 void unmap_text_after_init(); 150 void protect_ksyms_after_init(); 151 152 static void enter_process_address_space(Process&); 153 static void enter_address_space(AddressSpace&); 154 155 bool 
validate_user_stack(AddressSpace&, VirtualAddress) const; 156 157 enum class ShouldZeroFill { 158 No, 159 Yes 160 }; 161 162 ErrorOr<CommittedPhysicalPageSet> commit_physical_pages(size_t page_count); 163 void uncommit_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count); 164 165 NonnullRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes); 166 ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr); 167 ErrorOr<Vector<NonnullRefPtr<PhysicalPage>>> allocate_contiguous_physical_pages(size_t size); 168 void deallocate_physical_page(PhysicalAddress); 169 170 ErrorOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes); 171 ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page); 172 ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access); 173 ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, Vector<NonnullRefPtr<Memory::PhysicalPage>>& dma_buffer_pages); 174 ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access); 175 ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes); 176 ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes); 177 ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, 
Region::Cacheable = Region::Cacheable::Yes); 178 ErrorOr<NonnullOwnPtr<Region>> allocate_unbacked_region_anywhere(size_t size, size_t alignment); 179 ErrorOr<NonnullOwnPtr<Region>> create_identity_mapped_region(PhysicalAddress, size_t); 180 181 struct SystemMemoryInfo { 182 PhysicalSize physical_pages { 0 }; 183 PhysicalSize physical_pages_used { 0 }; 184 PhysicalSize physical_pages_committed { 0 }; 185 PhysicalSize physical_pages_uncommitted { 0 }; 186 }; 187 188 SystemMemoryInfo get_system_memory_info(); 189 190 template<IteratorFunction<VMObject&> Callback> 191 static void for_each_vmobject(Callback callback) 192 { 193 VMObject::all_instances().with([&](auto& list) { 194 for (auto& vmobject : list) { 195 if (callback(vmobject) == IterationDecision::Break) 196 break; 197 } 198 }); 199 } 200 201 template<VoidFunction<VMObject&> Callback> 202 static void for_each_vmobject(Callback callback) 203 { 204 VMObject::all_instances().with([&](auto& list) { 205 for (auto& vmobject : list) { 206 callback(vmobject); 207 } 208 }); 209 } 210 211 static Region* find_user_region_from_vaddr(AddressSpace&, VirtualAddress); 212 static void validate_syscall_preconditions(Process&, RegisterState const&); 213 214 void dump_kernel_regions(); 215 216 PhysicalPage& shared_zero_page() { return *m_shared_zero_page; } 217 PhysicalPage& lazy_committed_page() { return *m_lazy_committed_page; } 218 219 PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; } 220 221 template<typename Callback> 222 void for_each_used_memory_range(Callback callback) 223 { 224 m_global_data.template with([&](auto& global_data) { 225 for (auto& range : global_data.used_memory_ranges) 226 callback(range); 227 }); 228 } 229 bool is_allowed_to_read_physical_memory_for_userspace(PhysicalAddress, size_t read_length) const; 230 231 PhysicalPageEntry& get_physical_page_entry(PhysicalAddress); 232 PhysicalAddress get_physical_address(PhysicalPage const&); 233 234 void copy_physical_page(PhysicalPage&, u8 
page_buffer[PAGE_SIZE]); 235 236 IterationDecision for_each_physical_memory_range(Function<IterationDecision(PhysicalMemoryRange const&)>); 237 238private: 239 MemoryManager(); 240 ~MemoryManager(); 241 242 void initialize_physical_pages(); 243 void register_reserved_ranges(); 244 245 void unregister_kernel_region(Region&); 246 247 void protect_kernel_image(); 248 void parse_memory_map(); 249 static void flush_tlb_local(VirtualAddress, size_t page_count = 1); 250 static void flush_tlb(PageDirectory const*, VirtualAddress, size_t page_count = 1); 251 252 static Region* kernel_region_from_vaddr(VirtualAddress); 253 254 static Region* find_region_from_vaddr(VirtualAddress); 255 256 RefPtr<PhysicalPage> find_free_physical_page(bool); 257 258 ALWAYS_INLINE u8* quickmap_page(PhysicalPage& page) 259 { 260 return quickmap_page(page.paddr()); 261 } 262 u8* quickmap_page(PhysicalAddress const&); 263 void unquickmap_page(); 264 265 PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index); 266 PageTableEntry* quickmap_pt(PhysicalAddress); 267 268 PageTableEntry* pte(PageDirectory&, VirtualAddress); 269 PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress); 270 enum class IsLastPTERelease { 271 Yes, 272 No 273 }; 274 void release_pte(PageDirectory&, VirtualAddress, IsLastPTERelease); 275 276 // NOTE: These are outside of GlobalData as they are only assigned on startup, 277 // and then never change. Atomic ref-counting covers that case without 278 // the need for additional synchronization. 279 LockRefPtr<PageDirectory> m_kernel_page_directory; 280 RefPtr<PhysicalPage> m_shared_zero_page; 281 RefPtr<PhysicalPage> m_lazy_committed_page; 282 283 // NOTE: These are outside of GlobalData as they are initialized on startup, 284 // and then never change. 
285 PhysicalPageEntry* m_physical_page_entries { nullptr }; 286 size_t m_physical_page_entries_count { 0 }; 287 288 struct GlobalData { 289 GlobalData(); 290 291 SystemMemoryInfo system_memory_info; 292 293 Vector<NonnullOwnPtr<PhysicalRegion>> physical_regions; 294 OwnPtr<PhysicalRegion> physical_pages_region; 295 296 RegionTree region_tree; 297 298 Vector<UsedMemoryRange> used_memory_ranges; 299 Vector<PhysicalMemoryRange> physical_memory_ranges; 300 Vector<ContiguousReservedMemoryRange> reserved_memory_ranges; 301 }; 302 303 SpinlockProtected<GlobalData, LockRank::None> m_global_data; 304}; 305 306inline bool is_user_address(VirtualAddress vaddr) 307{ 308 return vaddr.get() < USER_RANGE_CEILING; 309} 310 311inline bool is_user_range(VirtualAddress vaddr, size_t size) 312{ 313 if (vaddr.offset(size) < vaddr) 314 return false; 315 if (!is_user_address(vaddr)) 316 return false; 317 if (size <= 1) 318 return true; 319 return is_user_address(vaddr.offset(size - 1)); 320} 321 322inline bool is_user_range(VirtualRange const& range) 323{ 324 return is_user_range(range.base(), range.size()); 325} 326 327inline bool PhysicalPage::is_shared_zero_page() const 328{ 329 return this == &MM.shared_zero_page(); 330} 331 332inline bool PhysicalPage::is_lazy_committed_page() const 333{ 334 return this == &MM.lazy_committed_page(); 335} 336 337inline ErrorOr<Memory::VirtualRange> expand_range_to_page_boundaries(FlatPtr address, size_t size) 338{ 339 if ((address + size) < address) 340 return EINVAL; 341 342 auto base = VirtualAddress { address }.page_base(); 343 auto end = TRY(Memory::page_round_up(address + size)); 344 345 return Memory::VirtualRange { base, end - base.get() }; 346} 347 348}