// Serenity Operating System — Kernel/VM/RangeAllocator.cpp (scraped listing: 212 lines, 7.1 kB)
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/BinarySearch.h>
#include <AK/QuickSort.h>
#include <Kernel/Random.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/RangeAllocator.h>

//#define VRA_DEBUG
#define VM_GUARD_PAGES

namespace Kernel {

// RangeAllocator hands out non-overlapping virtual-address Ranges from one
// fixed total region. It tracks the still-free holes in m_available_ranges,
// kept sorted by base address so allocation can first-fit-scan and
// deallocation can coalesce neighbors.

RangeAllocator::RangeAllocator()
{
}

// Seed the allocator with its entire managed region; initially the whole
// region is a single available range.
void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
{
    m_total_range = { base, size };
    m_available_ranges.append({ base, size });
#ifdef VRA_DEBUG
    dump();
#endif
}

// Copy another allocator's state (total range and free list).
// NOTE(review): presumably used when cloning an address space (fork) —
// confirm at the call sites; this file only shows the member copies.
void RangeAllocator::initialize_from_parent(const RangeAllocator& parent_allocator)
{
    m_total_range = parent_allocator.m_total_range;
    m_available_ranges = parent_allocator.m_available_ranges;
}

RangeAllocator::~RangeAllocator()
{
}

// Debug helper: print each available range as "base -> last byte"
// (end() is one-past-the-end, hence the "- 1").
void RangeAllocator::dump() const
{
    dbgprintf("RangeAllocator{%p}\n", this);
    for (auto& range : m_available_ranges) {
        dbgprintf(" %x -> %x\n", range.base().get(), range.end().get() - 1);
    }
}

// Remove `taken` from this range, returning the 0, 1 or 2 leftover pieces:
// the part below taken.base() (if any) followed by the part above
// taken.end() (if any). An exact match yields an empty vector.
// NOTE(review): assumes `taken` lies within *this — callers guarantee this
// via contains()/fit checks; there is no assertion here.
Vector<Range, 2> Range::carve(const Range& taken)
{
    Vector<Range, 2> parts;
    if (taken == *this)
        return {};
    if (taken.base() > base())
        parts.append({ base(), taken.base().get() - base().get() });
    if (taken.end() < end())
        parts.append({ taken.end(), end().get() - taken.end().get() });
#ifdef VRA_DEBUG
    dbgprintf("VRA: carve: take %x-%x from %x-%x\n",
        taken.base().get(), taken.end().get() - 1,
        base().get(), end().get() - 1);
    for (int i = 0; i < parts.size(); ++i)
        dbgprintf(" %x-%x\n", parts[i].base().get(), parts[i].end().get() - 1);
#endif
    return parts;
}

// Carve `range` out of the free hole at `index`: the first leftover piece
// replaces the hole in place, and a second piece (when the carve splits the
// hole in two) is inserted right after it, preserving the sorted order.
void RangeAllocator::carve_at_index(int index, const Range& range)
{
    auto remaining_parts = m_available_ranges[index].carve(range);
    ASSERT(remaining_parts.size() >= 1);
    m_available_ranges[index] = remaining_parts[0];
    if (remaining_parts.size() == 2)
        m_available_ranges.insert(index + 1, move(remaining_parts[1]));
}

// Allocate a `size`-byte range at any address, aligned to `alignment`.
// First-fit scan over the free list. Returns an empty Range when size is 0
// or no hole is large enough.
Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
{
    if (!size)
        return {};

#ifdef VM_GUARD_PAGES
    // NOTE: We pad VM allocations with a guard page on each side.
    size_t effective_size = size + PAGE_SIZE * 2;
    size_t offset_from_effective_base = PAGE_SIZE;
#else
    size_t effective_size = size;
    size_t offset_from_effective_base = 0;
#endif

    for (size_t i = 0; i < m_available_ranges.size(); ++i) {
        auto& available_range = m_available_ranges[i];
        // FIXME: This check is probably excluding some valid candidates when using a large alignment.
        if (available_range.size() < (effective_size + alignment))
            continue;

        // Skip the low guard page, then round up to the requested alignment.
        uintptr_t initial_base = available_range.base().offset(offset_from_effective_base).get();
        uintptr_t aligned_base = round_up_to_power_of_two(initial_base, alignment);

        // NOTE(review): only the `size`-byte payload is carved out of the
        // hole below — the guard pages themselves stay on the free list.
        // TODO confirm that is intended (guard space could be handed out by
        // a later allocation).
        Range allocated_range(VirtualAddress(aligned_base), size);
        if (available_range == allocated_range) {
#ifdef VRA_DEBUG
            dbgprintf("VRA: Allocated perfect-fit anywhere(%zu, %zu): %x\n", size, alignment, allocated_range.base().get());
#endif
            // Perfect fit: drop the whole hole from the free list.
            m_available_ranges.remove(i);
            return allocated_range;
        }
        carve_at_index(i, allocated_range);
#ifdef VRA_DEBUG
        dbgprintf("VRA: Allocated anywhere(%zu, %zu): %x\n", size, alignment, allocated_range.base().get());
        dump();
#endif
        return allocated_range;
    }
    kprintf("VRA: Failed to allocate anywhere: %zu, %zu\n", size, alignment);
    return {};
}

// Allocate exactly [base, base+size) if some free hole fully contains it.
// Returns an empty Range when size is 0 or the requested span is not free.
Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
{
    if (!size)
        return {};

    Range allocated_range(base, size);
    for (size_t i = 0; i < m_available_ranges.size(); ++i) {
        auto& available_range = m_available_ranges[i];
        if (!available_range.contains(base, size))
            continue;
        if (available_range == allocated_range) {
            // Perfect fit: drop the whole hole from the free list.
            m_available_ranges.remove(i);
            return allocated_range;
        }
        carve_at_index(i, allocated_range);
#ifdef VRA_DEBUG
        // NOTE(review): this prints available_range's base after
        // carve_at_index has already rewritten that slot, while the
        // anywhere() path prints allocated_range — debug-only, but the
        // inconsistency is worth confirming.
        dbgprintf("VRA: Allocated specific(%u): %x\n", size, available_range.base().get());
        dump();
#endif
        return allocated_range;
    }
    kprintf("VRA: Failed to allocate specific range: %x(%u)\n", base.get(), size);
    return {};
}

// Return `range` to the free list, coalescing with an adjacent predecessor
// and/or successor hole where possible.
void RangeAllocator::deallocate(Range range)
{
    ASSERT(m_total_range.contains(range));
    ASSERT(range.size());
    ASSERT(range.base() < range.end());

#ifdef VRA_DEBUG
    dbgprintf("VRA: Deallocate: %x(%u)\n", range.base().get(), range.size());
    dump();
#endif

    ASSERT(!m_available_ranges.is_empty());

    int nearby_index = 0;
    // Look for a free hole whose end() abuts range.base() (comparator is 0
    // exactly when a.base == b.end) so we can grow it in place instead of
    // inserting a new entry.
    // NOTE(review): the comparator subtracts uintptr_t values and relies on
    // AK binary_search's comparator contract — confirm the sign/width
    // handling in AK/BinarySearch.h for widely separated addresses.
    auto* existing_range = binary_search(
        m_available_ranges.data(), m_available_ranges.size(), range, [](auto& a, auto& b) {
            return a.base().get() - b.end().get();
        },
        &nearby_index);

    size_t inserted_index = 0;
    if (existing_range) {
        // Merge with predecessor: extend it to cover `range`.
        existing_range->m_size += range.size();
        inserted_index = nearby_index;
    } else {
        // No adjacent predecessor; insert a new hole, keeping the list
        // sorted by base address.
        m_available_ranges.insert_before_matching(
            Range(range), [&](auto& entry) {
                return entry.base() >= range.end();
            },
            nearby_index, &inserted_index);
    }

    if (inserted_index < (m_available_ranges.size() - 1)) {
        // We already merged with previous. Try to merge with next.
        auto& inserted_range = m_available_ranges[inserted_index];
        auto& next_range = m_available_ranges[inserted_index + 1];
        if (inserted_range.end() == next_range.base()) {
            inserted_range.m_size += next_range.size();
            m_available_ranges.remove(inserted_index + 1);
            return;
        }
    }
#ifdef VRA_DEBUG
    dbgprintf("VRA: After deallocate\n");
    dump();
#endif
}

}