// Serenity Operating System
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include <AK/Assertions.h>
#include <AK/Noncopyable.h>
#include <AK/Optional.h>
#include <AK/Platform.h>
#include <AK/StdLibExtras.h>
#include <AK/Types.h>
#include <AK/kmalloc.h>

namespace AK {

39class Bitmap {
40 AK_MAKE_NONCOPYABLE(Bitmap)
41public:
42 // NOTE: A wrapping Bitmap won't try to free the wrapped data.
43 static Bitmap wrap(u8* data, size_t size)
44 {
45 return Bitmap(data, size);
46 }
47
48 static Bitmap create(size_t size, bool default_value = 0)
49 {
50 return Bitmap(size, default_value);
51 }
52
53 static Bitmap create()
54 {
55 return Bitmap();
56 }
57
58 Bitmap(Bitmap&& other)
59 {
60 m_owned = exchange(other.m_owned, false);
61 m_data = exchange(other.m_data, nullptr);
62 m_size = exchange(other.m_size, 0);
63 }
64
65 Bitmap& operator=(Bitmap&& other)
66 {
67 if (this != &other) {
68 if (m_owned)
69 kfree(m_data);
70 m_owned = exchange(other.m_owned, false);
71 m_data = exchange(other.m_data, nullptr);
72 m_size = exchange(other.m_size, 0);
73 }
74 return *this;
75 }
76
77 ~Bitmap()
78 {
79 if (m_owned)
80 kfree(m_data);
81 m_data = nullptr;
82 }
83
84 size_t size() const { return m_size; }
85 bool get(size_t index) const
86 {
87 ASSERT(index < m_size);
88 return 0 != (m_data[index / 8] & (1u << (index % 8)));
89 }
90 void set(size_t index, bool value) const
91 {
92 ASSERT(index < m_size);
93 if (value)
94 m_data[index / 8] |= static_cast<u8>((1u << (index % 8)));
95 else
96 m_data[index / 8] &= static_cast<u8>(~(1u << (index % 8)));
97 }
98 void set_range(size_t start, size_t len, bool value)
99 {
100 for (size_t index = start; index < start + len; ++index) {
101 set(index, value);
102 }
103 }
104
105 u8* data() { return m_data; }
106 const u8* data() const { return m_data; }
107
108 void grow(size_t size, bool default_value)
109 {
110 ASSERT(size > m_size);
111
112 auto previous_size_bytes = size_in_bytes();
113 auto previous_size = m_size;
114 auto previous_data = m_data;
115
116 m_size = size;
117 m_data = reinterpret_cast<u8*>(kmalloc(size_in_bytes()));
118
119 fill(default_value);
120
121 if (previous_data != nullptr) {
122 __builtin_memcpy(m_data, previous_data, previous_size_bytes);
123 if (previous_size % 8)
124 set_range(previous_size, 8 - previous_size % 8, default_value);
125 kfree(previous_data);
126 }
127 }
128
129 void fill(bool value)
130 {
131 __builtin_memset(m_data, value ? 0xff : 0x00, size_in_bytes());
132 }
133
134 Optional<size_t> find_first_set() const
135 {
136 size_t i = 0;
137 while (i < m_size / 8 && m_data[i] == 0x00)
138 i++;
139
140 for (size_t j = i * 8; j < m_size; j++) {
141 if (get(j))
142 return j;
143 }
144
145 return {};
146 }
147
148 Optional<size_t> find_first_unset() const
149 {
150 size_t i = 0;
151 while (i < m_size / 8 && m_data[i] == 0xff)
152 i++;
153
154 for (size_t j = i * 8; j < m_size; j++)
155 if (!get(j))
156 return j;
157
158 return {};
159 }
160
161 // The function will return the next range of unset bits starting from the
162 // @from value.
163 // @from: the postition from which the search starts. The var will be
164 // changed and new value is the offset of the found block.
165 // @min_length: minimum size of the range which will be returned.
166 // @max_length: maximum size of the range which will be returned.
167 // This is used to increase performance, since the range of
168 // unset bits can be long, and we don't need the while range,
169 // so we can stop when we've reached @max_length.
170 inline Optional<size_t> find_next_range_of_unset_bits(size_t& from, size_t min_length = 1, size_t max_length = max_size) const
171 {
172 if (min_length > max_length) {
173 return {};
174 }
175
176 u32* bitmap32 = (u32*)m_data;
177
178 // Calculating the start offset.
179 size_t start_bucket_index = from / 32;
180 size_t start_bucket_bit = from % 32;
181
182 size_t* start_of_free_chunks = &from;
183 size_t free_chunks = 0;
184
185 for (size_t bucket_index = start_bucket_index; bucket_index < m_size / 32; ++bucket_index) {
186 if (bitmap32[bucket_index] == 0xffffffff) {
187 // Skip over completely full bucket of size 32.
188 if (free_chunks >= min_length) {
189 return min(free_chunks, max_length);
190 }
191 free_chunks = 0;
192 start_bucket_bit = 0;
193 continue;
194 }
195 if (bitmap32[bucket_index] == 0x0) {
196 // Skip over completely empty bucket of size 32.
197 if (free_chunks == 0) {
198 *start_of_free_chunks = bucket_index * 32;
199 }
200 free_chunks += 32;
201 if (free_chunks >= max_length) {
202 return max_length;
203 }
204 start_bucket_bit = 0;
205 continue;
206 }
207
208 u32 bucket = bitmap32[bucket_index];
209 u8 viewed_bits = start_bucket_bit;
210 u32 trailing_zeroes = 0;
211
212 bucket >>= viewed_bits;
213 start_bucket_bit = 0;
214
215 while (viewed_bits < 32) {
216 if (bucket == 0) {
217 if (free_chunks == 0) {
218 *start_of_free_chunks = bucket_index * 32 + viewed_bits;
219 }
220 free_chunks += 32 - viewed_bits;
221 viewed_bits = 32;
222 } else {
223 trailing_zeroes = count_trailing_zeroes_32(bucket);
224 bucket >>= trailing_zeroes;
225
226 if (free_chunks == 0) {
227 *start_of_free_chunks = bucket_index * 32 + viewed_bits;
228 }
229 free_chunks += trailing_zeroes;
230 viewed_bits += trailing_zeroes;
231
232 if (free_chunks >= min_length) {
233 return min(free_chunks, max_length);
234 }
235
236 // Deleting trailing ones.
237 u32 trailing_ones = count_trailing_zeroes_32(~bucket);
238 bucket >>= trailing_ones;
239 viewed_bits += trailing_ones;
240 free_chunks = 0;
241 }
242 }
243 }
244
245 if (free_chunks < min_length) {
246 return {};
247 }
248
249 return min(free_chunks, max_length);
250 }
251
252 Optional<size_t> find_longest_range_of_unset_bits(size_t max_length, size_t& found_range_size) const
253 {
254 size_t start = 0;
255 size_t max_region_start = 0;
256 size_t max_region_size = 0;
257
258 while (true) {
259 // Look for the next block which is bigger than currunt.
260 auto length_of_found_range = find_next_range_of_unset_bits(start, max_region_size + 1, max_length);
261 if (length_of_found_range.has_value()) {
262 max_region_start = start;
263 max_region_size = length_of_found_range.value();
264 start += max_region_size;
265 } else {
266 // No ranges which are bigger than current were found.
267 break;
268 }
269 }
270
271 found_range_size = max_region_size;
272 if (max_region_size) {
273 return max_region_start;
274 }
275 return {};
276 }
277
278 Optional<size_t> find_first_fit(size_t minimum_length) const
279 {
280 size_t start = 0;
281 auto length_of_found_range = find_next_range_of_unset_bits(start, minimum_length, minimum_length);
282 if (length_of_found_range.has_value()) {
283 return start;
284 }
285 return {};
286 }
287
288 Optional<size_t> find_best_fit(size_t minimum_length) const
289 {
290 size_t start = 0;
291 size_t best_region_start = 0;
292 size_t best_region_size = max_size;
293 bool found = false;
294
295 while (true) {
296 // Look for the next block which is bigger than requested length.
297 auto length_of_found_range = find_next_range_of_unset_bits(start, minimum_length, best_region_size);
298 if (length_of_found_range.has_value()) {
299 if (best_region_size > length_of_found_range.value() || !found) {
300 best_region_start = start;
301 best_region_size = length_of_found_range.value();
302 found = true;
303 }
304 start += length_of_found_range.value();
305 } else {
306 // There are no ranges which can fit requested length.
307 break;
308 }
309 }
310
311 if (found) {
312 return best_region_start;
313 }
314 return {};
315 }
316
317 Bitmap()
318 : m_size(0)
319 , m_owned(true)
320 {
321 m_data = nullptr;
322 }
323
324 Bitmap(size_t size, bool default_value)
325 : m_size(size)
326 , m_owned(true)
327 {
328 ASSERT(m_size != 0);
329 m_data = reinterpret_cast<u8*>(kmalloc(size_in_bytes()));
330 fill(default_value);
331 }
332
333 Bitmap(u8* data, size_t size)
334 : m_data(data)
335 , m_size(size)
336 , m_owned(false)
337 {
338 }
339
340 static constexpr u32 max_size = 0xffffffff;
341
342private:
343 size_t size_in_bytes() const { return ceil_div(m_size, 8); }
344
345 u8* m_data { nullptr };
346 size_t m_size { 0 };
347 bool m_owned { false };
348};

}

using AK::Bitmap;