/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/kernel.h>
#include <linux/slab.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @depth: Number of bits being used in @word/@cleared
	 */
	unsigned long depth;

	/**
	 * @word: word holding free bits
	 */
	unsigned long word ____cacheline_aligned_in_smp;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;

	/**
	 * @swap_lock: Held while swapping word <-> cleared
	 */
	spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait_cnt: Number of frees remaining before we wake up.
	 */
	atomic_t wait_cnt;

	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/**
	 * @ws_active: Count of currently active @ws waitqueues.
	 */
	atomic_t ws_active;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
	 */
	unsigned int min_shallow_depth;
};
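
/*
 * Layout example (illustrative, not from the original source): with
 * depth = 128 and shift = 6, each word covers 2^6 = 64 bits, so the
 * bitmap needs map_nr = DIV_ROUND_UP(128, 64) = 2 cachelines. Bit 70
 * then lives in word 70 >> 6 = 1 at offset 70 & 63 = 6, which is the
 * arithmetic that the SB_NR_TO_INDEX() and SB_NR_TO_BIT() macros below
 * perform.
 */
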
/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node);

/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	kfree(sb->map);
	sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 * @alloc_hint: Hint for where to start searching for a free bit.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);

/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @alloc_hint: Hint for where to start searching for a free bit.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth);

/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

/**
 * sbitmap_any_bit_clear() - Check for an unset bit in a &struct
 * sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is clear, false otherwise.
 */
bool sbitmap_any_bit_clear(const struct sbitmap *sb);
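
/*
 * Example (an illustrative sketch, not part of the kernel API): set up a
 * bitmap and take one bit as a low-priority user that may only use half
 * of each word, as described for sbitmap_get_shallow() above. All
 * example_* names are hypothetical; NUMA_NO_NODE and GFP_KERNEL are
 * assumed to be visible via the includes above. On success the caller
 * owns bit @nr and must eventually release it with sbitmap_clear_bit()
 * (declared below).
 */
static inline int example_lowprio_get(struct sbitmap *sb)
{
	int ret, nr;

	ret = sbitmap_init_node(sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE);
	if (ret)
		return ret;

	/* Use at most half of each 2^shift-bit word. */
	nr = sbitmap_get_shallow(sb, 0, 1UL << (sb->shift - 1));
	if (nr < 0) {
		sbitmap_free(sb);
		return -EBUSY;
	}
	return nr;
}
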
#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))

typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @start: Where to start the iteration.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   sb->map[index].depth - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}

static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}
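
/*
 * Example (an illustrative sketch; the example_* names are hypothetical):
 * count the set bits by walking the bitmap with sbitmap_for_each_set().
 */
static inline bool example_count_fn(struct sbitmap *sb, unsigned int bitnr,
				    void *data)
{
	unsigned int *count = data;

	(*count)++;
	return true;	/* returning true keeps the iteration going */
}

static inline unsigned int example_count_set_bits(struct sbitmap *sb)
{
	unsigned int count = 0;

	sbitmap_for_each_set(sb, example_count_fn, &count);
	return count;
}
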
/*
 * This one is special, since it doesn't actually clear the bit: instead, it
 * sets the corresponding bit in the ->cleared mask. It is paired with the
 * caller doing sbitmap_deferred_clear() if a given index is full, which
 * clears the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}

static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
					    unsigned int bitnr)
{
	clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_get().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node);

/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 *
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	free_percpu(sbq->alloc_hint);
	sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);
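
/*
 * Example (an illustrative sketch; example_setup_queue is hypothetical):
 * initialize a 256-bit queue with the default word size and without
 * strict round-robin allocation, then tear it down again. If the shallow
 * getters declared below are going to be used, the caller must also call
 * sbitmap_queue_min_shallow_depth(); see its documentation below.
 */
static inline int example_setup_queue(struct sbitmap_queue *sbq)
{
	int ret;

	/* depth = 256, shift < 0 picks a default, round_robin = false */
	ret = sbitmap_queue_init_node(sbq, 256, -1, false, GFP_KERNEL,
				      NUMA_NO_NODE);
	if (ret)
		return ret;

	/* ... allocate and free bits ... */

	sbitmap_queue_free(sbq);
	return 0;
}
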
/**
 * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}

/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
					    unsigned int *cpu,
					    unsigned int shallow_depth)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
	put_cpu();
	return nr;
}

/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth);

/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);

static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)						\
	struct sbq_wait name = {					\
		.sbq = NULL,						\
		.wait = {						\
			.private = current,				\
			.func = autoremove_wake_function,		\
			.entry = LIST_HEAD_INIT((name).wait.entry),	\
		}							\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait);

/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state.
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue().
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);
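
/*
 * Example (an illustrative sketch in the style of the blk-mq tag code,
 * not a definitive recipe): sleep until a bit becomes available. All
 * example_* names are hypothetical, and <linux/sched.h> is assumed to be
 * reachable for schedule() and TASK_UNINTERRUPTIBLE. On return the
 * caller owns bit @nr and must release it with
 * sbitmap_queue_clear(sbq, nr, *cpu).
 */
static inline int example_get_bit_or_wait(struct sbitmap_queue *sbq,
					  atomic_t *wait_index,
					  unsigned int *cpu)
{
	struct sbq_wait_state *ws = sbq_wait_ptr(sbq, wait_index);
	DEFINE_SBQ_WAIT(wait);
	int nr;

	do {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		nr = sbitmap_queue_get(sbq, cpu);
		if (nr >= 0)
			break;
		/* No bit free; sleep until sbitmap_queue_clear() wakes us. */
		schedule();
	} while (1);
	sbitmap_finish_wait(sbq, ws, &wait);

	return nr;
}
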
#endif /* __LINUX_SCALE_BITMAP_H */