/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_MAPLE_TREE_H
#define _LINUX_MAPLE_TREE_H
/*
 * Maple Tree - An RCU-safe adaptive tree for storing ranges
 * Copyright (c) 2018-2022 Oracle
 * Authors:     Liam R. Howlett <Liam.Howlett@Oracle.com>
 *              Matthew Wilcox <willy@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
/* #define CONFIG_MAPLE_RCU_DISABLED */

/*
 * Allocated nodes are mutable until they have been inserted into the tree,
 * at which time they cannot change their type until they have been removed
 * from the tree and an RCU grace period has passed.
 *
 * Removed nodes have their ->parent set to point to themselves.  RCU readers
 * check ->parent before relying on the value that they loaded from the
 * slots array.  This lets us reuse the slots array for the RCU head.
 *
 * Nodes in the tree point to their parent unless bit 0 is set.
 */
#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
/* 64bit sizes */
#define MAPLE_NODE_SLOTS	31	/* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS	16	/* 256 bytes */
#define MAPLE_ARANGE64_SLOTS	10	/* 240 bytes */
#define MAPLE_ALLOC_SLOTS	(MAPLE_NODE_SLOTS - 1)
#else
/* 32bit sizes */
#define MAPLE_NODE_SLOTS	63	/* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS	32	/* 256 bytes */
#define MAPLE_ARANGE64_SLOTS	21	/* 240 bytes */
#define MAPLE_ALLOC_SLOTS	(MAPLE_NODE_SLOTS - 2)
#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */

#define MAPLE_NODE_MASK		255UL

/*
 * The node->parent of the root node has bit 0 set and the rest of the pointer
 * is a pointer to the tree itself.  No more bits are available in this pointer
 * (on m68k, the data structure may only be 2-byte aligned).
 *
 * Internal non-root nodes can only have maple_range_* nodes as parents.  The
 * parent pointer is 256B aligned like all other tree nodes.  When storing 32
 * or 64 bit values, the offset can fit into 4 bits.  The 16 bit values need an
 * extra bit to store the offset.  This extra bit comes from a reuse of the last
 * bit in the node type.  This is possible by using bit 1 to indicate if bit 2
 * is part of the type or the slot.
 *
 * Once the type is decided, the decision between an allocation range type and
 * a range type is made by examining the immutable tree flags for the
 * MT_FLAGS_ALLOC_RANGE flag.
 *
 * Node types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and location in the parent pointer:
 *  type  : slot location
 *  0x??1 : Root
 *  0x?00 : 16 bit values, type in 0-1, slot in 2-6
 *  0x010 : 32 bit values, type in 0-2, slot in 3-6
 *  0x110 : 64 bit values, type in 0-2, slot in 3-6
 */

/*
 * This metadata is used to optimize the gap updating code and in reverse
 * searching for gaps or any other code that needs to find the end of the data.
 */
struct maple_metadata {
	unsigned char end;
	unsigned char gap;
};
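/*
 * Editor-added illustration of the parent pointer layout described above; it
 * is not part of the kernel API and the helper names are hypothetical.  Bit 0
 * distinguishes the root, and bit 1 selects how many bits the type occupies,
 * which in turn decides where the slot offset starts.
 */
static inline bool example_parent_is_root(unsigned long parent)
{
	return parent & 1UL;		/* 0x??1: points at the maple_tree itself */
}

static inline unsigned char example_parent_slot(unsigned long parent)
{
	if (!(parent & 2UL))		/* 0x?00: 16 bit values, slot in bits 2-6 */
		return (parent >> 2) & 0x1f;

	return (parent >> 3) & 0xf;	/* 0x?10: 32/64 bit values, slot in bits 3-6 */
}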
/*
 * Leaf nodes do not store pointers to nodes, they store user data.  Users may
 * store almost any bit pattern.  As noted above, the optimisation of storing an
 * entry at 0 in the root pointer cannot be done for data which have the bottom
 * two bits set to '10'.  We also reserve values with the bottom two bits set to
 * '10' which are below 4096 (ie 2, 6, 10 .. 4094) for internal use.  Some APIs
 * return errnos as a negative errno shifted left by two bits with the bottom
 * two bits set to '10', and while choosing to store these values in the array
 * is not an error, it may lead to confusion if you're testing for an error with
 * mas_is_err().
 *
 * Non-leaf nodes store the type of the node pointed to (enum maple_type in bits
 * 3-6), bit 2 is reserved.  That leaves bits 0-1 unused for now.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 */

struct maple_range_64 {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
	union {
		void __rcu *slot[MAPLE_RANGE64_SLOTS];
		struct {
			void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
			struct maple_metadata meta;
		};
	};
};

/*
 * At tree creation time, the user can specify that they're willing to trade off
 * storing fewer entries in a tree in return for storing more information in
 * each node.
 *
 * The maple tree supports recording the largest range of NULL entries available
 * in this node, also called gaps.  This optimises the tree for allocating a
 * range.
 */
struct maple_arange_64 {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
	void __rcu *slot[MAPLE_ARANGE64_SLOTS];
	unsigned long gap[MAPLE_ARANGE64_SLOTS];
	struct maple_metadata meta;
};

struct maple_alloc {
	unsigned long total;
	unsigned char node_count;
	unsigned int request_count;
	struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
};

struct maple_topiary {
	struct maple_pnode *parent;
	struct maple_enode *next;	/* Overlaps the pivot */
};

enum maple_type {
	maple_dense,
	maple_leaf_64,
	maple_range_64,
	maple_arange_64,
};

/**
 * DOC: Maple tree flags
 *
 * * MT_FLAGS_ALLOC_RANGE	- Track gaps in this tree
 * * MT_FLAGS_USE_RCU		- Operate in RCU mode
 * * MT_FLAGS_HEIGHT_OFFSET	- The position of the tree height in the flags
 * * MT_FLAGS_HEIGHT_MASK	- The mask for the maple tree height value
 * * MT_FLAGS_LOCK_MASK		- How the mt_lock is used
 * * MT_FLAGS_LOCK_IRQ		- Acquired irq-safe
 * * MT_FLAGS_LOCK_BH		- Acquired bh-safe
 * * MT_FLAGS_LOCK_EXTERN	- mt_lock is not used
 * * MAPLE_HEIGHT_MAX		- The largest height that can be stored
 */
#define MT_FLAGS_ALLOC_RANGE	0x01
#define MT_FLAGS_USE_RCU	0x02
#define MT_FLAGS_HEIGHT_OFFSET	0x02
#define MT_FLAGS_HEIGHT_MASK	0x7C
#define MT_FLAGS_LOCK_MASK	0x300
#define MT_FLAGS_LOCK_IRQ	0x100
#define MT_FLAGS_LOCK_BH	0x200
#define MT_FLAGS_LOCK_EXTERN	0x300

#define MAPLE_HEIGHT_MAX	31

#define MAPLE_NODE_TYPE_MASK	0x0F
#define MAPLE_NODE_TYPE_SHIFT	0x03

#define MAPLE_RESERVED_RANGE	4096
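/*
 * Editor-added sketch (not in the original header): the reserved error
 * encoding described above, in plain arithmetic.  A negative errno is shifted
 * left by two bits with '10' in the bottom bits, e.g. -ENOMEM (-12) is stored
 * as (-12 << 2) | 2.  These helpers are hypothetical; the real encoder is the
 * MA_ERROR() macro below and the status check is mas_is_err().
 */
static inline void *example_encode_errno(long err)
{
	return (void *)(((unsigned long)err << 2) | 2UL);
}

static inline long example_decode_errno(const void *entry)
{
	return (long)entry >> 2;	/* sign-extends the negative errno */
}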
#ifdef CONFIG_LOCKDEP
typedef struct lockdep_map *lockdep_map_p;
#define mt_lock_is_held(mt)						\
	(!(mt)->ma_external_lock || lock_is_held((mt)->ma_external_lock))

#define mt_write_lock_is_held(mt)					\
	(!(mt)->ma_external_lock ||					\
	 lock_is_held_type((mt)->ma_external_lock, 0))

#define mt_set_external_lock(mt, lock)					\
	(mt)->ma_external_lock = &(lock)->dep_map

#define mt_on_stack(mt)			(mt).ma_external_lock = NULL
#else
typedef struct { /* nothing */ } lockdep_map_p;
#define mt_lock_is_held(mt)		1
#define mt_write_lock_is_held(mt)	1
#define mt_set_external_lock(mt, lock)	do { } while (0)
#define mt_on_stack(mt)			do { } while (0)
#endif

/*
 * If the tree contains a single entry at index 0, it is usually stored in
 * tree->ma_root.  To optimise for the page cache, an entry which ends in '00',
 * '01' or '11' is stored in the root, but an entry which ends in '10' will be
 * stored in a node.  Bits 3-6 are used to store enum maple_type.
 *
 * The flags are used both to store some immutable information about this tree
 * (set at tree creation time) and dynamic information set under the spinlock.
 *
 * Another use of flags is to indicate global states of the tree.  This is the
 * case with the MT_FLAGS_USE_RCU flag, which indicates the tree is currently
 * in RCU mode.  This mode was added to allow the tree to reuse nodes instead
 * of re-allocating and RCU freeing nodes when there is a single user.
 */
struct maple_tree {
	union {
		spinlock_t	ma_lock;
		lockdep_map_p	ma_external_lock;
	};
	unsigned int	ma_flags;
	void __rcu	*ma_root;
};

/**
 * MTREE_INIT() - Initialize a maple tree
 * @name: The maple tree name
 * @__flags: The maple tree flags
 */
#define MTREE_INIT(name, __flags) {					\
	.ma_lock = __SPIN_LOCK_UNLOCKED((name).ma_lock),		\
	.ma_flags = __flags,						\
	.ma_root = NULL,						\
}

/**
 * MTREE_INIT_EXT() - Initialize a maple tree with an external lock.
 * @name: The tree name
 * @__flags: The maple tree flags
 * @__lock: The external lock
 */
#ifdef CONFIG_LOCKDEP
#define MTREE_INIT_EXT(name, __flags, __lock) {				\
	.ma_external_lock = &(__lock).dep_map,				\
	.ma_flags = (__flags),						\
	.ma_root = NULL,						\
}
#else
#define MTREE_INIT_EXT(name, __flags, __lock)	MTREE_INIT(name, __flags)
#endif

#define DEFINE_MTREE(name)						\
	struct maple_tree name = MTREE_INIT(name, 0)

#define mtree_lock(mt)		spin_lock((&(mt)->ma_lock))
#define mtree_lock_nested(mt, subclass) \
		spin_lock_nested((&(mt)->ma_lock), subclass)
#define mtree_unlock(mt)	spin_unlock((&(mt)->ma_lock))
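/*
 * Editor-added usage sketch, not part of the original header: defining trees
 * with the initialisers above.  The variable names are hypothetical.
 */
static DEFINE_MTREE(example_mtree);		/* file-scope tree, no flags */

static struct maple_tree example_rcu_mtree =
	MTREE_INIT(example_rcu_mtree, MT_FLAGS_USE_RCU);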
/*
 * The Maple Tree squeezes various bits in at various points which aren't
 * necessarily obvious.  Usually, this is done by observing that pointers are
 * N-byte aligned and thus the bottom log_2(N) bits are available for use.  We
 * don't use the high bits of pointers to store additional information because
 * we don't know what bits are unused on any given architecture.
 *
 * Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8
 * low bits for our own purposes.  Nodes are currently of 4 types:
 * 1. Single pointer (Range is 0-0)
 * 2. Non-leaf Allocation Range nodes
 * 3. Non-leaf Range nodes
 * 4. Leaf Range nodes
 *
 * All nodes consist of a number of node slots, pivots, and a parent pointer.
 */

struct maple_node {
	union {
		struct {
			struct maple_pnode *parent;
			void __rcu *slot[MAPLE_NODE_SLOTS];
		};
		struct {
			void *pad;
			struct rcu_head rcu;
			struct maple_enode *piv_parent;
			unsigned char parent_slot;
			enum maple_type type;
			unsigned char slot_len;
			unsigned int ma_flags;
		};
		struct maple_range_64 mr64;
		struct maple_arange_64 ma64;
		struct maple_alloc alloc;
	};
};

/*
 * More complicated stores can cause two nodes to become one or three and
 * potentially alter the height of the tree.  Either half of the tree may need
 * to be rebalanced against the other.  The ma_topiary struct is used to track
 * which nodes have been 'cut' from the tree so that the change can be done
 * safely at a later date.  This is done to support RCU.
 */
struct ma_topiary {
	struct maple_enode *head;
	struct maple_enode *tail;
	struct maple_tree *mtree;
};

void *mtree_load(struct maple_tree *mt, unsigned long index);

int mtree_insert(struct maple_tree *mt, unsigned long index,
		void *entry, gfp_t gfp);
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp);
int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp);
int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp);

int mtree_store_range(struct maple_tree *mt, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp);
int mtree_store(struct maple_tree *mt, unsigned long index,
		void *entry, gfp_t gfp);
void *mtree_erase(struct maple_tree *mt, unsigned long index);

int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);

void mtree_destroy(struct maple_tree *mt);
void __mt_destroy(struct maple_tree *mt);

/**
 * mtree_empty() - Determine if a tree has any present entries.
 * @mt: Maple Tree.
 *
 * Context: Any context.
 * Return: %true if the tree contains only NULL pointers.
 */
static inline bool mtree_empty(const struct maple_tree *mt)
{
	return mt->ma_root == NULL;
}

/* Advanced API */
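/*
 * Editor-added sketch of the basic API above; the function and values are
 * hypothetical.  The basic API takes and releases the tree's internal
 * spinlock around each operation, so no extra locking is required here.
 */
static inline int example_basic_api(struct maple_tree *mt)
{
	void *entry = (void *)0x4;	/* bottom bits are not '10', so storable */
	int err;

	/* Store one entry covering the whole range 0x1000-0x1fff. */
	err = mtree_store_range(mt, 0x1000, 0x1fff, entry, GFP_KERNEL);
	if (err)
		return err;

	/* Any index inside the range loads the same entry. */
	WARN_ON(mtree_load(mt, 0x1234) != entry);

	/* Erasing any index inside the range removes the whole range entry. */
	mtree_erase(mt, 0x1800);
	return 0;
}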
/*
 * Maple State Status
 * ma_active means the maple state is pointing to a node and offset and can
 * continue operating on the tree.
 * ma_start means we have not searched the tree.
 * ma_root means we have searched the tree and the entry we found lives in
 * the root of the tree (ie it has index 0, length 1 and is the only entry in
 * the tree).
 * ma_none means we have searched the tree and there is no node in the
 * tree for this entry.  For example, we searched for index 1 in an empty
 * tree.  Or we have a tree which points to a full leaf node and we
 * searched for an entry which is larger than can be contained in that
 * leaf node.
 * ma_pause means the data within the maple state may be stale; restart the
 * operation.
 * ma_overflow means the search has reached the upper limit of the search.
 * ma_underflow means the search has reached the lower limit of the search.
 * ma_error means there was an error, check the node for the error number.
 */
enum maple_status {
	ma_active,
	ma_start,
	ma_root,
	ma_none,
	ma_pause,
	ma_overflow,
	ma_underflow,
	ma_error,
};

/*
 * The maple state is defined in the struct ma_state and is used to keep track
 * of information during operations, and even between operations when using the
 * advanced API.
 *
 * If state->node has bit 0 set then it references a tree location which is not
 * a node (eg the root).  If bit 1 is set, the rest of the bits are a negative
 * errno.  Bit 2 (the 'unallocated slots' bit) is clear.  Bits 3-6 indicate the
 * node type.
 *
 * state->alloc either has a requested number of nodes or an allocated node.
 * If state->alloc has a requested number of nodes, the first bit will be set
 * (0x1) and the remaining bits are the value.  If state->alloc is a node, then
 * the node will be of type maple_alloc.  maple_alloc has MAPLE_NODE_SLOTS - 1
 * slots for storing more allocated nodes, a total number of nodes allocated,
 * and the node_count in this node.  node_count is the number of allocated
 * nodes in this node.  The scaling beyond MAPLE_NODE_SLOTS - 1 is handled by
 * storing further nodes into state->alloc->slot[0]'s node.  Nodes are taken
 * from state->alloc by removing a node from the state->alloc node until
 * state->alloc->node_count is 1, when state->alloc is returned and
 * state->alloc->slot[0] is promoted to state->alloc.  Nodes are pushed onto
 * state->alloc by putting the current state->alloc into the pushed node's
 * slot[0].
 *
 * The state also contains the implied min/max of the state->node, the depth of
 * this search, and the offset.  The implied min/max are either from the parent
 * node or are 0-oo for the root node.  The depth is incremented or decremented
 * every time a node is walked down or up.  The offset is the slot/pivot of
 * interest in the node - either for reading or writing.
 *
 * When returning a value the maple state index and last respectively contain
 * the start and end of the range for the entry.  Ranges are inclusive in the
 * Maple Tree.
 *
 * The status of the state is used to determine how the next action should
 * treat the state.  For instance, if the status is ma_start then the next
 * action should start at the root of the tree and walk down.  If the status is
 * ma_pause then the node may be stale data and should be discarded.  If the
 * status is ma_overflow, then the last action hit the upper limit.
 */
struct ma_state {
	struct maple_tree *tree;	/* The tree we're operating in */
	unsigned long index;		/* The index we're operating on - range start */
	unsigned long last;		/* The last index we're operating on - range end */
	struct maple_enode *node;	/* The node containing this entry */
	unsigned long min;		/* The minimum index of this node - implied pivot min */
	unsigned long max;		/* The maximum index of this node - implied pivot max */
	struct maple_alloc *alloc;	/* Allocated nodes for this operation */
	enum maple_status status;	/* The status of the state (active, start, none, etc) */
	unsigned char depth;		/* depth of tree descent during write */
	unsigned char offset;
	unsigned char mas_flags;
	unsigned char end;		/* The end of the node */
};

struct ma_wr_state {
	struct ma_state *mas;
	struct maple_node *node;	/* Decoded mas->node */
	unsigned long r_min;		/* range min */
	unsigned long r_max;		/* range max */
	enum maple_type type;		/* mas->node type */
	unsigned char offset_end;	/* The offset where the write ends */
	unsigned long *pivots;		/* mas->node->pivots pointer */
	unsigned long end_piv;		/* The pivot at the offset end */
	void __rcu **slots;		/* mas->node->slots pointer */
	void *entry;			/* The entry to write */
	void *content;			/* The existing entry that is being overwritten */
};

#define mas_lock(mas)		spin_lock(&((mas)->tree->ma_lock))
#define mas_lock_nested(mas, subclass) \
		spin_lock_nested(&((mas)->tree->ma_lock), subclass)
#define mas_unlock(mas)		spin_unlock(&((mas)->tree->ma_lock))
/*
 * Special values for ma_state.node.
 * MA_ERROR represents an errno.  After dropping the lock and attempting
 * to resolve the error, the walk would have to be restarted from the
 * top of the tree as the tree may have been modified.
 */
#define MA_ERROR(err) \
		((struct maple_enode *)(((unsigned long)err << 2) | 2UL))

#define MA_STATE(name, mt, first, end)					\
	struct ma_state name = {					\
		.tree = mt,						\
		.index = first,						\
		.last = end,						\
		.node = NULL,						\
		.status = ma_start,					\
		.min = 0,						\
		.max = ULONG_MAX,					\
		.alloc = NULL,						\
		.mas_flags = 0,						\
	}

#define MA_WR_STATE(name, ma_state, wr_entry)				\
	struct ma_wr_state name = {					\
		.mas = ma_state,					\
		.content = NULL,					\
		.entry = wr_entry,					\
	}

#define MA_TOPIARY(name, tree)						\
	struct ma_topiary name = {					\
		.head = NULL,						\
		.tail = NULL,						\
		.mtree = tree,						\
	}

void *mas_walk(struct ma_state *mas);
void *mas_store(struct ma_state *mas, void *entry);
void *mas_erase(struct ma_state *mas);
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
void mas_store_prealloc(struct ma_state *mas, void *entry);
void *mas_find(struct ma_state *mas, unsigned long max);
void *mas_find_range(struct ma_state *mas, unsigned long max);
void *mas_find_rev(struct ma_state *mas, unsigned long min);
void *mas_find_range_rev(struct ma_state *mas, unsigned long max);
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);

bool mas_nomem(struct ma_state *mas, gfp_t gfp);
void mas_pause(struct ma_state *mas);
void maple_tree_init(void);
void mas_destroy(struct ma_state *mas);
int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);

void *mas_prev(struct ma_state *mas, unsigned long min);
void *mas_prev_range(struct ma_state *mas, unsigned long max);
void *mas_next(struct ma_state *mas, unsigned long max);
void *mas_next_range(struct ma_state *mas, unsigned long max);

int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
		   unsigned long size);
/*
 * This finds an empty area from the highest address to the lowest.
 * AKA the "topdown" version.
 */
int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
		       unsigned long max, unsigned long size);

static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
			    unsigned long addr)
{
	memset(mas, 0, sizeof(struct ma_state));
	mas->tree = tree;
	mas->index = mas->last = addr;
	mas->max = ULONG_MAX;
	mas->status = ma_start;
	mas->node = NULL;
}

static inline bool mas_is_active(struct ma_state *mas)
{
	return mas->status == ma_active;
}

static inline bool mas_is_err(struct ma_state *mas)
{
	return mas->status == ma_error;
}

/**
 * mas_reset() - Reset a Maple Tree operation state.
 * @mas: Maple Tree operation state.
 *
 * Resets the error or walk state of the @mas so future walks of the
 * array will start from the root.  Use this if you have dropped the
 * lock and want to reuse the ma_state.
 *
 * Context: Any context.
 */
static __always_inline void mas_reset(struct ma_state *mas)
{
	mas->status = ma_start;
	mas->node = NULL;
}
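/*
 * Editor-added sketch of the advanced API above (the function is
 * hypothetical).  Unlike the basic API, the caller owns the lock and the
 * ma_state.  Preallocating before taking the spinlock lets the store itself
 * run without failing or sleeping while the lock is held.
 */
static inline int example_mas_store(struct maple_tree *mt, unsigned long first,
				    unsigned long last, void *entry)
{
	int err;
	MA_STATE(mas, mt, first, last);

	err = mas_preallocate(&mas, entry, GFP_KERNEL);	/* may sleep; lock not held */
	if (err)
		return err;

	mas_lock(&mas);
	mas_store_prealloc(&mas, entry);	/* consumes the preallocated nodes */
	mas_unlock(&mas);
	return 0;
}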
/**
 * mas_for_each() - Iterate over a range of the maple tree.
 * @__mas: Maple Tree operation state (maple_state)
 * @__entry: Entry retrieved from the tree
 * @__max: maximum index to retrieve from the tree
 *
 * When returned, mas->index and mas->last will hold the entire range for the
 * entry.
 *
 * Note: may return the zero entry.
 */
#define mas_for_each(__mas, __entry, __max) \
	while (((__entry) = mas_find((__mas), (__max))) != NULL)
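/*
 * Editor-added sketch: iterating a tree with mas_for_each() (the function is
 * hypothetical).  mas_find() stops at @__max, and mas.index/mas.last describe
 * each entry's range as the loop runs.  rcu_read_lock() suffices for readers
 * when the tree is in RCU mode; otherwise take the tree lock.
 */
static inline unsigned long example_count_entries(struct maple_tree *mt)
{
	unsigned long count = 0;
	void *entry;
	MA_STATE(mas, mt, 0, 0);

	rcu_read_lock();
	mas_for_each(&mas, entry, ULONG_MAX)
		count++;
	rcu_read_unlock();

	return count;
}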
#ifdef CONFIG_DEBUG_MAPLE_TREE
enum mt_dump_format {
	mt_dump_dec,
	mt_dump_hex,
};

extern atomic_t maple_tree_tests_run;
extern atomic_t maple_tree_tests_passed;

void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
void mas_dump(const struct ma_state *mas);
void mas_wr_dump(const struct ma_wr_state *wr_mas);
void mt_validate(struct maple_tree *mt);
void mt_cache_shrink(void);
#define MT_BUG_ON(__tree, __x) do {					\
	atomic_inc(&maple_tree_tests_run);				\
	if (__x) {							\
		pr_info("BUG at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mt_dump(__tree, mt_dump_hex);				\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
} while (0)

#define MAS_BUG_ON(__mas, __x) do {					\
	atomic_inc(&maple_tree_tests_run);				\
	if (__x) {							\
		pr_info("BUG at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mas_dump(__mas);					\
		mt_dump((__mas)->tree, mt_dump_hex);			\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
} while (0)

#define MAS_WR_BUG_ON(__wrmas, __x) do {				\
	atomic_inc(&maple_tree_tests_run);				\
	if (__x) {							\
		pr_info("BUG at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mas_wr_dump(__wrmas);					\
		mas_dump((__wrmas)->mas);				\
		mt_dump((__wrmas)->mas->tree, mt_dump_hex);		\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
} while (0)

#define MT_WARN_ON(__tree, __x)  ({					\
	int ret = !!(__x);						\
	atomic_inc(&maple_tree_tests_run);				\
	if (ret) {							\
		pr_info("WARN at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mt_dump(__tree, mt_dump_hex);				\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
	unlikely(ret);							\
})

#define MAS_WARN_ON(__mas, __x) ({					\
	int ret = !!(__x);						\
	atomic_inc(&maple_tree_tests_run);				\
	if (ret) {							\
		pr_info("WARN at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mas_dump(__mas);					\
		mt_dump((__mas)->tree, mt_dump_hex);			\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
	unlikely(ret);							\
})

#define MAS_WR_WARN_ON(__wrmas, __x) ({					\
	int ret = !!(__x);						\
	atomic_inc(&maple_tree_tests_run);				\
	if (ret) {							\
		pr_info("WARN at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mas_wr_dump(__wrmas);					\
		mas_dump((__wrmas)->mas);				\
		mt_dump((__wrmas)->mas->tree, mt_dump_hex);		\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
	unlikely(ret);							\
})
#else
#define MT_BUG_ON(__tree, __x)		BUG_ON(__x)
#define MAS_BUG_ON(__mas, __x)		BUG_ON(__x)
#define MAS_WR_BUG_ON(__mas, __x)	BUG_ON(__x)
#define MT_WARN_ON(__tree, __x)		WARN_ON(__x)
#define MAS_WARN_ON(__mas, __x)		WARN_ON(__x)
#define MAS_WR_WARN_ON(__mas, __x)	WARN_ON(__x)
#endif /* CONFIG_DEBUG_MAPLE_TREE */

/**
 * __mas_set_range() - Set up Maple Tree operation state to a sub-range of the
 * current location.
 * @mas: Maple Tree operation state.
 * @start: New start of range in the Maple Tree.
 * @last: New end of range in the Maple Tree.
 *
 * Set the internal maple state values to a sub-range.
 * Please use mas_set_range() if you do not know where you are in the tree.
 */
static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
		unsigned long last)
{
	/* Ensure the range starts within the current slot */
	MAS_WARN_ON(mas, mas_is_active(mas) &&
		   (mas->index > start || mas->last < start));
	mas->index = start;
	mas->last = last;
}

/**
 * mas_set_range() - Set up Maple Tree operation state for a different index.
 * @mas: Maple Tree operation state.
 * @start: New start of range in the Maple Tree.
 * @last: New end of range in the Maple Tree.
 *
 * Move the operation state to refer to a different range.  This will
 * have the effect of starting a walk from the top; see mas_next()
 * to move to an adjacent index.
 */
static inline
void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
{
	mas_reset(mas);
	__mas_set_range(mas, start, last);
}

/**
 * mas_set() - Set up Maple Tree operation state for a different index.
 * @mas: Maple Tree operation state.
 * @index: New index into the Maple Tree.
 *
 * Move the operation state to refer to a different index.  This will
 * have the effect of starting a walk from the top; see mas_next()
 * to move to an adjacent index.
 */
static inline void mas_set(struct ma_state *mas, unsigned long index)
{
	mas_set_range(mas, index, index);
}

static inline bool mt_external_lock(const struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
}

/**
 * mt_init_flags() - Initialise an empty maple tree with flags.
 * @mt: Maple Tree
 * @flags: maple tree flags.
 *
 * If you need to initialise a Maple Tree with special flags (eg, an
 * allocation tree), use this function.
 *
 * Context: Any context.
 */
static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags)
{
	mt->ma_flags = flags;
	if (!mt_external_lock(mt))
		spin_lock_init(&mt->ma_lock);
	rcu_assign_pointer(mt->ma_root, NULL);
}

/**
 * mt_init() - Initialise an empty maple tree.
 * @mt: Maple Tree
 *
 * An empty Maple Tree.
 *
 * Context: Any context.
 */
static inline void mt_init(struct maple_tree *mt)
{
	mt_init_flags(mt, 0);
}

static inline bool mt_in_rcu(struct maple_tree *mt)
{
#ifdef CONFIG_MAPLE_RCU_DISABLED
	return false;
#endif
	return mt->ma_flags & MT_FLAGS_USE_RCU;
}
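/*
 * Editor-added sketch: an allocation tree tracks gaps, which lets
 * mas_empty_area() find a free range.  The function name and sizes here are
 * hypothetical.
 */
static inline int example_find_gap(struct maple_tree *mt, unsigned long *startp)
{
	int err;
	MA_STATE(mas, mt, 0, 0);

	mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);	/* enable gap tracking */

	mtree_lock(mt);
	err = mas_empty_area(&mas, 0, ULONG_MAX, 0x1000);
	if (!err)
		*startp = mas.index;	/* start of a 0x1000-sized gap */
	mtree_unlock(mt);

	return err;
}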
/**
 * mt_clear_in_rcu() - Switch the tree to non-RCU mode.
 * @mt: The Maple Tree
 */
static inline void mt_clear_in_rcu(struct maple_tree *mt)
{
	if (!mt_in_rcu(mt))
		return;

	if (mt_external_lock(mt)) {
		WARN_ON(!mt_lock_is_held(mt));
		mt->ma_flags &= ~MT_FLAGS_USE_RCU;
	} else {
		mtree_lock(mt);
		mt->ma_flags &= ~MT_FLAGS_USE_RCU;
		mtree_unlock(mt);
	}
}

/**
 * mt_set_in_rcu() - Switch the tree to RCU safe mode.
 * @mt: The Maple Tree
 */
static inline void mt_set_in_rcu(struct maple_tree *mt)
{
	if (mt_in_rcu(mt))
		return;

	if (mt_external_lock(mt)) {
		WARN_ON(!mt_lock_is_held(mt));
		mt->ma_flags |= MT_FLAGS_USE_RCU;
	} else {
		mtree_lock(mt);
		mt->ma_flags |= MT_FLAGS_USE_RCU;
		mtree_unlock(mt);
	}
}

static inline unsigned int mt_height(const struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
}

void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
		    unsigned long max);
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min);
void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);

/**
 * mt_for_each - Iterate over each entry starting at index until max.
 * @__tree: The Maple Tree
 * @__entry: The current entry
 * @__index: The index to start the search from. Subsequently used as iterator.
 * @__max: The maximum limit for @index
 *
 * This iterator skips all entries that resolve to a NULL pointer,
 * e.g. entries which have been reserved with XA_ZERO_ENTRY.
 */
#define mt_for_each(__tree, __entry, __index, __max) \
	for (__entry = mt_find(__tree, &(__index), __max); \
		__entry; __entry = mt_find_after(__tree, &(__index), __max))

#endif /*_LINUX_MAPLE_TREE_H */