/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_MAPLE_TREE_H
#define _LINUX_MAPLE_TREE_H
/*
 * Maple Tree - An RCU-safe adaptive tree for storing ranges
 * Copyright (c) 2018-2022 Oracle
 * Authors:	Liam R. Howlett <Liam.Howlett@Oracle.com>
 *		Matthew Wilcox <willy@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
/* #define CONFIG_MAPLE_RCU_DISABLED */

/*
 * Allocated nodes are mutable until they have been inserted into the tree,
 * at which time they cannot change their type until they have been removed
 * from the tree and an RCU grace period has passed.
 *
 * Removed nodes have their ->parent set to point to themselves.  RCU readers
 * check ->parent before relying on the value that they loaded from the
 * slots array.  This lets us reuse the slots array for the RCU head.
 *
 * Nodes in the tree point to their parent unless bit 0 is set.
 */
#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
/* 64bit sizes */
#define MAPLE_NODE_SLOTS	31	/* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS	16	/* 256 bytes */
#define MAPLE_ARANGE64_SLOTS	10	/* 240 bytes */
#define MAPLE_ALLOC_SLOTS	(MAPLE_NODE_SLOTS - 1)
#else
/* 32bit sizes */
#define MAPLE_NODE_SLOTS	63	/* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS	32	/* 256 bytes */
#define MAPLE_ARANGE64_SLOTS	21	/* 240 bytes */
#define MAPLE_ALLOC_SLOTS	(MAPLE_NODE_SLOTS - 2)
#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */

#define MAPLE_NODE_MASK		255UL

/*
 * The node->parent of the root node has bit 0 set and the rest of the pointer
 * is a pointer to the tree itself.  No more bits are available in this pointer
 * (on m68k, the data structure may only be 2-byte aligned).
 *
 * Internal non-root nodes can only have maple_range_* nodes as parents.  The
 * parent pointer is 256B aligned like all other tree nodes.  When storing 32
 * or 64 bit values, the offset can fit into 4 bits.  The 16 bit values need an
 * extra bit to store the offset.  This extra bit comes from a reuse of the last
 * bit in the node type.  This is possible by using bit 1 to indicate if bit 2
 * is part of the type or the slot.
 *
 * Once the type is decided, the choice between an allocation range type and a
 * range type is made by checking the immutable tree flags for the
 * MT_FLAGS_ALLOC_RANGE flag.
 *
 * Node types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and location in the parent pointer:
 *  type  : slot location
 *  0x??1 : Root
 *  0x?00 : 16 bit values, type in 0-1, slot in 2-6
 *  0x010 : 32 bit values, type in 0-2, slot in 3-6
 *  0x110 : 64 bit values, type in 0-2, slot in 3-6
 */
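
/*
 * Illustrative sketch, not part of the kernel API: because nodes are 256
 * bytes in size and aligned to 256 bytes, the low 8 bits of an encoded node
 * pointer are free to carry metadata such as the node type, which is what
 * MAPLE_NODE_MASK covers.  The helper below is hypothetical; the real
 * decoding helpers live in lib/maple_tree.c, and struct maple_node is
 * defined later in this file.
 *
 *	static inline struct maple_node *example_to_node(void *entry)
 *	{
 *		return (struct maple_node *)((unsigned long)entry &
 *					     ~MAPLE_NODE_MASK);
 *	}
 */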

/*
 * This metadata is used to optimize the gap updating code and in reverse
 * searching for gaps or any other code that needs to find the end of the data.
 */
struct maple_metadata {
	unsigned char end;
	unsigned char gap;
};

/*
 * Leaf nodes do not store pointers to nodes, they store user data.  Users may
 * store almost any bit pattern.  As noted above, the optimisation of storing an
 * entry at 0 in the root pointer cannot be done for data which have the bottom
 * two bits set to '10'.  We also reserve values with the bottom two bits set to
 * '10' which are below 4096 (ie 2, 6, 10 .. 4094) for internal use.  Some APIs
 * return errnos as a negative errno shifted left by two bits with the bottom
 * two bits set to '10', and while choosing to store these values in the array
 * is not an error, it may lead to confusion if you're testing for an error with
 * mas_is_err().
 *
 * Non-leaf nodes store the type of the node pointed to (enum maple_type in bits
 * 3-6); bit 2 is reserved.  That leaves bits 0-1 unused for now.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 */

struct maple_range_64 {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
	union {
		void __rcu *slot[MAPLE_RANGE64_SLOTS];
		struct {
			void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
			struct maple_metadata meta;
		};
	};
};

/*
 * At tree creation time, the user can specify that they're willing to trade off
 * storing fewer entries in a tree in return for storing more information in
 * each node.
 *
 * The maple tree supports recording the largest range of NULL entries available
 * in this node, also called gaps.  This optimises the tree for allocating a
 * range.
 */
struct maple_arange_64 {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
	void __rcu *slot[MAPLE_ARANGE64_SLOTS];
	unsigned long gap[MAPLE_ARANGE64_SLOTS];
	struct maple_metadata meta;
};

struct maple_alloc {
	unsigned long total;
	unsigned char node_count;
	unsigned int request_count;
	struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
};

struct maple_topiary {
	struct maple_pnode *parent;
	struct maple_enode *next; /* Overlaps the pivot */
};

enum maple_type {
	maple_dense,
	maple_leaf_64,
	maple_range_64,
	maple_arange_64,
};


/**
 * DOC: Maple tree flags
 *
 * * MT_FLAGS_ALLOC_RANGE	- Track gaps in this tree
 * * MT_FLAGS_USE_RCU		- Operate in RCU mode
 * * MT_FLAGS_HEIGHT_OFFSET	- The position of the tree height in the flags
 * * MT_FLAGS_HEIGHT_MASK	- The mask for the maple tree height value
 * * MT_FLAGS_LOCK_MASK		- How the mt_lock is used
 * * MT_FLAGS_LOCK_IRQ		- Acquired irq-safe
 * * MT_FLAGS_LOCK_BH		- Acquired bh-safe
 * * MT_FLAGS_LOCK_EXTERN	- mt_lock is not used
 * * MAPLE_HEIGHT_MAX		- The largest height that can be stored
 */
#define MT_FLAGS_ALLOC_RANGE	0x01
#define MT_FLAGS_USE_RCU	0x02
#define MT_FLAGS_HEIGHT_OFFSET	0x02
#define MT_FLAGS_HEIGHT_MASK	0x7C
#define MT_FLAGS_LOCK_MASK	0x300
#define MT_FLAGS_LOCK_IRQ	0x100
#define MT_FLAGS_LOCK_BH	0x200
#define MT_FLAGS_LOCK_EXTERN	0x300

#define MAPLE_HEIGHT_MAX	31


#define MAPLE_NODE_TYPE_MASK	0x0F
#define MAPLE_NODE_TYPE_SHIFT	0x03

#define MAPLE_RESERVED_RANGE	4096

#ifdef CONFIG_LOCKDEP
typedef struct lockdep_map *lockdep_map_p;
#define mt_lock_is_held(mt)						\
	(!(mt)->ma_external_lock || lock_is_held((mt)->ma_external_lock))

#define mt_write_lock_is_held(mt)					\
	(!(mt)->ma_external_lock ||					\
	 lock_is_held_type((mt)->ma_external_lock, 0))

#define mt_set_external_lock(mt, lock)					\
	(mt)->ma_external_lock = &(lock)->dep_map

#define mt_on_stack(mt)			(mt).ma_external_lock = NULL
#else
typedef struct { /* nothing */ } lockdep_map_p;
#define mt_lock_is_held(mt)		1
#define mt_write_lock_is_held(mt)	1
#define mt_set_external_lock(mt, lock)	do { } while (0)
#define mt_on_stack(mt)			do { } while (0)
#endif
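
/*
 * Example sketch: a tree protected by a caller-provided lock registers that
 * lock with lockdep so that mt_lock_is_held() can validate it.  'my_data',
 * 'my_lock' and my_data_init() are hypothetical names; mt_init_flags() is
 * defined later in this file.
 *
 *	struct my_data {
 *		spinlock_t my_lock;
 *		struct maple_tree tree;
 *	};
 *
 *	static void my_data_init(struct my_data *d)
 *	{
 *		spin_lock_init(&d->my_lock);
 *		mt_init_flags(&d->tree, MT_FLAGS_LOCK_EXTERN);
 *		mt_set_external_lock(&d->tree, &d->my_lock);
 *	}
 */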

/*
 * If the tree contains a single entry at index 0, it is usually stored in
 * tree->ma_root.  To optimise for the page cache, an entry which ends in '00',
 * '01' or '11' is stored in the root, but an entry which ends in '10' will be
 * stored in a node.  Bits 3-6 are used to store enum maple_type.
 *
 * The flags are used both to store some immutable information about this tree
 * (set at tree creation time) and dynamic information set under the spinlock.
 *
 * Another use of flags is to indicate global states of the tree.  This is the
 * case with the MT_FLAGS_USE_RCU flag, which indicates the tree is currently
 * in RCU mode.  This mode was added to allow the tree to reuse nodes instead
 * of re-allocating and RCU freeing nodes when there is a single user.
 */
struct maple_tree {
	union {
		spinlock_t	ma_lock;
		lockdep_map_p	ma_external_lock;
	};
	unsigned int	ma_flags;
	void __rcu      *ma_root;
};

/**
 * MTREE_INIT() - Initialize a maple tree
 * @name: The maple tree name
 * @__flags: The maple tree flags
 */
#define MTREE_INIT(name, __flags) {					\
	.ma_lock = __SPIN_LOCK_UNLOCKED((name).ma_lock),		\
	.ma_flags = __flags,						\
	.ma_root = NULL,						\
}

/**
 * MTREE_INIT_EXT() - Initialize a maple tree with an external lock.
 * @name: The tree name
 * @__flags: The maple tree flags
 * @__lock: The external lock
 */
#ifdef CONFIG_LOCKDEP
#define MTREE_INIT_EXT(name, __flags, __lock) {				\
	.ma_external_lock = &(__lock).dep_map,				\
	.ma_flags = (__flags),						\
	.ma_root = NULL,						\
}
#else
#define MTREE_INIT_EXT(name, __flags, __lock)	MTREE_INIT(name, __flags)
#endif

#define DEFINE_MTREE(name)						\
	struct maple_tree name = MTREE_INIT(name, 0)

#define mtree_lock(mt)		spin_lock((&(mt)->ma_lock))
#define mtree_unlock(mt)	spin_unlock((&(mt)->ma_lock))
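
/*
 * Example sketch: statically defining and initialising trees.  'my_tree' and
 * 'my_gap_tree' are hypothetical names; MT_FLAGS_ALLOC_RANGE requests the gap
 * tracking described in the DOC section above.
 *
 *	static DEFINE_MTREE(my_tree);
 *
 *	static struct maple_tree my_gap_tree =
 *		MTREE_INIT(my_gap_tree, MT_FLAGS_ALLOC_RANGE);
 */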

/*
 * The Maple Tree squeezes various bits in at various points which aren't
 * necessarily obvious.  Usually, this is done by observing that pointers are
 * N-byte aligned and thus the bottom log_2(N) bits are available for use.  We
 * don't use the high bits of pointers to store additional information because
 * we don't know what bits are unused on any given architecture.
 *
 * Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8
 * low bits for our own purposes.  Nodes are currently of 4 types:
 * 1. Single pointer (Range is 0-0)
 * 2. Non-leaf Allocation Range nodes
 * 3. Non-leaf Range nodes
 * 4. Leaf Range nodes
 *
 * All nodes consist of a number of node slots, pivots, and a parent pointer.
 */

struct maple_node {
	union {
		struct {
			struct maple_pnode *parent;
			void __rcu *slot[MAPLE_NODE_SLOTS];
		};
		struct {
			void *pad;
			struct rcu_head rcu;
			struct maple_enode *piv_parent;
			unsigned char parent_slot;
			enum maple_type type;
			unsigned char slot_len;
			unsigned int ma_flags;
		};
		struct maple_range_64 mr64;
		struct maple_arange_64 ma64;
		struct maple_alloc alloc;
	};
};

/*
 * More complicated stores can cause two nodes to become one or three and
 * potentially alter the height of the tree.  Either half of the tree may need
 * to be rebalanced against the other.  The ma_topiary struct is used to track
 * which nodes have been 'cut' from the tree so that the change can be done
 * safely at a later date.  This is done to support RCU.
 */
struct ma_topiary {
	struct maple_enode *head;
	struct maple_enode *tail;
	struct maple_tree *mtree;
};

void *mtree_load(struct maple_tree *mt, unsigned long index);

int mtree_insert(struct maple_tree *mt, unsigned long index,
		void *entry, gfp_t gfp);
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp);
int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp);
int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp);

int mtree_store_range(struct maple_tree *mt, unsigned long first,
		      unsigned long last, void *entry, gfp_t gfp);
int mtree_store(struct maple_tree *mt, unsigned long index,
		void *entry, gfp_t gfp);
void *mtree_erase(struct maple_tree *mt, unsigned long index);

void mtree_destroy(struct maple_tree *mt);
void __mt_destroy(struct maple_tree *mt);

/**
 * mtree_empty() - Determine if a tree has any present entries.
 * @mt: Maple Tree.
 *
 * Context: Any context.
 * Return: %true if the tree contains only NULL pointers.
 */
static inline bool mtree_empty(const struct maple_tree *mt)
{
	return mt->ma_root == NULL;
}
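
/*
 * Example sketch of the normal API, which takes the tree's internal spinlock
 * on the caller's behalf.  'my_tree' and 'my_ptr' are hypothetical names and
 * error handling is abbreviated.  mtree_erase() removes the whole range that
 * contains the given index and returns the erased entry.
 *
 *	void *entry;
 *
 *	if (mtree_store_range(&my_tree, 10, 19, my_ptr, GFP_KERNEL))
 *		return -ENOMEM;
 *	entry = mtree_load(&my_tree, 15);	(returns my_ptr)
 *	entry = mtree_erase(&my_tree, 12);	(erases 10-19, returns my_ptr)
 */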

/* Advanced API */

/*
 * The maple state is defined in the struct ma_state and is used to keep track
 * of information during operations, and even between operations when using the
 * advanced API.
 *
 * If state->node has bit 0 set then it references a tree location which is not
 * a node (eg the root).  If bit 1 is set, the rest of the bits are a negative
 * errno.  Bit 2 (the 'unallocated slots' bit) is clear.  Bits 3-6 indicate the
 * node type.
 *
 * state->alloc either has a requested number of nodes or an allocated node.
 * If state->alloc has a requested number of nodes, the first bit will be set
 * (0x1) and the remaining bits are the value.  If state->alloc is a node, then
 * the node will be of type maple_alloc.  maple_alloc has MAPLE_NODE_SLOTS - 1
 * for storing more allocated nodes, a total number of nodes allocated, and the
 * node_count in this node.  node_count is the number of allocated nodes in this
 * node.  The scaling beyond MAPLE_NODE_SLOTS - 1 is handled by storing further
 * nodes into state->alloc->slot[0]'s node.  Nodes are taken from state->alloc
 * by removing a node from the state->alloc node until state->alloc->node_count
 * is 1, when state->alloc is returned and the state->alloc->slot[0] is promoted
 * to state->alloc.  Nodes are pushed onto state->alloc by putting the current
 * state->alloc into the pushed node's slot[0].
 *
 * The state also contains the implied min/max of the state->node, the depth of
 * this search, and the offset.  The implied min/max are either from the parent
 * node or are 0-oo for the root node.  The depth is incremented or decremented
 * every time a node is walked down or up.  The offset is the slot/pivot of
 * interest in the node - either for reading or writing.
 *
 * When returning a value the maple state index and last respectively contain
 * the start and end of the range for the entry.  Ranges are inclusive in the
 * Maple Tree.
 */
struct ma_state {
	struct maple_tree *tree;	/* The tree we're operating in */
	unsigned long index;		/* The index we're operating on - range start */
	unsigned long last;		/* The last index we're operating on - range end */
	struct maple_enode *node;	/* The node containing this entry */
	unsigned long min;		/* The minimum index of this node - implied pivot min */
	unsigned long max;		/* The maximum index of this node - implied pivot max */
	struct maple_alloc *alloc;	/* Allocated nodes for this operation */
	unsigned char depth;		/* depth of tree descent during write */
	unsigned char offset;
	unsigned char mas_flags;
};

struct ma_wr_state {
	struct ma_state *mas;
	struct maple_node *node;	/* Decoded mas->node */
	unsigned long r_min;		/* range min */
	unsigned long r_max;		/* range max */
	enum maple_type type;		/* mas->node type */
	unsigned char offset_end;	/* The offset where the write ends */
	unsigned char node_end;		/* mas->node end */
	unsigned long *pivots;		/* mas->node->pivots pointer */
	unsigned long end_piv;		/* The pivot at the offset end */
	void __rcu **slots;		/* mas->node->slots pointer */
	void *entry;			/* The entry to write */
	void *content;			/* The existing entry that is being overwritten */
};

#define mas_lock(mas)           spin_lock(&((mas)->tree->ma_lock))
#define mas_unlock(mas)         spin_unlock(&((mas)->tree->ma_lock))


/*
 * Special values for ma_state.node.
 * MAS_START means we have not searched the tree.
 * MAS_ROOT means we have searched the tree and the entry we found lives in
 * the root of the tree (ie it has index 0, length 1 and is the only entry in
 * the tree).
 * MAS_NONE means we have searched the tree and there is no node in the
 * tree for this entry.  For example, we searched for index 1 in an empty
 * tree.  Or we have a tree which points to a full leaf node and we
 * searched for an entry which is larger than can be contained in that
 * leaf node.
 * MA_ERROR represents an errno.  After dropping the lock and attempting
 * to resolve the error, the walk would have to be restarted from the
 * top of the tree as the tree may have been modified.
 */
#define MAS_START	((struct maple_enode *)1UL)
#define MAS_ROOT	((struct maple_enode *)5UL)
#define MAS_NONE	((struct maple_enode *)9UL)
#define MAS_PAUSE	((struct maple_enode *)17UL)
#define MAS_OVERFLOW	((struct maple_enode *)33UL)
#define MAS_UNDERFLOW	((struct maple_enode *)65UL)
#define MA_ERROR(err) \
		((struct maple_enode *)(((unsigned long)err << 2) | 2UL))

#define MA_STATE(name, mt, first, end)					\
	struct ma_state name = {					\
		.tree = mt,						\
		.index = first,						\
		.last = end,						\
		.node = MAS_START,					\
		.min = 0,						\
		.max = ULONG_MAX,					\
		.alloc = NULL,						\
		.mas_flags = 0,						\
	}
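
/*
 * Example sketch of the advanced API: walking every entry below a limit
 * under the RCU read lock.  'my_tree' and do_something() are hypothetical
 * names; mas_for_each() is defined later in this file.
 *
 *	MA_STATE(mas, &my_tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		do_something(entry, mas.index, mas.last);
 *	}
 *	rcu_read_unlock();
 */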

#define MA_WR_STATE(name, ma_state, wr_entry)				\
	struct ma_wr_state name = {					\
		.mas = ma_state,					\
		.content = NULL,					\
		.entry = wr_entry,					\
	}

#define MA_TOPIARY(name, tree)						\
	struct ma_topiary name = {					\
		.head = NULL,						\
		.tail = NULL,						\
		.mtree = tree,						\
	}

void *mas_walk(struct ma_state *mas);
void *mas_store(struct ma_state *mas, void *entry);
void *mas_erase(struct ma_state *mas);
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
void mas_store_prealloc(struct ma_state *mas, void *entry);
void *mas_find(struct ma_state *mas, unsigned long max);
void *mas_find_range(struct ma_state *mas, unsigned long max);
void *mas_find_rev(struct ma_state *mas, unsigned long min);
void *mas_find_range_rev(struct ma_state *mas, unsigned long min);
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
bool mas_is_err(struct ma_state *mas);

bool mas_nomem(struct ma_state *mas, gfp_t gfp);
void mas_pause(struct ma_state *mas);
void maple_tree_init(void);
void mas_destroy(struct ma_state *mas);
int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
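
/*
 * Example sketch: preallocating nodes so that a subsequent store cannot fail
 * with -ENOMEM, e.g. when the store must happen in a context that cannot
 * sleep.  'mas' is a maple state already set to the target range; 'entry' is
 * hypothetical.
 *
 *	if (mas_preallocate(&mas, entry, GFP_KERNEL))
 *		return -ENOMEM;
 *	(... enter the atomic context ...)
 *	mas_store_prealloc(&mas, entry);
 */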

void *mas_prev(struct ma_state *mas, unsigned long min);
void *mas_prev_range(struct ma_state *mas, unsigned long min);
void *mas_next(struct ma_state *mas, unsigned long max);
void *mas_next_range(struct ma_state *mas, unsigned long max);

int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
		   unsigned long size);
/*
 * This finds an empty area from the highest address to the lowest.
 * AKA "Topdown" version.
 */
int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
		       unsigned long max, unsigned long size);

static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
			    unsigned long addr)
{
	memset(mas, 0, sizeof(struct ma_state));
	mas->tree = tree;
	mas->index = mas->last = addr;
	mas->max = ULONG_MAX;
	mas->node = MAS_START;
}

/* Checks if a mas has not found anything */
static inline bool mas_is_none(const struct ma_state *mas)
{
	return mas->node == MAS_NONE;
}

/* Checks if a mas has been paused */
static inline bool mas_is_paused(const struct ma_state *mas)
{
	return mas->node == MAS_PAUSE;
}

/* Check if the mas is pointing to a node or not */
static inline bool mas_is_active(struct ma_state *mas)
{
	if ((unsigned long)mas->node >= MAPLE_RESERVED_RANGE)
		return true;

	return false;
}

/**
 * mas_reset() - Reset a Maple Tree operation state.
 * @mas: Maple Tree operation state.
 *
 * Resets the error or walk state of the @mas so future walks of the
 * array will start from the root.  Use this if you have dropped the
 * lock and want to reuse the ma_state.
 *
 * Context: Any context.
 */
static inline void mas_reset(struct ma_state *mas)
{
	mas->node = MAS_START;
}

/**
 * mas_for_each() - Iterate over a range of the maple tree.
 * @__mas: Maple Tree operation state (maple_state)
 * @__entry: Entry retrieved from the tree
 * @__max: maximum index to retrieve from the tree
 *
 * When returned, mas->index and mas->last will hold the entire range for the
 * entry.
 *
 * Note: may return the zero entry.
 */
#define mas_for_each(__mas, __entry, __max) \
	while (((__entry) = mas_find((__mas), (__max))) != NULL)

/**
 * __mas_set_range() - Set up Maple Tree operation state to a sub-range of the
 * current location.
 * @mas: Maple Tree operation state.
 * @start: New start of range in the Maple Tree.
 * @last: New end of range in the Maple Tree.
 *
 * Set the internal maple state values to a sub-range.
 * Please use mas_set_range() if you do not know where you are in the tree.
 */
static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
		unsigned long last)
{
	mas->index = start;
	mas->last = last;
}

/**
 * mas_set_range() - Set up Maple Tree operation state for a different index.
 * @mas: Maple Tree operation state.
 * @start: New start of range in the Maple Tree.
 * @last: New end of range in the Maple Tree.
 *
 * Move the operation state to refer to a different range.  This will
 * have the effect of starting a walk from the top; see mas_next()
 * to move to an adjacent index.
 */
static inline
void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
{
	__mas_set_range(mas, start, last);
	mas->node = MAS_START;
}

/**
 * mas_set() - Set up Maple Tree operation state for a different index.
 * @mas: Maple Tree operation state.
 * @index: New index into the Maple Tree.
 *
 * Move the operation state to refer to a different index.  This will
 * have the effect of starting a walk from the top; see mas_next()
 * to move to an adjacent index.
 */
static inline void mas_set(struct ma_state *mas, unsigned long index)
{
	mas_set_range(mas, index, index);
}

static inline bool mt_external_lock(const struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
}

/**
 * mt_init_flags() - Initialise an empty maple tree with flags.
 * @mt: Maple Tree
 * @flags: maple tree flags.
 *
 * If you need to initialise a Maple Tree with special flags (eg, an
 * allocation tree), use this function.
 *
 * Context: Any context.
 */
static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags)
{
	mt->ma_flags = flags;
	if (!mt_external_lock(mt))
		spin_lock_init(&mt->ma_lock);
	rcu_assign_pointer(mt->ma_root, NULL);
}

/**
 * mt_init() - Initialise an empty maple tree.
 * @mt: Maple Tree
 *
 * Initialise @mt as an empty Maple Tree with no special flags.
 *
 * Context: Any context.
 */
static inline void mt_init(struct maple_tree *mt)
{
	mt_init_flags(mt, 0);
}

static inline bool mt_in_rcu(struct maple_tree *mt)
{
#ifdef CONFIG_MAPLE_RCU_DISABLED
	return false;
#endif
	return mt->ma_flags & MT_FLAGS_USE_RCU;
}
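
/*
 * Example sketch: an allocation tree uses the gap tracking enabled by
 * MT_FLAGS_ALLOC_RANGE to find a free range for a new entry.  'my_tree',
 * 'my_ptr' and the bounds are hypothetical.
 *
 *	struct maple_tree my_tree;
 *	unsigned long start;
 *
 *	mt_init_flags(&my_tree, MT_FLAGS_ALLOC_RANGE);
 *	if (mtree_alloc_range(&my_tree, &start, my_ptr, 0x1000, 0, 0xffffff,
 *			      GFP_KERNEL))
 *		return -ENOMEM;
 *	(my_ptr is now stored at start to start + 0xfff)
 */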

/**
 * mt_clear_in_rcu() - Switch the tree to non-RCU mode.
 * @mt: The Maple Tree
 */
static inline void mt_clear_in_rcu(struct maple_tree *mt)
{
	if (!mt_in_rcu(mt))
		return;

	if (mt_external_lock(mt)) {
		WARN_ON(!mt_lock_is_held(mt));
		mt->ma_flags &= ~MT_FLAGS_USE_RCU;
	} else {
		mtree_lock(mt);
		mt->ma_flags &= ~MT_FLAGS_USE_RCU;
		mtree_unlock(mt);
	}
}

/**
 * mt_set_in_rcu() - Switch the tree to RCU safe mode.
 * @mt: The Maple Tree
 */
static inline void mt_set_in_rcu(struct maple_tree *mt)
{
	if (mt_in_rcu(mt))
		return;

	if (mt_external_lock(mt)) {
		WARN_ON(!mt_lock_is_held(mt));
		mt->ma_flags |= MT_FLAGS_USE_RCU;
	} else {
		mtree_lock(mt);
		mt->ma_flags |= MT_FLAGS_USE_RCU;
		mtree_unlock(mt);
	}
}

static inline unsigned int mt_height(const struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
}

void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
		    unsigned long max);
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min);
void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);

/**
 * mt_for_each - Iterate over each entry starting at index until max.
 * @__tree: The Maple Tree
 * @__entry: The current entry
 * @__index: The index to start the search from.  Subsequently used as iterator.
 * @__max: The maximum limit for @index
 *
 * This iterator skips all entries which resolve to a NULL pointer,
 * e.g. entries which have been reserved with XA_ZERO_ENTRY.
 */
#define mt_for_each(__tree, __entry, __index, __max) \
	for (__entry = mt_find(__tree, &(__index), __max); \
		__entry; __entry = mt_find_after(__tree, &(__index), __max))
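
/*
 * Example sketch: iterating with the normal API; mt_find() and
 * mt_find_after() handle the RCU read lock internally.  'my_tree' and
 * do_something() are hypothetical names.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	mt_for_each(&my_tree, entry, index, ULONG_MAX) {
 *		do_something(entry);
 *	}
 */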

#ifdef CONFIG_DEBUG_MAPLE_TREE
enum mt_dump_format {
	mt_dump_dec,
	mt_dump_hex,
};

extern atomic_t maple_tree_tests_run;
extern atomic_t maple_tree_tests_passed;

void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
void mas_dump(const struct ma_state *mas);
void mas_wr_dump(const struct ma_wr_state *wr_mas);
void mt_validate(struct maple_tree *mt);
void mt_cache_shrink(void);
#define MT_BUG_ON(__tree, __x) do {					\
	atomic_inc(&maple_tree_tests_run);				\
	if (__x) {							\
		pr_info("BUG at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mt_dump(__tree, mt_dump_hex);				\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
} while (0)

#define MAS_BUG_ON(__mas, __x) do {					\
	atomic_inc(&maple_tree_tests_run);				\
	if (__x) {							\
		pr_info("BUG at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mas_dump(__mas);					\
		mt_dump((__mas)->tree, mt_dump_hex);			\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
} while (0)

#define MAS_WR_BUG_ON(__wrmas, __x) do {				\
	atomic_inc(&maple_tree_tests_run);				\
	if (__x) {							\
		pr_info("BUG at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mas_wr_dump(__wrmas);					\
		mas_dump((__wrmas)->mas);				\
		mt_dump((__wrmas)->mas->tree, mt_dump_hex);		\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
} while (0)

#define MT_WARN_ON(__tree, __x)  ({					\
	int ret = !!(__x);						\
	atomic_inc(&maple_tree_tests_run);				\
	if (ret) {							\
		pr_info("WARN at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mt_dump(__tree, mt_dump_hex);				\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
	unlikely(ret);							\
})

#define MAS_WARN_ON(__mas, __x) ({					\
	int ret = !!(__x);						\
	atomic_inc(&maple_tree_tests_run);				\
	if (ret) {							\
		pr_info("WARN at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mas_dump(__mas);					\
		mt_dump((__mas)->tree, mt_dump_hex);			\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
	unlikely(ret);							\
})

#define MAS_WR_WARN_ON(__wrmas, __x) ({					\
	int ret = !!(__x);						\
	atomic_inc(&maple_tree_tests_run);				\
	if (ret) {							\
		pr_info("WARN at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mas_wr_dump(__wrmas);					\
		mas_dump((__wrmas)->mas);				\
		mt_dump((__wrmas)->mas->tree, mt_dump_hex);		\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
	unlikely(ret);							\
})
#else
#define MT_BUG_ON(__tree, __x)		BUG_ON(__x)
#define MAS_BUG_ON(__mas, __x)		BUG_ON(__x)
#define MAS_WR_BUG_ON(__mas, __x)	BUG_ON(__x)
#define MT_WARN_ON(__tree, __x)		WARN_ON(__x)
#define MAS_WARN_ON(__mas, __x)		WARN_ON(__x)
#define MAS_WR_WARN_ON(__mas, __x)	WARN_ON(__x)
#endif /* CONFIG_DEBUG_MAPLE_TREE */

#endif /*_LINUX_MAPLE_TREE_H */