/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_XARRAY_H
#define _LINUX_XARRAY_H
/*
 * eXtensible Arrays
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 *
 * See Documentation/core-api/xarray.rst for how to use the XArray.
 */

#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/limits.h>
#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct list_lru;

/*
 * The bottom two bits of the entry determine how the XArray interprets
 * the contents:
 *
 * 00: Pointer entry
 * 10: Internal entry
 * x1: Value entry or tagged pointer
 *
 * Attempting to store internal entries in the XArray is a bug.
 *
 * Most internal entries are pointers to the next node in the tree.
 * The following internal entries have a special meaning:
 *
 * 0-62: Sibling entries
 * 256: Retry entry
 * 257: Zero entry
 *
 * Errors are also represented as internal entries, but use the negative
 * space (-4094 to -2). They're never stored in the slots array; only
 * returned by the normal API.
 */

#define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)

/**
 * xa_mk_value() - Create an XArray entry from an integer.
 * @v: Value to store in XArray.
 *
 * Context: Any context.
 * Return: An entry suitable for storing in the XArray.
 */
static inline void *xa_mk_value(unsigned long v)
{
	WARN_ON((long)v < 0);
	return (void *)((v << 1) | 1);
}

/**
 * xa_to_value() - Get value stored in an XArray entry.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: The value stored in the XArray entry.
 */
static inline unsigned long xa_to_value(const void *entry)
{
	return (unsigned long)entry >> 1;
}

/**
 * xa_is_value() - Determine if an entry is a value.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: True if the entry is a value, false if it is a pointer.
 */
static inline bool xa_is_value(const void *entry)
{
	return (unsigned long)entry & 1;
}
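/*
 * Example (illustrative sketch, not part of this header): value entries
 * let you store small integers directly in the array without allocating
 * memory.  The XArray "my_array" and the index used here are hypothetical.
 *
 *	void *entry = xa_mk_value(42);
 *
 *	xa_store(&my_array, 0, entry, GFP_KERNEL);
 *	entry = xa_load(&my_array, 0);
 *	if (xa_is_value(entry))
 *		pr_info("stored %lu\n", xa_to_value(entry));
 */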
/**
 * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
 * @p: Plain pointer.
 * @tag: Tag value (0, 1 or 3).
 *
 * If the user of the XArray prefers, they can tag their pointers instead
 * of storing value entries. Three tags are available (0, 1 and 3).
 * These are distinct from the xa_mark_t as they are not replicated up
 * through the array and cannot be searched for.
 *
 * Context: Any context.
 * Return: An XArray entry.
 */
static inline void *xa_tag_pointer(void *p, unsigned long tag)
{
	return (void *)((unsigned long)p | tag);
}

/**
 * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
 * @entry: XArray entry.
 *
 * If you have stored a tagged pointer in the XArray, call this function
 * to get the untagged version of the pointer.
 *
 * Context: Any context.
 * Return: A pointer.
 */
static inline void *xa_untag_pointer(void *entry)
{
	return (void *)((unsigned long)entry & ~3UL);
}

/**
 * xa_pointer_tag() - Get the tag stored in an XArray entry.
 * @entry: XArray entry.
 *
 * If you have stored a tagged pointer in the XArray, call this function
 * to get the tag of that pointer.
 *
 * Context: Any context.
 * Return: A tag.
 */
static inline unsigned int xa_pointer_tag(void *entry)
{
	return (unsigned long)entry & 3UL;
}

/*
 * xa_mk_internal() - Create an internal entry.
 * @v: Value to turn into an internal entry.
 *
 * Internal entries are used for a number of purposes. Entries 0-255 are
 * used for sibling entries (only 0-62 are used by the current code). 256
 * is used for the retry entry. 257 is used for the reserved / zero entry.
 * Negative internal entries are used to represent errnos. Node pointers
 * are also tagged as internal entries in some situations.
 *
 * Context: Any context.
 * Return: An XArray internal entry corresponding to this value.
 */
static inline void *xa_mk_internal(unsigned long v)
{
	return (void *)((v << 2) | 2);
}

/*
 * xa_to_internal() - Extract the value from an internal entry.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: The value which was stored in the internal entry.
 */
static inline unsigned long xa_to_internal(const void *entry)
{
	return (unsigned long)entry >> 2;
}

/*
 * xa_is_internal() - Is the entry an internal entry?
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: %true if the entry is an internal entry.
 */
static inline bool xa_is_internal(const void *entry)
{
	return ((unsigned long)entry & 3) == 2;
}

#define XA_ZERO_ENTRY		xa_mk_internal(257)

/**
 * xa_is_zero() - Is the entry a zero entry?
 * @entry: Entry retrieved from the XArray
 *
 * The normal API will return NULL as the contents of a slot containing
 * a zero entry. You can only see zero entries by using the advanced API.
 *
 * Return: %true if the entry is a zero entry.
 */
static inline bool xa_is_zero(const void *entry)
{
	return unlikely(entry == XA_ZERO_ENTRY);
}

/**
 * xa_is_err() - Report whether an XArray operation returned an error.
 * @entry: Result from calling an XArray function.
 *
 * If an XArray operation cannot complete, it will return a special
 * value indicating an error. This function tells you whether an error
 * occurred; xa_err() tells you which error occurred.
 *
 * Context: Any context.
 * Return: %true if the entry indicates an error.
 */
static inline bool xa_is_err(const void *entry)
{
	return unlikely(xa_is_internal(entry) &&
			entry >= xa_mk_internal(-MAX_ERRNO));
}

/**
 * xa_err() - Turn an XArray result into an errno.
 * @entry: Result from calling an XArray function.
 *
 * If an XArray operation cannot complete, it will return a special
 * pointer value which encodes an errno. This function extracts
 * the errno from the pointer value, or returns 0 if the pointer does not
 * represent an errno.
 *
 * Context: Any context.
 * Return: A negative errno or 0.
 */
static inline int xa_err(void *entry)
{
	/* xa_to_internal() would not do sign extension. */
	if (xa_is_err(entry))
		return (long)entry >> 2;
	return 0;
}
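/*
 * Example (illustrative sketch; "my_array" is hypothetical): the normal
 * API returns errors as encoded pointers, so check the result with
 * xa_is_err() or convert it with xa_err() rather than comparing it
 * against NULL.
 *
 *	void *old = xa_store(&my_array, index, ptr, GFP_KERNEL);
 *
 *	if (xa_is_err(old))
 *		return xa_err(old);	// e.g. -ENOMEM
 */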
/**
 * struct xa_limit - Represents a range of IDs.
 * @min: The lowest ID to allocate (inclusive).
 * @max: The maximum ID to allocate (inclusive).
 *
 * This structure is used either directly or via the XA_LIMIT() macro
 * to communicate the range of IDs that are valid for allocation.
 * Three common ranges are predefined for you:
 * * xa_limit_32b	- [0 - UINT_MAX]
 * * xa_limit_31b	- [0 - INT_MAX]
 * * xa_limit_16b	- [0 - USHRT_MAX]
 */
struct xa_limit {
	u32 max;
	u32 min;
};

#define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max }

#define xa_limit_32b	XA_LIMIT(0, UINT_MAX)
#define xa_limit_31b	XA_LIMIT(0, INT_MAX)
#define xa_limit_16b	XA_LIMIT(0, USHRT_MAX)

typedef unsigned __bitwise xa_mark_t;
#define XA_MARK_0		((__force xa_mark_t)0U)
#define XA_MARK_1		((__force xa_mark_t)1U)
#define XA_MARK_2		((__force xa_mark_t)2U)
#define XA_PRESENT		((__force xa_mark_t)8U)
#define XA_MARK_MAX		XA_MARK_2
#define XA_FREE_MARK		XA_MARK_0

enum xa_lock_type {
	XA_LOCK_IRQ = 1,
	XA_LOCK_BH = 2,
};

/*
 * Values for xa_flags. The radix tree stores its GFP flags in the xa_flags,
 * and we remain compatible with that.
 */
#define XA_FLAGS_LOCK_IRQ	((__force gfp_t)XA_LOCK_IRQ)
#define XA_FLAGS_LOCK_BH	((__force gfp_t)XA_LOCK_BH)
#define XA_FLAGS_TRACK_FREE	((__force gfp_t)4U)
#define XA_FLAGS_ZERO_BUSY	((__force gfp_t)8U)
#define XA_FLAGS_ALLOC_WRAPPED	((__force gfp_t)16U)
#define XA_FLAGS_ACCOUNT	((__force gfp_t)32U)
#define XA_FLAGS_MARK(mark)	((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
						(__force unsigned)(mark)))

/* ALLOC is for a normal 0-based alloc. ALLOC1 is for a 1-based alloc */
#define XA_FLAGS_ALLOC	(XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK))
#define XA_FLAGS_ALLOC1	(XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY)

/**
 * struct xarray - The anchor of the XArray.
 * @xa_lock: Lock that protects the contents of the XArray.
 *
 * To use the xarray, define it statically or embed it in your data structure.
 * It is a very small data structure, so it does not usually make sense to
 * allocate it separately and keep a pointer to it in your data structure.
 *
 * You may use the xa_lock to protect your own data structures as well.
 */
/*
 * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
 * If the only non-NULL entry in the array is at index 0, @xa_head is that
 * entry. If any other entry in the array is non-NULL, @xa_head points
 * to an @xa_node.
 */
struct xarray {
	spinlock_t	xa_lock;
/* private: The rest of the data structure is not to be used directly. */
	gfp_t		xa_flags;
	void __rcu *	xa_head;
};

#define XARRAY_INIT(name, flags) {				\
	.xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),		\
	.xa_flags = flags,					\
	.xa_head = NULL,					\
}

/**
 * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
 * @name: A string that names your XArray.
 * @flags: XA_FLAG values.
 *
 * This is intended for file scope definitions of XArrays. It declares
 * and initialises an empty XArray with the chosen name and flags. It is
 * equivalent to calling xa_init_flags() on the array, but it does the
 * initialisation at compile time instead of runtime.
 */
#define DEFINE_XARRAY_FLAGS(name, flags)				\
	struct xarray name = XARRAY_INIT(name, flags)
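/*
 * Example (illustrative sketch; the names below are hypothetical): a
 * file-scope allocating XArray together with a custom ID range built
 * with XA_LIMIT().
 *
 *	DEFINE_XARRAY_FLAGS(session_ids, XA_FLAGS_ALLOC);
 *	static const struct xa_limit session_limit = XA_LIMIT(100, 999);
 */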
/**
 * DEFINE_XARRAY() - Define an XArray.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of XArrays. It declares
 * and initialises an empty XArray with the chosen name. It is equivalent
 * to calling xa_init() on the array, but it does the initialisation at
 * compile time instead of runtime.
 */
#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)

/**
 * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of allocating XArrays.
 * See also DEFINE_XARRAY().
 */
#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)

/**
 * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of allocating XArrays.
 * See also DEFINE_XARRAY().
 */
#define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1)

void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *xa_erase(struct xarray *, unsigned long index);
void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
			void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
void *xa_find(struct xarray *xa, unsigned long *index,
		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
void *xa_find_after(struct xarray *xa, unsigned long *index,
		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
		unsigned long max, unsigned int n, xa_mark_t);
void xa_destroy(struct xarray *);

/**
 * xa_init_flags() - Initialise an empty XArray with flags.
 * @xa: XArray.
 * @flags: XA_FLAG values.
 *
 * If you need to initialise an XArray with special flags (e.g. you need
 * to take the lock from interrupt context), use this function instead
 * of xa_init().
 *
 * Context: Any context.
 */
static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
{
	spin_lock_init(&xa->xa_lock);
	xa->xa_flags = flags;
	xa->xa_head = NULL;
}

/**
 * xa_init() - Initialise an empty XArray.
 * @xa: XArray.
 *
 * An empty XArray is full of NULL entries.
 *
 * Context: Any context.
 */
static inline void xa_init(struct xarray *xa)
{
	xa_init_flags(xa, 0);
}

/**
 * xa_empty() - Determine if an array has any present entries.
 * @xa: XArray.
 *
 * Context: Any context.
 * Return: %true if the array contains only NULL pointers.
 */
static inline bool xa_empty(const struct xarray *xa)
{
	return xa->xa_head == NULL;
}

/**
 * xa_marked() - Inquire whether any entry in this array has a mark set
 * @xa: Array
 * @mark: Mark value
 *
 * Context: Any context.
 * Return: %true if any entry has this mark set.
 */
static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
{
	return xa->xa_flags & XA_FLAGS_MARK(mark);
}
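/*
 * Example (illustrative sketch; "my_array" and "struct thing" are
 * hypothetical): the basic lifecycle of an entry using the normal API.
 *
 *	DEFINE_XARRAY(my_array);
 *
 *	int add_thing(unsigned long index, struct thing *t)
 *	{
 *		return xa_err(xa_store(&my_array, index, t, GFP_KERNEL));
 *	}
 *
 *	struct thing *get_thing(unsigned long index)
 *	{
 *		return xa_load(&my_array, index);	// RCU-safe lookup
 *	}
 *
 *	void del_thing(unsigned long index)
 *	{
 *		xa_erase(&my_array, index);
 *	}
 */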
/**
 * xa_for_each_range() - Iterate over a portion of an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @start: First index to retrieve from array.
 * @last: Last index to retrieve from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. You may modify @index during the iteration if you
 * want to skip or reprocess indices. It is safe to modify the array
 * during the iteration. At the end of the iteration, @entry will be set
 * to NULL and @index will have a value less than or equal to @last.
 *
 * xa_for_each_range() is O(n.log(n)) while xas_for_each() is O(n). You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_range() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each() iterator instead.
 * The xas_for_each() iterator will expand into more inline code than
 * xa_for_each_range().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each_range(xa, index, entry, start, last)		\
	for (index = start,						\
	     entry = xa_find(xa, &index, last, XA_PRESENT);		\
	     entry;							\
	     entry = xa_find_after(xa, &index, last, XA_PRESENT))

/**
 * xa_for_each_start() - Iterate over a portion of an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @start: First index to retrieve from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. You may modify @index during the iteration if you
 * want to skip or reprocess indices. It is safe to modify the array
 * during the iteration. At the end of the iteration, @entry will be set
 * to NULL and @index will have a value less than or equal to %ULONG_MAX.
 *
 * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_start() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each() iterator instead.
 * The xas_for_each() iterator will expand into more inline code than
 * xa_for_each_start().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each_start(xa, index, entry, start)			\
	xa_for_each_range(xa, index, entry, start, ULONG_MAX)
/**
 * xa_for_each() - Iterate over present entries in an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. You may modify @index during the iteration if you want
 * to skip or reprocess indices. It is safe to modify the array during the
 * iteration. At the end of the iteration, @entry will be set to NULL and
 * @index will have a value less than or equal to %ULONG_MAX.
 *
 * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)). xa_for_each()
 * will spin if it hits a retry entry; if you intend to see retry entries,
 * you should use the xas_for_each() iterator instead. The xas_for_each()
 * iterator will expand into more inline code than xa_for_each().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each(xa, index, entry) \
	xa_for_each_start(xa, index, entry, 0)

/**
 * xa_for_each_marked() - Iterate over marked entries in an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @filter: Selection criterion.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. The iteration will skip all entries in the array
 * which do not match @filter. You may modify @index during the iteration
 * if you want to skip or reprocess indices. It is safe to modify the array
 * during the iteration. At the end of the iteration, @entry will be set to
 * NULL and @index will have a value less than or equal to %ULONG_MAX.
 *
 * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
 * You have to handle your own locking with xas_for_each_marked(), and if
 * you have to unlock after each iteration, it will also end up being
 * O(n.log(n)). xa_for_each_marked() will spin if it hits a retry entry;
 * if you intend to see retry entries, you should use the
 * xas_for_each_marked() iterator instead. The xas_for_each_marked()
 * iterator will expand into more inline code than xa_for_each_marked().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each_marked(xa, index, entry, filter) \
	for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
	     entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))

#define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
#define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
#define xa_lock_bh(xa)		spin_lock_bh(&(xa)->xa_lock)
#define xa_unlock_bh(xa)	spin_unlock_bh(&(xa)->xa_lock)
#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)
#define xa_lock_irqsave(xa, flags) \
				spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
				spin_unlock_irqrestore(&(xa)->xa_lock, flags)
#define xa_lock_nested(xa, subclass) \
				spin_lock_nested(&(xa)->xa_lock, subclass)
#define xa_lock_bh_nested(xa, subclass) \
				spin_lock_bh_nested(&(xa)->xa_lock, subclass)
#define xa_lock_irq_nested(xa, subclass) \
				spin_lock_irq_nested(&(xa)->xa_lock, subclass)
#define xa_lock_irqsave_nested(xa, flags, subclass)		\
		spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)
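/*
 * Example (illustrative sketch; "my_array", "struct thing" and the helper
 * are hypothetical): iterating over present entries, and over entries
 * carrying a particular mark.
 *
 *	unsigned long index;
 *	struct thing *t;
 *
 *	xa_for_each(&my_array, index, t)
 *		pr_info("present at index %lu\n", index);
 *
 *	xa_for_each_marked(&my_array, index, t, XA_MARK_0)
 *		process_marked(t);	// hypothetical helper
 */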
/*
 * Versions of the normal API which require the caller to hold the
 * xa_lock. If the GFP flags allow it, they will drop the lock to
 * allocate memory, then reacquire it afterwards. These functions
 * may also re-enable interrupts if the XArray flags indicate the
 * locking should be interrupt safe.
 */
void *__xa_erase(struct xarray *, unsigned long index);
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
		void *entry, gfp_t);
int __must_check __xa_insert(struct xarray *, unsigned long index,
		void *entry, gfp_t);
int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
		struct xa_limit, gfp_t);
int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
		struct xa_limit, u32 *next, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);

/**
 * xa_store_bh() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_store() except it disables softirqs
 * while holding the array lock.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_bh(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock_bh(xa);

	return curr;
}

/**
 * xa_store_irq() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_store() except it disables interrupts
 * while holding the array lock.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_irq(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock_irq(xa);

	return curr;
}

/**
 * xa_erase_bh() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_bh(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_bh(xa);

	return entry;
}
/**
 * xa_erase_irq() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_irq(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_irq(xa);

	return entry;
}

/**
 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * If the entry at @index is the same as @old, replace it with @entry.
 * If the return value is equal to @old, then the exchange was successful.
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep
 * if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock(xa);

	return curr;
}

/**
 * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_cmpxchg() except it disables softirqs
 * while holding the array lock.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_bh(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock_bh(xa);

	return curr;
}

/**
 * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_cmpxchg() except it disables interrupts
 * while holding the array lock.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_irq(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock_irq(xa);

	return curr;
}
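/*
 * Example (illustrative sketch; "my_array" and "struct thing" are
 * hypothetical): insert a newly allocated object only if the slot is
 * still empty, dropping it if we lost the race.
 *
 *	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);
 *	void *old;
 *
 *	if (!t)
 *		return -ENOMEM;
 *	old = xa_cmpxchg(&my_array, index, NULL, t, GFP_KERNEL);
 *	if (old) {			// raced with another store, or error
 *		kfree(t);
 *		return xa_is_err(old) ? xa_err(old) : -EBUSY;
 *	}
 */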
/**
 * xa_insert() - Store this entry in the XArray unless another entry is
 *			already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present. Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep if
 * the @gfp flags permit.
 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock(xa);

	return err;
}

/**
 * xa_insert_bh() - Store this entry in the XArray unless another entry is
 *			already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present. Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert_bh(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_bh(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_insert_irq() - Store this entry in the XArray unless another entry is
 *			already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present. Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert_irq(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_irq(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Must only be called on an XArray initialised with the XA_FLAGS_ALLOC
 * flag set, e.g. via xa_init_flags().
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep if
 * the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock(xa);

	return err;
}
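/*
 * Example (illustrative sketch; "session_ids" and "session" are
 * hypothetical): allocate a free ID for a new object in an allocating
 * XArray.
 *
 *	u32 id;
 *	int err = xa_alloc(&session_ids, &id, session,
 *			   xa_limit_32b, GFP_KERNEL);
 *
 *	if (err < 0)
 *		return err;	// -EBUSY or -ENOMEM
 *	session->id = id;
 */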
/**
 * xa_alloc_bh() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Must only be called on an XArray initialised with the XA_FLAGS_ALLOC
 * flag set, e.g. via xa_init_flags().
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_bh(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_alloc_irq() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Must only be called on an XArray initialised with the XA_FLAGS_ALLOC
 * flag set, e.g. via xa_init_flags().
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_irq(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Must only be called on an XArray initialised with the XA_FLAGS_ALLOC
 * flag set, e.g. via xa_init_flags().
 *
 * Note that callers interested in whether wrapping has occurred should
 * use __xa_alloc_cyclic() instead.
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep if
 * the @gfp flags permit.
 * Return: 0 if the allocation succeeded, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock(xa);

	return err < 0 ? err : 0;
}
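/*
 * Example (illustrative sketch; "session_ids" and "session" are
 * hypothetical): cyclic allocation keeps a cursor so that recently
 * freed IDs are not immediately reused.
 *
 *	static u32 next_id;
 *
 *	u32 id;
 *	int err = xa_alloc_cyclic(&session_ids, &id, session,
 *				  xa_limit_32b, &next_id, GFP_KERNEL);
 *
 *	if (err < 0)
 *		return err;
 */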
/**
 * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Must only be called on an XArray initialised with the XA_FLAGS_ALLOC
 * flag set, e.g. via xa_init_flags().
 *
 * Note that callers interested in whether wrapping has occurred should
 * use __xa_alloc_cyclic() instead.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: 0 if the allocation succeeded, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_bh(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock_bh(xa);

	return err < 0 ? err : 0;
}

/**
 * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Must only be called on an XArray initialised with the XA_FLAGS_ALLOC
 * flag set, e.g. via xa_init_flags().
 *
 * Note that callers interested in whether wrapping has occurred should
 * use __xa_alloc_cyclic() instead.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: 0 if the allocation succeeded, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_irq(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock_irq(xa);

	return err < 0 ? err : 0;
}

/**
 * xa_reserve() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * Ensures there is somewhere to store an entry at @index in the array.
 * If there is already something stored at @index, this function does
 * nothing. If there was nothing there, the entry is marked as reserved.
 * Loading from a reserved entry returns a %NULL pointer.
 *
 * If you do not use the entry that you have reserved, call xa_release()
 * or xa_erase() to free any unnecessary memory.
 *
 * Context: Any context. Takes and releases the xa_lock.
 * May sleep if the @gfp flags permit.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_reserve_bh() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * A softirq-disabling version of xa_reserve().
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_reserve_irq() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * An interrupt-disabling version of xa_reserve().
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_release() - Release a reserved entry.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After calling xa_reserve(), you can call this function to release the
 * reservation. If the entry at @index has been stored to, this function
 * will do nothing.
 */
static inline void xa_release(struct xarray *xa, unsigned long index)
{
	xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0);
}
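/*
 * Example (illustrative sketch; "my_array" and the surrounding logic are
 * hypothetical): reserve a slot while allocation is easy, then either
 * store into it later or give the reservation back.
 *
 *	if (xa_reserve(&my_array, index, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	if (setup_succeeded)
 *		xa_store(&my_array, index, t, GFP_KERNEL);
 *	else
 *		xa_release(&my_array, index);
 */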
/* Everything below here is the Advanced API. Proceed with caution. */

/*
 * The xarray is constructed out of a set of 'chunks' of pointers. Choosing
 * the best chunk size requires some tradeoffs. A power of two recommends
 * itself so that we can walk the tree based purely on shifts and masks.
 * Generally, the larger the better; as the number of slots per level of the
 * tree increases, the less tall the tree needs to be. But that needs to be
 * balanced against the memory consumption of each node. On a 64-bit system,
 * xa_node is currently 576 bytes, and we get 7 of them per 4kB page. If we
 * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
 */
#ifndef XA_CHUNK_SHIFT
#define XA_CHUNK_SHIFT		(IS_ENABLED(CONFIG_BASE_SMALL) ? 4 : 6)
#endif
#define XA_CHUNK_SIZE		(1UL << XA_CHUNK_SHIFT)
#define XA_CHUNK_MASK		(XA_CHUNK_SIZE - 1)
#define XA_MAX_MARKS		3
#define XA_MARK_LONGS		BITS_TO_LONGS(XA_CHUNK_SIZE)

/*
 * @count is the count of every non-NULL element in the ->slots array
 * whether that is a value entry, a retry entry, a user pointer,
 * a sibling entry or a pointer to the next level of the tree.
 * @nr_values is the count of every element in ->slots which is
 * either a value entry or a sibling of a value entry.
 */
struct xa_node {
	unsigned char	shift;		/* Bits remaining in each slot */
	unsigned char	offset;		/* Slot offset in parent */
	unsigned char	count;		/* Total entry count */
	unsigned char	nr_values;	/* Value entry count */
	struct xa_node __rcu *parent;	/* NULL at top of tree */
	struct xarray	*array;		/* The array we belong to */
	union {
		struct list_head private_list;	/* For tree user */
		struct rcu_head	rcu_head;	/* Used when freeing node */
	};
	void __rcu	*slots[XA_CHUNK_SIZE];
	union {
		unsigned long	tags[XA_MAX_MARKS][XA_MARK_LONGS];
		unsigned long	marks[XA_MAX_MARKS][XA_MARK_LONGS];
	};
};

void xa_dump(const struct xarray *);
void xa_dump_node(const struct xa_node *);

#ifdef XA_DEBUG
#define XA_BUG_ON(xa, x) do {					\
		if (x) {					\
			xa_dump(xa);				\
			BUG();					\
		}						\
	} while (0)
#define XA_NODE_BUG_ON(node, x) do {				\
		if (x) {					\
			if (node) xa_dump_node(node);		\
			BUG();					\
		}						\
	} while (0)
#else
#define XA_BUG_ON(xa, x)	do { } while (0)
#define XA_NODE_BUG_ON(node, x)	do { } while (0)
#endif

/* Private */
static inline void *xa_head(const struct xarray *xa)
{
	return rcu_dereference_check(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_head_locked(const struct xarray *xa)
{
	return rcu_dereference_protected(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_entry(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_check(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_entry_locked(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_protected(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline struct xa_node *xa_parent(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_check(node->parent,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_protected(node->parent,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_mk_node(const struct xa_node *node)
{
	return (void *)((unsigned long)node | 2);
}

/* Private */
static inline struct xa_node *xa_to_node(const void *entry)
{
	return (struct xa_node *)((unsigned long)entry - 2);
}

/* Private */
static inline bool xa_is_node(const void *entry)
{
	return xa_is_internal(entry) && (unsigned long)entry > 4096;
}

/* Private */
static inline void *xa_mk_sibling(unsigned int offset)
{
	return xa_mk_internal(offset);
}

/* Private */
static inline unsigned long xa_to_sibling(const void *entry)
{
	return xa_to_internal(entry);
}
/**
 * xa_is_sibling() - Is the entry a sibling entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a sibling entry.
 */
static inline bool xa_is_sibling(const void *entry)
{
	return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
}

#define XA_RETRY_ENTRY		xa_mk_internal(256)

/**
 * xa_is_retry() - Is the entry a retry entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a retry entry.
 */
static inline bool xa_is_retry(const void *entry)
{
	return unlikely(entry == XA_RETRY_ENTRY);
}

/**
 * xa_is_advanced() - Is the entry only permitted for the advanced API?
 * @entry: Entry to be stored in the XArray.
 *
 * Return: %true if the entry cannot be stored by the normal API.
 */
static inline bool xa_is_advanced(const void *entry)
{
	return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
}

/**
 * typedef xa_update_node_t - A callback function from the XArray.
 * @node: The node which is being processed
 *
 * This function is called every time the XArray updates the count of
 * present and value entries in a node. It allows advanced users to
 * maintain the private_list in the node.
 *
 * Context: The xa_lock is held and interrupts may be disabled.
 * Implementations should not drop the xa_lock, nor re-enable
 * interrupts.
 */
typedef void (*xa_update_node_t)(struct xa_node *node);

void xa_delete_node(struct xa_node *, xa_update_node_t);

/*
 * The xa_state is opaque to its users. It contains various different pieces
 * of state involved in the current operation on the XArray. It should be
 * declared on the stack and passed between the various internal routines.
 * The various elements in it should not be accessed directly, but only
 * through the provided accessor functions. The below documentation is for
 * the benefit of those working on the code, not for users of the XArray.
 *
 * @xa_node usually points to the xa_node containing the slot we're operating
 * on (and @xa_offset is the offset in the slots array). If there is a
 * single entry in the array at index 0, there are no allocated xa_nodes to
 * point to, and so we store %NULL in @xa_node. @xa_node is set to
 * the value %XAS_RESTART if the xa_state is not walked to the correct
 * position in the tree of nodes for this operation. If an error occurs
 * during an operation, it is set to an %XAS_ERROR value. If we run off the
 * end of the allocated nodes, it is set to %XAS_BOUNDS.
 */
struct xa_state {
	struct xarray *xa;
	unsigned long xa_index;
	unsigned char xa_shift;
	unsigned char xa_sibs;
	unsigned char xa_offset;
	unsigned char xa_pad;		/* Helps gcc generate better code */
	struct xa_node *xa_node;
	struct xa_node *xa_alloc;
	xa_update_node_t xa_update;
	struct list_lru *xa_lru;
};
/*
 * We encode errnos in the xas->xa_node. If an error has happened, we need to
 * drop the lock to fix it, and once we've done so the xa_state is invalid.
 */
#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
#define XAS_BOUNDS	((struct xa_node *)1UL)
#define XAS_RESTART	((struct xa_node *)3UL)

#define __XA_STATE(array, index, shift, sibs)  {	\
	.xa = array,					\
	.xa_index = index,				\
	.xa_shift = shift,				\
	.xa_sibs = sibs,				\
	.xa_offset = 0,					\
	.xa_pad = 0,					\
	.xa_node = XAS_RESTART,				\
	.xa_alloc = NULL,				\
	.xa_update = NULL,				\
	.xa_lru = NULL,					\
}

/**
 * XA_STATE() - Declare an XArray operation state.
 * @name: Name of this operation state (usually xas).
 * @array: Array to operate on.
 * @index: Initial index of interest.
 *
 * Declare and initialise an xa_state on the stack.
 */
#define XA_STATE(name, array, index)				\
	struct xa_state name = __XA_STATE(array, index, 0, 0)

/**
 * XA_STATE_ORDER() - Declare an XArray operation state.
 * @name: Name of this operation state (usually xas).
 * @array: Array to operate on.
 * @index: Initial index of interest.
 * @order: Order of entry.
 *
 * Declare and initialise an xa_state on the stack. This variant of
 * XA_STATE() allows you to specify the 'order' of the element you
 * want to operate on.
 */
#define XA_STATE_ORDER(name, array, index, order)		\
	struct xa_state name = __XA_STATE(array,		\
			(index >> order) << order,		\
			order - (order % XA_CHUNK_SHIFT),	\
			(1U << (order % XA_CHUNK_SHIFT)) - 1)

#define xas_marked(xas, mark)	xa_marked((xas)->xa, (mark))
#define xas_trylock(xas)	xa_trylock((xas)->xa)
#define xas_lock(xas)		xa_lock((xas)->xa)
#define xas_unlock(xas)		xa_unlock((xas)->xa)
#define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)
#define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)
#define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)
#define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)
#define xas_lock_irqsave(xas, flags) \
				xa_lock_irqsave((xas)->xa, flags)
#define xas_unlock_irqrestore(xas, flags) \
				xa_unlock_irqrestore((xas)->xa, flags)
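/*
 * Example (illustrative sketch; "my_array" is hypothetical): declaring an
 * operation state on the stack and using it for a lookup under the RCU
 * lock.  A real caller would normally also handle retry entries; see
 * xas_retry() below.
 *
 *	XA_STATE(xas, &my_array, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = xas_load(&xas);
 *	rcu_read_unlock();
 */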
/**
 * xas_error() - Return an errno stored in the xa_state.
 * @xas: XArray operation state.
 *
 * Return: 0 if no error has been noted. A negative errno if one has.
 */
static inline int xas_error(const struct xa_state *xas)
{
	return xa_err(xas->xa_node);
}

/**
 * xas_set_err() - Note an error in the xa_state.
 * @xas: XArray operation state.
 * @err: Negative error number.
 *
 * Only call this function with a negative @err; zero or positive errors
 * will probably not behave the way you think they should. If you want
 * to clear the error from an xa_state, use xas_reset().
 */
static inline void xas_set_err(struct xa_state *xas, long err)
{
	xas->xa_node = XA_ERROR(err);
}

/**
 * xas_invalid() - Is the xas in a retry or error state?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas cannot be used for operations.
 */
static inline bool xas_invalid(const struct xa_state *xas)
{
	return (unsigned long)xas->xa_node & 3;
}

/**
 * xas_valid() - Is the xas a valid cursor into the array?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas can be used for operations.
 */
static inline bool xas_valid(const struct xa_state *xas)
{
	return !xas_invalid(xas);
}

/**
 * xas_is_node() - Does the xas point to a node?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas currently references a node.
 */
static inline bool xas_is_node(const struct xa_state *xas)
{
	return xas_valid(xas) && xas->xa_node;
}

/* True if the pointer is something other than a node */
static inline bool xas_not_node(struct xa_node *node)
{
	return ((unsigned long)node & 3) || !node;
}

/* True if the node represents RESTART or an error */
static inline bool xas_frozen(struct xa_node *node)
{
	return (unsigned long)node & 2;
}

/* True if the node represents head-of-tree, RESTART or BOUNDS */
static inline bool xas_top(struct xa_node *node)
{
	return node <= XAS_RESTART;
}

/**
 * xas_reset() - Reset an XArray operation state.
 * @xas: XArray operation state.
 *
 * Resets the error or walk state of the @xas so future walks of the
 * array will start from the root. Use this if you have dropped the
 * xarray lock and want to reuse the xa_state.
 *
 * Context: Any context.
 */
static inline void xas_reset(struct xa_state *xas)
{
	xas->xa_node = XAS_RESTART;
}
/**
 * xas_retry() - Retry the operation if appropriate.
 * @xas: XArray operation state.
 * @entry: Entry from xarray.
 *
 * The advanced functions may sometimes return an internal entry, such as
 * a retry entry or a zero entry. This function sets up the @xas to restart
 * the walk from the head of the array if needed.
 *
 * Context: Any context.
 * Return: %true if the operation needs to be retried.
 */
static inline bool xas_retry(struct xa_state *xas, const void *entry)
{
	if (xa_is_zero(entry))
		return true;
	if (!xa_is_retry(entry))
		return false;
	xas_reset(xas);
	return true;
}

void *xas_load(struct xa_state *);
void *xas_store(struct xa_state *, void *entry);
void *xas_find(struct xa_state *, unsigned long max);
void *xas_find_conflict(struct xa_state *);

bool xas_get_mark(const struct xa_state *, xa_mark_t);
void xas_set_mark(const struct xa_state *, xa_mark_t);
void xas_clear_mark(const struct xa_state *, xa_mark_t);
void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
void xas_init_marks(const struct xa_state *);

bool xas_nomem(struct xa_state *, gfp_t);
void xas_destroy(struct xa_state *);
void xas_pause(struct xa_state *);

void xas_create_range(struct xa_state *);

#ifdef CONFIG_XARRAY_MULTI
int xa_get_order(struct xarray *, unsigned long index);
int xas_get_order(struct xa_state *xas);
void xas_split(struct xa_state *, void *entry, unsigned int order);
void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
void xas_try_split(struct xa_state *xas, void *entry, unsigned int order);
unsigned int xas_try_split_min_order(unsigned int order);
#else
static inline int xa_get_order(struct xarray *xa, unsigned long index)
{
	return 0;
}

static inline int xas_get_order(struct xa_state *xas)
{
	return 0;
}

static inline void xas_split(struct xa_state *xas, void *entry,
		unsigned int order)
{
	xas_store(xas, entry);
}

static inline void xas_split_alloc(struct xa_state *xas, void *entry,
		unsigned int order, gfp_t gfp)
{
}

static inline void xas_try_split(struct xa_state *xas, void *entry,
		unsigned int order)
{
}

static inline unsigned int xas_try_split_min_order(unsigned int order)
{
	return 0;
}

#endif

/**
 * xas_reload() - Refetch an entry from the xarray.
 * @xas: XArray operation state.
 *
 * Use this function to check that a previously loaded entry still has
 * the same value. This is useful for the lockless pagecache lookup where
 * we walk the array with only the RCU lock to protect us, lock the page,
 * then check that the page hasn't moved since we looked it up.
 *
 * The caller guarantees that @xas is still valid. If it may be in an
 * error or restart state, call xas_load() instead.
 *
 * Return: The entry at this location in the xarray.
 */
static inline void *xas_reload(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;
	void *entry;
	char offset;

	if (!node)
		return xa_head(xas->xa);
	if (IS_ENABLED(CONFIG_XARRAY_MULTI)) {
		offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;
		entry = xa_entry(xas->xa, node, offset);
		if (!xa_is_sibling(entry))
			return entry;
		offset = xa_to_sibling(entry);
	} else {
		offset = xas->xa_offset;
	}
	return xa_entry(xas->xa, node, offset);
}
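/*
 * Example (illustrative sketch; "my_array" and "struct thing" are
 * hypothetical): retry entries seen while walking under RCU are handled
 * by restarting the walk with xas_retry().
 *
 *	XA_STATE(xas, &my_array, index);
 *	struct thing *t;
 *
 *	rcu_read_lock();
 *	do {
 *		t = xas_load(&xas);
 *	} while (xas_retry(&xas, t));
 *	rcu_read_unlock();
 */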
/**
 * xas_set() - Set up XArray operation state for a different index.
 * @xas: XArray operation state.
 * @index: New index into the XArray.
 *
 * Move the operation state to refer to a different index. This will
 * have the effect of starting a walk from the top; see xas_next()
 * to move to an adjacent index.
 */
static inline void xas_set(struct xa_state *xas, unsigned long index)
{
	xas->xa_index = index;
	xas->xa_node = XAS_RESTART;
}

/**
 * xas_advance() - Skip over sibling entries.
 * @xas: XArray operation state.
 * @index: Index of last sibling entry.
 *
 * Move the operation state to refer to the last sibling entry.
 * This is useful for loops that normally want to see sibling
 * entries but sometimes want to skip them. Use xas_set() if you
 * want to move to an index which is not part of this entry.
 */
static inline void xas_advance(struct xa_state *xas, unsigned long index)
{
	unsigned char shift = xas_is_node(xas) ? xas->xa_node->shift : 0;

	xas->xa_index = index;
	xas->xa_offset = (index >> shift) & XA_CHUNK_MASK;
}

/**
 * xas_set_order() - Set up XArray operation state for a multislot entry.
 * @xas: XArray operation state.
 * @index: Target of the operation.
 * @order: Entry occupies 2^@order indices.
 */
static inline void xas_set_order(struct xa_state *xas, unsigned long index,
					unsigned int order)
{
#ifdef CONFIG_XARRAY_MULTI
	xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
	xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
	xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	xas->xa_node = XAS_RESTART;
#else
	BUG_ON(order > 0);
	xas_set(xas, index);
#endif
}

/**
 * xas_set_update() - Set up XArray operation state for a callback.
 * @xas: XArray operation state.
 * @update: Function to call when updating a node.
 *
 * The XArray can notify a caller after it has updated an xa_node.
 * This is advanced functionality and is only needed by the page
 * cache and swap cache.
 */
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
	xas->xa_update = update;
}

static inline void xas_set_lru(struct xa_state *xas, struct list_lru *lru)
{
	xas->xa_lru = lru;
}
/**
 * xas_set_update() - Set up XArray operation state for a callback.
 * @xas: XArray operation state.
 * @update: Function to call when updating a node.
 *
 * The XArray can notify a caller after it has updated an xa_node.
 * This is advanced functionality and is only needed by the page
 * cache and swap cache.
 */
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
	xas->xa_update = update;
}

static inline void xas_set_lru(struct xa_state *xas, struct list_lru *lru)
{
	xas->xa_lru = lru;
}

/**
 * xas_next_entry() - Advance iterator to next present entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * xas_next_entry() is an inline function to optimise xarray traversal for
 * speed. It is equivalent to calling xas_find(), and will call xas_find()
 * for all the hard cases.
 *
 * Return: The next present entry after the one currently referred to by @xas.
 */
static inline void *xas_next_entry(struct xa_state *xas, unsigned long max)
{
	struct xa_node *node = xas->xa_node;
	void *entry;

	if (unlikely(xas_not_node(node) || node->shift ||
			xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
		return xas_find(xas, max);

	do {
		if (unlikely(xas->xa_index >= max))
			return xas_find(xas, max);
		if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
			return xas_find(xas, max);
		entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
		if (unlikely(xa_is_internal(entry)))
			return xas_find(xas, max);
		xas->xa_offset++;
		xas->xa_index++;
	} while (!entry);

	return entry;
}

/* Private */
static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
		xa_mark_t mark)
{
	unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
	unsigned int offset = xas->xa_offset;

	if (advance)
		offset++;
	if (XA_CHUNK_SIZE == BITS_PER_LONG) {
		if (offset < XA_CHUNK_SIZE) {
			unsigned long data = *addr & (~0UL << offset);
			if (data)
				return __ffs(data);
		}
		return XA_CHUNK_SIZE;
	}

	return find_next_bit(addr, XA_CHUNK_SIZE, offset);
}

/**
 * xas_next_marked() - Advance iterator to next marked entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark to search for.
 *
 * xas_next_marked() is an inline function to optimise xarray traversal for
 * speed. It is equivalent to calling xas_find_marked(), and will call
 * xas_find_marked() for all the hard cases.
 *
 * Return: The next marked entry after the one currently referred to by @xas.
 */
static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
								xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	void *entry;
	unsigned int offset;

	if (unlikely(xas_not_node(node) || node->shift))
		return xas_find_marked(xas, max, mark);
	offset = xas_find_chunk(xas, true, mark);
	xas->xa_offset = offset;
	xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
	if (xas->xa_index > max)
		return NULL;
	if (offset == XA_CHUNK_SIZE)
		return xas_find_marked(xas, max, mark);
	entry = xa_entry(xas->xa, node, offset);
	if (!entry)
		return xas_find_marked(xas, max, mark);
	return entry;
}

/*
 * If iterating while holding a lock, drop the lock and reschedule
 * every %XA_CHECK_SCHED loops.
 */
enum {
	XA_CHECK_SCHED = 4096,
};

/**
 * xas_for_each() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 *
 * The loop body will be executed for each entry present in the xarray
 * between the current xas position and @max. @entry will be set to
 * the entry retrieved from the xarray. It is safe to delete entries
 * from the array in the loop body. You should hold either the RCU lock
 * or the xa_lock while iterating. If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each(xas, entry, max) \
	for (entry = xas_find(xas, max); entry; \
	     entry = xas_next_entry(xas, max))
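/*
 * Example (an illustrative sketch): iterating over every present entry
 * while holding the xa_lock, dropping the lock to reschedule every
 * %XA_CHECK_SCHED iterations as described above.  @xa is a
 * caller-supplied placeholder and process() is a hypothetical helper.
 *
 *	XA_STATE(xas, xa, 0);
 *	unsigned int checked = 0;
 *	void *entry;
 *
 *	xas_lock(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		process(entry);
 *		if (++checked % XA_CHECK_SCHED)
 *			continue;
 *		xas_pause(&xas);
 *		xas_unlock(&xas);
 *		cond_resched();
 *		xas_lock(&xas);
 *	}
 *	xas_unlock(&xas);
 */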
/**
 * xas_for_each_marked() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 * @mark: Mark to search for.
 *
 * The loop body will be executed for each marked entry in the xarray
 * between the current xas position and @max. @entry will be set to
 * the entry retrieved from the xarray. It is safe to delete entries
 * from the array in the loop body. You should hold either the RCU lock
 * or the xa_lock while iterating. If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each_marked(xas, entry, max, mark) \
	for (entry = xas_find_marked(xas, max, mark); entry; \
	     entry = xas_next_marked(xas, max, mark))

/**
 * xas_for_each_conflict() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 *
 * The loop body will be executed for each entry in the XArray that
 * lies within the range specified by @xas. If the loop terminates
 * normally, @entry will be %NULL. The user may break out of the loop,
 * which will leave @entry set to the conflicting entry. The caller
 * may also call xa_set_err() to exit the loop while setting an error
 * to record the reason.
 */
#define xas_for_each_conflict(xas, entry) \
	while ((entry = xas_find_conflict(xas)))

void *__xas_next(struct xa_state *);
void *__xas_prev(struct xa_state *);

/**
 * xas_prev() - Move iterator to previous index.
 * @xas: XArray operation state.
 *
 * If the @xas was in an error state, it will remain in an error state
 * and this function will return %NULL. If the @xas has never been walked,
 * it will have the effect of calling xas_load(). Otherwise one will be
 * subtracted from the index and the state will be walked to the correct
 * location in the array for the next operation.
 *
 * If the iterator was referencing index 0, this function wraps
 * around to %ULONG_MAX.
 *
 * Return: The entry at the new index. This may be %NULL or an internal
 * entry.
 */
static inline void *xas_prev(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (unlikely(xas_not_node(node) || node->shift ||
				xas->xa_offset == 0))
		return __xas_prev(xas);

	xas->xa_index--;
	xas->xa_offset--;
	return xa_entry(xas->xa, node, xas->xa_offset);
}

/**
 * xas_next() - Move state to next index.
 * @xas: XArray operation state.
 *
 * If the @xas was in an error state, it will remain in an error state
 * and this function will return %NULL. If the @xas has never been walked,
 * it will have the effect of calling xas_load(). Otherwise one will be
 * added to the index and the state will be walked to the correct
 * location in the array for the next operation.
 *
 * If the iterator was referencing index %ULONG_MAX, this function wraps
 * around to 0.
 *
 * Return: The entry at the new index. This may be %NULL or an internal
 * entry.
 */
static inline void *xas_next(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (unlikely(xas_not_node(node) || node->shift ||
				xas->xa_offset == XA_CHUNK_MASK))
		return __xas_next(xas);

	xas->xa_index++;
	xas->xa_offset++;
	return xa_entry(xas->xa, node, xas->xa_offset);
}

#endif /* _LINUX_XARRAY_H */