/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

struct idr {
	struct radix_tree_root	idr_rt;
	unsigned int		idr_base;
	unsigned int		idr_next;
};

/*
 * The IDR API does not expose the tagging functionality of the radix tree
 * to users.  Use tag 0 to track whether a node has free space below it.
 */
#define IDR_FREE	0

/* Set the IDR flag and the IDR_FREE tag */
#define IDR_RT_MARKER	(ROOT_IS_IDR | (__force gfp_t)			\
					(1 << (ROOT_TAG_SHIFT + IDR_FREE)))

#define IDR_INIT_BASE(name, base) {					\
	.idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER),			\
	.idr_base = (base),						\
	.idr_next = 0,							\
}

/**
 * IDR_INIT() - Initialise an IDR.
 * @name: Name of IDR.
 *
 * A freshly-initialised IDR contains no IDs.
 */
#define IDR_INIT(name)	IDR_INIT_BASE(name, 0)

/**
 * DEFINE_IDR() - Define a statically-allocated IDR.
 * @name: Name of IDR.
 *
 * An IDR defined using this macro is ready for use with no additional
 * initialisation required.  It contains no IDs.
 */
#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)

/**
 * idr_get_cursor - Return the current position of the cyclic allocator
 * @idr: idr handle
 *
 * The value returned is the value that will be next returned from
 * idr_alloc_cyclic() if it is free (otherwise the search will start from
 * this position).
 */
static inline unsigned int idr_get_cursor(const struct idr *idr)
{
	return READ_ONCE(idr->idr_next);
}

/**
 * idr_set_cursor - Set the current position of the cyclic allocator
 * @idr: idr handle
 * @val: new position
 *
 * The next call to idr_alloc_cyclic() will return @val if it is free
 * (otherwise the search will start from this position).
 */
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
	WRITE_ONCE(idr->idr_next, val);
}

/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() is able to be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */
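
/*
 * Illustrative sketch (not part of the upstream header): one way to apply
 * the locking rules described in the "idr sync" section above.  The names
 * "struct foo", "foo_idr", "foo_lock" and the helpers below are
 * hypothetical, and the code assumes the usual <linux/slab.h>,
 * <linux/spinlock.h> and <linux/rcupdate.h> includes.  Writers serialise
 * idr_alloc() and idr_remove() with a spinlock, objects are freed with
 * kfree_rcu(), and readers may then call idr_find() under rcu_read_lock():
 *
 *	struct foo {
 *		int id;
 *		struct rcu_head rcu;
 *	};
 *
 *	static DEFINE_IDR(foo_idr);
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	static int foo_add(struct foo *foo)
 *	{
 *		int id;
 *
 *		spin_lock(&foo_lock);
 *		id = idr_alloc(&foo_idr, foo, 0, 0, GFP_ATOMIC);
 *		if (id >= 0)
 *			foo->id = id;
 *		spin_unlock(&foo_lock);
 *		return id;
 *	}
 *
 *	static void foo_del(struct foo *foo)
 *	{
 *		spin_lock(&foo_lock);
 *		idr_remove(&foo_idr, foo->id);
 *		spin_unlock(&foo_lock);
 *		kfree_rcu(foo, rcu);
 *	}
 *
 *	static bool foo_exists(int id)
 *	{
 *		bool ret;
 *
 *		rcu_read_lock();
 *		ret = idr_find(&foo_idr, id) != NULL;
 *		rcu_read_unlock();
 *		return ret;
 *	}
 *
 * GFP_ATOMIC is used in foo_add() because the allocation runs under a
 * spinlock; see idr_preload() below for a GFP_KERNEL-friendly variant.
 */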

#define idr_lock(idr)		xa_lock(&(idr)->idr_rt)
#define idr_unlock(idr)		xa_unlock(&(idr)->idr_rt)
#define idr_lock_bh(idr)	xa_lock_bh(&(idr)->idr_rt)
#define idr_unlock_bh(idr)	xa_unlock_bh(&(idr)->idr_rt)
#define idr_lock_irq(idr)	xa_lock_irq(&(idr)->idr_rt)
#define idr_unlock_irq(idr)	xa_unlock_irq(&(idr)->idr_rt)
#define idr_lock_irqsave(idr, flags) \
				xa_lock_irqsave(&(idr)->idr_rt, flags)
#define idr_unlock_irqrestore(idr, flags) \
				xa_unlock_irqrestore(&(idr)->idr_rt, flags)

void idr_preload(gfp_t gfp_mask);

int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
				unsigned long max, gfp_t);
int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
void *idr_remove(struct idr *, unsigned long id);
void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
		 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ul(struct idr *, unsigned long *nextid);
void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);

/**
 * idr_init_base() - Initialise an IDR.
 * @idr: IDR handle.
 * @base: The base value for the IDR.
 *
 * This variation of idr_init() creates an IDR which will allocate IDs
 * starting at @base.
 */
static inline void idr_init_base(struct idr *idr, int base)
{
	INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
	idr->idr_base = base;
	idr->idr_next = 0;
}

/**
 * idr_init() - Initialise an IDR.
 * @idr: IDR handle.
 *
 * Initialise a dynamically allocated IDR.  To initialise a
 * statically allocated IDR, use DEFINE_IDR().
 */
static inline void idr_init(struct idr *idr)
{
	idr_init_base(idr, 0);
}

/**
 * idr_is_empty() - Are there any IDs allocated?
 * @idr: IDR handle.
 *
 * Return: %true if no IDs have been allocated from this IDR.
 */
static inline bool idr_is_empty(const struct idr *idr)
{
	return radix_tree_empty(&idr->idr_rt) &&
		radix_tree_tagged(&idr->idr_rt, IDR_FREE);
}

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	preempt_enable();
}

/**
 * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idr, entry, id)			\
	for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)

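/*
 * Illustrative sketch (not part of the upstream header): the idr_preload()
 * pattern lets idr_alloc() itself run under a spinlock while the node
 * memory is preallocated with GFP_KERNEL outside of it.  "foo_idr",
 * "foo_lock" and "struct foo" are the same hypothetical names used in the
 * earlier example.
 *
 *	static int foo_add(struct foo *foo)
 *	{
 *		int id;
 *
 *		idr_preload(GFP_KERNEL);
 *		spin_lock(&foo_lock);
 *		id = idr_alloc(&foo_idr, foo, 0, 0, GFP_NOWAIT);
 *		if (id >= 0)
 *			foo->id = id;
 *		spin_unlock(&foo_lock);
 *		idr_preload_end();
 *
 *		return id;
 *	}
 *
 * A walk over all entries can then use idr_for_each_entry(); hold foo_lock
 * across the walk so that the tree and the objects stay stable:
 *
 *	struct foo *foo;
 *	int id;
 *
 *	spin_lock(&foo_lock);
 *	idr_for_each_entry(&foo_idr, foo, id)
 *		pr_info("foo id %d\n", id);
 *	spin_unlock(&foo_lock);
 */
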
/**
 * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor.
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry_ul(idr, entry, id)			\
	for (id = 0; ((entry) = idr_get_next_ul(idr, &(id))) != NULL; ++id)

/**
 * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 */
#define idr_for_each_entry_continue(idr, entry, id)			\
	for ((entry) = idr_get_next((idr), &(id));			\
	     entry;							\
	     ++id, (entry) = idr_get_next((idr), &(id)))

/*
 * IDA - IDR based id allocator, use when translation from id to
 * pointer isn't necessary.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS		(IDA_BITMAP_LONGS * sizeof(long) * 8)

struct ida_bitmap {
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};

DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);

struct ida {
	struct radix_tree_root	ida_rt;
};

#define IDA_INIT(name)	{						\
	.ida_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER | GFP_NOWAIT),	\
}
#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)

int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);

/**
 * ida_alloc() - Allocate an unused ID.
 * @ida: IDA handle.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between 0 and %INT_MAX, inclusive.
 *
 * Context: Any context.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc(struct ida *ida, gfp_t gfp)
{
	return ida_alloc_range(ida, 0, ~0, gfp);
}

/**
 * ida_alloc_min() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and %INT_MAX, inclusive.
 *
 * Context: Any context.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
	return ida_alloc_range(ida, min, ~0, gfp);
}

/**
 * ida_alloc_max() - Allocate an unused ID.
 * @ida: IDA handle.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between 0 and @max, inclusive.
 *
 * Context: Any context.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{
	return ida_alloc_range(ida, 0, max, gfp);
}

static inline void ida_init(struct ida *ida)
{
	INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
}

#define ida_simple_get(ida, start, end, gfp)	\
			ida_alloc_range(ida, start, (end) - 1, gfp)
#define ida_simple_remove(ida, id)	ida_free(ida, id)

static inline bool ida_is_empty(const struct ida *ida)
{
	return radix_tree_empty(&ida->ida_rt);
}

/* in lib/radix-tree.c */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
#endif /* __IDR_H__ */
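
/*
 * Illustrative sketch (not part of the upstream header): minimal IDA usage
 * for handing out small instance numbers.  "foo_ida", foo_get_instance(),
 * foo_put_instance() and the limit of 255 are hypothetical.
 *
 *	static DEFINE_IDA(foo_ida);
 *
 *	static int foo_get_instance(void)
 *	{
 *		return ida_alloc_max(&foo_ida, 255, GFP_KERNEL);
 *	}
 *
 *	static void foo_put_instance(int instance)
 *	{
 *		ida_free(&foo_ida, instance);
 *	}
 *
 * Unlike the IDR, the IDA only tracks which IDs are in use; it does not
 * associate a pointer with each ID, so it is the lighter-weight choice when
 * only the number itself is needed.
 */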