/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

struct idr {
	struct radix_tree_root	idr_rt;
	unsigned int		idr_base;
	unsigned int		idr_next;
};

/*
 * The IDR API does not expose the tagging functionality of the radix tree
 * to users.  Use tag 0 to track whether a node has free space below it.
 */
#define IDR_FREE	0

/* Set the IDR flag and the IDR_FREE tag */
#define IDR_RT_MARKER	((__force gfp_t)(3 << __GFP_BITS_SHIFT))

#define IDR_INIT_BASE(base) {						\
	.idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER),			\
	.idr_base = (base),						\
	.idr_next = 0,							\
}

/**
 * IDR_INIT() - Initialise an IDR.
 *
 * A freshly-initialised IDR contains no IDs.
 */
#define IDR_INIT	IDR_INIT_BASE(0)

/**
 * DEFINE_IDR() - Define a statically-allocated IDR
 * @name: Name of IDR
 *
 * An IDR defined using this macro is ready for use with no additional
 * initialisation required.  It contains no IDs.
 */
#define DEFINE_IDR(name)	struct idr name = IDR_INIT

/**
 * idr_get_cursor - Return the current position of the cyclic allocator
 * @idr: idr handle
 *
 * The value returned is the value that will be next returned from
 * idr_alloc_cyclic() if it is free (otherwise the search will start from
 * this position).
 */
static inline unsigned int idr_get_cursor(const struct idr *idr)
{
	return READ_ONCE(idr->idr_next);
}

/**
 * idr_set_cursor - Set the current position of the cyclic allocator
 * @idr: idr handle
 * @val: new position
 *
 * The next call to idr_alloc_cyclic() will return @val if it is free
 * (otherwise the search will start from this position).
 */
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
	WRITE_ONCE(idr->idr_next, val);
}

/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() is able to be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */
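
/*
 * Illustrative sketch of the locking rules above (not part of this header;
 * "objects_idr", "id" and "struct my_obj" are made-up names).  A lockless
 * lookup might look like:
 *
 *	struct my_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = idr_find(&objects_idr, id);
 *	if (obj)
 *		kref_get(&obj->ref);
 *	rcu_read_unlock();
 *
 * idr_find() only guarantees the pointer while inside the RCU read-side
 * critical section; the object itself must be refcounted, locked or
 * RCU-freed as described above.
 */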

void idr_preload(gfp_t gfp_mask);

int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
				unsigned long max, gfp_t);
int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
void *idr_remove(struct idr *, unsigned long id);
void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
		 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ul(struct idr *, unsigned long *nextid);
void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);

/**
 * idr_init_base() - Initialise an IDR.
 * @idr: IDR handle.
 * @base: The base value for the IDR.
 *
 * This variation of idr_init() creates an IDR which will allocate IDs
 * starting at %base.
 */
static inline void idr_init_base(struct idr *idr, int base)
{
	INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
	idr->idr_base = base;
	idr->idr_next = 0;
}

/**
 * idr_init() - Initialise an IDR.
 * @idr: IDR handle.
 *
 * Initialise a dynamically allocated IDR.  To initialise a
 * statically allocated IDR, use DEFINE_IDR().
 */
static inline void idr_init(struct idr *idr)
{
	idr_init_base(idr, 0);
}

/**
 * idr_is_empty() - Are there any IDs allocated?
 * @idr: IDR handle.
 *
 * Return: %true if no IDs have been allocated from this IDR.
 */
static inline bool idr_is_empty(const struct idr *idr)
{
	return radix_tree_empty(&idr->idr_rt) &&
		radix_tree_tagged(&idr->idr_rt, IDR_FREE);
}

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	preempt_enable();
}

/**
 * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idr, entry, id)			\
	for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)

/**
 * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor.
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry_ul(idr, entry, id)			\
	for (id = 0; ((entry) = idr_get_next_ul(idr, &(id))) != NULL; ++id)
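
/*
 * Illustrative sketch of the usual allocation and iteration patterns (not
 * part of this header; "objects_idr", "objects_lock", "struct my_obj" and
 * "show_obj" are made-up names).  Allocating an ID from process context
 * while the IDR itself is protected by a spinlock:
 *
 *	int id;
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&objects_lock);
 *	id = idr_alloc(&objects_idr, obj, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&objects_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;
 *
 * Walking every entry with idr_for_each_entry() (@id must be an int here;
 * use idr_for_each_entry_ul() with an unsigned long for larger IDs):
 *
 *	struct my_obj *obj;
 *	int id;
 *
 *	idr_for_each_entry(&objects_idr, obj, id)
 *		show_obj(obj);
 */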

/**
 * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 */
#define idr_for_each_entry_continue(idr, entry, id)			\
	for ((entry) = idr_get_next((idr), &(id));			\
	     entry;							\
	     ++id, (entry) = idr_get_next((idr), &(id)))

/*
 * IDA - IDR based id allocator, use when translation from id to
 * pointer isn't necessary.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS		(IDA_BITMAP_LONGS * sizeof(long) * 8)

struct ida_bitmap {
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};

DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);

struct ida {
	struct radix_tree_root	ida_rt;
};

#define IDA_INIT	{						\
	.ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT),		\
}
#define DEFINE_IDA(name)	struct ida name = IDA_INIT

int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);

int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);

static inline void ida_init(struct ida *ida)
{
	INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
}

/**
 * ida_get_new - allocate new ID
 * @ida:	idr handle
 * @p_id:	pointer to the allocated handle
 *
 * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
 */
static inline int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}

static inline bool ida_is_empty(const struct ida *ida)
{
	return radix_tree_empty(&ida->ida_rt);
}
#endif /* __IDR_H__ */
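
/*
 * Illustrative IDA usage sketch (not part of this header; "inst_ida" is a
 * made-up name).  The ida_simple_*() helpers do their own locking and
 * memory allocation:
 *
 *	static DEFINE_IDA(inst_ida);
 *
 *	int nr = ida_simple_get(&inst_ida, 0, 0, GFP_KERNEL);
 *	if (nr < 0)
 *		return nr;
 *	...
 *	ida_simple_remove(&inst_ida, nr);
 */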