#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
 * Why is there no list_empty_rcu()?  Because list_empty() serves this
 * purpose.  The list_empty() function fetches the RCU-protected pointer
 * and compares it to the address of the list head, but neither dereferences
 * this pointer itself nor provides this pointer to the caller.  Therefore,
 * it is not necessary to use rcu_dereference(), so that list_empty() can
 * be used anywhere you would want to use a list_empty_rcu().
 */

/*
 * return the ->next pointer of a list_head in an rcu safe
 * way, we must not access it directly
 */
#define list_next_rcu(list)     (*((struct list_head __rcu **)(&(list)->next)))

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add_rcu(struct list_head *new,
                struct list_head *prev, struct list_head *next)
{
        new->next = next;
        new->prev = prev;
        rcu_assign_pointer(list_next_rcu(prev), new);
        next->prev = new;
}
#else
extern void __list_add_rcu(struct list_head *new,
                struct list_head *prev, struct list_head *next);
#endif

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
        __list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
                                        struct list_head *head)
{
        __list_add_rcu(new, head->prev, head);
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
        __list_del_entry(entry);
        entry->prev = LIST_POISON2;
}
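/*
 * A minimal update-side sketch of the primitives above: entries are added
 * with list_add_rcu() and removed with list_del_rcu() under a lock that
 * excludes other updaters, and freeing is deferred with call_rcu() until a
 * grace period has elapsed.  The struct foo, foo_list and foo_lock names
 * below are hypothetical, for illustration only.
 *
 *	struct foo {
 *		int data;
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static LIST_HEAD(foo_list);
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	void foo_add(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);			// exclude other updaters
 *		list_add_rcu(&p->list, &foo_list);
 *		spin_unlock(&foo_lock);
 *	}
 *
 *	void foo_del(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&p->list);			// readers may still see p
 *		spin_unlock(&foo_lock);
 *		call_rcu(&p->rcu, foo_reclaim);		// free after a grace period
 *	}
 */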
/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on the node returns true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so hlist_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
        if (!hlist_unhashed(n)) {
                __hlist_del(n);
                n->pprev = NULL;
        }
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
                                struct list_head *new)
{
        new->next = old->next;
        new->prev = old->prev;
        rcu_assign_pointer(list_next_rcu(new->prev), new);
        new->next->prev = new;
        old->prev = LIST_POISON2;
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list.
 * @list: the RCU-protected list to splice
 * @head: the place in the list to splice the first list into
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * @head can be RCU-read traversed concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to
 * prevent any other updates to @head.  In principle, it is possible
 * to modify the list as soon as sync() begins execution.
 * If this sort of thing becomes necessary, an alternative version
 * based on call_rcu() could be created.  But only if -really-
 * needed -- there is no shortage of RCU API members.
 */
static inline void list_splice_init_rcu(struct list_head *list,
                                        struct list_head *head,
                                        void (*sync)(void))
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;
        struct list_head *at = head->next;

        if (list_empty(list))
                return;

        /*
         * "first" and "last" now track the entries being spliced, so the
         * source list head can be reinitialized.
         */

        INIT_LIST_HEAD(list);

        /*
         * At this point, the list body still points to the source list.
         * Wait for any readers to finish using the list before splicing
         * the list body into the new list.  Any new readers will see
         * an empty list.
         */

        sync();

        /*
         * Readers are finished with the source list, so perform splice.
         * The order is important if the new list is global and accessible
         * to concurrent RCU readers.  Note that RCU readers are not
         * permitted to traverse the prev pointers without excluding
         * this function.
         */

        last->next = at;
        rcu_assign_pointer(list_next_rcu(head), first);
        first->prev = head;
        at->prev = last;
}
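/*
 * A minimal sketch of using list_splice_init_rcu(): a privately built list
 * is spliced into a globally visible, RCU-read list, passing
 * synchronize_rcu() as the @sync function.  A mutex is used because the
 * function blocks.  The global_list, global_lock and publish_pending()
 * names are hypothetical.
 *
 *	static LIST_HEAD(global_list);		// traversed by RCU readers
 *	static DEFINE_MUTEX(global_lock);	// excludes other updaters
 *
 *	void publish_pending(struct list_head *pending)
 *	{
 *		mutex_lock(&global_lock);
 *		list_splice_init_rcu(pending, &global_list, synchronize_rcu);
 *		mutex_unlock(&global_lock);
 *	}
 */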
/**
 * list_entry_rcu - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_entry_rcu(ptr, type, member) \
        ({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \
         container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
        })

/**
 * Where are list_empty_rcu() and list_first_entry_rcu()?
 *
 * Implementing those functions following their counterparts list_empty() and
 * list_first_entry() is not advisable because they lead to subtle race
 * conditions as the following snippet shows:
 *
 * if (!list_empty_rcu(mylist)) {
 *	struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 *	do_something(bar);
 * }
 *
 * The list may not be empty when list_empty_rcu checks it, but it may be when
 * list_first_entry_rcu rereads the ->next pointer.
 *
 * Rereading the ->next pointer is not a problem for list_empty() and
 * list_first_entry() because they would be protected by a lock that blocks
 * writers.
 *
 * See list_first_or_null_rcu for an alternative.
 */

/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr: the list head to take the element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_or_null_rcu(ptr, type, member) \
        ({struct list_head *__ptr = (ptr); \
          struct list_head __rcu *__next = list_next_rcu(__ptr); \
          likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
        })

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
        for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
                &pos->member != (head); \
                pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
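/*
 * A minimal read-side sketch using the iterator above: the traversal is
 * guarded by rcu_read_lock(), so it may run concurrently with the _rcu
 * list-mutation primitives.  The struct foo, foo_list and do_something()
 * names are hypothetical and match the update-side sketch earlier in this
 * file.
 *
 *	void foo_scan(void)
 *	{
 *		struct foo *p;
 *
 *		rcu_read_lock();
 *		list_for_each_entry_rcu(p, &foo_list, list)
 *			do_something(p);	// must not block or sleep here
 *		rcu_read_unlock();
 *	}
 */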
/**
 * list_for_each_continue_rcu
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * Iterate over an rcu-protected list, continuing after current point.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
        for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \
                (pos) != (head); \
                (pos) = rcu_dereference_raw(list_next_rcu(pos)))

/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member) \
        for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
        __hlist_del(n);
        n->pprev = LIST_POISON2;
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
                                        struct hlist_node *new)
{
        struct hlist_node *next = old->next;

        new->next = next;
        new->pprev = old->pprev;
        rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
        if (next)
                new->next->pprev = &new->next;
        old->pprev = LIST_POISON2;
}
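/*
 * A minimal sketch of continuing an iteration with
 * list_for_each_entry_continue_rcu(): after the cursor has been positioned
 * by a first traversal, the second loop resumes after that position, all
 * within the same rcu_read_lock() section.  The struct foo, foo_list and
 * handle_successor() names are hypothetical.
 *
 *	void foo_after(int key)
 *	{
 *		struct foo *p;
 *
 *		rcu_read_lock();
 *		list_for_each_entry_rcu(p, &foo_list, list)
 *			if (p->data == key)
 *				break;
 *		if (&p->list != &foo_list)	// key was found
 *			list_for_each_entry_continue_rcu(p, &foo_list, list)
 *				handle_successor(p);
 *		rcu_read_unlock();
 *	}
 */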
/*
 * return the first or the next element in an RCU protected hlist
 */
#define hlist_first_rcu(head)   (*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)    (*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)   (*((struct hlist_node __rcu **)((node)->pprev)))

/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                                        struct hlist_head *h)
{
        struct hlist_node *first = h->first;

        n->next = first;
        n->pprev = &h->first;
        rcu_assign_pointer(hlist_first_rcu(h), n);
        if (first)
                first->pprev = &n->next;
}

/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
                                        struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        rcu_assign_pointer(hlist_pprev_rcu(n), n);
        next->pprev = &n->next;
}

/**
 * hlist_add_after_rcu
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
                                        struct hlist_node *n)
{
        n->next = prev->next;
        n->pprev = &prev->next;
        rcu_assign_pointer(hlist_next_rcu(prev), n);
        if (n->next)
                n->next->pprev = &n->next;
}
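/*
 * A minimal sketch of an RCU-protected hash table built on the hlist
 * primitives above: insertion into a bucket uses hlist_add_head_rcu()
 * under a lock that excludes other updaters.  The struct bar, bar_table,
 * bar_lock and bar_insert() names are hypothetical; hash_long() comes
 * from <linux/hash.h>.
 *
 *	struct bar {
 *		unsigned long key;
 *		struct hlist_node node;
 *	};
 *
 *	#define BAR_HASH_BITS 6
 *	static struct hlist_head bar_table[1 << BAR_HASH_BITS];
 *	static DEFINE_SPINLOCK(bar_lock);
 *
 *	void bar_insert(struct bar *b)
 *	{
 *		unsigned int bucket = hash_long(b->key, BAR_HASH_BITS);
 *
 *		spin_lock(&bar_lock);
 *		hlist_add_head_rcu(&b->node, &bar_table[bucket]);
 *		spin_unlock(&bar_lock);
 *	}
 */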
#define __hlist_for_each_rcu(pos, head) \
        for (pos = rcu_dereference(hlist_first_rcu(head)); \
             pos; \
             pos = rcu_dereference(hlist_next_rcu(pos)))

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
        for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \
                pos && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
                pos = rcu_dereference_raw(hlist_next_rcu(pos)))

/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock_bh().
 */
#define hlist_for_each_entry_rcu_bh(tpos, pos, head, member) \
        for (pos = rcu_dereference_bh((head)->first); \
                pos && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
                pos = rcu_dereference_bh(pos->next))

/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(tpos, pos, member) \
        for (pos = rcu_dereference((pos)->next); \
             pos && \
             ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = rcu_dereference(pos->next))

/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(tpos, pos, member) \
        for (pos = rcu_dereference_bh((pos)->next); \
             pos && \
             ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = rcu_dereference_bh(pos->next))


#endif  /* __KERNEL__ */
#endif