#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#ifdef __KERNEL__

#include <linux/stddef.h>
#include <linux/prefetch.h>
#include <asm/system.h>

/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 */
#define LIST_POISON1 ((void *) 0x00100100)
#define LIST_POISON2 ((void *) 0x00200200)

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
        struct list_head name = LIST_HEAD_INIT(name)

#define INIT_LIST_HEAD(ptr) do { \
        (ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add(struct list_head *new,
                              struct list_head *prev,
                              struct list_head *next)
{
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
}

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
        __list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
        __list_add(new, head->prev, head);
}

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
                                  struct list_head *prev,
                                  struct list_head *next)
{
        new->next = next;
        new->prev = prev;
        smp_wmb();
        next->prev = new;
        prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
        __list_add_rcu(new, head, head->next);
}
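
/*
 * Usage sketch (illustrative only, not part of the original API): the
 * list_head is embedded in a caller-defined structure, and list_add()
 * or list_add_tail() give stack-like or queue-like ordering. The names
 * "struct foo", "foo_list" and the helpers below are hypothetical, and
 * locking is omitted for brevity.
 *
 *      struct foo {
 *              int data;
 *              struct list_head node;
 *      };
 *
 *      static LIST_HEAD(foo_list);
 *
 *      static void foo_push(struct foo *f)
 *      {
 *              list_add(&f->node, &foo_list);          (stack: newest first)
 *      }
 *
 *      static void foo_enqueue(struct foo *f)
 *      {
 *              list_add_tail(&f->node, &foo_list);     (queue: newest last)
 *      }
 */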

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
                                     struct list_head *head)
{
        __list_add_rcu(new, head->prev, head);
}

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head *prev, struct list_head *next)
{
        next->prev = prev;
        prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry is
 * in an undefined state.
 */
static inline void list_del(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->next = LIST_POISON1;
        entry->prev = LIST_POISON2;
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        entry->prev = LIST_POISON2;
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old: the element to be replaced
 * @new: the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 */
static inline void list_replace_rcu(struct list_head *old,
                                    struct list_head *new)
{
        new->next = old->next;
        new->prev = old->prev;
        smp_wmb();
        new->next->prev = new;
        new->prev->next = new;
}
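
/*
 * Illustrative sketch of the deletion rule documented above, using a
 * hypothetical "struct foo" that embeds a list_head "node" and a
 * caller-owned spinlock "foo_lock": after list_del_rcu() the memory
 * may only be freed once an RCU grace period has elapsed, for example
 * by waiting with synchronize_rcu().
 *
 *      spin_lock(&foo_lock);
 *      list_del_rcu(&f->node);
 *      spin_unlock(&foo_lock);
 *      synchronize_rcu();
 *      kfree(f);
 */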

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
                                  struct list_head *head)
{
        __list_del(list->prev, list->next);
        list_add_tail(list, head);
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
        return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is empty _and_ checks that
 * no other CPU might be in the process of still modifying either member
 * @head: the list to test.
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). E.g. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
        struct list_head *next = head->next;
        return (next == head) && (next == head->prev);
}

static inline void __list_splice(struct list_head *list,
                                 struct list_head *head)
{
        struct list_head *first = list->next;
        struct list_head *last = list->prev;
        struct list_head *at = head->next;

        first->prev = head;
        head->next = first;

        last->next = at;
        at->prev = last;
}

/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
        if (!list_empty(list))
                __list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised.
 */
static inline void list_splice_init(struct list_head *list,
                                    struct list_head *head)
{
        if (!list_empty(list)) {
                __list_splice(list, head);
                INIT_LIST_HEAD(list);
        }
}

/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 */
#define list_entry(ptr, type, member) \
        container_of(ptr, type, member)

/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each(pos, head) \
        for (pos = (head)->next; prefetch(pos->next), pos != (head); \
                pos = pos->next)
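
/*
 * Illustrative sketch (hypothetical names: "pending_list", "pending_lock",
 * "struct foo" with a list_head member "node", "process()"): a common
 * pattern is to move a shared list onto a private one with
 * list_splice_init() while holding a lock, then walk the private copy
 * with list_for_each() and list_entry() without the lock.
 *
 *      LIST_HEAD(local);
 *      struct list_head *pos;
 *
 *      spin_lock(&pending_lock);
 *      list_splice_init(&pending_list, &local);
 *      spin_unlock(&pending_lock);
 *
 *      list_for_each(pos, &local) {
 *              struct foo *f = list_entry(pos, struct foo, node);
 *              process(f);
 *      }
 */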

/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
        for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
        for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
                pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
        for (pos = (head)->next, n = pos->next; pos != (head); \
                pos = n, n = pos->next)

/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 */
#define list_for_each_entry(pos, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
        for (pos = list_entry((head)->prev, typeof(*pos), member); \
             prefetch(pos->member.prev), &pos->member != (head); \
             pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use as a start point in
 * list_for_each_entry_continue()
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_head within the struct.
 */
#define list_prepare_entry(pos, head, member) \
        ((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - iterate over list of given type
 * continuing after existing point
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 */
#define list_for_each_entry_continue(pos, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = list_entry(pos->member.next, typeof(*pos), member))
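
/*
 * Illustrative sketch (hypothetical "struct foo", "foo_list", "process()"):
 * list_for_each_entry() hides the list_entry() step, so the loop cursor
 * is the containing structure itself; the _safe variants defined below
 * additionally allow the current entry to be deleted during the walk.
 *
 *      struct foo *f;
 *
 *      list_for_each_entry(f, &foo_list, node)
 *              process(f);
 */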

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member), \
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_continue - iterate over list of given type
 * continuing after existing point safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
        for (pos = list_entry(pos->member.next, typeof(*pos), member), \
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head); \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
        for (pos = (head)->next; prefetch(pos->next), pos != (head); \
                pos = rcu_dereference(pos->next))

#define __list_for_each_rcu(pos, head) \
        for (pos = (head)->next; pos != (head); \
                pos = rcu_dereference(pos->next))

/**
 * list_for_each_safe_rcu - iterate over an rcu-protected list safe
 * against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
        for (pos = (head)->next, n = pos->next; pos != (head); \
                pos = rcu_dereference(n), n = pos->next)

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
        for (pos = list_entry((head)->next, typeof(*pos), member); \
             prefetch(pos->member.next), &pos->member != (head); \
             pos = rcu_dereference(list_entry(pos->member.next, \
                        typeof(*pos), member)))
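
/*
 * Illustrative RCU read-side sketch (hypothetical "struct foo",
 * "foo_list", "node", "key" and "use()"): readers traverse with
 * list_for_each_entry_rcu() under rcu_read_lock() and may run
 * concurrently with list_add_rcu()/list_del_rcu() updaters.
 *
 *      struct foo *f;
 *
 *      rcu_read_lock();
 *      list_for_each_entry_rcu(f, &foo_list, node) {
 *              if (f->key == key) {
 *                      use(f);    (only valid inside the read-side section)
 *                      break;
 *              }
 *      }
 *      rcu_read_unlock();
 */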

/**
 * list_for_each_continue_rcu - iterate over an rcu-protected list
 * continuing after existing point.
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
        for ((pos) = (pos)->next; prefetch((pos)->next), (pos) != (head); \
                (pos) = rcu_dereference((pos)->next))

/*
 * Doubly linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two-pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
        struct hlist_node *first;
};

struct hlist_node {
        struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)

static inline int hlist_unhashed(const struct hlist_node *h)
{
        return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
        return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
        struct hlist_node *next = n->next;
        struct hlist_node **pprev = n->pprev;
        *pprev = next;
        if (next)
                next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
        __hlist_del(n);
        n->next = LIST_POISON1;
        n->pprev = LIST_POISON2;
}

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
        __hlist_del(n);
        n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
        if (n->pprev) {
                __hlist_del(n);
                INIT_HLIST_NODE(n);
        }
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        struct hlist_node *first = h->first;
        n->next = first;
        if (first)
                first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}
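
/*
 * Illustrative sketch (hypothetical "struct foo", member "hnode", hash
 * function "foo_hashfn()"): hlist heads typically form the bucket array
 * of a hash table; each object embeds a hlist_node and is inserted at
 * the head of its bucket. Locking is omitted for brevity.
 *
 *      #define FOO_HASH_BITS 7
 *      static struct hlist_head foo_hash[1 << FOO_HASH_BITS];
 *
 *      static void foo_hash_insert(struct foo *f)
 *      {
 *              hlist_add_head(&f->hnode, &foo_hash[foo_hashfn(f->key)]);
 *      }
 *
 *      static void foo_hash_remove(struct foo *f)
 *      {
 *              hlist_del(&f->hnode);
 *      }
 */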

/**
 * hlist_add_head_rcu - adds the specified element to the specified hlist,
 * while permitting racing traversals.
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
                                      struct hlist_head *h)
{
        struct hlist_node *first = h->first;
        n->next = first;
        n->pprev = &h->first;
        smp_wmb();
        if (first)
                first->pprev = &n->next;
        h->first = n;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
                                    struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        next->pprev = &n->next;
        *(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
                                   struct hlist_node *next)
{
        next->next = n->next;
        n->next = next;
        next->pprev = &n->next;

        if (next->next)
                next->next->pprev = &next->next;
}

/**
 * hlist_add_before_rcu - adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
                                        struct hlist_node *next)
{
        n->pprev = next->pprev;
        n->next = next;
        smp_wmb();
        next->pprev = &n->next;
        *(n->pprev) = n;
}

/**
 * hlist_add_after_rcu - adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
                                       struct hlist_node *n)
{
        n->next = prev->next;
        n->pprev = &prev->next;
        smp_wmb();
        prev->next = n;
        if (n->next)
                n->next->pprev = &n->next;
}

#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

#define hlist_for_each(pos, head) \
        for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
             pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
        for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
             pos = n)

#define hlist_for_each_rcu(pos, head) \
        for ((pos) = (head)->first; pos && ({ prefetch((pos)->next); 1; }); \
                (pos) = rcu_dereference((pos)->next))
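
/*
 * Illustrative RCU lookup sketch, continuing the hypothetical hash table
 * above: a bucket populated with hlist_add_head_rcu() can be walked with
 * hlist_for_each_rcu() under rcu_read_lock(), resolving each node with
 * hlist_entry().
 *
 *      struct hlist_node *pos;
 *      struct foo *f;
 *
 *      rcu_read_lock();
 *      hlist_for_each_rcu(pos, &foo_hash[foo_hashfn(key)]) {
 *              f = hlist_entry(pos, struct foo, hnode);
 *              if (f->key == key)
 *                      break;
 *      }
 *      rcu_read_unlock();
 */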

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
        for (pos = (head)->first; \
             pos && ({ prefetch(pos->next); 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = pos->next)

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
        for (pos = (pos)->next; \
             pos && ({ prefetch(pos->next); 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
        for (; pos && ({ prefetch(pos->next); 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
        for (pos = (head)->first; \
             pos && ({ n = pos->next; 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = n)

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
        for (pos = (head)->first; \
             pos && ({ prefetch(pos->next); 1; }) && \
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
             pos = rcu_dereference(pos->next))

#else
#warning "don't include kernel headers in userspace"
#endif /* __KERNEL__ */
#endif