Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net, rds, Replace xlist in net/rds/xlist.h with llist

The functionality of xlist and llist is almost the same. This patch
replaces xlist with llist to avoid code duplication.

Known issues: don't know how to test this, need special hardware?

Signed-off-by: Huang Ying <ying.huang@intel.com>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Andy Grover <andy.grover@oracle.com>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Huang Ying and committed by
David S. Miller
1bc144b6 dc00fd44

+51 -140
+1
net/rds/Kconfig
··· 9 9 10 10 config RDS_RDMA 11 11 tristate "RDS over Infiniband and iWARP" 12 + select LLIST 12 13 depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS 13 14 ---help--- 14 15 Allow RDS to use Infiniband and iWARP as a transport.
+50 -60
net/rds/ib_rdma.c
··· 33 33 #include <linux/kernel.h> 34 34 #include <linux/slab.h> 35 35 #include <linux/rculist.h> 36 + #include <linux/llist.h> 36 37 37 38 #include "rds.h" 38 39 #include "ib.h" 39 - #include "xlist.h" 40 40 41 41 static DEFINE_PER_CPU(unsigned long, clean_list_grace); 42 42 #define CLEAN_LIST_BUSY_BIT 0 ··· 49 49 struct rds_ib_mr_pool *pool; 50 50 struct ib_fmr *fmr; 51 51 52 - struct xlist_head xlist; 52 + struct llist_node llnode; 53 53 54 54 /* unmap_list is for freeing */ 55 55 struct list_head unmap_list; ··· 71 71 atomic_t item_count; /* total # of MRs */ 72 72 atomic_t dirty_count; /* # dirty of MRs */ 73 73 74 - struct xlist_head drop_list; /* MRs that have reached their max_maps limit */ 75 - struct xlist_head free_list; /* unused MRs */ 76 - struct xlist_head clean_list; /* global unused & unamapped MRs */ 74 + struct llist_head drop_list; /* MRs that have reached their max_maps limit */ 75 + struct llist_head free_list; /* unused MRs */ 76 + struct llist_head clean_list; /* global unused & unamapped MRs */ 77 77 wait_queue_head_t flush_wait; 78 78 79 79 atomic_t free_pinned; /* memory pinned by free MRs */ ··· 220 220 if (!pool) 221 221 return ERR_PTR(-ENOMEM); 222 222 223 - INIT_XLIST_HEAD(&pool->free_list); 224 - INIT_XLIST_HEAD(&pool->drop_list); 225 - INIT_XLIST_HEAD(&pool->clean_list); 223 + init_llist_head(&pool->free_list); 224 + init_llist_head(&pool->drop_list); 225 + init_llist_head(&pool->clean_list); 226 226 mutex_init(&pool->flush_lock); 227 227 init_waitqueue_head(&pool->flush_wait); 228 228 INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker); ··· 260 260 kfree(pool); 261 261 } 262 262 263 - static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl, 264 - struct rds_ib_mr **ibmr_ret) 265 - { 266 - struct xlist_head *ibmr_xl; 267 - ibmr_xl = xlist_del_head_fast(xl); 268 - *ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist); 269 - } 270 - 271 263 static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct 
rds_ib_mr_pool *pool) 272 264 { 273 265 struct rds_ib_mr *ibmr = NULL; 274 - struct xlist_head *ret; 266 + struct llist_node *ret; 275 267 unsigned long *flag; 276 268 277 269 preempt_disable(); 278 270 flag = &__get_cpu_var(clean_list_grace); 279 271 set_bit(CLEAN_LIST_BUSY_BIT, flag); 280 - ret = xlist_del_head(&pool->clean_list); 272 + ret = llist_del_first(&pool->clean_list); 281 273 if (ret) 282 - ibmr = list_entry(ret, struct rds_ib_mr, xlist); 274 + ibmr = llist_entry(ret, struct rds_ib_mr, llnode); 283 275 284 276 clear_bit(CLEAN_LIST_BUSY_BIT, flag); 285 277 preempt_enable(); ··· 521 529 } 522 530 523 531 /* 524 - * given an xlist of mrs, put them all into the list_head for more processing 532 + * given an llist of mrs, put them all into the list_head for more processing 525 533 */ 526 - static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list) 534 + static void llist_append_to_list(struct llist_head *llist, struct list_head *list) 527 535 { 528 536 struct rds_ib_mr *ibmr; 529 - struct xlist_head splice; 530 - struct xlist_head *cur; 531 - struct xlist_head *next; 537 + struct llist_node *node; 538 + struct llist_node *next; 532 539 533 - splice.next = NULL; 534 - xlist_splice(xlist, &splice); 535 - cur = splice.next; 536 - while (cur) { 537 - next = cur->next; 538 - ibmr = list_entry(cur, struct rds_ib_mr, xlist); 540 + node = llist_del_all(llist); 541 + while (node) { 542 + next = node->next; 543 + ibmr = llist_entry(node, struct rds_ib_mr, llnode); 539 544 list_add_tail(&ibmr->unmap_list, list); 540 - cur = next; 545 + node = next; 541 546 } 542 547 } 543 548 544 549 /* 545 - * this takes a list head of mrs and turns it into an xlist of clusters. 546 - * each cluster has an xlist of MR_CLUSTER_SIZE mrs that are ready for 547 - * reuse. 550 + * this takes a list head of mrs and turns it into linked llist nodes 551 + * of clusters. Each cluster has linked llist nodes of 552 + * MR_CLUSTER_SIZE mrs that are ready for reuse. 
548 553 */ 549 - static void list_append_to_xlist(struct rds_ib_mr_pool *pool, 550 - struct list_head *list, struct xlist_head *xlist, 551 - struct xlist_head **tail_ret) 554 + static void list_to_llist_nodes(struct rds_ib_mr_pool *pool, 555 + struct list_head *list, 556 + struct llist_node **nodes_head, 557 + struct llist_node **nodes_tail) 552 558 { 553 559 struct rds_ib_mr *ibmr; 554 - struct xlist_head *cur_mr = xlist; 555 - struct xlist_head *tail_mr = NULL; 560 + struct llist_node *cur = NULL; 561 + struct llist_node **next = nodes_head; 556 562 557 563 list_for_each_entry(ibmr, list, unmap_list) { 558 - tail_mr = &ibmr->xlist; 559 - tail_mr->next = NULL; 560 - cur_mr->next = tail_mr; 561 - cur_mr = tail_mr; 564 + cur = &ibmr->llnode; 565 + *next = cur; 566 + next = &cur->next; 562 567 } 563 - *tail_ret = tail_mr; 568 + *next = NULL; 569 + *nodes_tail = cur; 564 570 } 565 571 566 572 /* ··· 571 581 int free_all, struct rds_ib_mr **ibmr_ret) 572 582 { 573 583 struct rds_ib_mr *ibmr, *next; 574 - struct xlist_head clean_xlist; 575 - struct xlist_head *clean_tail; 584 + struct llist_node *clean_nodes; 585 + struct llist_node *clean_tail; 576 586 LIST_HEAD(unmap_list); 577 587 LIST_HEAD(fmr_list); 578 588 unsigned long unpinned = 0; ··· 593 603 594 604 prepare_to_wait(&pool->flush_wait, &wait, 595 605 TASK_UNINTERRUPTIBLE); 596 - if (xlist_empty(&pool->clean_list)) 606 + if (llist_empty(&pool->clean_list)) 597 607 schedule(); 598 608 599 609 ibmr = rds_ib_reuse_fmr(pool); ··· 618 628 /* Get the list of all MRs to be dropped. Ordering matters - 619 629 * we want to put drop_list ahead of free_list. 
620 630 */ 621 - xlist_append_to_list(&pool->drop_list, &unmap_list); 622 - xlist_append_to_list(&pool->free_list, &unmap_list); 631 + llist_append_to_list(&pool->drop_list, &unmap_list); 632 + llist_append_to_list(&pool->free_list, &unmap_list); 623 633 if (free_all) 624 - xlist_append_to_list(&pool->clean_list, &unmap_list); 634 + llist_append_to_list(&pool->clean_list, &unmap_list); 625 635 626 636 free_goal = rds_ib_flush_goal(pool, free_all); 627 637 ··· 653 663 if (!list_empty(&unmap_list)) { 654 664 /* we have to make sure that none of the things we're about 655 665 * to put on the clean list would race with other cpus trying 656 - * to pull items off. The xlist would explode if we managed to 666 + * to pull items off. The llist would explode if we managed to 657 667 * remove something from the clean list and then add it back again 658 - * while another CPU was spinning on that same item in xlist_del_head. 668 + * while another CPU was spinning on that same item in llist_del_first. 659 669 * 660 - * This is pretty unlikely, but just in case wait for an xlist grace period 670 + * This is pretty unlikely, but just in case wait for an llist grace period 661 671 * here before adding anything back into the clean list. 
662 672 */ 663 673 wait_clean_list_grace(); 664 674 665 - list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail); 675 + list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail); 666 676 if (ibmr_ret) 667 - refill_local(pool, &clean_xlist, ibmr_ret); 677 + *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode); 668 678 669 - /* refill_local may have emptied our list */ 670 - if (!xlist_empty(&clean_xlist)) 671 - xlist_add(clean_xlist.next, clean_tail, &pool->clean_list); 679 + /* more than one entry in llist nodes */ 680 + if (clean_nodes->next) 681 + llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list); 672 682 673 683 } 674 684 ··· 701 711 702 712 /* Return it to the pool's free list */ 703 713 if (ibmr->remap_count >= pool->fmr_attr.max_maps) 704 - xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list); 714 + llist_add(&ibmr->llnode, &pool->drop_list); 705 715 else 706 - xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list); 716 + llist_add(&ibmr->llnode, &pool->free_list); 707 717 708 718 atomic_add(ibmr->sg_len, &pool->free_pinned); 709 719 atomic_inc(&pool->dirty_count);
-80
net/rds/xlist.h
··· 1 - #ifndef _LINUX_XLIST_H 2 - #define _LINUX_XLIST_H 3 - 4 - #include <linux/stddef.h> 5 - #include <linux/poison.h> 6 - #include <linux/prefetch.h> 7 - #include <asm/system.h> 8 - 9 - struct xlist_head { 10 - struct xlist_head *next; 11 - }; 12 - 13 - static inline void INIT_XLIST_HEAD(struct xlist_head *list) 14 - { 15 - list->next = NULL; 16 - } 17 - 18 - static inline int xlist_empty(struct xlist_head *head) 19 - { 20 - return head->next == NULL; 21 - } 22 - 23 - static inline void xlist_add(struct xlist_head *new, struct xlist_head *tail, 24 - struct xlist_head *head) 25 - { 26 - struct xlist_head *cur; 27 - struct xlist_head *check; 28 - 29 - while (1) { 30 - cur = head->next; 31 - tail->next = cur; 32 - check = cmpxchg(&head->next, cur, new); 33 - if (check == cur) 34 - break; 35 - } 36 - } 37 - 38 - static inline struct xlist_head *xlist_del_head(struct xlist_head *head) 39 - { 40 - struct xlist_head *cur; 41 - struct xlist_head *check; 42 - struct xlist_head *next; 43 - 44 - while (1) { 45 - cur = head->next; 46 - if (!cur) 47 - goto out; 48 - 49 - next = cur->next; 50 - check = cmpxchg(&head->next, cur, next); 51 - if (check == cur) 52 - goto out; 53 - } 54 - out: 55 - return cur; 56 - } 57 - 58 - static inline struct xlist_head *xlist_del_head_fast(struct xlist_head *head) 59 - { 60 - struct xlist_head *cur; 61 - 62 - cur = head->next; 63 - if (!cur) 64 - return NULL; 65 - 66 - head->next = cur->next; 67 - return cur; 68 - } 69 - 70 - static inline void xlist_splice(struct xlist_head *list, 71 - struct xlist_head *head) 72 - { 73 - struct xlist_head *cur; 74 - 75 - WARN_ON(head->next); 76 - cur = xchg(&list->next, NULL); 77 - head->next = cur; 78 - } 79 - 80 - #endif