Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/iw_cxgb4: Low resource fixes for Completion queue

Pre-allocate the buffer used to deallocate the completion queue, so
that the completion queue can still be deallocated during RDMA
termination even when the system is running out of memory.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>

Authored by Hariprasad S.
Committed by Doug Ledford.
dd6b0241 0f8ab0b6

+26 -17
+25 -17
drivers/infiniband/hw/cxgb4/cq.c
··· 33 33 #include "iw_cxgb4.h" 34 34 35 35 static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, 36 - struct c4iw_dev_ucontext *uctx) 36 + struct c4iw_dev_ucontext *uctx, struct sk_buff *skb) 37 37 { 38 38 struct fw_ri_res_wr *res_wr; 39 39 struct fw_ri_res *res; 40 40 int wr_len; 41 41 struct c4iw_wr_wait wr_wait; 42 - struct sk_buff *skb; 43 42 int ret; 44 43 45 44 wr_len = sizeof *res_wr + sizeof *res; 46 - skb = alloc_skb(wr_len, GFP_KERNEL); 47 - if (!skb) 48 - return -ENOMEM; 49 45 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); 50 46 51 47 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); ··· 859 863 ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context) 860 864 : NULL; 861 865 destroy_cq(&chp->rhp->rdev, &chp->cq, 862 - ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx); 866 + ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx, 867 + chp->destroy_skb); 868 + chp->destroy_skb = NULL; 863 869 kfree(chp); 864 870 return 0; 865 871 } ··· 877 879 struct c4iw_cq *chp; 878 880 struct c4iw_create_cq_resp uresp; 879 881 struct c4iw_ucontext *ucontext = NULL; 880 - int ret; 882 + int ret, wr_len; 881 883 size_t memsize, hwentries; 882 884 struct c4iw_mm_entry *mm, *mm2; 883 885 ··· 893 895 chp = kzalloc(sizeof(*chp), GFP_KERNEL); 894 896 if (!chp) 895 897 return ERR_PTR(-ENOMEM); 898 + 899 + wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res); 900 + chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL); 901 + if (!chp->destroy_skb) { 902 + ret = -ENOMEM; 903 + goto err1; 904 + } 896 905 897 906 if (ib_context) 898 907 ucontext = to_c4iw_ucontext(ib_context); ··· 941 936 ret = create_cq(&rhp->rdev, &chp->cq, 942 937 ucontext ? 
&ucontext->uctx : &rhp->rdev.uctx); 943 938 if (ret) 944 - goto err1; 939 + goto err2; 945 940 946 941 chp->rhp = rhp; 947 942 chp->cq.size--; /* status page */ ··· 952 947 init_waitqueue_head(&chp->wait); 953 948 ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); 954 949 if (ret) 955 - goto err2; 950 + goto err3; 956 951 957 952 if (ucontext) { 958 953 mm = kmalloc(sizeof *mm, GFP_KERNEL); 959 954 if (!mm) 960 - goto err3; 955 + goto err4; 961 956 mm2 = kmalloc(sizeof *mm2, GFP_KERNEL); 962 957 if (!mm2) 963 - goto err4; 958 + goto err5; 964 959 965 960 uresp.qid_mask = rhp->rdev.cqmask; 966 961 uresp.cqid = chp->cq.cqid; ··· 975 970 ret = ib_copy_to_udata(udata, &uresp, 976 971 sizeof(uresp) - sizeof(uresp.reserved)); 977 972 if (ret) 978 - goto err5; 973 + goto err6; 979 974 980 975 mm->key = uresp.key; 981 976 mm->addr = virt_to_phys(chp->cq.queue); ··· 991 986 __func__, chp->cq.cqid, chp, chp->cq.size, 992 987 chp->cq.memsize, (unsigned long long) chp->cq.dma_addr); 993 988 return &chp->ibcq; 994 - err5: 989 + err6: 995 990 kfree(mm2); 996 - err4: 991 + err5: 997 992 kfree(mm); 998 - err3: 993 + err4: 999 994 remove_handle(rhp, &rhp->cqidr, chp->cq.cqid); 1000 - err2: 995 + err3: 1001 996 destroy_cq(&chp->rhp->rdev, &chp->cq, 1002 - ucontext ? &ucontext->uctx : &rhp->rdev.uctx); 997 + ucontext ? &ucontext->uctx : &rhp->rdev.uctx, 998 + chp->destroy_skb); 999 + err2: 1000 + kfree_skb(chp->destroy_skb); 1003 1001 err1: 1004 1002 kfree(chp); 1005 1003 return ERR_PTR(ret);
+1
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
··· 414 414 struct c4iw_cq { 415 415 struct ib_cq ibcq; 416 416 struct c4iw_dev *rhp; 417 + struct sk_buff *destroy_skb; 417 418 struct t4_cq cq; 418 419 spinlock_t lock; 419 420 spinlock_t comp_handler_lock;