
RDMA/iw_cxgb4: Low resource fixes for connection manager

Pre-allocate buffers for sending the various control messages (close
connection, abort connection, etc.) so that connections are handled
gracefully when the system is running out of memory.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>

Authored by Hariprasad S, committed by Doug Ledford
4a740838 4c72efef
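
The pattern behind the fix: every control message an endpoint may need to send (flowc, half-close, abort request/reply, terminate) gets an skb allocated with GFP_KERNEL while memory is still available, queued on a per-endpoint sk_buff_head, and dequeued at send time instead of being allocated in the (possibly OOM) send path. Below is a minimal sketch of that idea, using hypothetical names (pre_alloc_ctrl_bufs, take_ctrl_buf, NBUFS, BUF_LEN) rather than the driver's own:

#include <linux/skbuff.h>

#define NBUFS   5       /* stands in for CN_MAX_CON_BUF */
#define BUF_LEN 128     /* stands in for roundup(sizeof(union cpl_wr_size), 16) */

/* Fill @q (already set up with skb_queue_head_init()) with NBUFS skbs;
 * all-or-nothing so a half-filled reserve never leaks.
 */
static int pre_alloc_ctrl_bufs(struct sk_buff_head *q)
{
        unsigned int i;

        for (i = 0; i < NBUFS; i++) {
                struct sk_buff *skb = alloc_skb(BUF_LEN, GFP_KERNEL);

                if (!skb) {
                        skb_queue_purge(q);
                        return -ENOMEM;
                }
                skb_queue_tail(q, skb);
        }
        return 0;
}

/* Send paths draw from the reserve instead of allocating. */
static struct sk_buff *take_ctrl_buf(struct sk_buff_head *q)
{
        struct sk_buff *skb = skb_dequeue(q);

        WARN_ON(!skb);  /* reserve is sized so this cannot run dry */
        return skb;
}

Because the reserve holds one buffer per message type, an empty queue at send time is a programming error rather than a runtime condition, which is why the patch downgrades the old printk-and-fail allocation paths to WARN_ON(!skb).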

+114 -42 (3 files changed)

drivers/infiniband/hw/cxgb4/cm.c (+89 -38)
···
        return;
 }
 
+static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
+{
+        struct sk_buff *skb;
+        unsigned int i;
+        size_t len;
+
+        len = roundup(sizeof(union cpl_wr_size), 16);
+        for (i = 0; i < size; i++) {
+                skb = alloc_skb(len, GFP_KERNEL);
+                if (!skb)
+                        goto fail;
+                skb_queue_tail(ep_skb_list, skb);
+        }
+        return 0;
+fail:
+        skb_queue_purge(ep_skb_list);
+        return -ENOMEM;
+}
+
 static void *alloc_ep(int size, gfp_t gfp)
 {
        struct c4iw_ep_common *epc;
···
        if (ep->mpa_skb)
                kfree_skb(ep->mpa_skb);
        }
+        if (!skb_queue_empty(&ep->com.ep_skb_list))
+                skb_queue_purge(&ep->com.ep_skb_list);
        kfree(ep);
 }
···
        }
 }
 
-static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
+static int send_flowc(struct c4iw_ep *ep)
 {
-        unsigned int flowclen = 80;
        struct fw_flowc_wr *flowc;
+        struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
        int i;
        u16 vlan = ep->l2t->vlan;
        int nparams;
+
+        if (WARN_ON(!skb))
+                return -ENOMEM;
 
        if (vlan == CPL_L2T_VLAN_NONE)
                nparams = 8;
        else
                nparams = 9;
 
-        skb = get_skb(skb, flowclen, GFP_KERNEL);
-        flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+        flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN);
 
        flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
                                           FW_FLOWC_WR_NPARAMS_V(nparams));
-        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
+        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN,
                                16)) | FW_WR_FLOWID_V(ep->hwtid));
 
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
···
        return c4iw_ofld_send(&ep->com.dev->rdev, skb);
 }
 
-static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
+static int send_halfclose(struct c4iw_ep *ep)
 {
        struct cpl_close_con_req *req;
-        struct sk_buff *skb;
+        struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
        int wrlen = roundup(sizeof *req, 16);
 
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-        skb = get_skb(NULL, wrlen, gfp);
-        if (!skb) {
-                printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
+        if (WARN_ON(!skb))
                return -ENOMEM;
-        }
+
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
        t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
        req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
···
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
-static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
+static int send_abort(struct c4iw_ep *ep)
 {
        struct cpl_abort_req *req;
        int wrlen = roundup(sizeof *req, 16);
+        struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
 
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-        skb = get_skb(skb, wrlen, gfp);
-        if (!skb) {
-                printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
-                       __func__);
+        if (WARN_ON(!req_skb))
                return -ENOMEM;
-        }
-        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-        t4_set_arp_err_handler(skb, ep, abort_arp_failure);
-        req = (struct cpl_abort_req *) skb_put(skb, wrlen);
+
+        set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx);
+        t4_set_arp_err_handler(req_skb, ep, abort_arp_failure);
+        req = (struct cpl_abort_req *)skb_put(req_skb, wrlen);
        memset(req, 0, wrlen);
        INIT_TP_WR(req, ep->hwtid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
        req->cmd = CPL_ABORT_SEND_RST;
-        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+        return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
 }
 
 static void best_mtu(const unsigned short *mtus, unsigned short mtu,
···
        set_bit(ACT_ESTAB, &ep->com.history);
 
        /* start MPA negotiation */
-        ret = send_flowc(ep, NULL);
+        ret = send_flowc(ep);
        if (ret)
                goto err;
        if (ep->retry_with_mpa_v1)
···
 static int c4iw_reconnect(struct c4iw_ep *ep)
 {
        int err = 0;
+        int size = 0;
        struct sockaddr_in *laddr = (struct sockaddr_in *)
                                    &ep->com.cm_id->m_local_addr;
        struct sockaddr_in *raddr = (struct sockaddr_in *)
···
        PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
        init_timer(&ep->timer);
        c4iw_init_wr_wait(&ep->com.wr_wait);
+
+        /* When MPA revision is different on nodes, the node with MPA_rev=2
+         * tries to reconnect with MPA_rev 1 for the same EP through
+         * c4iw_reconnect(), where the same EP is assigned with new tid for
+         * further connection establishment. As we are using the same EP pointer
+         * for reconnect, few skbs are used during the previous c4iw_connect(),
+         * which leaves the EP with inadequate skbs for further
+         * c4iw_reconnect(), Further causing an assert BUG_ON() due to empty
+         * skb_list() during peer_abort(). Allocate skbs which is already used.
+         */
+        size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
+        if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
+                err = -ENOMEM;
+                goto fail1;
+        }
 
        /*
         * Allocate an active TID to initiate a TCP connection.
···
         * response of 1st connect request.
         */
        connect_reply_upcall(ep, -ECONNRESET);
+fail1:
        c4iw_put_ep(&ep->com);
 out:
        return err;
···
        if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
                child_ep->mtu = peer_mss + hdrs;
 
+        skb_queue_head_init(&child_ep->com.ep_skb_list);
+        if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
+                goto fail;
+
        state_set(&child_ep->com, CONNECTING);
        child_ep->com.dev = dev;
        child_ep->com.cm_id = NULL;
···
                                       (const u32 *)&sin6->sin6_addr.s6_addr, 1);
        }
        goto out;
+fail:
+        c4iw_put_ep(&child_ep->com);
 reject:
        reject_cr(dev, hwtid, skb);
        if (parent_ep)
···
        ep->com.state = MPA_REQ_WAIT;
        start_ep_timer(ep);
        set_bit(PASS_ESTAB, &ep->com.history);
-        ret = send_flowc(ep, skb);
+        ret = send_flowc(ep);
        mutex_unlock(&ep->com.mutex);
        if (ret)
                c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
···
        }
        mutex_unlock(&ep->com.mutex);
 
-        rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
-        if (!rpl_skb) {
-                printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
-                       __func__);
+        rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
+        if (WARN_ON(!rpl_skb)) {
                release = 1;
                goto out;
        }
···
                err = -ENOMEM;
                goto out;
        }
+
+        skb_queue_head_init(&ep->com.ep_skb_list);
+        if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
+                err = -ENOMEM;
+                goto fail1;
+        }
+
        init_timer(&ep->timer);
        ep->plen = conn_param->private_data_len;
        if (ep->plen)
···
        if (!ep->com.qp) {
                PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
                err = -EINVAL;
-                goto fail1;
+                goto fail2;
        }
        ref_qp(ep);
        PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
···
        if (ep->atid == -1) {
                printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
                err = -ENOMEM;
-                goto fail1;
+                goto fail2;
        }
        insert_handle(dev, &dev->atid_idr, ep, ep->atid);
···
        if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
                err = pick_local_ipaddrs(dev, cm_id);
                if (err)
-                        goto fail1;
+                        goto fail2;
        }
 
        /* find a route */
···
        if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
                err = pick_local_ip6addrs(dev, cm_id);
                if (err)
-                        goto fail1;
+                        goto fail2;
        }
 
        /* find a route */
···
        if (!ep->dst) {
                printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
                err = -EHOSTUNREACH;
-                goto fail2;
+                goto fail3;
        }
 
        err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
                        ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
        if (err) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-                goto fail3;
+                goto fail4;
        }
 
        PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
···
                goto out;
 
        cxgb4_l2t_release(ep->l2t);
-fail3:
+fail4:
        dst_release(ep->dst);
-fail2:
+fail3:
        remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
        cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-fail1:
+fail2:
+        skb_queue_purge(&ep->com.ep_skb_list);
        deref_cm_id(&ep->com);
+fail1:
        c4iw_put_ep(&ep->com);
 out:
        return err;
···
                err = -ENOMEM;
                goto fail1;
        }
+        skb_queue_head_init(&ep->com.ep_skb_list);
        PDBG("%s ep %p\n", __func__, ep);
        ep->com.cm_id = cm_id;
        ref_cm_id(&ep->com);
···
        case MPA_REQ_RCVD:
        case MPA_REP_SENT:
        case FPDU_MODE:
+        case CONNECTING:
                close = 1;
                if (abrupt)
                        ep->com.state = ABORTING;
···
                if (abrupt) {
                        set_bit(EP_DISC_ABORT, &ep->com.history);
                        close_complete_upcall(ep, -ECONNRESET);
-                        ret = send_abort(ep, NULL, gfp);
+                        ret = send_abort(ep);
                } else {
                        set_bit(EP_DISC_CLOSE, &ep->com.history);
-                        ret = send_halfclose(ep, gfp);
+                        ret = send_halfclose(ep);
                }
                if (ret) {
                        set_bit(EP_DISC_FAIL, &ep->com.history);
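
Note the c4iw_reconnect() hunk above: because the endpoint object is reused for the MPA_rev fallback, part of the reserve has already been consumed by the first connect attempt, so the patch refills only the deficit (CN_MAX_CON_BUF - skb_queue_len()) rather than re-initializing the list. A hedged sketch of that top-up, reusing the hypothetical helpers from the earlier example:

/* Top the reserve back up to NBUFS before a retry; only the skbs
 * consumed by the previous attempt are re-allocated.
 */
static int refill_ctrl_bufs(struct sk_buff_head *q)
{
        while (skb_queue_len(q) < NBUFS) {
                struct sk_buff *skb = alloc_skb(BUF_LEN, GFP_KERNEL);

                if (!skb)
                        return -ENOMEM; /* caller unwinds via its fail label */
                skb_queue_tail(q, skb);
        }
        return 0;
}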
drivers/infiniband/hw/cxgb4/iw_cxgb4.h (+19)
···
        CM_ID_DEREFED = 28,
 };
 
+enum conn_pre_alloc_buffers {
+        CN_ABORT_REQ_BUF,
+        CN_ABORT_RPL_BUF,
+        CN_CLOSE_CON_REQ_BUF,
+        CN_DESTROY_BUF,
+        CN_FLOWC_BUF,
+        CN_MAX_CON_BUF
+};
+
+#define FLOWC_LEN 80
+union cpl_wr_size {
+        struct cpl_abort_req abrt_req;
+        struct cpl_abort_rpl abrt_rpl;
+        struct fw_ri_wr ri_req;
+        struct cpl_close_con_req close_req;
+        char flowc_buf[FLOWC_LEN];
+};
+
 struct c4iw_ep_common {
        struct iw_cm_id *cm_id;
        struct c4iw_qp *qp;
        struct c4iw_dev *dev;
+        struct sk_buff_head ep_skb_list;
        enum c4iw_ep_state state;
        struct kref kref;
        struct mutex mutex;
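
Any skb from the reserve may end up carrying any of these control messages, so each buffer must be sized for the largest of them; the union lets the compiler compute that maximum, and alloc_ep_skb_list() then allocates roundup(sizeof(union cpl_wr_size), 16) per skb. A small user-space illustration of the sizing trick, with stand-in structs instead of the real CPL definitions:

#include <stdio.h>

/* Stand-ins for cpl_abort_req, cpl_close_con_req, etc. */
struct msg_a { char hdr[16]; int flags; };
struct msg_b { char hdr[16]; long long cookie[4]; };

union any_msg {
        struct msg_a a;
        struct msg_b b;
        char flowc_buf[80];     /* analogous to FLOWC_LEN */
};

int main(void)
{
        /* sizeof(union) is the size of its largest member, so one
         * buffer length covers every message type.
         */
        printf("per-buffer length: %zu\n", sizeof(union any_msg));
        return 0;
}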
drivers/infiniband/hw/cxgb4/qp.c (+6 -4)
···
        PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
             qhp->ep->hwtid);
 
-        skb = alloc_skb(sizeof *wqe, gfp);
-        if (!skb)
+        skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
+        if (WARN_ON(!skb))
                return;
+
        set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
 
        wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
···
        PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
             ep->hwtid);
 
-        skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
-        if (!skb)
+        skb = skb_dequeue(&ep->com.ep_skb_list);
+        if (WARN_ON(!skb))
                return -ENOMEM;
+
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 
        wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));