
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
"Several regression fixes from work that landed in the merge window,
particularly in the mlx5 driver:

- Various static checker and warning fixes

- General bug fixes in rvt, qedr, hns, mlx5 and hfi1

- Several regression fixes related to the ECE and QP changes in the
last cycle

- Fixes for a few long standing crashers in CMA, uverbs ioctl, and
xrc"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (25 commits)
IB/hfi1: Add atomic triggered sleep/wakeup
IB/hfi1: Correct -EBUSY handling in tx code
IB/hfi1: Fix module use count flaw due to leftover module put calls
IB/hfi1: Restore kfree in dummy_netdev cleanup
IB/mad: Fix use after free when destroying MAD agent
RDMA/mlx5: Protect from kernel crash if XRC_TGT doesn't have udata
RDMA/counter: Query a counter before release
RDMA/mad: Fix possible memory leak in ib_mad_post_receive_mads()
RDMA/mlx5: Fix integrity enabled QP creation
RDMA/mlx5: Remove ECE limitation from the RAW_PACKET QPs
RDMA/mlx5: Fix remote gid value in query QP
RDMA/mlx5: Don't access ib_qp fields in internal destroy QP path
RDMA/core: Check that type_attrs is not NULL prior access
RDMA/hns: Fix an cmd queue issue when resetting
RDMA/hns: Fix a calltrace when registering MR from userspace
RDMA/mlx5: Add missed RST2INIT and INIT2INIT steps during ECE handshake
RDMA/cma: Protect bind_list and listen_list while finding matching cm id
RDMA/qedr: Fix KASAN: use-after-free in ucma_event_handler+0x532
RDMA/efa: Set maximum pkeys device attribute
RDMA/rvt: Fix potential memory leak caused by rvt_alloc_rq
...

+197 -124
+1
drivers/infiniband/core/cm.c
···
 
 static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
                                  struct cm_work *work)
+        __releases(&cm_id_priv->lock)
 {
         bool immediate;
 
+18
drivers/infiniband/core/cma.c
···
 {
         struct rdma_id_private *id_priv, *id_priv_dev;
 
+        lockdep_assert_held(&lock);
+
         if (!bind_list)
                 return ERR_PTR(-EINVAL);
 
···
                 }
         }
 
+        mutex_lock(&lock);
         /*
          * Net namespace might be getting deleted while route lookup,
          * cm_id lookup is in progress. Therefore, perform netdevice
···
         id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
 err:
         rcu_read_unlock();
+        mutex_unlock(&lock);
         if (IS_ERR(id_priv) && *net_dev) {
                 dev_put(*net_dev);
                 *net_dev = NULL;
···
         struct rdma_cm_id *id;
         struct net *net = id_priv->id.route.addr.dev_addr.net;
         int ret;
+
+        lockdep_assert_held(&lock);
 
         if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
                 return;
···
         u64 sid, mask;
         __be16 port;
 
+        lockdep_assert_held(&lock);
+
         addr = cma_src_addr(id_priv);
         port = htons(bind_list->port);
 
···
         struct rdma_bind_list *bind_list;
         int ret;
 
+        lockdep_assert_held(&lock);
+
         bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
         if (!bind_list)
                 return -ENOMEM;
···
         struct sockaddr *daddr = cma_dst_addr(id_priv);
         struct sockaddr *saddr = cma_src_addr(id_priv);
         __be16 dport = cma_port(daddr);
+
+        lockdep_assert_held(&lock);
 
         hlist_for_each_entry(cur_id, &bind_list->owners, node) {
                 struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
···
         int low, high, remaining;
         unsigned int rover;
         struct net *net = id_priv->id.route.addr.dev_addr.net;
+
+        lockdep_assert_held(&lock);
 
         inet_get_local_port_range(net, &low, &high);
         remaining = (high - low) + 1;
···
         struct rdma_id_private *cur_id;
         struct sockaddr *addr, *cur_addr;
 
+        lockdep_assert_held(&lock);
+
         addr = cma_src_addr(id_priv);
         hlist_for_each_entry(cur_id, &bind_list->owners, node) {
                 if (id_priv == cur_id)
···
         struct rdma_bind_list *bind_list;
         unsigned short snum;
         int ret;
+
+        lockdep_assert_held(&lock);
 
         snum = ntohs(cma_port(cma_src_addr(id_priv)));
         if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
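
The cma.c change above takes the global cma mutex around the listener lookup and documents every bind_list helper that already relied on it with lockdep_assert_held(). A rough userspace analogue of that assert-the-caller-holds-it pattern, with all names invented for illustration (in the kernel, lockdep tracks mutex ownership automatically):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool lock_held; /* lockdep maintains this for real kernel mutexes */

/* Helper with a locking contract, like the bind_list helpers above. */
static void must_hold_lock(void)
{
        assert(lock_held); /* stands in for lockdep_assert_held(&lock) */
        /* ... touch shared list state here ... */
}

int main(void)
{
        pthread_mutex_lock(&lock);
        lock_held = true;
        must_hold_lock(); /* contract satisfied, assert passes */
        lock_held = false;
        pthread_mutex_unlock(&lock);
        return 0;
}

The point of the kernel annotation is the same as the assert: a caller that forgets the lock fails loudly in testing instead of racing silently.
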
+3 -1
drivers/infiniband/core/counters.c
···
         return ret;
 }
 
-static void counter_history_stat_update(const struct rdma_counter *counter)
+static void counter_history_stat_update(struct rdma_counter *counter)
 {
         struct ib_device *dev = counter->device;
         struct rdma_port_counter *port_counter;
···
         port_counter = &dev->port_data[counter->port].port_counter;
         if (!port_counter->hstats)
                 return;
+
+        rdma_counter_query_stats(counter);
 
         for (i = 0; i < counter->stats->num_counters; i++)
                 port_counter->hstats->value[i] += counter->stats->value[i];
+2 -1
drivers/infiniband/core/mad.c
···
         xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
 
         flush_workqueue(port_priv->wq);
-        ib_cancel_rmpp_recvs(mad_agent_priv);
 
         deref_mad_agent(mad_agent_priv);
         wait_for_completion(&mad_agent_priv->comp);
+        ib_cancel_rmpp_recvs(mad_agent_priv);
 
         ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
 
···
                                          DMA_FROM_DEVICE);
                 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
                                                   sg_list.addr))) {
+                        kfree(mad_priv);
                         ret = -ENOMEM;
                         break;
                 }
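
The first mad.c hunk is an ordering fix: ib_cancel_rmpp_recvs() used to free RMPP receive state while agent work could still hold references; it now runs only after the last reference is dropped and the completion fires. A minimal userspace sketch of that drop-ref, wait, then free sequence (the pthread/C11 names are stand-ins, not the kernel API):

#include <pthread.h>
#include <stdatomic.h>

struct agent {
        atomic_int refcount;          /* like the MAD agent's refcount */
        pthread_mutex_t lock;
        pthread_cond_t comp;          /* like mad_agent_priv->comp */
        int done;
};

static void agent_put(struct agent *a)
{
        if (atomic_fetch_sub(&a->refcount, 1) == 1) { /* last ref dropped */
                pthread_mutex_lock(&a->lock);
                a->done = 1;
                pthread_cond_signal(&a->comp);
                pthread_mutex_unlock(&a->lock);
        }
}

static void agent_teardown(struct agent *a)
{
        agent_put(a);                 /* deref_mad_agent() step */

        pthread_mutex_lock(&a->lock); /* wait_for_completion() step */
        while (!a->done)
                pthread_cond_wait(&a->comp, &a->lock);
        pthread_mutex_unlock(&a->lock);

        /*
         * No in-flight user can reference the agent past this point, so
         * state that work may touch (the RMPP receives in mad.c) is torn
         * down only now -- the ordering the patch restores.
         */
}

int main(void)
{
        struct agent a = { .refcount = 1, .done = 0,
                           .lock = PTHREAD_MUTEX_INITIALIZER,
                           .comp = PTHREAD_COND_INITIALIZER };
        agent_teardown(&a);
        return 0;
}
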
+22 -16
drivers/infiniband/core/rdma_core.c
···
 alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
                        struct uverbs_attr_bundle *attrs)
 {
-        const struct uverbs_obj_fd_type *fd_type =
-                container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+        const struct uverbs_obj_fd_type *fd_type;
         int new_fd;
-        struct ib_uobject *uobj;
+        struct ib_uobject *uobj, *ret;
         struct file *filp;
-
-        if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
-                    fd_type->fops->release != &uverbs_async_event_release))
-                return ERR_PTR(-EINVAL);
-
-        new_fd = get_unused_fd_flags(O_CLOEXEC);
-        if (new_fd < 0)
-                return ERR_PTR(new_fd);
 
         uobj = alloc_uobj(attrs, obj);
         if (IS_ERR(uobj))
+                return uobj;
+
+        fd_type =
+                container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+        if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
+                    fd_type->fops->release != &uverbs_async_event_release)) {
+                ret = ERR_PTR(-EINVAL);
                 goto err_fd;
+        }
+
+        new_fd = get_unused_fd_flags(O_CLOEXEC);
+        if (new_fd < 0) {
+                ret = ERR_PTR(new_fd);
+                goto err_fd;
+        }
 
         /* Note that uverbs_uobject_fd_release() is called during abort */
         filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
                                   fd_type->flags);
         if (IS_ERR(filp)) {
-                uverbs_uobject_put(uobj);
-                uobj = ERR_CAST(filp);
-                goto err_fd;
+                ret = ERR_CAST(filp);
+                goto err_getfile;
         }
         uobj->object = filp;
 
         uobj->id = new_fd;
         return uobj;
 
-err_fd:
+err_getfile:
         put_unused_fd(new_fd);
-        return uobj;
+err_fd:
+        uverbs_uobject_put(uobj);
+        return ret;
 }
 
 struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
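
The rdma_core.c rework above restores the classic goto-unwind shape: acquire the uobject, then the fd, and on failure release in reverse so each label undoes exactly what succeeded before it. A compact sketch of that pattern under invented names (grab_fd() is a stand-in for get_unused_fd_flags(), not a real API):

#include <stdlib.h>

/* Stand-in for get_unused_fd_flags(); always fails in this sketch. */
static int grab_fd(void)
{
        return -1;
}

struct uobj {
        int fd;
        void *buf;
};

static struct uobj *uobj_create(void)
{
        struct uobj *u = malloc(sizeof(*u));

        if (!u)
                return NULL;

        u->buf = malloc(4096);      /* acquire A */
        if (!u->buf)
                goto err_uobj;

        u->fd = grab_fd();          /* acquire B */
        if (u->fd < 0)
                goto err_buf;

        return u;

err_buf:
        free(u->buf);               /* release A: reverse order of acquire */
err_uobj:
        free(u);
        return NULL;
}

int main(void)
{
        return uobj_create() ? 0 : 1;
}

The bug class being fixed is exactly a mismatch between the acquire order and the unwind labels, which is why the patch renames and splits err_fd/err_getfile.
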
+1
drivers/infiniband/hw/efa/efa_verbs.c
···
         props->max_send_sge = dev_attr->max_sq_sge;
         props->max_recv_sge = dev_attr->max_rq_sge;
         props->max_sge_rd = dev_attr->max_wr_rdma_sge;
+        props->max_pkeys = 1;
 
         if (udata && udata->outlen) {
                 resp.max_sq_sge = dev_attr->max_sq_sge;
+2 -17
drivers/infiniband/hw/hfi1/debugfs.c
···
 static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
 {
         struct hfi1_pportdata *ppd;
-        int ret;
 
         ppd = private2ppd(fp);
 
-        ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
-        if (ret) /* failed - release the module */
-                module_put(THIS_MODULE);
-
-        return ret;
+        return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
 }
 
 static int i2c1_debugfs_open(struct inode *in, struct file *fp)
···
         ppd = private2ppd(fp);
 
         release_chip_resource(ppd->dd, i2c_target(target));
-        module_put(THIS_MODULE);
 
         return 0;
 }
···
 static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
 {
         struct hfi1_pportdata *ppd;
-        int ret;
-
-        if (!try_module_get(THIS_MODULE))
-                return -ENODEV;
 
         ppd = private2ppd(fp);
 
-        ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
-        if (ret) /* failed - release the module */
-                module_put(THIS_MODULE);
-
-        return ret;
+        return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
 }
 
 static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
···
         ppd = private2ppd(fp);
 
         release_chip_resource(ppd->dd, i2c_target(target));
-        module_put(THIS_MODULE);
 
         return 0;
 }
+1 -1
drivers/infiniband/hw/hfi1/iowait.h
···
  * @wait_head: the wait queue
  *
  * This function is called to insert an iowait struct into a
- * wait queue after a resource (eg, sdma decriptor or pio
+ * wait queue after a resource (eg, sdma descriptor or pio
  * buffer) is run out.
  */
 static inline void iowait_queue(bool pkts_sent, struct iowait *w,
+6
drivers/infiniband/hw/hfi1/ipoib.h
···
  * @sde: sdma engine
  * @tx_list: tx request list
  * @sent_txreqs: count of txreqs posted to sdma
+ * @stops: count of stops of queue
+ * @ring_full: ring has been filled
+ * @no_desc: descriptor shortage seen
  * @flow: tracks when list needs to be flushed for a flow change
  * @q_idx: ipoib Tx queue index
  * @pkts_sent: indicator packets have been sent from this queue
···
         struct sdma_engine *sde;
         struct list_head tx_list;
         u64 sent_txreqs;
+        atomic_t stops;
+        atomic_t ring_full;
+        atomic_t no_desc;
         union hfi1_ipoib_flow flow;
         u8 q_idx;
         bool pkts_sent;
+65 -37
drivers/infiniband/hw/hfi1/ipoib_tx.c
···
         return sent - completed;
 }
 
+static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
+{
+        return hfi1_ipoib_txreqs(txq->sent_txreqs,
+                                 atomic64_read(&txq->complete_txreqs));
+}
+
+static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
+{
+        if (atomic_inc_return(&txq->stops) == 1)
+                netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
+{
+        if (atomic_dec_and_test(&txq->stops))
+                netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
+{
+        return min_t(uint, txq->priv->netdev->tx_queue_len,
+                     txq->tx_ring.max_items - 1);
+}
+
+static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
+{
+        return min_t(uint, txq->priv->netdev->tx_queue_len,
+                     txq->tx_ring.max_items) >> 1;
+}
+
 static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
 {
-        if (unlikely(hfi1_ipoib_txreqs(++txq->sent_txreqs,
-                                       atomic64_read(&txq->complete_txreqs)) >=
-            min_t(unsigned int, txq->priv->netdev->tx_queue_len,
-                  txq->tx_ring.max_items - 1)))
-                netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
+        ++txq->sent_txreqs;
+        if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
+            !atomic_xchg(&txq->ring_full, 1))
+                hfi1_ipoib_stop_txq(txq);
 }
 
 static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
 {
         struct net_device *dev = txq->priv->netdev;
-
-        /* If the queue is already running just return */
-        if (likely(!__netif_subqueue_stopped(dev, txq->q_idx)))
-                return;
 
         /* If shutting down just return as queue state is irrelevant */
         if (unlikely(dev->reg_state != NETREG_REGISTERED))
···
          * Use the minimum of the current tx_queue_len or the rings max txreqs
          * to protect against ring overflow.
          */
-        if (hfi1_ipoib_txreqs(txq->sent_txreqs,
-                              atomic64_read(&txq->complete_txreqs))
-            < min_t(unsigned int, dev->tx_queue_len,
-                    txq->tx_ring.max_items) >> 1)
-                netif_wake_subqueue(dev, txq->q_idx);
+        if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
+            atomic_xchg(&txq->ring_full, 0))
+                hfi1_ipoib_wake_txq(txq);
 }
 
 static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
···
         if (unlikely(!tx))
                 return ERR_PTR(-ENOMEM);
 
-        /* so that we can test if the sdma decriptors are there */
+        /* so that we can test if the sdma descriptors are there */
         tx->txreq.num_desc = 0;
         tx->priv = priv;
         tx->txq = txp->txq;
         tx->skb = skb;
+        INIT_LIST_HEAD(&tx->txreq.list);
 
         hfi1_ipoib_build_ib_tx_headers(tx, txp);
···
 
         ret = hfi1_ipoib_submit_tx(txq, tx);
         if (likely(!ret)) {
+tx_ok:
                 trace_sdma_output_ibhdr(tx->priv->dd,
                                         &tx->sdma_hdr.hdr,
                                         ib_is_sc5(txp->flow.sc5));
···
 
         txq->pkts_sent = false;
 
-        if (ret == -EBUSY) {
-                list_add_tail(&tx->txreq.list, &txq->tx_list);
-
-                trace_sdma_output_ibhdr(tx->priv->dd,
-                                        &tx->sdma_hdr.hdr,
-                                        ib_is_sc5(txp->flow.sc5));
-                hfi1_ipoib_check_queue_depth(txq);
-                return NETDEV_TX_OK;
-        }
-
-        if (ret == -ECOMM) {
-                hfi1_ipoib_check_queue_depth(txq);
-                return NETDEV_TX_OK;
-        }
+        if (ret == -EBUSY || ret == -ECOMM)
+                goto tx_ok;
 
         sdma_txclean(priv->dd, &tx->txreq);
         dev_kfree_skb_any(skb);
···
         struct ipoib_txreq *tx;
 
         /* Has the flow change ? */
-        if (txq->flow.as_int != txp->flow.as_int)
-                (void)hfi1_ipoib_flush_tx_list(dev, txq);
+        if (txq->flow.as_int != txp->flow.as_int) {
+                int ret;
 
+                ret = hfi1_ipoib_flush_tx_list(dev, txq);
+                if (unlikely(ret)) {
+                        if (ret == -EBUSY)
+                                ++dev->stats.tx_dropped;
+                        dev_kfree_skb_any(skb);
+                        return NETDEV_TX_OK;
+                }
+        }
         tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
         if (IS_ERR(tx)) {
                 int ret = PTR_ERR(tx);
···
                 return -EAGAIN;
         }
 
-        netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
-
-        if (list_empty(&txq->wait.list))
+        if (list_empty(&txreq->list))
+                /* came from non-list submit */
+                list_add_tail(&txreq->list, &txq->tx_list);
+        if (list_empty(&txq->wait.list)) {
+                if (!atomic_xchg(&txq->no_desc, 1))
+                        hfi1_ipoib_stop_txq(txq);
                 iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+        }
 
         write_sequnlock(&sde->waitlock);
         return -EBUSY;
···
         struct net_device *dev = txq->priv->netdev;
 
         if (likely(dev->reg_state == NETREG_REGISTERED) &&
-            likely(__netif_subqueue_stopped(dev, txq->q_idx)) &&
             likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
-                netif_wake_subqueue(dev, txq->q_idx);
+                if (atomic_xchg(&txq->no_desc, 0))
+                        hfi1_ipoib_wake_txq(txq);
 }
···
                 txq->sde = NULL;
                 INIT_LIST_HEAD(&txq->tx_list);
                 atomic64_set(&txq->complete_txreqs, 0);
+                atomic_set(&txq->stops, 0);
+                atomic_set(&txq->ring_full, 0);
+                atomic_set(&txq->no_desc, 0);
                 txq->q_idx = i;
                 txq->flow.tx_queue = 0xff;
                 txq->flow.sc5 = 0xff;
···
                         atomic64_inc(complete_txreqs);
         }
 
-        if (hfi1_ipoib_txreqs(txq->sent_txreqs, atomic64_read(complete_txreqs)))
+        if (hfi1_ipoib_used(txq))
                 dd_dev_warn(txq->priv->dd,
                             "txq %d not empty found %llu requests\n",
                             txq->q_idx,
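
The ipoib_tx.c changes replace direct netif_stop/wake calls with edge-triggered flags (atomic_xchg()) funneled through a nesting stops counter, so the two independent stop reasons (ring_full, no_desc) can neither double-stop nor spuriously wake the queue. A userspace C11 sketch of the same pattern, all names invented and printf standing in for the netif calls:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int stops;      /* like txq->stops */
static atomic_int ring_full;  /* one stop reason */

static void stop_queue(void)
{
        /* atomic_inc_return(&stops) == 1: only the first stopper acts */
        if (atomic_fetch_add(&stops, 1) == 0)
                printf("netif_stop_subqueue()\n");
}

static void wake_queue(void)
{
        /* atomic_dec_and_test(&stops): only the last waker acts */
        if (atomic_fetch_sub(&stops, 1) == 1)
                printf("netif_wake_subqueue()\n");
}

static void ring_became_full(void)
{
        if (!atomic_exchange(&ring_full, 1)) /* only the 0->1 edge stops */
                stop_queue();
}

static void ring_drained(void)
{
        if (atomic_exchange(&ring_full, 0))  /* only the 1->0 edge wakes */
                wake_queue();
}

int main(void)
{
        ring_became_full();
        ring_became_full(); /* no second stop: flag was already set */
        ring_drained();     /* queue wakes exactly once */
        ring_drained();     /* no-op: flag was already clear */
        return 0;
}

Because atomic_exchange() returns the previous value, exactly one thread observes each transition, which is what makes the stop/wake calls balanced even under concurrent submit and completion paths.
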
+1 -1
drivers/infiniband/hw/hfi1/netdev_rx.c
···
 {
         if (dd->dummy_netdev) {
                 dd_dev_info(dd, "hfi1 netdev freed\n");
-                free_netdev(dd->dummy_netdev);
+                kfree(dd->dummy_netdev);
                 dd->dummy_netdev = NULL;
         }
 }
+1 -1
drivers/infiniband/hw/hfi1/verbs_txreq.h
···
         tx->mr = NULL;
         tx->sde = priv->s_sde;
         tx->psc = priv->s_sendcontext;
-        /* so that we can test if the sdma decriptors are there */
+        /* so that we can test if the sdma descriptors are there */
         tx->txreq.num_desc = 0;
         /* Set the header type */
         tx->phdr.hdr.hdr_type = priv->hdr_type;
+4 -3
drivers/infiniband/hw/hns/hns_roce_device.h
···
         int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
         void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
                         enum ib_mtu mtu);
-        int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
-                          unsigned long mtpt_idx);
+        int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+                          struct hns_roce_mr *mr, unsigned long mtpt_idx);
         int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
                                 struct hns_roce_mr *mr, int flags, u32 pdn,
                                 int mr_access_flags, u64 iova, u64 size,
                                 void *mb_buf);
-        int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+        int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+                               struct hns_roce_mr *mr);
         int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
         void (*write_cqc)(struct hns_roce_dev *hr_dev,
                           struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
+2 -2
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
···
                        val);
 }
 
-static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
+                                  struct hns_roce_mr *mr,
                                   unsigned long mtpt_idx)
 {
-        struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
         u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
         struct ib_device *ibdev = &hr_dev->ib_dev;
         struct hns_roce_v1_mpt_entry *mpt_entry;
+9 -8
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
···
         instance_stage = handle->rinfo.instance_state;
         reset_stage = handle->rinfo.reset_state;
         reset_cnt = ops->ae_dev_reset_cnt(handle);
-        hw_resetting = ops->get_hw_reset_stat(handle);
+        hw_resetting = ops->get_cmdq_stat(handle);
         sw_resetting = ops->ae_dev_resetting(handle);
 
         if (reset_cnt != hr_dev->reset_cnt)
···
         return hns_roce_cmq_send(hr_dev, &desc, 1);
 }
 
-static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
+static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
+                        struct hns_roce_v2_mpt_entry *mpt_entry,
                         struct hns_roce_mr *mr)
 {
-        struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
         u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
         struct ib_device *ibdev = &hr_dev->ib_dev;
         dma_addr_t pbl_ba;
···
         return 0;
 }
 
-static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
+                                  void *mb_buf, struct hns_roce_mr *mr,
                                   unsigned long mtpt_idx)
 {
         struct hns_roce_v2_mpt_entry *mpt_entry;
···
         if (mr->type == MR_TYPE_DMA)
                 return 0;
 
-        ret = set_mtpt_pbl(mpt_entry, mr);
+        ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
 
         return ret;
 }
···
                 mr->iova = iova;
                 mr->size = size;
 
-                ret = set_mtpt_pbl(mpt_entry, mr);
+                ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
         }
 
         return ret;
 }
 
-static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
+static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
+                                       void *mb_buf, struct hns_roce_mr *mr)
 {
-        struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
         struct ib_device *ibdev = &hr_dev->ib_dev;
         struct hns_roce_v2_mpt_entry *mpt_entry;
         dma_addr_t pbl_ba = 0;
+3 -2
drivers/infiniband/hw/hns/hns_roce_mr.c
···
         }
 
         if (mr->type != MR_TYPE_FRMR)
-                ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+                ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
+                                             mtpt_idx);
         else
-                ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
+                ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
         if (ret) {
                 dev_err(dev, "Write mtpt fail!\n");
                 goto err_page;
+25 -25
drivers/infiniband/hw/mlx5/qp.c
···
         if (!in)
                 return -ENOMEM;
 
-        if (MLX5_CAP_GEN(mdev, ece_support))
+        if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
                 MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
         qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
···
         unsigned long flags;
         int err;
 
-        if (qp->ibqp.rwq_ind_tbl) {
+        if (qp->is_rss) {
                 destroy_rss_raw_qp_tir(dev, qp);
                 return;
         }
 
-        base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+        base = (qp->type == IB_QPT_RAW_PACKET ||
                 qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
-               &qp->raw_packet_qp.rq.base :
-               &qp->trans_qp.base;
+                       &qp->raw_packet_qp.rq.base :
+                       &qp->trans_qp.base;
 
         if (qp->state != IB_QPS_RESET) {
-                if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
+                if (qp->type != IB_QPT_RAW_PACKET &&
                     !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
                         err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
                                                   NULL, &base->mqp, NULL);
···
                          base->mqp.qpn);
         }
 
-        get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
-                &send_cq, &recv_cq);
+        get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq,
+                &recv_cq);
 
         spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
         mlx5_ib_lock_cqs(send_cq, recv_cq);
···
         mlx5_ib_unlock_cqs(send_cq, recv_cq);
         spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
 
-        if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+        if (qp->type == IB_QPT_RAW_PACKET ||
             qp->flags & IB_QP_CREATE_SOURCE_QPN) {
                 destroy_raw_packet_qp(dev, qp);
         } else {
···
                 return (create_flags) ? -EINVAL : 0;
 
         process_create_flag(dev, &create_flags,
+                            IB_QP_CREATE_INTEGRITY_EN,
+                            MLX5_CAP_GEN(mdev, sho), qp);
+        process_create_flag(dev, &create_flags,
                             IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
                             MLX5_CAP_GEN(mdev, block_lb_mc), qp);
         process_create_flag(dev, &create_flags, IB_QP_CREATE_CROSS_CHANNEL,
···
 static int check_ucmd_data(struct mlx5_ib_dev *dev,
                            struct mlx5_create_qp_params *params)
 {
-        struct ib_qp_init_attr *attr = params->attr;
         struct ib_udata *udata = params->udata;
         size_t size, last;
         int ret;
···
                  */
                 last = sizeof(struct mlx5_ib_create_qp_rss);
         else
-                /* IB_QPT_RAW_PACKET doesn't have ECE data */
-                switch (attr->qp_type) {
-                case IB_QPT_RAW_PACKET:
-                        last = offsetof(struct mlx5_ib_create_qp, ece_options);
-                        break;
-                default:
-                        last = offsetof(struct mlx5_ib_create_qp, reserved);
-                }
+                last = offsetof(struct mlx5_ib_create_qp, reserved);
 
         if (udata->inlen <= last)
                 return 0;
···
         if (!ret)
                 mlx5_ib_dbg(
                         dev,
-                        "udata is not cleared, inlen = %lu, ucmd = %lu, last = %lu, size = %lu\n",
+                        "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
                         udata->inlen, params->ucmd_size, last, size);
         return ret ? 0 : -EINVAL;
 }
···
         return &qp->ibqp;
 
 destroy_qp:
-        if (qp->type == MLX5_IB_QPT_DCT)
+        if (qp->type == MLX5_IB_QPT_DCT) {
                 mlx5_ib_destroy_dct(qp);
-        else
+        } else {
+                /*
+                 * The two lines below are temp solution till QP allocation
+                 * will be moved to be under IB/core responsiblity.
+                 */
+                qp->ibqp.send_cq = attr->send_cq;
+                qp->ibqp.recv_cq = attr->recv_cq;
                 destroy_qp_common(dev, qp, udata);
+        }
+
         qp = NULL;
 free_qp:
         kfree(qp);
···
 
         if (udata->outlen < min_resp_len)
                 return -EINVAL;
-        resp.response_length = min_resp_len;
-
         /*
          * If we don't have enough space for the ECE options,
          * simply indicate it with resp.response_length.
···
                                 MLX5_GET(ads, path, src_addr_index),
                                 MLX5_GET(ads, path, hop_limit),
                                 MLX5_GET(ads, path, tclass));
-                memcpy(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip),
-                       MLX5_FLD_SZ_BYTES(ads, rgid_rip));
+                rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
         }
 }
+8
drivers/infiniband/hw/mlx5/qpc.c
···
         int ece = 0;
 
         switch (opcode) {
+        case MLX5_CMD_OP_INIT2INIT_QP:
+                ece = MLX5_GET(init2init_qp_out, out, ece);
+                break;
         case MLX5_CMD_OP_INIT2RTR_QP:
                 ece = MLX5_GET(init2rtr_qp_out, out, ece);
                 break;
···
                 break;
         case MLX5_CMD_OP_RTS2RTS_QP:
                 ece = MLX5_GET(rts2rts_qp_out, out, ece);
+                break;
+        case MLX5_CMD_OP_RST2INIT_QP:
+                ece = MLX5_GET(rst2init_qp_out, out, ece);
                 break;
         default:
                 break;
···
                         return -ENOMEM;
                 MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
                                   opt_param_mask, qpc, uid);
+                MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
                 break;
         case MLX5_CMD_OP_INIT2RTR_QP:
                 if (MBOX_ALLOC(mbox, init2rtr_qp))
···
                         return -ENOMEM;
                 MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
                                   opt_param_mask, qpc, uid);
+                MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
                 break;
         default:
                 return -EINVAL;
+11 -2
drivers/infiniband/hw/qedr/qedr_iw_cm.c
···
         if (params->cm_info) {
                 event.ird = params->cm_info->ird;
                 event.ord = params->cm_info->ord;
-                event.private_data_len = params->cm_info->private_data_len;
-                event.private_data = (void *)params->cm_info->private_data;
+                /* Only connect_request and reply have valid private data
+                 * the rest of the events this may be left overs from
+                 * connection establishment. CONNECT_REQUEST is issued via
+                 * qedr_iw_mpa_request
+                 */
+                if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
+                        event.private_data_len =
+                                params->cm_info->private_data_len;
+                        event.private_data =
+                                (void *)params->cm_info->private_data;
+                }
         }
 
         if (ep->cm_id)
+4 -2
drivers/infiniband/sw/rdmavt/qp.c
···
         err = alloc_ud_wq_attr(qp, rdi->dparms.node);
         if (err) {
                 ret = (ERR_PTR(err));
-                goto bail_driver_priv;
+                goto bail_rq_rvt;
         }
 
         if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
···
         rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
 
 bail_rq_wq:
-        rvt_free_rq(&qp->r_rq);
         free_ud_wq_attr(qp);
+
+bail_rq_rvt:
+        rvt_free_rq(&qp->r_rq);
 
 bail_driver_priv:
         rdi->driver_f.qp_priv_free(rdi, qp);
+2 -1
drivers/infiniband/sw/siw/siw_qp_rx.c
···
                         break;
 
                 bytes = min(bytes, len);
-                if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) {
+                if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
+                    bytes) {
                         copied += bytes;
                         offset += bytes;
                         len -= bytes;
+6 -4
include/linux/mlx5/mlx5_ifc.h
···
 
         u8         syndrome[0x20];
 
-        u8         reserved_at_40[0x40];
+        u8         reserved_at_40[0x20];
+        u8         ece[0x20];
 };
 
 struct mlx5_ifc_rst2init_qp_in_bits {
···
 
         u8         opt_param_mask[0x20];
 
-        u8         reserved_at_a0[0x20];
+        u8         ece[0x20];
 
         struct mlx5_ifc_qpc_bits qpc;
 
···
 
         u8         syndrome[0x20];
 
-        u8         reserved_at_40[0x40];
+        u8         reserved_at_40[0x20];
+        u8         ece[0x20];
 };
 
 struct mlx5_ifc_init2init_qp_in_bits {
···
 
         u8         opt_param_mask[0x20];
 
-        u8         reserved_at_a0[0x20];
+        u8         ece[0x20];
 
         struct mlx5_ifc_qpc_bits qpc;
 