Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB/mthca: Make all device methods truly reentrant

Documentation/infiniband/core_locking.txt says:

All of the methods in struct ib_device exported by a low-level
driver must be fully reentrant. The low-level driver is required to
perform all synchronization necessary to maintain consistency, even
if multiple function calls using the same object are run
simultaneously.

However, mthca's modify_qp, modify_srq and resize_cq methods are
currently not reentrant. Add a mutex to the QP, SRQ and CQ structures
so that these calls can be properly serialized.

Signed-off-by: Roland Dreier <rolandd@cisco.com>

+48 -22
+1
drivers/infiniband/hw/mthca/mthca_cq.c
··· 822 822 spin_lock_init(&cq->lock); 823 823 cq->refcount = 1; 824 824 init_waitqueue_head(&cq->wait); 825 + mutex_init(&cq->mutex); 825 826 826 827 memset(cq_context, 0, sizeof *cq_context); 827 828 cq_context->flags = cpu_to_be32(MTHCA_CQ_STATUS_OK |
+16 -7
drivers/infiniband/hw/mthca/mthca_provider.c
··· 793 793 if (entries < 1 || entries > dev->limits.max_cqes) 794 794 return -EINVAL; 795 795 796 + mutex_lock(&cq->mutex); 797 + 796 798 entries = roundup_pow_of_two(entries + 1); 797 - if (entries == ibcq->cqe + 1) 798 - return 0; 799 + if (entries == ibcq->cqe + 1) { 800 + ret = 0; 801 + goto out; 802 + } 799 803 800 804 if (cq->is_kernel) { 801 805 ret = mthca_alloc_resize_buf(dev, cq, entries); 802 806 if (ret) 803 - return ret; 807 + goto out; 804 808 lkey = cq->resize_buf->buf.mr.ibmr.lkey; 805 809 } else { 806 - if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) 807 - return -EFAULT; 810 + if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { 811 + ret = -EFAULT; 812 + goto out; 813 + } 808 814 lkey = ucmd.lkey; 809 815 } 810 816 ··· 827 821 cq->resize_buf = NULL; 828 822 spin_unlock_irq(&cq->lock); 829 823 } 830 - return ret; 824 + goto out; 831 825 } 832 826 833 827 if (cq->is_kernel) { ··· 854 848 } else 855 849 ibcq->cqe = entries - 1; 856 850 857 - return 0; 851 + out: 852 + mutex_unlock(&cq->mutex); 853 + 854 + return ret; 858 855 } 859 856 860 857 static int mthca_destroy_cq(struct ib_cq *cq)
+3
drivers/infiniband/hw/mthca/mthca_provider.h
··· 214 214 int arm_sn; 215 215 216 216 wait_queue_head_t wait; 217 + struct mutex mutex; 217 218 }; 218 219 219 220 struct mthca_srq { ··· 238 237 struct mthca_mr mr; 239 238 240 239 wait_queue_head_t wait; 240 + struct mutex mutex; 241 241 }; 242 242 243 243 struct mthca_wq { ··· 280 278 union mthca_buf queue; 281 279 282 280 wait_queue_head_t wait; 281 + struct mutex mutex; 283 282 }; 284 283 285 284 struct mthca_sqp {
+23 -15
drivers/infiniband/hw/mthca/mthca_qp.c
··· 536 536 u8 status; 537 537 int err = -EINVAL; 538 538 539 + mutex_lock(&qp->mutex); 540 + 539 541 if (attr_mask & IB_QP_CUR_STATE) { 540 542 cur_state = attr->cur_qp_state; 541 543 } else { ··· 555 553 "%d->%d with attr 0x%08x\n", 556 554 qp->transport, cur_state, new_state, 557 555 attr_mask); 558 - return -EINVAL; 556 + goto out; 559 557 } 560 558 561 559 if ((attr_mask & IB_QP_PKEY_INDEX) && 562 560 attr->pkey_index >= dev->limits.pkey_table_len) { 563 561 mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n", 564 562 attr->pkey_index, dev->limits.pkey_table_len-1); 565 - return -EINVAL; 563 + goto out; 566 564 } 567 565 568 566 if ((attr_mask & IB_QP_PORT) && 569 567 (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) { 570 568 mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num); 571 - return -EINVAL; 569 + goto out; 572 570 } 573 571 574 572 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && 575 573 attr->max_rd_atomic > dev->limits.max_qp_init_rdma) { 576 574 mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n", 577 575 attr->max_rd_atomic, dev->limits.max_qp_init_rdma); 578 - return -EINVAL; 576 + goto out; 579 577 } 580 578 581 579 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && 582 580 attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) { 583 581 mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n", 584 582 attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift); 585 - return -EINVAL; 583 + goto out; 586 584 } 587 585 588 586 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 589 - if (IS_ERR(mailbox)) 590 - return PTR_ERR(mailbox); 587 + if (IS_ERR(mailbox)) { 588 + err = PTR_ERR(mailbox); 589 + goto out; 590 + } 591 591 qp_param = mailbox->buf; 592 592 qp_context = &qp_param->context; 593 593 memset(qp_param, 0, sizeof *qp_param); ··· 622 618 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) { 623 619 mthca_dbg(dev, "path MTU (%u) is invalid\n", 624 620 attr->path_mtu); 625 - goto out;
621 + goto out_mailbox; 626 622 } 627 623 qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; 628 624 } ··· 676 672 if (attr_mask & IB_QP_AV) { 677 673 if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path, 678 674 attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) 679 - goto out; 675 + goto out_mailbox; 680 676 681 677 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH); 682 678 } ··· 690 686 if (attr->alt_pkey_index >= dev->limits.pkey_table_len) { 691 687 mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n", 692 688 attr->alt_pkey_index, dev->limits.pkey_table_len-1); 693 - goto out; 689 + goto out_mailbox; 694 690 } 695 691 696 692 if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) { 697 693 mthca_dbg(dev, "Alternate port number (%u) is invalid\n", 698 694 attr->alt_port_num); 699 - goto out; 695 + goto out_mailbox; 700 696 } 701 697 702 698 if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path, 703 699 attr->alt_ah_attr.port_num)) 704 - goto out; 700 + goto out_mailbox; 705 701 706 702 qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index | 707 703 attr->alt_port_num << 24); ··· 797 793 err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0, 798 794 mailbox, sqd_event, &status); 799 795 if (err) 800 - goto out; 796 + goto out_mailbox; 801 797 if (status) { 802 798 mthca_warn(dev, "modify QP %d->%d returned status %02x.\n", 803 799 cur_state, new_state, status); 804 800 err = -EINVAL; 805 - goto out; 801 + goto out_mailbox; 806 802 } 807 803 808 804 qp->state = new_state; ··· 857 853 } 858 854 } 859 855 860 - out: 856 + out_mailbox: 861 857 mthca_free_mailbox(dev, mailbox); 858 + 859 + out: 860 + mutex_unlock(&qp->mutex); 862 861 return err; 863 862 } 864 863 ··· 1107 1100 1108 1101 qp->refcount = 1; 1109 1102 init_waitqueue_head(&qp->wait); 1103 + mutex_init(&qp->mutex); 1110 1104 qp->state = IB_QPS_RESET; 1111 1105 qp->atomic_rd_en = 0;
1112 1106 qp->resp_depth = 0;
+5
drivers/infiniband/hw/mthca/mthca_srq.c
··· 243 243 spin_lock_init(&srq->lock); 244 244 srq->refcount = 1; 245 245 init_waitqueue_head(&srq->wait); 246 + mutex_init(&srq->mutex); 246 247 247 248 if (mthca_is_memfree(dev)) 248 249 mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf); ··· 372 371 if (attr_mask & IB_SRQ_LIMIT) { 373 372 if (attr->srq_limit > srq->max) 374 373 return -EINVAL; 374 + 375 + mutex_lock(&srq->mutex); 375 376 ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status); 377 + mutex_unlock(&srq->mutex); 378 + 376 379 if (ret) 377 380 return ret; 378 381 if (status)