Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
"Not much so far. We have the usual batch of bugs and two fixes to code
merged this cycle:

- Restore valgrind support for the ioctl verbs interface merged this
window, and fix a missed error code on an error path from that
conversion

- A user reported crash on obsolete mthca hardware

- pvrdma was using the wrong command opcode toward the hypervisor

- NULL pointer crash regression when dumping rdma-cm over netlink

- Be conservative about exposing the global rkey"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
RDMA/uverbs: Mark ioctl responses with UVERBS_ATTR_F_VALID_OUTPUT
RDMA/mthca: Clear QP objects during their allocation
RDMA/vmw_pvrdma: Return the correct opcode when creating WR
RDMA/cma: Add cm_id restrack resource based on kernel or user cm_id type
RDMA/nldev: Don't expose unsafe global rkey to regular user
RDMA/uverbs: Fix post send success return value in case of error

Changed files
+109 -22
drivers
include
uapi
+4 -1
drivers/infiniband/core/cma.c
··· 494 494 id_priv->id.route.addr.dev_addr.transport = 495 495 rdma_node_get_transport(cma_dev->device->node_type); 496 496 list_add_tail(&id_priv->list, &cma_dev->id_list); 497 - rdma_restrack_kadd(&id_priv->res); 497 + if (id_priv->res.kern_name) 498 + rdma_restrack_kadd(&id_priv->res); 499 + else 500 + rdma_restrack_uadd(&id_priv->res); 498 501 } 499 502 500 503 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
-4
drivers/infiniband/core/nldev.c
··· 584 584 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, 585 585 atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) 586 586 goto err; 587 - if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && 588 - nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, 589 - pd->unsafe_global_rkey)) 590 - goto err; 591 587 592 588 if (fill_res_name_pid(msg, res)) 593 589 goto err;
+2
drivers/infiniband/core/rdma_core.h
··· 106 106 enum uverbs_obj_access access, 107 107 bool commit); 108 108 109 + int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx); 110 + 109 111 void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); 110 112 void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); 111 113
+10 -1
drivers/infiniband/core/uverbs_cmd.c
··· 60 60 { 61 61 int ret; 62 62 63 + if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) 64 + return uverbs_copy_to_struct_or_zero( 65 + attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len); 66 + 63 67 if (copy_to_user(attrs->ucore.outbuf, resp, 64 68 min(attrs->ucore.outlen, resp_len))) 65 69 return -EFAULT; ··· 1185 1181 goto out_put; 1186 1182 } 1187 1183 1184 + if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT)) 1185 + ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT); 1186 + 1188 1187 ret = 0; 1189 1188 1190 1189 out_put: ··· 2019 2012 return -ENOMEM; 2020 2013 2021 2014 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); 2022 - if (!qp) 2015 + if (!qp) { 2016 + ret = -EINVAL; 2023 2017 goto out; 2018 + } 2024 2019 2025 2020 is_ud = qp->qp_type == IB_QPT_UD; 2026 2021 sg_ind = 0;
+49 -13
drivers/infiniband/core/uverbs_ioctl.c
··· 144 144 0, uattr->len - len); 145 145 } 146 146 147 + static int uverbs_set_output(const struct uverbs_attr_bundle *bundle, 148 + const struct uverbs_attr *attr) 149 + { 150 + struct bundle_priv *pbundle = 151 + container_of(bundle, struct bundle_priv, bundle); 152 + u16 flags; 153 + 154 + flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | 155 + UVERBS_ATTR_F_VALID_OUTPUT; 156 + if (put_user(flags, 157 + &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) 158 + return -EFAULT; 159 + return 0; 160 + } 161 + 147 162 static int uverbs_process_idrs_array(struct bundle_priv *pbundle, 148 163 const struct uverbs_api_attr *attr_uapi, 149 164 struct uverbs_objs_arr_attr *attr, ··· 471 456 } 472 457 473 458 /* 459 + * Until the drivers are revised to use the bundle directly we have to 460 + * assume that the driver wrote to its UHW_OUT and flag userspace 461 + * appropriately. 462 + */ 463 + if (!ret && pbundle->method_elm->has_udata) { 464 + const struct uverbs_attr *attr = 465 + uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT); 466 + 467 + if (!IS_ERR(attr)) 468 + ret = uverbs_set_output(&pbundle->bundle, attr); 469 + } 470 + 471 + /* 474 472 * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can 475 473 * not invoke the method because the request is not supported. No 476 474 * other cases should return this code. 
··· 734 706 int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, 735 707 const void *from, size_t size) 736 708 { 737 - struct bundle_priv *pbundle = 738 - container_of(bundle, struct bundle_priv, bundle); 739 709 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); 740 - u16 flags; 741 710 size_t min_size; 742 711 743 712 if (IS_ERR(attr)) ··· 744 719 if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) 745 720 return -EFAULT; 746 721 747 - flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | 748 - UVERBS_ATTR_F_VALID_OUTPUT; 749 - if (put_user(flags, 750 - &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags)) 751 - return -EFAULT; 752 - 753 - return 0; 722 + return uverbs_set_output(bundle, attr); 754 723 } 755 724 EXPORT_SYMBOL(uverbs_copy_to); 725 + 726 + 727 + /* 728 + * This is only used if the caller has directly used copy_to_use to write the 729 + * data. It signals to user space that the buffer is filled in. 730 + */ 731 + int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx) 732 + { 733 + const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); 734 + 735 + if (IS_ERR(attr)) 736 + return PTR_ERR(attr); 737 + 738 + return uverbs_set_output(bundle, attr); 739 + } 756 740 757 741 int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, 758 742 size_t idx, s64 lower_bound, u64 upper_bound, ··· 791 757 { 792 758 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); 793 759 794 - if (clear_user(u64_to_user_ptr(attr->ptr_attr.data), 795 - attr->ptr_attr.len)) 796 - return -EFAULT; 760 + if (size < attr->ptr_attr.len) { 761 + if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size, 762 + attr->ptr_attr.len - size)) 763 + return -EFAULT; 764 + } 797 765 return uverbs_copy_to(bundle, idx, from, size); 798 766 }
+1
drivers/infiniband/core/uverbs_main.c
··· 690 690 691 691 buf += sizeof(hdr); 692 692 693 + memset(bundle.attr_present, 0, sizeof(bundle.attr_present)); 693 694 bundle.ufile = file; 694 695 if (!method_elm->is_ex) { 695 696 size_t in_len = hdr.in_words * 4 - sizeof(hdr);
+2 -2
drivers/infiniband/hw/mthca/mthca_provider.c
··· 534 534 { 535 535 struct mthca_ucontext *context; 536 536 537 - qp = kmalloc(sizeof *qp, GFP_KERNEL); 537 + qp = kzalloc(sizeof(*qp), GFP_KERNEL); 538 538 if (!qp) 539 539 return ERR_PTR(-ENOMEM); 540 540 ··· 600 600 if (udata) 601 601 return ERR_PTR(-EINVAL); 602 602 603 - qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); 603 + qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL); 604 604 if (!qp) 605 605 return ERR_PTR(-ENOMEM); 606 606
+34 -1
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
··· 427 427 428 428 static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) 429 429 { 430 - return (enum pvrdma_wr_opcode)op; 430 + switch (op) { 431 + case IB_WR_RDMA_WRITE: 432 + return PVRDMA_WR_RDMA_WRITE; 433 + case IB_WR_RDMA_WRITE_WITH_IMM: 434 + return PVRDMA_WR_RDMA_WRITE_WITH_IMM; 435 + case IB_WR_SEND: 436 + return PVRDMA_WR_SEND; 437 + case IB_WR_SEND_WITH_IMM: 438 + return PVRDMA_WR_SEND_WITH_IMM; 439 + case IB_WR_RDMA_READ: 440 + return PVRDMA_WR_RDMA_READ; 441 + case IB_WR_ATOMIC_CMP_AND_SWP: 442 + return PVRDMA_WR_ATOMIC_CMP_AND_SWP; 443 + case IB_WR_ATOMIC_FETCH_AND_ADD: 444 + return PVRDMA_WR_ATOMIC_FETCH_AND_ADD; 445 + case IB_WR_LSO: 446 + return PVRDMA_WR_LSO; 447 + case IB_WR_SEND_WITH_INV: 448 + return PVRDMA_WR_SEND_WITH_INV; 449 + case IB_WR_RDMA_READ_WITH_INV: 450 + return PVRDMA_WR_RDMA_READ_WITH_INV; 451 + case IB_WR_LOCAL_INV: 452 + return PVRDMA_WR_LOCAL_INV; 453 + case IB_WR_REG_MR: 454 + return PVRDMA_WR_FAST_REG_MR; 455 + case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: 456 + return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP; 457 + case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: 458 + return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD; 459 + case IB_WR_REG_SIG_MR: 460 + return PVRDMA_WR_REG_SIG_MR; 461 + default: 462 + return PVRDMA_WR_ERROR; 463 + } 431 464 } 432 465 433 466 static inline enum ib_wc_status pvrdma_wc_status_to_ib(
+6
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
··· 721 721 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 722 722 wqe_hdr->ex.imm_data = wr->ex.imm_data; 723 723 724 + if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) { 725 + *bad_wr = wr; 726 + ret = -EINVAL; 727 + goto out; 728 + } 729 + 724 730 switch (qp->ibqp.qp_type) { 725 731 case IB_QPT_GSI: 726 732 case IB_QPT_UD:
+1
include/uapi/rdma/vmw_pvrdma-abi.h
··· 78 78 PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, 79 79 PVRDMA_WR_BIND_MW, 80 80 PVRDMA_WR_REG_SIG_MR, 81 + PVRDMA_WR_ERROR, 81 82 }; 82 83 83 84 enum pvrdma_wc_status {