Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB: Pass uverbs_attr_bundle down uobject destroy path

Pass uverbs_attr_bundle down the uobject destroy path. The next patch will
use this to eliminate the drivers' dependency on the ib_x->uobject
pointers.

Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

authored by

Shamir Rabinovitch and committed by
Jason Gunthorpe
a6a3797d 70f06b26

+116 -78
+30 -18
drivers/infiniband/core/rdma_core.c
··· 125 125 * and consumes the kref on the uobj. 126 126 */ 127 127 static int uverbs_destroy_uobject(struct ib_uobject *uobj, 128 - enum rdma_remove_reason reason) 128 + enum rdma_remove_reason reason, 129 + struct uverbs_attr_bundle *attrs) 129 130 { 130 131 struct ib_uverbs_file *ufile = uobj->ufile; 131 132 unsigned long flags; ··· 136 135 assert_uverbs_usecnt(uobj, UVERBS_LOOKUP_WRITE); 137 136 138 137 if (uobj->object) { 139 - ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason); 138 + ret = uobj->uapi_object->type_class->destroy_hw(uobj, reason, 139 + attrs); 140 140 if (ret) { 141 141 if (ib_is_destroy_retryable(ret, reason, uobj)) 142 142 return ret; ··· 198 196 * version requires the caller to have already obtained an 199 197 * LOOKUP_DESTROY uobject kref. 200 198 */ 201 - int uobj_destroy(struct ib_uobject *uobj) 199 + int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs) 202 200 { 203 201 struct ib_uverbs_file *ufile = uobj->ufile; 204 202 int ret; ··· 209 207 if (ret) 210 208 goto out_unlock; 211 209 212 - ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY); 210 + ret = uverbs_destroy_uobject(uobj, RDMA_REMOVE_DESTROY, attrs); 213 211 if (ret) { 214 212 atomic_set(&uobj->usecnt, 0); 215 213 goto out_unlock; ··· 236 234 if (IS_ERR(uobj)) 237 235 return uobj; 238 236 239 - ret = uobj_destroy(uobj); 237 + ret = uobj_destroy(uobj, attrs); 240 238 if (ret) { 241 239 rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY); 242 240 return ERR_PTR(ret); ··· 535 533 } 536 534 537 535 static int __must_check destroy_hw_idr_uobject(struct ib_uobject *uobj, 538 - enum rdma_remove_reason why) 536 + enum rdma_remove_reason why, 537 + struct uverbs_attr_bundle *attrs) 539 538 { 540 539 const struct uverbs_obj_idr_type *idr_type = 541 540 container_of(uobj->uapi_object->type_attrs, 542 541 struct uverbs_obj_idr_type, type); 543 - int ret = idr_type->destroy_object(uobj, why); 542 + int ret = idr_type->destroy_object(uobj, why, attrs); 
544 543 545 544 /* 546 545 * We can only fail gracefully if the user requested to destroy the ··· 575 572 } 576 573 577 574 static int __must_check destroy_hw_fd_uobject(struct ib_uobject *uobj, 578 - enum rdma_remove_reason why) 575 + enum rdma_remove_reason why, 576 + struct uverbs_attr_bundle *attrs) 579 577 { 580 578 const struct uverbs_obj_fd_type *fd_type = container_of( 581 579 uobj->uapi_object->type_attrs, struct uverbs_obj_fd_type, type); ··· 652 648 * caller can no longer assume uobj is valid. If this function fails it 653 649 * destroys the uboject, including the attached HW object. 654 650 */ 655 - int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj) 651 + int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj, 652 + struct uverbs_attr_bundle *attrs) 656 653 { 657 654 struct ib_uverbs_file *ufile = uobj->ufile; 658 655 int ret; ··· 661 656 /* alloc_commit consumes the uobj kref */ 662 657 ret = uobj->uapi_object->type_class->alloc_commit(uobj); 663 658 if (ret) { 664 - uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT); 659 + uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs); 665 660 up_read(&ufile->hw_destroy_rwsem); 666 661 return ret; 667 662 } ··· 685 680 * This consumes the kref for uobj. It is up to the caller to unwind the HW 686 681 * object and anything else connected to uobj before calling this. 
687 682 */ 688 - void rdma_alloc_abort_uobject(struct ib_uobject *uobj) 683 + void rdma_alloc_abort_uobject(struct ib_uobject *uobj, 684 + struct uverbs_attr_bundle *attrs) 689 685 { 690 686 struct ib_uverbs_file *ufile = uobj->ufile; 691 687 692 688 uobj->object = NULL; 693 - uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT); 689 + uverbs_destroy_uobject(uobj, RDMA_REMOVE_ABORT, attrs); 694 690 695 691 /* Matches the down_read in rdma_alloc_begin_uobject */ 696 692 up_read(&ufile->hw_destroy_rwsem); ··· 793 787 { 794 788 struct ib_uobject *uobj = f->private_data; 795 789 struct ib_uverbs_file *ufile = uobj->ufile; 790 + struct uverbs_attr_bundle attrs = { 791 + .context = uobj->context, 792 + .ufile = ufile, 793 + }; 796 794 797 795 if (down_read_trylock(&ufile->hw_destroy_rwsem)) { 798 796 /* ··· 806 796 * write lock here, or we have a kernel bug. 807 797 */ 808 798 WARN_ON(uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE)); 809 - uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE); 799 + uverbs_destroy_uobject(uobj, RDMA_REMOVE_CLOSE, &attrs); 810 800 up_read(&ufile->hw_destroy_rwsem); 811 801 } 812 802 ··· 855 845 { 856 846 struct ib_uobject *obj, *next_obj; 857 847 int ret = -EINVAL; 848 + struct uverbs_attr_bundle attrs = { .ufile = ufile }; 858 849 859 850 /* 860 851 * This shouldn't run while executing other commands on this ··· 867 856 * other threads (which might still use the FDs) chance to run. 868 857 */ 869 858 list_for_each_entry_safe(obj, next_obj, &ufile->uobjects, list) { 859 + attrs.context = obj->context; 870 860 /* 871 861 * if we hit this WARN_ON, that means we are 872 862 * racing with a lookup_get. 
873 863 */ 874 864 WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE)); 875 - if (!uverbs_destroy_uobject(obj, reason)) 865 + if (!uverbs_destroy_uobject(obj, reason, &attrs)) 876 866 ret = 0; 877 867 else 878 868 atomic_set(&obj->usecnt, 0); ··· 978 966 } 979 967 980 968 int uverbs_finalize_object(struct ib_uobject *uobj, 981 - enum uverbs_obj_access access, 982 - bool commit) 969 + enum uverbs_obj_access access, bool commit, 970 + struct uverbs_attr_bundle *attrs) 983 971 { 984 972 int ret = 0; 985 973 ··· 1002 990 break; 1003 991 case UVERBS_ACCESS_NEW: 1004 992 if (commit) 1005 - ret = rdma_alloc_commit_uobject(uobj); 993 + ret = rdma_alloc_commit_uobject(uobj, attrs); 1006 994 else 1007 - rdma_alloc_abort_uobject(uobj); 995 + rdma_alloc_abort_uobject(uobj, attrs); 1008 996 break; 1009 997 default: 1010 998 WARN_ON(true);
+3 -3
drivers/infiniband/core/rdma_core.h
··· 48 48 void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile, 49 49 enum rdma_remove_reason reason); 50 50 51 - int uobj_destroy(struct ib_uobject *uobj); 51 + int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs); 52 52 53 53 /* 54 54 * uverbs_uobject_get is called in order to increase the reference count on ··· 102 102 * object. 103 103 */ 104 104 int uverbs_finalize_object(struct ib_uobject *uobj, 105 - enum uverbs_obj_access access, 106 - bool commit); 105 + enum uverbs_obj_access access, bool commit, 106 + struct uverbs_attr_bundle *attrs); 107 107 108 108 int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx); 109 109
+26 -26
drivers/infiniband/core/uverbs_cmd.c
··· 436 436 if (ret) 437 437 goto err_copy; 438 438 439 - return uobj_alloc_commit(uobj); 439 + return uobj_alloc_commit(uobj, attrs); 440 440 441 441 err_copy: 442 442 ib_dealloc_pd(pd); ··· 444 444 err_alloc: 445 445 kfree(pd); 446 446 err: 447 - uobj_alloc_abort(uobj); 447 + uobj_alloc_abort(uobj, attrs); 448 448 return ret; 449 449 } 450 450 ··· 633 633 634 634 mutex_unlock(&ibudev->xrcd_tree_mutex); 635 635 636 - return uobj_alloc_commit(&obj->uobject); 636 + return uobj_alloc_commit(&obj->uobject, attrs); 637 637 638 638 err_copy: 639 639 if (inode) { ··· 646 646 ib_dealloc_xrcd(xrcd); 647 647 648 648 err: 649 - uobj_alloc_abort(&obj->uobject); 649 + uobj_alloc_abort(&obj->uobject, attrs); 650 650 651 651 err_tree_mutex_unlock: 652 652 if (f.file) ··· 763 763 764 764 uobj_put_obj_read(pd); 765 765 766 - return uobj_alloc_commit(uobj); 766 + return uobj_alloc_commit(uobj, attrs); 767 767 768 768 err_copy: 769 769 ib_dereg_mr(mr); ··· 772 772 uobj_put_obj_read(pd); 773 773 774 774 err_free: 775 - uobj_alloc_abort(uobj); 775 + uobj_alloc_abort(uobj, attrs); 776 776 return ret; 777 777 } 778 778 ··· 917 917 goto err_copy; 918 918 919 919 uobj_put_obj_read(pd); 920 - return uobj_alloc_commit(uobj); 920 + return uobj_alloc_commit(uobj, attrs); 921 921 922 922 err_copy: 923 923 uverbs_dealloc_mw(mw); 924 924 err_put: 925 925 uobj_put_obj_read(pd); 926 926 err_free: 927 - uobj_alloc_abort(uobj); 927 + uobj_alloc_abort(uobj, attrs); 928 928 return ret; 929 929 } 930 930 ··· 965 965 966 966 ret = uverbs_response(attrs, &resp, sizeof(resp)); 967 967 if (ret) { 968 - uobj_alloc_abort(uobj); 968 + uobj_alloc_abort(uobj, attrs); 969 969 return ret; 970 970 } 971 971 972 - return uobj_alloc_commit(uobj); 972 + return uobj_alloc_commit(uobj, attrs); 973 973 } 974 974 975 975 static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs, ··· 1036 1036 if (ret) 1037 1037 goto err_cb; 1038 1038 1039 - ret = uobj_alloc_commit(&obj->uobject); 1039 + ret = 
uobj_alloc_commit(&obj->uobject, attrs); 1040 1040 if (ret) 1041 1041 return ERR_PTR(ret); 1042 1042 return obj; ··· 1049 1049 ib_uverbs_release_ucq(attrs->ufile, ev_file, obj); 1050 1050 1051 1051 err: 1052 - uobj_alloc_abort(&obj->uobject); 1052 + uobj_alloc_abort(&obj->uobject, attrs); 1053 1053 1054 1054 return ERR_PTR(ret); 1055 1055 } ··· 1477 1477 if (ind_tbl) 1478 1478 uobj_put_obj_read(ind_tbl); 1479 1479 1480 - return uobj_alloc_commit(&obj->uevent.uobject); 1480 + return uobj_alloc_commit(&obj->uevent.uobject, attrs); 1481 1481 err_cb: 1482 1482 ib_destroy_qp(qp); 1483 1483 ··· 1495 1495 if (ind_tbl) 1496 1496 uobj_put_obj_read(ind_tbl); 1497 1497 1498 - uobj_alloc_abort(&obj->uevent.uobject); 1498 + uobj_alloc_abort(&obj->uevent.uobject, attrs); 1499 1499 return ret; 1500 1500 } 1501 1501 ··· 1609 1609 qp->uobject = &obj->uevent.uobject; 1610 1610 uobj_put_read(xrcd_uobj); 1611 1611 1612 - return uobj_alloc_commit(&obj->uevent.uobject); 1612 + return uobj_alloc_commit(&obj->uevent.uobject, attrs); 1613 1613 1614 1614 err_destroy: 1615 1615 ib_destroy_qp(qp); 1616 1616 err_xrcd: 1617 1617 uobj_put_read(xrcd_uobj); 1618 1618 err_put: 1619 - uobj_alloc_abort(&obj->uevent.uobject); 1619 + uobj_alloc_abort(&obj->uevent.uobject, attrs); 1620 1620 return ret; 1621 1621 } 1622 1622 ··· 2451 2451 goto err_copy; 2452 2452 2453 2453 uobj_put_obj_read(pd); 2454 - return uobj_alloc_commit(uobj); 2454 + return uobj_alloc_commit(uobj, attrs); 2455 2455 2456 2456 err_copy: 2457 2457 rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE); ··· 2460 2460 uobj_put_obj_read(pd); 2461 2461 2462 2462 err: 2463 - uobj_alloc_abort(uobj); 2463 + uobj_alloc_abort(uobj, attrs); 2464 2464 return ret; 2465 2465 } 2466 2466 ··· 2962 2962 2963 2963 uobj_put_obj_read(pd); 2964 2964 uobj_put_obj_read(cq); 2965 - return uobj_alloc_commit(&obj->uevent.uobject); 2965 + return uobj_alloc_commit(&obj->uevent.uobject, attrs); 2966 2966 2967 2967 err_copy: 2968 2968 ib_destroy_wq(wq); ··· 2971 2971 
err_put_pd: 2972 2972 uobj_put_obj_read(pd); 2973 2973 err_uobj: 2974 - uobj_alloc_abort(&obj->uevent.uobject); 2974 + uobj_alloc_abort(&obj->uevent.uobject, attrs); 2975 2975 2976 2976 return err; 2977 2977 } ··· 3136 3136 for (j = 0; j < num_read_wqs; j++) 3137 3137 uobj_put_obj_read(wqs[j]); 3138 3138 3139 - return uobj_alloc_commit(uobj); 3139 + return uobj_alloc_commit(uobj, attrs); 3140 3140 3141 3141 err_copy: 3142 3142 ib_destroy_rwq_ind_table(rwq_ind_tbl); 3143 3143 err_uobj: 3144 - uobj_alloc_abort(uobj); 3144 + uobj_alloc_abort(uobj, attrs); 3145 3145 put_wqs: 3146 3146 for (j = 0; j < num_read_wqs; j++) 3147 3147 uobj_put_obj_read(wqs[j]); ··· 3314 3314 kfree(flow_attr); 3315 3315 if (cmd.flow_attr.num_of_specs) 3316 3316 kfree(kern_flow_attr); 3317 - return uobj_alloc_commit(uobj); 3317 + return uobj_alloc_commit(uobj, attrs); 3318 3318 err_copy: 3319 3319 if (!qp->device->ops.destroy_flow(flow_id)) 3320 3320 atomic_dec(&qp->usecnt); ··· 3325 3325 err_put: 3326 3326 uobj_put_obj_read(qp); 3327 3327 err_uobj: 3328 - uobj_alloc_abort(uobj); 3328 + uobj_alloc_abort(uobj, attrs); 3329 3329 err_free_attr: 3330 3330 if (cmd.flow_attr.num_of_specs) 3331 3331 kfree(kern_flow_attr); ··· 3458 3458 uobj_put_obj_read(attr.ext.cq); 3459 3459 3460 3460 uobj_put_obj_read(pd); 3461 - return uobj_alloc_commit(&obj->uevent.uobject); 3461 + return uobj_alloc_commit(&obj->uevent.uobject, attrs); 3462 3462 3463 3463 err_copy: 3464 3464 ib_destroy_srq(srq); ··· 3477 3477 } 3478 3478 3479 3479 err: 3480 - uobj_alloc_abort(&obj->uevent.uobject); 3480 + uobj_alloc_abort(&obj->uevent.uobject, attrs); 3481 3481 return ret; 3482 3482 } 3483 3483
+9 -6
drivers/infiniband/core/uverbs_ioctl.c
··· 222 222 223 223 static int uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi, 224 224 struct uverbs_objs_arr_attr *attr, 225 - bool commit) 225 + bool commit, struct uverbs_attr_bundle *attrs) 226 226 { 227 227 const struct uverbs_attr_spec *spec = &attr_uapi->spec; 228 228 int current_ret; ··· 230 230 size_t i; 231 231 232 232 for (i = 0; i != attr->len; i++) { 233 - current_ret = uverbs_finalize_object( 234 - attr->uobjects[i], spec->u2.objs_arr.access, commit); 233 + current_ret = uverbs_finalize_object(attr->uobjects[i], 234 + spec->u2.objs_arr.access, 235 + commit, attrs); 235 236 if (!ret) 236 237 ret = current_ret; 237 238 } ··· 458 457 struct uverbs_obj_attr *destroy_attr = 459 458 &pbundle->bundle.attrs[destroy_bkey].obj_attr; 460 459 461 - ret = uobj_destroy(destroy_attr->uobject); 460 + ret = uobj_destroy(destroy_attr->uobject, &pbundle->bundle); 462 461 if (ret) 463 462 return ret; 464 463 __clear_bit(destroy_bkey, pbundle->uobj_finalize); ··· 509 508 510 509 current_ret = uverbs_finalize_object( 511 510 attr->obj_attr.uobject, 512 - attr->obj_attr.attr_elm->spec.u.obj.access, commit); 511 + attr->obj_attr.attr_elm->spec.u.obj.access, commit, 512 + &pbundle->bundle); 513 513 if (!ret) 514 514 ret = current_ret; 515 515 } ··· 533 531 534 532 if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) { 535 533 current_ret = uverbs_free_idrs_array( 536 - attr_uapi, &attr->objs_arr_attr, commit); 534 + attr_uapi, &attr->objs_arr_attr, commit, 535 + &pbundle->bundle); 537 536 if (!ret) 538 537 ret = current_ret; 539 538 }
+18 -9
drivers/infiniband/core/uverbs_std_types.c
··· 40 40 #include "uverbs.h" 41 41 42 42 static int uverbs_free_ah(struct ib_uobject *uobject, 43 - enum rdma_remove_reason why) 43 + enum rdma_remove_reason why, 44 + struct uverbs_attr_bundle *attrs) 44 45 { 45 46 return rdma_destroy_ah((struct ib_ah *)uobject->object, 46 47 RDMA_DESTROY_AH_SLEEPABLE); 47 48 } 48 49 49 50 static int uverbs_free_flow(struct ib_uobject *uobject, 50 - enum rdma_remove_reason why) 51 + enum rdma_remove_reason why, 52 + struct uverbs_attr_bundle *attrs) 51 53 { 52 54 struct ib_flow *flow = (struct ib_flow *)uobject->object; 53 55 struct ib_uflow_object *uflow = ··· 68 66 } 69 67 70 68 static int uverbs_free_mw(struct ib_uobject *uobject, 71 - enum rdma_remove_reason why) 69 + enum rdma_remove_reason why, 70 + struct uverbs_attr_bundle *attrs) 72 71 { 73 72 return uverbs_dealloc_mw((struct ib_mw *)uobject->object); 74 73 } 75 74 76 75 static int uverbs_free_qp(struct ib_uobject *uobject, 77 - enum rdma_remove_reason why) 76 + enum rdma_remove_reason why, 77 + struct uverbs_attr_bundle *attrs) 78 78 { 79 79 struct ib_qp *qp = uobject->object; 80 80 struct ib_uqp_object *uqp = ··· 109 105 } 110 106 111 107 static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject, 112 - enum rdma_remove_reason why) 108 + enum rdma_remove_reason why, 109 + struct uverbs_attr_bundle *attrs) 113 110 { 114 111 struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object; 115 112 struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl; ··· 125 120 } 126 121 127 122 static int uverbs_free_wq(struct ib_uobject *uobject, 128 - enum rdma_remove_reason why) 123 + enum rdma_remove_reason why, 124 + struct uverbs_attr_bundle *attrs) 129 125 { 130 126 struct ib_wq *wq = uobject->object; 131 127 struct ib_uwq_object *uwq = ··· 142 136 } 143 137 144 138 static int uverbs_free_srq(struct ib_uobject *uobject, 145 - enum rdma_remove_reason why) 139 + enum rdma_remove_reason why, 140 + struct uverbs_attr_bundle *attrs) 146 141 { 147 142 struct ib_srq *srq = uobject->object; 148 143 
struct ib_uevent_object *uevent = ··· 167 160 } 168 161 169 162 static int uverbs_free_xrcd(struct ib_uobject *uobject, 170 - enum rdma_remove_reason why) 163 + enum rdma_remove_reason why, 164 + struct uverbs_attr_bundle *attrs) 171 165 { 172 166 struct ib_xrcd *xrcd = uobject->object; 173 167 struct ib_uxrcd_object *uxrcd = ··· 187 179 } 188 180 189 181 static int uverbs_free_pd(struct ib_uobject *uobject, 190 - enum rdma_remove_reason why) 182 + enum rdma_remove_reason why, 183 + struct uverbs_attr_bundle *attrs) 191 184 { 192 185 struct ib_pd *pd = uobject->object; 193 186 int ret;
+2 -1
drivers/infiniband/core/uverbs_std_types_counters.c
··· 36 36 #include <rdma/uverbs_std_types.h> 37 37 38 38 static int uverbs_free_counters(struct ib_uobject *uobject, 39 - enum rdma_remove_reason why) 39 + enum rdma_remove_reason why, 40 + struct uverbs_attr_bundle *attrs) 40 41 { 41 42 struct ib_counters *counters = uobject->object; 42 43 int ret;
+2 -1
drivers/infiniband/core/uverbs_std_types_cq.c
··· 35 35 #include "uverbs.h" 36 36 37 37 static int uverbs_free_cq(struct ib_uobject *uobject, 38 - enum rdma_remove_reason why) 38 + enum rdma_remove_reason why, 39 + struct uverbs_attr_bundle *attrs) 39 40 { 40 41 struct ib_cq *cq = uobject->object; 41 42 struct ib_uverbs_event_queue *ev_queue = cq->cq_context;
+2 -1
drivers/infiniband/core/uverbs_std_types_dm.c
··· 35 35 #include <rdma/uverbs_std_types.h> 36 36 37 37 static int uverbs_free_dm(struct ib_uobject *uobject, 38 - enum rdma_remove_reason why) 38 + enum rdma_remove_reason why, 39 + struct uverbs_attr_bundle *attrs) 39 40 { 40 41 struct ib_dm *dm = uobject->object; 41 42 int ret;
+2 -1
drivers/infiniband/core/uverbs_std_types_flow_action.c
··· 35 35 #include <rdma/uverbs_std_types.h> 36 36 37 37 static int uverbs_free_flow_action(struct ib_uobject *uobject, 38 - enum rdma_remove_reason why) 38 + enum rdma_remove_reason why, 39 + struct uverbs_attr_bundle *attrs) 39 40 { 40 41 struct ib_flow_action *action = uobject->object; 41 42 int ret;
+2 -1
drivers/infiniband/core/uverbs_std_types_mr.c
··· 35 35 #include <rdma/uverbs_std_types.h> 36 36 37 37 static int uverbs_free_mr(struct ib_uobject *uobject, 38 - enum rdma_remove_reason why) 38 + enum rdma_remove_reason why, 39 + struct uverbs_attr_bundle *attrs) 39 40 { 40 41 return ib_dereg_mr((struct ib_mr *)uobject->object); 41 42 }
+4 -2
drivers/infiniband/hw/mlx5/devx.c
··· 1117 1117 } 1118 1118 1119 1119 static int devx_obj_cleanup(struct ib_uobject *uobject, 1120 - enum rdma_remove_reason why) 1120 + enum rdma_remove_reason why, 1121 + struct uverbs_attr_bundle *attrs) 1121 1122 { 1122 1123 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; 1123 1124 struct devx_obj *obj = uobject->object; ··· 1600 1599 } 1601 1600 1602 1601 static int devx_umem_cleanup(struct ib_uobject *uobject, 1603 - enum rdma_remove_reason why) 1602 + enum rdma_remove_reason why, 1603 + struct uverbs_attr_bundle *attrs) 1604 1604 { 1605 1605 struct devx_umem *obj = uobject->object; 1606 1606 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+2 -1
drivers/infiniband/hw/mlx5/flow.c
··· 189 189 } 190 190 191 191 static int flow_matcher_cleanup(struct ib_uobject *uobject, 192 - enum rdma_remove_reason why) 192 + enum rdma_remove_reason why, 193 + struct uverbs_attr_bundle *attrs) 193 194 { 194 195 struct mlx5_ib_flow_matcher *obj = uobject->object; 195 196 int ret;
+6 -4
include/rdma/uverbs_std_types.h
··· 104 104 rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE); 105 105 } 106 106 107 - static inline int __must_check uobj_alloc_commit(struct ib_uobject *uobj) 107 + static inline int __must_check 108 + uobj_alloc_commit(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs) 108 109 { 109 - int ret = rdma_alloc_commit_uobject(uobj); 110 + int ret = rdma_alloc_commit_uobject(uobj, attrs); 110 111 111 112 if (ret) 112 113 return ret; 113 114 return 0; 114 115 } 115 116 116 - static inline void uobj_alloc_abort(struct ib_uobject *uobj) 117 + static inline void uobj_alloc_abort(struct ib_uobject *uobj, 118 + struct uverbs_attr_bundle *attrs) 117 119 { 118 - rdma_alloc_abort_uobject(uobj); 120 + rdma_alloc_abort_uobject(uobj, attrs); 119 121 } 120 122 121 123 static inline struct ib_uobject *
+8 -4
include/rdma/uverbs_types.h
··· 95 95 void (*lookup_put)(struct ib_uobject *uobj, enum rdma_lookup_mode mode); 96 96 /* This does not consume the kref on uobj */ 97 97 int __must_check (*destroy_hw)(struct ib_uobject *uobj, 98 - enum rdma_remove_reason why); 98 + enum rdma_remove_reason why, 99 + struct uverbs_attr_bundle *attrs); 99 100 void (*remove_handle)(struct ib_uobject *uobj); 100 101 u8 needs_kfree_rcu; 101 102 }; ··· 127 126 * completely unchanged. 128 127 */ 129 128 int __must_check (*destroy_object)(struct ib_uobject *uobj, 130 - enum rdma_remove_reason why); 129 + enum rdma_remove_reason why, 130 + struct uverbs_attr_bundle *attrs); 131 131 }; 132 132 133 133 struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj, ··· 140 138 struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj, 141 139 struct ib_uverbs_file *ufile, 142 140 struct uverbs_attr_bundle *attrs); 143 - void rdma_alloc_abort_uobject(struct ib_uobject *uobj); 144 - int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj); 141 + void rdma_alloc_abort_uobject(struct ib_uobject *uobj, 142 + struct uverbs_attr_bundle *attrs); 143 + int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj, 144 + struct uverbs_attr_bundle *attrs); 145 145 146 146 struct uverbs_obj_fd_type { 147 147 /*