Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/restrack: Improve readability in task name management

Use rdma_restrack_set_name() and rdma_restrack_parent_name() instead of
tricky uses of rdma_restrack_attach_task()/rdma_restrack_uadd().

This uniformly makes all restrack entries added through rdma_restrack_add().

Link: https://lore.kernel.org/r/20200922091106.2152715-6-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

Authored by Leon Romanovsky and committed by Jason Gunthorpe
b09c4d70 c34a23c2

+172 -159
+88 -46
drivers/infiniband/core/cma.c
··· 453 453 id_priv->id.route.addr.dev_addr.transport = 454 454 rdma_node_get_transport(cma_dev->device->node_type); 455 455 list_add_tail(&id_priv->list, &cma_dev->id_list); 456 - if (id_priv->res.kern_name) 457 - rdma_restrack_add(&id_priv->res); 458 - else 459 - rdma_restrack_uadd(&id_priv->res); 456 + rdma_restrack_add(&id_priv->res); 457 + 460 458 trace_cm_id_attach(id_priv, cma_dev->device); 461 459 } 462 460 ··· 820 822 complete(&id_priv->comp); 821 823 } 822 824 823 - struct rdma_cm_id *__rdma_create_id(struct net *net, 824 - rdma_cm_event_handler event_handler, 825 - void *context, enum rdma_ucm_port_space ps, 826 - enum ib_qp_type qp_type, const char *caller) 825 + static struct rdma_id_private * 826 + __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler, 827 + void *context, enum rdma_ucm_port_space ps, 828 + enum ib_qp_type qp_type, const struct rdma_id_private *parent) 827 829 { 828 830 struct rdma_id_private *id_priv; 829 831 ··· 851 853 id_priv->seq_num &= 0x00ffffff; 852 854 853 855 rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID); 854 - rdma_restrack_set_task(&id_priv->res, caller); 856 + if (parent) 857 + rdma_restrack_parent_name(&id_priv->res, &parent->res); 855 858 856 - return &id_priv->id; 859 + return id_priv; 857 860 } 858 - EXPORT_SYMBOL(__rdma_create_id); 861 + 862 + struct rdma_cm_id * 863 + __rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler, 864 + void *context, enum rdma_ucm_port_space ps, 865 + enum ib_qp_type qp_type, const char *caller) 866 + { 867 + struct rdma_id_private *ret; 868 + 869 + ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL); 870 + if (IS_ERR(ret)) 871 + return ERR_CAST(ret); 872 + 873 + rdma_restrack_set_name(&ret->res, caller); 874 + return &ret->id; 875 + } 876 + EXPORT_SYMBOL(__rdma_create_kernel_id); 877 + 878 + struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler, 879 + void *context, 880 + enum rdma_ucm_port_space ps, 881 
+ enum ib_qp_type qp_type) 882 + { 883 + struct rdma_id_private *ret; 884 + 885 + ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context, 886 + ps, qp_type, NULL); 887 + if (IS_ERR(ret)) 888 + return ERR_CAST(ret); 889 + 890 + rdma_restrack_set_name(&ret->res, NULL); 891 + return &ret->id; 892 + } 893 + EXPORT_SYMBOL(rdma_create_user_id); 859 894 860 895 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) 861 896 { ··· 2060 2029 int ret; 2061 2030 2062 2031 listen_id_priv = container_of(listen_id, struct rdma_id_private, id); 2063 - id = __rdma_create_id(listen_id->route.addr.dev_addr.net, 2064 - listen_id->event_handler, listen_id->context, 2065 - listen_id->ps, ib_event->param.req_rcvd.qp_type, 2066 - listen_id_priv->res.kern_name); 2067 - if (IS_ERR(id)) 2032 + id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net, 2033 + listen_id->event_handler, listen_id->context, 2034 + listen_id->ps, 2035 + ib_event->param.req_rcvd.qp_type, 2036 + listen_id_priv); 2037 + if (IS_ERR(id_priv)) 2068 2038 return NULL; 2069 2039 2070 - id_priv = container_of(id, struct rdma_id_private, id); 2040 + id = &id_priv->id; 2071 2041 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, 2072 2042 (struct sockaddr *)&id->route.addr.dst_addr, 2073 2043 listen_id, ib_event, ss_family, service_id)) ··· 2122 2090 int ret; 2123 2091 2124 2092 listen_id_priv = container_of(listen_id, struct rdma_id_private, id); 2125 - id = __rdma_create_id(net, listen_id->event_handler, listen_id->context, 2126 - listen_id->ps, IB_QPT_UD, 2127 - listen_id_priv->res.kern_name); 2128 - if (IS_ERR(id)) 2093 + id_priv = __rdma_create_id(net, listen_id->event_handler, 2094 + listen_id->context, listen_id->ps, IB_QPT_UD, 2095 + listen_id_priv); 2096 + if (IS_ERR(id_priv)) 2129 2097 return NULL; 2130 2098 2131 - id_priv = container_of(id, struct rdma_id_private, id); 2099 + id = &id_priv->id; 2132 2100 if (cma_save_net_info((struct sockaddr 
*)&id->route.addr.src_addr, 2133 2101 (struct sockaddr *)&id->route.addr.dst_addr, 2134 2102 listen_id, ib_event, ss_family, ··· 2364 2332 static int iw_conn_req_handler(struct iw_cm_id *cm_id, 2365 2333 struct iw_cm_event *iw_event) 2366 2334 { 2367 - struct rdma_cm_id *new_cm_id; 2368 2335 struct rdma_id_private *listen_id, *conn_id; 2369 2336 struct rdma_cm_event event = {}; 2370 2337 int ret = -ECONNABORTED; ··· 2383 2352 goto out; 2384 2353 2385 2354 /* Create a new RDMA id for the new IW CM ID */ 2386 - new_cm_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net, 2387 - listen_id->id.event_handler, 2388 - listen_id->id.context, 2389 - RDMA_PS_TCP, IB_QPT_RC, 2390 - listen_id->res.kern_name); 2391 - if (IS_ERR(new_cm_id)) { 2355 + conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net, 2356 + listen_id->id.event_handler, 2357 + listen_id->id.context, RDMA_PS_TCP, 2358 + IB_QPT_RC, listen_id); 2359 + if (IS_ERR(conn_id)) { 2392 2360 ret = -ENOMEM; 2393 2361 goto out; 2394 2362 } 2395 - conn_id = container_of(new_cm_id, struct rdma_id_private, id); 2396 2363 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 2397 2364 conn_id->state = RDMA_CM_CONNECT; 2398 2365 ··· 2495 2466 struct cma_device *cma_dev) 2496 2467 { 2497 2468 struct rdma_id_private *dev_id_priv; 2498 - struct rdma_cm_id *id; 2499 2469 struct net *net = id_priv->id.route.addr.dev_addr.net; 2500 2470 int ret; 2501 2471 ··· 2503 2475 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) 2504 2476 return; 2505 2477 2506 - id = __rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps, 2507 - id_priv->id.qp_type, id_priv->res.kern_name); 2508 - if (IS_ERR(id)) 2478 + dev_id_priv = 2479 + __rdma_create_id(net, cma_listen_handler, id_priv, 2480 + id_priv->id.ps, id_priv->id.qp_type, id_priv); 2481 + if (IS_ERR(dev_id_priv)) 2509 2482 return; 2510 - 2511 - dev_id_priv = container_of(id, struct rdma_id_private, id); 2512 2483 2513 2484 
dev_id_priv->state = RDMA_CM_ADDR_BOUND; 2514 2485 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), ··· 2521 2494 dev_id_priv->tos_set = id_priv->tos_set; 2522 2495 dev_id_priv->tos = id_priv->tos; 2523 2496 2524 - ret = rdma_listen(id, id_priv->backlog); 2497 + ret = rdma_listen(&dev_id_priv->id, id_priv->backlog); 2525 2498 if (ret) 2526 2499 dev_warn(&cma_dev->device->dev, 2527 2500 "RDMA CMA: cma_listen_on_dev, error %d\n", ret); ··· 4176 4149 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); 4177 4150 } 4178 4151 4179 - int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, 4180 - const char *caller) 4152 + /** 4153 + * rdma_accept - Called to accept a connection request or response. 4154 + * @id: Connection identifier associated with the request. 4155 + * @conn_param: Information needed to establish the connection. This must be 4156 + * provided if accepting a connection request. If accepting a connection 4157 + * response, this parameter must be NULL. 4158 + * 4159 + * Typically, this routine is only called by the listener to accept a connection 4160 + * request. It must also be called on the active side of a connection if the 4161 + * user is performing their own QP transitions. 4162 + * 4163 + * In the case of error, a reject message is sent to the remote side and the 4164 + * state of the qp associated with the id is modified to error, such that any 4165 + * previously posted receive buffers would be flushed. 4166 + * 4167 + * This function is for use by kernel ULPs and must be called from under the 4168 + * handler callback. 
4169 + */ 4170 + int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 4181 4171 { 4182 4172 struct rdma_id_private *id_priv = 4183 4173 container_of(id, struct rdma_id_private, id); 4184 4174 int ret; 4185 4175 4186 4176 lockdep_assert_held(&id_priv->handler_mutex); 4187 - 4188 - rdma_restrack_set_task(&id_priv->res, caller); 4189 4177 4190 4178 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) 4191 4179 return -EINVAL; ··· 4240 4198 rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED); 4241 4199 return ret; 4242 4200 } 4243 - EXPORT_SYMBOL(__rdma_accept); 4201 + EXPORT_SYMBOL(rdma_accept); 4244 4202 4245 - int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, 4246 - const char *caller, struct rdma_ucm_ece *ece) 4203 + int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, 4204 + struct rdma_ucm_ece *ece) 4247 4205 { 4248 4206 struct rdma_id_private *id_priv = 4249 4207 container_of(id, struct rdma_id_private, id); ··· 4251 4209 id_priv->ece.vendor_id = ece->vendor_id; 4252 4210 id_priv->ece.attr_mod = ece->attr_mod; 4253 4211 4254 - return __rdma_accept(id, conn_param, caller); 4212 + return rdma_accept(id, conn_param); 4255 4213 } 4256 - EXPORT_SYMBOL(__rdma_accept_ece); 4214 + EXPORT_SYMBOL(rdma_accept_ece); 4257 4215 4258 4216 void rdma_lock_handler(struct rdma_cm_id *id) 4259 4217 {
+3 -9
drivers/infiniband/core/core_priv.h
··· 361 361 */ 362 362 is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT; 363 363 if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) { 364 - if (uobj) 365 - rdma_restrack_uadd(&qp->res); 366 - else { 367 - rdma_restrack_set_task(&qp->res, pd->res.kern_name); 368 - rdma_restrack_add(&qp->res); 369 - } 370 - } else 371 - qp->res.valid = false; 372 - 364 + rdma_restrack_parent_name(&qp->res, &pd->res); 365 + rdma_restrack_add(&qp->res); 366 + } 373 367 return qp; 374 368 } 375 369
+2 -7
drivers/infiniband/core/counters.c
··· 250 250 static void rdma_counter_res_add(struct rdma_counter *counter, 251 251 struct ib_qp *qp) 252 252 { 253 - if (rdma_is_kernel_res(&qp->res)) { 254 - rdma_restrack_set_task(&counter->res, qp->res.kern_name); 255 - rdma_restrack_add(&counter->res); 256 - } else { 257 - rdma_restrack_attach_task(&counter->res, qp->res.task); 258 - rdma_restrack_uadd(&counter->res); 259 - } 253 + rdma_restrack_parent_name(&counter->res, &qp->res); 254 + rdma_restrack_add(&counter->res); 260 255 } 261 256 262 257 static void counter_release(struct kref *kref)
+1 -1
drivers/infiniband/core/cq.c
··· 236 236 goto out_free_cq; 237 237 238 238 rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); 239 - rdma_restrack_set_task(&cq->res, caller); 239 + rdma_restrack_set_name(&cq->res, caller); 240 240 241 241 ret = dev->ops.create_cq(cq, &cq_attr, NULL); 242 242 if (ret)
+38 -35
drivers/infiniband/core/restrack.c
··· 147 147 } 148 148 } 149 149 150 - void rdma_restrack_set_task(struct rdma_restrack_entry *res, 151 - const char *caller) 150 + /** 151 + * rdma_restrack_attach_task() - attach the task onto this resource, 152 + * valid for user space restrack entries. 153 + * @res: resource entry 154 + * @task: the task to attach 155 + */ 156 + static void rdma_restrack_attach_task(struct rdma_restrack_entry *res, 157 + struct task_struct *task) 158 + { 159 + if (WARN_ON_ONCE(!task)) 160 + return; 161 + 162 + if (res->task) 163 + put_task_struct(res->task); 164 + get_task_struct(task); 165 + res->task = task; 166 + res->user = true; 167 + } 168 + 169 + /** 170 + * rdma_restrack_set_name() - set the task for this resource 171 + * @res: resource entry 172 + * @caller: kernel name, the current task will be used if the caller is NULL. 173 + */ 174 + void rdma_restrack_set_name(struct rdma_restrack_entry *res, const char *caller) 152 175 { 153 176 if (caller) { 154 177 res->kern_name = caller; 155 178 return; 156 179 } 157 180 158 - if (res->task) 159 - put_task_struct(res->task); 160 - get_task_struct(current); 161 - res->task = current; 181 + rdma_restrack_attach_task(res, current); 162 182 } 163 - EXPORT_SYMBOL(rdma_restrack_set_task); 183 + EXPORT_SYMBOL(rdma_restrack_set_name); 164 184 165 185 /** 166 - * rdma_restrack_attach_task() - attach the task onto this resource 167 - * @res: resource entry 168 - * @task: the task to attach, the current task will be used if it is NULL. 
186 + * rdma_restrack_parent_name() - set the restrack name properties based 187 + * on parent restrack 188 + * @dst: destination resource entry 189 + * @parent: parent resource entry 169 190 */ 170 - void rdma_restrack_attach_task(struct rdma_restrack_entry *res, 171 - struct task_struct *task) 191 + void rdma_restrack_parent_name(struct rdma_restrack_entry *dst, 192 + const struct rdma_restrack_entry *parent) 172 193 { 173 - if (res->task) 174 - put_task_struct(res->task); 175 - get_task_struct(task); 176 - res->task = task; 194 + if (rdma_is_kernel_res(parent)) 195 + dst->kern_name = parent->kern_name; 196 + else 197 + rdma_restrack_attach_task(dst, parent->task); 177 198 } 199 + EXPORT_SYMBOL(rdma_restrack_parent_name); 178 200 179 201 /** 180 202 * rdma_restrack_new() - Initializes new restrack entry to allow _put() interface ··· 250 228 res->valid = true; 251 229 } 252 230 EXPORT_SYMBOL(rdma_restrack_add); 253 - 254 - /** 255 - * rdma_restrack_uadd() - add user object to the reource tracking database 256 - * @res: resource entry 257 - */ 258 - void rdma_restrack_uadd(struct rdma_restrack_entry *res) 259 - { 260 - if ((res->type != RDMA_RESTRACK_CM_ID) && 261 - (res->type != RDMA_RESTRACK_COUNTER)) 262 - res->task = NULL; 263 - 264 - if (!res->task) 265 - rdma_restrack_set_task(res, NULL); 266 - res->kern_name = NULL; 267 - 268 - res->user = true; 269 - rdma_restrack_add(res); 270 - } 271 - EXPORT_SYMBOL(rdma_restrack_uadd); 272 231 273 232 int __must_check rdma_restrack_get(struct rdma_restrack_entry *res) 274 233 {
+4 -2
drivers/infiniband/core/restrack.h
··· 29 29 void rdma_restrack_del(struct rdma_restrack_entry *res); 30 30 void rdma_restrack_new(struct rdma_restrack_entry *res, 31 31 enum rdma_restrack_type type); 32 - void rdma_restrack_attach_task(struct rdma_restrack_entry *res, 33 - struct task_struct *task); 32 + void rdma_restrack_set_name(struct rdma_restrack_entry *res, 33 + const char *caller); 34 + void rdma_restrack_parent_name(struct rdma_restrack_entry *dst, 35 + const struct rdma_restrack_entry *parent); 34 36 #endif /* _RDMA_CORE_RESTRACK_H_ */
+3 -4
drivers/infiniband/core/ucma.c
··· 456 456 return -ENOMEM; 457 457 458 458 ctx->uid = cmd.uid; 459 - cm_id = __rdma_create_id(current->nsproxy->net_ns, 460 - ucma_event_handler, ctx, cmd.ps, qp_type, NULL); 459 + cm_id = rdma_create_user_id(ucma_event_handler, ctx, cmd.ps, qp_type); 461 460 if (IS_ERR(cm_id)) { 462 461 ret = PTR_ERR(cm_id); 463 462 goto err1; ··· 1125 1126 ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); 1126 1127 mutex_lock(&ctx->mutex); 1127 1128 rdma_lock_handler(ctx->cm_id); 1128 - ret = __rdma_accept_ece(ctx->cm_id, &conn_param, NULL, &ece); 1129 + ret = rdma_accept_ece(ctx->cm_id, &conn_param, &ece); 1129 1130 if (!ret) { 1130 1131 /* The uid must be set atomically with the handler */ 1131 1132 ctx->uid = cmd.uid; ··· 1135 1136 } else { 1136 1137 mutex_lock(&ctx->mutex); 1137 1138 rdma_lock_handler(ctx->cm_id); 1138 - ret = __rdma_accept_ece(ctx->cm_id, NULL, NULL, &ece); 1139 + ret = rdma_accept_ece(ctx->cm_id, NULL, &ece); 1139 1140 rdma_unlock_handler(ctx->cm_id); 1140 1141 mutex_unlock(&ctx->mutex); 1141 1142 }
+10 -4
drivers/infiniband/core/uverbs_cmd.c
··· 223 223 xa_init_flags(&ucontext->mmap_xa, XA_FLAGS_ALLOC); 224 224 225 225 rdma_restrack_new(&ucontext->res, RDMA_RESTRACK_CTX); 226 + rdma_restrack_set_name(&ucontext->res, NULL); 226 227 attrs->context = ucontext; 227 228 return 0; 228 229 } ··· 252 251 if (ret) 253 252 goto err_uncharge; 254 253 255 - rdma_restrack_uadd(&ucontext->res); 254 + rdma_restrack_add(&ucontext->res); 256 255 257 256 /* 258 257 * Make sure that ib_uverbs_get_ucontext() sees the pointer update ··· 444 443 atomic_set(&pd->usecnt, 0); 445 444 446 445 rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD); 446 + rdma_restrack_set_name(&pd->res, NULL); 447 + 447 448 ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata); 448 449 if (ret) 449 450 goto err_alloc; 450 - rdma_restrack_uadd(&pd->res); 451 + rdma_restrack_add(&pd->res); 451 452 452 453 uobj->object = pd; 453 454 uobj_finalize_uobj_create(uobj, attrs); ··· 751 748 mr->iova = cmd.hca_va; 752 749 753 750 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); 754 - rdma_restrack_uadd(&mr->res); 751 + rdma_restrack_set_name(&mr->res, NULL); 752 + rdma_restrack_add(&mr->res); 755 753 756 754 uobj->object = mr; 757 755 uobj_put_obj_read(pd); ··· 1012 1008 atomic_set(&cq->usecnt, 0); 1013 1009 1014 1010 rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); 1011 + rdma_restrack_set_name(&cq->res, NULL); 1012 + 1015 1013 ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata); 1016 1014 if (ret) 1017 1015 goto err_free; 1018 - rdma_restrack_uadd(&cq->res); 1016 + rdma_restrack_add(&cq->res); 1019 1017 1020 1018 obj->uevent.uobject.object = cq; 1021 1019 obj->uevent.event_file = READ_ONCE(attrs->ufile->default_async_file);
+3 -1
drivers/infiniband/core/uverbs_std_types_cq.c
··· 126 126 atomic_set(&cq->usecnt, 0); 127 127 128 128 rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); 129 + rdma_restrack_set_name(&cq->res, NULL); 130 + 129 131 ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata); 130 132 if (ret) 131 133 goto err_free; 132 134 133 135 obj->uevent.uobject.object = cq; 134 136 obj->uevent.uobject.user_handle = user_handle; 135 - rdma_restrack_uadd(&cq->res); 137 + rdma_restrack_add(&cq->res); 136 138 uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE); 137 139 138 140 ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
+5 -5
drivers/infiniband/core/verbs.c
··· 273 273 pd->flags = flags; 274 274 275 275 rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD); 276 - rdma_restrack_set_task(&pd->res, caller); 276 + rdma_restrack_set_name(&pd->res, caller); 277 277 278 278 ret = device->ops.alloc_pd(pd, NULL); 279 279 if (ret) { ··· 1999 1999 atomic_set(&cq->usecnt, 0); 2000 2000 2001 2001 rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); 2002 - rdma_restrack_set_task(&cq->res, caller); 2002 + rdma_restrack_set_name(&cq->res, caller); 2003 2003 2004 2004 ret = device->ops.create_cq(cq, cq_attr, NULL); 2005 2005 if (ret) { ··· 2081 2081 atomic_inc(&pd->usecnt); 2082 2082 2083 2083 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); 2084 - rdma_restrack_set_task(&mr->res, pd->res.kern_name); 2084 + rdma_restrack_parent_name(&mr->res, &pd->res); 2085 2085 rdma_restrack_add(&mr->res); 2086 2086 2087 2087 return mr; ··· 2165 2165 mr->sig_attrs = NULL; 2166 2166 2167 2167 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); 2168 - rdma_restrack_set_task(&mr->res, pd->res.kern_name); 2168 + rdma_restrack_parent_name(&mr->res, &pd->res); 2169 2169 rdma_restrack_add(&mr->res); 2170 2170 out: 2171 2171 trace_mr_alloc(pd, mr_type, max_num_sg, mr); ··· 2226 2226 mr->sig_attrs = sig_attrs; 2227 2227 2228 2228 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); 2229 - rdma_restrack_set_task(&mr->res, pd->res.kern_name); 2229 + rdma_restrack_parent_name(&mr->res, &pd->res); 2230 2230 rdma_restrack_add(&mr->res); 2231 2231 out: 2232 2232 trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr);
+14 -33
include/rdma/rdma_cm.h
··· 110 110 u8 port_num; 111 111 }; 112 112 113 - struct rdma_cm_id *__rdma_create_id(struct net *net, 114 - rdma_cm_event_handler event_handler, 115 - void *context, enum rdma_ucm_port_space ps, 116 - enum ib_qp_type qp_type, 117 - const char *caller); 113 + struct rdma_cm_id * 114 + __rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler, 115 + void *context, enum rdma_ucm_port_space ps, 116 + enum ib_qp_type qp_type, const char *caller); 117 + struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler, 118 + void *context, 119 + enum rdma_ucm_port_space ps, 120 + enum ib_qp_type qp_type); 118 121 119 122 /** 120 123 * rdma_create_id - Create an RDMA identifier. ··· 135 132 * The event handler callback serializes on the id's mutex and is 136 133 * allowed to sleep. 137 134 */ 138 - #define rdma_create_id(net, event_handler, context, ps, qp_type) \ 139 - __rdma_create_id((net), (event_handler), (context), (ps), (qp_type), \ 140 - KBUILD_MODNAME) 135 + #define rdma_create_id(net, event_handler, context, ps, qp_type) \ 136 + __rdma_create_kernel_id(net, event_handler, context, ps, qp_type, \ 137 + KBUILD_MODNAME) 141 138 142 139 /** 143 140 * rdma_destroy_id - Destroys an RDMA identifier. ··· 253 250 */ 254 251 int rdma_listen(struct rdma_cm_id *id, int backlog); 255 252 256 - int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, 257 - const char *caller); 253 + int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); 258 254 259 255 void rdma_lock_handler(struct rdma_cm_id *id); 260 256 void rdma_unlock_handler(struct rdma_cm_id *id); 261 - int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, 262 - const char *caller, struct rdma_ucm_ece *ece); 263 - 264 - /** 265 - * rdma_accept - Called to accept a connection request or response. 266 - * @id: Connection identifier associated with the request. 
267 - * @conn_param: Information needed to establish the connection. This must be 268 - * provided if accepting a connection request. If accepting a connection 269 - * response, this parameter must be NULL. 270 - * 271 - * Typically, this routine is only called by the listener to accept a connection 272 - * request. It must also be called on the active side of a connection if the 273 - * user is performing their own QP transitions. 274 - * 275 - * In the case of error, a reject message is sent to the remote side and the 276 - * state of the qp associated with the id is modified to error, such that any 277 - * previously posted receive buffers would be flushed. 278 - * 279 - * This function is for use by kernel ULPs and must be called from under the 280 - * handler callback. 281 - */ 282 - #define rdma_accept(id, conn_param) \ 283 - __rdma_accept((id), (conn_param), KBUILD_MODNAME) 257 + int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, 258 + struct rdma_ucm_ece *ece); 284 259 285 260 /** 286 261 * rdma_notify - Notifies the RDMA CM of an asynchronous event that has
+1 -12
include/rdma/restrack.h
··· 106 106 107 107 int rdma_restrack_count(struct ib_device *dev, 108 108 enum rdma_restrack_type type); 109 - 110 - void rdma_restrack_uadd(struct rdma_restrack_entry *res); 111 - 112 109 /** 113 110 * rdma_is_kernel_res() - check the owner of resource 114 111 * @res: resource entry 115 112 */ 116 - static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res) 113 + static inline bool rdma_is_kernel_res(const struct rdma_restrack_entry *res) 117 114 { 118 115 return !res->user; 119 116 } ··· 126 129 * @res: resource entry 127 130 */ 128 131 int rdma_restrack_put(struct rdma_restrack_entry *res); 129 - 130 - /** 131 - * rdma_restrack_set_task() - set the task for this resource 132 - * @res: resource entry 133 - * @caller: kernel name, the current task will be used if the caller is NULL. 134 - */ 135 - void rdma_restrack_set_task(struct rdma_restrack_entry *res, 136 - const char *caller); 137 132 138 133 /* 139 134 * Helper functions for rdma drivers when filling out