Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/core: Properly increment and decrement QP usecnts

The QP usecnts were incremented through the QP attributes structure while
decremented through the QP itself. Rely on the ib_create_qp_user() code,
which initializes all QP parameters prior to returning to the user, and
increment the usecnts exactly the way destroy decrements them.

Link: https://lore.kernel.org/r/25d256a3bb1fc480b77d7fe439817b993de48610.1628014762.git.leonro@nvidia.com
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

authored by

Leon Romanovsky and committed by
Jason Gunthorpe
5507f67d 00a79d6b

+39 -49
+2
drivers/infiniband/core/core_priv.h
··· 320 320 struct ib_qp_init_attr *attr, 321 321 struct ib_udata *udata, struct ib_uqp_object *uobj, 322 322 const char *caller); 323 + void ib_qp_usecnt_inc(struct ib_qp *qp); 324 + void ib_qp_usecnt_dec(struct ib_qp *qp); 323 325 324 326 struct rdma_dev_addr; 325 327 int rdma_resolve_ip_route(struct sockaddr *src_addr,
+2 -11
drivers/infiniband/core/uverbs_cmd.c
··· 1445 1445 ret = PTR_ERR(qp); 1446 1446 goto err_put; 1447 1447 } 1448 + ib_qp_usecnt_inc(qp); 1448 1449 1449 - if (cmd->qp_type != IB_QPT_XRC_TGT) { 1450 - atomic_inc(&pd->usecnt); 1451 - if (attr.send_cq) 1452 - atomic_inc(&attr.send_cq->usecnt); 1453 - if (attr.recv_cq) 1454 - atomic_inc(&attr.recv_cq->usecnt); 1455 - if (attr.srq) 1456 - atomic_inc(&attr.srq->usecnt); 1457 - if (ind_tbl) 1458 - atomic_inc(&ind_tbl->usecnt); 1459 - } else { 1450 + if (cmd->qp_type == IB_QPT_XRC_TGT) { 1460 1451 /* It is done in _ib_create_qp for other QP types */ 1461 1452 qp->uobject = obj; 1462 1453 }
+2 -11
drivers/infiniband/core/uverbs_std_types_qp.c
··· 258 258 ret = PTR_ERR(qp); 259 259 goto err_put; 260 260 } 261 + ib_qp_usecnt_inc(qp); 261 262 262 - if (attr.qp_type != IB_QPT_XRC_TGT) { 263 - atomic_inc(&pd->usecnt); 264 - if (attr.send_cq) 265 - atomic_inc(&attr.send_cq->usecnt); 266 - if (attr.recv_cq) 267 - atomic_inc(&attr.recv_cq->usecnt); 268 - if (attr.srq) 269 - atomic_inc(&attr.srq->usecnt); 270 - if (attr.rwq_ind_tbl) 271 - atomic_inc(&attr.rwq_ind_tbl->usecnt); 272 - } else { 263 + if (attr.qp_type == IB_QPT_XRC_TGT) { 273 264 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, 274 265 uobject); 275 266 atomic_inc(&obj->uxrcd->refcnt);
+33 -27
drivers/infiniband/core/verbs.c
··· 1274 1274 } 1275 1275 EXPORT_SYMBOL(_ib_create_qp); 1276 1276 1277 + void ib_qp_usecnt_inc(struct ib_qp *qp) 1278 + { 1279 + if (qp->pd) 1280 + atomic_inc(&qp->pd->usecnt); 1281 + if (qp->send_cq) 1282 + atomic_inc(&qp->send_cq->usecnt); 1283 + if (qp->recv_cq) 1284 + atomic_inc(&qp->recv_cq->usecnt); 1285 + if (qp->srq) 1286 + atomic_inc(&qp->srq->usecnt); 1287 + if (qp->rwq_ind_tbl) 1288 + atomic_inc(&qp->rwq_ind_tbl->usecnt); 1289 + } 1290 + EXPORT_SYMBOL(ib_qp_usecnt_inc); 1291 + 1292 + void ib_qp_usecnt_dec(struct ib_qp *qp) 1293 + { 1294 + if (qp->rwq_ind_tbl) 1295 + atomic_dec(&qp->rwq_ind_tbl->usecnt); 1296 + if (qp->srq) 1297 + atomic_dec(&qp->srq->usecnt); 1298 + if (qp->recv_cq) 1299 + atomic_dec(&qp->recv_cq->usecnt); 1300 + if (qp->send_cq) 1301 + atomic_dec(&qp->send_cq->usecnt); 1302 + if (qp->pd) 1303 + atomic_dec(&qp->pd->usecnt); 1304 + } 1305 + EXPORT_SYMBOL(ib_qp_usecnt_dec); 1306 + 1277 1307 struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd, 1278 1308 struct ib_qp_init_attr *qp_init_attr, 1279 1309 const char *caller) ··· 1336 1306 return xrc_qp; 1337 1307 } 1338 1308 1339 - if (qp_init_attr->recv_cq) 1340 - atomic_inc(&qp_init_attr->recv_cq->usecnt); 1341 - if (qp->srq) 1342 - atomic_inc(&qp_init_attr->srq->usecnt); 1343 - 1344 - atomic_inc(&pd->usecnt); 1345 - if (qp_init_attr->send_cq) 1346 - atomic_inc(&qp_init_attr->send_cq->usecnt); 1309 + ib_qp_usecnt_inc(qp); 1347 1310 1348 1311 if (qp_init_attr->cap.max_rdma_ctxs) { 1349 1312 ret = rdma_rw_init_mrs(qp, qp_init_attr); ··· 1994 1971 { 1995 1972 const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr; 1996 1973 const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr; 1997 - struct ib_pd *pd; 1998 - struct ib_cq *scq, *rcq; 1999 - struct ib_srq *srq; 2000 - struct ib_rwq_ind_table *ind_tbl; 2001 1974 struct ib_qp_security *sec; 2002 1975 int ret; 2003 1976 ··· 2005 1986 if (qp->real_qp != qp) 2006 1987 return __ib_destroy_shared_qp(qp); 2007 1988 2008 - pd = qp->pd; 
2009 - scq = qp->send_cq; 2010 - rcq = qp->recv_cq; 2011 - srq = qp->srq; 2012 - ind_tbl = qp->rwq_ind_tbl; 2013 1989 sec = qp->qp_sec; 2014 1990 if (sec) 2015 1991 ib_destroy_qp_security_begin(sec); ··· 2024 2010 rdma_put_gid_attr(alt_path_sgid_attr); 2025 2011 if (av_sgid_attr) 2026 2012 rdma_put_gid_attr(av_sgid_attr); 2027 - if (pd) 2028 - atomic_dec(&pd->usecnt); 2029 - if (scq) 2030 - atomic_dec(&scq->usecnt); 2031 - if (rcq) 2032 - atomic_dec(&rcq->usecnt); 2033 - if (srq) 2034 - atomic_dec(&srq->usecnt); 2035 - if (ind_tbl) 2036 - atomic_dec(&ind_tbl->usecnt); 2013 + 2014 + ib_qp_usecnt_dec(qp); 2037 2015 if (sec) 2038 2016 ib_destroy_qp_security_end(sec); 2039 2017