Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB/core: Guarantee that a local_dma_lkey is available

Every single ULP requires a local_dma_lkey to do anything with
a QP, so let us ensure one exists for every PD created.

If the driver can supply a global local_dma_lkey then use that, otherwise
ask the driver to create a local use all physical memory MR associated
with the new PD.

Signed-off-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
Reviewed-by: Sagi Grimberg <sagig@dev.mellanox.co.il>
Acked-by: Christoph Hellwig <hch@infradead.org>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Tested-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>

Authored by Jason Gunthorpe and committed by Doug Ledford.
Commit 96249d70 (parent 7332bed0).

+45 -12
+1
drivers/infiniband/core/uverbs_cmd.c
@@ -562,6 +562,7 @@
 
 	pd->device  = file->device->ib_dev;
 	pd->uobject = uobj;
+	pd->local_mr = NULL;
 	atomic_set(&pd->usecnt, 0);
 
 	uobj->object = pd;
+42 -5
drivers/infiniband/core/verbs.c
@@ -213,24 +213,61 @@
 
 /* Protection domains */
 
+/**
+ * ib_alloc_pd - Allocates an unused protection domain.
+ * @device: The device on which to allocate the protection domain.
+ *
+ * A protection domain object provides an association between QPs, shared
+ * receive queues, address handles, memory regions, and memory windows.
+ *
+ * Every PD has a local_dma_lkey which can be used as the lkey value for local
+ * memory operations.
+ */
 struct ib_pd *ib_alloc_pd(struct ib_device *device)
 {
 	struct ib_pd *pd;
+	struct ib_device_attr devattr;
+	int rc;
+
+	rc = ib_query_device(device, &devattr);
+	if (rc)
+		return ERR_PTR(rc);
 
 	pd = device->alloc_pd(device, NULL, NULL);
+	if (IS_ERR(pd))
+		return pd;
 
-	if (!IS_ERR(pd)) {
-		pd->device  = device;
-		pd->uobject = NULL;
-		atomic_set(&pd->usecnt, 0);
+	pd->device = device;
+	pd->uobject = NULL;
+	pd->local_mr = NULL;
+	atomic_set(&pd->usecnt, 0);
+
+	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
+		pd->local_dma_lkey = device->local_dma_lkey;
+	else {
+		struct ib_mr *mr;
+
+		mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
+		if (IS_ERR(mr)) {
+			ib_dealloc_pd(pd);
+			return (struct ib_pd *)mr;
+		}
+
+		pd->local_mr = mr;
+		pd->local_dma_lkey = pd->local_mr->lkey;
 	}
-
 	return pd;
 }
 EXPORT_SYMBOL(ib_alloc_pd);
 
 int ib_dealloc_pd(struct ib_pd *pd)
 {
+	if (pd->local_mr) {
+		if (ib_dereg_mr(pd->local_mr))
+			return -EBUSY;
+		pd->local_mr = NULL;
+	}
+
 	if (atomic_read(&pd->usecnt))
 		return -EBUSY;
 
+2 -7
include/rdma/ib_verbs.h
@@ -1257,9 +1257,11 @@
 };
 
 struct ib_pd {
+	u32			local_dma_lkey;
 	struct ib_device       *device;
 	struct ib_uobject      *uobject;
 	atomic_t		usecnt; /* count all resources */
+	struct ib_mr	       *local_mr;
 };
 
 struct ib_xrcd {
@@ -2194,13 +2192,6 @@
 int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);
 
-/**
- * ib_alloc_pd - Allocates an unused protection domain.
- * @device: The device on which to allocate the protection domain.
- *
- * A protection domain object provides an association between QPs, shared
- * receive queues, address handles, memory regions, and memory windows.
- */
 struct ib_pd *ib_alloc_pd(struct ib_device *device);
 
 /**