Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/qedr: Use the common mmap API

Remove all functions related to mmap from qedr and use the common API.

Link: https://lore.kernel.org/r/20191030094417.16866-7-michal.kalderon@marvell.com
Signed-off-by: Ariel Elior <ariel.elior@marvell.com>
Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

Authored by Michal Kalderon and committed by Jason Gunthorpe
4c6bb02d 11f1a755

+97 -120
+1
drivers/infiniband/hw/qedr/main.c
··· 212 212 .get_link_layer = qedr_link_layer, 213 213 .map_mr_sg = qedr_map_mr_sg, 214 214 .mmap = qedr_mmap, 215 + .mmap_free = qedr_mmap_free, 215 216 .modify_port = qedr_modify_port, 216 217 .modify_qp = qedr_modify_qp, 217 218 .modify_srq = qedr_modify_srq,
+17 -13
drivers/infiniband/hw/qedr/qedr.h
··· 231 231 struct qedr_dev *dev; 232 232 struct qedr_pd *pd; 233 233 void __iomem *dpi_addr; 234 + struct rdma_user_mmap_entry *db_mmap_entry; 234 235 u64 dpi_phys_addr; 235 236 u32 dpi_size; 236 237 u16 dpi; 237 - 238 - struct list_head mm_head; 239 - 240 - /* Lock to protect mm list */ 241 - struct mutex mm_list_lock; 242 238 }; 243 239 244 240 union db_prod64 { ··· 295 299 struct ib_pd ibpd; 296 300 u32 pd_id; 297 301 struct qedr_ucontext *uctx; 298 - }; 299 - 300 - struct qedr_mm { 301 - struct { 302 - u64 phy_addr; 303 - unsigned long len; 304 - } key; 305 - struct list_head entry; 306 302 }; 307 303 308 304 union db_prod32 { ··· 479 491 u32 npages; 480 492 }; 481 493 494 + struct qedr_user_mmap_entry { 495 + struct rdma_user_mmap_entry rdma_entry; 496 + struct qedr_dev *dev; 497 + u64 io_address; 498 + size_t length; 499 + u16 dpi; 500 + u8 mmap_flag; 501 + }; 502 + 482 503 #define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT))) 483 504 484 505 #define QEDR_RESP_IMM (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \ ··· 585 588 static inline struct qedr_srq *get_qedr_srq(struct ib_srq *ibsrq) 586 589 { 587 590 return container_of(ibsrq, struct qedr_srq, ibsrq); 591 + } 592 + 593 + static inline struct qedr_user_mmap_entry * 594 + get_qedr_mmap_entry(struct rdma_user_mmap_entry *rdma_entry) 595 + { 596 + return container_of(rdma_entry, struct qedr_user_mmap_entry, 597 + rdma_entry); 588 598 } 589 599 #endif
+77 -106
drivers/infiniband/hw/qedr/verbs.c
··· 59 59 60 60 #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT) 61 61 62 + enum { 63 + QEDR_USER_MMAP_IO_WC = 0, 64 + }; 65 + 62 66 static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src, 63 67 size_t len) 64 68 { ··· 261 257 return 0; 262 258 } 263 259 264 - static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr, 265 - unsigned long len) 266 - { 267 - struct qedr_mm *mm; 268 - 269 - mm = kzalloc(sizeof(*mm), GFP_KERNEL); 270 - if (!mm) 271 - return -ENOMEM; 272 - 273 - mm->key.phy_addr = phy_addr; 274 - /* This function might be called with a length which is not a multiple 275 - * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel 276 - * forces this granularity by increasing the requested size if needed. 277 - * When qedr_mmap is called, it will search the list with the updated 278 - * length as a key. To prevent search failures, the length is rounded up 279 - * in advance to PAGE_SIZE. 280 - */ 281 - mm->key.len = roundup(len, PAGE_SIZE); 282 - INIT_LIST_HEAD(&mm->entry); 283 - 284 - mutex_lock(&uctx->mm_list_lock); 285 - list_add(&mm->entry, &uctx->mm_head); 286 - mutex_unlock(&uctx->mm_list_lock); 287 - 288 - DP_DEBUG(uctx->dev, QEDR_MSG_MISC, 289 - "added (addr=0x%llx,len=0x%lx) for ctx=%p\n", 290 - (unsigned long long)mm->key.phy_addr, 291 - (unsigned long)mm->key.len, uctx); 292 - 293 - return 0; 294 - } 295 - 296 - static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr, 297 - unsigned long len) 298 - { 299 - bool found = false; 300 - struct qedr_mm *mm; 301 - 302 - mutex_lock(&uctx->mm_list_lock); 303 - list_for_each_entry(mm, &uctx->mm_head, entry) { 304 - if (len != mm->key.len || phy_addr != mm->key.phy_addr) 305 - continue; 306 - 307 - found = true; 308 - break; 309 - } 310 - mutex_unlock(&uctx->mm_list_lock); 311 - DP_DEBUG(uctx->dev, QEDR_MSG_MISC, 312 - "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n", 313 - mm->key.phy_addr, mm->key.len, uctx, found); 314 - 
315 - return found; 316 - } 317 - 318 260 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) 319 261 { 320 262 struct ib_device *ibdev = uctx->device; ··· 269 319 struct qedr_alloc_ucontext_resp uresp = {}; 270 320 struct qedr_dev *dev = get_qedr_dev(ibdev); 271 321 struct qed_rdma_add_user_out_params oparams; 322 + struct qedr_user_mmap_entry *entry; 272 323 273 324 if (!udata) 274 325 return -EFAULT; ··· 286 335 ctx->dpi_addr = oparams.dpi_addr; 287 336 ctx->dpi_phys_addr = oparams.dpi_phys_addr; 288 337 ctx->dpi_size = oparams.dpi_size; 289 - INIT_LIST_HEAD(&ctx->mm_head); 290 - mutex_init(&ctx->mm_list_lock); 338 + entry = kzalloc(sizeof(*entry), GFP_KERNEL); 339 + if (!entry) { 340 + rc = -ENOMEM; 341 + goto err; 342 + } 343 + 344 + entry->io_address = ctx->dpi_phys_addr; 345 + entry->length = ctx->dpi_size; 346 + entry->mmap_flag = QEDR_USER_MMAP_IO_WC; 347 + entry->dpi = ctx->dpi; 348 + entry->dev = dev; 349 + rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry, 350 + ctx->dpi_size); 351 + if (rc) { 352 + kfree(entry); 353 + goto err; 354 + } 355 + ctx->db_mmap_entry = &entry->rdma_entry; 291 356 292 357 uresp.dpm_enabled = dev->user_dpm_enabled; 293 358 uresp.wids_enabled = 1; 294 359 uresp.wid_count = oparams.wid_count; 295 - uresp.db_pa = ctx->dpi_phys_addr; 360 + uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry); 296 361 uresp.db_size = ctx->dpi_size; 297 362 uresp.max_send_wr = dev->attr.max_sqe; 298 363 uresp.max_recv_wr = dev->attr.max_rqe; ··· 320 353 321 354 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 322 355 if (rc) 323 - return rc; 356 + goto err; 324 357 325 358 ctx->dev = dev; 326 - 327 - rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size); 328 - if (rc) 329 - return rc; 330 359 331 360 DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n", 332 361 &ctx->ibucontext); 333 362 return 0; 363 + 364 + err: 365 + if (!ctx->db_mmap_entry) 366 + dev->ops->rdma_remove_user(dev->rdma_ctx, 
ctx->dpi); 367 + else 368 + rdma_user_mmap_entry_remove(ctx->db_mmap_entry); 369 + 370 + return rc; 334 371 } 335 372 336 373 void qedr_dealloc_ucontext(struct ib_ucontext *ibctx) 337 374 { 338 375 struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx); 339 - struct qedr_mm *mm, *tmp; 340 376 341 377 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n", 342 378 uctx); 343 - uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi); 344 379 345 - list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) { 346 - DP_DEBUG(uctx->dev, QEDR_MSG_MISC, 347 - "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n", 348 - mm->key.phy_addr, mm->key.len, uctx); 349 - list_del(&mm->entry); 350 - kfree(mm); 351 - } 380 + rdma_user_mmap_entry_remove(uctx->db_mmap_entry); 352 381 } 353 382 354 - int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) 383 + void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry) 355 384 { 356 - struct qedr_ucontext *ucontext = get_qedr_ucontext(context); 357 - struct qedr_dev *dev = get_qedr_dev(context->device); 358 - unsigned long phys_addr = vma->vm_pgoff << PAGE_SHIFT; 359 - unsigned long len = (vma->vm_end - vma->vm_start); 360 - unsigned long dpi_start; 385 + struct qedr_user_mmap_entry *entry = get_qedr_mmap_entry(rdma_entry); 386 + struct qedr_dev *dev = entry->dev; 361 387 362 - dpi_start = dev->db_phys_addr + (ucontext->dpi * ucontext->dpi_size); 388 + if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC) 389 + dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi); 363 390 364 - DP_DEBUG(dev, QEDR_MSG_INIT, 365 - "mmap invoked with vm_start=0x%pK, vm_end=0x%pK,vm_pgoff=0x%pK; dpi_start=0x%pK dpi_size=0x%x\n", 366 - (void *)vma->vm_start, (void *)vma->vm_end, 367 - (void *)vma->vm_pgoff, (void *)dpi_start, ucontext->dpi_size); 391 + kfree(entry); 392 + } 368 393 369 - if ((vma->vm_start & (PAGE_SIZE - 1)) || (len & (PAGE_SIZE - 1))) { 370 - DP_ERR(dev, 371 - "failed mmap, addresses must be page aligned: start=0x%pK, 
end=0x%pK\n", 372 - (void *)vma->vm_start, (void *)vma->vm_end); 394 + int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma) 395 + { 396 + struct ib_device *dev = ucontext->device; 397 + size_t length = vma->vm_end - vma->vm_start; 398 + struct rdma_user_mmap_entry *rdma_entry; 399 + struct qedr_user_mmap_entry *entry; 400 + int rc = 0; 401 + u64 pfn; 402 + 403 + ibdev_dbg(dev, 404 + "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n", 405 + vma->vm_start, vma->vm_end, length, vma->vm_pgoff); 406 + 407 + rdma_entry = rdma_user_mmap_entry_get(ucontext, vma); 408 + if (!rdma_entry) { 409 + ibdev_dbg(dev, "pgoff[%#lx] does not have valid entry\n", 410 + vma->vm_pgoff); 373 411 return -EINVAL; 374 412 } 413 + entry = get_qedr_mmap_entry(rdma_entry); 414 + ibdev_dbg(dev, 415 + "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n", 416 + entry->io_address, length, entry->mmap_flag); 375 417 376 - if (!qedr_search_mmap(ucontext, phys_addr, len)) { 377 - DP_ERR(dev, "failed mmap, vm_pgoff=0x%lx is not authorized\n", 378 - vma->vm_pgoff); 379 - return -EINVAL; 418 + switch (entry->mmap_flag) { 419 + case QEDR_USER_MMAP_IO_WC: 420 + pfn = entry->io_address >> PAGE_SHIFT; 421 + rc = rdma_user_mmap_io(ucontext, vma, pfn, length, 422 + pgprot_writecombine(vma->vm_page_prot), 423 + rdma_entry); 424 + break; 425 + default: 426 + rc = -EINVAL; 380 427 } 381 428 382 - if (phys_addr < dpi_start || 383 - ((phys_addr + len) > (dpi_start + ucontext->dpi_size))) { 384 - DP_ERR(dev, 385 - "failed mmap, pages are outside of dpi; page address=0x%pK, dpi_start=0x%pK, dpi_size=0x%x\n", 386 - (void *)phys_addr, (void *)dpi_start, 387 - ucontext->dpi_size); 388 - return -EINVAL; 389 - } 429 + if (rc) 430 + ibdev_dbg(dev, 431 + "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n", 432 + entry->io_address, length, entry->mmap_flag, rc); 390 433 391 - if (vma->vm_flags & VM_READ) { 392 - DP_ERR(dev, "failed mmap, cannot map doorbell bar for read\n"); 393 - 
return -EINVAL; 394 - } 395 - 396 - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 397 - return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, len, 398 - vma->vm_page_prot); 434 + rdma_user_mmap_entry_put(rdma_entry); 435 + return rc; 399 436 } 400 437 401 438 int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+2 -1
drivers/infiniband/hw/qedr/verbs.h
··· 46 46 int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); 47 47 void qedr_dealloc_ucontext(struct ib_ucontext *uctx); 48 48 49 - int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma); 49 + int qedr_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma); 50 + void qedr_mmap_free(struct rdma_user_mmap_entry *rdma_entry); 50 51 int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); 51 52 void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); 52 53