Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB: expand ib_umem_get() prototype

Add a new parameter, dmasync, to the ib_umem_get() prototype. Use dmasync = 1
when mapping user-allocated CQs with ib_umem_get().

Signed-off-by: Arthur Kepner <akepner@sgi.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Roland Dreier <rdreier@cisco.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Grant Grundler <grundler@parisc-linux.org>
Cc: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Arthur Kepner and committed by Linus Torvalds.
cb9fbc5c 309df0c5

+75 -19
+12 -5
drivers/infiniband/core/umem.c
··· 38 38 #include <linux/dma-mapping.h> 39 39 #include <linux/sched.h> 40 40 #include <linux/hugetlb.h> 41 + #include <linux/dma-attrs.h> 41 42 42 43 #include "uverbs.h" 43 44 ··· 73 72 * @addr: userspace virtual address to start at 74 73 * @size: length of region to pin 75 74 * @access: IB_ACCESS_xxx flags for memory being pinned 75 + * @dmasync: flush in-flight DMA when the memory region is written 76 76 */ 77 77 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, 78 - size_t size, int access) 78 + size_t size, int access, int dmasync) 79 79 { 80 80 struct ib_umem *umem; 81 81 struct page **page_list; ··· 89 87 int ret; 90 88 int off; 91 89 int i; 90 + DEFINE_DMA_ATTRS(attrs); 91 + 92 + if (dmasync) 93 + dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); 92 94 93 95 if (!can_do_mlock()) 94 96 return ERR_PTR(-EPERM); ··· 180 174 sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0); 181 175 } 182 176 183 - chunk->nmap = ib_dma_map_sg(context->device, 184 - &chunk->page_list[0], 185 - chunk->nents, 186 - DMA_BIDIRECTIONAL); 177 + chunk->nmap = ib_dma_map_sg_attrs(context->device, 178 + &chunk->page_list[0], 179 + chunk->nents, 180 + DMA_BIDIRECTIONAL, 181 + &attrs); 187 182 if (chunk->nmap <= 0) { 188 183 for (i = 0; i < chunk->nents; ++i) 189 184 put_page(sg_page(&chunk->page_list[i]));
+1 -1
drivers/infiniband/hw/amso1100/c2_provider.c
··· 452 452 return ERR_PTR(-ENOMEM); 453 453 c2mr->pd = c2pd; 454 454 455 - c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc); 455 + c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); 456 456 if (IS_ERR(c2mr->umem)) { 457 457 err = PTR_ERR(c2mr->umem); 458 458 kfree(c2mr);
+1 -1
drivers/infiniband/hw/cxgb3/iwch_provider.c
··· 602 602 if (!mhp) 603 603 return ERR_PTR(-ENOMEM); 604 604 605 - mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc); 605 + mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); 606 606 if (IS_ERR(mhp->umem)) { 607 607 err = PTR_ERR(mhp->umem); 608 608 kfree(mhp);
+1 -1
drivers/infiniband/hw/ehca/ehca_mrmw.c
··· 323 323 } 324 324 325 325 e_mr->umem = ib_umem_get(pd->uobject->context, start, length, 326 - mr_access_flags); 326 + mr_access_flags, 0); 327 327 if (IS_ERR(e_mr->umem)) { 328 328 ib_mr = (void *)e_mr->umem; 329 329 goto reg_user_mr_exit1;
+2 -1
drivers/infiniband/hw/ipath/ipath_mr.c
··· 195 195 goto bail; 196 196 } 197 197 198 - umem = ib_umem_get(pd->uobject->context, start, length, mr_access_flags); 198 + umem = ib_umem_get(pd->uobject->context, start, length, 199 + mr_access_flags, 0); 199 200 if (IS_ERR(umem)) 200 201 return (void *) umem; 201 202
+1 -1
drivers/infiniband/hw/mlx4/cq.c
··· 137 137 int err; 138 138 139 139 *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe), 140 - IB_ACCESS_LOCAL_WRITE); 140 + IB_ACCESS_LOCAL_WRITE, 1); 141 141 if (IS_ERR(*umem)) 142 142 return PTR_ERR(*umem); 143 143
+1 -1
drivers/infiniband/hw/mlx4/doorbell.c
··· 63 63 page->user_virt = (virt & PAGE_MASK); 64 64 page->refcnt = 0; 65 65 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, 66 - PAGE_SIZE, 0); 66 + PAGE_SIZE, 0, 0); 67 67 if (IS_ERR(page->umem)) { 68 68 err = PTR_ERR(page->umem); 69 69 kfree(page);
+2 -1
drivers/infiniband/hw/mlx4/mr.c
··· 132 132 if (!mr) 133 133 return ERR_PTR(-ENOMEM); 134 134 135 - mr->umem = ib_umem_get(pd->uobject->context, start, length, access_flags); 135 + mr->umem = ib_umem_get(pd->uobject->context, start, length, 136 + access_flags, 0); 136 137 if (IS_ERR(mr->umem)) { 137 138 err = PTR_ERR(mr->umem); 138 139 goto err_free;
+1 -1
drivers/infiniband/hw/mlx4/qp.c
··· 482 482 goto err; 483 483 484 484 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, 485 - qp->buf_size, 0); 485 + qp->buf_size, 0, 0); 486 486 if (IS_ERR(qp->umem)) { 487 487 err = PTR_ERR(qp->umem); 488 488 goto err;
+1 -1
drivers/infiniband/hw/mlx4/srq.c
··· 109 109 } 110 110 111 111 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, 112 - buf_size, 0); 112 + buf_size, 0, 0); 113 113 if (IS_ERR(srq->umem)) { 114 114 err = PTR_ERR(srq->umem); 115 115 goto err_srq;
+7 -1
drivers/infiniband/hw/mthca/mthca_provider.c
··· 1006 1006 struct mthca_dev *dev = to_mdev(pd->device); 1007 1007 struct ib_umem_chunk *chunk; 1008 1008 struct mthca_mr *mr; 1009 + struct mthca_reg_mr ucmd; 1009 1010 u64 *pages; 1010 1011 int shift, n, len; 1011 1012 int i, j, k; 1012 1013 int err = 0; 1013 1014 int write_mtt_size; 1014 1015 1016 + if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) 1017 + return ERR_PTR(-EFAULT); 1018 + 1015 1019 mr = kmalloc(sizeof *mr, GFP_KERNEL); 1016 1020 if (!mr) 1017 1021 return ERR_PTR(-ENOMEM); 1018 1022 1019 - mr->umem = ib_umem_get(pd->uobject->context, start, length, acc); 1023 + mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 1024 + ucmd.mr_attrs & MTHCA_MR_DMASYNC); 1025 + 1020 1026 if (IS_ERR(mr->umem)) { 1021 1027 err = PTR_ERR(mr->umem); 1022 1028 goto err;
+9 -1
drivers/infiniband/hw/mthca/mthca_user.h
··· 41 41 * Increment this value if any changes that break userspace ABI 42 42 * compatibility are made. 43 43 */ 44 - #define MTHCA_UVERBS_ABI_VERSION 1 44 + #define MTHCA_UVERBS_ABI_VERSION 2 45 45 46 46 /* 47 47 * Make sure that all structs defined in this file remain laid out so ··· 58 58 59 59 struct mthca_alloc_pd_resp { 60 60 __u32 pdn; 61 + __u32 reserved; 62 + }; 63 + 64 + struct mthca_reg_mr { 65 + __u32 mr_attrs; 66 + #define MTHCA_MR_DMASYNC 0x1 67 + /* mark the memory region with a DMA attribute that causes 68 + * in-flight DMA to be flushed when the region is written to */ 61 69 __u32 reserved; 62 70 }; 63 71
+1 -1
drivers/infiniband/hw/nes/nes_verbs.c
··· 2377 2377 u8 single_page = 1; 2378 2378 u8 stag_key; 2379 2379 2380 - region = ib_umem_get(pd->uobject->context, start, length, acc); 2380 + region = ib_umem_get(pd->uobject->context, start, length, acc, 0); 2381 2381 if (IS_ERR(region)) { 2382 2382 return (struct ib_mr *)region; 2383 2383 }
+2 -2
include/rdma/ib_umem.h
··· 62 62 #ifdef CONFIG_INFINIBAND_USER_MEM 63 63 64 64 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, 65 - size_t size, int access); 65 + size_t size, int access, int dmasync); 66 66 void ib_umem_release(struct ib_umem *umem); 67 67 int ib_umem_page_count(struct ib_umem *umem); 68 68 ··· 72 72 73 73 static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context, 74 74 unsigned long addr, size_t size, 75 - int access) { 75 + int access, int dmasync) { 76 76 return ERR_PTR(-EINVAL); 77 77 } 78 78 static inline void ib_umem_release(struct ib_umem *umem) { }
+33
include/rdma/ib_verbs.h
··· 1542 1542 dma_unmap_single(dev->dma_device, addr, size, direction); 1543 1543 } 1544 1544 1545 + static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, 1546 + void *cpu_addr, size_t size, 1547 + enum dma_data_direction direction, 1548 + struct dma_attrs *attrs) 1549 + { 1550 + return dma_map_single_attrs(dev->dma_device, cpu_addr, size, 1551 + direction, attrs); 1552 + } 1553 + 1554 + static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, 1555 + u64 addr, size_t size, 1556 + enum dma_data_direction direction, 1557 + struct dma_attrs *attrs) 1558 + { 1559 + return dma_unmap_single_attrs(dev->dma_device, addr, size, 1560 + direction, attrs); 1561 + } 1562 + 1545 1563 /** 1546 1564 * ib_dma_map_page - Map a physical page to DMA address 1547 1565 * @dev: The device for which the dma_addr is to be created ··· 1629 1611 dma_unmap_sg(dev->dma_device, sg, nents, direction); 1630 1612 } 1631 1613 1614 + static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 1615 + struct scatterlist *sg, int nents, 1616 + enum dma_data_direction direction, 1617 + struct dma_attrs *attrs) 1618 + { 1619 + return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs); 1620 + } 1621 + 1622 + static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 1623 + struct scatterlist *sg, int nents, 1624 + enum dma_data_direction direction, 1625 + struct dma_attrs *attrs) 1626 + { 1627 + dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs); 1628 + } 1632 1629 /** 1633 1630 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 1634 1631 * @dev: The device for which the DMA addresses were created