Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-buf: Use struct dma_buf_map in dma_buf_vmap() interfaces

This patch updates dma_buf_vmap() and dma-buf's vmap callback to use
struct dma_buf_map.

The interfaces used to return a buffer address. This address now gets
stored in an instance of the structure that is given as an additional
argument. The functions return an errno code on errors.

Users of the functions are updated accordingly. This is only an interface
change. It is currently expected that dma-buf memory can be accessed with
system memory load/store operations.

v3:
* update fastrpc driver (kernel test robot)
v2:
* always clear map parameter in dma_buf_vmap() (Daniel)
* include dma-buf-heaps and i915 selftests (kernel test robot)

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Tomasz Figa <tfiga@chromium.org>
Acked-by: Sam Ravnborg <sam@ravnborg.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20200925115601.23955-3-tzimmermann@suse.de

+143 -61
+16 -12
drivers/dma-buf/dma-buf.c
··· 1186 1186 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel 1187 1187 * address space. Same restrictions as for vmap and friends apply. 1188 1188 * @dmabuf: [in] buffer to vmap 1189 + * @map: [out] returns the vmap pointer 1189 1190 * 1190 1191 * This call may fail due to lack of virtual mapping address space. 1191 1192 * These calls are optional in drivers. The intended use for them 1192 1193 * is for mapping objects linear in kernel space for high use objects. 1193 1194 * Please attempt to use kmap/kunmap before thinking about these interfaces. 1194 1195 * 1195 - * Returns NULL on error. 1196 + * Returns 0 on success, or a negative errno code otherwise. 1196 1197 */ 1197 - void *dma_buf_vmap(struct dma_buf *dmabuf) 1198 + int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) 1198 1199 { 1199 - void *ptr; 1200 + struct dma_buf_map ptr; 1201 + int ret = 0; 1202 + 1203 + dma_buf_map_clear(map); 1200 1204 1201 1205 if (WARN_ON(!dmabuf)) 1202 - return NULL; 1206 + return -EINVAL; 1203 1207 1204 1208 if (!dmabuf->ops->vmap) 1205 - return NULL; 1209 + return -EINVAL; 1206 1210 1207 1211 mutex_lock(&dmabuf->lock); 1208 1212 if (dmabuf->vmapping_counter) { 1209 1213 dmabuf->vmapping_counter++; 1210 1214 BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr)); 1211 - ptr = dmabuf->vmap_ptr.vaddr; 1215 + *map = dmabuf->vmap_ptr; 1212 1216 goto out_unlock; 1213 1217 } 1214 1218 1215 1219 BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr)); 1216 1220 1217 - ptr = dmabuf->ops->vmap(dmabuf); 1218 - if (WARN_ON_ONCE(IS_ERR(ptr))) 1219 - ptr = NULL; 1220 - if (!ptr) 1221 + ret = dmabuf->ops->vmap(dmabuf, &ptr); 1222 + if (WARN_ON_ONCE(ret)) 1221 1223 goto out_unlock; 1222 1224 1223 - dmabuf->vmap_ptr.vaddr = ptr; 1225 + dmabuf->vmap_ptr = ptr; 1224 1226 dmabuf->vmapping_counter = 1; 1227 + 1228 + *map = dmabuf->vmap_ptr; 1225 1229 1226 1230 out_unlock: 1227 1231 mutex_unlock(&dmabuf->lock); 1228 - return ptr; 1232 + return ret; 1229 1233 } 1230 1234 
EXPORT_SYMBOL_GPL(dma_buf_vmap); 1231 1235
+6 -2
drivers/dma-buf/heaps/heap-helpers.c
··· 236 236 return 0; 237 237 } 238 238 239 - static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf) 239 + static int dma_heap_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) 240 240 { 241 241 struct heap_helper_buffer *buffer = dmabuf->priv; 242 242 void *vaddr; ··· 245 245 vaddr = dma_heap_buffer_vmap_get(buffer); 246 246 mutex_unlock(&buffer->lock); 247 247 248 - return vaddr; 248 + if (!vaddr) 249 + return -ENOMEM; 250 + dma_buf_map_set_vaddr(map, vaddr); 251 + 252 + return 0; 249 253 } 250 254 251 255 static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+7 -6
drivers/gpu/drm/drm_gem_cma_helper.c
··· 634 634 { 635 635 struct drm_gem_cma_object *cma_obj; 636 636 struct drm_gem_object *obj; 637 - void *vaddr; 637 + struct dma_buf_map map; 638 + int ret; 638 639 639 - vaddr = dma_buf_vmap(attach->dmabuf); 640 - if (!vaddr) { 640 + ret = dma_buf_vmap(attach->dmabuf, &map); 641 + if (ret) { 641 642 DRM_ERROR("Failed to vmap PRIME buffer\n"); 642 - return ERR_PTR(-ENOMEM); 643 + return ERR_PTR(ret); 643 644 } 644 645 645 646 obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt); 646 647 if (IS_ERR(obj)) { 647 - dma_buf_vunmap(attach->dmabuf, vaddr); 648 + dma_buf_vunmap(attach->dmabuf, map.vaddr); 648 649 return obj; 649 650 } 650 651 651 652 cma_obj = to_drm_gem_cma_obj(obj); 652 - cma_obj->vaddr = vaddr; 653 + cma_obj->vaddr = map.vaddr; 653 654 654 655 return obj; 655 656 }
+9 -5
drivers/gpu/drm/drm_gem_shmem_helper.c
··· 261 261 static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) 262 262 { 263 263 struct drm_gem_object *obj = &shmem->base; 264 - int ret; 264 + struct dma_buf_map map; 265 + int ret = 0; 265 266 266 267 if (shmem->vmap_use_count++ > 0) 267 268 return shmem->vaddr; 268 269 269 270 if (obj->import_attach) { 270 - shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf); 271 + ret = dma_buf_vmap(obj->import_attach->dmabuf, &map); 272 + if (!ret) 273 + shmem->vaddr = map.vaddr; 271 274 } else { 272 275 pgprot_t prot = PAGE_KERNEL; 273 276 ··· 282 279 prot = pgprot_writecombine(prot); 283 280 shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, 284 281 VM_MAP, prot); 282 + if (!shmem->vaddr) 283 + ret = -ENOMEM; 285 284 } 286 285 287 - if (!shmem->vaddr) { 288 - DRM_DEBUG_KMS("Failed to vmap pages\n"); 289 - ret = -ENOMEM; 286 + if (ret) { 287 + DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret); 290 288 goto err_put_pages; 291 289 } 292 290
+6 -3
drivers/gpu/drm/drm_prime.c
··· 662 662 /** 663 663 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM 664 664 * @dma_buf: buffer to be mapped 665 + * @map: the virtual address of the buffer 665 666 * 666 667 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap 667 668 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling. 668 669 * 669 670 * Returns 0 on success or a negative errno code otherwise. 670 671 */ 671 - void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf) 672 + int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) 672 673 { 673 674 struct drm_gem_object *obj = dma_buf->priv; 674 675 void *vaddr; 675 676 676 677 vaddr = drm_gem_vmap(obj); 677 678 if (IS_ERR(vaddr)) 678 - vaddr = NULL; 679 + return PTR_ERR(vaddr); 679 680 680 - return vaddr; 681 + dma_buf_map_set_vaddr(map, vaddr); 682 + 683 + return 0; 681 684 } 682 685 EXPORT_SYMBOL(drm_gem_dmabuf_vmap); 683 686
+7 -1
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
··· 85 85 86 86 static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj) 87 87 { 88 + struct dma_buf_map map; 89 + int ret; 90 + 88 91 lockdep_assert_held(&etnaviv_obj->lock); 89 92 90 - return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf); 93 + ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map); 94 + if (ret) 95 + return NULL; 96 + return map.vaddr; 91 97 } 92 98 93 99 static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
+9 -2
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
··· 82 82 i915_gem_object_unpin_pages(obj); 83 83 } 84 84 85 - static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) 85 + static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) 86 86 { 87 87 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); 88 + void *vaddr; 88 89 89 - return i915_gem_object_pin_map(obj, I915_MAP_WB); 90 + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); 91 + if (IS_ERR(vaddr)) 92 + return PTR_ERR(vaddr); 93 + 94 + dma_buf_map_set_vaddr(map, vaddr); 95 + 96 + return 0; 90 97 } 91 98 92 99 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+9 -3
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
··· 82 82 struct drm_i915_gem_object *obj; 83 83 struct dma_buf *dmabuf; 84 84 void *obj_map, *dma_map; 85 + struct dma_buf_map map; 85 86 u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff }; 86 87 int err, i; 87 88 ··· 111 110 goto out_obj; 112 111 } 113 112 114 - dma_map = dma_buf_vmap(dmabuf); 113 + err = dma_buf_vmap(dmabuf, &map); 114 + dma_map = err ? NULL : map.vaddr; 115 115 if (!dma_map) { 116 116 pr_err("dma_buf_vmap failed\n"); 117 117 err = -ENOMEM; ··· 165 163 struct drm_i915_private *i915 = arg; 166 164 struct drm_i915_gem_object *obj; 167 165 struct dma_buf *dmabuf; 166 + struct dma_buf_map map; 168 167 void *ptr; 169 168 int err; 170 169 ··· 173 170 if (IS_ERR(dmabuf)) 174 171 return PTR_ERR(dmabuf); 175 172 176 - ptr = dma_buf_vmap(dmabuf); 173 + err = dma_buf_vmap(dmabuf, &map); 174 + ptr = err ? NULL : map.vaddr; 177 175 if (!ptr) { 178 176 pr_err("dma_buf_vmap failed\n"); 179 177 err = -ENOMEM; ··· 216 212 struct drm_i915_private *i915 = arg; 217 213 struct drm_i915_gem_object *obj; 218 214 struct dma_buf *dmabuf; 215 + struct dma_buf_map map; 219 216 void *ptr; 220 217 int err; 221 218 ··· 233 228 } 234 229 i915_gem_object_put(obj); 235 230 236 - ptr = dma_buf_vmap(dmabuf); 231 + err = dma_buf_vmap(dmabuf, &map); 232 + ptr = err ? NULL : map.vaddr; 237 233 if (!ptr) { 238 234 pr_err("dma_buf_vmap failed\n"); 239 235 err = -ENOMEM;
+8 -2
drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
··· 62 62 kfree(mock); 63 63 } 64 64 65 - static void *mock_dmabuf_vmap(struct dma_buf *dma_buf) 65 + static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map) 66 66 { 67 67 struct mock_dmabuf *mock = to_mock(dma_buf); 68 + void *vaddr; 68 69 69 - return vm_map_ram(mock->pages, mock->npages, 0); 70 + vaddr = vm_map_ram(mock->pages, mock->npages, 0); 71 + if (!vaddr) 72 + return -ENOMEM; 73 + dma_buf_map_set_vaddr(map, vaddr); 74 + 75 + return 0; 70 76 } 71 77 72 78 static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+12 -6
drivers/gpu/drm/tegra/gem.c
··· 132 132 static void *tegra_bo_mmap(struct host1x_bo *bo) 133 133 { 134 134 struct tegra_bo *obj = host1x_to_tegra_bo(bo); 135 + struct dma_buf_map map; 136 + int ret; 135 137 136 - if (obj->vaddr) 138 + if (obj->vaddr) { 137 139 return obj->vaddr; 138 - else if (obj->gem.import_attach) 139 - return dma_buf_vmap(obj->gem.import_attach->dmabuf); 140 - else 140 + } else if (obj->gem.import_attach) { 141 + ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map); 142 + return ret ? NULL : map.vaddr; 143 + } else { 141 144 return vmap(obj->pages, obj->num_pages, VM_MAP, 142 145 pgprot_writecombine(PAGE_KERNEL)); 146 + } 143 147 } 144 148 145 149 static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) ··· 653 649 return __tegra_gem_mmap(gem, vma); 654 650 } 655 651 656 - static void *tegra_gem_prime_vmap(struct dma_buf *buf) 652 + static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map) 657 653 { 658 654 struct drm_gem_object *gem = buf->priv; 659 655 struct tegra_bo *bo = to_tegra_bo(gem); 660 656 661 - return bo->vaddr; 657 + dma_buf_map_set_vaddr(map, bo->vaddr); 658 + 659 + return 0; 662 660 } 663 661 664 662 static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
+10 -4
drivers/media/common/videobuf2/videobuf2-dma-contig.c
··· 81 81 static void *vb2_dc_vaddr(void *buf_priv) 82 82 { 83 83 struct vb2_dc_buf *buf = buf_priv; 84 + struct dma_buf_map map; 85 + int ret; 84 86 85 - if (!buf->vaddr && buf->db_attach) 86 - buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf); 87 + if (!buf->vaddr && buf->db_attach) { 88 + ret = dma_buf_vmap(buf->db_attach->dmabuf, &map); 89 + buf->vaddr = ret ? NULL : map.vaddr; 90 + } 87 91 88 92 return buf->vaddr; 89 93 } ··· 369 365 return 0; 370 366 } 371 367 372 - static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf) 368 + static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map) 373 369 { 374 370 struct vb2_dc_buf *buf = dbuf->priv; 375 371 376 - return buf->vaddr; 372 + dma_buf_map_set_vaddr(map, buf->vaddr); 373 + 374 + return 0; 377 375 } 378 376 379 377 static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+11 -5
drivers/media/common/videobuf2/videobuf2-dma-sg.c
··· 300 300 static void *vb2_dma_sg_vaddr(void *buf_priv) 301 301 { 302 302 struct vb2_dma_sg_buf *buf = buf_priv; 303 + struct dma_buf_map map; 304 + int ret; 303 305 304 306 BUG_ON(!buf); 305 307 306 308 if (!buf->vaddr) { 307 - if (buf->db_attach) 308 - buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf); 309 - else 309 + if (buf->db_attach) { 310 + ret = dma_buf_vmap(buf->db_attach->dmabuf, &map); 311 + buf->vaddr = ret ? NULL : map.vaddr; 312 + } else { 310 313 buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1); 314 + } 311 315 } 312 316 313 317 /* add offset in case userptr is not page-aligned */ ··· 493 489 return 0; 494 490 } 495 491 496 - static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf) 492 + static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map) 497 493 { 498 494 struct vb2_dma_sg_buf *buf = dbuf->priv; 499 495 500 - return vb2_dma_sg_vaddr(buf); 496 + dma_buf_map_set_vaddr(map, buf->vaddr); 497 + 498 + return 0; 501 499 } 502 500 503 501 static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
+11 -4
drivers/media/common/videobuf2/videobuf2-vmalloc.c
··· 318 318 vb2_vmalloc_put(dbuf->priv); 319 319 } 320 320 321 - static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf) 321 + static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map) 322 322 { 323 323 struct vb2_vmalloc_buf *buf = dbuf->priv; 324 324 325 - return buf->vaddr; 325 + dma_buf_map_set_vaddr(map, buf->vaddr); 326 + 327 + return 0; 326 328 } 327 329 328 330 static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf, ··· 376 374 static int vb2_vmalloc_map_dmabuf(void *mem_priv) 377 375 { 378 376 struct vb2_vmalloc_buf *buf = mem_priv; 377 + struct dma_buf_map map; 378 + int ret; 379 379 380 - buf->vaddr = dma_buf_vmap(buf->dbuf); 380 + ret = dma_buf_vmap(buf->dbuf, &map); 381 + if (ret) 382 + return -EFAULT; 383 + buf->vaddr = map.vaddr; 381 384 382 - return buf->vaddr ? 0 : -EFAULT; 385 + return 0; 383 386 } 384 387 385 388 static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
+4 -2
drivers/misc/fastrpc.c
··· 581 581 kfree(a); 582 582 } 583 583 584 - static void *fastrpc_vmap(struct dma_buf *dmabuf) 584 + static int fastrpc_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) 585 585 { 586 586 struct fastrpc_buf *buf = dmabuf->priv; 587 587 588 - return buf->virt; 588 + dma_buf_map_set_vaddr(map, buf->virt); 589 + 590 + return 0; 589 591 } 590 592 591 593 static int fastrpc_mmap(struct dma_buf *dmabuf,
+2 -1
include/drm/drm_prime.h
··· 54 54 struct dma_buf_export_info; 55 55 struct dma_buf; 56 56 struct dma_buf_attachment; 57 + struct dma_buf_map; 57 58 58 59 enum dma_data_direction; 59 60 ··· 83 82 void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, 84 83 struct sg_table *sgt, 85 84 enum dma_data_direction dir); 86 - void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf); 85 + int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map); 87 86 void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr); 88 87 89 88 int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+13
include/linux/dma-buf-map.h
··· 23 23 bool is_iomem; 24 24 }; 25 25 26 + /** 27 + * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory 28 + * @map: The dma-buf mapping structure 29 + * @vaddr: A system-memory address 30 + * 31 + * Sets the address and clears the I/O-memory flag. 32 + */ 33 + static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr) 34 + { 35 + map->vaddr = vaddr; 36 + map->is_iomem = false; 37 + } 38 + 26 39 /* API transition helper */ 27 40 static inline bool dma_buf_map_is_vaddr(const struct dma_buf_map *map, const void *vaddr) 28 41 {
+3 -3
include/linux/dma-buf.h
··· 266 266 */ 267 267 int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); 268 268 269 - void *(*vmap)(struct dma_buf *); 269 + int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map); 270 270 void (*vunmap)(struct dma_buf *, void *vaddr); 271 271 }; 272 272 ··· 503 503 504 504 int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, 505 505 unsigned long); 506 - void *dma_buf_vmap(struct dma_buf *); 507 - void dma_buf_vunmap(struct dma_buf *, void *vaddr); 506 + int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map); 507 + void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr); 508 508 #endif /* __DMA_BUF_H__ */