
drm/gem: Use struct dma_buf_map in GEM vmap ops and convert GEM backends

This patch replaces the raw pointers used by the vmap/vunmap GEM object
functions with instances of struct dma_buf_map. The GEM backends are
converted as well. For most of them, this simply changes the return type.

TTM-based drivers now return information about the location of the memory,
either system or I/O memory. GEM VRAM helpers and qxl now use ttm_bo_vmap()
et al. Amdgpu, nouveau and radeon use drm_gem_ttm_vmap() et al. instead of
implementing their own vmap callbacks.
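
For illustration, a converted vmap callback now has roughly the following
shape (a minimal sketch; the mydrv_* names are hypothetical and the actual
mapping logic is driver-specific):

    static int mydrv_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
    {
            void *vaddr;

            vaddr = mydrv_map_pages(obj); /* hypothetical driver helper */
            if (!vaddr)
                    return -ENOMEM;

            dma_buf_map_set_vaddr(map, vaddr); /* mapping is in system memory */

            return 0;
    }

Backends whose buffers live in I/O memory set the address with
dma_buf_map_set_vaddr_iomem() instead, which tells callers how the mapping
has to be dereferenced.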

v7:
* init QXL cursor to mapped BO buffer (kernel test robot)
v5:
* update vkms after switch to shmem
v4:
* use ttm_bo_vmap(), drm_gem_ttm_vmap(), et al. (Daniel, Christian)
* fix a trailing { in drm_gem_vmap()
* remove several empty functions instead of converting them (Daniel)
* comment uses of raw pointers with a TODO (Daniel)
* TODO list: convert more helpers to use struct dma_buf_map

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Acked-by: Christian König <christian.koenig@amd.com>
Tested-by: Sam Ravnborg <sam@ravnborg.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20201103093015.1063-7-tzimmermann@suse.de

+359 -316
+18
Documentation/gpu/todo.rst
··· 450 450 451 451 Level: Intermediate 452 452 453 + Use struct dma_buf_map throughout codebase 454 + ------------------------------------------ 455 + 456 + Pointers to shared device memory are stored in struct dma_buf_map. Each 457 + instance knows whether it refers to system or I/O memory. Most of the DRM-wide 458 + interfaces have been converted to use struct dma_buf_map, but implementations 459 + often still use raw pointers. 460 + 461 + The task is to use struct dma_buf_map where it makes sense. 462 + 463 + * Memory managers should use struct dma_buf_map for dma-buf-imported buffers. 464 + * TTM might benefit from using struct dma_buf_map internally. 465 + * Framebuffer copying and blitting helpers should operate on struct dma_buf_map. 466 + 467 + Contact: Thomas Zimmermann <tzimmermann@suse.de>, Christian König, Daniel Vetter 468 + 469 + Level: Intermediate 470 + 453 471 454 472 Core refactorings 455 473 =================
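The new TODO entry builds on the basic contract of struct dma_buf_map: the is_iomem flag selects which member of the address union is valid. A minimal sketch of the intended access pattern (example_copy_to() is a made-up name; the dma_buf_map_memcpy_to() helper from <linux/dma-buf-map.h> provides this behavior):

    #include <linux/dma-buf-map.h>
    #include <linux/io.h>
    #include <linux/string.h>

    /* Copy into a mapping without knowing whether it is system or I/O memory. */
    static void example_copy_to(struct dma_buf_map *dst, const void *src, size_t len)
    {
            if (dst->is_iomem)
                    memcpy_toio(dst->vaddr_iomem, src, len);
            else
                    memcpy(dst->vaddr, src, len);
    }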
+2
drivers/gpu/drm/Kconfig
··· 232 232 select FW_LOADER 233 233 select DRM_KMS_HELPER 234 234 select DRM_TTM 235 + select DRM_TTM_HELPER 235 236 select POWER_SUPPLY 236 237 select HWMON 237 238 select BACKLIGHT_CLASS_DEVICE ··· 253 252 select DRM_KMS_HELPER 254 253 select DRM_SCHED 255 254 select DRM_TTM 255 + select DRM_TTM_HELPER 256 256 select POWER_SUPPLY 257 257 select HWMON 258 258 select BACKLIGHT_CLASS_DEVICE
-36
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
··· 42 42 #include <linux/pci-p2pdma.h> 43 43 44 44 /** 45 - * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation 46 - * @obj: GEM BO 47 - * 48 - * Sets up an in-kernel virtual mapping of the BO's memory. 49 - * 50 - * Returns: 51 - * The virtual address of the mapping or an error pointer. 52 - */ 53 - void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj) 54 - { 55 - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 56 - int ret; 57 - 58 - ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, 59 - &bo->dma_buf_vmap); 60 - if (ret) 61 - return ERR_PTR(ret); 62 - 63 - return bo->dma_buf_vmap.virtual; 64 - } 65 - 66 - /** 67 - * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation 68 - * @obj: GEM BO 69 - * @vaddr: Virtual address (unused) 70 - * 71 - * Tears down the in-kernel virtual mapping of the BO's memory. 72 - */ 73 - void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 74 - { 75 - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); 76 - 77 - ttm_bo_kunmap(&bo->dma_buf_vmap); 78 - } 79 - 80 - /** 81 45 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation 82 46 * @obj: GEM BO 83 47 * @vma: Virtual memory area
-2
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
··· 31 31 struct dma_buf *dma_buf); 32 32 bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, 33 33 struct amdgpu_bo *bo); 34 - void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj); 35 - void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 36 34 int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, 37 35 struct vm_area_struct *vma); 38 36
+3 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 33 33 34 34 #include <drm/amdgpu_drm.h> 35 35 #include <drm/drm_debugfs.h> 36 + #include <drm/drm_gem_ttm_helper.h> 36 37 37 38 #include "amdgpu.h" 38 39 #include "amdgpu_display.h" ··· 221 220 .open = amdgpu_gem_object_open, 222 221 .close = amdgpu_gem_object_close, 223 222 .export = amdgpu_gem_prime_export, 224 - .vmap = amdgpu_gem_prime_vmap, 225 - .vunmap = amdgpu_gem_prime_vunmap, 223 + .vmap = drm_gem_ttm_vmap, 224 + .vunmap = drm_gem_ttm_vunmap, 226 225 }; 227 226 228 227 /*
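drm_gem_ttm_vmap() and drm_gem_ttm_vunmap() are thin wrappers around the new TTM interface. The vmap side amounts to roughly the following (a sketch; see drivers/gpu/drm/drm_gem_ttm_helper.c for the actual implementation):

    int drm_gem_ttm_vmap(struct drm_gem_object *gem, struct dma_buf_map *map)
    {
            struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);

            return ttm_bo_vmap(bo, map); /* fills in *map with a system- or I/O-memory address */
    }

With TTM providing the struct dma_buf_map, amdgpu no longer needs its own callbacks or the per-BO ttm_bo_kmap_obj state, which the following hunks remove.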
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
··· 100 100 struct amdgpu_bo *parent; 101 101 struct amdgpu_bo *shadow; 102 102 103 - struct ttm_bo_kmap_obj dma_buf_vmap; 104 103 struct amdgpu_mn *mn; 105 104 106 105
+13 -14
drivers/gpu/drm/ast/ast_cursor.c
··· 39 39 40 40 for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) { 41 41 gbo = ast->cursor.gbo[i]; 42 - drm_gem_vram_vunmap(gbo, ast->cursor.vaddr[i]); 42 + drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]); 43 43 drm_gem_vram_unpin(gbo); 44 44 drm_gem_vram_put(gbo); 45 45 } ··· 60 60 struct drm_device *dev = &ast->base; 61 61 size_t size, i; 62 62 struct drm_gem_vram_object *gbo; 63 - void __iomem *vaddr; 63 + struct dma_buf_map map; 64 64 int ret; 65 65 66 66 size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); ··· 77 77 drm_gem_vram_put(gbo); 78 78 goto err_drm_gem_vram_put; 79 79 } 80 - vaddr = drm_gem_vram_vmap(gbo); 81 - if (IS_ERR(vaddr)) { 82 - ret = PTR_ERR(vaddr); 80 + ret = drm_gem_vram_vmap(gbo, &map); 81 + if (ret) { 83 82 drm_gem_vram_unpin(gbo); 84 83 drm_gem_vram_put(gbo); 85 84 goto err_drm_gem_vram_put; 86 85 } 87 86 88 87 ast->cursor.gbo[i] = gbo; 89 - ast->cursor.vaddr[i] = vaddr; 88 + ast->cursor.map[i] = map; 90 89 } 91 90 92 91 return drmm_add_action_or_reset(dev, ast_cursor_release, NULL); ··· 94 95 while (i) { 95 96 --i; 96 97 gbo = ast->cursor.gbo[i]; 97 - drm_gem_vram_vunmap(gbo, ast->cursor.vaddr[i]); 98 + drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]); 98 99 drm_gem_vram_unpin(gbo); 99 100 drm_gem_vram_put(gbo); 100 101 } ··· 169 170 { 170 171 struct drm_device *dev = &ast->base; 171 172 struct drm_gem_vram_object *gbo; 173 + struct dma_buf_map map; 172 174 int ret; 173 175 void *src; 174 176 void __iomem *dst; ··· 183 183 ret = drm_gem_vram_pin(gbo, 0); 184 184 if (ret) 185 185 return ret; 186 - src = drm_gem_vram_vmap(gbo); 187 - if (IS_ERR(src)) { 188 - ret = PTR_ERR(src); 186 + ret = drm_gem_vram_vmap(gbo, &map); 187 + if (ret) 189 188 goto err_drm_gem_vram_unpin; 190 - } 189 + src = map.vaddr; /* TODO: Use mapping abstraction properly */ 191 190 192 - dst = ast->cursor.vaddr[ast->cursor.next_index]; 191 + dst = ast->cursor.map[ast->cursor.next_index].vaddr_iomem; 193 192 194 193 /* do data transfer to cursor BO */ 195 194 update_cursor_image(dst, src, fb->width, fb->height); 196 195 197 - drm_gem_vram_vunmap(gbo, src); 196 + drm_gem_vram_vunmap(gbo, &map); 198 197 drm_gem_vram_unpin(gbo); 199 198 200 199 return 0; ··· 256 257 u8 __iomem *sig; 257 258 u8 jreg; 258 259 259 - dst = ast->cursor.vaddr[ast->cursor.next_index]; 260 + dst = ast->cursor.map[ast->cursor.next_index].vaddr; 260 261 261 262 sig = dst + AST_HWC_SIZE; 262 263 writel(x, sig + AST_HWC_SIGNATURE_X);
+4 -3
drivers/gpu/drm/ast/ast_drv.h
··· 28 28 #ifndef __AST_DRV_H__ 29 29 #define __AST_DRV_H__ 30 30 31 - #include <linux/types.h> 32 - #include <linux/io.h> 31 + #include <linux/dma-buf-map.h> 33 32 #include <linux/i2c.h> 34 33 #include <linux/i2c-algo-bit.h> 34 + #include <linux/io.h> 35 + #include <linux/types.h> 35 36 36 37 #include <drm/drm_connector.h> 37 38 #include <drm/drm_crtc.h> ··· 132 131 133 132 struct { 134 133 struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM]; 135 - void __iomem *vaddr[AST_DEFAULT_HWC_NUM]; 134 + struct dma_buf_map map[AST_DEFAULT_HWC_NUM]; 136 135 unsigned int next_index; 137 136 } cursor; 138 137
+14 -9
drivers/gpu/drm/drm_gem.c
··· 36 36 #include <linux/pagemap.h> 37 37 #include <linux/shmem_fs.h> 38 38 #include <linux/dma-buf.h> 39 + #include <linux/dma-buf-map.h> 39 40 #include <linux/mem_encrypt.h> 40 41 #include <linux/pagevec.h> 41 42 ··· 1208 1207 1209 1208 void *drm_gem_vmap(struct drm_gem_object *obj) 1210 1209 { 1211 - void *vaddr; 1210 + struct dma_buf_map map; 1211 + int ret; 1212 1212 1213 - if (obj->funcs->vmap) 1214 - vaddr = obj->funcs->vmap(obj); 1215 - else 1216 - vaddr = ERR_PTR(-EOPNOTSUPP); 1213 + if (!obj->funcs->vmap) 1214 + return ERR_PTR(-EOPNOTSUPP); 1217 1215 1218 - if (!vaddr) 1219 - vaddr = ERR_PTR(-ENOMEM); 1216 + ret = obj->funcs->vmap(obj, &map); 1217 + if (ret) 1218 + return ERR_PTR(ret); 1219 + else if (dma_buf_map_is_null(&map)) 1220 + return ERR_PTR(-ENOMEM); 1220 1221 1221 - return vaddr; 1222 + return map.vaddr; 1222 1223 } 1223 1224 1224 1225 void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr) 1225 1226 { 1227 + struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(vaddr); 1228 + 1226 1229 if (!vaddr) 1227 1230 return; 1228 1231 1229 1232 if (obj->funcs->vunmap) 1230 - obj->funcs->vunmap(obj, vaddr); 1233 + obj->funcs->vunmap(obj, &map); 1231 1234 } 1232 1235 1233 1236 /**
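Note that drm_gem_vmap() and drm_gem_vunmap() keep their raw-pointer interface and only translate to and from struct dma_buf_map internally, so existing callers remain unchanged. For example (a sketch with a hypothetical caller, assuming a system-memory mapping):

    static int example_access(struct drm_gem_object *obj)
    {
            void *vaddr = drm_gem_vmap(obj);

            if (IS_ERR(vaddr))
                    return PTR_ERR(vaddr);

            /* ... access the buffer through vaddr ... */

            drm_gem_vunmap(obj, vaddr);

            return 0;
    }

Because drm_gem_vmap() hands back only map.vaddr, it cannot represent I/O-memory mappings; that is one of the remaining raw-pointer uses the TODO entry above refers to.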
+7 -3
drivers/gpu/drm/drm_gem_cma_helper.c
··· 519 519 * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual 520 520 * address space 521 521 * @obj: GEM object 522 + * @map: Returns the kernel virtual address of the CMA GEM object's backing 523 + * store. 522 524 * 523 525 * This function maps a buffer exported via DRM PRIME into the kernel's 524 526 * virtual address space. Since the CMA buffers are already mapped into the ··· 529 527 * driver's &drm_gem_object_funcs.vmap callback. 530 528 * 531 529 * Returns: 532 - * The kernel virtual address of the CMA GEM object's backing store. 530 + * 0 on success, or a negative error code otherwise. 533 531 */ 534 - void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj) 532 + int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) 535 533 { 536 534 struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); 537 535 538 - return cma_obj->vaddr; 536 + dma_buf_map_set_vaddr(map, cma_obj->vaddr); 537 + 538 + return 0; 539 539 } 540 540 EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap); 541 541
+29 -19
drivers/gpu/drm/drm_gem_shmem_helper.c
··· 258 258 } 259 259 EXPORT_SYMBOL(drm_gem_shmem_unpin); 260 260 261 - static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem) 261 + static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map) 262 262 { 263 263 struct drm_gem_object *obj = &shmem->base; 264 - struct dma_buf_map map; 265 264 int ret = 0; 266 265 267 - if (shmem->vmap_use_count++ > 0) 268 - return shmem->vaddr; 266 + if (shmem->vmap_use_count++ > 0) { 267 + dma_buf_map_set_vaddr(map, shmem->vaddr); 268 + return 0; 269 + } 269 270 270 271 if (obj->import_attach) { 271 - ret = dma_buf_vmap(obj->import_attach->dmabuf, &map); 272 - if (!ret) 273 - shmem->vaddr = map.vaddr; 272 + ret = dma_buf_vmap(obj->import_attach->dmabuf, map); 273 + if (!ret) { 274 + if (WARN_ON(map->is_iomem)) { 275 + ret = -EIO; 276 + goto err_put_pages; 277 + } 278 + shmem->vaddr = map->vaddr; 279 + } 274 280 } else { 275 281 pgprot_t prot = PAGE_KERNEL; 276 282 ··· 290 284 VM_MAP, prot); 291 285 if (!shmem->vaddr) 292 286 ret = -ENOMEM; 287 + else 288 + dma_buf_map_set_vaddr(map, shmem->vaddr); 293 289 } 294 290 295 291 if (ret) { ··· 299 291 goto err_put_pages; 300 292 } 301 293 302 - return shmem->vaddr; 294 + return 0; 303 295 304 296 err_put_pages: 305 297 if (!obj->import_attach) ··· 307 299 err_zero_use: 308 300 shmem->vmap_use_count = 0; 309 301 310 - return ERR_PTR(ret); 302 + return ret; 311 303 } 312 304 313 305 /* 314 306 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object 315 307 * @shmem: shmem GEM object 308 + * @map: Returns the kernel virtual address of the SHMEM GEM object's backing 309 + * store. 316 310 * 317 311 * This function makes sure that a contiguous kernel virtual address mapping 318 312 * exists for the buffer backing the shmem GEM object. ··· 328 318 * Returns: 329 319 * 0 on success or a negative error code on failure. 330 320 */ 331 - void *drm_gem_shmem_vmap(struct drm_gem_object *obj) 321 + int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) 332 322 { 333 323 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); 334 - void *vaddr; 335 324 int ret; 336 325 337 326 ret = mutex_lock_interruptible(&shmem->vmap_lock); 338 327 if (ret) 339 - return ERR_PTR(ret); 340 - vaddr = drm_gem_shmem_vmap_locked(shmem); 328 + return ret; 329 + ret = drm_gem_shmem_vmap_locked(shmem, map); 341 330 mutex_unlock(&shmem->vmap_lock); 342 331 343 - return vaddr; 332 + return ret; 344 333 } 345 334 EXPORT_SYMBOL(drm_gem_shmem_vmap); 346 335 347 - static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem) 336 + static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, 337 + struct dma_buf_map *map) 348 338 { 349 339 struct drm_gem_object *obj = &shmem->base; 350 - struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(shmem->vaddr); 351 340 352 341 if (WARN_ON_ONCE(!shmem->vmap_use_count)) 353 342 return; ··· 355 346 return; 356 347 357 348 if (obj->import_attach) 358 - dma_buf_vunmap(obj->import_attach->dmabuf, &map); 349 + dma_buf_vunmap(obj->import_attach->dmabuf, map); 359 350 else 360 351 vunmap(shmem->vaddr); 361 352 ··· 366 357 /* 367 358 * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object 368 359 * @shmem: shmem GEM object 360 + * @map: Kernel virtual address where the SHMEM GEM object was mapped 369 361 * 370 362 * This function cleans up a kernel virtual address mapping acquired by 371 363 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to ··· 376 366 * also be called by drivers directly, in which case it will hide the 377 367 * differences between dma-buf imported and natively allocated objects. 378 368 */ 379 - void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr) 369 + void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) 380 370 { 381 371 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); 382 372 383 373 mutex_lock(&shmem->vmap_lock); 384 - drm_gem_shmem_vunmap_locked(shmem); 374 + drm_gem_shmem_vunmap_locked(shmem, map); 385 375 mutex_unlock(&shmem->vmap_lock); 386 376 } 387 377 EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+54 -53
drivers/gpu/drm/drm_gem_vram_helper.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-or-later 2 2 3 + #include <linux/dma-buf-map.h> 3 4 #include <linux/module.h> 4 5 5 6 #include <drm/drm_debugfs.h> ··· 113 112 * up; only release the GEM object. 114 113 */ 115 114 116 - WARN_ON(gbo->kmap_use_count); 117 - WARN_ON(gbo->kmap.virtual); 115 + WARN_ON(gbo->vmap_use_count); 116 + WARN_ON(dma_buf_map_is_set(&gbo->map)); 118 117 119 118 drm_gem_object_release(&gbo->bo.base); 120 119 } ··· 379 378 } 380 379 EXPORT_SYMBOL(drm_gem_vram_unpin); 381 380 382 - static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo) 381 + static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo, 382 + struct dma_buf_map *map) 383 383 { 384 384 int ret; 385 - struct ttm_bo_kmap_obj *kmap = &gbo->kmap; 386 - bool is_iomem; 387 385 388 - if (gbo->kmap_use_count > 0) 386 + if (gbo->vmap_use_count > 0) 389 387 goto out; 390 388 391 - ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap); 389 + ret = ttm_bo_vmap(&gbo->bo, &gbo->map); 392 390 if (ret) 393 - return ERR_PTR(ret); 391 + return ret; 394 392 395 393 out: 396 - ++gbo->kmap_use_count; 397 - return ttm_kmap_obj_virtual(kmap, &is_iomem); 394 + ++gbo->vmap_use_count; 395 + *map = gbo->map; 396 + 397 + return 0; 398 398 } 399 399 400 - static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo) 400 + static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo, 401 + struct dma_buf_map *map) 401 402 { 402 - if (WARN_ON_ONCE(!gbo->kmap_use_count)) 403 + struct drm_device *dev = gbo->bo.base.dev; 404 + 405 + if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count)) 403 406 return; 404 - if (--gbo->kmap_use_count > 0) 407 + 408 + if (drm_WARN_ON_ONCE(dev, !dma_buf_map_is_equal(&gbo->map, map))) 409 + return; /* BUG: map not mapped from this BO */ 410 + 411 + if (--gbo->vmap_use_count > 0) 405 412 return; 406 413 407 414 /* ··· 423 414 /** 424 415 * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address 425 416 * space 426 - * @gbo: The GEM VRAM object to map 417 + * @gbo: The GEM VRAM object to map 418 + * @map: Returns the kernel virtual address of the VRAM GEM object's backing 419 + * store. 427 420 * 428 421 * The vmap function pins a GEM VRAM object to its current location, either 429 422 * system or video memory, and maps its buffer into kernel address space. ··· 434 423 * unmap and unpin the GEM VRAM object. 435 424 * 436 425 * Returns: 437 - * The buffer's virtual address on success, or 438 - * an ERR_PTR()-encoded error code otherwise. 426 + * 0 on success, or a negative error code otherwise. 439 427 */ 440 - void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo) 428 + int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map) 441 429 { 442 430 int ret; 443 - void *base; 444 431 445 432 ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); 446 433 if (ret) 447 - return ERR_PTR(ret); 434 + return ret; 448 435 449 436 ret = drm_gem_vram_pin_locked(gbo, 0); 450 437 if (ret) 451 438 goto err_ttm_bo_unreserve; 452 - base = drm_gem_vram_kmap_locked(gbo); 453 - if (IS_ERR(base)) { 454 - ret = PTR_ERR(base); 439 + ret = drm_gem_vram_kmap_locked(gbo, map); 440 + if (ret) 455 441 goto err_drm_gem_vram_unpin_locked; 456 - } 457 442 458 443 ttm_bo_unreserve(&gbo->bo); 459 444 460 - return base; 445 + return 0; 461 446 462 447 err_drm_gem_vram_unpin_locked: 463 448 drm_gem_vram_unpin_locked(gbo); 464 449 err_ttm_bo_unreserve: 465 450 ttm_bo_unreserve(&gbo->bo); 466 - return ERR_PTR(ret); 451 + return ret; 467 452 } 468 453 EXPORT_SYMBOL(drm_gem_vram_vmap); 469 454 470 455 /** 471 456 * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object 472 - * @gbo: The GEM VRAM object to unmap 473 - * @vaddr: The mapping's base address as returned by drm_gem_vram_vmap() 457 + * @gbo: The GEM VRAM object to unmap 458 + * @map: Kernel virtual address where the VRAM GEM object was mapped 474 459 * 475 460 * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See 476 461 * the documentation for drm_gem_vram_vmap() for more information. 477 462 */ 478 - void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr) 463 + void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map) 479 464 { 480 465 int ret; 481 466 ··· 479 472 if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret)) 480 473 return; 481 474 482 - drm_gem_vram_kunmap_locked(gbo); 475 + drm_gem_vram_kunmap_locked(gbo, map); 483 476 drm_gem_vram_unpin_locked(gbo); 484 477 485 478 ttm_bo_unreserve(&gbo->bo); ··· 570 563 bool evict, 571 564 struct ttm_resource *new_mem) 572 565 { 573 - struct ttm_bo_kmap_obj *kmap = &gbo->kmap; 566 + struct ttm_buffer_object *bo = &gbo->bo; 567 + struct drm_device *dev = bo->base.dev; 574 568 575 - if (WARN_ON_ONCE(gbo->kmap_use_count)) 569 + if (drm_WARN_ON_ONCE(dev, gbo->vmap_use_count)) 576 570 return; 577 571 578 - if (!kmap->virtual) 579 - return; 580 - ttm_bo_kunmap(kmap); 581 - kmap->virtual = NULL; 572 + ttm_bo_vunmap(bo, &gbo->map); 582 573 } 583 574 584 575 static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo, ··· 842 837 } 843 838 844 839 /** 845 - * drm_gem_vram_object_vmap() - \ 846 - Implements &struct drm_gem_object_funcs.vmap 847 - * @gem: The GEM object to map 840 + * drm_gem_vram_object_vmap() - 841 + * Implements &struct drm_gem_object_funcs.vmap 842 + * @gem: The GEM object to map 843 + * @map: Returns the kernel virtual address of the VRAM GEM object's backing 844 + * store. 848 845 * 849 846 * Returns: 850 - * The buffers virtual address on success, or 851 - * NULL otherwise. 847 + * 0 on success, or a negative error code otherwise. 852 848 */ 853 - static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem) 849 + static int drm_gem_vram_object_vmap(struct drm_gem_object *gem, struct dma_buf_map *map) 854 850 { 855 851 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); 856 - void *base; 857 852 858 - base = drm_gem_vram_vmap(gbo); 859 - if (IS_ERR(base)) 860 - return NULL; 861 - return base; 853 + return drm_gem_vram_vmap(gbo, map); 862 854 } 863 855 864 856 /** 865 - * drm_gem_vram_object_vunmap() - \ 866 - Implements &struct drm_gem_object_funcs.vunmap 867 - * @gem: The GEM object to unmap 868 - * @vaddr: The mapping's base address 857 + * drm_gem_vram_object_vunmap() - 858 + * Implements &struct drm_gem_object_funcs.vunmap 859 + * @gem: The GEM object to unmap 860 + * @map: Kernel virtual address where the VRAM GEM object was mapped 869 861 */ 870 - static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, 871 - void *vaddr) 862 + static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, struct dma_buf_map *map) 872 863 { 873 864 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); 874 865 875 866 drm_gem_vram_vunmap(gbo, map); 876 867 } 877 868 878 869 /*
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_drv.h
··· 51 51 int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma); 52 52 int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset); 53 53 struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj); 54 - void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj); 54 + int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); 55 55 int etnaviv_gem_prime_mmap(struct drm_gem_object *obj, 56 56 struct vm_area_struct *vma); 57 57 struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
+9 -2
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
··· 22 22 return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages); 23 23 } 24 24 25 - void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj) 25 + int etnaviv_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) 26 26 { 27 - return etnaviv_gem_vmap(obj); 27 + void *vaddr; 28 + 29 + vaddr = etnaviv_gem_vmap(obj); 30 + if (!vaddr) 31 + return -ENOMEM; 32 + dma_buf_map_set_vaddr(map, vaddr); 33 + 34 + return 0; 28 35 } 29 36 30 37 int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
+3 -3
drivers/gpu/drm/lima/lima_gem.c
··· 182 182 return drm_gem_shmem_pin(obj); 183 183 } 184 184 185 - static void *lima_gem_vmap(struct drm_gem_object *obj) 185 + static int lima_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) 186 186 { 187 187 struct lima_bo *bo = to_lima_bo(obj); 188 188 189 189 if (bo->heap_size) 190 - return ERR_PTR(-EINVAL); 190 + return -EINVAL; 191 191 192 - return drm_gem_shmem_vmap(obj); 192 + return drm_gem_shmem_vmap(obj, map); 193 193 } 194 194 195 195 static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+7 -4
drivers/gpu/drm/lima/lima_sched.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 2 /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */ 3 3 4 + #include <linux/dma-buf-map.h> 4 5 #include <linux/kthread.h> 5 6 #include <linux/slab.h> 6 7 #include <linux/vmalloc.h> ··· 304 303 struct lima_dump_chunk_buffer *buffer_chunk; 305 304 u32 size, task_size, mem_size; 306 305 int i; 306 + struct dma_buf_map map; 307 + int ret; 307 308 308 309 mutex_lock(&dev->error_task_list_lock); 309 310 ··· 391 388 } else { 392 389 buffer_chunk->size = lima_bo_size(bo); 393 390 394 - data = drm_gem_shmem_vmap(&bo->base.base); 395 - if (IS_ERR_OR_NULL(data)) { 391 + ret = drm_gem_shmem_vmap(&bo->base.base, &map); 392 + if (ret) { 396 393 kvfree(et); 397 394 goto out; 398 395 } 399 396 400 - memcpy(buffer_chunk + 1, data, buffer_chunk->size); 397 + memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size); 401 398 402 - drm_gem_shmem_vunmap(&bo->base.base, data); 399 + drm_gem_shmem_vunmap(&bo->base.base, &map); 403 400 } 404 401 405 402 buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
+7 -3
drivers/gpu/drm/mgag200/mgag200_mode.c
··· 9 9 */ 10 10 11 11 #include <linux/delay.h> 12 + #include <linux/dma-buf-map.h> 12 13 13 14 #include <drm/drm_atomic_helper.h> 14 15 #include <drm/drm_atomic_state_helper.h> ··· 1552 1551 struct drm_rect *clip) 1553 1552 { 1554 1553 struct drm_device *dev = &mdev->base; 1554 + struct dma_buf_map map; 1555 1555 void *vmap; 1556 + int ret; 1556 1557 1557 - vmap = drm_gem_shmem_vmap(fb->obj[0]); 1558 - if (drm_WARN_ON(dev, !vmap)) 1558 + ret = drm_gem_shmem_vmap(fb->obj[0], &map); 1559 + if (drm_WARN_ON(dev, ret)) 1559 1560 return; /* BUG: SHMEM BO should always be vmapped */ 1561 + vmap = map.vaddr; /* TODO: Use mapping abstraction properly */ 1560 1562 1561 1563 drm_fb_memcpy_dstclip(mdev->vram, vmap, fb, clip); 1562 1564 1563 - drm_gem_shmem_vunmap(fb->obj[0], vmap); 1565 + drm_gem_shmem_vunmap(fb->obj[0], &map); 1564 1566 1565 1567 /* Always scanout image at VRAM offset 0 */ 1566 1568 mgag200_set_startadd(mdev, (u32)0);
+1
drivers/gpu/drm/nouveau/Kconfig
··· 6 6 select FW_LOADER 7 7 select DRM_KMS_HELPER 8 8 select DRM_TTM 9 + select DRM_TTM_HELPER 9 10 select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT 10 11 select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT 11 12 select X86_PLATFORM_DEVICES if ACPI && X86
-2
drivers/gpu/drm/nouveau/nouveau_bo.h
··· 39 39 unsigned mode; 40 40 41 41 struct nouveau_drm_tile *tile; 42 - 43 - struct ttm_bo_kmap_obj dma_buf_vmap; 44 42 }; 45 43 46 44 static inline struct nouveau_bo *
+4 -2
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 24 24 * 25 25 */ 26 26 27 + #include <drm/drm_gem_ttm_helper.h> 28 + 27 29 #include "nouveau_drv.h" 28 30 #include "nouveau_dma.h" 29 31 #include "nouveau_fence.h" ··· 178 176 .pin = nouveau_gem_prime_pin, 179 177 .unpin = nouveau_gem_prime_unpin, 180 178 .get_sg_table = nouveau_gem_prime_get_sg_table, 181 - .vmap = nouveau_gem_prime_vmap, 182 - .vunmap = nouveau_gem_prime_vunmap, 179 + .vmap = drm_gem_ttm_vmap, 180 + .vunmap = drm_gem_ttm_vunmap, 183 181 }; 184 182 185 183 int
-2
drivers/gpu/drm/nouveau/nouveau_gem.h
··· 37 37 extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *); 38 38 extern struct drm_gem_object *nouveau_gem_prime_import_sg_table( 39 39 struct drm_device *, struct dma_buf_attachment *, struct sg_table *); 40 - extern void *nouveau_gem_prime_vmap(struct drm_gem_object *); 41 - extern void nouveau_gem_prime_vunmap(struct drm_gem_object *, void *); 42 40 43 41 #endif
-20
drivers/gpu/drm/nouveau/nouveau_prime.c
··· 35 35 return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages); 36 36 } 37 37 38 - void *nouveau_gem_prime_vmap(struct drm_gem_object *obj) 39 - { 40 - struct nouveau_bo *nvbo = nouveau_gem_object(obj); 41 - int ret; 42 - 43 - ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages, 44 - &nvbo->dma_buf_vmap); 45 - if (ret) 46 - return ERR_PTR(ret); 47 - 48 - return nvbo->dma_buf_vmap.virtual; 49 - } 50 - 51 - void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 52 - { 53 - struct nouveau_bo *nvbo = nouveau_gem_object(obj); 54 - 55 - ttm_bo_kunmap(&nvbo->dma_buf_vmap); 56 - } 57 - 58 38 struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, 59 39 struct dma_buf_attachment *attach, 60 40 struct sg_table *sg)
+8 -6
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
··· 5 5 #include <drm/drm_gem_shmem_helper.h> 6 6 #include <drm/panfrost_drm.h> 7 7 #include <linux/completion.h> 8 + #include <linux/dma-buf-map.h> 8 9 #include <linux/iopoll.h> 9 10 #include <linux/pm_runtime.h> 10 11 #include <linux/slab.h> ··· 73 72 { 74 73 struct panfrost_file_priv *user = file_priv->driver_priv; 75 74 struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; 75 + struct dma_buf_map map; 76 76 struct drm_gem_shmem_object *bo; 77 77 u32 cfg, as; 78 78 int ret; ··· 105 103 goto err_close_bo; 106 104 } 107 105 108 - perfcnt->buf = drm_gem_shmem_vmap(&bo->base); 109 - if (IS_ERR(perfcnt->buf)) { 110 - ret = PTR_ERR(perfcnt->buf); 106 + ret = drm_gem_shmem_vmap(&bo->base, &map); 107 + if (ret) 111 108 goto err_put_mapping; 112 - } 109 + perfcnt->buf = map.vaddr; 113 110 114 111 /* 115 112 * Invalidate the cache and clear the counters to start from a fresh ··· 164 163 return 0; 165 164 166 165 err_vunmap: 167 - drm_gem_shmem_vunmap(&bo->base, perfcnt->buf); 166 + drm_gem_shmem_vunmap(&bo->base, &map); 168 167 err_put_mapping: 169 168 panfrost_gem_mapping_put(perfcnt->mapping); 170 169 err_close_bo: ··· 181 180 { 182 181 struct panfrost_file_priv *user = file_priv->driver_priv; 183 182 struct panfrost_perfcnt *perfcnt = pfdev->perfcnt; 183 + struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(perfcnt->buf); 184 184 185 185 if (user != perfcnt->user) 186 186 return -EINVAL; ··· 194 192 GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF)); 195 193 196 194 perfcnt->user = NULL; 197 - drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf); 195 + drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, &map); 198 196 perfcnt->buf = NULL; 199 197 panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); 200 198 panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
+12 -3
drivers/gpu/drm/qxl/qxl_display.c
··· 25 25 26 26 #include <linux/crc32.h> 27 27 #include <linux/delay.h> 28 + #include <linux/dma-buf-map.h> 28 29 29 30 #include <drm/drm_drv.h> 30 31 #include <drm/drm_atomic.h> ··· 582 581 struct drm_gem_object *obj; 583 582 struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL; 584 583 int ret; 584 + struct dma_buf_map user_map; 585 + struct dma_buf_map cursor_map; 585 586 void *user_ptr; 586 587 int size = 64*64*4; 587 588 ··· 598 595 user_bo = gem_to_qxl_bo(obj); 599 596 600 597 /* pinning is done in the prepare/cleanup framevbuffer */ 601 - ret = qxl_bo_kmap(user_bo, &user_ptr); 598 + ret = qxl_bo_kmap(user_bo, &user_map); 602 599 if (ret) 603 600 goto out_free_release; 601 + user_ptr = user_map.vaddr; /* TODO: Use mapping abstraction properly */ 604 602 605 603 ret = qxl_alloc_bo_reserved(qdev, release, 606 604 sizeof(struct qxl_cursor) + size, ··· 617 613 if (ret) 618 614 goto out_unpin; 619 615 620 - ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); 616 + ret = qxl_bo_kmap(cursor_bo, &cursor_map); 621 617 if (ret) 622 618 goto out_backoff; 619 + if (cursor_map.is_iomem) /* TODO: Use mapping abstraction properly */ 620 + cursor = (struct qxl_cursor __force *)cursor_map.vaddr_iomem; 621 + else 622 + cursor = (struct qxl_cursor *)cursor_map.vaddr; 623 623 624 624 cursor->header.unique = 0; 625 625 cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; ··· 1141 1133 { 1142 1134 int ret; 1143 1135 struct drm_gem_object *gobj; 1136 + struct dma_buf_map map; 1144 1137 int monitors_config_size = sizeof(struct qxl_monitors_config) + 1145 1138 qxl_num_crtc * sizeof(struct qxl_head); 1146 1139 ··· 1158 1149 if (ret) 1159 1150 return ret; 1160 1151 1161 - qxl_bo_kmap(qdev->monitors_config_bo, NULL); 1152 + qxl_bo_kmap(qdev->monitors_config_bo, &map); 1162 1153 1163 1154 qdev->monitors_config = qdev->monitors_config_bo->kptr; 1164 1155 qdev->ram_header->monitors_config =
+10 -4
drivers/gpu/drm/qxl/qxl_draw.c
··· 20 20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 21 */ 22 22 23 + #include <linux/dma-buf-map.h> 24 + 23 25 #include <drm/drm_fourcc.h> 24 26 25 27 #include "qxl_drv.h" ··· 44 42 unsigned int num_clips, 45 43 struct qxl_bo *clips_bo) 46 44 { 45 + struct dma_buf_map map; 47 46 struct qxl_clip_rects *dev_clips; 48 47 int ret; 49 48 50 - ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips); 51 - if (ret) { 49 + ret = qxl_bo_kmap(clips_bo, &map); 50 + if (ret) 52 51 return NULL; 53 - } 52 + dev_clips = map.vaddr; /* TODO: Use mapping abstraction properly */ 53 + 54 54 dev_clips->num_rects = num_clips; 55 55 dev_clips->chunk.next_chunk = 0; 56 56 dev_clips->chunk.prev_chunk = 0; ··· 146 142 int stride = fb->pitches[0]; 147 143 /* depth is not actually interesting, we don't mask with it */ 148 144 int depth = fb->format->cpp[0] * 8; 145 + struct dma_buf_map surface_map; 149 146 uint8_t *surface_base; 150 147 struct qxl_release *release; 151 148 struct qxl_bo *clips_bo; ··· 202 197 if (ret) 203 198 goto out_release_backoff; 204 199 205 - ret = qxl_bo_kmap(bo, (void **)&surface_base); 200 + ret = qxl_bo_kmap(bo, &surface_map); 206 201 if (ret) 207 202 goto out_release_backoff; 203 + surface_base = surface_map.vaddr; /* TODO: Use mapping abstraction properly */ 208 204 209 205 ret = qxl_image_init(qdev, release, dimage, surface_base, 210 206 left - dumb_shadow_offset,
+7 -4
drivers/gpu/drm/qxl/qxl_drv.h
··· 30 30 * Definitions taken from spice-protocol, plus kernel driver specific bits. 31 31 */ 32 32 33 + #include <linux/dma-buf-map.h> 33 34 #include <linux/dma-fence.h> 34 35 #include <linux/firmware.h> 35 36 #include <linux/platform_device.h> ··· 50 49 #include <drm/ttm/ttm_placement.h> 51 50 52 51 #include "qxl_dev.h" 52 + 53 + struct dma_buf_map; 53 54 54 55 #define DRIVER_AUTHOR "Dave Airlie" 55 56 ··· 82 79 /* Protected by tbo.reserved */ 83 80 struct ttm_place placements[3]; 84 81 struct ttm_placement placement; 85 - struct ttm_bo_kmap_obj kmap; 82 + struct dma_buf_map map; 86 83 void *kptr; 87 84 unsigned int map_count; 88 85 int type; ··· 338 335 void qxl_gem_object_close(struct drm_gem_object *obj, 339 336 struct drm_file *file_priv); 340 337 void qxl_bo_force_delete(struct qxl_device *qdev); 341 - int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); 342 338 343 339 /* qxl_dumb.c */ 344 340 int qxl_mode_dumb_create(struct drm_file *file_priv, ··· 447 445 struct drm_gem_object *qxl_gem_prime_import_sg_table( 448 446 struct drm_device *dev, struct dma_buf_attachment *attach, 449 447 struct sg_table *sgt); 450 - void *qxl_gem_prime_vmap(struct drm_gem_object *obj); 451 - void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 448 + int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); 449 + void qxl_gem_prime_vunmap(struct drm_gem_object *obj, 450 + struct dma_buf_map *map); 452 451 int qxl_gem_prime_mmap(struct drm_gem_object *obj, 453 452 struct vm_area_struct *vma); 454 453
+19 -12
drivers/gpu/drm/qxl/qxl_object.c
··· 23 23 * Alon Levy 24 24 */ 25 25 26 + #include <linux/dma-buf-map.h> 27 + #include <linux/io-mapping.h> 28 + 26 29 #include "qxl_drv.h" 27 30 #include "qxl_object.h" 28 31 29 - #include <linux/io-mapping.h> 30 32 static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo) 31 33 { 32 34 struct qxl_bo *bo; ··· 154 152 return 0; 155 153 } 156 154 157 - int qxl_bo_kmap(struct qxl_bo *bo, void **ptr) 155 + int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map) 158 156 { 159 - bool is_iomem; 160 157 int r; 161 158 162 159 if (bo->kptr) { 163 - if (ptr) 164 - *ptr = bo->kptr; 165 160 bo->map_count++; 166 - return 0; 161 + goto out; 167 162 } 168 - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); 163 + r = ttm_bo_vmap(&bo->tbo, &bo->map); 169 164 if (r) 170 165 return r; 171 - bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); 172 - if (ptr) 173 - *ptr = bo->kptr; 174 166 bo->map_count = 1; 167 + 168 + /* TODO: Remove kptr in favor of map everywhere. */ 169 + if (bo->map.is_iomem) 170 + bo->kptr = (void *)bo->map.vaddr_iomem; 171 + else 172 + bo->kptr = bo->map.vaddr; 173 + 174 + out: 175 + *map = bo->map; 175 176 return 0; 176 177 } 177 178 ··· 185 180 void *rptr; 186 181 int ret; 187 182 struct io_mapping *map; 183 + struct dma_buf_map bo_map; 188 184 189 185 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 190 186 map = qdev->vram_mapping; ··· 202 196 return rptr; 203 197 } 204 198 205 - ret = qxl_bo_kmap(bo, &rptr); 199 + ret = qxl_bo_kmap(bo, &bo_map); 206 200 if (ret) 207 201 return NULL; 202 + rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */ 208 203 209 204 rptr += page_offset * PAGE_SIZE; 210 205 return rptr; ··· 219 212 if (bo->map_count > 0) 220 213 return; 221 214 bo->kptr = NULL; 222 - ttm_bo_kunmap(&bo->kmap); 215 + ttm_bo_vunmap(&bo->tbo, &bo->map); 223 216 } 224 217 225 218 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
+1 -1
drivers/gpu/drm/qxl/qxl_object.h
··· 63 63 bool kernel, bool pinned, u32 domain, 64 64 struct qxl_surface *surf, 65 65 struct qxl_bo **bo_ptr); 66 - extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); 66 + extern int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map); 67 67 extern void qxl_bo_kunmap(struct qxl_bo *bo); 68 68 void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset); 69 69 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
+6 -6
drivers/gpu/drm/qxl/qxl_prime.c
··· 54 54 return ERR_PTR(-ENOSYS); 55 55 } 56 56 57 - void *qxl_gem_prime_vmap(struct drm_gem_object *obj) 57 + int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) 58 58 { 59 59 struct qxl_bo *bo = gem_to_qxl_bo(obj); 60 - void *ptr; 61 60 int ret; 62 61 63 - ret = qxl_bo_kmap(bo, &ptr); 62 + ret = qxl_bo_kmap(bo, map); 64 63 if (ret < 0) 65 - return ERR_PTR(ret); 64 + return ret; 66 65 67 - return ptr; 66 + return 0; 68 67 } 69 68 70 - void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 69 + void qxl_gem_prime_vunmap(struct drm_gem_object *obj, 70 + struct dma_buf_map *map) 71 71 { 72 72 struct qxl_bo *bo = gem_to_qxl_bo(obj); 73 73
-1
drivers/gpu/drm/radeon/radeon.h
··· 509 509 /* Constant after initialization */ 510 510 struct radeon_device *rdev; 511 511 512 - struct ttm_bo_kmap_obj dma_buf_vmap; 513 512 pid_t pid; 514 513 515 514 #ifdef CONFIG_MMU_NOTIFIER
+3 -4
drivers/gpu/drm/radeon/radeon_gem.c
··· 31 31 #include <drm/drm_debugfs.h> 32 32 #include <drm/drm_device.h> 33 33 #include <drm/drm_file.h> 34 + #include <drm/drm_gem_ttm_helper.h> 34 35 #include <drm/radeon_drm.h> 35 36 36 37 #include "radeon.h" ··· 41 40 struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); 42 41 int radeon_gem_prime_pin(struct drm_gem_object *obj); 43 42 void radeon_gem_prime_unpin(struct drm_gem_object *obj); 44 - void *radeon_gem_prime_vmap(struct drm_gem_object *obj); 45 - void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 46 43 47 44 static const struct drm_gem_object_funcs radeon_gem_object_funcs; 48 45 ··· 234 235 .pin = radeon_gem_prime_pin, 235 236 .unpin = radeon_gem_prime_unpin, 236 237 .get_sg_table = radeon_gem_prime_get_sg_table, 237 - .vmap = radeon_gem_prime_vmap, 238 - .vunmap = radeon_gem_prime_vunmap, 238 + .vmap = drm_gem_ttm_vmap, 239 + .vunmap = drm_gem_ttm_vunmap, 239 240 }; 240 241 241 242 /*
-20
drivers/gpu/drm/radeon/radeon_prime.c
··· 39 39 return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages); 40 40 } 41 41 42 - void *radeon_gem_prime_vmap(struct drm_gem_object *obj) 43 - { 44 - struct radeon_bo *bo = gem_to_radeon_bo(obj); 45 - int ret; 46 - 47 - ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, 48 - &bo->dma_buf_vmap); 49 - if (ret) 50 - return ERR_PTR(ret); 51 - 52 - return bo->dma_buf_vmap.virtual; 53 - } 54 - 55 - void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 56 - { 57 - struct radeon_bo *bo = gem_to_radeon_bo(obj); 58 - 59 - ttm_bo_kunmap(&bo->dma_buf_vmap); 60 - } 61 - 62 42 struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, 63 43 struct dma_buf_attachment *attach, 64 44 struct sg_table *sg)
+22 -16
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
··· 532 532 return ERR_PTR(ret); 533 533 } 534 534 535 - void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) 536 - { 537 - struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); 538 - 539 - if (rk_obj->pages) 540 - return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, 541 - pgprot_writecombine(PAGE_KERNEL)); 542 - 543 - if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) 544 - return NULL; 545 - 546 - return rk_obj->kvaddr; 547 - } 548 - 549 - void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 535 + int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) 550 536 { 551 537 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); 552 538 553 539 if (rk_obj->pages) { 554 - vunmap(vaddr); 540 + void *vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, 541 + pgprot_writecombine(PAGE_KERNEL)); 542 + if (!vaddr) 543 + return -ENOMEM; 544 + dma_buf_map_set_vaddr(map, vaddr); 545 + return 0; 546 + } 547 + 548 + if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) 549 + return -ENOMEM; 550 + dma_buf_map_set_vaddr(map, rk_obj->kvaddr); 551 + 552 + return 0; 553 + } 554 + 555 + void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) 556 + { 557 + struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); 558 + 559 + if (rk_obj->pages) { 560 + vunmap(map->vaddr); 555 561 return; 556 562 } 557 563
+2 -2
drivers/gpu/drm/rockchip/rockchip_drm_gem.h
··· 31 31 rockchip_gem_prime_import_sg_table(struct drm_device *dev, 32 32 struct dma_buf_attachment *attach, 33 33 struct sg_table *sg); 34 - void *rockchip_gem_prime_vmap(struct drm_gem_object *obj); 35 - void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 34 + int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); 35 + void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); 36 36 37 37 /* drm driver mmap file operations */ 38 38 int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+6 -4
drivers/gpu/drm/tiny/cirrus.c
··· 17 17 */ 18 18 19 19 #include <linux/console.h> 20 + #include <linux/dma-buf-map.h> 20 21 #include <linux/module.h> 21 22 #include <linux/pci.h> 22 23 ··· 315 314 struct drm_rect *rect) 316 315 { 317 316 struct cirrus_device *cirrus = to_cirrus(fb->dev); 317 + struct dma_buf_map map; 318 318 void *vmap; 319 319 int idx, ret; 320 320 ··· 323 321 if (!drm_dev_enter(&cirrus->dev, &idx)) 324 322 goto out; 325 323 326 - ret = -ENOMEM; 327 - vmap = drm_gem_shmem_vmap(fb->obj[0]); 328 - if (!vmap) 324 + ret = drm_gem_shmem_vmap(fb->obj[0], &map); 325 + if (ret) 329 326 goto out_dev_exit; 327 + vmap = map.vaddr; /* TODO: Use mapping abstraction properly */ 330 328 331 329 if (cirrus->cpp == fb->format->cpp[0]) 332 330 drm_fb_memcpy_dstclip(cirrus->vram, ··· 345 343 else 346 344 WARN_ON_ONCE("cpp mismatch"); 347 345 348 - drm_gem_shmem_vunmap(fb->obj[0], vmap); 346 + drm_gem_shmem_vunmap(fb->obj[0], &map); 349 347 ret = 0; 350 348 351 349 out_dev_exit:
+6 -4
drivers/gpu/drm/tiny/gm12u320.c
··· 250 250 { 251 251 int block, dst_offset, len, remain, ret, x1, x2, y1, y2; 252 252 struct drm_framebuffer *fb; 253 + struct dma_buf_map map; 253 254 void *vaddr; 254 255 u8 *src; 255 256 ··· 265 264 y1 = gm12u320->fb_update.rect.y1; 266 265 y2 = gm12u320->fb_update.rect.y2; 267 266 268 - vaddr = drm_gem_shmem_vmap(fb->obj[0]); 269 - if (IS_ERR(vaddr)) { 270 - GM12U320_ERR("failed to vmap fb: %ld\n", PTR_ERR(vaddr)); 267 + ret = drm_gem_shmem_vmap(fb->obj[0], &map); 268 + if (ret) { 269 + GM12U320_ERR("failed to vmap fb: %d\n", ret); 271 270 goto put_fb; 272 271 } 272 + vaddr = map.vaddr; /* TODO: Use mapping abstraction properly */ 273 273 274 274 if (fb->obj[0]->import_attach) { 275 275 ret = dma_buf_begin_cpu_access( ··· 322 320 GM12U320_ERR("dma_buf_end_cpu_access err: %d\n", ret); 323 321 } 324 322 vunmap: 325 - drm_gem_shmem_vunmap(fb->obj[0], vaddr); 323 + drm_gem_shmem_vunmap(fb->obj[0], &map); 326 324 put_fb: 327 325 drm_framebuffer_put(fb); 328 326 gm12u320->fb_update.fb = NULL;
+5 -3
drivers/gpu/drm/udl/udl_modeset.c
··· 276 276 struct urb *urb; 277 277 struct drm_rect clip; 278 278 int log_bpp; 279 + struct dma_buf_map map; 279 280 void *vaddr; 280 281 281 282 ret = udl_log_cpp(fb->format->cpp[0]); ··· 297 296 return ret; 298 297 } 299 298 300 - vaddr = drm_gem_shmem_vmap(fb->obj[0]); 301 - if (IS_ERR(vaddr)) { 299 + ret = drm_gem_shmem_vmap(fb->obj[0], &map); 300 + if (ret) { 302 301 DRM_ERROR("failed to vmap fb\n"); 303 302 goto out_dma_buf_end_cpu_access; 304 303 } 304 + vaddr = map.vaddr; /* TODO: Use mapping abstraction properly */ 305 305 306 306 urb = udl_get_urb(dev); 307 307 if (!urb) ··· 335 333 ret = 0; 336 334 337 335 out_drm_gem_shmem_vunmap: 338 - drm_gem_shmem_vunmap(fb->obj[0], vaddr); 336 + drm_gem_shmem_vunmap(fb->obj[0], &map); 339 337 out_dma_buf_end_cpu_access: 340 338 if (import_attach) { 341 339 tmp_ret = dma_buf_end_cpu_access(import_attach->dmabuf,
+8 -3
drivers/gpu/drm/vboxvideo/vbox_mode.c
··· 9 9 * Michael Thayer <michael.thayer@oracle.com, 10 10 * Hans de Goede <hdegoede@redhat.com> 11 11 */ 12 + 13 + #include <linux/dma-buf-map.h> 12 14 #include <linux/export.h> 13 15 14 16 #include <drm/drm_atomic.h> ··· 386 384 u32 height = plane->state->crtc_h; 387 385 size_t data_size, mask_size; 388 386 u32 flags; 387 + struct dma_buf_map map; 388 + int ret; 389 389 u8 *src; 390 390 391 391 /* ··· 401 397 402 398 vbox_crtc->cursor_enabled = true; 403 399 404 - src = drm_gem_vram_vmap(gbo); 405 - if (IS_ERR(src)) { 400 + ret = drm_gem_vram_vmap(gbo, &map); 401 + if (ret) { 406 402 /* 407 403 * BUG: we should have pinned the BO in prepare_fb(). 408 404 */ ··· 410 406 DRM_WARN("Could not map cursor bo, skipping update\n"); 411 407 return; 412 408 } 409 + src = map.vaddr; /* TODO: Use mapping abstraction properly */ 413 410 414 411 /* 415 412 * The mask must be calculated based on the alpha ··· 421 416 data_size = width * height * 4 + mask_size; 422 417 423 418 copy_cursor_image(src, vbox->cursor_data, width, height, mask_size); 424 - drm_gem_vram_vunmap(gbo, src); 419 + drm_gem_vram_vunmap(gbo, &map); 425 420 426 421 flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE | 427 422 VBOX_MOUSE_POINTER_ALPHA;
+3 -3
drivers/gpu/drm/vc4/vc4_bo.c
··· 785 785 return drm_gem_cma_prime_mmap(obj, vma); 786 786 } 787 787 788 - void *vc4_prime_vmap(struct drm_gem_object *obj) 788 + int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) 789 789 { 790 790 struct vc4_bo *bo = to_vc4_bo(obj); 791 791 792 792 if (bo->validated_shader) { 793 793 DRM_DEBUG("mmaping of shader BOs not allowed.\n"); 794 - return ERR_PTR(-EINVAL); 794 + return -EINVAL; 795 795 } 796 796 797 - return drm_gem_cma_prime_vmap(obj); 797 + return drm_gem_cma_prime_vmap(obj, map); 798 798 } 799 799 800 800 struct drm_gem_object *
+1 -1
drivers/gpu/drm/vc4/vc4_drv.h
··· 806 806 struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev, 807 807 struct dma_buf_attachment *attach, 808 808 struct sg_table *sgt); 809 - void *vc4_prime_vmap(struct drm_gem_object *obj); 809 + int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); 810 810 int vc4_bo_cache_init(struct drm_device *dev); 811 811 void vc4_bo_cache_destroy(struct drm_device *dev); 812 812 int vc4_bo_inc_usecnt(struct vc4_bo *bo);
+11 -5
drivers/gpu/drm/vgem/vgem_drv.c
··· 361 361 return &obj->base; 362 362 } 363 363 364 - static void *vgem_prime_vmap(struct drm_gem_object *obj) 364 + static int vgem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) 365 365 { 366 366 struct drm_vgem_gem_object *bo = to_vgem_bo(obj); 367 367 long n_pages = obj->size >> PAGE_SHIFT; 368 368 struct page **pages; 369 + void *vaddr; 369 370 370 371 pages = vgem_pin_pages(bo); 371 372 if (IS_ERR(pages)) 372 - return NULL; 373 + return PTR_ERR(pages); 373 374 374 - return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL)); 375 + vaddr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL)); 376 + if (!vaddr) 377 + return -ENOMEM; 378 + dma_buf_map_set_vaddr(map, vaddr); 379 + 380 + return 0; 375 381 } 376 382 377 - static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 383 + static void vgem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) 378 384 { 379 385 struct drm_vgem_gem_object *bo = to_vgem_bo(obj); 380 386 381 - vunmap(vaddr); 387 + vunmap(map->vaddr); 382 388 vgem_unpin_pages(bo); 383 389 } 384 390
+10 -5
drivers/gpu/drm/vkms/vkms_plane.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0+ 2 2 3 + #include <linux/dma-buf-map.h> 4 + 3 5 #include <drm/drm_atomic.h> 4 6 #include <drm/drm_atomic_helper.h> 5 7 #include <drm/drm_fourcc.h> ··· 148 146 struct drm_plane_state *state) 149 147 { 150 148 struct drm_gem_object *gem_obj; 151 - void *vaddr; 149 + struct dma_buf_map map; 150 + int ret; 152 151 153 152 if (!state->fb) 154 153 return 0; 155 154 156 155 gem_obj = drm_gem_fb_get_obj(state->fb, 0); 157 - vaddr = drm_gem_shmem_vmap(gem_obj); 158 - if (IS_ERR(vaddr)) 159 - DRM_ERROR("vmap failed: %li\n", PTR_ERR(vaddr)); 156 + ret = drm_gem_shmem_vmap(gem_obj, &map); 157 + if (ret) 158 + DRM_ERROR("vmap failed: %d\n", ret); 160 159 161 160 return drm_gem_fb_prepare_fb(plane, state); 162 161 } ··· 167 164 { 168 165 struct drm_gem_object *gem_obj; 169 166 struct drm_gem_shmem_object *shmem_obj; 167 + struct dma_buf_map map; 170 168 171 169 if (!old_state->fb) 172 170 return; 173 171 174 172 gem_obj = drm_gem_fb_get_obj(old_state->fb, 0); 175 173 shmem_obj = to_drm_gem_shmem_obj(drm_gem_fb_get_obj(old_state->fb, 0)); 176 - drm_gem_shmem_vunmap(gem_obj, shmem_obj->vaddr); 174 + dma_buf_map_set_vaddr(&map, shmem_obj->vaddr); 175 + drm_gem_shmem_vunmap(gem_obj, &map); 177 176 } 178 177 179 178 static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
+14 -8
drivers/gpu/drm/vkms/vkms_writeback.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0+ 2 2 3 - #include "vkms_drv.h" 3 + #include <linux/dma-buf-map.h> 4 + 4 5 #include <drm/drm_fourcc.h> 5 6 #include <drm/drm_writeback.h> 6 7 #include <drm/drm_probe_helper.h> 7 8 #include <drm/drm_atomic_helper.h> 8 9 #include <drm/drm_gem_framebuffer_helper.h> 9 10 #include <drm/drm_gem_shmem_helper.h> 11 + 12 + #include "vkms_drv.h" 10 13 11 14 static const u32 vkms_wb_formats[] = { 12 15 DRM_FORMAT_XRGB8888, ··· 68 65 struct drm_writeback_job *job) 69 66 { 70 67 struct drm_gem_object *gem_obj; 71 - void *vaddr; 68 + struct dma_buf_map map; 69 + int ret; 72 70 73 71 if (!job->fb) 74 72 return 0; 75 73 76 74 gem_obj = drm_gem_fb_get_obj(job->fb, 0); 77 - vaddr = drm_gem_shmem_vmap(gem_obj); 78 - if (IS_ERR(vaddr)) { 79 - DRM_ERROR("vmap failed: %li\n", PTR_ERR(vaddr)); 80 - return PTR_ERR(vaddr); 75 + ret = drm_gem_shmem_vmap(gem_obj, &map); 76 + if (ret) { 77 + DRM_ERROR("vmap failed: %d\n", ret); 78 + return ret; 81 79 } 82 80 83 - job->priv = vaddr; 81 + job->priv = map.vaddr; 84 82 85 83 return 0; 86 84 } ··· 91 87 { 92 88 struct drm_gem_object *gem_obj; 93 89 struct vkms_device *vkmsdev; 90 + struct dma_buf_map map; 94 91 95 92 if (!job->fb) 96 93 return; 97 94 98 95 gem_obj = drm_gem_fb_get_obj(job->fb, 0); 99 - drm_gem_shmem_vunmap(gem_obj, job->priv); 96 + dma_buf_map_set_vaddr(&map, job->priv); 97 + drm_gem_shmem_vunmap(gem_obj, &map); 100 98 101 99 vkmsdev = drm_device_to_vkms_device(gem_obj->dev); 102 100 vkms_set_composer(&vkmsdev->output, false);
+12 -6
drivers/gpu/drm/xen/xen_drm_front_gem.c
··· 290 290 return gem_mmap_obj(xen_obj, vma); 291 291 } 292 292 293 - void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj) 293 + int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj, struct dma_buf_map *map) 294 294 { 295 295 struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj); 296 + void *vaddr; 296 297 297 298 if (!xen_obj->pages) 298 - return NULL; 299 + return -ENOMEM; 299 300 300 301 /* Please see comment in gem_mmap_obj on mapping and attributes. */ 301 - return vmap(xen_obj->pages, xen_obj->num_pages, 302 - VM_MAP, PAGE_KERNEL); 302 + vaddr = vmap(xen_obj->pages, xen_obj->num_pages, 303 + VM_MAP, PAGE_KERNEL); 304 + if (!vaddr) 305 + return -ENOMEM; 306 + dma_buf_map_set_vaddr(map, vaddr); 307 + 308 + return 0; 303 309 } 304 310 305 311 void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj, 306 - void *vaddr) 312 + struct dma_buf_map *map) 307 313 { 308 - vunmap(vaddr); 314 + vunmap(map->vaddr); 309 315 } 310 316 311 317 int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj,
+4 -2
drivers/gpu/drm/xen/xen_drm_front_gem.h
··· 12 12 #define __XEN_DRM_FRONT_GEM_H 13 13 14 14 struct dma_buf_attachment; 15 + struct dma_buf_map; 15 16 struct drm_device; 16 17 struct drm_gem_object; 17 18 struct file; ··· 35 34 36 35 int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma); 37 36 38 - void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj); 37 + int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj, 38 + struct dma_buf_map *map); 39 39 40 40 void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj, 41 - void *vaddr); 41 + struct dma_buf_map *map); 42 42 43 43 int xen_drm_front_gem_prime_mmap(struct drm_gem_object *gem_obj, 44 44 struct vm_area_struct *vma);
+3 -2
include/drm/drm_gem.h
··· 39 39 40 40 #include <drm/drm_vma_manager.h> 41 41 42 + struct dma_buf_map; 42 43 struct drm_gem_object; 43 44 44 45 /** ··· 139 138 * 140 139 * This callback is optional. 141 140 */ 142 - void *(*vmap)(struct drm_gem_object *obj); 141 + int (*vmap)(struct drm_gem_object *obj, struct dma_buf_map *map); 143 142 144 143 /** 145 144 * @vunmap: ··· 149 148 * 150 149 * This callback is optional. 151 150 */ 152 - void (*vunmap)(struct drm_gem_object *obj, void *vaddr); 151 + void (*vunmap)(struct drm_gem_object *obj, struct dma_buf_map *map); 153 152 154 153 /** 155 154 * @mmap:
+1 -1
include/drm/drm_gem_cma_helper.h
··· 103 103 struct sg_table *sgt); 104 104 int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, 105 105 struct vm_area_struct *vma); 106 - void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj); 106 + int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); 107 107 108 108 struct drm_gem_object * 109 109 drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size);
+2 -2
include/drm/drm_gem_shmem_helper.h
··· 113 113 void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); 114 114 int drm_gem_shmem_pin(struct drm_gem_object *obj); 115 115 void drm_gem_shmem_unpin(struct drm_gem_object *obj); 116 - void *drm_gem_shmem_vmap(struct drm_gem_object *obj); 117 - void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr); 116 + int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); 117 + void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); 118 118 119 119 int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv); 120 120
+7 -7
include/drm/drm_gem_vram_helper.h
··· 10 10 #include <drm/ttm/ttm_bo_api.h> 11 11 #include <drm/ttm/ttm_bo_driver.h> 12 12 13 + #include <linux/dma-buf-map.h> 13 14 #include <linux/kernel.h> /* for container_of() */ 14 15 15 16 struct drm_mode_create_dumb; ··· 30 29 31 30 /** 32 31 * struct drm_gem_vram_object - GEM object backed by VRAM 33 - * @gem: GEM object 34 32 * @bo: TTM buffer object 35 - * @kmap: Mapping information for @bo 33 + * @map: Mapping information for @bo 36 34 * @placement: TTM placement information. Supported placements are \ 37 35 %TTM_PL_VRAM and %TTM_PL_SYSTEM 38 36 * @placements: TTM placement information. ··· 50 50 */ 51 51 struct drm_gem_vram_object { 52 52 struct ttm_buffer_object bo; 53 - struct ttm_bo_kmap_obj kmap; 53 + struct dma_buf_map map; 54 54 55 55 /** 56 - * @kmap_use_count: 56 + * @vmap_use_count: 57 57 * 58 58 * Reference count on the virtual address. 59 59 * The address are un-mapped when the count reaches zero. 60 60 */ 61 - unsigned int kmap_use_count; 61 + unsigned int vmap_use_count; 62 62 63 63 /* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */ 64 64 struct ttm_placement placement; ··· 97 97 s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo); 98 98 int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag); 99 99 int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo); 100 - void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo); 101 - void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr); 100 + int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map); 101 + void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map); 102 102 103 103 int drm_gem_vram_fill_create_dumb(struct drm_file *file, 104 104 struct drm_device *dev,