Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-buf: remove kmap_atomic interface

Neither used nor correctly implemented anywhere. Just completely remove
the interface.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Link: https://patchwork.freedesktop.org/patch/226645/

Authored by Christian König and committed by Christian König
f664a526 a19741e5

+2 -165
+2 -52
drivers/dma-buf/dma-buf.c
··· 405 405 || !exp_info->ops->map_dma_buf 406 406 || !exp_info->ops->unmap_dma_buf 407 407 || !exp_info->ops->release 408 - || !exp_info->ops->map_atomic 409 408 || !exp_info->ops->map 410 409 || !exp_info->ops->mmap)) { 411 410 return ERR_PTR(-EINVAL); ··· 686 687 * void \*dma_buf_kmap(struct dma_buf \*, unsigned long); 687 688 * void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*); 688 689 * 689 - * There are also atomic variants of these interfaces. Like for kmap they 690 - * facilitate non-blocking fast-paths. Neither the importer nor the exporter 691 - * (in the callback) is allowed to block when using these. 692 - * 693 - * Interfaces:: 694 - * void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long); 695 - * void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*); 696 - * 697 - * For importers all the restrictions of using kmap apply, like the limited 698 - * supply of kmap_atomic slots. Hence an importer shall only hold onto at 699 - * max 2 atomic dma_buf kmaps at the same time (in any given process context). 690 + * Implementing the functions is optional for exporters and for importers all 691 + * the restrictions of using kmap apply. 700 692 * 701 693 * dma_buf kmap calls outside of the range specified in begin_cpu_access are 702 694 * undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on 703 695 * the partial chunks at the beginning and end but may return stale or bogus 704 696 * data outside of the range (in these partial chunks). 705 - * 706 - * Note that these calls need to always succeed. The exporter needs to 707 - * complete any preparations that might fail in begin_cpu_access. 708 697 * 709 698 * For some cases the overhead of kmap can be too high, a vmap interface 710 699 * is introduced. 
This interface should be used very carefully, as vmalloc ··· 845 858 return ret; 846 859 } 847 860 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access); 848 - 849 - /** 850 - * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address 851 - * space. The same restrictions as for kmap_atomic and friends apply. 852 - * @dmabuf: [in] buffer to map page from. 853 - * @page_num: [in] page in PAGE_SIZE units to map. 854 - * 855 - * This call must always succeed, any necessary preparations that might fail 856 - * need to be done in begin_cpu_access. 857 - */ 858 - void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num) 859 - { 860 - WARN_ON(!dmabuf); 861 - 862 - if (!dmabuf->ops->map_atomic) 863 - return NULL; 864 - return dmabuf->ops->map_atomic(dmabuf, page_num); 865 - } 866 - EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic); 867 - 868 - /** 869 - * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic. 870 - * @dmabuf: [in] buffer to unmap page from. 871 - * @page_num: [in] page in PAGE_SIZE units to unmap. 872 - * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic. 873 - * 874 - * This call must always succeed. 875 - */ 876 - void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num, 877 - void *vaddr) 878 - { 879 - WARN_ON(!dmabuf); 880 - 881 - if (dmabuf->ops->unmap_atomic) 882 - dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr); 883 - } 884 - EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic); 885 861 886 862 /** 887 863 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
-2
drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
··· 238 238 .release = drm_gem_dmabuf_release, 239 239 .begin_cpu_access = amdgpu_gem_begin_cpu_access, 240 240 .map = drm_gem_dmabuf_kmap, 241 - .map_atomic = drm_gem_dmabuf_kmap_atomic, 242 241 .unmap = drm_gem_dmabuf_kunmap, 243 - .unmap_atomic = drm_gem_dmabuf_kunmap_atomic, 244 242 .mmap = drm_gem_dmabuf_mmap, 245 243 .vmap = drm_gem_dmabuf_vmap, 246 244 .vunmap = drm_gem_dmabuf_vunmap,
-2
drivers/gpu/drm/armada/armada_gem.c
··· 490 490 .map_dma_buf = armada_gem_prime_map_dma_buf, 491 491 .unmap_dma_buf = armada_gem_prime_unmap_dma_buf, 492 492 .release = drm_gem_dmabuf_release, 493 - .map_atomic = armada_gem_dmabuf_no_kmap, 494 - .unmap_atomic = armada_gem_dmabuf_no_kunmap, 495 493 .map = armada_gem_dmabuf_no_kmap, 496 494 .unmap = armada_gem_dmabuf_no_kunmap, 497 495 .mmap = armada_gem_dmabuf_mmap,
-31
drivers/gpu/drm/drm_prime.c
··· 434 434 EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); 435 435 436 436 /** 437 - * drm_gem_dmabuf_kmap_atomic - map_atomic implementation for GEM 438 - * @dma_buf: buffer to be mapped 439 - * @page_num: page number within the buffer 440 - * 441 - * Not implemented. This can be used as the &dma_buf_ops.map_atomic callback. 442 - */ 443 - void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, 444 - unsigned long page_num) 445 - { 446 - return NULL; 447 - } 448 - EXPORT_SYMBOL(drm_gem_dmabuf_kmap_atomic); 449 - 450 - /** 451 - * drm_gem_dmabuf_kunmap_atomic - unmap_atomic implementation for GEM 452 - * @dma_buf: buffer to be unmapped 453 - * @page_num: page number within the buffer 454 - * @addr: virtual address of the buffer 455 - * 456 - * Not implemented. This can be used as the &dma_buf_ops.unmap_atomic callback. 457 - */ 458 - void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, 459 - unsigned long page_num, void *addr) 460 - { 461 - 462 - } 463 - EXPORT_SYMBOL(drm_gem_dmabuf_kunmap_atomic); 464 - 465 - /** 466 437 * drm_gem_dmabuf_kmap - map implementation for GEM 467 438 * @dma_buf: buffer to be mapped 468 439 * @page_num: page number within the buffer ··· 490 519 .unmap_dma_buf = drm_gem_unmap_dma_buf, 491 520 .release = drm_gem_dmabuf_release, 492 521 .map = drm_gem_dmabuf_kmap, 493 - .map_atomic = drm_gem_dmabuf_kmap_atomic, 494 522 .unmap = drm_gem_dmabuf_kunmap, 495 - .unmap_atomic = drm_gem_dmabuf_kunmap_atomic, 496 523 .mmap = drm_gem_dmabuf_mmap, 497 524 .vmap = drm_gem_dmabuf_vmap, 498 525 .vunmap = drm_gem_dmabuf_vunmap,
-11
drivers/gpu/drm/i915/i915_gem_dmabuf.c
··· 111 111 i915_gem_object_unpin_map(obj); 112 112 } 113 113 114 - static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num) 115 - { 116 - return NULL; 117 - } 118 - 119 - static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr) 120 - { 121 - 122 - } 123 114 static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num) 124 115 { 125 116 struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); ··· 216 225 .unmap_dma_buf = i915_gem_unmap_dma_buf, 217 226 .release = drm_gem_dmabuf_release, 218 227 .map = i915_gem_dmabuf_kmap, 219 - .map_atomic = i915_gem_dmabuf_kmap_atomic, 220 228 .unmap = i915_gem_dmabuf_kunmap, 221 - .unmap_atomic = i915_gem_dmabuf_kunmap_atomic, 222 229 .mmap = i915_gem_dmabuf_mmap, 223 230 .vmap = i915_gem_dmabuf_vmap, 224 231 .vunmap = i915_gem_dmabuf_vunmap,
-2
drivers/gpu/drm/i915/selftests/mock_dmabuf.c
··· 130 130 .unmap_dma_buf = mock_unmap_dma_buf, 131 131 .release = mock_dmabuf_release, 132 132 .map = mock_dmabuf_kmap, 133 - .map_atomic = mock_dmabuf_kmap_atomic, 134 133 .unmap = mock_dmabuf_kunmap, 135 - .unmap_atomic = mock_dmabuf_kunmap_atomic, 136 134 .mmap = mock_dmabuf_mmap, 137 135 .vmap = mock_dmabuf_vmap, 138 136 .vunmap = mock_dmabuf_vunmap,
-2
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
··· 148 148 .release = drm_gem_dmabuf_release, 149 149 .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access, 150 150 .end_cpu_access = omap_gem_dmabuf_end_cpu_access, 151 - .map_atomic = omap_gem_dmabuf_kmap_atomic, 152 - .unmap_atomic = omap_gem_dmabuf_kunmap_atomic, 153 151 .map = omap_gem_dmabuf_kmap, 154 152 .unmap = omap_gem_dmabuf_kunmap, 155 153 .mmap = omap_gem_dmabuf_mmap,
-14
drivers/gpu/drm/tegra/gem.c
··· 596 596 return 0; 597 597 } 598 598 599 - static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf, 600 - unsigned long page) 601 - { 602 - return NULL; 603 - } 604 - 605 - static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf, 606 - unsigned long page, 607 - void *addr) 608 - { 609 - } 610 - 611 599 static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page) 612 600 { 613 601 return NULL; ··· 636 648 .release = tegra_gem_prime_release, 637 649 .begin_cpu_access = tegra_gem_prime_begin_cpu_access, 638 650 .end_cpu_access = tegra_gem_prime_end_cpu_access, 639 - .map_atomic = tegra_gem_prime_kmap_atomic, 640 - .unmap_atomic = tegra_gem_prime_kunmap_atomic, 641 651 .map = tegra_gem_prime_kmap, 642 652 .unmap = tegra_gem_prime_kunmap, 643 653 .mmap = tegra_gem_prime_mmap,
-17
drivers/gpu/drm/udl/udl_dmabuf.c
··· 157 157 return NULL; 158 158 } 159 159 160 - static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf, 161 - unsigned long page_num) 162 - { 163 - /* TODO */ 164 - 165 - return NULL; 166 - } 167 - 168 160 static void udl_dmabuf_kunmap(struct dma_buf *dma_buf, 169 161 unsigned long page_num, void *addr) 170 - { 171 - /* TODO */ 172 - } 173 - 174 - static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, 175 - unsigned long page_num, 176 - void *addr) 177 162 { 178 163 /* TODO */ 179 164 } ··· 177 192 .map_dma_buf = udl_map_dma_buf, 178 193 .unmap_dma_buf = udl_unmap_dma_buf, 179 194 .map = udl_dmabuf_kmap, 180 - .map_atomic = udl_dmabuf_kmap_atomic, 181 195 .unmap = udl_dmabuf_kunmap, 182 - .unmap_atomic = udl_dmabuf_kunmap_atomic, 183 196 .mmap = udl_dmabuf_mmap, 184 197 .release = drm_gem_dmabuf_release, 185 198 };
-13
drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
··· 71 71 { 72 72 } 73 73 74 - static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf, 75 - unsigned long page_num) 76 - { 77 - return NULL; 78 - } 79 - 80 - static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, 81 - unsigned long page_num, void *addr) 82 - { 83 - 84 - } 85 74 static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf, 86 75 unsigned long page_num) 87 76 { ··· 97 108 .unmap_dma_buf = vmw_prime_unmap_dma_buf, 98 109 .release = NULL, 99 110 .map = vmw_prime_dmabuf_kmap, 100 - .map_atomic = vmw_prime_dmabuf_kmap_atomic, 101 111 .unmap = vmw_prime_dmabuf_kunmap, 102 - .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic, 103 112 .mmap = vmw_prime_dmabuf_mmap, 104 113 .vmap = vmw_prime_dmabuf_vmap, 105 114 .vunmap = vmw_prime_dmabuf_vunmap,
-1
drivers/media/common/videobuf2/videobuf2-dma-contig.c
··· 358 358 .map_dma_buf = vb2_dc_dmabuf_ops_map, 359 359 .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap, 360 360 .map = vb2_dc_dmabuf_ops_kmap, 361 - .map_atomic = vb2_dc_dmabuf_ops_kmap, 362 361 .vmap = vb2_dc_dmabuf_ops_vmap, 363 362 .mmap = vb2_dc_dmabuf_ops_mmap, 364 363 .release = vb2_dc_dmabuf_ops_release,
-1
drivers/media/common/videobuf2/videobuf2-dma-sg.c
··· 507 507 .map_dma_buf = vb2_dma_sg_dmabuf_ops_map, 508 508 .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap, 509 509 .map = vb2_dma_sg_dmabuf_ops_kmap, 510 - .map_atomic = vb2_dma_sg_dmabuf_ops_kmap, 511 510 .vmap = vb2_dma_sg_dmabuf_ops_vmap, 512 511 .mmap = vb2_dma_sg_dmabuf_ops_mmap, 513 512 .release = vb2_dma_sg_dmabuf_ops_release,
-1
drivers/media/common/videobuf2/videobuf2-vmalloc.c
··· 346 346 .map_dma_buf = vb2_vmalloc_dmabuf_ops_map, 347 347 .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap, 348 348 .map = vb2_vmalloc_dmabuf_ops_kmap, 349 - .map_atomic = vb2_vmalloc_dmabuf_ops_kmap, 350 349 .vmap = vb2_vmalloc_dmabuf_ops_vmap, 351 350 .mmap = vb2_vmalloc_dmabuf_ops_mmap, 352 351 .release = vb2_vmalloc_dmabuf_ops_release,
-2
drivers/staging/android/ion/ion.c
··· 369 369 .detach = ion_dma_buf_detatch, 370 370 .begin_cpu_access = ion_dma_buf_begin_cpu_access, 371 371 .end_cpu_access = ion_dma_buf_end_cpu_access, 372 - .map_atomic = ion_dma_buf_kmap, 373 - .unmap_atomic = ion_dma_buf_kunmap, 374 372 .map = ion_dma_buf_kmap, 375 373 .unmap = ion_dma_buf_kunmap, 376 374 };
-6
drivers/tee/tee_shm.c
··· 80 80 tee_shm_release(shm); 81 81 } 82 82 83 - static void *tee_shm_op_map_atomic(struct dma_buf *dmabuf, unsigned long pgnum) 84 - { 85 - return NULL; 86 - } 87 - 88 83 static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum) 89 84 { 90 85 return NULL; ··· 102 107 .map_dma_buf = tee_shm_op_map_dma_buf, 103 108 .unmap_dma_buf = tee_shm_op_unmap_dma_buf, 104 109 .release = tee_shm_op_release, 105 - .map_atomic = tee_shm_op_map_atomic, 106 110 .map = tee_shm_op_map, 107 111 .mmap = tee_shm_op_mmap, 108 112 };
-4
include/drm/drm_prime.h
··· 93 93 enum dma_data_direction dir); 94 94 void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf); 95 95 void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr); 96 - void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, 97 - unsigned long page_num); 98 - void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, 99 - unsigned long page_num, void *addr); 100 96 void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num); 101 97 void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, 102 98 void *addr);
-4
include/linux/dma-buf.h
··· 205 205 * to be restarted. 206 206 */ 207 207 int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); 208 - void *(*map_atomic)(struct dma_buf *, unsigned long); 209 - void (*unmap_atomic)(struct dma_buf *, unsigned long, void *); 210 208 void *(*map)(struct dma_buf *, unsigned long); 211 209 void (*unmap)(struct dma_buf *, unsigned long, void *); 212 210 ··· 392 394 enum dma_data_direction dir); 393 395 int dma_buf_end_cpu_access(struct dma_buf *dma_buf, 394 396 enum dma_data_direction dir); 395 - void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long); 396 - void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *); 397 397 void *dma_buf_kmap(struct dma_buf *, unsigned long); 398 398 void dma_buf_kunmap(struct dma_buf *, unsigned long, void *); 399 399