Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: add io_mem_pfn callback

This allows the driver to handle io_mem mappings on its own.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Christian König and committed by
Alex Deucher
ea642c32 018b7fc3

+39 -1
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 1089 1089 .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, 1090 1090 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, 1091 1091 .io_mem_free = &amdgpu_ttm_io_mem_free, 1092 + .io_mem_pfn = ttm_bo_default_io_mem_pfn, 1092 1093 }; 1093 1094 1094 1095 int amdgpu_ttm_init(struct amdgpu_device *adev)
+1
drivers/gpu/drm/ast/ast_ttm.c
··· 236 236 .verify_access = ast_bo_verify_access, 237 237 .io_mem_reserve = &ast_ttm_io_mem_reserve, 238 238 .io_mem_free = &ast_ttm_io_mem_free, 239 + .io_mem_pfn = ttm_bo_default_io_mem_pfn, 239 240 }; 240 241 241 242 int ast_mm_init(struct ast_private *ast)
+1
drivers/gpu/drm/bochs/bochs_mm.c
··· 205 205 .verify_access = bochs_bo_verify_access, 206 206 .io_mem_reserve = &bochs_ttm_io_mem_reserve, 207 207 .io_mem_free = &bochs_ttm_io_mem_free, 208 + .io_mem_pfn = ttm_bo_default_io_mem_pfn, 208 209 }; 209 210 210 211 int bochs_mm_init(struct bochs_device *bochs)
+1
drivers/gpu/drm/cirrus/cirrus_ttm.c
··· 236 236 .verify_access = cirrus_bo_verify_access, 237 237 .io_mem_reserve = &cirrus_ttm_io_mem_reserve, 238 238 .io_mem_free = &cirrus_ttm_io_mem_free, 239 + .io_mem_pfn = ttm_bo_default_io_mem_pfn, 239 240 }; 240 241 241 242 int cirrus_mm_init(struct cirrus_device *cirrus)
+1
drivers/gpu/drm/mgag200/mgag200_ttm.c
··· 236 236 .verify_access = mgag200_bo_verify_access, 237 237 .io_mem_reserve = &mgag200_ttm_io_mem_reserve, 238 238 .io_mem_free = &mgag200_ttm_io_mem_free, 239 + .io_mem_pfn = ttm_bo_default_io_mem_pfn, 239 240 }; 240 241 241 242 int mgag200_mm_init(struct mga_device *mdev)
+1
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 1574 1574 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, 1575 1575 .io_mem_reserve = &nouveau_ttm_io_mem_reserve, 1576 1576 .io_mem_free = &nouveau_ttm_io_mem_free, 1577 + .io_mem_pfn = ttm_bo_default_io_mem_pfn, 1577 1578 }; 1578 1579 1579 1580 struct nvkm_vma *
+1
drivers/gpu/drm/qxl/qxl_ttm.c
··· 393 393 .verify_access = &qxl_verify_access, 394 394 .io_mem_reserve = &qxl_ttm_io_mem_reserve, 395 395 .io_mem_free = &qxl_ttm_io_mem_free, 396 + .io_mem_pfn = ttm_bo_default_io_mem_pfn, 396 397 .move_notify = &qxl_bo_move_notify, 397 398 }; 398 399
+1
drivers/gpu/drm/radeon/radeon_ttm.c
··· 873 873 .fault_reserve_notify = &radeon_bo_fault_reserve_notify, 874 874 .io_mem_reserve = &radeon_ttm_io_mem_reserve, 875 875 .io_mem_free = &radeon_ttm_io_mem_free, 876 + .io_mem_pfn = ttm_bo_default_io_mem_pfn, 876 877 }; 877 878 878 879 int radeon_ttm_init(struct radeon_device *rdev)
+9 -1
drivers/gpu/drm/ttm/ttm_bo_vm.c
··· 231 231 */ 232 232 for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { 233 233 if (bo->mem.bus.is_iomem) 234 - pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset; 234 + pfn = bdev->driver->io_mem_pfn(bo, page_offset); 235 235 else { 236 236 page = ttm->pages[page_offset]; 237 237 if (unlikely(!page && i == 0)) { ··· 323 323 324 324 return bo; 325 325 } 326 + 327 + unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo, 328 + unsigned long page_offset) 329 + { 330 + return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) 331 + + page_offset; 332 + } 333 + EXPORT_SYMBOL(ttm_bo_default_io_mem_pfn); 326 334 327 335 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, 328 336 struct ttm_bo_device *bdev)
+1
drivers/gpu/drm/virtio/virtgpu_ttm.c
··· 431 431 .verify_access = &virtio_gpu_verify_access, 432 432 .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve, 433 433 .io_mem_free = &virtio_gpu_ttm_io_mem_free, 434 + .io_mem_pfn = ttm_bo_default_io_mem_pfn, 434 435 .move_notify = &virtio_gpu_bo_move_notify, 435 436 .swap_notify = &virtio_gpu_bo_swap_notify, 436 437 };
+1
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
··· 859 859 .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, 860 860 .io_mem_reserve = &vmw_ttm_io_mem_reserve, 861 861 .io_mem_free = &vmw_ttm_io_mem_free, 862 + .io_mem_pfn = ttm_bo_default_io_mem_pfn, 862 863 };
+11
include/drm/ttm/ttm_bo_api.h
··· 711 711 struct ttm_buffer_object *bo); 712 712 713 713 /** 714 + * ttm_bo_default_io_mem_pfn - get a pfn for a page offset 715 + * 716 + * @bo: the BO we need to look up the pfn for 717 + * @page_offset: offset inside the BO to look up. 718 + * 719 + * Calculate the PFN for iomem-based mappings during page faults 720 + */ 721 + unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo, 722 + unsigned long page_offset); 723 + 724 + /** 714 725 * ttm_bo_mmap - mmap out of the ttm device address space. 715 726 * 716 727 * @filp: filp as input from the mmap method.
+9
include/drm/ttm/ttm_bo_driver.h
··· 462 462 struct ttm_mem_reg *mem); 463 463 void (*io_mem_free)(struct ttm_bo_device *bdev, 464 464 struct ttm_mem_reg *mem); 465 + 466 + /** 467 + * Return the pfn for a given page_offset inside the BO. 468 + * 469 + * @bo: the BO to look up the pfn for 470 + * @page_offset: the offset to look up 471 + */ 472 + unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo, 473 + unsigned long page_offset); 465 474 }; 466 475 467 476 /**