Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/radeon: Implement mmap as GEM object function

Moving the driver-specific mmap code into a GEM object function allows
for using DRM helpers for various mmap callbacks.

This change also makes it possible to support prime-based mmap via DRM's
helper drm_gem_prime_mmap().

Permission checks are implemented by drm_gem_mmap(), with an additional
check for radeon_ttm_tt_has_userptr() in the GEM object function. The
function radeon_verify_access() is now unused and has thus been removed.

As a side effect, radeon_ttm_vm_ops and radeon_ttm_fault() are now
implemented in radeon's GEM code.

v3:
* remove unnecessary checks from mmap (Christian)
v2:
* rename radeon_ttm_vm_ops and radeon_ttm_fault() to
radeon_gem_vm_ops and radeon_gem_fault() (Christian)
* fix commit description (Alex)

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210525151055.8174-4-tzimmermann@suse.de

+51 -67
+2 -1
drivers/gpu/drm/radeon/radeon_drv.c
··· 545 545 .open = drm_open, 546 546 .release = drm_release, 547 547 .unlocked_ioctl = radeon_drm_ioctl, 548 - .mmap = radeon_mmap, 548 + .mmap = drm_gem_mmap, 549 549 .poll = drm_poll, 550 550 .read = drm_read, 551 551 #ifdef CONFIG_COMPAT ··· 620 620 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 621 621 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 622 622 .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table, 623 + .gem_prime_mmap = drm_gem_prime_mmap, 623 624 624 625 .name = DRIVER_NAME, 625 626 .desc = DRIVER_DESC,
+49
drivers/gpu/drm/radeon/radeon_gem.c
··· 44 44 45 45 const struct drm_gem_object_funcs radeon_gem_object_funcs; 46 46 47 + static vm_fault_t radeon_gem_fault(struct vm_fault *vmf) 48 + { 49 + struct ttm_buffer_object *bo = vmf->vma->vm_private_data; 50 + struct radeon_device *rdev = radeon_get_rdev(bo->bdev); 51 + vm_fault_t ret; 52 + 53 + down_read(&rdev->pm.mclk_lock); 54 + 55 + ret = ttm_bo_vm_reserve(bo, vmf); 56 + if (ret) 57 + goto unlock_mclk; 58 + 59 + ret = radeon_bo_fault_reserve_notify(bo); 60 + if (ret) 61 + goto unlock_resv; 62 + 63 + ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, 64 + TTM_BO_VM_NUM_PREFAULT, 1); 65 + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) 66 + goto unlock_mclk; 67 + 68 + unlock_resv: 69 + dma_resv_unlock(bo->base.resv); 70 + 71 + unlock_mclk: 72 + up_read(&rdev->pm.mclk_lock); 73 + return ret; 74 + } 75 + 76 + static const struct vm_operations_struct radeon_gem_vm_ops = { 77 + .fault = radeon_gem_fault, 78 + .open = ttm_bo_vm_open, 79 + .close = ttm_bo_vm_close, 80 + .access = ttm_bo_vm_access 81 + }; 82 + 47 83 static void radeon_gem_object_free(struct drm_gem_object *gobj) 48 84 { 49 85 struct radeon_bo *robj = gem_to_radeon_bo(gobj); ··· 262 226 return r; 263 227 } 264 228 229 + static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) 230 + { 231 + struct radeon_bo *bo = gem_to_radeon_bo(obj); 232 + struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev); 233 + 234 + if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm)) 235 + return -EPERM; 236 + 237 + return drm_gem_ttm_mmap(obj, vma); 238 + } 239 + 265 240 const struct drm_gem_object_funcs radeon_gem_object_funcs = { 266 241 .free = radeon_gem_object_free, 267 242 .open = radeon_gem_object_open, ··· 283 236 .get_sg_table = radeon_gem_prime_get_sg_table, 284 237 .vmap = drm_gem_ttm_vmap, 285 238 .vunmap = drm_gem_ttm_vunmap, 239 + .mmap = radeon_gem_object_mmap, 240 + .vm_ops = &radeon_gem_vm_ops, 286 241 }; 287 242 288 243 /*
-65
drivers/gpu/drm/radeon/radeon_ttm.c
··· 135 135 *placement = rbo->placement; 136 136 } 137 137 138 - static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) 139 - { 140 - struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); 141 - struct radeon_device *rdev = radeon_get_rdev(bo->bdev); 142 - 143 - if (radeon_ttm_tt_has_userptr(rdev, bo->ttm)) 144 - return -EPERM; 145 - return drm_vma_node_verify_access(&rbo->tbo.base.vma_node, 146 - filp->private_data); 147 - } 148 - 149 138 static int radeon_move_blit(struct ttm_buffer_object *bo, 150 139 bool evict, 151 140 struct ttm_resource *new_mem, ··· 692 703 .eviction_valuable = ttm_bo_eviction_valuable, 693 704 .evict_flags = &radeon_evict_flags, 694 705 .move = &radeon_bo_move, 695 - .verify_access = &radeon_verify_access, 696 706 .delete_mem_notify = &radeon_bo_delete_mem_notify, 697 707 .io_mem_reserve = &radeon_ttm_io_mem_reserve, 698 708 }; ··· 786 798 man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM); 787 799 /* this just adjusts TTM size idea, which sets lpfn to the correct value */ 788 800 man->size = size >> PAGE_SHIFT; 789 - } 790 - 791 - static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf) 792 - { 793 - struct ttm_buffer_object *bo = vmf->vma->vm_private_data; 794 - struct radeon_device *rdev = radeon_get_rdev(bo->bdev); 795 - vm_fault_t ret; 796 - 797 - down_read(&rdev->pm.mclk_lock); 798 - 799 - ret = ttm_bo_vm_reserve(bo, vmf); 800 - if (ret) 801 - goto unlock_mclk; 802 - 803 - ret = radeon_bo_fault_reserve_notify(bo); 804 - if (ret) 805 - goto unlock_resv; 806 - 807 - ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, 808 - TTM_BO_VM_NUM_PREFAULT, 1); 809 - if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) 810 - goto unlock_mclk; 811 - 812 - unlock_resv: 813 - dma_resv_unlock(bo->base.resv); 814 - 815 - unlock_mclk: 816 - up_read(&rdev->pm.mclk_lock); 817 - return ret; 818 - } 819 - 820 - static const struct vm_operations_struct radeon_ttm_vm_ops = { 821 - .fault = 
radeon_ttm_fault, 822 - .open = ttm_bo_vm_open, 823 - .close = ttm_bo_vm_close, 824 - .access = ttm_bo_vm_access 825 - }; 826 - 827 - int radeon_mmap(struct file *filp, struct vm_area_struct *vma) 828 - { 829 - int r; 830 - struct drm_file *file_priv = filp->private_data; 831 - struct radeon_device *rdev = file_priv->minor->dev->dev_private; 832 - 833 - if (rdev == NULL) 834 - return -EINVAL; 835 - 836 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev); 837 - if (unlikely(r != 0)) 838 - return r; 839 - 840 - vma->vm_ops = &radeon_ttm_vm_ops; 841 - return 0; 842 801 } 843 802 844 803 #if defined(CONFIG_DEBUG_FS)
-1
drivers/gpu/drm/radeon/radeon_ttm.h
··· 32 32 33 33 int radeon_ttm_init(struct radeon_device *rdev); 34 34 void radeon_ttm_fini(struct radeon_device *rdev); 35 - int radeon_mmap(struct file *filp, struct vm_area_struct *vma); 36 35 37 36 #endif /* __RADEON_TTM_H__ */