Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/imagination: Hold drm_gem_gpuva lock for unmap

Avoid a warning from drm_gem_gpuva_assert_lock_held in drm_gpuva_unlink.

The Imagination driver uses the GEM object reservation lock to protect
the gpuva list, but the GEM object was not always known in the code
paths that ended up calling drm_gpuva_unlink. When the GEM object isn't
known, it is found by calling drm_gpuva_find to look up the object
associated with a given virtual address range, or by calling
drm_gpuva_find_first when removing all mappings.

Cc: stable@vger.kernel.org
Fixes: 4bc736f890ce ("drm/imagination: vm: make use of GPUVM's drm_exec helper")
Signed-off-by: Brendan King <brendan.king@imgtec.com>
Reviewed-by: Matt Coster <matt.coster@imgtec.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250226-hold-drm_gem_gpuva-lock-for-unmap-v2-1-3fdacded227f@imgtec.com
Signed-off-by: Matt Coster <matt.coster@imgtec.com>

Authored by Brendan King and committed by Matt Coster
a5c4c3ba df1a1ed5

+115 -28
+4 -2
drivers/gpu/drm/imagination/pvr_fw_meta.c
··· 527 527 static void 528 528 pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj) 529 529 { 530 - pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start, 531 - fw_obj->fw_mm_node.size); 530 + struct pvr_gem_object *pvr_obj = fw_obj->gem; 531 + 532 + pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj, 533 + fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size); 532 534 } 533 535 534 536 static bool
+108 -26
drivers/gpu/drm/imagination/pvr_vm.c
··· 293 293 294 294 static int 295 295 pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op, 296 - struct pvr_vm_context *vm_ctx, u64 device_addr, 297 - u64 size) 296 + struct pvr_vm_context *vm_ctx, 297 + struct pvr_gem_object *pvr_obj, 298 + u64 device_addr, u64 size) 298 299 { 299 300 int err; 300 301 ··· 319 318 goto err_bind_op_fini; 320 319 } 321 320 321 + bind_op->pvr_obj = pvr_obj; 322 322 bind_op->vm_ctx = vm_ctx; 323 323 bind_op->device_addr = device_addr; 324 324 bind_op->size = size; ··· 600 598 } 601 599 602 600 /** 603 - * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context. 604 - * @vm_ctx: Target VM context. 605 - * 606 - * This function ensures that no mappings are left dangling by unmapping them 607 - * all in order of ascending device-virtual address. 608 - */ 609 - void 610 - pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx) 611 - { 612 - WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, 613 - vm_ctx->gpuvm_mgr.mm_range)); 614 - } 615 - 616 - /** 617 601 * pvr_vm_context_release() - Teardown a VM context. 618 602 * @ref_count: Pointer to reference counter of the VM context. 619 603 * ··· 691 703 struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv; 692 704 struct pvr_gem_object *pvr_obj = bind_op->pvr_obj; 693 705 694 - /* Unmap operations don't have an object to lock. */ 695 - if (!pvr_obj) 696 - return 0; 697 - 698 - /* Acquire lock on the GEM being mapped. */ 706 + /* Acquire lock on the GEM object being mapped/unmapped. */ 699 707 return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj)); 700 708 } 701 709 ··· 756 772 } 757 773 758 774 /** 759 - * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory. 775 + * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual 776 + * memory. 760 777 * @vm_ctx: Target VM context. 778 + * @pvr_obj: Target PowerVR memory object. 761 779 * @device_addr: Virtual device address at the start of the target mapping. 
762 780 * @size: Size of the target mapping. 763 781 * ··· 770 784 * * Any error encountered while performing internal operations required to 771 785 * destroy the mapping (returned from pvr_vm_gpuva_unmap or 772 786 * pvr_vm_gpuva_remap). 787 + * 788 + * The vm_ctx->lock must be held when calling this function. 773 789 */ 774 - int 775 - pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size) 790 + static int 791 + pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx, 792 + struct pvr_gem_object *pvr_obj, 793 + u64 device_addr, u64 size) 776 794 { 777 795 struct pvr_vm_bind_op bind_op = {0}; 778 796 struct drm_gpuvm_exec vm_exec = { ··· 789 799 }, 790 800 }; 791 801 792 - int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr, 793 - size); 802 + int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj, 803 + device_addr, size); 794 804 if (err) 795 805 return err; 806 + 807 + pvr_gem_object_get(pvr_obj); 796 808 797 809 err = drm_gpuvm_exec_lock(&vm_exec); 798 810 if (err) ··· 808 816 pvr_vm_bind_op_fini(&bind_op); 809 817 810 818 return err; 819 + } 820 + 821 + /** 822 + * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual 823 + * memory. 824 + * @vm_ctx: Target VM context. 825 + * @pvr_obj: Target PowerVR memory object. 826 + * @device_addr: Virtual device address at the start of the target mapping. 827 + * @size: Size of the target mapping. 828 + * 829 + * Return: 830 + * * 0 on success, 831 + * * Any error encountered by pvr_vm_unmap_obj_locked. 832 + */ 833 + int 834 + pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, 835 + u64 device_addr, u64 size) 836 + { 837 + int err; 838 + 839 + mutex_lock(&vm_ctx->lock); 840 + err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size); 841 + mutex_unlock(&vm_ctx->lock); 842 + 843 + return err; 844 + } 845 + 846 + /** 847 + * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory. 
848 + * @vm_ctx: Target VM context. 849 + * @device_addr: Virtual device address at the start of the target mapping. 850 + * @size: Size of the target mapping. 851 + * 852 + * Return: 853 + * * 0 on success, 854 + * * Any error encountered by drm_gpuva_find, 855 + * * Any error encountered by pvr_vm_unmap_obj_locked. 856 + */ 857 + int 858 + pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size) 859 + { 860 + struct pvr_gem_object *pvr_obj; 861 + struct drm_gpuva *va; 862 + int err; 863 + 864 + mutex_lock(&vm_ctx->lock); 865 + 866 + va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size); 867 + if (va) { 868 + pvr_obj = gem_to_pvr_gem(va->gem.obj); 869 + err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, 870 + va->va.addr, va->va.range); 871 + } else { 872 + err = -ENOENT; 873 + } 874 + 875 + mutex_unlock(&vm_ctx->lock); 876 + 877 + return err; 878 + } 879 + 880 + /** 881 + * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context. 882 + * @vm_ctx: Target VM context. 883 + * 884 + * This function ensures that no mappings are left dangling by unmapping them 885 + * all in order of ascending device-virtual address. 886 + */ 887 + void 888 + pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx) 889 + { 890 + mutex_lock(&vm_ctx->lock); 891 + 892 + for (;;) { 893 + struct pvr_gem_object *pvr_obj; 894 + struct drm_gpuva *va; 895 + 896 + va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, 897 + vm_ctx->gpuvm_mgr.mm_start, 898 + vm_ctx->gpuvm_mgr.mm_range); 899 + if (!va) 900 + break; 901 + 902 + pvr_obj = gem_to_pvr_gem(va->gem.obj); 903 + 904 + WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, 905 + va->va.addr, va->va.range)); 906 + } 907 + 908 + mutex_unlock(&vm_ctx->lock); 811 909 } 812 910 813 911 /* Static data areas are determined by firmware. */
+3
drivers/gpu/drm/imagination/pvr_vm.h
··· 38 38 int pvr_vm_map(struct pvr_vm_context *vm_ctx, 39 39 struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset, 40 40 u64 device_addr, u64 size); 41 + int pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, 42 + struct pvr_gem_object *pvr_obj, 43 + u64 device_addr, u64 size); 41 44 int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size); 42 45 void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx); 43 46