Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: rename direct to immediate for VM updates

To avoid confusion with direct ring submissions rename bottom
of pipe VM table changes to immediate updates.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König and committed by Alex Deucher
eaad0c3a 9ecefb19

+51 -50
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
··· 282 282 !dma_fence_is_later(updates, (*id)->flushed_updates)) 283 283 updates = NULL; 284 284 285 - if ((*id)->owner != vm->direct.fence_context || 285 + if ((*id)->owner != vm->immediate.fence_context || 286 286 job->vm_pd_addr != (*id)->pd_gpu_addr || 287 287 updates || !(*id)->last_flush || 288 288 ((*id)->last_flush->context != fence_context && ··· 349 349 struct dma_fence *flushed; 350 350 351 351 /* Check all the prerequisites to using this VMID */ 352 - if ((*id)->owner != vm->direct.fence_context) 352 + if ((*id)->owner != vm->immediate.fence_context) 353 353 continue; 354 354 355 355 if ((*id)->pd_gpu_addr != job->vm_pd_addr) ··· 448 448 } 449 449 450 450 id->pd_gpu_addr = job->vm_pd_addr; 451 - id->owner = vm->direct.fence_context; 451 + id->owner = vm->immediate.fence_context; 452 452 453 453 if (job->vm_needs_flush) { 454 454 dma_fence_put(id->last_flush);
+30 -30
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 726 726 * @adev: amdgpu_device pointer 727 727 * @vm: VM to clear BO from 728 728 * @bo: BO to clear 729 - * @direct: use a direct update 729 + * @immediate: use an immediate update 730 730 * 731 731 * Root PD needs to be reserved when calling this. 732 732 * ··· 736 736 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, 737 737 struct amdgpu_vm *vm, 738 738 struct amdgpu_bo *bo, 739 - bool direct) 739 + bool immediate) 740 740 { 741 741 struct ttm_operation_ctx ctx = { true, false }; 742 742 unsigned level = adev->vm_manager.root_level; ··· 795 795 memset(&params, 0, sizeof(params)); 796 796 params.adev = adev; 797 797 params.vm = vm; 798 - params.direct = direct; 798 + params.immediate = immediate; 799 799 800 800 r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); 801 801 if (r) ··· 850 850 * @adev: amdgpu_device pointer 851 851 * @vm: requesting vm 852 852 * @level: the page table level 853 - * @direct: use a direct update 853 + * @immediate: use a immediate update 854 854 * @bp: resulting BO allocation parameters 855 855 */ 856 856 static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, 857 - int level, bool direct, 857 + int level, bool immediate, 858 858 struct amdgpu_bo_param *bp) 859 859 { 860 860 memset(bp, 0, sizeof(*bp)); ··· 870 870 else if (!vm->root.base.bo || vm->root.base.bo->shadow) 871 871 bp->flags |= AMDGPU_GEM_CREATE_SHADOW; 872 872 bp->type = ttm_bo_type_kernel; 873 - bp->no_wait_gpu = direct; 873 + bp->no_wait_gpu = immediate; 874 874 if (vm->root.base.bo) 875 875 bp->resv = vm->root.base.bo->tbo.base.resv; 876 876 } ··· 881 881 * @adev: amdgpu_device pointer 882 882 * @vm: VM to allocate page tables for 883 883 * @cursor: Which page table to allocate 884 - * @direct: use a direct update 884 + * @immediate: use an immediate update 885 885 * 886 886 * Make sure a specific page table or directory is allocated. 
887 887 * ··· 892 892 static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, 893 893 struct amdgpu_vm *vm, 894 894 struct amdgpu_vm_pt_cursor *cursor, 895 - bool direct) 895 + bool immediate) 896 896 { 897 897 struct amdgpu_vm_pt *entry = cursor->entry; 898 898 struct amdgpu_bo_param bp; ··· 913 913 if (entry->base.bo) 914 914 return 0; 915 915 916 - amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp); 916 + amdgpu_vm_bo_param(adev, vm, cursor->level, immediate, &bp); 917 917 918 918 r = amdgpu_bo_create(adev, &bp, &pt); 919 919 if (r) ··· 925 925 pt->parent = amdgpu_bo_ref(cursor->parent->base.bo); 926 926 amdgpu_vm_bo_base_init(&entry->base, vm, pt); 927 927 928 - r = amdgpu_vm_clear_bo(adev, vm, pt, direct); 928 + r = amdgpu_vm_clear_bo(adev, vm, pt, immediate); 929 929 if (r) 930 930 goto error_free_pt; 931 931 ··· 1276 1276 * 1277 1277 * @adev: amdgpu_device pointer 1278 1278 * @vm: requested vm 1279 - * @direct: submit directly to the paging queue 1279 + * @immediate: submit immediately to the paging queue 1280 1280 * 1281 1281 * Makes sure all directories are up to date. 1282 1282 * ··· 1284 1284 * 0 for success, error for failure. 
1285 1285 */ 1286 1286 int amdgpu_vm_update_pdes(struct amdgpu_device *adev, 1287 - struct amdgpu_vm *vm, bool direct) 1287 + struct amdgpu_vm *vm, bool immediate) 1288 1288 { 1289 1289 struct amdgpu_vm_update_params params; 1290 1290 int r; ··· 1295 1295 memset(&params, 0, sizeof(params)); 1296 1296 params.adev = adev; 1297 1297 params.vm = vm; 1298 - params.direct = direct; 1298 + params.immediate = immediate; 1299 1299 1300 1300 r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT); 1301 1301 if (r) ··· 1451 1451 * address range are actually allocated 1452 1452 */ 1453 1453 r = amdgpu_vm_alloc_pts(params->adev, params->vm, 1454 - &cursor, params->direct); 1454 + &cursor, params->immediate); 1455 1455 if (r) 1456 1456 return r; 1457 1457 } ··· 1557 1557 * 1558 1558 * @adev: amdgpu_device pointer 1559 1559 * @vm: requested vm 1560 - * @direct: direct submission in a page fault 1560 + * @immediate: immediate submission in a page fault 1561 1561 * @resv: fences we need to sync to 1562 1562 * @start: start of mapped range 1563 1563 * @last: last mapped entry ··· 1572 1572 * 0 for success, -EINVAL for failure. 
1573 1573 */ 1574 1574 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, 1575 - struct amdgpu_vm *vm, bool direct, 1575 + struct amdgpu_vm *vm, bool immediate, 1576 1576 struct dma_resv *resv, 1577 1577 uint64_t start, uint64_t last, 1578 1578 uint64_t flags, uint64_t addr, ··· 1586 1586 memset(&params, 0, sizeof(params)); 1587 1587 params.adev = adev; 1588 1588 params.vm = vm; 1589 - params.direct = direct; 1589 + params.immediate = immediate; 1590 1590 params.pages_addr = pages_addr; 1591 1591 1592 1592 /* Implicitly sync to command submissions in the same VM before ··· 1606 1606 if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) { 1607 1607 struct amdgpu_bo *root = vm->root.base.bo; 1608 1608 1609 - if (!dma_fence_is_signaled(vm->last_direct)) 1610 - amdgpu_bo_fence(root, vm->last_direct, true); 1609 + if (!dma_fence_is_signaled(vm->last_immediate)) 1610 + amdgpu_bo_fence(root, vm->last_immediate, true); 1611 1611 } 1612 1612 1613 1613 r = vm->update_funcs->prepare(&params, resv, sync_mode); ··· 2582 2582 return false; 2583 2583 2584 2584 /* Don't evict VM page tables while they are updated */ 2585 - if (!dma_fence_is_signaled(bo_base->vm->last_direct)) { 2585 + if (!dma_fence_is_signaled(bo_base->vm->last_immediate)) { 2586 2586 amdgpu_vm_eviction_unlock(bo_base->vm); 2587 2587 return false; 2588 2588 } ··· 2759 2759 if (timeout <= 0) 2760 2760 return timeout; 2761 2761 2762 - return dma_fence_wait_timeout(vm->last_direct, true, timeout); 2762 + return dma_fence_wait_timeout(vm->last_immediate, true, timeout); 2763 2763 } 2764 2764 2765 2765 /** ··· 2795 2795 2796 2796 2797 2797 /* create scheduler entities for page table updates */ 2798 - r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL, 2798 + r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL, 2799 2799 adev->vm_manager.vm_pte_scheds, 2800 2800 adev->vm_manager.vm_pte_num_scheds, NULL); 2801 2801 if (r) ··· 2805 2805 adev->vm_manager.vm_pte_scheds, 2806 2806 
adev->vm_manager.vm_pte_num_scheds, NULL); 2807 2807 if (r) 2808 - goto error_free_direct; 2808 + goto error_free_immediate; 2809 2809 2810 2810 vm->pte_support_ats = false; 2811 2811 vm->is_compute_context = false; ··· 2831 2831 else 2832 2832 vm->update_funcs = &amdgpu_vm_sdma_funcs; 2833 2833 vm->last_update = NULL; 2834 - vm->last_direct = dma_fence_get_stub(); 2834 + vm->last_immediate = dma_fence_get_stub(); 2835 2835 2836 2836 mutex_init(&vm->eviction_lock); 2837 2837 vm->evicting = false; ··· 2885 2885 vm->root.base.bo = NULL; 2886 2886 2887 2887 error_free_delayed: 2888 - dma_fence_put(vm->last_direct); 2888 + dma_fence_put(vm->last_immediate); 2889 2889 drm_sched_entity_destroy(&vm->delayed); 2890 2890 2891 - error_free_direct: 2892 - drm_sched_entity_destroy(&vm->direct); 2891 + error_free_immediate: 2892 + drm_sched_entity_destroy(&vm->immediate); 2893 2893 2894 2894 return r; 2895 2895 } ··· 3086 3086 vm->pasid = 0; 3087 3087 } 3088 3088 3089 - dma_fence_wait(vm->last_direct, false); 3090 - dma_fence_put(vm->last_direct); 3089 + dma_fence_wait(vm->last_immediate, false); 3090 + dma_fence_put(vm->last_immediate); 3091 3091 3092 3092 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { 3093 3093 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { ··· 3104 3104 amdgpu_bo_unref(&root); 3105 3105 WARN_ON(vm->root.base.bo); 3106 3106 3107 - drm_sched_entity_destroy(&vm->direct); 3107 + drm_sched_entity_destroy(&vm->immediate); 3108 3108 drm_sched_entity_destroy(&vm->delayed); 3109 3109 3110 3110 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
+5 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 206 206 struct amdgpu_vm *vm; 207 207 208 208 /** 209 - * @direct: if changes should be made directly 209 + * @immediate: if changes should be made immediately 210 210 */ 211 - bool direct; 211 + bool immediate; 212 212 213 213 /** 214 214 * @pages_addr: ··· 274 274 struct dma_fence *last_update; 275 275 276 276 /* Scheduler entities for page table updates */ 277 - struct drm_sched_entity direct; 277 + struct drm_sched_entity immediate; 278 278 struct drm_sched_entity delayed; 279 279 280 280 /* Last submission to the scheduler entities */ 281 - struct dma_fence *last_direct; 281 + struct dma_fence *last_immediate; 282 282 283 283 unsigned int pasid; 284 284 /* dedicated to vm */ ··· 379 379 void *param); 380 380 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync); 381 381 int amdgpu_vm_update_pdes(struct amdgpu_device *adev, 382 - struct amdgpu_vm *vm, bool direct); 382 + struct amdgpu_vm *vm, bool immediate); 383 383 int amdgpu_vm_clear_freed(struct amdgpu_device *adev, 384 384 struct amdgpu_vm *vm, 385 385 struct dma_fence **fence);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
··· 84 84 85 85 pe += (unsigned long)amdgpu_bo_kptr(bo); 86 86 87 - trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct); 87 + trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate); 88 88 89 89 for (i = 0; i < count; i++) { 90 90 value = p->pages_addr ?
+12 -11
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
··· 61 61 struct dma_resv *resv, 62 62 enum amdgpu_sync_mode sync_mode) 63 63 { 64 - enum amdgpu_ib_pool_type pool = p->direct ? AMDGPU_IB_POOL_IMMEDIATE : 65 - AMDGPU_IB_POOL_DELAYED; 64 + enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE 65 + : AMDGPU_IB_POOL_DELAYED; 66 66 unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW; 67 67 int r; 68 68 ··· 96 96 struct amdgpu_ring *ring; 97 97 int r; 98 98 99 - entity = p->direct ? &p->vm->direct : &p->vm->delayed; 99 + entity = p->immediate ? &p->vm->immediate : &p->vm->delayed; 100 100 ring = container_of(entity->rq->sched, struct amdgpu_ring, sched); 101 101 102 102 WARN_ON(ib->length_dw == 0); ··· 106 106 if (r) 107 107 goto error; 108 108 109 - if (p->direct) { 109 + if (p->immediate) { 110 110 tmp = dma_fence_get(f); 111 - swap(p->vm->last_direct, tmp); 111 + swap(p->vm->last_immediate, tmp); 112 112 dma_fence_put(tmp); 113 113 } else { 114 - dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f); 114 + dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, 115 + f); 115 116 116 117 117 - if (fence && !p->direct) 118 + if (fence && !p->immediate) 118 119 swap(*fence, f); 119 120 dma_fence_put(f); 120 121 return 0; ··· 145 144 src += p->num_dw_left * 4; 146 145 147 146 pe += amdgpu_gmc_sign_extend(bo->tbo.offset); 148 - trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct); 147 + trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate); 149 148 150 149 amdgpu_vm_copy_pte(p->adev, ib, pe, src, count); 151 150 } ··· 172 171 struct amdgpu_ib *ib = p->job->ibs; 173 172 174 173 pe += amdgpu_gmc_sign_extend(bo->tbo.offset); 175 - trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct); 174 + trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate); 176 175 if (count < 3) { 177 176 amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags, 178 177 count, incr); ··· 201 200 uint64_t addr, unsigned count, uint32_t incr, 202 201 uint64_t flags) 203 202 { 204 - enum 
amdgpu_ib_pool_type pool = p->direct ? AMDGPU_IB_POOL_IMMEDIATE : 205 - AMDGPU_IB_POOL_DELAYED; 203 + enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE 204 + : AMDGPU_IB_POOL_DELAYED; 206 205 unsigned int i, ndw, nptes; 207 206 uint64_t *pte; 208 207 int r;