Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: move VM table mapping into the backend as well

Clean that up further and also fix another case where the BO
wasn't kmapped for CPU based updates.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König and committed by Alex Deucher
ecf96b52 df399b06

+37 -27
+5 -26
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 659 659 if (bo->tbo.type != ttm_bo_type_kernel) { 660 660 amdgpu_vm_bo_moved(bo_base); 661 661 } else { 662 - if (vm->use_cpu_for_update) 663 - r = amdgpu_bo_kmap(bo, NULL); 664 - else 665 - r = amdgpu_ttm_alloc_gart(&bo->tbo); 666 - if (r) 667 - break; 668 - if (bo->shadow) { 669 - r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo); 670 - if (r) 671 - break; 672 - } 662 + vm->update_funcs->map_table(bo); 673 663 amdgpu_vm_bo_relocated(bo_base); 674 664 } 675 665 } ··· 741 751 if (r) 742 752 return r; 743 753 744 - r = amdgpu_ttm_alloc_gart(&bo->tbo); 745 - if (r) 746 - return r; 747 - 748 754 if (bo->shadow) { 749 755 r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement, 750 756 &ctx); 751 757 if (r) 752 758 return r; 753 - 754 - r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo); 755 - if (r) 756 - return r; 757 - 758 759 } 760 + 761 + r = vm->update_funcs->map_table(bo); 762 + if (r) 763 + return r; 759 764 760 765 memset(&params, 0, sizeof(params)); 761 766 params.adev = adev; ··· 861 876 r = amdgpu_bo_create(adev, &bp, &pt); 862 877 if (r) 863 878 return r; 864 - 865 - if (vm->use_cpu_for_update) { 866 - r = amdgpu_bo_kmap(pt, NULL); 867 - if (r) 868 - goto error_free_pt; 869 - } 870 879 871 880 /* Keep a reference to the root directory to avoid 872 881 * freeing them up in the wrong order.
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 215 215 }; 216 216 217 217 struct amdgpu_vm_update_funcs { 218 - 218 + int (*map_table)(struct amdgpu_bo *bo); 219 219 int (*prepare)(struct amdgpu_vm_update_params *p, void * owner, 220 220 struct dma_fence *exclusive); 221 221 int (*update)(struct amdgpu_vm_update_params *p,
+11
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
··· 25 25 #include "amdgpu_trace.h" 26 26 27 27 /** 28 + * amdgpu_vm_cpu_map_table - make sure new PDs/PTs are kmapped 29 + * 30 + * @table: newly allocated or validated PD/PT 31 + */ 32 + static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table) 33 + { 34 + return amdgpu_bo_kmap(table, NULL); 35 + } 36 + 37 + /** 28 38 * amdgpu_vm_cpu_prepare - prepare page table update with the CPU 29 39 * 30 40 * @p: see amdgpu_vm_update_params definition ··· 120 110 } 121 111 122 112 const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs = { 113 + .map_table = amdgpu_vm_cpu_map_table, 123 114 .prepare = amdgpu_vm_cpu_prepare, 124 115 .update = amdgpu_vm_cpu_update, 125 116 .commit = amdgpu_vm_cpu_commit
+20
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
··· 29 29 #define AMDGPU_VM_SDMA_MAX_NUM_DW (16u * 1024u) 30 30 31 31 /** 32 + * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped 33 + * 34 + * @table: newly allocated or validated PD/PT 35 + */ 36 + static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table) 37 + { 38 + int r; 39 + 40 + r = amdgpu_ttm_alloc_gart(&table->tbo); 41 + if (r) 42 + return r; 43 + 44 + if (table->shadow) 45 + r = amdgpu_ttm_alloc_gart(&table->shadow->tbo); 46 + 47 + return r; 48 + } 49 + 50 + /** 32 51 * amdgpu_vm_sdma_prepare - prepare SDMA command submission 33 52 * 34 53 * @p: see amdgpu_vm_update_params definition ··· 261 242 } 262 243 263 244 const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = { 245 + .map_table = amdgpu_vm_sdma_map_table, 264 246 .prepare = amdgpu_vm_sdma_prepare, 265 247 .update = amdgpu_vm_sdma_update, 266 248 .commit = amdgpu_vm_sdma_commit