Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: remove amdgpu_vm_pt

Page table entries are now embedded in VM BO, so
we do not need struct amdgpu_vm_pt. This patch replaces
struct amdgpu_vm_pt with struct amdgpu_vm_bo_base.

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Nirmoy Das and committed by
Alex Deucher
391629bd ed4454c3

+105 -126
+13 -13
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 356 356 */ 357 357 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) 358 358 { 359 - struct amdgpu_bo *pd = vm->root.base.bo; 359 + struct amdgpu_bo *pd = vm->root.bo; 360 360 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 361 361 int ret; 362 362 ··· 372 372 return ret; 373 373 } 374 374 375 - vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); 375 + vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo); 376 376 377 377 if (vm->use_cpu_for_update) { 378 378 ret = amdgpu_bo_kmap(pd, NULL); ··· 387 387 388 388 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) 389 389 { 390 - struct amdgpu_bo *pd = vm->root.base.bo; 390 + struct amdgpu_bo *pd = vm->root.bo; 391 391 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 392 392 int ret; 393 393 ··· 1153 1153 1154 1154 list_for_each_entry(peer_vm, &process_info->vm_list_head, 1155 1155 vm_list_node) { 1156 - struct amdgpu_bo *pd = peer_vm->root.base.bo; 1156 + struct amdgpu_bo *pd = peer_vm->root.bo; 1157 1157 1158 1158 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv, 1159 1159 AMDGPU_SYNC_NE_OWNER, ··· 1220 1220 vm->process_info = *process_info; 1221 1221 1222 1222 /* Validate page directory and attach eviction fence */ 1223 - ret = amdgpu_bo_reserve(vm->root.base.bo, true); 1223 + ret = amdgpu_bo_reserve(vm->root.bo, true); 1224 1224 if (ret) 1225 1225 goto reserve_pd_fail; 1226 1226 ret = vm_validate_pt_pd_bos(vm); ··· 1228 1228 pr_err("validate_pt_pd_bos() failed\n"); 1229 1229 goto validate_pd_fail; 1230 1230 } 1231 - ret = amdgpu_bo_sync_wait(vm->root.base.bo, 1231 + ret = amdgpu_bo_sync_wait(vm->root.bo, 1232 1232 AMDGPU_FENCE_OWNER_KFD, false); 1233 1233 if (ret) 1234 1234 goto wait_pd_fail; 1235 - ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); 1235 + ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1); 1236 1236 if (ret) 1237 1237 goto reserve_shared_fail; 1238 - amdgpu_bo_fence(vm->root.base.bo, 1238 + 
amdgpu_bo_fence(vm->root.bo, 1239 1239 &vm->process_info->eviction_fence->base, true); 1240 - amdgpu_bo_unreserve(vm->root.base.bo); 1240 + amdgpu_bo_unreserve(vm->root.bo); 1241 1241 1242 1242 /* Update process info */ 1243 1243 mutex_lock(&vm->process_info->lock); ··· 1251 1251 reserve_shared_fail: 1252 1252 wait_pd_fail: 1253 1253 validate_pd_fail: 1254 - amdgpu_bo_unreserve(vm->root.base.bo); 1254 + amdgpu_bo_unreserve(vm->root.bo); 1255 1255 reserve_pd_fail: 1256 1256 vm->process_info = NULL; 1257 1257 if (info) { ··· 1306 1306 struct amdgpu_vm *vm) 1307 1307 { 1308 1308 struct amdkfd_process_info *process_info = vm->process_info; 1309 - struct amdgpu_bo *pd = vm->root.base.bo; 1309 + struct amdgpu_bo *pd = vm->root.bo; 1310 1310 1311 1311 if (!process_info) 1312 1312 return; ··· 1362 1362 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv) 1363 1363 { 1364 1364 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); 1365 - struct amdgpu_bo *pd = avm->root.base.bo; 1365 + struct amdgpu_bo *pd = avm->root.bo; 1366 1366 struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); 1367 1367 1368 1368 if (adev->asic_type < CHIP_VEGA10) ··· 2389 2389 /* Attach eviction fence to PD / PT BOs */ 2390 2390 list_for_each_entry(peer_vm, &process_info->vm_list_head, 2391 2391 vm_list_node) { 2392 - struct amdgpu_bo *bo = peer_vm->root.base.bo; 2392 + struct amdgpu_bo *bo = peer_vm->root.bo; 2393 2393 2394 2394 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true); 2395 2395 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 832 832 if (r) 833 833 return r; 834 834 835 - p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); 835 + p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo); 836 836 837 837 if (amdgpu_vm_debug) { 838 838 /* Invalidate all BOs to test for userspace bugs */
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
··· 1304 1304 1305 1305 seq_printf(m, "pid:%d\tProcess:%s ----------\n", 1306 1306 vm->task_info.pid, vm->task_info.process_name); 1307 - r = amdgpu_bo_reserve(vm->root.base.bo, true); 1307 + r = amdgpu_bo_reserve(vm->root.bo, true); 1308 1308 if (r) 1309 1309 break; 1310 1310 amdgpu_debugfs_vm_bo_info(vm, m); 1311 - amdgpu_bo_unreserve(vm->root.base.bo); 1311 + amdgpu_bo_unreserve(vm->root.bo); 1312 1312 } 1313 1313 1314 1314 mutex_unlock(&dev->filelist_mutex);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
··· 448 448 449 449 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { 450 450 struct amdgpu_vm *vm = bo_base->vm; 451 - struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; 451 + struct dma_resv *resv = vm->root.bo->tbo.base.resv; 452 452 453 453 if (ticket) { 454 454 /* When we get an error here it means that somebody
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
··· 69 69 dev = PCI_SLOT(adev->pdev->devfn); 70 70 fn = PCI_FUNC(adev->pdev->devfn); 71 71 72 - ret = amdgpu_bo_reserve(fpriv->vm.root.base.bo, false); 72 + ret = amdgpu_bo_reserve(fpriv->vm.root.bo, false); 73 73 if (ret) { 74 74 DRM_ERROR("Fail to reserve bo\n"); 75 75 return; 76 76 } 77 77 amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem); 78 - amdgpu_bo_unreserve(fpriv->vm.root.base.bo); 78 + amdgpu_bo_unreserve(fpriv->vm.root.bo); 79 79 seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus, 80 80 dev, fn, fpriv->vm.pasid); 81 81 seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
+6 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 170 170 return -EPERM; 171 171 172 172 if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID && 173 - abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) 173 + abo->tbo.base.resv != vm->root.bo->tbo.base.resv) 174 174 return -EPERM; 175 175 176 176 r = amdgpu_bo_reserve(abo, false); ··· 320 320 } 321 321 322 322 if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { 323 - r = amdgpu_bo_reserve(vm->root.base.bo, false); 323 + r = amdgpu_bo_reserve(vm->root.bo, false); 324 324 if (r) 325 325 return r; 326 326 327 - resv = vm->root.base.bo->tbo.base.resv; 327 + resv = vm->root.bo->tbo.base.resv; 328 328 } 329 329 330 330 initial_domain = (u32)(0xffffffff & args->in.domains); ··· 353 353 if (!r) { 354 354 struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); 355 355 356 - abo->parent = amdgpu_bo_ref(vm->root.base.bo); 356 + abo->parent = amdgpu_bo_ref(vm->root.bo); 357 357 } 358 - amdgpu_bo_unreserve(vm->root.base.bo); 358 + amdgpu_bo_unreserve(vm->root.bo); 359 359 } 360 360 if (r) 361 361 return r; ··· 841 841 } 842 842 for (base = robj->vm_bo; base; base = base->next) 843 843 if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev), 844 - amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) { 844 + amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) { 845 845 r = -EINVAL; 846 846 amdgpu_bo_unreserve(robj); 847 847 goto out;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 1262 1262 } 1263 1263 1264 1264 pasid = fpriv->vm.pasid; 1265 - pd = amdgpu_bo_ref(fpriv->vm.root.base.bo); 1265 + pd = amdgpu_bo_ref(fpriv->vm.root.bo); 1266 1266 1267 1267 amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); 1268 1268 amdgpu_vm_fini(adev, &fpriv->vm);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
··· 126 126 struct amdgpu_bo_vm { 127 127 struct amdgpu_bo bo; 128 128 struct amdgpu_bo *shadow; 129 - struct amdgpu_vm_pt entries[]; 129 + struct amdgpu_vm_bo_base entries[]; 130 130 }; 131 131 132 132 static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
+75 -89
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 332 332 base->next = bo->vm_bo; 333 333 bo->vm_bo = base; 334 334 335 - if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) 335 + if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) 336 336 return; 337 337 338 338 vm->bulk_moveable = false; ··· 361 361 * Helper to get the parent entry for the child page table. NULL if we are at 362 362 * the root page directory. 363 363 */ 364 - static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) 364 + static struct amdgpu_vm_bo_base *amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt) 365 365 { 366 - struct amdgpu_bo *parent = pt->base.bo->parent; 366 + struct amdgpu_bo *parent = pt->bo->parent; 367 367 368 368 if (!parent) 369 369 return NULL; 370 370 371 - return container_of(parent->vm_bo, struct amdgpu_vm_pt, base); 371 + return parent->vm_bo; 372 372 } 373 373 374 374 /* ··· 376 376 */ 377 377 struct amdgpu_vm_pt_cursor { 378 378 uint64_t pfn; 379 - struct amdgpu_vm_pt *parent; 380 - struct amdgpu_vm_pt *entry; 379 + struct amdgpu_vm_bo_base *parent; 380 + struct amdgpu_vm_bo_base *entry; 381 381 unsigned level; 382 382 }; 383 383 ··· 416 416 { 417 417 unsigned mask, shift, idx; 418 418 419 - if (!cursor->entry->entries) 419 + if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry || 420 + !cursor->entry->bo) 420 421 return false; 421 422 422 - BUG_ON(!cursor->entry->base.bo); 423 423 mask = amdgpu_vm_entries_mask(adev, cursor->level); 424 424 shift = amdgpu_vm_level_shift(adev, cursor->level); 425 425 426 426 ++cursor->level; 427 427 idx = (cursor->pfn >> shift) & mask; 428 428 cursor->parent = cursor->entry; 429 - cursor->entry = &cursor->entry->entries[idx]; 429 + cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx]; 430 430 return true; 431 431 } 432 432 ··· 453 453 shift = amdgpu_vm_level_shift(adev, cursor->level - 1); 454 454 num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1); 455 455 456 - if (cursor->entry == &cursor->parent->entries[num_entries - 1]) 456 + if 
(cursor->entry == &to_amdgpu_bo_vm(cursor->parent->bo)->entries[num_entries - 1]) 457 457 return false; 458 458 459 459 cursor->pfn += 1ULL << shift; ··· 539 539 * True when the search should continue, false otherwise. 540 540 */ 541 541 static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start, 542 - struct amdgpu_vm_pt *entry) 542 + struct amdgpu_vm_bo_base *entry) 543 543 { 544 544 return entry && (!start || entry != start->entry); 545 545 } ··· 590 590 struct amdgpu_bo_list_entry *entry) 591 591 { 592 592 entry->priority = 0; 593 - entry->tv.bo = &vm->root.base.bo->tbo; 593 + entry->tv.bo = &vm->root.bo->tbo; 594 594 /* Two for VM updates, one for TTM and one for the CS job */ 595 595 entry->tv.num_shared = 4; 596 596 entry->user_pages = NULL; ··· 622 622 for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) { 623 623 struct amdgpu_vm *vm = bo_base->vm; 624 624 625 - if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) 625 + if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv) 626 626 vm->bulk_moveable = false; 627 627 } 628 628 ··· 781 781 entries -= ats_entries; 782 782 783 783 } else { 784 - struct amdgpu_vm_pt *pt; 784 + struct amdgpu_vm_bo_base *pt; 785 785 786 - pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base); 786 + pt = ancestor->vm_bo; 787 787 ats_entries = amdgpu_vm_num_ats_entries(adev); 788 - if ((pt - vm->root.entries) >= ats_entries) { 788 + if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >= ats_entries) { 789 789 ats_entries = 0; 790 790 } else { 791 791 ats_entries = entries; ··· 902 902 903 903 bp.type = ttm_bo_type_kernel; 904 904 bp.no_wait_gpu = immediate; 905 - if (vm->root.base.bo) 906 - bp.resv = vm->root.base.bo->tbo.base.resv; 905 + if (vm->root.bo) 906 + bp.resv = vm->root.bo->tbo.base.resv; 907 907 908 908 r = amdgpu_bo_create_vm(adev, &bp, vmbo); 909 909 if (r) ··· 962 962 struct amdgpu_vm_pt_cursor *cursor, 963 963 bool immediate) 964 964 { 965 - struct amdgpu_vm_pt *entry = cursor->entry; 
965 + struct amdgpu_vm_bo_base *entry = cursor->entry; 966 966 struct amdgpu_bo *pt_bo; 967 967 struct amdgpu_bo_vm *pt; 968 968 int r; 969 969 970 - if (entry->base.bo) { 971 - if (cursor->level < AMDGPU_VM_PTB) 972 - entry->entries = 973 - to_amdgpu_bo_vm(entry->base.bo)->entries; 974 - else 975 - entry->entries = NULL; 970 + if (entry->bo) 976 971 return 0; 977 - } 978 972 979 973 r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt); 980 974 if (r) ··· 978 984 * freeing them up in the wrong order. 979 985 */ 980 986 pt_bo = &pt->bo; 981 - pt_bo->parent = amdgpu_bo_ref(cursor->parent->base.bo); 982 - amdgpu_vm_bo_base_init(&entry->base, vm, pt_bo); 983 - if (cursor->level < AMDGPU_VM_PTB) 984 - entry->entries = pt->entries; 985 - else 986 - entry->entries = NULL; 987 - 987 + pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo); 988 + amdgpu_vm_bo_base_init(entry, vm, pt_bo); 988 989 r = amdgpu_vm_clear_bo(adev, vm, pt, immediate); 989 990 if (r) 990 991 goto error_free_pt; ··· 997 1008 * 998 1009 * @entry: PDE to free 999 1010 */ 1000 - static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry) 1011 + static void amdgpu_vm_free_table(struct amdgpu_vm_bo_base *entry) 1001 1012 { 1002 1013 struct amdgpu_bo *shadow; 1003 1014 1004 - if (entry->base.bo) { 1005 - shadow = amdgpu_bo_shadowed(entry->base.bo); 1006 - entry->base.bo->vm_bo = NULL; 1007 - list_del(&entry->base.vm_status); 1008 - amdgpu_bo_unref(&shadow); 1009 - amdgpu_bo_unref(&entry->base.bo); 1010 - } 1011 - entry->entries = NULL; 1015 + if (!entry->bo) 1016 + return; 1017 + shadow = amdgpu_bo_shadowed(entry->bo); 1018 + entry->bo->vm_bo = NULL; 1019 + list_del(&entry->vm_status); 1020 + amdgpu_bo_unref(&shadow); 1021 + amdgpu_bo_unref(&entry->bo); 1012 1022 } 1013 1023 1014 1024 /** ··· 1024 1036 struct amdgpu_vm_pt_cursor *start) 1025 1037 { 1026 1038 struct amdgpu_vm_pt_cursor cursor; 1027 - struct amdgpu_vm_pt *entry; 1039 + struct amdgpu_vm_bo_base *entry; 1028 1040 1029 1041 
vm->bulk_moveable = false; 1030 1042 ··· 1292 1304 */ 1293 1305 static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params, 1294 1306 struct amdgpu_vm *vm, 1295 - struct amdgpu_vm_pt *entry) 1307 + struct amdgpu_vm_bo_base *entry) 1296 1308 { 1297 - struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry); 1298 - struct amdgpu_bo *bo = parent->base.bo, *pbo; 1309 + struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry); 1310 + struct amdgpu_bo *bo = parent->bo, *pbo; 1299 1311 uint64_t pde, pt, flags; 1300 1312 unsigned level; 1301 1313 ··· 1303 1315 pbo = pbo->parent; 1304 1316 1305 1317 level += params->adev->vm_manager.root_level; 1306 - amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags); 1307 - pde = (entry - parent->entries) * 8; 1318 + amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags); 1319 + pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8; 1308 1320 return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt, 1309 1321 1, 0, flags); 1310 1322 } ··· 1321 1333 struct amdgpu_vm *vm) 1322 1334 { 1323 1335 struct amdgpu_vm_pt_cursor cursor; 1324 - struct amdgpu_vm_pt *entry; 1336 + struct amdgpu_vm_bo_base *entry; 1325 1337 1326 1338 for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) 1327 - if (entry->base.bo && !entry->base.moved) 1328 - amdgpu_vm_bo_relocated(&entry->base); 1339 + if (entry->bo && !entry->moved) 1340 + amdgpu_vm_bo_relocated(entry); 1329 1341 } 1330 1342 1331 1343 /** ··· 1359 1371 return r; 1360 1372 1361 1373 while (!list_empty(&vm->relocated)) { 1362 - struct amdgpu_vm_pt *entry; 1374 + struct amdgpu_vm_bo_base *entry; 1363 1375 1364 - entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt, 1365 - base.vm_status); 1366 - amdgpu_vm_bo_idle(&entry->base); 1376 + entry = list_first_entry(&vm->relocated, 1377 + struct amdgpu_vm_bo_base, 1378 + vm_status); 1379 + amdgpu_vm_bo_idle(entry); 1367 1380 1368 1381 r = amdgpu_vm_update_pde(&params, vm, entry); 1369 1382 if (r) ··· 
1544 1555 continue; 1545 1556 } 1546 1557 1547 - pt = cursor.entry->base.bo; 1558 + pt = cursor.entry->bo; 1548 1559 if (!pt) { 1549 1560 /* We need all PDs and PTs for mapping something, */ 1550 1561 if (flags & AMDGPU_PTE_VALID) ··· 1556 1567 if (!amdgpu_vm_pt_ancestor(&cursor)) 1557 1568 return -EINVAL; 1558 1569 1559 - pt = cursor.entry->base.bo; 1570 + pt = cursor.entry->bo; 1560 1571 shift = parent_shift; 1561 1572 frag_end = max(frag_end, ALIGN(frag_start + 1, 1562 1573 1ULL << shift)); ··· 1611 1622 */ 1612 1623 while (cursor.pfn < frag_start) { 1613 1624 /* Make sure previous mapping is freed */ 1614 - if (cursor.entry->base.bo) { 1625 + if (cursor.entry->bo) { 1615 1626 params->table_freed = true; 1616 1627 amdgpu_vm_free_pts(adev, params->vm, &cursor); 1617 1628 } ··· 1693 1704 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { 1694 1705 struct dma_fence *tmp = dma_fence_get_stub(); 1695 1706 1696 - amdgpu_bo_fence(vm->root.base.bo, vm->last_unlocked, true); 1707 + amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true); 1697 1708 swap(vm->last_unlocked, tmp); 1698 1709 dma_fence_put(tmp); 1699 1710 } ··· 1839 1850 1840 1851 if (clear || !bo) { 1841 1852 mem = NULL; 1842 - resv = vm->root.base.bo->tbo.base.resv; 1853 + resv = vm->root.bo->tbo.base.resv; 1843 1854 } else { 1844 1855 struct drm_gem_object *obj = &bo->tbo.base; 1845 1856 ··· 1870 1881 } 1871 1882 1872 1883 if (clear || (bo && bo->tbo.base.resv == 1873 - vm->root.base.bo->tbo.base.resv)) 1884 + vm->root.bo->tbo.base.resv)) 1874 1885 last_update = &vm->last_update; 1875 1886 else 1876 1887 last_update = &bo_va->last_pt_update; ··· 1912 1923 * the evicted list so that it gets validated again on the 1913 1924 * next command submission. 
1914 1925 */ 1915 - if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { 1926 + if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { 1916 1927 uint32_t mem_type = bo->tbo.resource->mem_type; 1917 1928 1918 1929 if (!(bo->preferred_domains & ··· 2049 2060 */ 2050 2061 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 2051 2062 { 2052 - struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; 2063 + struct dma_resv *resv = vm->root.bo->tbo.base.resv; 2053 2064 struct dma_fence *excl, **shared; 2054 2065 unsigned i, shared_count; 2055 2066 int r; ··· 2095 2106 struct amdgpu_vm *vm, 2096 2107 struct dma_fence **fence) 2097 2108 { 2098 - struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; 2109 + struct dma_resv *resv = vm->root.bo->tbo.base.resv; 2099 2110 struct amdgpu_bo_va_mapping *mapping; 2100 2111 uint64_t init_pte_value = 0; 2101 2112 struct dma_fence *f = NULL; ··· 2254 2265 if (mapping->flags & AMDGPU_PTE_PRT) 2255 2266 amdgpu_vm_prt_get(adev); 2256 2267 2257 - if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv && 2268 + if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && 2258 2269 !bo_va->base.moved) { 2259 2270 list_move(&bo_va->base.vm_status, &vm->moved); 2260 2271 } ··· 2616 2627 struct amdgpu_vm_bo_base **base; 2617 2628 2618 2629 if (bo) { 2619 - if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) 2630 + if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) 2620 2631 vm->bulk_moveable = false; 2621 2632 2622 2633 for (base = &bo_va->base.bo->vm_bo; *base; ··· 2710 2721 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { 2711 2722 struct amdgpu_vm *vm = bo_base->vm; 2712 2723 2713 - if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { 2724 + if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { 2714 2725 amdgpu_vm_bo_evicted(bo_base); 2715 2726 continue; 2716 2727 } ··· 2721 2732 2722 2733 if (bo->tbo.type == ttm_bo_type_kernel) 2723 
2734 amdgpu_vm_bo_relocated(bo_base); 2724 - else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) 2735 + else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) 2725 2736 amdgpu_vm_bo_moved(bo_base); 2726 2737 else 2727 2738 amdgpu_vm_bo_invalidated(bo_base); ··· 2851 2862 */ 2852 2863 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) 2853 2864 { 2854 - timeout = dma_resv_wait_timeout(vm->root.base.bo->tbo.base.resv, true, 2865 + timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true, 2855 2866 true, timeout); 2856 2867 if (timeout <= 0) 2857 2868 return timeout; ··· 2937 2948 if (r) 2938 2949 goto error_unreserve; 2939 2950 2940 - amdgpu_vm_bo_base_init(&vm->root.base, vm, root_bo); 2951 + amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); 2941 2952 2942 2953 r = amdgpu_vm_clear_bo(adev, vm, root, false); 2943 2954 if (r) 2944 2955 goto error_unreserve; 2945 2956 2946 - amdgpu_bo_unreserve(vm->root.base.bo); 2957 + amdgpu_bo_unreserve(vm->root.bo); 2947 2958 2948 2959 if (pasid) { 2949 2960 unsigned long flags; ··· 2963 2974 return 0; 2964 2975 2965 2976 error_unreserve: 2966 - amdgpu_bo_unreserve(vm->root.base.bo); 2977 + amdgpu_bo_unreserve(vm->root.bo); 2967 2978 2968 2979 error_free_root: 2969 2980 amdgpu_bo_unref(&root->shadow); 2970 2981 amdgpu_bo_unref(&root_bo); 2971 - vm->root.base.bo = NULL; 2982 + vm->root.bo = NULL; 2972 2983 2973 2984 error_free_delayed: 2974 2985 dma_fence_put(vm->last_unlocked); ··· 2994 3005 * 0 if this VM is clean 2995 3006 */ 2996 3007 static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, 2997 - struct amdgpu_vm *vm) 3008 + struct amdgpu_vm *vm) 2998 3009 { 2999 3010 enum amdgpu_vm_level root = adev->vm_manager.root_level; 3000 3011 unsigned int entries = amdgpu_vm_num_entries(adev, root); 3001 3012 unsigned int i = 0; 3002 3013 3003 - if (!(vm->root.entries)) 3004 - return 0; 3005 - 3006 3014 for (i = 0; i < entries; i++) { 3007 - if (vm->root.entries[i].base.bo) 3015 + if 
(to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo) 3008 3016 return -EINVAL; 3009 3017 } 3010 3018 ··· 3035 3049 bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); 3036 3050 int r; 3037 3051 3038 - r = amdgpu_bo_reserve(vm->root.base.bo, true); 3052 + r = amdgpu_bo_reserve(vm->root.bo, true); 3039 3053 if (r) 3040 3054 return r; 3041 3055 ··· 3063 3077 if (pte_support_ats != vm->pte_support_ats) { 3064 3078 vm->pte_support_ats = pte_support_ats; 3065 3079 r = amdgpu_vm_clear_bo(adev, vm, 3066 - to_amdgpu_bo_vm(vm->root.base.bo), 3080 + to_amdgpu_bo_vm(vm->root.bo), 3067 3081 false); 3068 3082 if (r) 3069 3083 goto free_idr; ··· 3080 3094 3081 3095 if (vm->use_cpu_for_update) { 3082 3096 /* Sync with last SDMA update/clear before switching to CPU */ 3083 - r = amdgpu_bo_sync_wait(vm->root.base.bo, 3097 + r = amdgpu_bo_sync_wait(vm->root.bo, 3084 3098 AMDGPU_FENCE_OWNER_UNDEFINED, true); 3085 3099 if (r) 3086 3100 goto free_idr; ··· 3108 3122 } 3109 3123 3110 3124 /* Free the shadow bo for compute VM */ 3111 - amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.base.bo)->shadow); 3125 + amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow); 3112 3126 3113 3127 if (pasid) 3114 3128 vm->pasid = pasid; ··· 3124 3138 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); 3125 3139 } 3126 3140 unreserve_bo: 3127 - amdgpu_bo_unreserve(vm->root.base.bo); 3141 + amdgpu_bo_unreserve(vm->root.bo); 3128 3142 return r; 3129 3143 } 3130 3144 ··· 3167 3181 3168 3182 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); 3169 3183 3170 - root = amdgpu_bo_ref(vm->root.base.bo); 3184 + root = amdgpu_bo_ref(vm->root.bo); 3171 3185 amdgpu_bo_reserve(root, true); 3172 3186 if (vm->pasid) { 3173 3187 unsigned long flags; ··· 3194 3208 amdgpu_vm_free_pts(adev, vm, NULL); 3195 3209 amdgpu_bo_unreserve(root); 3196 3210 amdgpu_bo_unref(&root); 3197 - WARN_ON(vm->root.base.bo); 3211 + WARN_ON(vm->root.bo); 3198 3212 3199 3213 drm_sched_entity_destroy(&vm->immediate); 3200 3214 
drm_sched_entity_destroy(&vm->delayed); ··· 3311 3325 /* Wait vm idle to make sure the vmid set in SPM_VMID is 3312 3326 * not referenced anymore. 3313 3327 */ 3314 - r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true); 3328 + r = amdgpu_bo_reserve(fpriv->vm.root.bo, true); 3315 3329 if (r) 3316 3330 return r; 3317 3331 ··· 3319 3333 if (r < 0) 3320 3334 return r; 3321 3335 3322 - amdgpu_bo_unreserve(fpriv->vm.root.base.bo); 3336 + amdgpu_bo_unreserve(fpriv->vm.root.bo); 3323 3337 amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); 3324 3338 break; 3325 3339 default: ··· 3392 3406 spin_lock(&adev->vm_manager.pasid_lock); 3393 3407 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3394 3408 if (vm) { 3395 - root = amdgpu_bo_ref(vm->root.base.bo); 3409 + root = amdgpu_bo_ref(vm->root.bo); 3396 3410 is_compute_context = vm->is_compute_context; 3397 3411 } else { 3398 3412 root = NULL; ··· 3417 3431 /* Double check that the VM still exists */ 3418 3432 spin_lock(&adev->vm_manager.pasid_lock); 3419 3433 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3420 - if (vm && vm->root.base.bo != root) 3434 + if (vm && vm->root.bo != root) 3421 3435 vm = NULL; 3422 3436 spin_unlock(&adev->vm_manager.pasid_lock); 3423 3437 if (!vm)
+1 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 152 152 bool moved; 153 153 }; 154 154 155 - struct amdgpu_vm_pt { 156 - struct amdgpu_vm_bo_base base; 157 - 158 - /* array of page tables, one for each directory entry */ 159 - struct amdgpu_vm_pt *entries; 160 - }; 161 - 162 155 /* provided by hw blocks that can write ptes, e.g., sdma */ 163 156 struct amdgpu_vm_pte_funcs { 164 157 /* number of dw to reserve per operation */ ··· 277 284 struct list_head done; 278 285 279 286 /* contains the page directory */ 280 - struct amdgpu_vm_pt root; 287 + struct amdgpu_vm_bo_base root; 281 288 struct dma_fence *last_update; 282 289 283 290 /* Scheduler entities for page table updates */
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
··· 112 112 swap(p->vm->last_unlocked, f); 113 113 dma_fence_put(tmp); 114 114 } else { 115 - amdgpu_bo_fence(p->vm->root.base.bo, f, true); 115 + amdgpu_bo_fence(p->vm->root.bo, f, true); 116 116 } 117 117 118 118 if (fence && !p->immediate)
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 1273 1273 adev = (struct amdgpu_device *)pdd->dev->kgd; 1274 1274 vm = drm_priv_to_vm(pdd->drm_priv); 1275 1275 1276 - ctx->tv[gpuidx].bo = &vm->root.base.bo->tbo; 1276 + ctx->tv[gpuidx].bo = &vm->root.bo->tbo; 1277 1277 ctx->tv[gpuidx].num_shared = 4; 1278 1278 list_add(&ctx->tv[gpuidx].head, &ctx->validate_list); 1279 1279 }