Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: rename bo->mem and make it a pointer

When we want to decouple resource management from buffer management we need to
be able to handle resources separately.

Add a resource pointer and rename bo->mem so that all code accessing it
must be changed to go through the pointer instead.

No functional change.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210430092508.60710-4-christian.koenig@amd.com

+274 -265
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 1666 1666 * the next restore worker 1667 1667 */ 1668 1668 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && 1669 - bo->tbo.mem.mem_type == TTM_PL_SYSTEM) 1669 + bo->tbo.resource->mem_type == TTM_PL_SYSTEM) 1670 1670 is_invalid_userptr = true; 1671 1671 1672 1672 ret = vm_validate_pt_pd_bos(avm);
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 4103 4103 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) { 4104 4104 4105 4105 /* No need to recover an evicted BO */ 4106 - if (shadow->tbo.mem.mem_type != TTM_PL_TT || 4107 - shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET || 4108 - shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM) 4106 + if (shadow->tbo.resource->mem_type != TTM_PL_TT || 4107 + shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || 4108 + shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM) 4109 4109 continue; 4110 4110 4111 4111 r = amdgpu_bo_restore_shadow(shadow, &next);
+6 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
··· 226 226 if (r) 227 227 return ERR_PTR(r); 228 228 229 - } else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) & 229 + } else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) & 230 230 AMDGPU_GEM_DOMAIN_GTT)) { 231 231 return ERR_PTR(-EBUSY); 232 232 } 233 233 234 - switch (bo->tbo.mem.mem_type) { 234 + switch (bo->tbo.resource->mem_type) { 235 235 case TTM_PL_TT: 236 236 sgt = drm_prime_pages_to_sg(obj->dev, 237 237 bo->tbo.ttm->pages, ··· 245 245 break; 246 246 247 247 case TTM_PL_VRAM: 248 - r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0, 249 - bo->tbo.base.size, attach->dev, dir, &sgt); 248 + r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0, 249 + bo->tbo.base.size, attach->dev, 250 + dir, &sgt); 250 251 if (r) 251 252 return ERR_PTR(r); 252 253 break; ··· 437 436 struct amdgpu_vm_bo_base *bo_base; 438 437 int r; 439 438 440 - if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM) 439 + if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM) 441 440 return; 442 441 443 442 r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 101 101 { 102 102 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 103 103 104 - switch (bo->tbo.mem.mem_type) { 104 + switch (bo->tbo.resource->mem_type) { 105 105 case TTM_PL_TT: 106 106 *addr = bo->tbo.ttm->dma_address[0]; 107 107 break; ··· 112 112 *addr = 0; 113 113 break; 114 114 } 115 - *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem); 115 + *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource); 116 116 amdgpu_gmc_get_vm_pde(adev, level, addr, flags); 117 117 } 118 118
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
··· 122 122 int r; 123 123 124 124 spin_lock(&mgr->lock); 125 - if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) && 125 + if ((tbo->resource == mem || tbo->resource->mem_type != TTM_PL_TT) && 126 126 atomic64_read(&mgr->available) < mem->num_pages) { 127 127 spin_unlock(&mgr->lock); 128 128 return -ENOSPC;
+25 -25
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 362 362 if (cpu_addr) 363 363 amdgpu_bo_kunmap(*bo_ptr); 364 364 365 - ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem); 365 + ttm_resource_free(&(*bo_ptr)->tbo, (*bo_ptr)->tbo.resource); 366 366 367 367 for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) { 368 368 (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT; 369 369 (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; 370 370 } 371 371 r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement, 372 - &(*bo_ptr)->tbo.mem, &ctx); 372 + (*bo_ptr)->tbo.resource, &ctx); 373 373 if (r) 374 374 goto error; 375 375 ··· 573 573 return r; 574 574 575 575 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && 576 - bo->tbo.mem.mem_type == TTM_PL_VRAM && 577 - bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT) 576 + bo->tbo.resource->mem_type == TTM_PL_VRAM && 577 + bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT) 578 578 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 579 579 ctx.bytes_moved); 580 580 else 581 581 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0); 582 582 583 583 if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && 584 - bo->tbo.mem.mem_type == TTM_PL_VRAM) { 584 + bo->tbo.resource->mem_type == TTM_PL_VRAM) { 585 585 struct dma_fence *fence; 586 586 587 587 r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence); ··· 761 761 if (r < 0) 762 762 return r; 763 763 764 - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap); 764 + r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap); 765 765 if (r) 766 766 return r; 767 767 ··· 884 884 domain = amdgpu_bo_get_preferred_pin_domain(adev, domain); 885 885 886 886 if (bo->tbo.pin_count) { 887 - uint32_t mem_type = bo->tbo.mem.mem_type; 888 - uint32_t mem_flags = bo->tbo.mem.placement; 887 + uint32_t mem_type = bo->tbo.resource->mem_type; 888 + uint32_t mem_flags = bo->tbo.resource->placement; 889 889 890 890 if (!(domain & amdgpu_mem_type_to_domain(mem_type))) 891 891 return 
-EINVAL; ··· 935 935 936 936 ttm_bo_pin(&bo->tbo); 937 937 938 - domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); 938 + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); 939 939 if (domain == AMDGPU_GEM_DOMAIN_VRAM) { 940 940 atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size); 941 941 atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo), ··· 987 987 if (bo->tbo.base.import_attach) 988 988 dma_buf_unpin(bo->tbo.base.import_attach); 989 989 990 - if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { 990 + if (bo->tbo.resource->mem_type == TTM_PL_VRAM) { 991 991 atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size); 992 992 atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo), 993 993 &adev->visible_pin_size); 994 - } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { 994 + } else if (bo->tbo.resource->mem_type == TTM_PL_TT) { 995 995 atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size); 996 996 } 997 997 } ··· 1223 1223 { 1224 1224 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); 1225 1225 struct amdgpu_bo *abo; 1226 - struct ttm_resource *old_mem = &bo->mem; 1226 + struct ttm_resource *old_mem = bo->resource; 1227 1227 1228 1228 if (!amdgpu_bo_is_amdgpu_bo(bo)) 1229 1229 return; ··· 1234 1234 amdgpu_bo_kunmap(abo); 1235 1235 1236 1236 if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach && 1237 - bo->mem.mem_type != TTM_PL_SYSTEM) 1237 + bo->resource->mem_type != TTM_PL_SYSTEM) 1238 1238 dma_buf_move_notify(abo->tbo.base.dma_buf); 1239 1239 1240 1240 /* remember the eviction */ ··· 1254 1254 { 1255 1255 unsigned int domain; 1256 1256 1257 - domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); 1257 + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); 1258 1258 switch (domain) { 1259 1259 case AMDGPU_GEM_DOMAIN_VRAM: 1260 1260 *vram_mem += amdgpu_bo_size(bo); ··· 1296 1296 if (bo->base.resv == &bo->base._resv) 1297 1297 amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo); 1298 1298 1299 - if (bo->mem.mem_type != TTM_PL_VRAM || 
!bo->mem.mm_node || 1299 + if (bo->resource->mem_type != TTM_PL_VRAM || !bo->resource->mm_node || 1300 1300 !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) 1301 1301 return; 1302 1302 ··· 1333 1333 /* Remember that this BO was accessed by the CPU */ 1334 1334 abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 1335 1335 1336 - if (bo->mem.mem_type != TTM_PL_VRAM) 1336 + if (bo->resource->mem_type != TTM_PL_VRAM) 1337 1337 return 0; 1338 1338 1339 - offset = bo->mem.start << PAGE_SHIFT; 1339 + offset = bo->resource->start << PAGE_SHIFT; 1340 1340 if ((offset + bo->base.size) <= adev->gmc.visible_vram_size) 1341 1341 return 0; 1342 1342 ··· 1359 1359 else if (unlikely(r)) 1360 1360 return VM_FAULT_SIGBUS; 1361 1361 1362 - offset = bo->mem.start << PAGE_SHIFT; 1362 + offset = bo->resource->start << PAGE_SHIFT; 1363 1363 /* this should never happen */ 1364 - if (bo->mem.mem_type == TTM_PL_VRAM && 1364 + if (bo->resource->mem_type == TTM_PL_VRAM && 1365 1365 (offset + bo->base.size) > adev->gmc.visible_vram_size) 1366 1366 return VM_FAULT_SIGBUS; 1367 1367 ··· 1446 1446 */ 1447 1447 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) 1448 1448 { 1449 - WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); 1449 + WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM); 1450 1450 WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) && 1451 1451 !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel); 1452 - WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); 1453 - WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM && 1452 + WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET); 1453 + WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM && 1454 1454 !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)); 1455 1455 1456 1456 return amdgpu_bo_gpu_offset_no_check(bo); ··· 1468 1468 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 1469 1469 uint64_t offset; 1470 1470 1471 - offset = (bo->tbo.mem.start << PAGE_SHIFT) + 1472 - 
amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type); 1471 + offset = (bo->tbo.resource->start << PAGE_SHIFT) + 1472 + amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type); 1473 1473 1474 1474 return amdgpu_gmc_sign_extend(offset); 1475 1475 } ··· 1522 1522 unsigned int pin_count; 1523 1523 u64 size; 1524 1524 1525 - domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); 1525 + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); 1526 1526 switch (domain) { 1527 1527 case AMDGPU_GEM_DOMAIN_VRAM: 1528 1528 placement = "VRAM";
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
··· 219 219 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 220 220 struct amdgpu_res_cursor cursor; 221 221 222 - if (bo->tbo.mem.mem_type != TTM_PL_VRAM) 222 + if (bo->tbo.resource->mem_type != TTM_PL_VRAM) 223 223 return false; 224 224 225 - amdgpu_res_first(&bo->tbo.mem, 0, amdgpu_bo_size(bo), &cursor); 225 + amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor); 226 226 while (cursor.remaining) { 227 227 if (cursor.start < adev->gmc.visible_vram_size) 228 228 return true;
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
··· 127 127 128 128 TP_fast_assign( 129 129 __entry->bo = bo; 130 - __entry->pages = bo->tbo.mem.num_pages; 131 - __entry->type = bo->tbo.mem.mem_type; 130 + __entry->pages = bo->tbo.resource->num_pages; 131 + __entry->type = bo->tbo.resource->mem_type; 132 132 __entry->prefer = bo->preferred_domains; 133 133 __entry->allow = bo->allowed_domains; 134 134 __entry->visible = bo->flags;
+23 -20
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 125 125 rcu_read_unlock(); 126 126 return; 127 127 } 128 - switch (bo->mem.mem_type) { 128 + 129 + switch (bo->resource->mem_type) { 129 130 case AMDGPU_PL_GDS: 130 131 case AMDGPU_PL_GWS: 131 132 case AMDGPU_PL_OA: ··· 459 458 { 460 459 struct amdgpu_device *adev; 461 460 struct amdgpu_bo *abo; 462 - struct ttm_resource *old_mem = &bo->mem; 461 + struct ttm_resource *old_mem = bo->resource; 463 462 int r; 464 463 465 464 if (new_mem->mem_type == TTM_PL_TT) { ··· 491 490 return r; 492 491 493 492 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); 494 - ttm_resource_free(bo, &bo->mem); 493 + ttm_resource_free(bo, bo->resource); 495 494 ttm_bo_assign_mem(bo, new_mem); 496 495 goto out; 497 496 } ··· 600 599 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); 601 600 struct amdgpu_res_cursor cursor; 602 601 603 - amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor); 602 + amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0, 603 + &cursor); 604 604 return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT; 605 605 } 606 606 ··· 956 954 uint64_t addr, flags; 957 955 int r; 958 956 959 - if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET) 957 + if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET) 960 958 return 0; 961 959 962 960 addr = amdgpu_gmc_agp_addr(bo); 963 961 if (addr != AMDGPU_BO_INVALID_OFFSET) { 964 - bo->mem.start = addr >> PAGE_SHIFT; 962 + bo->resource->start = addr >> PAGE_SHIFT; 965 963 } else { 966 964 967 965 /* allocate GART space */ ··· 972 970 placements.fpfn = 0; 973 971 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; 974 972 placements.mem_type = TTM_PL_TT; 975 - placements.flags = bo->mem.placement; 973 + placements.flags = bo->resource->placement; 976 974 977 975 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); 978 976 if (unlikely(r)) ··· 989 987 return r; 990 988 } 991 989 992 - ttm_resource_free(bo, &bo->mem); 993 - bo->mem = tmp; 990 + ttm_resource_free(bo, bo->resource); 991 + ttm_bo_assign_mem(bo, &tmp); 
994 992 } 995 993 996 994 return 0; ··· 1011 1009 if (!tbo->ttm) 1012 1010 return 0; 1013 1011 1014 - flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem); 1012 + flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource); 1015 1013 r = amdgpu_ttm_gart_bind(adev, tbo, flags); 1016 1014 1017 1015 return r; ··· 1324 1322 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, 1325 1323 const struct ttm_place *place) 1326 1324 { 1327 - unsigned long num_pages = bo->mem.num_pages; 1325 + unsigned long num_pages = bo->resource->num_pages; 1328 1326 struct amdgpu_res_cursor cursor; 1329 1327 struct dma_resv_list *flist; 1330 1328 struct dma_fence *f; ··· 1348 1346 } 1349 1347 } 1350 1348 1351 - switch (bo->mem.mem_type) { 1349 + switch (bo->resource->mem_type) { 1352 1350 case TTM_PL_TT: 1353 1351 if (amdgpu_bo_is_amdgpu_bo(bo) && 1354 1352 amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo))) ··· 1357 1355 1358 1356 case TTM_PL_VRAM: 1359 1357 /* Check each drm MM node individually */ 1360 - amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT, 1358 + amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT, 1361 1359 &cursor); 1362 1360 while (cursor.remaining) { 1363 1361 if (place->fpfn < PFN_DOWN(cursor.start + cursor.size) ··· 1399 1397 uint32_t value = 0; 1400 1398 int ret = 0; 1401 1399 1402 - if (bo->mem.mem_type != TTM_PL_VRAM) 1400 + if (bo->resource->mem_type != TTM_PL_VRAM) 1403 1401 return -EIO; 1404 1402 1405 - amdgpu_res_first(&bo->mem, offset, len, &cursor); 1403 + amdgpu_res_first(bo->resource, offset, len, &cursor); 1406 1404 while (cursor.remaining) { 1407 1405 uint64_t aligned_pos = cursor.start & ~(uint64_t)3; 1408 1406 uint64_t bytes = 4 - (cursor.start & 3); ··· 1919 1917 return -EINVAL; 1920 1918 } 1921 1919 1922 - if (bo->tbo.mem.mem_type == TTM_PL_TT) { 1920 + if (bo->tbo.resource->mem_type == TTM_PL_TT) { 1923 1921 r = amdgpu_ttm_alloc_gart(&bo->tbo); 1924 1922 if (r) 1925 1923 return r; 1926 1924 } 1927 1925 1928 
- num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT; 1926 + num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT; 1929 1927 num_loops = 0; 1930 1928 1931 - amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor); 1929 + amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor); 1932 1930 while (cursor.remaining) { 1933 1931 num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes); 1934 1932 amdgpu_res_next(&cursor, cursor.size); ··· 1953 1951 } 1954 1952 } 1955 1953 1956 - amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor); 1954 + amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor); 1957 1955 while (cursor.remaining) { 1958 1956 uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes); 1959 1957 uint64_t dst_addr = cursor.start; 1960 1958 1961 - dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type); 1959 + dst_addr += amdgpu_ttm_domain_start(adev, 1960 + bo->tbo.resource->mem_type); 1962 1961 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr, 1963 1962 cur_size); 1964 1963
+6 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 342 342 amdgpu_vm_bo_idle(base); 343 343 344 344 if (bo->preferred_domains & 345 - amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)) 345 + amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)) 346 346 return; 347 347 348 348 /* ··· 657 657 if (!bo->parent) 658 658 continue; 659 659 660 - ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem, 660 + ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource, 661 661 &vm->lru_bulk_move); 662 662 if (bo->shadow) 663 663 ttm_bo_move_to_lru_tail(&bo->shadow->tbo, 664 - &bo->shadow->tbo.mem, 664 + bo->shadow->tbo.resource, 665 665 &vm->lru_bulk_move); 666 666 } 667 667 spin_unlock(&adev->mman.bdev.lru_lock); ··· 1818 1818 struct drm_gem_object *gobj = dma_buf->priv; 1819 1819 struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); 1820 1820 1821 - if (abo->tbo.mem.mem_type == TTM_PL_VRAM) 1821 + if (abo->tbo.resource->mem_type == TTM_PL_VRAM) 1822 1822 bo = gem_to_amdgpu_bo(gobj); 1823 1823 } 1824 - mem = &bo->tbo.mem; 1824 + mem = bo->tbo.resource; 1825 1825 if (mem->mem_type == TTM_PL_TT) 1826 1826 pages_addr = bo->tbo.ttm->dma_address; 1827 1827 } ··· 1881 1881 * next command submission. 1882 1882 */ 1883 1883 if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { 1884 - uint32_t mem_type = bo->tbo.mem.mem_type; 1884 + uint32_t mem_type = bo->tbo.resource->mem_type; 1885 1885 1886 1886 if (!(bo->preferred_domains & 1887 1887 amdgpu_mem_type_to_domain(mem_type)))
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 217 217 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) 218 218 { 219 219 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 220 - struct ttm_resource *mem = &bo->tbo.mem; 220 + struct ttm_resource *mem = bo->tbo.resource; 221 221 struct drm_mm_node *nodes = mem->mm_node; 222 222 unsigned pages = mem->num_pages; 223 223 u64 usage;
+2 -2
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 409 409 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n", 410 410 prange->svms, prange->start, prange->last); 411 411 412 - prange->ttm_res = &prange->svm_bo->bo->tbo.mem; 412 + prange->ttm_res = prange->svm_bo->bo->tbo.resource; 413 413 return true; 414 414 } 415 415 ··· 515 515 516 516 svm_bo->bo = bo; 517 517 prange->svm_bo = svm_bo; 518 - prange->ttm_res = &bo->tbo.mem; 518 + prange->ttm_res = bo->tbo.resource; 519 519 prange->offset = 0; 520 520 521 521 spin_lock(&svm_bo->list_lock);
+3 -3
drivers/gpu/drm/drm_gem_ttm_helper.c
··· 40 40 const struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); 41 41 42 42 drm_printf_indent(p, indent, "placement="); 43 - drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname)); 43 + drm_print_bits(p, bo->resource->placement, plname, ARRAY_SIZE(plname)); 44 44 drm_printf(p, "\n"); 45 45 46 - if (bo->mem.bus.is_iomem) 46 + if (bo->resource->bus.is_iomem) 47 47 drm_printf_indent(p, indent, "bus.offset=%lx\n", 48 - (unsigned long)bo->mem.bus.offset); 48 + (unsigned long)bo->resource->bus.offset); 49 49 } 50 50 EXPORT_SYMBOL(drm_gem_ttm_print_info); 51 51
+2 -2
drivers/gpu/drm/drm_gem_vram_helper.c
··· 248 248 static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo) 249 249 { 250 250 /* Keep TTM behavior for now, remove when drivers are audited */ 251 - if (WARN_ON_ONCE(!gbo->bo.mem.mm_node)) 251 + if (WARN_ON_ONCE(!gbo->bo.resource->mm_node)) 252 252 return 0; 253 253 254 - return gbo->bo.mem.start; 254 + return gbo->bo.resource->start; 255 255 } 256 256 257 257 /**
+1 -1
drivers/gpu/drm/nouveau/nouveau_abi16.c
··· 312 312 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | 313 313 NOUVEAU_GEM_DOMAIN_GART; 314 314 else 315 - if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) 315 + if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) 316 316 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; 317 317 else 318 318 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
+15 -15
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 433 433 if (nvbo->bo.pin_count) { 434 434 bool error = evict; 435 435 436 - switch (bo->mem.mem_type) { 436 + switch (bo->resource->mem_type) { 437 437 case TTM_PL_VRAM: 438 438 error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM); 439 439 break; ··· 446 446 if (error) { 447 447 NV_ERROR(drm, "bo %p pinned elsewhere: " 448 448 "0x%08x vs 0x%08x\n", bo, 449 - bo->mem.mem_type, domain); 449 + bo->resource->mem_type, domain); 450 450 ret = -EBUSY; 451 451 } 452 452 ttm_bo_pin(&nvbo->bo); ··· 467 467 468 468 ttm_bo_pin(&nvbo->bo); 469 469 470 - switch (bo->mem.mem_type) { 470 + switch (bo->resource->mem_type) { 471 471 case TTM_PL_VRAM: 472 472 drm->gem.vram_available -= bo->base.size; 473 473 break; ··· 498 498 499 499 ttm_bo_unpin(&nvbo->bo); 500 500 if (!nvbo->bo.pin_count) { 501 - switch (bo->mem.mem_type) { 501 + switch (bo->resource->mem_type) { 502 502 case TTM_PL_VRAM: 503 503 drm->gem.vram_available += bo->base.size; 504 504 break; ··· 523 523 if (ret) 524 524 return ret; 525 525 526 - ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap); 526 + ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap); 527 527 528 528 ttm_bo_unreserve(&nvbo->bo); 529 529 return ret; ··· 737 737 { 738 738 struct nouveau_bo *nvbo = nouveau_bo(bo); 739 739 740 - switch (bo->mem.mem_type) { 740 + switch (bo->resource->mem_type) { 741 741 case TTM_PL_VRAM: 742 742 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 743 743 NOUVEAU_GEM_DOMAIN_CPU); ··· 754 754 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, 755 755 struct ttm_resource *reg) 756 756 { 757 - struct nouveau_mem *old_mem = nouveau_mem(&bo->mem); 757 + struct nouveau_mem *old_mem = nouveau_mem(bo->resource); 758 758 struct nouveau_mem *new_mem = nouveau_mem(reg); 759 759 struct nvif_vmm *vmm = &drm->client.vmm.vmm; 760 760 int ret; ··· 809 809 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); 810 810 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, 
ctx->interruptible); 811 811 if (ret == 0) { 812 - ret = drm->ttm.move(chan, bo, &bo->mem, new_reg); 812 + ret = drm->ttm.move(chan, bo, bo->resource, new_reg); 813 813 if (ret == 0) { 814 814 ret = nouveau_fence_new(chan, false, &fence); 815 815 if (ret == 0) { ··· 969 969 { 970 970 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 971 971 struct nouveau_bo *nvbo = nouveau_bo(bo); 972 - struct ttm_resource *old_reg = &bo->mem; 972 + struct ttm_resource *old_reg = bo->resource; 973 973 struct nouveau_drm_tile *new_tile = NULL; 974 974 int ret = 0; 975 975 ··· 1009 1009 if (old_reg->mem_type == TTM_PL_TT && 1010 1010 new_reg->mem_type == TTM_PL_SYSTEM) { 1011 1011 nouveau_ttm_tt_unbind(bo->bdev, bo->ttm); 1012 - ttm_resource_free(bo, &bo->mem); 1012 + ttm_resource_free(bo, bo->resource); 1013 1013 ttm_bo_assign_mem(bo, new_reg); 1014 1014 goto out; 1015 1015 } ··· 1045 1045 } 1046 1046 out_ntfy: 1047 1047 if (ret) { 1048 - nouveau_bo_move_ntfy(bo, &bo->mem); 1048 + nouveau_bo_move_ntfy(bo, bo->resource); 1049 1049 } 1050 1050 return ret; 1051 1051 } ··· 1170 1170 list_del_init(&nvbo->io_reserve_lru); 1171 1171 drm_vma_node_unmap(&nvbo->bo.base.vma_node, 1172 1172 bdev->dev_mapping); 1173 - nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem); 1173 + nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource); 1174 1174 goto retry; 1175 1175 } 1176 1176 ··· 1200 1200 /* as long as the bo isn't in vram, and isn't tiled, we've got 1201 1201 * nothing to do here. 
1202 1202 */ 1203 - if (bo->mem.mem_type != TTM_PL_VRAM) { 1203 + if (bo->resource->mem_type != TTM_PL_VRAM) { 1204 1204 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || 1205 1205 !nvbo->kind) 1206 1206 return 0; 1207 1207 1208 - if (bo->mem.mem_type != TTM_PL_SYSTEM) 1208 + if (bo->resource->mem_type != TTM_PL_SYSTEM) 1209 1209 return 0; 1210 1210 1211 1211 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0); ··· 1213 1213 } else { 1214 1214 /* make sure bo is in mappable vram */ 1215 1215 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA || 1216 - bo->mem.start + bo->mem.num_pages < mappable) 1216 + bo->resource->start + bo->resource->num_pages < mappable) 1217 1217 return 0; 1218 1218 1219 1219 for (i = 0; i < nvbo->placement.num_placement; ++i) {
+1 -1
drivers/gpu/drm/nouveau/nouveau_chan.c
··· 212 212 args.start = 0; 213 213 args.limit = chan->vmm->vmm.limit - 1; 214 214 } else 215 - if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) { 215 + if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) { 216 216 if (device->info.family == NV_DEVICE_INFO_V0_TNT) { 217 217 /* nv04 vram pushbuf hack, retarget to its location in 218 218 * the framebuffer bar rather than direct vram access..
+1 -1
drivers/gpu/drm/nouveau/nouveau_fbcon.c
··· 378 378 FBINFO_HWACCEL_FILLRECT | 379 379 FBINFO_HWACCEL_IMAGEBLIT; 380 380 info->fbops = &nouveau_fbcon_sw_ops; 381 - info->fix.smem_start = nvbo->bo.mem.bus.offset; 381 + info->fix.smem_start = nvbo->bo.resource->bus.offset; 382 382 info->fix.smem_len = nvbo->bo.base.size; 383 383 384 384 info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
+8 -8
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 276 276 277 277 if (is_power_of_2(nvbo->valid_domains)) 278 278 rep->domain = nvbo->valid_domains; 279 - else if (nvbo->bo.mem.mem_type == TTM_PL_TT) 279 + else if (nvbo->bo.resource->mem_type == TTM_PL_TT) 280 280 rep->domain = NOUVEAU_GEM_DOMAIN_GART; 281 281 else 282 282 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; ··· 347 347 valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART); 348 348 349 349 if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) && 350 - bo->mem.mem_type == TTM_PL_VRAM) 350 + bo->resource->mem_type == TTM_PL_VRAM) 351 351 pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM; 352 352 353 353 else if ((domains & NOUVEAU_GEM_DOMAIN_GART) && 354 - bo->mem.mem_type == TTM_PL_TT) 354 + bo->resource->mem_type == TTM_PL_TT) 355 355 pref_domains |= NOUVEAU_GEM_DOMAIN_GART; 356 356 357 357 else if (domains & NOUVEAU_GEM_DOMAIN_VRAM) ··· 561 561 562 562 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { 563 563 if (nvbo->offset == b->presumed.offset && 564 - ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && 564 + ((nvbo->bo.resource->mem_type == TTM_PL_VRAM && 565 565 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || 566 - (nvbo->bo.mem.mem_type == TTM_PL_TT && 566 + (nvbo->bo.resource->mem_type == TTM_PL_TT && 567 567 b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))) 568 568 continue; 569 569 570 - if (nvbo->bo.mem.mem_type == TTM_PL_TT) 570 + if (nvbo->bo.resource->mem_type == TTM_PL_TT) 571 571 b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART; 572 572 else 573 573 b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM; ··· 681 681 } 682 682 683 683 if (!nvbo->kmap.virtual) { 684 - ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, 684 + ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, 685 685 &nvbo->kmap); 686 686 if (ret) { 687 687 NV_PRINTK(err, cli, "failed kmap for reloc\n"); ··· 870 870 if (unlikely(cmd != req->suffix0)) { 871 871 if (!nvbo->kmap.virtual) { 872 872 ret = ttm_bo_kmap(&nvbo->bo, 0, 873 - nvbo->bo.mem. 
873 + nvbo->bo.resource-> 874 874 num_pages, 875 875 &nvbo->kmap); 876 876 if (ret) {
+2 -2
drivers/gpu/drm/nouveau/nouveau_vmm.c
··· 77 77 nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm, 78 78 struct nouveau_vma **pvma) 79 79 { 80 - struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem); 80 + struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource); 81 81 struct nouveau_vma *vma; 82 82 struct nvif_vma tmp; 83 83 int ret; ··· 96 96 vma->fence = NULL; 97 97 list_add_tail(&vma->head, &nvbo->vma_list); 98 98 99 - if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM && 99 + if (nvbo->bo.resource->mem_type != TTM_PL_SYSTEM && 100 100 mem->mem.page == nvbo->page) { 101 101 ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0, 102 102 mem->mem.size, &tmp);
+1 -1
drivers/gpu/drm/nouveau/nv17_fence.c
··· 77 77 nv17_fence_context_new(struct nouveau_channel *chan) 78 78 { 79 79 struct nv10_fence_priv *priv = chan->drm->fence; 80 + struct ttm_resource *reg = priv->bo->bo.resource; 80 81 struct nv10_fence_chan *fctx; 81 - struct ttm_resource *reg = &priv->bo->bo.mem; 82 82 u32 start = reg->start * PAGE_SIZE; 83 83 u32 limit = start + priv->bo->bo.base.size - 1; 84 84 int ret = 0;
+1 -1
drivers/gpu/drm/nouveau/nv50_fence.c
··· 37 37 { 38 38 struct nv10_fence_priv *priv = chan->drm->fence; 39 39 struct nv10_fence_chan *fctx; 40 - struct ttm_resource *reg = &priv->bo->bo.mem; 40 + struct ttm_resource *reg = priv->bo->bo.resource; 41 41 u32 start = reg->start * PAGE_SIZE; 42 42 u32 limit = start + priv->bo->bo.base.size - 1; 43 43 int ret;
+3 -3
drivers/gpu/drm/qxl/qxl_drv.h
··· 292 292 unsigned long offset) 293 293 { 294 294 struct qxl_memslot *slot = 295 - (bo->tbo.mem.mem_type == TTM_PL_VRAM) 295 + (bo->tbo.resource->mem_type == TTM_PL_VRAM) 296 296 ? &qdev->main_slot : &qdev->surfaces_slot; 297 297 298 - /* TODO - need to hold one of the locks to read bo->tbo.mem.start */ 298 + /* TODO - need to hold one of the locks to read bo->tbo.resource->start */ 299 299 300 - return slot->high_bits | ((bo->tbo.mem.start << PAGE_SHIFT) + offset); 300 + return slot->high_bits | ((bo->tbo.resource->start << PAGE_SHIFT) + offset); 301 301 } 302 302 303 303 /* qxl_display.c */
+5 -5
drivers/gpu/drm/qxl/qxl_object.c
··· 212 212 struct io_mapping *map; 213 213 struct dma_buf_map bo_map; 214 214 215 - if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 215 + if (bo->tbo.resource->mem_type == TTM_PL_VRAM) 216 216 map = qdev->vram_mapping; 217 - else if (bo->tbo.mem.mem_type == TTM_PL_PRIV) 217 + else if (bo->tbo.resource->mem_type == TTM_PL_PRIV) 218 218 map = qdev->surface_mapping; 219 219 else 220 220 goto fallback; 221 221 222 - offset = bo->tbo.mem.start << PAGE_SHIFT; 222 + offset = bo->tbo.resource->start << PAGE_SHIFT; 223 223 return io_mapping_map_atomic_wc(map, offset + page_offset); 224 224 fallback: 225 225 if (bo->kptr) { ··· 266 266 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, 267 267 struct qxl_bo *bo, void *pmap) 268 268 { 269 - if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) && 270 - (bo->tbo.mem.mem_type != TTM_PL_PRIV)) 269 + if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) && 270 + (bo->tbo.resource->mem_type != TTM_PL_PRIV)) 271 271 goto fallback; 272 272 273 273 io_mapping_unmap_atomic(pmap);
+2 -2
drivers/gpu/drm/qxl/qxl_ttm.c
··· 131 131 qbo = to_qxl_bo(bo); 132 132 qdev = to_qxl(qbo->tbo.base.dev); 133 133 134 - if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id) 134 + if (bo->resource->mem_type == TTM_PL_PRIV && qbo->surface_id) 135 135 qxl_surface_evict(qdev, qbo, new_mem ? true : false); 136 136 } 137 137 ··· 140 140 struct ttm_resource *new_mem, 141 141 struct ttm_place *hop) 142 142 { 143 - struct ttm_resource *old_mem = &bo->mem; 143 + struct ttm_resource *old_mem = bo->resource; 144 144 int ret; 145 145 146 146 qxl_bo_move_notify(bo, new_mem);
+4 -4
drivers/gpu/drm/radeon/radeon_cs.c
··· 400 400 struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head); 401 401 402 402 /* Sort A before B if A is smaller. */ 403 - return (int)la->robj->tbo.mem.num_pages - 404 - (int)lb->robj->tbo.mem.num_pages; 403 + return (int)la->robj->tbo.resource->num_pages - 404 + (int)lb->robj->tbo.resource->num_pages; 405 405 } 406 406 407 407 /** ··· 516 516 } 517 517 518 518 r = radeon_vm_bo_update(rdev, vm->ib_bo_va, 519 - &rdev->ring_tmp_bo.bo->tbo.mem); 519 + rdev->ring_tmp_bo.bo->tbo.resource); 520 520 if (r) 521 521 return r; 522 522 ··· 530 530 return -EINVAL; 531 531 } 532 532 533 - r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem); 533 + r = radeon_vm_bo_update(rdev, bo_va, bo->tbo.resource); 534 534 if (r) 535 535 return r; 536 536
+5 -5
drivers/gpu/drm/radeon/radeon_gem.c
··· 529 529 else 530 530 r = 0; 531 531 532 - cur_placement = READ_ONCE(robj->tbo.mem.mem_type); 532 + cur_placement = READ_ONCE(robj->tbo.resource->mem_type); 533 533 args->domain = radeon_mem_type_to_domain(cur_placement); 534 534 drm_gem_object_put(gobj); 535 535 return r; ··· 559 559 r = ret; 560 560 561 561 /* Flush HDP cache via MMIO if necessary */ 562 - cur_placement = READ_ONCE(robj->tbo.mem.mem_type); 562 + cur_placement = READ_ONCE(robj->tbo.resource->mem_type); 563 563 if (rdev->asic->mmio_hdp_flush && 564 564 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) 565 565 robj->rdev->asic->mmio_hdp_flush(rdev); ··· 643 643 goto error_free; 644 644 645 645 list_for_each_entry(entry, &list, head) { 646 - domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type); 646 + domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type); 647 647 /* if anything is swapped out don't swap it in here, 648 648 just abort and wait for the next CS */ 649 649 if (domain == RADEON_GEM_DOMAIN_CPU) ··· 656 656 goto error_unlock; 657 657 658 658 if (bo_va->it.start) 659 - r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem); 659 + r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource); 660 660 661 661 error_unlock: 662 662 mutex_unlock(&bo_va->vm->mutex); ··· 860 860 unsigned domain; 861 861 const char *placement; 862 862 863 - domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type); 863 + domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type); 864 864 switch (domain) { 865 865 case RADEON_GEM_DOMAIN_VRAM: 866 866 placement = "VRAM";
+11 -11
drivers/gpu/drm/radeon/radeon_object.c
··· 76 76 77 77 bo = container_of(tbo, struct radeon_bo, tbo); 78 78 79 - radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); 79 + radeon_update_memory_usage(bo, bo->tbo.resource->mem_type, -1); 80 80 81 81 mutex_lock(&bo->rdev->gem.mutex); 82 82 list_del_init(&bo->list); ··· 250 250 } 251 251 return 0; 252 252 } 253 - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap); 253 + r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap); 254 254 if (r) { 255 255 return r; 256 256 } ··· 359 359 { 360 360 ttm_bo_unpin(&bo->tbo); 361 361 if (!bo->tbo.pin_count) { 362 - if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 362 + if (bo->tbo.resource->mem_type == TTM_PL_VRAM) 363 363 bo->rdev->vram_pin_size -= radeon_bo_size(bo); 364 364 else 365 365 bo->rdev->gart_pin_size -= radeon_bo_size(bo); ··· 506 506 u32 domain = lobj->preferred_domains; 507 507 u32 allowed = lobj->allowed_domains; 508 508 u32 current_domain = 509 - radeon_mem_type_to_domain(bo->tbo.mem.mem_type); 509 + radeon_mem_type_to_domain(bo->tbo.resource->mem_type); 510 510 511 511 /* Check if this buffer will be moved and don't move it 512 512 * if we have moved too many buffers for this IB already. 
··· 605 605 606 606 out: 607 607 radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, 608 - bo->tbo.mem.start << PAGE_SHIFT, 608 + bo->tbo.resource->start << PAGE_SHIFT, 609 609 bo->tbo.base.size); 610 610 return 0; 611 611 } ··· 711 711 return 0; 712 712 } 713 713 714 - if (bo->tbo.mem.mem_type != TTM_PL_VRAM) { 714 + if (bo->tbo.resource->mem_type != TTM_PL_VRAM) { 715 715 if (!has_moved) 716 716 return 0; 717 717 ··· 743 743 if (!new_mem) 744 744 return; 745 745 746 - radeon_update_memory_usage(rbo, bo->mem.mem_type, -1); 746 + radeon_update_memory_usage(rbo, bo->resource->mem_type, -1); 747 747 radeon_update_memory_usage(rbo, new_mem->mem_type, 1); 748 748 } 749 749 ··· 760 760 rbo = container_of(bo, struct radeon_bo, tbo); 761 761 radeon_bo_check_tiling(rbo, 0, 0); 762 762 rdev = rbo->rdev; 763 - if (bo->mem.mem_type != TTM_PL_VRAM) 763 + if (bo->resource->mem_type != TTM_PL_VRAM) 764 764 return 0; 765 765 766 - size = bo->mem.num_pages << PAGE_SHIFT; 767 - offset = bo->mem.start << PAGE_SHIFT; 766 + size = bo->resource->num_pages << PAGE_SHIFT; 767 + offset = bo->resource->start << PAGE_SHIFT; 768 768 if ((offset + size) <= rdev->mc.visible_vram_size) 769 769 return 0; 770 770 ··· 786 786 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); 787 787 r = ttm_bo_validate(bo, &rbo->placement, &ctx); 788 788 } else if (likely(!r)) { 789 - offset = bo->mem.start << PAGE_SHIFT; 789 + offset = bo->resource->start << PAGE_SHIFT; 790 790 /* this should never happen */ 791 791 if ((offset + size) > rdev->mc.visible_vram_size) 792 792 return VM_FAULT_SIGBUS;
+2 -2
drivers/gpu/drm/radeon/radeon_object.h
··· 95 95 96 96 rdev = radeon_get_rdev(bo->tbo.bdev); 97 97 98 - switch (bo->tbo.mem.mem_type) { 98 + switch (bo->tbo.resource->mem_type) { 99 99 case TTM_PL_TT: 100 100 start = rdev->mc.gtt_start; 101 101 break; ··· 104 104 break; 105 105 } 106 106 107 - return (bo->tbo.mem.start << PAGE_SHIFT) + start; 107 + return (bo->tbo.resource->start << PAGE_SHIFT) + start; 108 108 } 109 109 110 110 static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
+1 -1
drivers/gpu/drm/radeon/radeon_pm.c
··· 154 154 return; 155 155 156 156 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { 157 - if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 157 + if (bo->tbo.resource->mem_type == TTM_PL_VRAM) 158 158 ttm_bo_unmap_virtual(&bo->tbo); 159 159 } 160 160 }
+1 -1
drivers/gpu/drm/radeon/radeon_trace.h
··· 22 22 23 23 TP_fast_assign( 24 24 __entry->bo = bo; 25 - __entry->pages = bo->tbo.mem.num_pages; 25 + __entry->pages = bo->tbo.resource->num_pages; 26 26 ), 27 27 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) 28 28 );
+4 -4
drivers/gpu/drm/radeon/radeon_ttm.c
··· 98 98 return; 99 99 } 100 100 rbo = container_of(bo, struct radeon_bo, tbo); 101 - switch (bo->mem.mem_type) { 101 + switch (bo->resource->mem_type) { 102 102 case TTM_PL_VRAM: 103 103 if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false) 104 104 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); 105 105 else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size && 106 - bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) { 106 + bo->resource->start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) { 107 107 unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; 108 108 int i; 109 109 ··· 195 195 struct ttm_resource *new_mem, 196 196 struct ttm_place *hop) 197 197 { 198 + struct ttm_resource *old_mem = bo->resource; 198 199 struct radeon_device *rdev; 199 200 struct radeon_bo *rbo; 200 - struct ttm_resource *old_mem = &bo->mem; 201 201 int r; 202 202 203 203 if (new_mem->mem_type == TTM_PL_TT) { ··· 229 229 if (old_mem->mem_type == TTM_PL_TT && 230 230 new_mem->mem_type == TTM_PL_SYSTEM) { 231 231 radeon_ttm_tt_unbind(bo->bdev, bo->ttm); 232 - ttm_resource_free(bo, &bo->mem); 232 + ttm_resource_free(bo, bo->resource); 233 233 ttm_bo_assign_mem(bo, new_mem); 234 234 goto out; 235 235 }
+20 -17
drivers/gpu/drm/ttm/ttm_bo.c
··· 58 58 int i, mem_type; 59 59 60 60 drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n", 61 - bo, bo->mem.num_pages, bo->base.size >> 10, 61 + bo, bo->resource->num_pages, bo->base.size >> 10, 62 62 bo->base.size >> 20); 63 63 for (i = 0; i < placement->num_placement; i++) { 64 64 mem_type = placement->placement[i].mem_type; ··· 109 109 bdev->funcs->del_from_lru_notify(bo); 110 110 111 111 if (bulk && !bo->pin_count) { 112 - switch (bo->mem.mem_type) { 112 + switch (bo->resource->mem_type) { 113 113 case TTM_PL_TT: 114 114 ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo); 115 115 break; ··· 163 163 struct ttm_operation_ctx *ctx, 164 164 struct ttm_place *hop) 165 165 { 166 + struct ttm_resource_manager *old_man, *new_man; 166 167 struct ttm_device *bdev = bo->bdev; 167 - struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type); 168 - struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type); 169 168 int ret; 169 + 170 + old_man = ttm_manager_type(bdev, bo->resource->mem_type); 171 + new_man = ttm_manager_type(bdev, mem->mem_type); 170 172 171 173 ttm_bo_unmap_virtual(bo); 172 174 ··· 202 200 return 0; 203 201 204 202 out_err: 205 - new_man = ttm_manager_type(bdev, bo->mem.mem_type); 203 + new_man = ttm_manager_type(bdev, bo->resource->mem_type); 206 204 if (!new_man->use_tt) 207 205 ttm_bo_tt_destroy(bo); 208 206 ··· 223 221 bo->bdev->funcs->delete_mem_notify(bo); 224 222 225 223 ttm_bo_tt_destroy(bo); 226 - ttm_resource_free(bo, &bo->mem); 224 + ttm_resource_free(bo, bo->resource); 227 225 } 228 226 229 227 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo) ··· 419 417 bo->bdev->funcs->release_notify(bo); 420 418 421 419 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node); 422 - ttm_mem_io_free(bdev, &bo->mem); 420 + ttm_mem_io_free(bdev, bo->resource); 423 421 } 424 422 425 423 if (!dma_resv_test_signaled_rcu(bo->base.resv, true) || ··· 440 438 */ 441 439 if (bo->pin_count) { 442 
440 bo->pin_count = 0; 443 - ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); 441 + ttm_bo_move_to_lru_tail(bo, bo->resource, NULL); 444 442 } 445 443 446 444 kref_init(&bo->kref); ··· 536 534 /* Don't evict this BO if it's outside of the 537 535 * requested placement range 538 536 */ 539 - if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) || 540 - (place->lpfn && place->lpfn <= bo->mem.start)) 537 + if (place->fpfn >= (bo->resource->start + bo->resource->num_pages) || 538 + (place->lpfn && place->lpfn <= bo->resource->start)) 541 539 return false; 542 540 543 541 return true; ··· 853 851 } 854 852 855 853 error: 856 - if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count) 854 + if (bo->resource->mem_type == TTM_PL_SYSTEM && !bo->pin_count) 857 855 ttm_bo_move_to_lru_tail_unlocked(bo); 858 856 859 857 return ret; ··· 989 987 /* 990 988 * Check whether we need to move buffer. 991 989 */ 992 - if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) { 990 + if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags)) { 993 991 ret = ttm_bo_move_buffer(bo, placement, ctx); 994 992 if (ret) 995 993 return ret; ··· 997 995 /* 998 996 * We might need to add a TTM. 
999 997 */ 1000 - if (bo->mem.mem_type == TTM_PL_SYSTEM) { 998 + if (bo->resource->mem_type == TTM_PL_SYSTEM) { 1001 999 ret = ttm_tt_create(bo, true); 1002 1000 if (ret) 1003 1001 return ret; ··· 1029 1027 bo->bdev = bdev; 1030 1028 bo->type = type; 1031 1029 bo->page_alignment = page_alignment; 1032 - ttm_resource_alloc(bo, &sys_mem, &bo->mem); 1030 + bo->resource = &bo->_mem; 1031 + ttm_resource_alloc(bo, &sys_mem, bo->resource); 1033 1032 bo->moving = NULL; 1034 1033 bo->pin_count = 0; 1035 1034 bo->sg = sg; ··· 1049 1046 if (bo->type == ttm_bo_type_device || 1050 1047 bo->type == ttm_bo_type_sg) 1051 1048 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node, 1052 - bo->mem.num_pages); 1049 + bo->resource->num_pages); 1053 1050 1054 1051 /* passed reservation objects should already be locked, 1055 1052 * since otherwise lockdep will be angered in radeon. ··· 1111 1108 struct ttm_device *bdev = bo->bdev; 1112 1109 1113 1110 drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); 1114 - ttm_mem_io_free(bdev, &bo->mem); 1111 + ttm_mem_io_free(bdev, bo->resource); 1115 1112 } 1116 1113 EXPORT_SYMBOL(ttm_bo_unmap_virtual); 1117 1114 ··· 1168 1165 /* 1169 1166 * Move to system cached 1170 1167 */ 1171 - if (bo->mem.mem_type != TTM_PL_SYSTEM) { 1168 + if (bo->resource->mem_type != TTM_PL_SYSTEM) { 1172 1169 struct ttm_operation_ctx ctx = { false, false }; 1173 1170 struct ttm_resource evict_mem; 1174 1171 struct ttm_place place, hop;
+25 -24
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 179 179 struct ttm_device *bdev = bo->bdev; 180 180 struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); 181 181 struct ttm_tt *ttm = bo->ttm; 182 - struct ttm_resource *old_mem = &bo->mem; 182 + struct ttm_resource *old_mem = bo->resource; 183 183 struct ttm_resource old_copy = *old_mem; 184 184 void *old_iomap; 185 185 void *new_iomap; ··· 365 365 unsigned long size, 366 366 struct ttm_bo_kmap_obj *map) 367 367 { 368 - struct ttm_resource *mem = &bo->mem; 368 + struct ttm_resource *mem = bo->resource; 369 369 370 - if (bo->mem.bus.addr) { 370 + if (bo->resource->bus.addr) { 371 371 map->bo_kmap_type = ttm_bo_map_premapped; 372 - map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset); 372 + map->virtual = ((u8 *)bo->resource->bus.addr) + offset; 373 373 } else { 374 + resource_size_t res = bo->resource->bus.offset + offset; 375 + 374 376 map->bo_kmap_type = ttm_bo_map_iomap; 375 377 if (mem->bus.caching == ttm_write_combined) 376 - map->virtual = ioremap_wc(bo->mem.bus.offset + offset, 377 - size); 378 + map->virtual = ioremap_wc(res, size); 378 379 #ifdef CONFIG_X86 379 380 else if (mem->bus.caching == ttm_cached) 380 - map->virtual = ioremap_cache(bo->mem.bus.offset + offset, 381 - size); 381 + map->virtual = ioremap_cache(res, size); 382 382 #endif 383 383 else 384 - map->virtual = ioremap(bo->mem.bus.offset + offset, 385 - size); 384 + map->virtual = ioremap(res, size); 386 385 } 387 386 return (!map->virtual) ? 
-ENOMEM : 0; 388 387 } ··· 391 392 unsigned long num_pages, 392 393 struct ttm_bo_kmap_obj *map) 393 394 { 394 - struct ttm_resource *mem = &bo->mem; 395 + struct ttm_resource *mem = bo->resource; 395 396 struct ttm_operation_ctx ctx = { 396 397 .interruptible = false, 397 398 .no_wait_gpu = false ··· 437 438 438 439 map->virtual = NULL; 439 440 map->bo = bo; 440 - if (num_pages > bo->mem.num_pages) 441 + if (num_pages > bo->resource->num_pages) 441 442 return -EINVAL; 442 - if ((start_page + num_pages) > bo->mem.num_pages) 443 + if ((start_page + num_pages) > bo->resource->num_pages) 443 444 return -EINVAL; 444 445 445 - ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); 446 + ret = ttm_mem_io_reserve(bo->bdev, bo->resource); 446 447 if (ret) 447 448 return ret; 448 - if (!bo->mem.bus.is_iomem) { 449 + if (!bo->resource->bus.is_iomem) { 449 450 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map); 450 451 } else { 451 452 offset = start_page << PAGE_SHIFT; ··· 474 475 default: 475 476 BUG(); 476 477 } 477 - ttm_mem_io_free(map->bo->bdev, &map->bo->mem); 478 + ttm_mem_io_free(map->bo->bdev, map->bo->resource); 478 479 map->virtual = NULL; 479 480 map->page = NULL; 480 481 } ··· 482 483 483 484 int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) 484 485 { 485 - struct ttm_resource *mem = &bo->mem; 486 + struct ttm_resource *mem = bo->resource; 486 487 int ret; 487 488 488 489 ret = ttm_mem_io_reserve(bo->bdev, mem); ··· 541 542 542 543 void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) 543 544 { 544 - struct ttm_resource *mem = &bo->mem; 545 + struct ttm_resource *mem = bo->resource; 545 546 546 547 if (dma_buf_map_is_null(map)) 547 548 return; ··· 552 553 iounmap(map->vaddr_iomem); 553 554 dma_buf_map_clear(map); 554 555 555 - ttm_mem_io_free(bo->bdev, &bo->mem); 556 + ttm_mem_io_free(bo->bdev, bo->resource); 556 557 } 557 558 EXPORT_SYMBOL(ttm_bo_vunmap); 558 559 ··· 566 567 567 568 if (!dst_use_tt) 568 569 
ttm_bo_tt_destroy(bo); 569 - ttm_resource_free(bo, &bo->mem); 570 + ttm_resource_free(bo, bo->resource); 570 571 return 0; 571 572 } 572 573 ··· 614 615 struct dma_fence *fence) 615 616 { 616 617 struct ttm_device *bdev = bo->bdev; 617 - struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type); 618 + struct ttm_resource_manager *from; 619 + 620 + from = ttm_manager_type(bdev, bo->resource->mem_type); 618 621 619 622 /** 620 623 * BO doesn't have a TTM we need to bind/unbind. Just remember ··· 629 628 } 630 629 spin_unlock(&from->move_lock); 631 630 632 - ttm_resource_free(bo, &bo->mem); 631 + ttm_resource_free(bo, bo->resource); 633 632 634 633 dma_fence_put(bo->moving); 635 634 bo->moving = dma_fence_get(fence); ··· 642 641 struct ttm_resource *new_mem) 643 642 { 644 643 struct ttm_device *bdev = bo->bdev; 645 - struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type); 644 + struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type); 646 645 struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); 647 646 int ret = 0; 648 647 ··· 678 677 if (ret) 679 678 ttm_bo_wait(bo, false, false); 680 679 681 - ttm_resource_alloc(bo, &sys_mem, &bo->mem); 680 + ttm_resource_alloc(bo, &sys_mem, bo->resource); 682 681 bo->ttm = NULL; 683 682 684 683 dma_resv_unlock(&ghost->base._resv);
+11 -11
drivers/gpu/drm/ttm/ttm_bo_vm.c
··· 102 102 if (bdev->funcs->io_mem_pfn) 103 103 return bdev->funcs->io_mem_pfn(bo, page_offset); 104 104 105 - return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset; 105 + return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset; 106 106 } 107 107 108 108 /** ··· 200 200 201 201 /* Fault should not cross bo boundary. */ 202 202 page_offset &= ~(fault_page_size - 1); 203 - if (page_offset + fault_page_size > bo->mem.num_pages) 203 + if (page_offset + fault_page_size > bo->resource->num_pages) 204 204 goto out_fallback; 205 205 206 - if (bo->mem.bus.is_iomem) 206 + if (bo->resource->bus.is_iomem) 207 207 pfn = ttm_bo_io_mem_pfn(bo, page_offset); 208 208 else 209 209 pfn = page_to_pfn(ttm->pages[page_offset]); ··· 213 213 goto out_fallback; 214 214 215 215 /* Check that memory is contiguous. */ 216 - if (!bo->mem.bus.is_iomem) { 216 + if (!bo->resource->bus.is_iomem) { 217 217 for (i = 1; i < fault_page_size; ++i) { 218 218 if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i) 219 219 goto out_fallback; ··· 299 299 if (unlikely(ret != 0)) 300 300 return ret; 301 301 302 - err = ttm_mem_io_reserve(bdev, &bo->mem); 302 + err = ttm_mem_io_reserve(bdev, bo->resource); 303 303 if (unlikely(err != 0)) 304 304 return VM_FAULT_SIGBUS; 305 305 ··· 308 308 page_last = vma_pages(vma) + vma->vm_pgoff - 309 309 drm_vma_node_start(&bo->base.vma_node); 310 310 311 - if (unlikely(page_offset >= bo->mem.num_pages)) 311 + if (unlikely(page_offset >= bo->resource->num_pages)) 312 312 return VM_FAULT_SIGBUS; 313 313 314 - prot = ttm_io_prot(bo, &bo->mem, prot); 315 - if (!bo->mem.bus.is_iomem) { 314 + prot = ttm_io_prot(bo, bo->resource, prot); 315 + if (!bo->resource->bus.is_iomem) { 316 316 struct ttm_operation_ctx ctx = { 317 317 .interruptible = false, 318 318 .no_wait_gpu = false, ··· 337 337 * first page. 
338 338 */ 339 339 for (i = 0; i < num_prefault; ++i) { 340 - if (bo->mem.bus.is_iomem) { 340 + if (bo->resource->bus.is_iomem) { 341 341 pfn = ttm_bo_io_mem_pfn(bo, page_offset); 342 342 } else { 343 343 page = ttm->pages[page_offset]; ··· 521 521 << PAGE_SHIFT); 522 522 int ret; 523 523 524 - if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages) 524 + if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages) 525 525 return -EIO; 526 526 527 527 ret = ttm_bo_reserve(bo, true, false, NULL); 528 528 if (ret) 529 529 return ret; 530 530 531 - switch (bo->mem.mem_type) { 531 + switch (bo->resource->mem_type) { 532 532 case TTM_PL_SYSTEM: 533 533 if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { 534 534 ret = ttm_tt_swapin(bo->ttm);
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
··· 483 483 d.src_addr = NULL; 484 484 d.dst_pages = dst->ttm->pages; 485 485 d.src_pages = src->ttm->pages; 486 - d.dst_num_pages = dst->mem.num_pages; 487 - d.src_num_pages = src->mem.num_pages; 488 - d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL); 489 - d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL); 486 + d.dst_num_pages = dst->resource->num_pages; 487 + d.src_num_pages = src->resource->num_pages; 488 + d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL); 489 + d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL); 490 490 d.diff = diff; 491 491 492 492 for (j = 0; j < h; ++j) {
+18 -18
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 103 103 goto err; 104 104 105 105 if (buf->base.pin_count > 0) 106 - ret = ttm_bo_mem_compat(placement, &bo->mem, 106 + ret = ttm_bo_mem_compat(placement, bo->resource, 107 107 &new_flags) == true ? 0 : -EINVAL; 108 108 else 109 109 ret = ttm_bo_validate(bo, placement, &ctx); ··· 145 145 goto err; 146 146 147 147 if (buf->base.pin_count > 0) { 148 - ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem, 148 + ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, bo->resource, 149 149 &new_flags) == true ? 0 : -EINVAL; 150 150 goto out_unreserve; 151 151 } ··· 211 211 uint32_t new_flags; 212 212 213 213 place = vmw_vram_placement.placement[0]; 214 - place.lpfn = bo->mem.num_pages; 214 + place.lpfn = bo->resource->num_pages; 215 215 placement.num_placement = 1; 216 216 placement.placement = &place; 217 217 placement.num_busy_placement = 1; ··· 227 227 * In that case, evict it first because TTM isn't good at handling 228 228 * that situation. 229 229 */ 230 - if (bo->mem.mem_type == TTM_PL_VRAM && 231 - bo->mem.start < bo->mem.num_pages && 232 - bo->mem.start > 0 && 230 + if (bo->resource->mem_type == TTM_PL_VRAM && 231 + bo->resource->start < bo->resource->num_pages && 232 + bo->resource->start > 0 && 233 233 buf->base.pin_count == 0) { 234 234 ctx.interruptible = false; 235 235 (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx); 236 236 } 237 237 238 238 if (buf->base.pin_count > 0) 239 - ret = ttm_bo_mem_compat(&placement, &bo->mem, 239 + ret = ttm_bo_mem_compat(&placement, bo->resource, 240 240 &new_flags) == true ? 
0 : -EINVAL; 241 241 else 242 242 ret = ttm_bo_validate(bo, &placement, &ctx); 243 243 244 244 /* For some reason we didn't end up at the start of vram */ 245 - WARN_ON(ret == 0 && bo->mem.start != 0); 245 + WARN_ON(ret == 0 && bo->resource->start != 0); 246 246 if (!ret) 247 247 vmw_bo_pin_reserved(buf, true); 248 248 ··· 293 293 void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, 294 294 SVGAGuestPtr *ptr) 295 295 { 296 - if (bo->mem.mem_type == TTM_PL_VRAM) { 296 + if (bo->resource->mem_type == TTM_PL_VRAM) { 297 297 ptr->gmrId = SVGA_GMR_FRAMEBUFFER; 298 - ptr->offset = bo->mem.start << PAGE_SHIFT; 298 + ptr->offset = bo->resource->start << PAGE_SHIFT; 299 299 } else { 300 - ptr->gmrId = bo->mem.start; 300 + ptr->gmrId = bo->resource->start; 301 301 ptr->offset = 0; 302 302 } 303 303 } ··· 316 316 struct ttm_place pl; 317 317 struct ttm_placement placement; 318 318 struct ttm_buffer_object *bo = &vbo->base; 319 - uint32_t old_mem_type = bo->mem.mem_type; 319 + uint32_t old_mem_type = bo->resource->mem_type; 320 320 int ret; 321 321 322 322 dma_resv_assert_held(bo->base.resv); ··· 326 326 327 327 pl.fpfn = 0; 328 328 pl.lpfn = 0; 329 - pl.mem_type = bo->mem.mem_type; 330 - pl.flags = bo->mem.placement; 329 + pl.mem_type = bo->resource->mem_type; 330 + pl.flags = bo->resource->placement; 331 331 332 332 memset(&placement, 0, sizeof(placement)); 333 333 placement.num_placement = 1; ··· 335 335 336 336 ret = ttm_bo_validate(bo, &placement, &ctx); 337 337 338 - BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type); 338 + BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type); 339 339 340 340 if (pin) 341 341 ttm_bo_pin(bo); ··· 369 369 if (virtual) 370 370 return virtual; 371 371 372 - ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map); 372 + ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map); 373 373 if (ret) 374 374 DRM_ERROR("Buffer object map failed: %d.\n", ret); 375 375 ··· 1197 1197 * With other types of moves, the underlying pages 
stay the same, 1198 1198 * and the map can be kept. 1199 1199 */ 1200 - if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM) 1200 + if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM) 1201 1201 vmw_bo_unmap(vbo); 1202 1202 1203 1203 /* ··· 1205 1205 * read back all resource content first, and unbind the MOB from 1206 1206 * the resource. 1207 1207 */ 1208 - if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB) 1208 + if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB) 1209 1209 vmw_resource_unbind_list(vbo); 1210 1210 }
+5 -5
drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
··· 576 576 cmd->body.cid = cid; 577 577 cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION; 578 578 579 - if (bo->mem.mem_type == TTM_PL_VRAM) { 579 + if (bo->resource->mem_type == TTM_PL_VRAM) { 580 580 cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER; 581 - cmd->body.guestResult.offset = bo->mem.start << PAGE_SHIFT; 581 + cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT; 582 582 } else { 583 - cmd->body.guestResult.gmrId = bo->mem.start; 583 + cmd->body.guestResult.gmrId = bo->resource->start; 584 584 cmd->body.guestResult.offset = 0; 585 585 } 586 586 ··· 621 621 cmd->header.size = sizeof(cmd->body); 622 622 cmd->body.cid = cid; 623 623 cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION; 624 - BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 625 - cmd->body.mobid = bo->mem.start; 624 + BUG_ON(bo->resource->mem_type != VMW_PL_MOB); 625 + cmd->body.mobid = bo->resource->start; 626 626 cmd->body.offset = 0; 627 627 628 628 vmw_cmd_commit(dev_priv, sizeof(*cmd));
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
··· 889 889 header->cmd = man->map + offset; 890 890 if (man->using_mob) { 891 891 cb_hdr->flags = SVGA_CB_FLAG_MOB; 892 - cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start; 892 + cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start; 893 893 cb_hdr->ptr.mob.mobOffset = offset; 894 894 } else { 895 895 cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
+6 -6
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
··· 346 346 } *cmd; 347 347 struct ttm_buffer_object *bo = val_buf->bo; 348 348 349 - BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 349 + BUG_ON(bo->resource->mem_type != VMW_PL_MOB); 350 350 351 351 cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); 352 352 if (unlikely(cmd == NULL)) ··· 355 355 cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; 356 356 cmd->header.size = sizeof(cmd->body); 357 357 cmd->body.cid = res->id; 358 - cmd->body.mobid = bo->mem.start; 358 + cmd->body.mobid = bo->resource->start; 359 359 cmd->body.validContents = res->backup_dirty; 360 360 res->backup_dirty = false; 361 361 vmw_cmd_commit(dev_priv, sizeof(*cmd)); ··· 385 385 uint8_t *cmd; 386 386 387 387 388 - BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 388 + BUG_ON(bo->resource->mem_type != VMW_PL_MOB); 389 389 390 390 mutex_lock(&dev_priv->binding_mutex); 391 391 vmw_binding_state_scrub(uctx->cbs); ··· 513 513 } *cmd; 514 514 struct ttm_buffer_object *bo = val_buf->bo; 515 515 516 - BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 516 + BUG_ON(bo->resource->mem_type != VMW_PL_MOB); 517 517 518 518 cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); 519 519 if (unlikely(cmd == NULL)) ··· 522 522 cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT; 523 523 cmd->header.size = sizeof(cmd->body); 524 524 cmd->body.cid = res->id; 525 - cmd->body.mobid = bo->mem.start; 525 + cmd->body.mobid = bo->resource->start; 526 526 cmd->body.validContents = res->backup_dirty; 527 527 res->backup_dirty = false; 528 528 vmw_cmd_commit(dev_priv, sizeof(*cmd)); ··· 594 594 uint8_t *cmd; 595 595 596 596 597 - BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 597 + BUG_ON(bo->resource->mem_type != VMW_PL_MOB); 598 598 599 599 mutex_lock(&dev_priv->binding_mutex); 600 600 vmw_dx_context_scrub_cotables(res, readback);
+5 -5
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
··· 173 173 SVGA3dCmdDXSetCOTable body; 174 174 } *cmd; 175 175 176 - WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); 176 + WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB); 177 177 dma_resv_assert_held(bo->base.resv); 178 178 179 179 cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); ··· 181 181 return -ENOMEM; 182 182 183 183 WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID); 184 - WARN_ON(bo->mem.mem_type != VMW_PL_MOB); 184 + WARN_ON(bo->resource->mem_type != VMW_PL_MOB); 185 185 cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE; 186 186 cmd->header.size = sizeof(cmd->body); 187 187 cmd->body.cid = vcotbl->ctx->id; 188 188 cmd->body.type = vcotbl->type; 189 - cmd->body.mobid = bo->mem.start; 189 + cmd->body.mobid = bo->resource->start; 190 190 cmd->body.validSizeInBytes = vcotbl->size_read_back; 191 191 192 192 vmw_cmd_commit_flush(dev_priv, sizeof(*cmd)); ··· 315 315 if (!vmw_resource_mob_attached(res)) 316 316 return 0; 317 317 318 - WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); 318 + WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB); 319 319 dma_resv_assert_held(bo->base.resv); 320 320 321 321 mutex_lock(&dev_priv->binding_mutex); ··· 431 431 * Do a page by page copy of COTables. This eliminates slow vmap()s. 432 432 * This should really be a TTM utility. 433 433 */ 434 - for (i = 0; i < old_bo->mem.num_pages; ++i) { 434 + for (i = 0; i < old_bo->resource->num_pages; ++i) { 435 435 bool dummy; 436 436 437 437 ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
+6 -6
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 735 735 cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY; 736 736 cmd->header.size = sizeof(cmd->body); 737 737 cmd->body.cid = ctx_res->id; 738 - cmd->body.mobid = dx_query_mob->base.mem.start; 738 + cmd->body.mobid = dx_query_mob->base.resource->start; 739 739 vmw_cmd_commit(dev_priv, sizeof(*cmd)); 740 740 741 741 vmw_context_bind_dx_query(ctx_res, dx_query_mob); ··· 1046 1046 1047 1047 if (unlikely(new_query_bo != sw_context->cur_query_bo)) { 1048 1048 1049 - if (unlikely(new_query_bo->base.mem.num_pages > 4)) { 1049 + if (unlikely(new_query_bo->base.resource->num_pages > 4)) { 1050 1050 VMW_DEBUG_USER("Query buffer too large.\n"); 1051 1051 return -EINVAL; 1052 1052 } ··· 3710 3710 3711 3711 list_for_each_entry(reloc, &sw_context->bo_relocations, head) { 3712 3712 bo = &reloc->vbo->base; 3713 - switch (bo->mem.mem_type) { 3713 + switch (bo->resource->mem_type) { 3714 3714 case TTM_PL_VRAM: 3715 - reloc->location->offset += bo->mem.start << PAGE_SHIFT; 3715 + reloc->location->offset += bo->resource->start << PAGE_SHIFT; 3716 3716 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; 3717 3717 break; 3718 3718 case VMW_PL_GMR: 3719 - reloc->location->gmrId = bo->mem.start; 3719 + reloc->location->gmrId = bo->resource->start; 3720 3720 break; 3721 3721 case VMW_PL_MOB: 3722 - *reloc->mob_loc = bo->mem.start; 3722 + *reloc->mob_loc = bo->resource->start; 3723 3723 break; 3724 3724 default: 3725 3725 BUG();
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
··· 232 232 int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) 233 233 { 234 234 struct vmw_bo_dirty *dirty = vbo->dirty; 235 - pgoff_t num_pages = vbo->base.mem.num_pages; 235 + pgoff_t num_pages = vbo->base.resource->num_pages; 236 236 size_t size, acc_size; 237 237 int ret; 238 238 static struct ttm_operation_ctx ctx = { ··· 413 413 return ret; 414 414 415 415 page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node); 416 - if (unlikely(page_offset >= bo->mem.num_pages)) { 416 + if (unlikely(page_offset >= bo->resource->num_pages)) { 417 417 ret = VM_FAULT_SIGBUS; 418 418 goto out_unlock; 419 419 } ··· 456 456 457 457 page_offset = vmf->pgoff - 458 458 drm_vma_node_start(&bo->base.vma_node); 459 - if (page_offset >= bo->mem.num_pages || 459 + if (page_offset >= bo->resource->num_pages || 460 460 vmw_resources_clean(vbo, page_offset, 461 461 page_offset + PAGE_SIZE, 462 462 &allowed_prefault)) { ··· 529 529 530 530 page_offset = vmf->pgoff - 531 531 drm_vma_node_start(&bo->base.vma_node); 532 - if (page_offset >= bo->mem.num_pages || 532 + if (page_offset >= bo->resource->num_pages || 533 533 vmw_resources_clean(vbo, page_offset, 534 534 page_offset + PAGE_SIZE, 535 535 &allowed_prefault)) {
+6 -6
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
··· 254 254 } *cmd; 255 255 struct ttm_buffer_object *bo = val_buf->bo; 256 256 257 - BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 257 + BUG_ON(bo->resource->mem_type != VMW_PL_MOB); 258 258 259 259 cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); 260 260 if (unlikely(cmd == NULL)) ··· 263 263 cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER; 264 264 cmd->header.size = sizeof(cmd->body); 265 265 cmd->body.shid = res->id; 266 - cmd->body.mobid = bo->mem.start; 266 + cmd->body.mobid = bo->resource->start; 267 267 cmd->body.offsetInBytes = res->backup_offset; 268 268 res->backup_dirty = false; 269 269 vmw_cmd_commit(dev_priv, sizeof(*cmd)); ··· 282 282 } *cmd; 283 283 struct vmw_fence_obj *fence; 284 284 285 - BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB); 285 + BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB); 286 286 287 287 cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); 288 288 if (unlikely(cmd == NULL)) ··· 402 402 cmd->header.size = sizeof(cmd->body); 403 403 cmd->body.cid = shader->ctx->id; 404 404 cmd->body.shid = shader->id; 405 - cmd->body.mobid = res->backup->base.mem.start; 405 + cmd->body.mobid = res->backup->base.resource->start; 406 406 cmd->body.offsetInBytes = res->backup_offset; 407 407 vmw_cmd_commit(dev_priv, sizeof(*cmd)); 408 408 ··· 450 450 struct vmw_private *dev_priv = res->dev_priv; 451 451 struct ttm_buffer_object *bo = val_buf->bo; 452 452 453 - BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 453 + BUG_ON(bo->resource->mem_type != VMW_PL_MOB); 454 454 mutex_lock(&dev_priv->binding_mutex); 455 455 vmw_dx_shader_unscrub(res); 456 456 mutex_unlock(&dev_priv->binding_mutex); ··· 513 513 struct vmw_fence_obj *fence; 514 514 int ret; 515 515 516 - BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB); 516 + BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB); 517 517 518 518 mutex_lock(&dev_priv->binding_mutex); 519 519 ret = vmw_dx_shader_scrub(res);
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
··· 106 106 cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT; 107 107 cmd->header.size = sizeof(cmd->body); 108 108 cmd->body.soid = so->id; 109 - cmd->body.mobid = res->backup->base.mem.start; 109 + cmd->body.mobid = res->backup->base.resource->start; 110 110 cmd->body.offsetInBytes = res->backup_offset; 111 111 cmd->body.sizeInBytes = so->size; 112 112 vmw_cmd_commit(dev_priv, sizeof(*cmd)); ··· 142 142 struct ttm_buffer_object *bo = val_buf->bo; 143 143 int ret; 144 144 145 - if (WARN_ON(bo->mem.mem_type != VMW_PL_MOB)) 145 + if (WARN_ON(bo->resource->mem_type != VMW_PL_MOB)) 146 146 return -EINVAL; 147 147 148 148 mutex_lock(&dev_priv->binding_mutex); ··· 197 197 struct vmw_fence_obj *fence; 198 198 int ret; 199 199 200 - if (WARN_ON(res->backup->base.mem.mem_type != VMW_PL_MOB)) 200 + if (WARN_ON(res->backup->base.resource->mem_type != VMW_PL_MOB)) 201 201 return -EINVAL; 202 202 203 203 mutex_lock(&dev_priv->binding_mutex);
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 1212 1212 uint32_t submit_size; 1213 1213 struct ttm_buffer_object *bo = val_buf->bo; 1214 1214 1215 - BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 1215 + BUG_ON(bo->resource->mem_type != VMW_PL_MOB); 1216 1216 1217 1217 submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); 1218 1218 ··· 1223 1223 cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; 1224 1224 cmd1->header.size = sizeof(cmd1->body); 1225 1225 cmd1->body.sid = res->id; 1226 - cmd1->body.mobid = bo->mem.start; 1226 + cmd1->body.mobid = bo->resource->start; 1227 1227 if (res->backup_dirty) { 1228 1228 cmd2 = (void *) &cmd1[1]; 1229 1229 cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; ··· 1266 1266 uint8_t *cmd; 1267 1267 1268 1268 1269 - BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 1269 + BUG_ON(bo->resource->mem_type != VMW_PL_MOB); 1270 1270 1271 1271 submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2)); 1272 1272 cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
+5 -5
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
··· 719 719 struct ttm_resource *new_mem, 720 720 struct ttm_place *hop) 721 721 { 722 - struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type); 722 + struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type); 723 723 struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type); 724 724 int ret; 725 725 ··· 729 729 return ret; 730 730 } 731 731 732 - vmw_move_notify(bo, &bo->mem, new_mem); 732 + vmw_move_notify(bo, bo->resource, new_mem); 733 733 734 734 if (old_man->use_tt && new_man->use_tt) { 735 - if (bo->mem.mem_type == TTM_PL_SYSTEM) { 735 + if (bo->resource->mem_type == TTM_PL_SYSTEM) { 736 736 ttm_bo_assign_mem(bo, new_mem); 737 737 return 0; 738 738 } ··· 741 741 goto fail; 742 742 743 743 vmw_ttm_unbind(bo->bdev, bo->ttm); 744 - ttm_resource_free(bo, &bo->mem); 744 + ttm_resource_free(bo, bo->resource); 745 745 ttm_bo_assign_mem(bo, new_mem); 746 746 return 0; 747 747 } else { ··· 751 751 } 752 752 return 0; 753 753 fail: 754 - vmw_move_notify(bo, new_mem, &bo->mem); 754 + vmw_move_notify(bo, new_mem, bo->resource); 755 755 return ret; 756 756 } 757 757
+2 -1
include/drm/ttm/ttm_bo_api.h
··· 136 136 * Members protected by the bo::resv::reserved lock. 137 137 */ 138 138 139 - struct ttm_resource mem; 139 + struct ttm_resource *resource; 140 + struct ttm_resource _mem; 140 141 struct ttm_tt *ttm; 141 142 bool deleted; 142 143
+3 -3
include/drm/ttm/ttm_bo_driver.h
··· 181 181 ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo) 182 182 { 183 183 spin_lock(&bo->bdev->lru_lock); 184 - ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); 184 + ttm_bo_move_to_lru_tail(bo, bo->resource, NULL); 185 185 spin_unlock(&bo->bdev->lru_lock); 186 186 } 187 187 188 188 static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo, 189 189 struct ttm_resource *new_mem) 190 190 { 191 - bo->mem = *new_mem; 191 + bo->_mem = *new_mem; 192 192 new_mem->mm_node = NULL; 193 193 } 194 194 ··· 202 202 static inline void ttm_bo_move_null(struct ttm_buffer_object *bo, 203 203 struct ttm_resource *new_mem) 204 204 { 205 - struct ttm_resource *old_mem = &bo->mem; 205 + struct ttm_resource *old_mem = bo->resource; 206 206 207 207 WARN_ON(old_mem->mm_node != NULL); 208 208 ttm_bo_assign_mem(bo, new_mem);