Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe: Drop bo->size

bo->size is redundant because the base GEM object already has a size
field with the same value. Drop bo->size and use the base GEM object's
size instead. While at it, introduce xe_bo_size() to abstract the BO
size.

v2:
- Fix typo in kernel doc (Ashutosh)
- Fix kunit (CI)
- Fix line wrap (Checkpatch)
v3:
- Fix sriov build (CI)
v4:
- Fix display build (CI)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Link: https://lore.kernel.org/r/20250625144128.2827577-1-matthew.brost@intel.com

+113 -100
+1 -1
drivers/gpu/drm/xe/display/xe_fb_pin.c
··· 227 227 goto out_unlock; 228 228 } 229 229 230 - ret = xe_ggtt_node_insert_locked(vma->node, bo->size, align, 0); 230 + ret = xe_ggtt_node_insert_locked(vma->node, xe_bo_size(bo), align, 0); 231 231 if (ret) { 232 232 xe_ggtt_node_fini(vma->node); 233 233 goto out_unlock;
+1 -1
drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
··· 85 85 86 86 cmd_in = xe_bo_ggtt_addr(bo); 87 87 cmd_out = cmd_in + PAGE_SIZE; 88 - xe_map_memset(xe, &bo->vmap, 0, 0, bo->size); 88 + xe_map_memset(xe, &bo->vmap, 0, 0, xe_bo_size(bo)); 89 89 90 90 gsc_context->hdcp_bo = bo; 91 91 gsc_context->hdcp_cmd_in = cmd_in;
+1 -1
drivers/gpu/drm/xe/tests/xe_bo.c
··· 106 106 } 107 107 108 108 /* Check last CCS value, or at least last value in page. */ 109 - offset = xe_device_ccs_bytes(tile_to_xe(tile), bo->size); 109 + offset = xe_device_ccs_bytes(tile_to_xe(tile), xe_bo_size(bo)); 110 110 offset = min_t(u32, offset, PAGE_SIZE) / sizeof(u64) - 1; 111 111 if (cpu_map[offset] != get_val) { 112 112 KUNIT_FAIL(test,
+2 -2
drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
··· 32 32 33 33 bo->tile = tile; 34 34 bo->ttm.bdev = &xe->ttm; 35 - bo->size = size; 35 + bo->ttm.base.size = size; 36 36 iosys_map_set_vaddr(&bo->vmap, buf); 37 37 38 38 if (flags & XE_BO_FLAG_GGTT) { ··· 43 43 44 44 KUNIT_ASSERT_EQ(test, 0, 45 45 xe_ggtt_node_insert(bo->ggtt_node[tile->id], 46 - bo->size, SZ_4K)); 46 + xe_bo_size(bo), SZ_4K)); 47 47 } 48 48 49 49 return bo;
+26 -26
drivers/gpu/drm/xe/tests/xe_migrate.c
··· 74 74 { 75 75 struct xe_device *xe = tile_to_xe(m->tile); 76 76 u64 retval, expected = 0; 77 - bool big = bo->size >= SZ_2M; 77 + bool big = xe_bo_size(bo) >= SZ_2M; 78 78 struct dma_fence *fence; 79 79 const char *str = big ? "Copying big bo" : "Copying small bo"; 80 80 int err; 81 81 82 82 struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL, 83 - bo->size, 83 + xe_bo_size(bo), 84 84 ttm_bo_type_kernel, 85 85 region | 86 86 XE_BO_FLAG_NEEDS_CPU_ACCESS | ··· 105 105 goto out_unlock; 106 106 } 107 107 108 - xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size); 108 + xe_map_memset(xe, &remote->vmap, 0, 0xd0, xe_bo_size(remote)); 109 109 fence = xe_migrate_clear(m, remote, remote->ttm.resource, 110 110 XE_MIGRATE_CLEAR_FLAG_FULL); 111 111 if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" : ··· 113 113 retval = xe_map_rd(xe, &remote->vmap, 0, u64); 114 114 check(retval, expected, "remote first offset should be cleared", 115 115 test); 116 - retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64); 116 + retval = xe_map_rd(xe, &remote->vmap, xe_bo_size(remote) - 8, u64); 117 117 check(retval, expected, "remote last offset should be cleared", 118 118 test); 119 119 } 120 120 dma_fence_put(fence); 121 121 122 122 /* Try to copy 0xc0 from remote to vram with 2MB or 64KiB/4KiB pages */ 123 - xe_map_memset(xe, &remote->vmap, 0, 0xc0, remote->size); 124 - xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size); 123 + xe_map_memset(xe, &remote->vmap, 0, 0xc0, xe_bo_size(remote)); 124 + xe_map_memset(xe, &bo->vmap, 0, 0xd0, xe_bo_size(bo)); 125 125 126 126 expected = 0xc0c0c0c0c0c0c0c0; 127 127 fence = xe_migrate_copy(m, remote, bo, remote->ttm.resource, ··· 131 131 retval = xe_map_rd(xe, &bo->vmap, 0, u64); 132 132 check(retval, expected, 133 133 "remote -> vram bo first offset should be copied", test); 134 - retval = xe_map_rd(xe, &bo->vmap, bo->size - 8, u64); 134 + retval = xe_map_rd(xe, &bo->vmap, xe_bo_size(bo) - 8, u64); 135 135 check(retval, 
expected, 136 136 "remote -> vram bo offset should be copied", test); 137 137 } 138 138 dma_fence_put(fence); 139 139 140 140 /* And other way around.. slightly hacky.. */ 141 - xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size); 142 - xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size); 141 + xe_map_memset(xe, &remote->vmap, 0, 0xd0, xe_bo_size(remote)); 142 + xe_map_memset(xe, &bo->vmap, 0, 0xc0, xe_bo_size(bo)); 143 143 144 144 fence = xe_migrate_copy(m, bo, remote, bo->ttm.resource, 145 145 remote->ttm.resource, false); ··· 148 148 retval = xe_map_rd(xe, &remote->vmap, 0, u64); 149 149 check(retval, expected, 150 150 "vram -> remote bo first offset should be copied", test); 151 - retval = xe_map_rd(xe, &remote->vmap, bo->size - 8, u64); 151 + retval = xe_map_rd(xe, &remote->vmap, xe_bo_size(bo) - 8, u64); 152 152 check(retval, expected, 153 153 "vram -> remote bo last offset should be copied", test); 154 154 } ··· 245 245 if (m->q->vm->flags & XE_VM_FLAG_64K) 246 246 expected |= XE_PTE_PS64; 247 247 if (xe_bo_is_vram(pt)) 248 - xe_res_first(pt->ttm.resource, 0, pt->size, &src_it); 248 + xe_res_first(pt->ttm.resource, 0, xe_bo_size(pt), &src_it); 249 249 else 250 - xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it); 250 + xe_res_first_sg(xe_bo_sg(pt), 0, xe_bo_size(pt), &src_it); 251 251 252 252 emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false, 253 253 &src_it, XE_PAGE_SIZE, pt->ttm.resource); ··· 276 276 277 277 /* Clear a small bo */ 278 278 kunit_info(test, "Clearing small buffer object\n"); 279 - xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size); 279 + xe_map_memset(xe, &tiny->vmap, 0, 0x22, xe_bo_size(tiny)); 280 280 expected = 0; 281 281 fence = xe_migrate_clear(m, tiny, tiny->ttm.resource, 282 282 XE_MIGRATE_CLEAR_FLAG_FULL); ··· 286 286 dma_fence_put(fence); 287 287 retval = xe_map_rd(xe, &tiny->vmap, 0, u32); 288 288 check(retval, expected, "Command clear small first value", test); 289 - retval = xe_map_rd(xe, &tiny->vmap, tiny->size - 
4, u32); 289 + retval = xe_map_rd(xe, &tiny->vmap, xe_bo_size(tiny) - 4, u32); 290 290 check(retval, expected, "Command clear small last value", test); 291 291 292 292 kunit_info(test, "Copying small buffer object to system\n"); ··· 298 298 299 299 /* Clear a big bo */ 300 300 kunit_info(test, "Clearing big buffer object\n"); 301 - xe_map_memset(xe, &big->vmap, 0, 0x11, big->size); 301 + xe_map_memset(xe, &big->vmap, 0, 0x11, xe_bo_size(big)); 302 302 expected = 0; 303 303 fence = xe_migrate_clear(m, big, big->ttm.resource, 304 304 XE_MIGRATE_CLEAR_FLAG_FULL); ··· 308 308 dma_fence_put(fence); 309 309 retval = xe_map_rd(xe, &big->vmap, 0, u32); 310 310 check(retval, expected, "Command clear big first value", test); 311 - retval = xe_map_rd(xe, &big->vmap, big->size - 4, u32); 311 + retval = xe_map_rd(xe, &big->vmap, xe_bo_size(big) - 4, u32); 312 312 check(retval, expected, "Command clear big last value", test); 313 313 314 314 kunit_info(test, "Copying big buffer object to system\n"); ··· 370 370 struct xe_migrate *m = tile->migrate; 371 371 struct xe_device *xe = gt_to_xe(gt); 372 372 struct dma_fence *fence = NULL; 373 - u64 size = src_bo->size; 373 + u64 size = xe_bo_size(src_bo); 374 374 struct xe_res_cursor src_it, dst_it; 375 375 struct ttm_resource *src = src_bo->ttm.resource, *dst = dst_bo->ttm.resource; 376 376 u64 src_L0_ofs, dst_L0_ofs; ··· 498 498 long ret; 499 499 500 500 expected = 0xd0d0d0d0d0d0d0d0; 501 - xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size); 501 + xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, xe_bo_size(sys_bo)); 502 502 503 503 fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test); 504 504 if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) { ··· 523 523 524 524 retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64); 525 525 check(retval, expected, "Clear evicted vram data first value", test); 526 - retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64); 526 + retval = 
xe_map_rd(xe, &vram_bo->vmap, xe_bo_size(vram_bo) - 8, u64); 527 527 check(retval, expected, "Clear evicted vram data last value", test); 528 528 529 529 fence = blt_copy(tile, vram_bo, ccs_bo, ··· 532 532 retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64); 533 533 check(retval, 0, "Clear ccs data first value", test); 534 534 535 - retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64); 535 + retval = xe_map_rd(xe, &ccs_bo->vmap, xe_bo_size(ccs_bo) - 8, u64); 536 536 check(retval, 0, "Clear ccs data last value", test); 537 537 } 538 538 dma_fence_put(fence); ··· 562 562 563 563 retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64); 564 564 check(retval, expected, "Restored value must be equal to initial value", test); 565 - retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64); 565 + retval = xe_map_rd(xe, &vram_bo->vmap, xe_bo_size(vram_bo) - 8, u64); 566 566 check(retval, expected, "Restored value must be equal to initial value", test); 567 567 568 568 fence = blt_copy(tile, vram_bo, ccs_bo, ··· 570 570 if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) { 571 571 retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64); 572 572 check(retval, 0, "Clear ccs data first value", test); 573 - retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64); 573 + retval = xe_map_rd(xe, &ccs_bo->vmap, xe_bo_size(ccs_bo) - 8, u64); 574 574 check(retval, 0, "Clear ccs data last value", test); 575 575 } 576 576 dma_fence_put(fence); ··· 583 583 u64 expected, retval; 584 584 585 585 expected = 0xd0d0d0d0d0d0d0d0; 586 - xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size); 586 + xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, xe_bo_size(sys_bo)); 587 587 588 588 fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test); 589 589 if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) { ··· 597 597 if (!sanity_fence_failed(xe, fence, "Blit copy from vram to sysmem", test)) { 598 598 retval = xe_map_rd(xe, &sys_bo->vmap, 0, 
u64); 599 599 check(retval, expected, "Decompressed value must be equal to initial value", test); 600 - retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64); 600 + retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64); 601 601 check(retval, expected, "Decompressed value must be equal to initial value", test); 602 602 } 603 603 dma_fence_put(fence); ··· 615 615 if (!sanity_fence_failed(xe, fence, "Clear main buffer data", test)) { 616 616 retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64); 617 617 check(retval, expected, "Clear main buffer first value", test); 618 - retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64); 618 + retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64); 619 619 check(retval, expected, "Clear main buffer last value", test); 620 620 } 621 621 dma_fence_put(fence); ··· 625 625 if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) { 626 626 retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64); 627 627 check(retval, expected, "Clear ccs data first value", test); 628 - retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64); 628 + retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64); 629 629 check(retval, expected, "Clear ccs data last value", test); 630 630 } 631 631 dma_fence_put(fence);
+10 -10
drivers/gpu/drm/xe/xe_bo.c
··· 437 437 438 438 extra_pages = 0; 439 439 if (xe_bo_needs_ccs_pages(bo)) 440 - extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), 440 + extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, xe_bo_size(bo)), 441 441 PAGE_SIZE); 442 442 443 443 /* ··· 1122 1122 if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE) 1123 1123 goto out_unlock_bo; 1124 1124 1125 - backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size, 1125 + backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo), 1126 1126 DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel, 1127 1127 XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS | 1128 1128 XE_BO_FLAG_PINNED); ··· 1200 1200 goto out_unlock_bo; 1201 1201 1202 1202 if (!backup) { 1203 - backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size, 1203 + backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, 1204 + NULL, xe_bo_size(bo), 1204 1205 DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel, 1205 1206 XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS | 1206 1207 XE_BO_FLAG_PINNED); ··· 1255 1254 } 1256 1255 1257 1256 xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0, 1258 - bo->size); 1257 + xe_bo_size(bo)); 1259 1258 } 1260 1259 1261 1260 if (!bo->backup_obj) ··· 1348 1347 } 1349 1348 1350 1349 xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr, 1351 - bo->size); 1350 + xe_bo_size(bo)); 1352 1351 } 1353 1352 1354 1353 bo->backup_obj = NULL; ··· 1559 1558 1560 1559 vram = res_to_mem_region(ttm_bo->resource); 1561 1560 xe_res_first(ttm_bo->resource, offset & PAGE_MASK, 1562 - bo->size - (offset & PAGE_MASK), &cursor); 1561 + xe_bo_size(bo) - (offset & PAGE_MASK), &cursor); 1563 1562 1564 1563 do { 1565 1564 unsigned long page_offset = (offset & ~PAGE_MASK); ··· 1859 1858 1860 1859 bo->ccs_cleared = false; 1861 1860 bo->tile = tile; 1862 - bo->size = size; 1863 1861 bo->flags = flags; 1864 1862 bo->cpu_caching = cpu_caching; 1865 1863 bo->ttm.base.funcs = 
&xe_gem_object_funcs; ··· 2036 2036 2037 2037 if (flags & XE_BO_FLAG_FIXED_PLACEMENT) { 2038 2038 err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo, 2039 - start + bo->size, U64_MAX); 2039 + start + xe_bo_size(bo), U64_MAX); 2040 2040 } else { 2041 2041 err = xe_ggtt_insert_bo(t->mem.ggtt, bo); 2042 2042 } ··· 2234 2234 xe_assert(xe, !(*src)->vmap.is_iomem); 2235 2235 2236 2236 bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, 2237 - (*src)->size, dst_flags); 2237 + xe_bo_size(*src), dst_flags); 2238 2238 if (IS_ERR(bo)) 2239 2239 return PTR_ERR(bo); 2240 2240 ··· 2524 2524 * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap 2525 2525 * to use struct iosys_map. 2526 2526 */ 2527 - ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap); 2527 + ret = ttm_bo_kmap(&bo->ttm, 0, xe_bo_size(bo) >> PAGE_SHIFT, &bo->kmap); 2528 2528 if (ret) 2529 2529 return ret; 2530 2530
+15 -2
drivers/gpu/drm/xe/xe_bo.h
··· 238 238 return xe_bo_addr(bo, 0, page_size); 239 239 } 240 240 241 + /** 242 + * xe_bo_size() - Xe BO size 243 + * @bo: The bo object. 244 + * 245 + * Simple helper to return Xe BO's size. 246 + * 247 + * Return: Xe BO's size 248 + */ 249 + static inline size_t xe_bo_size(struct xe_bo *bo) 250 + { 251 + return bo->ttm.base.size; 252 + } 253 + 241 254 static inline u32 242 255 __xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id) 243 256 { ··· 259 246 if (XE_WARN_ON(!ggtt_node)) 260 247 return 0; 261 248 262 - XE_WARN_ON(ggtt_node->base.size > bo->size); 249 + XE_WARN_ON(ggtt_node->base.size > xe_bo_size(bo)); 263 250 XE_WARN_ON(ggtt_node->base.start + ggtt_node->base.size > (1ull << 32)); 264 251 return ggtt_node->base.start; 265 252 } ··· 313 300 314 301 static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo) 315 302 { 316 - return PAGE_ALIGN(bo->ttm.base.size); 303 + return PAGE_ALIGN(xe_bo_size(bo)); 317 304 } 318 305 319 306 static inline bool xe_bo_has_pages(struct xe_bo *bo)
-2
drivers/gpu/drm/xe/xe_bo_types.h
··· 32 32 struct xe_bo *backup_obj; 33 33 /** @parent_obj: Ref to parent bo if this a backup_obj */ 34 34 struct xe_bo *parent_obj; 35 - /** @size: Size of this buffer object */ 36 - size_t size; 37 35 /** @flags: flags for this buffer object */ 38 36 u32 flags; 39 37 /** @vm: VM this BO is attached to, for extobj this will be NULL */
+1 -1
drivers/gpu/drm/xe/xe_drm_client.c
··· 167 167 static void bo_meminfo(struct xe_bo *bo, 168 168 struct drm_memory_stats stats[TTM_NUM_MEM_TYPES]) 169 169 { 170 - u64 sz = bo->size; 170 + u64 sz = xe_bo_size(bo); 171 171 u32 mem_type = bo->ttm.resource->mem_type; 172 172 173 173 xe_bo_assert_held(bo);
+7 -7
drivers/gpu/drm/xe/xe_ggtt.c
··· 421 421 goto err; 422 422 } 423 423 424 - xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, ggtt->scratch->size); 424 + xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, xe_bo_size(ggtt->scratch)); 425 425 426 426 xe_ggtt_initial_clear(ggtt); 427 427 ··· 693 693 return; 694 694 695 695 start = node->base.start; 696 - end = start + bo->size; 696 + end = start + xe_bo_size(bo); 697 697 698 698 pte = ggtt->pt_ops->pte_encode_flags(bo, pat_index); 699 699 if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) { 700 700 xe_assert(xe_bo_device(bo), bo->ttm.ttm); 701 701 702 - for (xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &cur); 702 + for (xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &cur); 703 703 cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE)) 704 704 ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining, 705 705 pte | xe_res_dma(&cur)); ··· 707 707 /* Prepend GPU offset */ 708 708 pte |= vram_region_gpu_offset(bo->ttm.resource); 709 709 710 - for (xe_res_first(bo->ttm.resource, 0, bo->size, &cur); 710 + for (xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur); 711 711 cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE)) 712 712 ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining, 713 713 pte + cur.start); ··· 743 743 744 744 if (XE_WARN_ON(bo->ggtt_node[tile_id])) { 745 745 /* Someone's already inserted this BO in the GGTT */ 746 - xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size); 746 + xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo)); 747 747 return 0; 748 748 } 749 749 ··· 762 762 763 763 mutex_lock(&ggtt->lock); 764 764 err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base, 765 - bo->size, alignment, 0, start, end, 0); 765 + xe_bo_size(bo), alignment, 0, start, end, 0); 766 766 if (err) { 767 767 xe_ggtt_node_fini(bo->ggtt_node[tile_id]); 768 768 bo->ggtt_node[tile_id] = NULL; ··· 823 823 return; 824 824 825 825 /* This BO is not currently in the GGTT */ 826 - xe_tile_assert(ggtt->tile, 
bo->ggtt_node[tile_id]->base.size == bo->size); 826 + xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo)); 827 827 828 828 xe_ggtt_node_remove(bo->ggtt_node[tile_id], 829 829 bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
+4 -2
drivers/gpu/drm/xe/xe_gsc.c
··· 59 59 60 60 xe_map_memcpy_from(xe, storage, &gsc->fw.bo->vmap, 0, fw_size); 61 61 xe_map_memcpy_to(xe, &gsc->private->vmap, 0, storage, fw_size); 62 - xe_map_memset(xe, &gsc->private->vmap, fw_size, 0, gsc->private->size - fw_size); 62 + xe_map_memset(xe, &gsc->private->vmap, fw_size, 0, 63 + xe_bo_size(gsc->private) - fw_size); 63 64 64 65 kfree(storage); 65 66 ··· 83 82 bb->cs[bb->len++] = GSC_FW_LOAD; 84 83 bb->cs[bb->len++] = lower_32_bits(offset); 85 84 bb->cs[bb->len++] = upper_32_bits(offset); 86 - bb->cs[bb->len++] = (gsc->private->size / SZ_4K) | GSC_FW_LOAD_LIMIT_VALID; 85 + bb->cs[bb->len++] = (xe_bo_size(gsc->private) / SZ_4K) | 86 + GSC_FW_LOAD_LIMIT_VALID; 87 87 88 88 job = xe_bb_create_job(gsc->q, bb); 89 89 if (IS_ERR(job)) {
+8 -8
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
··· 282 282 283 283 if (config->lmem_obj) { 284 284 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE); 285 - cfg[n++] = lower_32_bits(config->lmem_obj->size); 286 - cfg[n++] = upper_32_bits(config->lmem_obj->size); 285 + cfg[n++] = lower_32_bits(xe_bo_size(config->lmem_obj)); 286 + cfg[n++] = upper_32_bits(xe_bo_size(config->lmem_obj)); 287 287 } 288 288 289 289 cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM); ··· 1299 1299 struct xe_bo *bo; 1300 1300 1301 1301 bo = config->lmem_obj; 1302 - return bo ? bo->size : 0; 1302 + return bo ? xe_bo_size(bo) : 0; 1303 1303 } 1304 1304 1305 1305 static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) ··· 1388 1388 err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset); 1389 1389 if (err) 1390 1390 goto fail; 1391 - offset += bo->size; 1391 + offset += xe_bo_size(bo); 1392 1392 } 1393 1393 } 1394 1394 ··· 1469 1469 goto release; 1470 1470 } 1471 1471 1472 - err = pf_push_vf_cfg_lmem(gt, vfid, bo->size); 1472 + err = pf_push_vf_cfg_lmem(gt, vfid, xe_bo_size(bo)); 1473 1473 if (unlikely(err)) 1474 1474 goto reset_lmtt; 1475 1475 1476 1476 xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n", 1477 - vfid, bo->size, bo->size / SZ_1M); 1477 + vfid, xe_bo_size(bo), xe_bo_size(bo) / SZ_1M); 1478 1478 return 0; 1479 1479 1480 1480 reset_lmtt: ··· 2552 2552 if (!config->lmem_obj) 2553 2553 continue; 2554 2554 2555 - string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2, 2555 + string_get_size(xe_bo_size(config->lmem_obj), 1, STRING_UNITS_2, 2556 2556 buf, sizeof(buf)); 2557 2557 drm_printf(p, "VF%u:\t%zu\t(%s)\n", 2558 - n, config->lmem_obj->size, buf); 2558 + n, xe_bo_size(config->lmem_obj), buf); 2559 2559 } 2560 2560 2561 2561 mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+2 -2
drivers/gpu/drm/xe/xe_guc.c
··· 60 60 /* GuC addresses above GUC_GGTT_TOP don't map through the GTT */ 61 61 xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc))); 62 62 xe_assert(xe, addr < GUC_GGTT_TOP); 63 - xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr); 63 + xe_assert(xe, xe_bo_size(bo) <= GUC_GGTT_TOP - addr); 64 64 65 65 return addr; 66 66 } ··· 421 421 buf = base + G2G_DESC_AREA_SIZE + slot * G2G_BUFFER_SIZE; 422 422 423 423 xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE); 424 - xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= g2g_bo->size); 424 + xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(g2g_bo)); 425 425 426 426 return guc_action_register_g2g_buffer(near_guc, type, far_tile, far_dev, 427 427 desc, buf, G2G_BUFFER_SIZE);
+2 -2
drivers/gpu/drm/xe/xe_guc_ads.c
··· 890 890 891 891 xe_gt_assert(gt, ads->bo); 892 892 893 - xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size); 893 + xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, xe_bo_size(ads->bo)); 894 894 guc_policies_init(ads); 895 895 guc_golden_lrc_init(ads); 896 896 guc_mapping_table_init_invalid(gt, &info_map); ··· 914 914 915 915 xe_gt_assert(gt, ads->bo); 916 916 917 - xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size); 917 + xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, xe_bo_size(ads->bo)); 918 918 guc_policies_init(ads); 919 919 fill_engine_enable_masks(gt, &info_map); 920 920 guc_mmio_reg_state_init(ads);
+2 -2
drivers/gpu/drm/xe/xe_guc_ct.c
··· 453 453 454 454 xe_gt_assert(gt, !xe_guc_ct_enabled(ct)); 455 455 456 - xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size); 456 + xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo)); 457 457 guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap); 458 458 guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap); 459 459 ··· 1907 1907 return NULL; 1908 1908 1909 1909 if (ct->bo && want_ctb) { 1910 - snapshot->ctb_size = ct->bo->size; 1910 + snapshot->ctb_size = xe_bo_size(ct->bo); 1911 1911 snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL); 1912 1912 } 1913 1913
+1 -1
drivers/gpu/drm/xe/xe_guc_log.c
··· 79 79 * Also, can't use vmalloc as might be called from atomic context. So need 80 80 * to break the buffer up into smaller chunks that can be allocated. 81 81 */ 82 - snapshot->size = log->bo->size; 82 + snapshot->size = xe_bo_size(log->bo); 83 83 snapshot->num_chunks = DIV_ROUND_UP(snapshot->size, GUC_LOG_CHUNK_SIZE); 84 84 85 85 snapshot->copy = kcalloc(snapshot->num_chunks, sizeof(*snapshot->copy),
+1 -1
drivers/gpu/drm/xe/xe_huc.c
··· 171 171 sizeof(struct pxp43_new_huc_auth_in)); 172 172 wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset, 173 173 xe_bo_ggtt_addr(huc->fw.bo), 174 - huc->fw.bo->size); 174 + xe_bo_size(huc->fw.bo)); 175 175 do { 176 176 err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, ggtt_offset, wr_offset, 177 177 ggtt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
+2 -2
drivers/gpu/drm/xe/xe_lmtt.c
··· 386 386 u64 addr, vram_offset; 387 387 388 388 lmtt_assert(lmtt, IS_ALIGNED(start, page_size)); 389 - lmtt_assert(lmtt, IS_ALIGNED(bo->size, page_size)); 389 + lmtt_assert(lmtt, IS_ALIGNED(xe_bo_size(bo), page_size)); 390 390 lmtt_assert(lmtt, xe_bo_is_vram(bo)); 391 391 392 392 vram_offset = vram_region_gpu_offset(bo->ttm.resource); 393 - xe_res_first(bo->ttm.resource, 0, bo->size, &cur); 393 + xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur); 394 394 while (cur.remaining) { 395 395 addr = xe_res_dma(&cur); 396 396 addr += vram_offset; /* XXX */
+2 -2
drivers/gpu/drm/xe/xe_lrc.c
··· 975 975 976 976 static size_t wa_bb_offset(struct xe_lrc *lrc) 977 977 { 978 - return lrc->bo->size - LRC_WA_BB_SIZE; 978 + return xe_bo_size(lrc->bo) - LRC_WA_BB_SIZE; 979 979 } 980 980 981 981 static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe) ··· 1859 1859 snapshot->seqno = xe_lrc_seqno(lrc); 1860 1860 snapshot->lrc_bo = xe_bo_get(lrc->bo); 1861 1861 snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc); 1862 - snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset - 1862 + snapshot->lrc_size = xe_bo_size(lrc->bo) - snapshot->lrc_offset - 1863 1863 LRC_WA_BB_SIZE; 1864 1864 snapshot->lrc_snapshot = NULL; 1865 1865 snapshot->ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(lrc));
+15 -15
drivers/gpu/drm/xe/xe_migrate.c
··· 203 203 BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1)); 204 204 205 205 /* Need to be sure everything fits in the first PT, or create more */ 206 - xe_tile_assert(tile, m->batch_base_ofs + batch->size < SZ_2M); 206 + xe_tile_assert(tile, m->batch_base_ofs + xe_bo_size(batch) < SZ_2M); 207 207 208 208 bo = xe_bo_create_pin_map(vm->xe, tile, vm, 209 209 num_entries * XE_PAGE_SIZE, ··· 214 214 return PTR_ERR(bo); 215 215 216 216 /* PT30 & PT31 reserved for 2M identity map */ 217 - pt29_ofs = bo->size - 3 * XE_PAGE_SIZE; 217 + pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE; 218 218 entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index); 219 219 xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry); 220 220 ··· 236 236 if (!IS_DGFX(xe)) { 237 237 /* Write out batch too */ 238 238 m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE; 239 - for (i = 0; i < batch->size; 239 + for (i = 0; i < xe_bo_size(batch); 240 240 i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE : 241 241 XE_PAGE_SIZE) { 242 242 entry = vm->pt_ops->pte_encode_bo(batch, i, ··· 247 247 level++; 248 248 } 249 249 if (xe->info.has_usm) { 250 - xe_tile_assert(tile, batch->size == SZ_1M); 250 + xe_tile_assert(tile, xe_bo_size(batch) == SZ_1M); 251 251 252 252 batch = tile->primary_gt->usm.bb_pool->bo; 253 253 m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M; 254 - xe_tile_assert(tile, batch->size == SZ_512K); 254 + xe_tile_assert(tile, xe_bo_size(batch) == SZ_512K); 255 255 256 - for (i = 0; i < batch->size; 256 + for (i = 0; i < xe_bo_size(batch); 257 257 i += vm->flags & XE_VM_FLAG_64K ? 
XE_64K_PAGE_SIZE : 258 258 XE_PAGE_SIZE) { 259 259 entry = vm->pt_ops->pte_encode_bo(batch, i, ··· 306 306 307 307 /* Identity map the entire vram at 256GiB offset */ 308 308 if (IS_DGFX(xe)) { 309 - u64 pt30_ofs = bo->size - 2 * XE_PAGE_SIZE; 309 + u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE; 310 310 311 311 xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET, 312 312 pat_index, pt30_ofs); ··· 321 321 u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION]; 322 322 u64 vram_offset = IDENTITY_OFFSET + 323 323 DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G); 324 - u64 pt31_ofs = bo->size - XE_PAGE_SIZE; 324 + u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE; 325 325 326 326 xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE - 327 327 IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G); ··· 768 768 struct xe_gt *gt = m->tile->primary_gt; 769 769 struct xe_device *xe = gt_to_xe(gt); 770 770 struct dma_fence *fence = NULL; 771 - u64 size = src_bo->size; 771 + u64 size = xe_bo_size(src_bo); 772 772 struct xe_res_cursor src_it, dst_it, ccs_it; 773 773 u64 src_L0_ofs, dst_L0_ofs; 774 774 u32 src_L0_pt, dst_L0_pt; ··· 791 791 if (XE_WARN_ON(copy_ccs && src_bo != dst_bo)) 792 792 return ERR_PTR(-EINVAL); 793 793 794 - if (src_bo != dst_bo && XE_WARN_ON(src_bo->size != dst_bo->size)) 794 + if (src_bo != dst_bo && XE_WARN_ON(xe_bo_size(src_bo) != xe_bo_size(dst_bo))) 795 795 return ERR_PTR(-EINVAL); 796 796 797 797 if (!src_is_vram) ··· 1064 1064 struct xe_device *xe = gt_to_xe(gt); 1065 1065 bool clear_only_system_ccs = false; 1066 1066 struct dma_fence *fence = NULL; 1067 - u64 size = bo->size; 1067 + u64 size = xe_bo_size(bo); 1068 1068 struct xe_res_cursor src_it; 1069 1069 struct ttm_resource *src = dst; 1070 1070 int err; ··· 1076 1076 clear_only_system_ccs = true; 1077 1077 1078 1078 if (!clear_vram) 1079 - xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it); 1079 + xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &src_it); 1080 
1080 else 1081 - xe_res_first(src, 0, bo->size, &src_it); 1081 + xe_res_first(src, 0, xe_bo_size(bo), &src_it); 1082 1082 1083 1083 while (size) { 1084 1084 u64 clear_L0_ofs; ··· 1407 1407 if (idx == chunk) 1408 1408 goto next_cmd; 1409 1409 1410 - xe_tile_assert(tile, pt_bo->size == SZ_4K); 1410 + xe_tile_assert(tile, xe_bo_size(pt_bo) == SZ_4K); 1411 1411 1412 1412 /* Map a PT at most once */ 1413 1413 if (pt_bo->update_index < 0) ··· 1868 1868 if (IS_ERR(dma_addr)) 1869 1869 return PTR_ERR(dma_addr); 1870 1870 1871 - xe_res_first(bo->ttm.resource, offset, bo->size - offset, &cursor); 1871 + xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor); 1872 1872 1873 1873 do { 1874 1874 struct dma_fence *__fence;
+5 -5
drivers/gpu/drm/xe/xe_oa.c
··· 403 403 static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream) 404 404 { 405 405 u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo); 406 - int size_exponent = __ffs(stream->oa_buffer.bo->size); 406 + int size_exponent = __ffs(xe_bo_size(stream->oa_buffer.bo)); 407 407 u32 oa_buf = gtt_offset | OAG_OABUFFER_MEMORY_SELECT; 408 408 struct xe_mmio *mmio = &stream->gt->mmio; 409 409 unsigned long flags; ··· 435 435 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); 436 436 437 437 /* Zero out the OA buffer since we rely on zero report id and timestamp fields */ 438 - memset(stream->oa_buffer.vaddr, 0, stream->oa_buffer.bo->size); 438 + memset(stream->oa_buffer.vaddr, 0, xe_bo_size(stream->oa_buffer.bo)); 439 439 } 440 440 441 441 static u32 __format_to_oactrl(const struct xe_oa_format *format, int counter_sel_mask) ··· 1065 1065 static u32 oag_buf_size_select(const struct xe_oa_stream *stream) 1066 1066 { 1067 1067 return _MASKED_FIELD(OAG_OA_DEBUG_BUF_SIZE_SELECT, 1068 - stream->oa_buffer.bo->size > SZ_16M ? 1068 + xe_bo_size(stream->oa_buffer.bo) > SZ_16M ? 1069 1069 OAG_OA_DEBUG_BUF_SIZE_SELECT : 0); 1070 1070 } 1071 1071 ··· 1582 1582 1583 1583 static long xe_oa_info_locked(struct xe_oa_stream *stream, unsigned long arg) 1584 1584 { 1585 - struct drm_xe_oa_stream_info info = { .oa_buf_size = stream->oa_buffer.bo->size, }; 1585 + struct drm_xe_oa_stream_info info = { .oa_buf_size = xe_bo_size(stream->oa_buffer.bo), }; 1586 1586 void __user *uaddr = (void __user *)arg; 1587 1587 1588 1588 if (copy_to_user(uaddr, &info, sizeof(info))) ··· 1668 1668 } 1669 1669 1670 1670 /* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */ 1671 - if (vma->vm_end - vma->vm_start != stream->oa_buffer.bo->size) { 1671 + if (vma->vm_end - vma->vm_start != xe_bo_size(stream->oa_buffer.bo)) { 1672 1672 drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n"); 1673 1673 return -EINVAL; 1674 1674 }
+2 -2
drivers/gpu/drm/xe/xe_trace_bo.h
··· 33 33 34 34 TP_fast_assign( 35 35 __assign_str(dev); 36 - __entry->size = bo->size; 36 + __entry->size = xe_bo_size(bo); 37 37 __entry->flags = bo->flags; 38 38 __entry->vm = bo->vm; 39 39 ), ··· 73 73 74 74 TP_fast_assign( 75 75 __entry->bo = bo; 76 - __entry->size = bo->size; 76 + __entry->size = xe_bo_size(bo); 77 77 __assign_str(new_placement_name); 78 78 __assign_str(old_placement_name); 79 79 __assign_str(device_id);
+3 -3
drivers/gpu/drm/xe/xe_vm.c
··· 3466 3466 { 3467 3467 u16 coh_mode; 3468 3468 3469 - if (XE_IOCTL_DBG(xe, range > bo->size) || 3469 + if (XE_IOCTL_DBG(xe, range > xe_bo_size(bo)) || 3470 3470 XE_IOCTL_DBG(xe, obj_offset > 3471 - bo->size - range)) { 3471 + xe_bo_size(bo) - range)) { 3472 3472 return -EINVAL; 3473 3473 } 3474 3474 ··· 3771 3771 3772 3772 xe_vma_ops_init(&vops, vm, q, NULL, 0); 3773 3773 3774 - ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, bo->size, 3774 + ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, xe_bo_size(bo), 3775 3775 DRM_XE_VM_BIND_OP_MAP, 0, 0, 3776 3776 vm->xe->pat.idx[cache_lvl]); 3777 3777 if (IS_ERR(ops)) {