Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe: Unify the initialization of VRAM regions

Currently the driver defines VRAM regions per device and per
tile, and these regions are initialized in two completely different
ways. To simplify the logic of the code and make it easier to add new
regions in the future, let's unify the way we initialize VRAM regions.

v2:
- fix doc comments in struct xe_vram_region
- remove unnecessary includes (Jani)
v3:
- move code from xe_vram_init_regions_managers to xe_tile_init_noalloc
(Matthew)
- replace ioremap_wc with devm_ioremap_wc for mapping VRAM BAR
(Matthew)
- Replace the tile id parameter with vram region in the xe_pf_begin
function.
v4:
- remove tile back pointer from struct xe_vram_region
- add new back pointers: xe and migrate to xe_vram_region

Signed-off-by: Piotr Piórkowski <piotr.piorkowski@intel.com>
Cc: Stuart Summers <stuart.summers@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com> # rev3
Acked-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250714184818.89201-6-piotr.piorkowski@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>

authored by

Piotr Piórkowski and committed by
Lucas De Marchi
4b0a5f5c d65ff1ec

+164 -125
+3 -1
drivers/gpu/drm/xe/xe_bo.h
··· 12 12 #include "xe_macros.h" 13 13 #include "xe_vm_types.h" 14 14 #include "xe_vm.h" 15 + #include "xe_vram_types.h" 15 16 16 17 #define XE_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */ 17 18 ··· 24 23 #define XE_BO_FLAG_VRAM_MASK (XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1) 25 24 /* -- */ 26 25 #define XE_BO_FLAG_STOLEN BIT(4) 26 + #define XE_BO_FLAG_VRAM(vram) (XE_BO_FLAG_VRAM0 << ((vram)->id)) 27 27 #define XE_BO_FLAG_VRAM_IF_DGFX(tile) (IS_DGFX(tile_to_xe(tile)) ? \ 28 - XE_BO_FLAG_VRAM0 << (tile)->id : \ 28 + XE_BO_FLAG_VRAM((tile)->mem.vram) : \ 29 29 XE_BO_FLAG_SYSTEM) 30 30 #define XE_BO_FLAG_GGTT BIT(5) 31 31 #define XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE BIT(6)
+8 -5
drivers/gpu/drm/xe/xe_gt_pagefault.c
··· 23 23 #include "xe_svm.h" 24 24 #include "xe_trace_bo.h" 25 25 #include "xe_vm.h" 26 + #include "xe_vram_types.h" 26 27 27 28 struct pagefault { 28 29 u64 page_addr; ··· 75 74 } 76 75 77 76 static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma, 78 - bool atomic, unsigned int id) 77 + bool atomic, struct xe_vram_region *vram) 79 78 { 80 79 struct xe_bo *bo = xe_vma_bo(vma); 81 80 struct xe_vm *vm = xe_vma_vm(vma); ··· 85 84 if (err) 86 85 return err; 87 86 88 - if (atomic && IS_DGFX(vm->xe)) { 87 + if (atomic && vram) { 88 + xe_assert(vm->xe, IS_DGFX(vm->xe)); 89 + 89 90 if (xe_vma_is_userptr(vma)) { 90 91 err = -EACCES; 91 92 return err; 92 93 } 93 94 94 95 /* Migrate to VRAM, move should invalidate the VMA first */ 95 - err = xe_bo_migrate(bo, XE_PL_VRAM0 + id); 96 + err = xe_bo_migrate(bo, vram->placement); 96 97 if (err) 97 98 return err; 98 99 } else if (bo) { ··· 141 138 /* Lock VM and BOs dma-resv */ 142 139 drm_exec_init(&exec, 0, 0); 143 140 drm_exec_until_all_locked(&exec) { 144 - err = xe_pf_begin(&exec, vma, atomic, tile->id); 141 + err = xe_pf_begin(&exec, vma, atomic, tile->mem.vram); 145 142 drm_exec_retry_on_contention(&exec); 146 143 if (xe_vm_validate_should_retry(&exec, err, &end)) 147 144 err = -EAGAIN; ··· 576 573 /* Lock VM and BOs dma-resv */ 577 574 drm_exec_init(&exec, 0, 0); 578 575 drm_exec_until_all_locked(&exec) { 579 - ret = xe_pf_begin(&exec, vma, true, tile->id); 576 + ret = xe_pf_begin(&exec, vma, true, tile->mem.vram); 580 577 drm_exec_retry_on_contention(&exec); 581 578 if (ret) 582 579 break;
+2 -1
drivers/gpu/drm/xe/xe_query.c
··· 27 27 #include "xe_oa.h" 28 28 #include "xe_pxp.h" 29 29 #include "xe_ttm_vram_mgr.h" 30 + #include "xe_vram_types.h" 30 31 #include "xe_wa.h" 31 32 32 33 static const u16 xe_to_user_engine_class[] = { ··· 411 410 gt_list->gt_list[iter].near_mem_regions = 0x1; 412 411 else 413 412 gt_list->gt_list[iter].near_mem_regions = 414 - BIT(gt_to_tile(gt)->id) << 1; 413 + BIT(gt_to_tile(gt)->mem.vram->id) << 1; 415 414 gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^ 416 415 gt_list->gt_list[iter].near_mem_regions; 417 416
+19 -24
drivers/gpu/drm/xe/xe_svm.c
··· 311 311 struct page *page) 312 312 { 313 313 u64 dpa; 314 - struct xe_tile *tile = vr->tile; 315 314 u64 pfn = page_to_pfn(page); 316 315 u64 offset; 317 316 318 - xe_tile_assert(tile, is_device_private_page(page)); 319 - xe_tile_assert(tile, (pfn << PAGE_SHIFT) >= vr->hpa_base); 317 + xe_assert(vr->xe, is_device_private_page(page)); 318 + xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base); 320 319 321 320 offset = (pfn << PAGE_SHIFT) - vr->hpa_base; 322 321 dpa = vr->dpa_base + offset; ··· 332 333 unsigned long npages, const enum xe_svm_copy_dir dir) 333 334 { 334 335 struct xe_vram_region *vr = NULL; 335 - struct xe_tile *tile; 336 + struct xe_device *xe; 336 337 struct dma_fence *fence = NULL; 337 338 unsigned long i; 338 339 #define XE_VRAM_ADDR_INVALID ~0x0ull ··· 365 366 366 367 if (!vr && spage) { 367 368 vr = page_to_vr(spage); 368 - tile = vr->tile; 369 + xe = vr->xe; 369 370 } 370 371 XE_WARN_ON(spage && page_to_vr(spage) != vr); 371 372 ··· 397 398 398 399 if (vram_addr != XE_VRAM_ADDR_INVALID) { 399 400 if (sram) { 400 - vm_dbg(&tile->xe->drm, 401 + vm_dbg(&xe->drm, 401 402 "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld", 402 403 vram_addr, (u64)dma_addr[pos], i - pos + incr); 403 - __fence = xe_migrate_from_vram(tile->migrate, 404 + __fence = xe_migrate_from_vram(vr->migrate, 404 405 i - pos + incr, 405 406 vram_addr, 406 407 dma_addr + pos); 407 408 } else { 408 - vm_dbg(&tile->xe->drm, 409 + vm_dbg(&xe->drm, 409 410 "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld", 410 411 (u64)dma_addr[pos], vram_addr, i - pos + incr); 411 - __fence = xe_migrate_to_vram(tile->migrate, 412 + __fence = xe_migrate_to_vram(vr->migrate, 412 413 i - pos + incr, 413 414 dma_addr + pos, 414 415 vram_addr); ··· 433 434 /* Extra mismatched device page, copy it */ 434 435 if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) { 435 436 if (sram) { 436 - vm_dbg(&tile->xe->drm, 437 + vm_dbg(&xe->drm, 437 438 "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d", 
438 439 vram_addr, (u64)dma_addr[pos], 1); 439 - __fence = xe_migrate_from_vram(tile->migrate, 1, 440 + __fence = xe_migrate_from_vram(vr->migrate, 1, 440 441 vram_addr, 441 442 dma_addr + pos); 442 443 } else { 443 - vm_dbg(&tile->xe->drm, 444 + vm_dbg(&xe->drm, 444 445 "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d", 445 446 (u64)dma_addr[pos], vram_addr, 1); 446 - __fence = xe_migrate_to_vram(tile->migrate, 1, 447 + __fence = xe_migrate_to_vram(vr->migrate, 1, 447 448 dma_addr + pos, 448 449 vram_addr); 449 450 } ··· 501 502 return PHYS_PFN(offset + vr->hpa_base); 502 503 } 503 504 504 - static struct drm_buddy *tile_to_buddy(struct xe_tile *tile) 505 + static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram) 505 506 { 506 - return &tile->mem.vram->ttm.mm; 507 + return &vram->ttm.mm; 507 508 } 508 509 509 510 static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation, ··· 517 518 518 519 list_for_each_entry(block, blocks, link) { 519 520 struct xe_vram_region *vr = block->private; 520 - struct xe_tile *tile = vr->tile; 521 - struct drm_buddy *buddy = tile_to_buddy(tile); 521 + struct drm_buddy *buddy = vram_to_buddy(vr); 522 522 u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block)); 523 523 int i; 524 524 ··· 683 685 unsigned long timeslice_ms) 684 686 { 685 687 struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap); 686 - struct xe_tile *tile = vr->tile; 687 - struct xe_device *xe = tile_to_xe(tile); 688 + struct xe_device *xe = vr->xe; 688 689 struct device *dev = xe->drm.dev; 689 690 struct drm_buddy_block *block; 690 691 struct list_head *blocks; ··· 697 700 xe_pm_runtime_get(xe); 698 701 699 702 retry: 700 - bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL, end - start, 703 + bo = xe_bo_create_locked(vr->xe, NULL, NULL, end - start, 701 704 ttm_bo_type_device, 702 - XE_BO_FLAG_VRAM_IF_DGFX(tile) | 705 + (IS_DGFX(xe) ? 
XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) | 703 706 XE_BO_FLAG_CPU_ADDR_MIRROR); 704 707 if (IS_ERR(bo)) { 705 708 err = PTR_ERR(bo); ··· 709 712 } 710 713 711 714 drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm, 712 - &dpagemap_devmem_ops, 713 - &tile->mem.vram->dpagemap, 714 - end - start); 715 + &dpagemap_devmem_ops, dpagemap, end - start); 715 716 716 717 blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks; 717 718 list_for_each_entry(block, blocks, link)
+12 -25
drivers/gpu/drm/xe/xe_tile.c
··· 7 7 8 8 #include <drm/drm_managed.h> 9 9 10 + #include "xe_bo.h" 10 11 #include "xe_device.h" 11 12 #include "xe_ggtt.h" 12 13 #include "xe_gt.h" ··· 119 118 if (!IS_DGFX(xe)) 120 119 return 0; 121 120 122 - vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL); 123 - if (!vram) 124 - return -ENOMEM; 125 - 126 - vram->tile = tile; 121 + vram = xe_vram_region_alloc(xe, tile->id, XE_PL_VRAM0 + tile->id); 122 + if (IS_ERR(vram)) 123 + return PTR_ERR(vram); 127 124 tile->mem.vram = vram; 128 125 129 126 return 0; ··· 159 160 } 160 161 ALLOW_ERROR_INJECTION(xe_tile_init_early, ERRNO); /* See xe_pci_probe() */ 161 162 162 - static int tile_ttm_mgr_init(struct xe_tile *tile) 163 - { 164 - struct xe_device *xe = tile_to_xe(tile); 165 - int err; 166 - 167 - if (tile->mem.vram) { 168 - err = xe_ttm_vram_mgr_init(tile, &tile->mem.vram->ttm); 169 - if (err) 170 - return err; 171 - xe->info.mem_region_mask |= BIT(tile->id) << 1; 172 - } 173 - 174 - return 0; 175 - } 176 - 177 163 /** 178 164 * xe_tile_init_noalloc - Init tile up to the point where allocations can happen. 179 165 * @tile: The tile to initialize. ··· 176 192 int xe_tile_init_noalloc(struct xe_tile *tile) 177 193 { 178 194 struct xe_device *xe = tile_to_xe(tile); 179 - int err; 180 - 181 - err = tile_ttm_mgr_init(tile); 182 - if (err) 183 - return err; 184 195 185 196 xe_wa_apply_tile_workarounds(tile); 186 197 187 198 if (xe->info.has_usm && IS_DGFX(xe)) 188 199 xe_devm_add(tile, tile->mem.vram); 200 + 201 + if (IS_DGFX(xe) && !ttm_resource_manager_used(&tile->mem.vram->ttm.manager)) { 202 + int err = xe_ttm_vram_mgr_init(xe, tile->mem.vram); 203 + 204 + if (err) 205 + return err; 206 + xe->info.mem_region_mask |= BIT(tile->mem.vram->id) << 1; 207 + } 189 208 190 209 return xe_tile_sysfs_init(tile); 191 210 }
+11 -5
drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
··· 338 338 return drmm_add_action_or_reset(&xe->drm, ttm_vram_mgr_fini, mgr); 339 339 } 340 340 341 - int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr) 341 + /** 342 + * xe_ttm_vram_mgr_init - initialize TTM VRAM region 343 + * @xe: pointer to Xe device 344 + * @vram: pointer to xe_vram_region that contains the memory region attributes 345 + * 346 + * Initialize the Xe TTM for given @vram region using the given parameters. 347 + * 348 + * Returns 0 for success, negative error code otherwise. 349 + */ 350 + int xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_vram_region *vram) 342 351 { 343 - struct xe_device *xe = tile_to_xe(tile); 344 - struct xe_vram_region *vram = tile->mem.vram; 345 - 346 - return __xe_ttm_vram_mgr_init(xe, mgr, XE_PL_VRAM0 + tile->id, 352 + return __xe_ttm_vram_mgr_init(xe, &vram->ttm, vram->placement, 347 353 xe_vram_region_usable_size(vram), 348 354 xe_vram_region_io_size(vram), 349 355 PAGE_SIZE);
+2 -1
drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
··· 11 11 enum dma_data_direction; 12 12 struct xe_device; 13 13 struct xe_tile; 14 + struct xe_vram_region; 14 15 15 16 int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr, 16 17 u32 mem_type, u64 size, u64 io_size, 17 18 u64 default_page_size); 18 - int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr); 19 + int xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_vram_region *vram); 19 20 int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe, 20 21 struct ttm_resource *res, 21 22 u64 offset, u64 length,
+91 -60
drivers/gpu/drm/xe/xe_vram.c
··· 20 20 #include "xe_mmio.h" 21 21 #include "xe_module.h" 22 22 #include "xe_sriov.h" 23 + #include "xe_ttm_vram_mgr.h" 23 24 #include "xe_vram.h" 24 25 #include "xe_vram_types.h" 25 26 ··· 139 138 return true; 140 139 } 141 140 142 - static int determine_lmem_bar_size(struct xe_device *xe) 141 + static int determine_lmem_bar_size(struct xe_device *xe, struct xe_vram_region *lmem_bar) 143 142 { 144 143 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); 145 144 ··· 150 149 151 150 resize_vram_bar(xe); 152 151 153 - xe->mem.vram->io_start = pci_resource_start(pdev, LMEM_BAR); 154 - xe->mem.vram->io_size = pci_resource_len(pdev, LMEM_BAR); 155 - if (!xe->mem.vram->io_size) 152 + lmem_bar->io_start = pci_resource_start(pdev, LMEM_BAR); 153 + lmem_bar->io_size = pci_resource_len(pdev, LMEM_BAR); 154 + if (!lmem_bar->io_size) 156 155 return -EIO; 157 156 158 157 /* XXX: Need to change when xe link code is ready */ 159 - xe->mem.vram->dpa_base = 0; 158 + lmem_bar->dpa_base = 0; 160 159 161 160 /* set up a map to the total memory area. 
*/ 162 - xe->mem.vram->mapping = devm_ioremap_wc(&pdev->dev, xe->mem.vram->io_start, 163 - xe->mem.vram->io_size); 161 + lmem_bar->mapping = devm_ioremap_wc(&pdev->dev, lmem_bar->io_start, lmem_bar->io_size); 164 162 165 163 return 0; 166 164 } ··· 287 287 tile->mem.vram->mapping = NULL; 288 288 } 289 289 290 + struct xe_vram_region *xe_vram_region_alloc(struct xe_device *xe, u8 id, u32 placement) 291 + { 292 + struct xe_vram_region *vram; 293 + struct drm_device *drm = &xe->drm; 294 + 295 + xe_assert(xe, id < xe->info.tile_count); 296 + 297 + vram = drmm_kzalloc(drm, sizeof(*vram), GFP_KERNEL); 298 + if (!vram) 299 + return NULL; 300 + 301 + vram->xe = xe; 302 + vram->id = id; 303 + vram->placement = placement; 304 + #if defined(CONFIG_DRM_XE_PAGEMAP) 305 + vram->migrate = xe->tiles[id].migrate; 306 + #endif 307 + return vram; 308 + } 309 + 310 + static void print_vram_region_info(struct xe_device *xe, struct xe_vram_region *vram) 311 + { 312 + struct drm_device *drm = &xe->drm; 313 + 314 + if (vram->io_size < vram->usable_size) 315 + drm_info(drm, "Small BAR device\n"); 316 + 317 + drm_info(drm, 318 + "VRAM[%u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n", 319 + vram->id, &vram->actual_physical_size, &vram->usable_size, &vram->io_size); 320 + drm_info(drm, "VRAM[%u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n", 321 + vram->id, &vram->dpa_base, vram->dpa_base + (u64)vram->actual_physical_size, 322 + &vram->io_start, vram->io_start + (u64)vram->io_size); 323 + } 324 + 325 + static int vram_region_init(struct xe_device *xe, struct xe_vram_region *vram, 326 + struct xe_vram_region *lmem_bar, u64 offset, u64 usable_size, 327 + u64 region_size, resource_size_t remain_io_size) 328 + { 329 + /* Check if VRAM region is already initialized */ 330 + if (vram->mapping) 331 + return 0; 332 + 333 + vram->actual_physical_size = region_size; 334 + vram->io_start = lmem_bar->io_start + offset; 335 + vram->io_size = min_t(u64, 
usable_size, remain_io_size); 336 + 337 + if (!vram->io_size) { 338 + drm_err(&xe->drm, "Tile without any CPU visible VRAM. Aborting.\n"); 339 + return -ENODEV; 340 + } 341 + 342 + vram->dpa_base = lmem_bar->dpa_base + offset; 343 + vram->mapping = lmem_bar->mapping + offset; 344 + vram->usable_size = usable_size; 345 + 346 + print_vram_region_info(xe, vram); 347 + 348 + return 0; 349 + } 350 + 290 351 /** 291 352 * xe_vram_probe() - Probe VRAM configuration 292 353 * @xe: the &xe_device ··· 359 298 int xe_vram_probe(struct xe_device *xe) 360 299 { 361 300 struct xe_tile *tile; 362 - resource_size_t io_size; 301 + struct xe_vram_region lmem_bar; 302 + resource_size_t remain_io_size; 363 303 u64 available_size = 0; 364 304 u64 total_size = 0; 365 - u64 tile_offset; 366 - u64 tile_size; 367 - u64 vram_size; 368 305 int err; 369 306 u8 id; 370 307 371 308 if (!IS_DGFX(xe)) 372 309 return 0; 373 310 374 - /* Get the size of the root tile's vram for later accessibility comparison */ 375 - tile = xe_device_get_root_tile(xe); 376 - err = tile_vram_size(tile, &vram_size, &tile_size, &tile_offset); 311 + err = determine_lmem_bar_size(xe, &lmem_bar); 377 312 if (err) 378 313 return err; 314 + drm_info(&xe->drm, "VISIBLE VRAM: %pa, %pa\n", &lmem_bar.io_start, &lmem_bar.io_size); 379 315 380 - err = determine_lmem_bar_size(xe); 381 - if (err) 382 - return err; 316 + remain_io_size = lmem_bar.io_size; 383 317 384 - drm_info(&xe->drm, "VISIBLE VRAM: %pa, %pa\n", &xe->mem.vram->io_start, 385 - &xe->mem.vram->io_size); 386 - 387 - io_size = xe->mem.vram->io_size; 388 - 389 - /* tile specific ranges */ 390 318 for_each_tile(tile, xe, id) { 391 - err = tile_vram_size(tile, &vram_size, &tile_size, &tile_offset); 319 + u64 region_size; 320 + u64 usable_size; 321 + u64 tile_offset; 322 + 323 + err = tile_vram_size(tile, &usable_size, &region_size, &tile_offset); 392 324 if (err) 393 325 return err; 394 326 395 - tile->mem.vram->actual_physical_size = tile_size; 396 - 
tile->mem.vram->io_start = xe->mem.vram->io_start + tile_offset; 397 - tile->mem.vram->io_size = min_t(u64, vram_size, io_size); 327 + total_size += region_size; 328 + available_size += usable_size; 398 329 399 - if (!tile->mem.vram->io_size) { 400 - drm_err(&xe->drm, "Tile without any CPU visible VRAM. Aborting.\n"); 401 - return -ENODEV; 402 - } 330 + err = vram_region_init(xe, tile->mem.vram, &lmem_bar, tile_offset, usable_size, 331 + region_size, remain_io_size); 332 + if (err) 333 + return err; 403 334 404 - tile->mem.vram->dpa_base = xe->mem.vram->dpa_base + tile_offset; 405 - tile->mem.vram->usable_size = vram_size; 406 - tile->mem.vram->mapping = xe->mem.vram->mapping + tile_offset; 407 - 408 - if (tile->mem.vram->io_size < tile->mem.vram->usable_size) 409 - drm_info(&xe->drm, "Small BAR device\n"); 410 - drm_info(&xe->drm, 411 - "VRAM[%u, %u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n", 412 - id, tile->id, &tile->mem.vram->actual_physical_size, 413 - &tile->mem.vram->usable_size, &tile->mem.vram->io_size); 414 - drm_info(&xe->drm, "VRAM[%u, %u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n", 415 - id, tile->id, &tile->mem.vram->dpa_base, 416 - tile->mem.vram->dpa_base + (u64)tile->mem.vram->actual_physical_size, 417 - &tile->mem.vram->io_start, 418 - tile->mem.vram->io_start + (u64)tile->mem.vram->io_size); 419 - 420 - /* calculate total size using tile size to get the correct HW sizing */ 421 - total_size += tile_size; 422 - available_size += vram_size; 423 - 424 - if (total_size > xe->mem.vram->io_size) { 335 + if (total_size > lmem_bar.io_size) { 425 336 drm_info(&xe->drm, "VRAM: %pa is larger than resource %pa\n", 426 - &total_size, &xe->mem.vram->io_size); 337 + &total_size, &lmem_bar.io_size); 427 338 } 428 339 429 - io_size -= min_t(u64, tile_size, io_size); 340 + remain_io_size -= min_t(u64, tile->mem.vram->actual_physical_size, remain_io_size); 430 341 } 431 342 432 - xe->mem.vram->actual_physical_size = 
total_size; 433 - 434 - drm_info(&xe->drm, "Total VRAM: %pa, %pa\n", &xe->mem.vram->io_start, 435 - &xe->mem.vram->actual_physical_size); 436 - drm_info(&xe->drm, "Available VRAM: %pa, %pa\n", &xe->mem.vram->io_start, 437 - &available_size); 343 + err = vram_region_init(xe, xe->mem.vram, &lmem_bar, 0, available_size, total_size, 344 + lmem_bar.io_size); 345 + if (err) 346 + return err; 438 347 439 348 return devm_add_action_or_reset(xe->drm.dev, vram_fini, xe); 440 349 }
+2
drivers/gpu/drm/xe/xe_vram.h
··· 13 13 14 14 int xe_vram_probe(struct xe_device *xe); 15 15 16 + struct xe_vram_region *xe_vram_region_alloc(struct xe_device *xe, u8 id, u32 placement); 17 + 16 18 resource_size_t xe_vram_region_io_start(const struct xe_vram_region *vram); 17 19 resource_size_t xe_vram_region_io_size(const struct xe_vram_region *vram); 18 20 resource_size_t xe_vram_region_dpa_base(const struct xe_vram_region *vram);
+14 -3
drivers/gpu/drm/xe/xe_vram_types.h
··· 12 12 13 13 #include "xe_ttm_vram_mgr_types.h" 14 14 15 - struct xe_tile; 15 + struct xe_device; 16 + struct xe_migrate; 16 17 17 18 /** 18 19 * struct xe_vram_region - memory region structure ··· 21 20 * device, such as HBM memory or CXL extension memory. 22 21 */ 23 22 struct xe_vram_region { 24 - /** @tile: Back pointer to tile */ 25 - struct xe_tile *tile; 23 + /** @xe: Back pointer to xe device */ 24 + struct xe_device *xe; 25 + /** 26 + * @id: VRAM region instance id 27 + * 28 + * The value should be unique for VRAM region. 29 + */ 30 + u8 id; 26 31 /** @io_start: IO start address of this VRAM instance */ 27 32 resource_size_t io_start; 28 33 /** ··· 61 54 void __iomem *mapping; 62 55 /** @ttm: VRAM TTM manager */ 63 56 struct xe_ttm_vram_mgr ttm; 57 + /** @placement: TTM placement dedicated for this region */ 58 + u32 placement; 64 59 #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) 60 + /** @migrate: Back pointer to migrate */ 61 + struct xe_migrate *migrate; 65 62 /** @pagemap: Used to remap device memory as ZONE_DEVICE */ 66 63 struct dev_pagemap pagemap; 67 64 /**