Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe: Introduce xe_ggtt_largest_hole

Introduce a new xe_ggtt_largest_hole helper that addresses the SR-IOV
demand and continues toward the goal of limiting drm_mm access to xe_ggtt.

v2: Fix a typo (Michal)

Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240821193842.352557-8-rodrigo.vivi@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>

+38 -21
+35
drivers/gpu/drm/xe/xe_ggtt.c
··· 584 584 bo->flags & XE_BO_FLAG_GGTT_INVALIDATE); 585 585 } 586 586 587 + /** 588 + * xe_ggtt_largest_hole - Largest GGTT hole 589 + * @ggtt: the &xe_ggtt that will be inspected 590 + * @alignment: minimum alignment 591 + * @spare: If not NULL: in: desired memory size to be spared / out: Adjusted possible spare 592 + * 593 + * Return: size of the largest continuous GGTT region 594 + */ 595 + u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare) 596 + { 597 + const struct drm_mm *mm = &ggtt->mm; 598 + const struct drm_mm_node *entry; 599 + u64 hole_min_start = xe_wopcm_size(tile_to_xe(ggtt->tile)); 600 + u64 hole_start, hole_end, hole_size; 601 + u64 max_hole = 0; 602 + 603 + mutex_lock(&ggtt->lock); 604 + 605 + drm_mm_for_each_hole(entry, mm, hole_start, hole_end) { 606 + hole_start = max(hole_start, hole_min_start); 607 + hole_start = ALIGN(hole_start, alignment); 608 + hole_end = ALIGN_DOWN(hole_end, alignment); 609 + if (hole_start >= hole_end) 610 + continue; 611 + hole_size = hole_end - hole_start; 612 + if (spare) 613 + *spare -= min3(*spare, hole_size, max_hole); 614 + max_hole = max(max_hole, hole_size); 615 + } 616 + 617 + mutex_unlock(&ggtt->lock); 618 + 619 + return max_hole; 620 + } 621 + 587 622 #ifdef CONFIG_PCI_IOV 588 623 static u64 xe_encode_vfid_pte(u16 vfid) 589 624 {
+1
drivers/gpu/drm/xe/xe_ggtt.h
··· 29 29 int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, 30 30 u64 start, u64 end); 31 31 void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo); 32 + u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare); 32 33 33 34 int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p); 34 35
+2 -21
drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
··· 590 590 static u64 pf_get_max_ggtt(struct xe_gt *gt) 591 591 { 592 592 struct xe_ggtt *ggtt = gt_to_tile(gt)->mem.ggtt; 593 - const struct drm_mm *mm = &ggtt->mm; 594 - const struct drm_mm_node *entry; 595 593 u64 alignment = pf_get_ggtt_alignment(gt); 596 594 u64 spare = pf_get_spare_ggtt(gt); 597 - u64 hole_min_start = xe_wopcm_size(gt_to_xe(gt)); 598 - u64 hole_start, hole_end, hole_size; 599 - u64 max_hole = 0; 595 + u64 max_hole; 600 596 601 - mutex_lock(&ggtt->lock); 602 - 603 - drm_mm_for_each_hole(entry, mm, hole_start, hole_end) { 604 - hole_start = max(hole_start, hole_min_start); 605 - hole_start = ALIGN(hole_start, alignment); 606 - hole_end = ALIGN_DOWN(hole_end, alignment); 607 - if (hole_start >= hole_end) 608 - continue; 609 - hole_size = hole_end - hole_start; 610 - xe_gt_sriov_dbg_verbose(gt, "HOLE start %llx size %lluK\n", 611 - hole_start, hole_size / SZ_1K); 612 - spare -= min3(spare, hole_size, max_hole); 613 - max_hole = max(max_hole, hole_size); 614 - } 615 - 616 - mutex_unlock(&ggtt->lock); 597 + max_hole = xe_ggtt_largest_hole(ggtt, alignment, &spare); 617 598 618 599 xe_gt_sriov_dbg_verbose(gt, "HOLE max %lluK reserved %lluK\n", 619 600 max_hole / SZ_1K, spare / SZ_1K);