
drm/vmwgfx: switch the TTM backends to self alloc

Allocate and initialize the TTM resource in the backends themselves, similar to what the TTM range manager does.
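Concretely, "self alloc" means the backend's get_node() callback now allocates and initializes its resource node itself instead of leaving that to common TTM code. A minimal sketch of the pattern under the still-unchanged interface (editorial illustration, not part of the patch; the example_get_node name is hypothetical and error unwinding is trimmed):

#include <linux/slab.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_range_manager.h>

/* Hypothetical backend callback showing the self-alloc pattern. */
static int example_get_node(struct ttm_resource_manager *man,
			    struct ttm_buffer_object *bo,
			    const struct ttm_place *place,
			    struct ttm_resource *mem)
{
	struct ttm_range_mgr_node *node;

	/* The backend allocates its own node, sized for one drm_mm range. */
	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	/* ...and initializes the embedded ttm_resource itself. */
	ttm_resource_init(bo, place, &node->base);

	/* Backend-specific range/id allocation would go here. */

	/* The old interface still publishes the node through mm_node. */
	mem->mm_node = &node->mm_nodes[0];
	mem->start = node->mm_nodes[0].start;
	return 0;
}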

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602100914.46246-9-christian.koenig@amd.com

2 files changed, +31 -24

drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c (+11 -7)
@@ ... @@
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 	int id;
 
+	mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
+	if (!mem->mm_node)
+		return -ENOMEM;
+
+	ttm_resource_init(bo, place, mem->mm_node);
+
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
 		return id;
@@ ... @@
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 
-	if (mem->mm_node) {
-		ida_free(&gman->gmr_ida, mem->start);
-		spin_lock(&gman->lock);
-		gman->used_gmr_pages -= mem->num_pages;
-		spin_unlock(&gman->lock);
-		mem->mm_node = NULL;
-	}
+	ida_free(&gman->gmr_ida, mem->start);
+	spin_lock(&gman->lock);
+	gman->used_gmr_pages -= mem->num_pages;
+	spin_unlock(&gman->lock);
+	kfree(mem->mm_node);
 }
 
 static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
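The alloc paths in this patch call ttm_resource_init() on the freshly allocated node. For reference, a sketch of roughly what that helper fills in at this point in the series, reconstructed from the contemporaneous TTM code rather than quoted from this patch:

void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	/* Reset the node/offset bookkeeping... */
	res->mm_node = NULL;
	res->start = 0;
	res->num_pages = PFN_UP(bo->base.size);
	/* ...and record the placement the caller asked for. */
	res->mem_type = place->mem_type;
	res->placement = place->flags;
	res->bus.addr = NULL;
	res->bus.offset = 0;
	res->bus.is_iomem = false;
	res->bus.caching = ttm_cached;
}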
drivers/gpu/drm/vmwgfx/vmwgfx_thp.c (+20 -17)
@@ ... @@
 #include "vmwgfx_drv.h"
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 /**
  * struct vmw_thp_manager - Range manager implementing huge page alignment
@@ ... @@
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
 	struct drm_mm *mm = &rman->mm;
-	struct drm_mm_node *node;
+	struct ttm_range_mgr_node *node;
 	unsigned long align_pages;
 	unsigned long lpfn;
 	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
 	int ret;
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
 	if (!node)
 		return -ENOMEM;
+
+	ttm_resource_init(bo, place, &node->base);
 
 	lpfn = place->lpfn;
 	if (!lpfn)
@@ ... @@
 	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
 		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
 		if (mem->num_pages >= align_pages) {
-			ret = vmw_thp_insert_aligned(bo, mm, node, align_pages,
-						     place, mem, lpfn, mode);
+			ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+						     align_pages, place, mem,
+						     lpfn, mode);
 			if (!ret)
 				goto found_unlock;
 		}
@@ ... @@
 	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
 	if (mem->num_pages >= align_pages) {
-		ret = vmw_thp_insert_aligned(bo, mm, node, align_pages, place,
-					     mem, lpfn, mode);
+		ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
+					     align_pages, place, mem, lpfn,
+					     mode);
 		if (!ret)
 			goto found_unlock;
 	}
 
-	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
-					  bo->page_alignment, 0,
+	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
+					  mem->num_pages, bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 found_unlock:
 	spin_unlock(&rman->lock);
@@ ... @@
 	if (unlikely(ret)) {
 		kfree(node);
 	} else {
-		mem->mm_node = node;
-		mem->start = node->start;
+		mem->mm_node = &node->mm_nodes[0];
+		mem->start = node->mm_nodes[0].start;
 	}
 
 	return ret;
@@ ... @@
 				  struct ttm_resource *mem)
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
+	struct ttm_range_mgr_node *node = mem->mm_node;
 
-	if (mem->mm_node) {
-		spin_lock(&rman->lock);
-		drm_mm_remove_node(mem->mm_node);
-		spin_unlock(&rman->lock);
+	spin_lock(&rman->lock);
+	drm_mm_remove_node(&node->mm_nodes[0]);
+	spin_unlock(&rman->lock);
 
-		kfree(mem->mm_node);
-		mem->mm_node = NULL;
-	}
+	kfree(node);
 }
 
 int vmw_thp_init(struct vmw_private *dev_priv)
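The <drm/ttm/ttm_range_manager.h> include added above provides the node type the THP manager now embeds. A sketch of it as introduced earlier in this series (quoted from memory; see the header for the authoritative version). The flexible mm_nodes[] array is why the allocation uses struct_size(node, mm_nodes, 1):

/* Sketch of the node type from <drm/ttm/ttm_range_manager.h>. */
struct ttm_range_mgr_node {
	struct ttm_resource base;	/* embedded, backend-initialized resource */
	struct drm_mm_node mm_nodes[];	/* flexible array of managed ranges */
};

static inline struct ttm_range_mgr_node *
to_ttm_range_mgr_node(struct ttm_resource *res)
{
	return container_of(res, struct ttm_range_mgr_node, base);
}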