
drm/ttm: flip the switch for driver allocated resources v2

Instead of both driver and TTM allocating memory, finalize embedding the
ttm_resource object as base into the driver backends.

v2: fix typo in vmwgfx gmrid mgr and double init in amdgpu_vram_mgr.c

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602100914.46246-10-christian.koenig@amd.com
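
In short, the idiom this patch completes looks like the following. This is a
minimal sketch, not code from the patch: my_mgr_node, my_mgr_alloc and
my_mgr_free are hypothetical names standing in for the real backends touched
below (amdgpu_gtt_node, ttm_range_mgr_node, nouveau_mem). The driver embeds
struct ttm_resource as the base of its own node, hands the embedded base back
through the new double-pointer alloc interface, and recovers its node with
container_of() instead of the removed mm_node pointer:

#include <linux/slab.h>
#include <drm/ttm/ttm_resource.h>

/* Hypothetical backend: the resource is embedded, not pointed to. */
struct my_mgr_node {
	struct ttm_resource base;
	/* driver-private allocator state follows */
};

static int my_mgr_alloc(struct ttm_resource_manager *man,
			struct ttm_buffer_object *bo,
			const struct ttm_place *place,
			struct ttm_resource **res)
{
	struct my_mgr_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return -ENOMEM;

	ttm_resource_init(bo, place, &node->base);
	/* ... reserve backend space, set node->base.start ... */
	*res = &node->base;	/* return the embedded base to TTM */
	return 0;
}

static void my_mgr_free(struct ttm_resource_manager *man,
			struct ttm_resource *res)
{
	/* typed upcast replaces the old res->mm_node detour */
	struct my_mgr_node *node = container_of(res, struct my_mgr_node, base);

	kfree(node);
}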

+140 -189
+19 -25
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
···
 static inline struct amdgpu_gtt_node *
 to_amdgpu_gtt_node(struct ttm_resource *res)
 {
-	return container_of(res->mm_node, struct amdgpu_gtt_node,
-			    base.mm_nodes[0]);
+	return container_of(res, struct amdgpu_gtt_node, base.base);
 }
 
 /**
···
 /**
  * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
  *
- * @mem: the mem object to check
+ * @res: the mem object to check
  *
  * Check if a mem object has already address space allocated.
  */
-bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
 {
-	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
+	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
 
 	return drm_mm_node_allocated(&node->base.mm_nodes[0]);
 }
···
 static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 			      struct ttm_buffer_object *tbo,
 			      const struct ttm_place *place,
-			      struct ttm_resource *mem)
+			      struct ttm_resource **res)
 {
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+	uint32_t num_pages = PFN_UP(tbo->base.size);
 	struct amdgpu_gtt_node *node;
 	int r;
 
 	spin_lock(&mgr->lock);
-	if ((tbo->resource == mem || tbo->resource->mem_type != TTM_PL_TT) &&
-	    atomic64_read(&mgr->available) < mem->num_pages) {
+	if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
+	    atomic64_read(&mgr->available) < num_pages) {
 		spin_unlock(&mgr->lock);
 		return -ENOSPC;
 	}
-	atomic64_sub(mem->num_pages, &mgr->available);
+	atomic64_sub(num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
 
 	node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
···
 		spin_lock(&mgr->lock);
 		r = drm_mm_insert_node_in_range(&mgr->mm,
 						&node->base.mm_nodes[0],
-						mem->num_pages,
-						tbo->page_alignment, 0,
-						place->fpfn, place->lpfn,
+						num_pages, tbo->page_alignment,
+						0, place->fpfn, place->lpfn,
 						DRM_MM_INSERT_BEST);
 		spin_unlock(&mgr->lock);
 		if (unlikely(r))
 			goto err_free;
 
-		mem->start = node->base.mm_nodes[0].start;
+		node->base.base.start = node->base.mm_nodes[0].start;
 	} else {
 		node->base.mm_nodes[0].start = 0;
-		node->base.mm_nodes[0].size = mem->num_pages;
-		mem->start = AMDGPU_BO_INVALID_OFFSET;
+		node->base.mm_nodes[0].size = node->base.base.num_pages;
+		node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
 	}
 
-	mem->mm_node = &node->base.mm_nodes[0];
+	*res = &node->base.base;
 	return 0;
 
 err_free:
 	kfree(node);
 
 err_out:
-	atomic64_add(mem->num_pages, &mgr->available);
+	atomic64_add(num_pages, &mgr->available);
 
 	return r;
 }
···
  * Free the allocated GTT again.
  */
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
-			       struct ttm_resource *mem)
+			       struct ttm_resource *res)
 {
+	struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
 	struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-	struct amdgpu_gtt_node *node;
-
-	if (!mem->mm_node)
-		return;
-
-	node = to_amdgpu_gtt_node(mem);
 
 	spin_lock(&mgr->lock);
 	if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
 		drm_mm_remove_node(&node->base.mm_nodes[0]);
 	spin_unlock(&mgr->lock);
-	atomic64_add(mem->num_pages, &mgr->available);
+	atomic64_add(res->num_pages, &mgr->available);
 
 	kfree(node);
 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
···
 	if (bo->base.resv == &bo->base._resv)
 		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
 
-	if (bo->resource->mem_type != TTM_PL_VRAM || !bo->resource->mm_node ||
+	if (bo->resource->mem_type != TTM_PL_VRAM ||
 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
 		return;
 
+3 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
···
 
 #include <drm/drm_mm.h>
 #include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_range_manager.h>
 
 /* state back for walking over vram_mgr and gtt_mgr allocations */
 struct amdgpu_res_cursor {
···
 {
 	struct drm_mm_node *node;
 
-	if (!res || !res->mm_node) {
+	if (!res) {
 		cur->start = start;
 		cur->size = size;
 		cur->remaining = size;
···
 
 	BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
 
-	node = res->mm_node;
+	node = to_ttm_range_mgr_node(res)->mm_nodes;
 	while (start >= node->size << PAGE_SHIFT)
 		start -= node++->size << PAGE_SHIFT;
 
+27 -33
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
···
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct ttm_resource *mem = bo->tbo.resource;
-	struct drm_mm_node *nodes = mem->mm_node;
-	unsigned pages = mem->num_pages;
+	struct ttm_resource *res = bo->tbo.resource;
+	unsigned pages = res->num_pages;
+	struct drm_mm_node *mm;
 	u64 usage;
 
 	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
 		return amdgpu_bo_size(bo);
 
-	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
 		return 0;
 
-	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
-		usage += amdgpu_vram_mgr_vis_size(adev, nodes);
+	mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
+	for (usage = 0; pages; pages -= mm->size, mm++)
+		usage += amdgpu_vram_mgr_vis_size(adev, mm);
 
 	return usage;
 }
···
 static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 			       struct ttm_buffer_object *tbo,
 			       const struct ttm_place *place,
-			       struct ttm_resource *mem)
+			       struct ttm_resource **res)
 {
 	unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
···
 	max_bytes -= AMDGPU_VM_RESERVED_VRAM;
 
 	/* bail out quickly if there's likely not enough VRAM for this BO */
-	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
+	mem_bytes = tbo->base.size;
 	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
 		r = -ENOSPC;
 		goto error_sub;
···
 #endif
 		pages_per_node = max_t(uint32_t, pages_per_node,
 				       tbo->page_alignment);
-		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+		num_nodes = DIV_ROUND_UP(PFN_UP(mem_bytes), pages_per_node);
 	}
 
 	node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
···
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		mode = DRM_MM_INSERT_HIGH;
 
-	mem->start = 0;
-	pages_left = mem->num_pages;
+	pages_left = node->base.num_pages;
 
 	/* Limit maximum size to 2GB due to SG table limitations */
 	pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
···
 		}
 
 		vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
-		amdgpu_vram_mgr_virt_start(mem, &node->mm_nodes[i]);
+		amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
 		pages_left -= pages;
 		++i;
···
 	spin_unlock(&mgr->lock);
 
 	if (i == 1)
-		mem->placement |= TTM_PL_FLAG_CONTIGUOUS;
+		node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
 
 	atomic64_add(vis_usage, &mgr->vis_usage);
-	mem->mm_node = &node->mm_nodes[0];
+	*res = &node->base;
 	return 0;
 
 error_free:
···
  * Free the allocated VRAM again.
  */
 static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
-			        struct ttm_resource *mem)
+			        struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
-	struct ttm_range_mgr_node *node;
 	uint64_t usage = 0, vis_usage = 0;
-	unsigned pages = mem->num_pages;
-	struct drm_mm_node *nodes;
-
-	if (!mem->mm_node)
-		return;
-
-	node = to_ttm_range_mgr_node(mem);
-	nodes = &node->mm_nodes[0];
+	unsigned i, pages;
 
 	spin_lock(&mgr->lock);
-	while (pages) {
-		pages -= nodes->size;
-		drm_mm_remove_node(nodes);
-		usage += nodes->size << PAGE_SHIFT;
-		vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
-		++nodes;
+	for (i = 0, pages = res->num_pages; pages;
+	     pages -= node->mm_nodes[i].size, ++i) {
+		struct drm_mm_node *mm = &node->mm_nodes[i];
+
+		drm_mm_remove_node(mm);
+		usage += mm->size << PAGE_SHIFT;
+		vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
 	}
 	amdgpu_vram_mgr_do_reserve(man);
 	spin_unlock(&mgr->lock);
···
  * Allocate and fill a sg table from a VRAM allocation.
  */
 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
-			      struct ttm_resource *mem,
+			      struct ttm_resource *res,
 			      u64 offset, u64 length,
 			      struct device *dev,
 			      enum dma_data_direction dir,
···
 		return -ENOMEM;
 
 	/* Determine the number of DRM_MM nodes to export */
-	amdgpu_res_first(mem, offset, length, &cursor);
+	amdgpu_res_first(res, offset, length, &cursor);
 	while (cursor.remaining) {
 		num_entries++;
 		amdgpu_res_next(&cursor, cursor.size);
···
 	 * and the number of bytes from it. Access the following
 	 * DRM_MM node(s) if more buffer needs to exported
 	 */
-	amdgpu_res_first(mem, offset, length, &cursor);
+	amdgpu_res_first(res, offset, length, &cursor);
 	for_each_sgtable_sg((*sgt), sg, i) {
 		phys_addr_t phys = cursor.start + adev->gmc.aper_base;
 		size_t size = cursor.size;
+2 -1
drivers/gpu/drm/drm_gem_vram_helper.c
···
 static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
 {
 	/* Keep TTM behavior for now, remove when drivers are audited */
-	if (WARN_ON_ONCE(!gbo->bo.resource->mm_node))
+	if (WARN_ON_ONCE(!gbo->bo.resource ||
+			 gbo->bo.resource->mem_type == TTM_PL_SYSTEM))
 		return 0;
 
 	return gbo->bo.resource->start;
+2 -6
drivers/gpu/drm/nouveau/nouveau_bo.c
···
 		}
 	}
 
-	if (new_reg) {
-		if (new_reg->mm_node)
-			nvbo->offset = (new_reg->start << PAGE_SHIFT);
-		else
-			nvbo->offset = 0;
-	}
+	if (new_reg)
+		nvbo->offset = (new_reg->start << PAGE_SHIFT);
 
 }
+5 -6
drivers/gpu/drm/nouveau/nouveau_mem.c
···
 nouveau_mem_del(struct ttm_resource *reg)
 {
 	struct nouveau_mem *mem = nouveau_mem(reg);
-	if (!mem)
-		return;
+
 	nouveau_mem_fini(mem);
-	kfree(reg->mm_node);
-	reg->mm_node = NULL;
+	kfree(mem);
 }
 
 int
 nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
-		struct ttm_resource *reg)
+		struct ttm_resource **res)
 {
 	struct nouveau_mem *mem;
 
 	if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
 		return -ENOMEM;
+
 	mem->cli = cli;
 	mem->kind = kind;
 	mem->comp = comp;
 
-	reg->mm_node = mem;
+	*res = &mem->base;
 	return 0;
 }
+7 -7
drivers/gpu/drm/nouveau/nouveau_mem.h
···
 #include <nvif/mem.h>
 #include <nvif/vmm.h>
 
-static inline struct nouveau_mem *
-nouveau_mem(struct ttm_resource *reg)
-{
-	return reg->mm_node;
-}
-
 struct nouveau_mem {
 	struct ttm_resource base;
 	struct nouveau_cli *cli;
···
 	struct nvif_vma vma[2];
 };
 
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_resource *reg)
+{
+	return container_of(reg, struct nouveau_mem, base);
+}
+
 int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
-		    struct ttm_resource *);
+		    struct ttm_resource **);
 void nouveau_mem_del(struct ttm_resource *);
 int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
 int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
+16 -16
drivers/gpu/drm/nouveau/nouveau_ttm.c
···
 nouveau_vram_manager_new(struct ttm_resource_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_resource *reg)
+			 struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
···
 	if (drm->client.device.info.ram_size == 0)
 		return -ENOMEM;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	ttm_resource_init(bo, place, reg->mm_node);
+	ttm_resource_init(bo, place, *res);
 
-	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
+	ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
 	if (ret) {
-		nouveau_mem_del(reg);
+		nouveau_mem_del(*res);
 		return ret;
 	}
 
···
 nouveau_gart_manager_new(struct ttm_resource_manager *man,
 			 struct ttm_buffer_object *bo,
 			 const struct ttm_place *place,
-			 struct ttm_resource *reg)
+			 struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	int ret;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	ttm_resource_init(bo, place, reg->mm_node);
-	reg->start = 0;
+	ttm_resource_init(bo, place, *res);
+	(*res)->start = 0;
 	return 0;
 }
···
 nv04_gart_manager_new(struct ttm_resource_manager *man,
 		      struct ttm_buffer_object *bo,
 		      const struct ttm_place *place,
-		      struct ttm_resource *reg)
+		      struct ttm_resource **res)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_mem *mem;
 	int ret;
 
-	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
-	mem = nouveau_mem(reg);
+	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
 	if (ret)
 		return ret;
 
-	ttm_resource_init(bo, place, reg->mm_node);
+	mem = nouveau_mem(*res);
+	ttm_resource_init(bo, place, *res);
 	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
-			   (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
+			   (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
 	if (ret) {
-		nouveau_mem_del(reg);
+		nouveau_mem_del(*res);
 		return ret;
 	}
 
-	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
+	(*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
 	return 0;
 }
 
+8 -15
drivers/gpu/drm/ttm/ttm_range_manager.c
···
 static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 			       struct ttm_buffer_object *bo,
 			       const struct ttm_place *place,
-			       struct ttm_resource *mem)
+			       struct ttm_resource **res)
 {
 	struct ttm_range_manager *rman = to_range_manager(man);
 	struct ttm_range_mgr_node *node;
···
 
 	spin_lock(&rman->lock);
 	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
-					  mem->num_pages, bo->page_alignment, 0,
+					  node->base.num_pages,
+					  bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 	spin_unlock(&rman->lock);
 
-	if (unlikely(ret)) {
+	if (unlikely(ret))
 		kfree(node);
-	} else {
-		mem->mm_node = &node->mm_nodes[0];
-		mem->start = node->mm_nodes[0].start;
-	}
+	else
+		node->base.start = node->mm_nodes[0].start;
 
 	return ret;
 }
 
 static void ttm_range_man_free(struct ttm_resource_manager *man,
-			       struct ttm_resource *mem)
+			       struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct ttm_range_manager *rman = to_range_manager(man);
-	struct ttm_range_mgr_node *node;
-
-	if (!mem->mm_node)
-		return;
-
-	node = to_ttm_range_mgr_node(mem);
 
 	spin_lock(&rman->lock);
 	drm_mm_remove_node(&node->mm_nodes[0]);
 	spin_unlock(&rman->lock);
 
 	kfree(node);
-	mem->mm_node = NULL;
 }
 
 static void ttm_range_man_debug(struct ttm_resource_manager *man,
+1 -17
drivers/gpu/drm/ttm/ttm_resource.c
···
 			       const struct ttm_place *place,
 			       struct ttm_resource *res)
 {
-	res->mm_node = NULL;
 	res->start = 0;
 	res->num_pages = PFN_UP(bo->base.size);
 	res->mem_type = place->mem_type;
···
 {
 	struct ttm_resource_manager *man =
 		ttm_manager_type(bo->bdev, place->mem_type);
-	struct ttm_resource *res;
-	int r;
 
-	res = kmalloc(sizeof(*res), GFP_KERNEL);
-	if (!res)
-		return -ENOMEM;
-
-	ttm_resource_init(bo, place, res);
-	r = man->func->alloc(man, bo, place, res);
-	if (r) {
-		kfree(res);
-		return r;
-	}
-
-	*res_ptr = res;
-	return 0;
+	return man->func->alloc(man, bo, place, res_ptr);
 }
 
 void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
···
 
 	man = ttm_manager_type(bo->bdev, (*res)->mem_type);
 	man->func->free(man, *res);
-	kfree(*res);
 	*res = NULL;
 }
 EXPORT_SYMBOL(ttm_resource_free);
+6 -6
drivers/gpu/drm/ttm/ttm_sys_manager.c
···
 static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
 			     struct ttm_buffer_object *bo,
 			     const struct ttm_place *place,
-			     struct ttm_resource *mem)
+			     struct ttm_resource **res)
 {
-	mem->mm_node = kzalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem->mm_node)
+	*res = kzalloc(sizeof(**res), GFP_KERNEL);
+	if (!*res)
 		return -ENOMEM;
 
-	ttm_resource_init(bo, place, mem->mm_node);
+	ttm_resource_init(bo, place, *res);
 	return 0;
 }
 
 static void ttm_sys_man_free(struct ttm_resource_manager *man,
-			     struct ttm_resource *mem)
+			     struct ttm_resource *res)
 {
-	kfree(mem->mm_node);
+	kfree(res);
 }
 
 static const struct ttm_resource_manager_func ttm_sys_manager_func = {
+12 -12
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
···
 static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 				  struct ttm_buffer_object *bo,
 				  const struct ttm_place *place,
-				  struct ttm_resource *mem)
+				  struct ttm_resource **res)
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 	int id;
 
-	mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem->mm_node)
+	*res = kmalloc(sizeof(**res), GFP_KERNEL);
+	if (!*res)
 		return -ENOMEM;
 
-	ttm_resource_init(bo, place, mem->mm_node);
+	ttm_resource_init(bo, place, *res);
 
 	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
 	if (id < 0)
···
 	spin_lock(&gman->lock);
 
 	if (gman->max_gmr_pages > 0) {
-		gman->used_gmr_pages += mem->num_pages;
+		gman->used_gmr_pages += (*res)->num_pages;
 		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
 			goto nospace;
 	}
 
-	mem->mm_node = gman;
-	mem->start = id;
+	(*res)->start = id;
 
 	spin_unlock(&gman->lock);
 	return 0;
 
 nospace:
-	gman->used_gmr_pages -= mem->num_pages;
+	gman->used_gmr_pages -= (*res)->num_pages;
 	spin_unlock(&gman->lock);
 	ida_free(&gman->gmr_ida, id);
+	kfree(*res);
 	return -ENOSPC;
 }
 
 static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
-				   struct ttm_resource *mem)
+				   struct ttm_resource *res)
 {
 	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
 
-	ida_free(&gman->gmr_ida, mem->start);
+	ida_free(&gman->gmr_ida, res->start);
 	spin_lock(&gman->lock);
-	gman->used_gmr_pages -= mem->num_pages;
+	gman->used_gmr_pages -= res->num_pages;
 	spin_unlock(&gman->lock);
-	kfree(mem->mm_node);
+	kfree(res);
 }
 
 static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
+13 -14
drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
···
 static int vmw_thp_get_node(struct ttm_resource_manager *man,
 			    struct ttm_buffer_object *bo,
 			    const struct ttm_place *place,
-			    struct ttm_resource *mem)
+			    struct ttm_resource **res)
 {
 	struct vmw_thp_manager *rman = to_thp_manager(man);
 	struct drm_mm *mm = &rman->mm;
···
 	spin_lock(&rman->lock);
 	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
 		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
-		if (mem->num_pages >= align_pages) {
+		if (node->base.num_pages >= align_pages) {
 			ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
-						     align_pages, place, mem,
-						     lpfn, mode);
+						     align_pages, place,
+						     &node->base, lpfn, mode);
 			if (!ret)
 				goto found_unlock;
 		}
 	}
 
 	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
-	if (mem->num_pages >= align_pages) {
+	if (node->base.num_pages >= align_pages) {
 		ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
-					     align_pages, place, mem, lpfn,
-					     mode);
+					     align_pages, place, &node->base,
+					     lpfn, mode);
 		if (!ret)
 			goto found_unlock;
 	}
 
 	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
-					  mem->num_pages, bo->page_alignment, 0,
+					  node->base.num_pages,
+					  bo->page_alignment, 0,
 					  place->fpfn, lpfn, mode);
 found_unlock:
 	spin_unlock(&rman->lock);
···
 	if (unlikely(ret)) {
 		kfree(node);
 	} else {
-		mem->mm_node = &node->mm_nodes[0];
-		mem->start = node->mm_nodes[0].start;
+		node->base.start = node->mm_nodes[0].start;
+		*res = &node->base;
 	}
 
 	return ret;
 }
 
-
-
 static void vmw_thp_put_node(struct ttm_resource_manager *man,
-			     struct ttm_resource *mem)
+			     struct ttm_resource *res)
 {
+	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct vmw_thp_manager *rman = to_thp_manager(man);
-	struct ttm_range_mgr_node *node = mem->mm_node;
 
 	spin_lock(&rman->lock);
 	drm_mm_remove_node(&node->mm_nodes[0]);
+1 -2
include/drm/ttm/ttm_range_manager.h
···
 static inline struct ttm_range_mgr_node *
 to_ttm_range_mgr_node(struct ttm_resource *res)
 {
-	return container_of(res->mm_node, struct ttm_range_mgr_node,
-			    mm_nodes[0]);
+	return container_of(res, struct ttm_range_mgr_node, base);
 }
 
 int ttm_range_man_init(struct ttm_device *bdev,
+17 -26
include/drm/ttm/ttm_resource.h
···
  *
  * @man: Pointer to a memory type manager.
  * @bo: Pointer to the buffer object we're allocating space for.
- * @placement: Placement details.
- * @flags: Additional placement flags.
- * @mem: Pointer to a struct ttm_resource to be filled in.
+ * @place: Placement details.
+ * @res: Resulting pointer to the ttm_resource.
  *
  * This function should allocate space in the memory type managed
- * by @man. Placement details if
- * applicable are given by @placement. If successful,
- * @mem::mm_node should be set to a non-null value, and
- * @mem::start should be set to a value identifying the beginning
+ * by @man. Placement details if applicable are given by @place. If
+ * successful, a filled in ttm_resource object should be returned in
+ * @res. @res::start should be set to a value identifying the beginning
  * of the range allocated, and the function should return zero.
- * If the memory region accommodate the buffer object, @mem::mm_node
- * should be set to NULL, and the function should return 0.
+ * If the manager can't fulfill the request -ENOSPC should be returned.
  * If a system error occurred, preventing the request to be fulfilled,
  * the function should return a negative error code.
  *
- * Note that @mem::mm_node will only be dereferenced by
- * struct ttm_resource_manager functions and optionally by the driver,
- * which has knowledge of the underlying type.
- *
- * This function may not be called from within atomic context, so
- * an implementation can and must use either a mutex or a spinlock to
- * protect any data structures managing the space.
+ * This function may not be called from within atomic context and needs
+ * to take care of its own locking to protect any data structures
+ * managing the space.
  */
 int (*alloc)(struct ttm_resource_manager *man,
 	     struct ttm_buffer_object *bo,
 	     const struct ttm_place *place,
-	     struct ttm_resource *mem);
+	     struct ttm_resource **res);
 
 /**
  * struct ttm_resource_manager_func member free
  *
  * @man: Pointer to a memory type manager.
- * @mem: Pointer to a struct ttm_resource to be filled in.
+ * @res: Pointer to a struct ttm_resource to be freed.
  *
- * This function frees memory type resources previously allocated
- * and that are identified by @mem::mm_node and @mem::start. May not
- * be called from within atomic context.
+ * This function frees memory type resources previously allocated.
+ * May not be called from within atomic context.
  */
 void (*free)(struct ttm_resource_manager *man,
-	     struct ttm_resource *mem);
+	     struct ttm_resource *res);
 
 /**
  * struct ttm_resource_manager_func member debug
···
 /**
  * struct ttm_resource
  *
- * @mm_node: Memory manager node.
- * @size: Requested size of memory region.
- * @num_pages: Actual size of memory region in pages.
+ * @start: Start of the allocation.
+ * @num_pages: Actual size of resource in pages.
+ * @mem_type: Resource type of the allocation.
  * @placement: Placement flags.
  * @bus: Placement on io bus accessible to the CPU
  *
···
  * buffer object.
  */
 struct ttm_resource {
-	void *mm_node;
 	unsigned long start;
 	unsigned long num_pages;
 	uint32_t mem_type;
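
The net effect for code outside the managers: struct ttm_resource no longer
carries a void *mm_node, so consumers upcast from the resource itself with the
container_of() helpers rewritten above (to_ttm_range_mgr_node(), nouveau_mem(),
to_amdgpu_gtt_node()). A sketch of the change in access pattern, using a
hypothetical helper first_mm_node() (compare amdgpu_res_first() in
amdgpu_res_cursor.h above):

/* Hypothetical helper: get the first drm_mm_node backing a resource. */
static struct drm_mm_node *first_mm_node(struct ttm_resource *res)
{
	/* before this patch: an untyped detour through the resource
	 *	return res->mm_node;
	 * after: a typed upcast from the embedded base */
	return to_ttm_range_mgr_node(res)->mm_nodes;
}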