Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: cleanup ttm_mem_type_manager_func.get_node interface v3

Instead of signaling failure by setting the node pointer to
NULL, do so by returning -ENOSPC.

v2: add memset() to make sure that mem is always initialized.
v3: drop memset(); only set mm_node = NULL, and move mm_node init into amdgpu

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Link: https://patchwork.freedesktop.org/patch/373181/

+12 -23
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
··· 229 229 if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) && 230 230 atomic64_read(&mgr->available) < mem->num_pages) { 231 231 spin_unlock(&mgr->lock); 232 - return 0; 232 + return -ENOSPC; 233 233 } 234 234 atomic64_sub(mem->num_pages, &mgr->available); 235 235 spin_unlock(&mgr->lock); ··· 250 250 if (unlikely(r)) { 251 251 kfree(node); 252 252 mem->mm_node = NULL; 253 - r = 0; 254 253 goto err_out; 255 254 } 256 255 } else {
+2 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 319 319 mem_bytes = (u64)mem->num_pages << PAGE_SHIFT; 320 320 if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) { 321 321 atomic64_sub(mem_bytes, &mgr->usage); 322 - mem->mm_node = NULL; 323 - return 0; 322 + return -ENOSPC; 324 323 } 325 324 326 325 if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { ··· 399 400 atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage); 400 401 401 402 kvfree(nodes); 402 - return r == -ENOSPC ? 0 : r; 403 + return r; 403 404 } 404 405 405 406 /**
-8
drivers/gpu/drm/nouveau/nouveau_ttm.c
··· 75 75 ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page); 76 76 if (ret) { 77 77 nouveau_mem_del(reg); 78 - if (ret == -ENOSPC) { 79 - reg->mm_node = NULL; 80 - return 0; 81 - } 82 78 return ret; 83 79 } 84 80 ··· 135 139 reg->num_pages << PAGE_SHIFT, &mem->vma[0]); 136 140 if (ret) { 137 141 nouveau_mem_del(reg); 138 - if (ret == -ENOSPC) { 139 - reg->mm_node = NULL; 140 - return 0; 141 - } 142 142 return ret; 143 143 } 144 144
+7 -6
drivers/gpu/drm/ttm/ttm_bo.c
··· 909 909 ticket = dma_resv_locking_ctx(bo->base.resv); 910 910 do { 911 911 ret = (*man->func->get_node)(man, bo, place, mem); 912 - if (unlikely(ret != 0)) 913 - return ret; 914 - if (mem->mm_node) 912 + if (likely(!ret)) 915 913 break; 914 + if (unlikely(ret != -ENOSPC)) 915 + return ret; 916 916 ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx, 917 917 ticket); 918 918 if (unlikely(ret != 0)) ··· 1056 1056 1057 1057 man = &bdev->man[mem->mem_type]; 1058 1058 ret = (*man->func->get_node)(man, bo, place, mem); 1059 + if (ret == -ENOSPC) 1060 + continue; 1059 1061 if (unlikely(ret)) 1060 1062 goto error; 1061 - 1062 - if (!mem->mm_node) 1063 - continue; 1064 1063 1065 1064 ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); 1066 1065 if (unlikely(ret)) { ··· 1125 1126 mem.page_alignment = bo->mem.page_alignment; 1126 1127 mem.bus.io_reserved_vm = false; 1127 1128 mem.bus.io_reserved_count = 0; 1129 + mem.mm_node = NULL; 1130 + 1128 1131 /* 1129 1132 * Determine where to move the buffer. 1130 1133 */
+1 -1
drivers/gpu/drm/ttm/ttm_bo_manager.c
··· 86 86 mem->start = node->start; 87 87 } 88 88 89 - return 0; 89 + return ret; 90 90 } 91 91 92 92 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+1 -3
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
··· 53 53 (struct vmwgfx_gmrid_man *)man->priv; 54 54 int id; 55 55 56 - mem->mm_node = NULL; 57 - 58 56 id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); 59 57 if (id < 0) 60 58 return (id != -ENOMEM ? 0 : id); ··· 76 78 gman->used_gmr_pages -= bo->num_pages; 77 79 spin_unlock(&gman->lock); 78 80 ida_free(&gman->gmr_ida, id); 79 - return 0; 81 + return -ENOSPC; 80 82 } 81 83 82 84 static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,