Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: rename ttm_bo_put to _fini v3

Give TTM BOs a separate cleanup function.

No functional change, but the next step in removing the TTM BO reference
counting and replacing it with the GEM object reference counting.

v2: move the code around a bit to make it clearer what's happening
v3: fix nouveau_bo_fini as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://lore.kernel.org/r/20250909144311.1927-1-christian.koenig@amd.com

+59 -58
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 198 198 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj); 199 199 200 200 amdgpu_hmm_unregister(aobj); 201 - ttm_bo_put(&aobj->tbo); 201 + ttm_bo_fini(&aobj->tbo); 202 202 } 203 203 204 204 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
+3 -3
drivers/gpu/drm/drm_gem_vram_helper.c
··· 107 107 108 108 static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo) 109 109 { 110 - /* We got here via ttm_bo_put(), which means that the 110 + /* We got here via ttm_bo_fini(), which means that the 111 111 * TTM buffer object in 'bo' has already been cleaned 112 112 * up; only release the GEM object. 113 113 */ ··· 234 234 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object 235 235 * @gbo: the GEM VRAM object 236 236 * 237 - * See ttm_bo_put() for more information. 237 + * See ttm_bo_fini() for more information. 238 238 */ 239 239 void drm_gem_vram_put(struct drm_gem_vram_object *gbo) 240 240 { 241 - ttm_bo_put(&gbo->bo); 241 + ttm_bo_fini(&gbo->bo); 242 242 } 243 243 EXPORT_SYMBOL(drm_gem_vram_put); 244 244
+2 -2
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
··· 1029 1029 { 1030 1030 GEM_BUG_ON(!obj->ttm.created); 1031 1031 1032 - ttm_bo_put(i915_gem_to_ttm(obj)); 1032 + ttm_bo_fini(i915_gem_to_ttm(obj)); 1033 1033 } 1034 1034 1035 1035 static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) ··· 1325 1325 * If this function fails, it will call the destructor, but 1326 1326 * our caller still owns the object. So no freeing in the 1327 1327 * destructor until obj->ttm.created is true. 1328 - * Similarly, in delayed_destroy, we can't call ttm_bo_put() 1328 + * Similarly, in delayed_destroy, we can't call ttm_bo_fini() 1329 1329 * until successful initialization. 1330 1330 */ 1331 1331 ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
+1 -1
drivers/gpu/drm/loongson/lsdc_gem.c
··· 57 57 struct ttm_buffer_object *tbo = to_ttm_bo(obj); 58 58 59 59 if (tbo) 60 - ttm_bo_put(tbo); 60 + ttm_bo_fini(tbo); 61 61 } 62 62 63 63 static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo.h
··· 57 57 static inline void 58 58 nouveau_bo_fini(struct nouveau_bo *bo) 59 59 { 60 - ttm_bo_put(&bo->bo); 60 + ttm_bo_fini(&bo->bo); 61 61 } 62 62 63 63 extern struct ttm_device_funcs nouveau_bo_driver;
+1 -1
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 87 87 return; 88 88 } 89 89 90 - ttm_bo_put(&nvbo->bo); 90 + ttm_bo_fini(&nvbo->bo); 91 91 92 92 pm_runtime_mark_last_busy(dev); 93 93 pm_runtime_put_autosuspend(dev);
+1 -1
drivers/gpu/drm/qxl/qxl_gem.c
··· 39 39 qxl_surface_evict(qdev, qobj, false); 40 40 41 41 tbo = &qobj->tbo; 42 - ttm_bo_put(tbo); 42 + ttm_bo_fini(tbo); 43 43 } 44 44 45 45 int qxl_gem_object_create(struct qxl_device *qdev, int size,
+1 -1
drivers/gpu/drm/radeon/radeon_gem.c
··· 86 86 87 87 if (robj) { 88 88 radeon_mn_unregister(robj); 89 - ttm_bo_put(&robj->tbo); 89 + ttm_bo_fini(&robj->tbo); 90 90 } 91 91 } 92 92
+6 -6
drivers/gpu/drm/ttm/tests/ttm_bo_test.c
··· 379 379 dma_resv_fini(resv); 380 380 } 381 381 382 - static void ttm_bo_put_basic(struct kunit *test) 382 + static void ttm_bo_fini_basic(struct kunit *test) 383 383 { 384 384 struct ttm_test_devices *priv = test->priv; 385 385 struct ttm_buffer_object *bo; ··· 410 410 dma_resv_unlock(bo->base.resv); 411 411 KUNIT_EXPECT_EQ(test, err, 0); 412 412 413 - ttm_bo_put(bo); 413 + ttm_bo_fini(bo); 414 414 } 415 415 416 416 static const char *mock_name(struct dma_fence *f) ··· 423 423 .get_timeline_name = mock_name, 424 424 }; 425 425 426 - static void ttm_bo_put_shared_resv(struct kunit *test) 426 + static void ttm_bo_fini_shared_resv(struct kunit *test) 427 427 { 428 428 struct ttm_test_devices *priv = test->priv; 429 429 struct ttm_buffer_object *bo; ··· 463 463 bo->type = ttm_bo_type_device; 464 464 bo->base.resv = external_resv; 465 465 466 - ttm_bo_put(bo); 466 + ttm_bo_fini(bo); 467 467 } 468 468 469 469 static void ttm_bo_pin_basic(struct kunit *test) ··· 616 616 KUNIT_CASE(ttm_bo_unreserve_basic), 617 617 KUNIT_CASE(ttm_bo_unreserve_pinned), 618 618 KUNIT_CASE(ttm_bo_unreserve_bulk), 619 - KUNIT_CASE(ttm_bo_put_basic), 620 - KUNIT_CASE(ttm_bo_put_shared_resv), 619 + KUNIT_CASE(ttm_bo_fini_basic), 620 + KUNIT_CASE(ttm_bo_fini_shared_resv), 621 621 KUNIT_CASE(ttm_bo_pin_basic), 622 622 KUNIT_CASE(ttm_bo_pin_unpin_resource), 623 623 KUNIT_CASE(ttm_bo_multiple_pin_one_unpin),
+30 -30
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
··· 144 144 drm_mm_node_allocated(&bo->base.vma_node.vm_node)); 145 145 146 146 ttm_resource_free(bo, &bo->resource); 147 - ttm_bo_put(bo); 147 + ttm_bo_fini(bo); 148 148 } 149 149 150 150 static void ttm_bo_init_reserved_mock_man(struct kunit *test) ··· 186 186 drm_mm_node_allocated(&bo->base.vma_node.vm_node)); 187 187 188 188 ttm_resource_free(bo, &bo->resource); 189 - ttm_bo_put(bo); 189 + ttm_bo_fini(bo); 190 190 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 191 191 } 192 192 ··· 221 221 KUNIT_EXPECT_PTR_EQ(test, bo->base.resv, &resv); 222 222 223 223 ttm_resource_free(bo, &bo->resource); 224 - ttm_bo_put(bo); 224 + ttm_bo_fini(bo); 225 225 } 226 226 227 227 static void ttm_bo_validate_basic(struct kunit *test) ··· 265 265 KUNIT_EXPECT_EQ(test, bo->resource->placement, 266 266 DRM_BUDDY_TOPDOWN_ALLOCATION); 267 267 268 - ttm_bo_put(bo); 268 + ttm_bo_fini(bo); 269 269 ttm_mock_manager_fini(priv->ttm_dev, snd_mem); 270 270 } 271 271 ··· 292 292 293 293 KUNIT_EXPECT_EQ(test, err, -ENOMEM); 294 294 295 - ttm_bo_put(bo); 295 + ttm_bo_fini(bo); 296 296 } 297 297 298 298 static void ttm_bo_validate_failed_alloc(struct kunit *test) ··· 321 321 322 322 KUNIT_EXPECT_EQ(test, err, -ENOMEM); 323 323 324 - ttm_bo_put(bo); 324 + ttm_bo_fini(bo); 325 325 ttm_bad_manager_fini(priv->ttm_dev, mem_type); 326 326 } 327 327 ··· 353 353 ttm_bo_unpin(bo); 354 354 dma_resv_unlock(bo->base.resv); 355 355 356 - ttm_bo_put(bo); 356 + ttm_bo_fini(bo); 357 357 } 358 358 359 359 static const struct ttm_bo_validate_test_case ttm_mem_type_cases[] = { ··· 403 403 KUNIT_EXPECT_EQ(test, err, 0); 404 404 KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, 0); 405 405 406 - ttm_bo_put(bo); 406 + ttm_bo_fini(bo); 407 407 408 408 if (params->mem_type != TTM_PL_SYSTEM) 409 409 ttm_mock_manager_fini(priv->ttm_dev, params->mem_type); ··· 452 452 KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem); 453 453 KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority])); 454 454 455 - ttm_bo_put(bo); 455 
+ ttm_bo_fini(bo); 456 456 ttm_bad_manager_fini(priv->ttm_dev, fst_mem); 457 457 ttm_mock_manager_fini(priv->ttm_dev, snd_mem); 458 458 } ··· 495 495 KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2); 496 496 KUNIT_EXPECT_EQ(test, bo->resource->mem_type, final_mem); 497 497 498 - ttm_bo_put(bo); 498 + ttm_bo_fini(bo); 499 499 500 500 ttm_mock_manager_fini(priv->ttm_dev, fst_mem); 501 501 ttm_mock_manager_fini(priv->ttm_dev, tmp_mem); ··· 567 567 KUNIT_ASSERT_TRUE(test, flags & TTM_TT_FLAG_ZERO_ALLOC); 568 568 } 569 569 570 - ttm_bo_put(bo); 570 + ttm_bo_fini(bo); 571 571 } 572 572 573 573 static int threaded_dma_resv_signal(void *arg) ··· 635 635 /* Make sure we have an idle object at this point */ 636 636 dma_resv_wait_timeout(bo->base.resv, usage, false, MAX_SCHEDULE_TIMEOUT); 637 637 638 - ttm_bo_put(bo); 638 + ttm_bo_fini(bo); 639 639 } 640 640 641 641 static void ttm_bo_validate_move_fence_signaled(struct kunit *test) ··· 668 668 KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type); 669 669 KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size); 670 670 671 - ttm_bo_put(bo); 671 + ttm_bo_fini(bo); 672 672 dma_fence_put(man->move); 673 673 } 674 674 ··· 753 753 else 754 754 KUNIT_EXPECT_EQ(test, bo->resource->mem_type, fst_mem); 755 755 756 - ttm_bo_put(bo); 756 + ttm_bo_fini(bo); 757 757 ttm_mock_manager_fini(priv->ttm_dev, fst_mem); 758 758 ttm_mock_manager_fini(priv->ttm_dev, snd_mem); 759 759 } ··· 807 807 KUNIT_EXPECT_EQ(test, bos[1].resource->mem_type, mem_type); 808 808 809 809 for (i = 0; i < bo_no; i++) 810 - ttm_bo_put(&bos[i]); 811 - ttm_bo_put(bo_val); 810 + ttm_bo_fini(&bos[i]); 811 + ttm_bo_fini(bo_val); 812 812 813 813 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 814 814 ttm_mock_manager_fini(priv->ttm_dev, mem_multihop); ··· 852 852 853 853 KUNIT_EXPECT_EQ(test, err, -ENOMEM); 854 854 855 - ttm_bo_put(bo_small); 855 + ttm_bo_fini(bo_small); 856 856 857 857 ttm_bo_reserve(bo_big, false, false, NULL); 858 858 ttm_bo_unpin(bo_big); 859 859 
dma_resv_unlock(bo_big->base.resv); 860 - ttm_bo_put(bo_big); 860 + ttm_bo_fini(bo_big); 861 861 862 862 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 863 863 ttm_mock_manager_fini(priv->ttm_dev, mem_multihop); ··· 916 916 KUNIT_EXPECT_EQ(test, bo_evictable->resource->mem_type, mem_type_evict); 917 917 KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2 + BO_SIZE); 918 918 919 - ttm_bo_put(bo); 920 - ttm_bo_put(bo_evictable); 919 + ttm_bo_fini(bo); 920 + ttm_bo_fini(bo_evictable); 921 921 922 922 ttm_bo_reserve(bo_pinned, false, false, NULL); 923 923 ttm_bo_unpin(bo_pinned); 924 924 dma_resv_unlock(bo_pinned->base.resv); 925 - ttm_bo_put(bo_pinned); 925 + ttm_bo_fini(bo_pinned); 926 926 927 927 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 928 928 ttm_mock_manager_fini(priv->ttm_dev, mem_multihop); ··· 973 973 KUNIT_EXPECT_NULL(test, bo_big->ttm); 974 974 KUNIT_EXPECT_NULL(test, bo_big->resource); 975 975 976 - ttm_bo_put(bo_small); 977 - ttm_bo_put(bo_big); 976 + ttm_bo_fini(bo_small); 977 + ttm_bo_fini(bo_big); 978 978 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 979 979 } 980 980 ··· 1025 1025 KUNIT_EXPECT_EQ(test, bo_init->resource->mem_type, mem_type); 1026 1026 KUNIT_EXPECT_NULL(test, bo_val->resource); 1027 1027 1028 - ttm_bo_put(bo_init); 1029 - ttm_bo_put(bo_val); 1028 + ttm_bo_fini(bo_init); 1029 + ttm_bo_fini(bo_val); 1030 1030 1031 1031 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 1032 1032 ttm_bad_manager_fini(priv->ttm_dev, mem_type_evict); ··· 1070 1070 KUNIT_ASSERT_NULL(test, bo_evict->resource); 1071 1071 KUNIT_ASSERT_TRUE(test, bo_evict->ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC); 1072 1072 1073 - ttm_bo_put(bo_evict); 1074 - ttm_bo_put(bo); 1073 + ttm_bo_fini(bo_evict); 1074 + ttm_bo_fini(bo); 1075 1075 1076 1076 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 1077 1077 } ··· 1128 1128 ttm_mock_manager_fini(priv->ttm_dev, mem_type); 1129 1129 ttm_mock_manager_fini(priv->ttm_dev, mem_type_evict); 1130 1130 1131 - ttm_bo_put(bo_val); 1132 
- ttm_bo_put(bo_tt); 1133 - ttm_bo_put(bo_mock); 1131 + ttm_bo_fini(bo_val); 1132 + ttm_bo_fini(bo_tt); 1133 + ttm_bo_fini(bo_mock); 1134 1134 } 1135 1135 1136 1136 static struct kunit_case ttm_bo_validate_test_cases[] = {
+7 -8
drivers/gpu/drm/ttm/ttm_bo.c
··· 318 318 bo->destroy(bo); 319 319 } 320 320 321 - /** 322 - * ttm_bo_put 323 - * 324 - * @bo: The buffer object. 325 - * 326 - * Unreference a buffer object. 327 - */ 321 + /* TODO: remove! */ 328 322 void ttm_bo_put(struct ttm_buffer_object *bo) 329 323 { 330 324 kref_put(&bo->kref, ttm_bo_release); 331 325 } 332 - EXPORT_SYMBOL(ttm_bo_put); 326 + 327 + void ttm_bo_fini(struct ttm_buffer_object *bo) 328 + { 329 + ttm_bo_put(bo); 330 + } 331 + EXPORT_SYMBOL(ttm_bo_fini); 333 332 334 333 static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo, 335 334 struct ttm_operation_ctx *ctx,
+2
drivers/gpu/drm/ttm/ttm_bo_internal.h
··· 55 55 return bo; 56 56 } 57 57 58 + void ttm_bo_put(struct ttm_buffer_object *bo); 59 + 58 60 #endif
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
··· 37 37 { 38 38 struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj); 39 39 if (bo) 40 - ttm_bo_put(bo); 40 + ttm_bo_fini(bo); 41 41 } 42 42 43 43 static int vmw_gem_object_open(struct drm_gem_object *obj,
+1 -1
drivers/gpu/drm/xe/xe_bo.c
··· 1696 1696 * refcount directly if needed. 1697 1697 */ 1698 1698 __xe_bo_vunmap(gem_to_xe_bo(obj)); 1699 - ttm_bo_put(container_of(obj, struct ttm_buffer_object, base)); 1699 + ttm_bo_fini(container_of(obj, struct ttm_buffer_object, base)); 1700 1700 } 1701 1701 1702 1702 static void xe_gem_object_close(struct drm_gem_object *obj,
+1 -1
include/drm/ttm/ttm_bo.h
··· 391 391 int ttm_bo_validate(struct ttm_buffer_object *bo, 392 392 struct ttm_placement *placement, 393 393 struct ttm_operation_ctx *ctx); 394 - void ttm_bo_put(struct ttm_buffer_object *bo); 394 + void ttm_bo_fini(struct ttm_buffer_object *bo); 395 395 void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo, 396 396 struct ttm_lru_bulk_move *bulk); 397 397 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,