Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: Add getter for some pool properties

No functional change; the intent is to allow easier refactoring in the future.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
Link: https://lore.kernel.org/r/20251020115411.36818-2-tvrtko.ursulin@igalia.com

authored by

Tvrtko Ursulin and committed by
Tvrtko Ursulin
d53adc24 d7a849d1

+43 -19
+3 -1
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
··· 8 8 #include <drm/ttm/ttm_pool.h> 9 9 10 10 #include "ttm_kunit_helpers.h" 11 + #include "../ttm_pool_internal.h" 11 12 12 13 struct ttm_pool_test_case { 13 14 const char *description; ··· 156 155 157 156 KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev); 158 157 KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE); 159 - KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc); 158 + KUNIT_ASSERT_EQ(test, ttm_pool_uses_dma_alloc(pool), 159 + params->use_dma_alloc); 160 160 161 161 err = ttm_pool_alloc(pool, tt, &simple_ctx); 162 162 KUNIT_ASSERT_EQ(test, err, 0);
+15 -14
drivers/gpu/drm/ttm/ttm_pool.c
··· 48 48 #include <drm/ttm/ttm_bo.h> 49 49 50 50 #include "ttm_module.h" 51 + #include "ttm_pool_internal.h" 51 52 52 53 #ifdef CONFIG_FAULT_INJECTION 53 54 #include <linux/fault-inject.h> ··· 149 148 gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | 150 149 __GFP_THISNODE; 151 150 152 - if (!pool->use_dma_alloc) { 151 + if (!ttm_pool_uses_dma_alloc(pool)) { 153 152 p = alloc_pages_node(pool->nid, gfp_flags, order); 154 153 if (p) 155 154 p->private = order; ··· 201 200 set_pages_wb(p, 1 << order); 202 201 #endif 203 202 204 - if (!pool || !pool->use_dma_alloc) { 203 + if (!pool || !ttm_pool_uses_dma_alloc(pool)) { 205 204 __free_pages(p, order); 206 205 return; 207 206 } ··· 244 243 { 245 244 dma_addr_t addr; 246 245 247 - if (pool->use_dma_alloc) { 246 + if (ttm_pool_uses_dma_alloc(pool)) { 248 247 struct ttm_pool_dma *dma = (void *)p->private; 249 248 250 249 addr = dma->addr; ··· 266 265 unsigned int num_pages) 267 266 { 268 267 /* Unmapped while freeing the page */ 269 - if (pool->use_dma_alloc) 268 + if (ttm_pool_uses_dma_alloc(pool)) 270 269 return; 271 270 272 271 dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT, ··· 340 339 enum ttm_caching caching, 341 340 unsigned int order) 342 341 { 343 - if (pool->use_dma_alloc) 342 + if (ttm_pool_uses_dma_alloc(pool)) 344 343 return &pool->caching[caching].orders[order]; 345 344 346 345 #ifdef CONFIG_X86 ··· 349 348 if (pool->nid != NUMA_NO_NODE) 350 349 return &pool->caching[caching].orders[order]; 351 350 352 - if (pool->use_dma32) 351 + if (ttm_pool_uses_dma32(pool)) 353 352 return &global_dma32_write_combined[order]; 354 353 355 354 return &global_write_combined[order]; ··· 357 356 if (pool->nid != NUMA_NO_NODE) 358 357 return &pool->caching[caching].orders[order]; 359 358 360 - if (pool->use_dma32) 359 + if (ttm_pool_uses_dma32(pool)) 361 360 return &global_dma32_uncached[order]; 362 361 363 362 return &global_uncached[order]; ··· 397 396 /* Return the allocation order based for a 
page */ 398 397 static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p) 399 398 { 400 - if (pool->use_dma_alloc) { 399 + if (ttm_pool_uses_dma_alloc(pool)) { 401 400 struct ttm_pool_dma *dma = (void *)p->private; 402 401 403 402 return dma->vaddr & ~PAGE_MASK; ··· 720 719 if (ctx->gfp_retry_mayfail) 721 720 gfp_flags |= __GFP_RETRY_MAYFAIL; 722 721 723 - if (pool->use_dma32) 722 + if (ttm_pool_uses_dma32(pool)) 724 723 gfp_flags |= GFP_DMA32; 725 724 else 726 725 gfp_flags |= GFP_HIGHUSER; ··· 978 977 return -EINVAL; 979 978 980 979 if ((!ttm_backup_bytes_avail() && !flags->purge) || 981 - pool->use_dma_alloc || ttm_tt_is_backed_up(tt)) 980 + ttm_pool_uses_dma_alloc(pool) || ttm_tt_is_backed_up(tt)) 982 981 return -EBUSY; 983 982 984 983 #ifdef CONFIG_X86 ··· 1015 1014 if (flags->purge) 1016 1015 return shrunken; 1017 1016 1018 - if (pool->use_dma32) 1017 + if (ttm_pool_uses_dma32(pool)) 1019 1018 gfp = GFP_DMA32; 1020 1019 else 1021 1020 gfp = GFP_HIGHUSER; ··· 1069 1068 { 1070 1069 unsigned int i, j; 1071 1070 1072 - WARN_ON(!dev && use_dma_alloc); 1071 + WARN_ON(!dev && ttm_pool_uses_dma_alloc(pool)); 1073 1072 1074 1073 pool->dev = dev; 1075 1074 pool->nid = nid; ··· 1240 1239 { 1241 1240 unsigned int i; 1242 1241 1243 - if (!pool->use_dma_alloc && pool->nid == NUMA_NO_NODE) { 1242 + if (!ttm_pool_uses_dma_alloc(pool) && pool->nid == NUMA_NO_NODE) { 1244 1243 seq_puts(m, "unused\n"); 1245 1244 return 0; 1246 1245 } ··· 1251 1250 for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) { 1252 1251 if (!ttm_pool_select_type(pool, i, 0)) 1253 1252 continue; 1254 - if (pool->use_dma_alloc) 1253 + if (ttm_pool_uses_dma_alloc(pool)) 1255 1254 seq_puts(m, "DMA "); 1256 1255 else 1257 1256 seq_printf(m, "N%d ", pool->nid);
+19
drivers/gpu/drm/ttm/ttm_pool_internal.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright (c) 2025 Valve Corporation */ 3 + 4 + #ifndef _TTM_POOL_INTERNAL_H_ 5 + #define _TTM_POOL_INTERNAL_H_ 6 + 7 + #include <drm/ttm/ttm_pool.h> 8 + 9 + static inline bool ttm_pool_uses_dma_alloc(struct ttm_pool *pool) 10 + { 11 + return pool->use_dma_alloc; 12 + } 13 + 14 + static inline bool ttm_pool_uses_dma32(struct ttm_pool *pool) 15 + { 16 + return pool->use_dma32; 17 + } 18 + 19 + #endif
+6 -4
drivers/gpu/drm/ttm/ttm_tt.c
··· 47 47 #include <drm/ttm/ttm_tt.h> 48 48 49 49 #include "ttm_module.h" 50 + #include "ttm_pool_internal.h" 50 51 51 52 static unsigned long ttm_pages_limit; 52 53 ··· 95 94 * mapped TT pages need to be decrypted or otherwise the drivers 96 95 * will end up sending encrypted mem to the gpu. 97 96 */ 98 - if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { 97 + if (ttm_pool_uses_dma_alloc(&bdev->pool) && 98 + cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { 99 99 page_flags |= TTM_TT_FLAG_DECRYPTED; 100 100 drm_info_once(ddev, "TT memory decryption enabled."); 101 101 } ··· 381 379 382 380 if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) { 383 381 atomic_long_add(ttm->num_pages, &ttm_pages_allocated); 384 - if (bdev->pool.use_dma32) 382 + if (ttm_pool_uses_dma32(&bdev->pool)) 385 383 atomic_long_add(ttm->num_pages, 386 384 &ttm_dma32_pages_allocated); 387 385 } ··· 419 417 error: 420 418 if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) { 421 419 atomic_long_sub(ttm->num_pages, &ttm_pages_allocated); 422 - if (bdev->pool.use_dma32) 420 + if (ttm_pool_uses_dma32(&bdev->pool)) 423 421 atomic_long_sub(ttm->num_pages, 424 422 &ttm_dma32_pages_allocated); 425 423 } ··· 442 440 443 441 if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) { 444 442 atomic_long_sub(ttm->num_pages, &ttm_pages_allocated); 445 - if (bdev->pool.use_dma32) 443 + if (ttm_pool_uses_dma32(&bdev->pool)) 446 444 atomic_long_sub(ttm->num_pages, 447 445 &ttm_dma32_pages_allocated); 448 446 }