Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: Replace multiple booleans with flags in pool init

Multiple consecutive boolean function arguments are usually not very
readable.

Replace the ones in ttm_pool_init() with a flags argument, with the
additional benefit of soon being able to pass in more data for just
this one-time code-churn cost.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
Link: https://lore.kernel.org/r/20251020115411.36818-3-tvrtko.ursulin@igalia.com

authored by

Tvrtko Ursulin and committed by
Tvrtko Ursulin
0af5b6a8 d53adc24

+45 -42
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 1837 1837 for (i = 0; i < adev->gmc.num_mem_partitions; i++) { 1838 1838 ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev, 1839 1839 adev->gmc.mem_partitions[i].numa.node, 1840 - false, false); 1840 + 0); 1841 1841 } 1842 1842 return 0; 1843 1843 }
+10 -15
drivers/gpu/drm/ttm/tests/ttm_device_test.c
··· 7 7 #include <drm/ttm/ttm_placement.h> 8 8 9 9 #include "ttm_kunit_helpers.h" 10 + #include "../ttm_pool_internal.h" 10 11 11 12 struct ttm_device_test_case { 12 13 const char *description; 13 - bool use_dma_alloc; 14 - bool use_dma32; 14 + unsigned int alloc_flags; 15 15 bool pools_init_expected; 16 16 }; 17 17 ··· 119 119 static const struct ttm_device_test_case ttm_device_cases[] = { 120 120 { 121 121 .description = "No DMA allocations, no DMA32 required", 122 - .use_dma_alloc = false, 123 - .use_dma32 = false, 124 122 .pools_init_expected = false, 125 123 }, 126 124 { 127 125 .description = "DMA allocations, DMA32 required", 128 - .use_dma_alloc = true, 129 - .use_dma32 = true, 126 + .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC | 127 + TTM_ALLOCATION_POOL_USE_DMA32, 130 128 .pools_init_expected = true, 131 129 }, 132 130 { 133 131 .description = "No DMA allocations, DMA32 required", 134 - .use_dma_alloc = false, 135 - .use_dma32 = true, 132 + .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA32, 136 133 .pools_init_expected = false, 137 134 }, 138 135 { 139 136 .description = "DMA allocations, no DMA32 required", 140 - .use_dma_alloc = true, 141 - .use_dma32 = false, 137 + .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC, 142 138 .pools_init_expected = true, 143 139 }, 144 140 }; ··· 159 163 KUNIT_ASSERT_NOT_NULL(test, ttm_dev); 160 164 161 165 err = ttm_device_kunit_init(priv, ttm_dev, 162 - params->use_dma_alloc, 163 - params->use_dma32); 166 + params->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA_ALLOC, 167 + params->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA32); 164 168 KUNIT_ASSERT_EQ(test, err, 0); 165 169 166 170 pool = &ttm_dev->pool; 167 171 KUNIT_ASSERT_NOT_NULL(test, pool); 168 172 KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev); 169 - KUNIT_EXPECT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc); 170 - KUNIT_EXPECT_EQ(test, pool->use_dma32, params->use_dma32); 173 + KUNIT_EXPECT_EQ(test, pool->alloc_flags, params->alloc_flags); 171 174 172 175 if 
(params->pools_init_expected) { 173 176 for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) { ··· 176 181 KUNIT_EXPECT_EQ(test, pt.caching, i); 177 182 KUNIT_EXPECT_EQ(test, pt.order, j); 178 183 179 - if (params->use_dma_alloc) 184 + if (ttm_pool_uses_dma_alloc(pool)) 180 185 KUNIT_ASSERT_FALSE(test, 181 186 list_empty(&pt.pages)); 182 187 }
+11 -13
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
··· 13 13 struct ttm_pool_test_case { 14 14 const char *description; 15 15 unsigned int order; 16 - bool use_dma_alloc; 16 + unsigned int alloc_flags; 17 17 }; 18 18 19 19 struct ttm_pool_test_priv { ··· 87 87 pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); 88 88 KUNIT_ASSERT_NOT_NULL(test, pool); 89 89 90 - ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false); 90 + ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC); 91 91 92 92 err = ttm_pool_alloc(pool, tt, &simple_ctx); 93 93 KUNIT_ASSERT_EQ(test, err, 0); ··· 114 114 { 115 115 .description = "One page, with coherent DMA mappings enabled", 116 116 .order = 0, 117 - .use_dma_alloc = true, 117 + .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC, 118 118 }, 119 119 { 120 120 .description = "Above the allocation limit, with coherent DMA mappings enabled", 121 121 .order = MAX_PAGE_ORDER + 1, 122 - .use_dma_alloc = true, 122 + .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC, 123 123 }, 124 124 }; 125 125 ··· 151 151 pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); 152 152 KUNIT_ASSERT_NOT_NULL(test, pool); 153 153 154 - ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc, 155 - false); 154 + ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->alloc_flags); 156 155 157 156 KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev); 158 157 KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE); 159 - KUNIT_ASSERT_EQ(test, ttm_pool_uses_dma_alloc(pool), 160 - params->use_dma_alloc); 158 + KUNIT_ASSERT_EQ(test, pool->alloc_flags, params->alloc_flags); 161 159 162 160 err = ttm_pool_alloc(pool, tt, &simple_ctx); 163 161 KUNIT_ASSERT_EQ(test, err, 0); ··· 165 167 last_page = tt->pages[tt->num_pages - 1]; 166 168 167 169 if (params->order <= MAX_PAGE_ORDER) { 168 - if (params->use_dma_alloc) { 170 + if (ttm_pool_uses_dma_alloc(pool)) { 169 171 KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private); 170 172 KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private); 171 173 } else { 
172 174 KUNIT_ASSERT_EQ(test, fst_page->private, params->order); 173 175 } 174 176 } else { 175 - if (params->use_dma_alloc) { 177 + if (ttm_pool_uses_dma_alloc(pool)) { 176 178 KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private); 177 179 KUNIT_ASSERT_NULL(test, (void *)last_page->private); 178 180 } else { ··· 218 220 pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); 219 221 KUNIT_ASSERT_NOT_NULL(test, pool); 220 222 221 - ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false); 223 + ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC); 222 224 223 225 err = ttm_pool_alloc(pool, tt, &simple_ctx); 224 226 KUNIT_ASSERT_EQ(test, err, 0); ··· 348 350 pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); 349 351 KUNIT_ASSERT_NOT_NULL(test, pool); 350 352 351 - ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false); 353 + ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC); 352 354 ttm_pool_alloc(pool, tt, &simple_ctx); 353 355 354 356 pt = &pool->caching[caching].orders[order]; ··· 379 381 pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); 380 382 KUNIT_ASSERT_NOT_NULL(test, pool); 381 383 382 - ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false); 384 + ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, 0); 383 385 ttm_pool_alloc(pool, tt, &simple_ctx); 384 386 385 387 pt = &pool->caching[caching].orders[order];
+4 -1
drivers/gpu/drm/ttm/ttm_device.c
··· 31 31 #include <linux/export.h> 32 32 #include <linux/mm.h> 33 33 34 + #include <drm/ttm/ttm_allocation.h> 34 35 #include <drm/ttm/ttm_bo.h> 35 36 #include <drm/ttm/ttm_device.h> 36 37 #include <drm/ttm/ttm_tt.h> ··· 237 236 else 238 237 nid = NUMA_NO_NODE; 239 238 240 - ttm_pool_init(&bdev->pool, dev, nid, use_dma_alloc, use_dma32); 239 + ttm_pool_init(&bdev->pool, dev, nid, 240 + (use_dma_alloc ? TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) | 241 + (use_dma32 ? TTM_ALLOCATION_POOL_USE_DMA32 : 0)); 241 242 242 243 bdev->vma_manager = vma_manager; 243 244 spin_lock_init(&bdev->lru_lock);
+3 -5
drivers/gpu/drm/ttm/ttm_pool.c
··· 1059 1059 * @pool: the pool to initialize 1060 1060 * @dev: device for DMA allocations and mappings 1061 1061 * @nid: NUMA node to use for allocations 1062 - * @use_dma_alloc: true if coherent DMA alloc should be used 1063 - * @use_dma32: true if GFP_DMA32 should be used 1062 + * @alloc_flags: TTM_ALLOCATION_POOL_ flags 1064 1063 * 1065 1064 * Initialize the pool and its pool types. 1066 1065 */ 1067 1066 void ttm_pool_init(struct ttm_pool *pool, struct device *dev, 1068 - int nid, bool use_dma_alloc, bool use_dma32) 1067 + int nid, unsigned int alloc_flags) 1069 1068 { 1070 1069 unsigned int i, j; 1071 1070 ··· 1072 1073 1073 1074 pool->dev = dev; 1074 1075 pool->nid = nid; 1075 - pool->use_dma_alloc = use_dma_alloc; 1076 - pool->use_dma32 = use_dma32; 1076 + pool->alloc_flags = alloc_flags; 1077 1077 1078 1078 for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) { 1079 1079 for (j = 0; j < NR_PAGE_ORDERS; ++j) {
+3 -2
drivers/gpu/drm/ttm/ttm_pool_internal.h
··· 4 4 #ifndef _TTM_POOL_INTERNAL_H_ 5 5 #define _TTM_POOL_INTERNAL_H_ 6 6 7 + #include <drm/ttm/ttm_allocation.h> 7 8 #include <drm/ttm/ttm_pool.h> 8 9 9 10 static inline bool ttm_pool_uses_dma_alloc(struct ttm_pool *pool) 10 11 { 11 - return pool->use_dma_alloc; 12 + return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA_ALLOC; 12 13 } 13 14 14 15 static inline bool ttm_pool_uses_dma32(struct ttm_pool *pool) 15 16 { 16 - return pool->use_dma32; 17 + return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA32; 17 18 } 18 19 19 20 #endif
+10
include/drm/ttm/ttm_allocation.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* Copyright (c) 2025 Valve Corporation */ 3 + 4 + #ifndef _TTM_ALLOCATION_H_ 5 + #define _TTM_ALLOCATION_H_ 6 + 7 + #define TTM_ALLOCATION_POOL_USE_DMA_ALLOC BIT(0) /* Use coherent DMA allocations. */ 8 + #define TTM_ALLOCATION_POOL_USE_DMA32 BIT(1) /* Use GFP_DMA32 allocations. */ 9 + 10 + #endif
+3 -5
include/drm/ttm/ttm_pool.h
··· 64 64 * 65 65 * @dev: the device we allocate pages for 66 66 * @nid: which numa node to use 67 - * @use_dma_alloc: if coherent DMA allocations should be used 68 - * @use_dma32: if GFP_DMA32 should be used 67 + * @alloc_flags: TTM_ALLOCATION_POOL_ flags 69 68 * @caching: pools for each caching/order 70 69 */ 71 70 struct ttm_pool { 72 71 struct device *dev; 73 72 int nid; 74 73 75 - bool use_dma_alloc; 76 - bool use_dma32; 74 + unsigned int alloc_flags; 77 75 78 76 struct { 79 77 struct ttm_pool_type orders[NR_PAGE_ORDERS]; ··· 83 85 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt); 84 86 85 87 void ttm_pool_init(struct ttm_pool *pool, struct device *dev, 86 - int nid, bool use_dma_alloc, bool use_dma32); 88 + int nid, unsigned int alloc_flags); 87 89 void ttm_pool_fini(struct ttm_pool *pool); 88 90 89 91 int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);