Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

stackdepot: make max number of pools boot-time configurable

We're hitting the WARN in depot_init_pool() about reaching the stack depot
limit because we have long stacks that don't dedup very well.

Introduce a new start-up parameter to allow users to set the maximum
number of stack depot pools.

Link: https://lkml.kernel.org/r/20250718153928.94229-1-matt@readmodwrite.com
Signed-off-by: Matt Fleming <mfleming@cloudflare.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Matt Fleming and committed by Andrew Morton
ed4f142f 6c6d8f8b

+63 -9
+5
Documentation/admin-guide/kernel-parameters.txt
··· 7029 7029 consumed by the stack hash table. By default this is set 7030 7030 to false. 7031 7031 7032 + stack_depot_max_pools= [KNL,EARLY] 7033 + Specify the maximum number of pools to use for storing 7034 + stack traces. Pools are allocated on-demand up to this 7035 + limit. Default value is 8191 pools. 7036 + 7032 7037 stacktrace [FTRACE] 7033 7038 Enabled the stack tracer on boot up. 7034 7039
+58 -9
lib/stackdepot.c
··· 36 36 #include <linux/memblock.h> 37 37 #include <linux/kasan-enabled.h> 38 38 39 - #define DEPOT_POOLS_CAP 8192 40 - /* The pool_index is offset by 1 so the first record does not have a 0 handle. */ 41 - #define DEPOT_MAX_POOLS \ 42 - (((1LL << (DEPOT_POOL_INDEX_BITS)) - 1 < DEPOT_POOLS_CAP) ? \ 43 - (1LL << (DEPOT_POOL_INDEX_BITS)) - 1 : DEPOT_POOLS_CAP) 39 + /* 40 + * The pool_index is offset by 1 so the first record does not have a 0 handle. 41 + */ 42 + static unsigned int stack_max_pools __read_mostly = 43 + MIN((1LL << DEPOT_POOL_INDEX_BITS) - 1, 8192); 44 44 45 45 static bool stack_depot_disabled; 46 46 static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT); ··· 62 62 static unsigned int stack_hash_mask; 63 63 64 64 /* Array of memory regions that store stack records. */ 65 - static void *stack_pools[DEPOT_MAX_POOLS]; 65 + static void **stack_pools; 66 66 /* Newly allocated pool that is not yet added to stack_pools. */ 67 67 static void *new_pool; 68 68 /* Number of pools in stack_pools. */
··· 100 100 return kstrtobool(str, &stack_depot_disabled); 101 101 } 102 102 early_param("stack_depot_disable", disable_stack_depot); 103 + 104 + static int __init parse_max_pools(char *str) 105 + { 106 + const long long limit = (1LL << (DEPOT_POOL_INDEX_BITS)) - 1; 107 + unsigned int max_pools; 108 + int rv; 109 + 110 + rv = kstrtouint(str, 0, &max_pools); 111 + if (rv) 112 + return rv; 113 + 114 + if (max_pools < 1024) { 115 + pr_err("stack_depot_max_pools below 1024, using default of %u\n", 116 + stack_max_pools); 117 + goto out; 118 + } 119 + 120 + if (max_pools > limit) { 121 + pr_err("stack_depot_max_pools exceeds %lld, using default of %u\n", 122 + limit, stack_max_pools); 123 + goto out; 124 + } 125 + 126 + stack_max_pools = max_pools; 127 + out: 128 + return 0; 129 + } 130 + early_param("stack_depot_max_pools", parse_max_pools); 103 131 104 132 void __init stack_depot_request_early_init(void) 105 133 { ··· 210 182 } 211 183 init_stack_table(entries); 212 184 185 + pr_info("allocating space for %u stack pools via memblock\n", 186 + stack_max_pools); 187 + stack_pools = 188 + memblock_alloc(stack_max_pools * sizeof(void *), PAGE_SIZE); 189 + if (!stack_pools) { 190 + pr_err("stack pools allocation failed, disabling\n"); 191 + memblock_free(stack_table, entries * sizeof(struct list_head)); 192 + stack_depot_disabled = true; 193 + return -ENOMEM; 194 + } 195 + 213 196 return 0; 214 197 } 215 198 ··· 270 231 stack_hash_mask = entries - 1; 271 232 init_stack_table(entries); 272 233 234 + pr_info("allocating space for %u stack pools via kvcalloc\n", 235 + stack_max_pools); 236 + stack_pools = kvcalloc(stack_max_pools, sizeof(void *), GFP_KERNEL); 237 + if (!stack_pools) { 238 + pr_err("stack pools allocation failed, disabling\n"); 239 + kvfree(stack_table); 240 + stack_depot_disabled = true; 241 + ret = -ENOMEM; 242 + } 243 + 273 244 out_unlock: 274 245 mutex_unlock(&stack_depot_init_mutex); 275 246 ··· 294 245 { 295 246 lockdep_assert_held(&pool_lock); 296 247
297 - if (unlikely(pools_num >= DEPOT_MAX_POOLS)) { 248 + if (unlikely(pools_num >= stack_max_pools)) { 298 249 /* Bail out if we reached the pool limit. */ 299 - WARN_ON_ONCE(pools_num > DEPOT_MAX_POOLS); /* should never happen */ 250 + WARN_ON_ONCE(pools_num > stack_max_pools); /* should never happen */ 300 251 WARN_ON_ONCE(!new_pool); /* to avoid unnecessary pre-allocation */ 301 252 WARN_ONCE(1, "Stack depot reached limit capacity"); 302 253 return false; ··· 322 273 * NULL; do not reset to NULL if we have reached the maximum number of 323 274 * pools. 324 275 */ 325 - if (pools_num < DEPOT_MAX_POOLS) 276 + if (pools_num < stack_max_pools) 326 277 WRITE_ONCE(new_pool, NULL); 327 278 else 328 279 WRITE_ONCE(new_pool, STACK_DEPOT_POISON);