Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'slab-for-6.12-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab fix from Vlastimil Babka:

- Fix for duplicate caches in some arm64 configurations with
  CONFIG_SLAB_BUCKETS (Koichiro Den)

* tag 'slab-for-6.12-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
mm/slab: fix warning caused by duplicate kmem_cache creation in kmem_buckets_create

+20 -11
+20 -11
mm/slab_common.c
@@ mm/slab_common.c: kmem_buckets_create @@
 		unsigned int usersize,
 		void (*ctor)(void *))
 {
+	unsigned long mask = 0;
+	unsigned int idx;
 	kmem_buckets *b;
-	int idx;
+
+	BUILD_BUG_ON(ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]) > BITS_PER_LONG);
 
 	/*
 	 * When the separate buckets API is not built in, just return
@@
 	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
 		char *short_size, *cache_name;
 		unsigned int cache_useroffset, cache_usersize;
-		unsigned int size;
+		unsigned int size, aligned_idx;
 
 		if (!kmalloc_caches[KMALLOC_NORMAL][idx])
 			continue;
@@
 		if (WARN_ON(!short_size))
 			goto fail;
 
-		cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
-		if (WARN_ON(!cache_name))
-			goto fail;
-
 		if (useroffset >= size) {
 			cache_useroffset = 0;
 			cache_usersize = 0;
@@
 			cache_useroffset = useroffset;
 			cache_usersize = min(size - cache_useroffset, usersize);
 		}
-		(*b)[idx] = kmem_cache_create_usercopy(cache_name, size,
+
+		aligned_idx = __kmalloc_index(size, false);
+		if (!(*b)[aligned_idx]) {
+			cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
+			if (WARN_ON(!cache_name))
+				goto fail;
+			(*b)[aligned_idx] = kmem_cache_create_usercopy(cache_name, size,
 					0, flags, cache_useroffset,
 					cache_usersize, ctor);
-		kfree(cache_name);
-		if (WARN_ON(!(*b)[idx]))
-			goto fail;
+			kfree(cache_name);
+			if (WARN_ON(!(*b)[aligned_idx]))
+				goto fail;
+			set_bit(aligned_idx, &mask);
+		}
+		if (idx != aligned_idx)
+			(*b)[idx] = (*b)[aligned_idx];
 	}
 
 	return b;
 
 fail:
-	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++)
+	for_each_set_bit(idx, &mask, ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]))
 		kmem_cache_destroy((*b)[idx]);
 	kmem_cache_free(kmem_buckets_cache, b);