mm/kfence: randomize the freelist on initialization

Randomize the KFENCE freelist during pool initialization to make
allocation patterns less predictable. This is achieved by shuffling the
metadata objects with a Fisher-Yates pass driven by
get_random_u32_below(), then adding them to the freelist in the
shuffled order.
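
For illustration, here is a minimal userspace sketch of the same
in-place Fisher-Yates shuffle. rand_below() and NUM_OBJECTS are
stand-ins invented here for the kernel's get_random_u32_below() and
CONFIG_KFENCE_NUM_OBJECTS; this is a demonstration, not kernel code:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NUM_OBJECTS 8	/* stand-in for CONFIG_KFENCE_NUM_OBJECTS */

/*
 * Stand-in for the kernel's get_random_u32_below(ceil), which returns
 * a uniform value in [0, ceil).  rand() % ceil has modulo bias for
 * large ceilings, but is good enough for a demonstration.
 */
static unsigned int rand_below(unsigned int ceil)
{
	return (unsigned int)rand() % ceil;
}

int main(void)
{
	unsigned int order[NUM_OBJECTS];
	unsigned int tmp, r;
	int i;

	srand((unsigned int)time(NULL));

	for (i = 0; i < NUM_OBJECTS; i++)
		order[i] = i;

	/*
	 * Fisher-Yates shuffle, mirroring the loop the patch adds to
	 * kfence_init_pool(): walk backwards, swapping each slot with
	 * a randomly chosen slot at or below it.
	 */
	for (i = NUM_OBJECTS; i > 0; i--) {
		r = rand_below(i);
		tmp = order[i - 1];
		order[i - 1] = order[r];
		order[r] = tmp;
	}

	for (i = 0; i < NUM_OBJECTS; i++)
		printf("freelist position %d -> object %u\n", i, order[i]);
	return 0;
}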

Additionally, make the error path compute the correct failure address
when initialization fails: now that the per-object address increment
has moved to a separate loop, addr must first be advanced past the
objects that were already set up before the error path runs.
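
A minimal sketch of that address arithmetic, assuming the usual KFENCE
layout of two pages per object; PAGE_SIZE here is a fixed illustrative
value and failure_addr() is a hypothetical helper, not kernel code:

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* illustrative; the real value is per-arch */

/*
 * Each KFENCE object occupies two pages: the object page plus the
 * guard page ("right redzone") after it.  If kfence_protect() fails
 * while setting up object i, objects 0..i-1 were already laid out, so
 * the failure address is the pool base advanced past i two-page
 * slots, which is what the `addr += 2 * i * PAGE_SIZE` in the error
 * path restores now that the setup loop no longer increments addr.
 */
static unsigned long failure_addr(unsigned long base, int i)
{
	return base + 2 * (unsigned long)i * PAGE_SIZE;
}

int main(void)
{
	unsigned long base = 0x100000UL;	/* made-up pool base */

	printf("fail at object 0:  %#lx\n", failure_addr(base, 0));
	printf("fail at object 10: %#lx\n", failure_addr(base, 10));
	return 0;
}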

Link: https://lkml.kernel.org/r/20260120161510.3289089-1-pimyn@google.com
Fixes: 0ce20dd84089 ("mm: add Kernel Electric-Fence infrastructure")
Signed-off-by: Pimyn Girgis <pimyn@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Ernesto Martínez García <ernesto.martinezgarcia@tugraz.at>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Kees Cook <kees@kernel.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

 mm/kfence/core.c | 23 +++++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -596,7 +596,7 @@
 static unsigned long kfence_init_pool(void)
 {
 	unsigned long addr, start_pfn;
-	int i;
+	int i, rand;
 
 	if (!arch_kfence_init_pool())
 		return (unsigned long)__kfence_pool;
@@ -647,12 +647,26 @@
 		INIT_LIST_HEAD(&meta->list);
 		raw_spin_lock_init(&meta->lock);
 		meta->state = KFENCE_OBJECT_UNUSED;
-		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
-		list_add_tail(&meta->list, &kfence_freelist);
+		/* Use addr to randomize the freelist. */
+		meta->addr = i;
 
 		/* Protect the right redzone. */
-		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
+		if (unlikely(!kfence_protect(addr + 2 * i * PAGE_SIZE + PAGE_SIZE)))
 			goto reset_slab;
+	}
 
+	for (i = CONFIG_KFENCE_NUM_OBJECTS; i > 0; i--) {
+		rand = get_random_u32_below(i);
+		swap(kfence_metadata_init[i - 1].addr, kfence_metadata_init[rand].addr);
+	}
+
+	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+		struct kfence_metadata *meta_1 = &kfence_metadata_init[i];
+		struct kfence_metadata *meta_2 = &kfence_metadata_init[meta_1->addr];
+
+		list_add_tail(&meta_2->list, &kfence_freelist);
+	}
+	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+		kfence_metadata_init[i].addr = addr;
 		addr += 2 * PAGE_SIZE;
 	}
@@ -680,6 +694,7 @@
 	return 0;
 
 reset_slab:
+	addr += 2 * i * PAGE_SIZE;
 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
 		struct page *page;
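
Note on the design choice, as it appears in the diff: meta->addr is
reused as scratch space for the shuffle. Each entry first records its
own index, the backwards loop permutes those indices with
get_random_u32_below(), the next loop follows the permutation and adds
the corresponding entries to kfence_freelist, and only the final loop
overwrites addr with the object's real pool address, advancing by
2 * PAGE_SIZE per object (one page for the object, one for its
redzone). This presumably avoids needing a separate temporary index
array this early in boot.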