Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/kasan: fix KASAN poisoning in vrealloc()

A KASAN warning can be triggered when vrealloc() changes the requested
size to a value that is not aligned to KASAN_GRANULE_SIZE.

------------[ cut here ]------------
WARNING: CPU: 2 PID: 1 at mm/kasan/shadow.c:174 kasan_unpoison+0x40/0x48
...
pc : kasan_unpoison+0x40/0x48
lr : __kasan_unpoison_vmalloc+0x40/0x68
Call trace:
kasan_unpoison+0x40/0x48 (P)
vrealloc_node_align_noprof+0x200/0x320
bpf_patch_insn_data+0x90/0x2f0
convert_ctx_accesses+0x8c0/0x1158
bpf_check+0x1488/0x1900
bpf_prog_load+0xd20/0x1258
__sys_bpf+0x96c/0xdf0
__arm64_sys_bpf+0x50/0xa0
invoke_syscall+0x90/0x160

Introduce a dedicated kasan_vrealloc() helper that centralizes KASAN
handling for vmalloc reallocations. The helper accounts for KASAN granule
alignment when growing or shrinking an allocation and ensures that partial
granules are handled correctly.

Use this helper from vrealloc_node_align_noprof() to fix poisoning logic.

[ryabinin.a.a@gmail.com: move kasan_enabled() check, fix build]
Link: https://lkml.kernel.org/r/20260119144509.32767-1-ryabinin.a.a@gmail.com
Link: https://lkml.kernel.org/r/20260113191516.31015-1-ryabinin.a.a@gmail.com
Fixes: d699440f58ce ("mm: fix vrealloc()'s KASAN poisoning logic")
Signed-off-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Reported-by: Maciej Żenczykowski <maze@google.com>
Reported-by: <joonki.min@samsung-slsi.corp-partner.google.com>
Closes: https://lkml.kernel.org/r/CANP3RGeuRW53vukDy7WDO3FiVgu34-xVJYkfpm08oLO3odYFrA@mail.gmail.com
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Tested-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Uladzislau Rezki <urezki@gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Andrey Ryabinin and committed by Andrew Morton.
9b47d4ee 8a1968bd

+37 -5
+14
include/linux/kasan.h
··· 641 641 __kasan_unpoison_vmap_areas(vms, nr_vms, flags); 642 642 } 643 643 644 + void __kasan_vrealloc(const void *start, unsigned long old_size, 645 + unsigned long new_size); 646 + 647 + static __always_inline void kasan_vrealloc(const void *start, 648 + unsigned long old_size, 649 + unsigned long new_size) 650 + { 651 + if (kasan_enabled()) 652 + __kasan_vrealloc(start, old_size, new_size); 653 + } 654 + 644 655 #else /* CONFIG_KASAN_VMALLOC */ 645 656 646 657 static inline void kasan_populate_early_vm_area_shadow(void *start, ··· 680 669 kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, 681 670 kasan_vmalloc_flags_t flags) 682 671 { } 672 + 673 + static inline void kasan_vrealloc(const void *start, unsigned long old_size, 674 + unsigned long new_size) { } 683 675 684 676 #endif /* CONFIG_KASAN_VMALLOC */ 685 677
+21
mm/kasan/common.c
··· 606 606 __kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG); 607 607 } 608 608 } 609 + 610 + void __kasan_vrealloc(const void *addr, unsigned long old_size, 611 + unsigned long new_size) 612 + { 613 + if (new_size < old_size) { 614 + kasan_poison_last_granule(addr, new_size); 615 + 616 + new_size = round_up(new_size, KASAN_GRANULE_SIZE); 617 + old_size = round_up(old_size, KASAN_GRANULE_SIZE); 618 + if (new_size < old_size) 619 + __kasan_poison_vmalloc(addr + new_size, 620 + old_size - new_size); 621 + } else if (new_size > old_size) { 622 + old_size = round_down(old_size, KASAN_GRANULE_SIZE); 623 + __kasan_unpoison_vmalloc(addr + old_size, 624 + new_size - old_size, 625 + KASAN_VMALLOC_PROT_NORMAL | 626 + KASAN_VMALLOC_VM_ALLOC | 627 + KASAN_VMALLOC_KEEP_TAG); 628 + } 629 + } 609 630 #endif
+2 -5
mm/vmalloc.c
··· 4322 4322 if (want_init_on_free() || want_init_on_alloc(flags)) 4323 4323 memset((void *)p + size, 0, old_size - size); 4324 4324 vm->requested_size = size; 4325 - kasan_poison_vmalloc(p + size, old_size - size); 4325 + kasan_vrealloc(p, old_size, size); 4326 4326 return (void *)p; 4327 4327 } 4328 4328 ··· 4330 4330 * We already have the bytes available in the allocation; use them. 4331 4331 */ 4332 4332 if (size <= alloced_size) { 4333 - kasan_unpoison_vmalloc(p + old_size, size - old_size, 4334 - KASAN_VMALLOC_PROT_NORMAL | 4335 - KASAN_VMALLOC_VM_ALLOC | 4336 - KASAN_VMALLOC_KEEP_TAG); 4337 4333 /* 4338 4334 * No need to zero memory here, as unused memory will have 4339 4335 * already been zeroed at initial allocation time or during 4340 4336 * realloc shrink time. 4341 4337 */ 4342 4338 vm->requested_size = size; 4339 + kasan_vrealloc(p, old_size, size); 4343 4340 return (void *)p; 4344 4341 } 4345 4342