Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: vmalloc: support more granular vrealloc() sizing

Introduce struct vm_struct::requested_size so that the requested
(re)allocation size is retained separately from the allocated area size.
This means that KASAN will poison exactly the spans of requested
bytes. This also means we can support growing the usable portion of an
allocation in place, whenever the existing area's allocation is already
large enough to hold the new requested size.

Link: https://lkml.kernel.org/r/20250426001105.it.679-kees@kernel.org
Fixes: 3ddc2fefe6f3 ("mm: vmalloc: implement vrealloc()")
Signed-off-by: Kees Cook <kees@kernel.org>
Reported-by: Erhard Furtner <erhard_f@mailbox.org>
Closes: https://lore.kernel.org/all/20250408192503.6149a816@outsider.home/
Reviewed-by: Danilo Krummrich <dakr@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Kees Cook and committed by Andrew Morton.
a0309faf a8efadda

+25 -7
+1
include/linux/vmalloc.h
···
 	unsigned int nr_pages;
 	phys_addr_t phys_addr;
 	const void *caller;
+	unsigned long requested_size;
 };

 struct vmap_area {
+24 -7
mm/vmalloc.c
···
 {
 	vm->flags = flags;
 	vm->addr = (void *)va->va_start;
-	vm->size = va_size(va);
+	vm->size = vm->requested_size = va_size(va);
 	vm->caller = caller;
 	va->vm = vm;
 }
···

 	area->flags = flags;
 	area->caller = caller;
+	area->requested_size = requested_size;

 	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
 	if (IS_ERR(va)) {
···
  */
 void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 {
+	struct vm_struct *vm = NULL;
+	size_t alloced_size = 0;
 	size_t old_size = 0;
 	void *n;
···
 	}

 	if (p) {
-		struct vm_struct *vm;
-
 		vm = find_vm_area(p);
 		if (unlikely(!vm)) {
 			WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p);
 			return NULL;
 		}

-		old_size = get_vm_area_size(vm);
+		alloced_size = get_vm_area_size(vm);
+		old_size = vm->requested_size;
+		if (WARN(alloced_size < old_size,
+			 "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
+			return NULL;
 	}
···
 	 * would be a good heuristic for when to shrink the vm_area?
 	 */
 	if (size <= old_size) {
-		/* Zero out spare memory. */
-		if (want_init_on_alloc(flags))
+		/* Zero out "freed" memory. */
+		if (want_init_on_free())
 			memset((void *)p + size, 0, old_size - size);
+		vm->requested_size = size;
 		kasan_poison_vmalloc(p + size, old_size - size);
-		kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL);
 		return (void *)p;
+	}
+
+	/*
+	 * We already have the bytes available in the allocation; use them.
+	 */
+	if (size <= alloced_size) {
+		kasan_unpoison_vmalloc(p + old_size, size - old_size,
+				       KASAN_VMALLOC_PROT_NORMAL);
+		/* Zero out "alloced" memory. */
+		if (want_init_on_alloc(flags))
+			memset((void *)p + old_size, 0, size - old_size);
+		vm->requested_size = size;
 	}

 	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */