Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

alloc_tag: allocate percpu counters for module tags dynamically

When a module gets unloaded it checks whether any of its tags are still in
use and, if so, we keep the memory containing the module's allocation tags
alive until all tags are unused. However, the percpu counters referenced by
the tags are freed by free_module(). This will lead to a UAF if the memory
allocated by a module is accessed after the module was unloaded.

To fix this we allocate percpu counters for module allocation tags
dynamically and we keep them alive for tags which are still in use after
module unloading. This also removes the requirement of a larger
PERCPU_MODULE_RESERVE when memory allocation profiling is enabled, because
percpu memory for counters does not need to be reserved anymore.

Link: https://lkml.kernel.org/r/20250517000739.5930-1-surenb@google.com
Fixes: 0db6f8d7820a ("alloc_tag: load module tags into separate contiguous memory")
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Reported-by: David Wang <00107082@163.com>
Closes: https://lore.kernel.org/all/20250516131246.6244-1-00107082@163.com/
Tested-by: David Wang <00107082@163.com>
Cc: Christoph Lameter (Ampere) <cl@gentwo.org>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Suren Baghdasaryan and committed by
Andrew Morton
12ca42c2 221fcbf7

+88 -28
+12
include/linux/alloc_tag.h
··· 104 104 105 105 #else /* ARCH_NEEDS_WEAK_PER_CPU */ 106 106 107 + #ifdef MODULE 108 + 109 + #define DEFINE_ALLOC_TAG(_alloc_tag) \ 110 + static struct alloc_tag _alloc_tag __used __aligned(8) \ 111 + __section(ALLOC_TAG_SECTION_NAME) = { \ 112 + .ct = CODE_TAG_INIT, \ 113 + .counters = NULL }; 114 + 115 + #else /* MODULE */ 116 + 107 117 #define DEFINE_ALLOC_TAG(_alloc_tag) \ 108 118 static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \ 109 119 static struct alloc_tag _alloc_tag __used __aligned(8) \ 110 120 __section(ALLOC_TAG_SECTION_NAME) = { \ 111 121 .ct = CODE_TAG_INIT, \ 112 122 .counters = &_alloc_tag_cntr }; 123 + 124 + #endif /* MODULE */ 113 125 114 126 #endif /* ARCH_NEEDS_WEAK_PER_CPU */ 115 127
+4 -4
include/linux/codetag.h
··· 36 36 struct codetag_type_desc { 37 37 const char *section; 38 38 size_t tag_size; 39 - void (*module_load)(struct codetag_type *cttype, 40 - struct codetag_module *cmod); 41 - void (*module_unload)(struct codetag_type *cttype, 42 - struct codetag_module *cmod); 39 + void (*module_load)(struct module *mod, 40 + struct codetag *start, struct codetag *end); 41 + void (*module_unload)(struct module *mod, 42 + struct codetag *start, struct codetag *end); 43 43 #ifdef CONFIG_MODULES 44 44 void (*module_replaced)(struct module *mod, struct module *new_mod); 45 45 bool (*needs_section_mem)(struct module *mod, unsigned long size);
-4
include/linux/percpu.h
··· 15 15 16 16 /* enough to cover all DEFINE_PER_CPUs in modules */ 17 17 #ifdef CONFIG_MODULES 18 - #ifdef CONFIG_MEM_ALLOC_PROFILING 19 - #define PERCPU_MODULE_RESERVE (8 << 13) 20 - #else 21 18 #define PERCPU_MODULE_RESERVE (8 << 10) 22 - #endif 23 19 #else 24 20 #define PERCPU_MODULE_RESERVE 0 25 21 #endif
+69 -18
lib/alloc_tag.c
··· 350 350 return size >= sizeof(struct alloc_tag); 351 351 } 352 352 353 - static struct alloc_tag *find_used_tag(struct alloc_tag *from, struct alloc_tag *to) 353 + static bool clean_unused_counters(struct alloc_tag *start_tag, 354 + struct alloc_tag *end_tag) 354 355 { 355 - while (from <= to) { 356 + struct alloc_tag *tag; 357 + bool ret = true; 358 + 359 + for (tag = start_tag; tag <= end_tag; tag++) { 356 360 struct alloc_tag_counters counter; 357 361 358 - counter = alloc_tag_read(from); 359 - if (counter.bytes) 360 - return from; 361 - from++; 362 + if (!tag->counters) 363 + continue; 364 + 365 + counter = alloc_tag_read(tag); 366 + if (!counter.bytes) { 367 + free_percpu(tag->counters); 368 + tag->counters = NULL; 369 + } else { 370 + ret = false; 371 + } 362 372 } 363 373 364 - return NULL; 374 + return ret; 365 375 } 366 376 367 377 /* Called with mod_area_mt locked */ ··· 381 371 struct module *val; 382 372 383 373 mas_for_each(&mas, val, module_tags.size) { 374 + struct alloc_tag *start_tag; 375 + struct alloc_tag *end_tag; 376 + 384 377 if (val != &unloaded_mod) 385 378 continue; 386 379 387 380 /* Release area if all tags are unused */ 388 - if (!find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index), 389 - (struct alloc_tag *)(module_tags.start_addr + mas.last))) 381 + start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index); 382 + end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last); 383 + if (clean_unused_counters(start_tag, end_tag)) 390 384 mas_erase(&mas); 391 385 } 392 386 } ··· 575 561 static void release_module_tags(struct module *mod, bool used) 576 562 { 577 563 MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size); 578 - struct alloc_tag *tag; 564 + struct alloc_tag *start_tag; 565 + struct alloc_tag *end_tag; 579 566 struct module *val; 580 567 581 568 mas_lock(&mas); ··· 590 575 if (!used) 591 576 goto release_area; 592 577 593 - /* Find out if the area is used */ 594 - tag = find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index), 595 - (struct alloc_tag *)(module_tags.start_addr + mas.last)); 596 - if (tag) { 597 - struct alloc_tag_counters counter = alloc_tag_read(tag); 578 + start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index); 579 + end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last); 580 + if (!clean_unused_counters(start_tag, end_tag)) { 581 + struct alloc_tag *tag; 598 582 599 - pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n", 600 - tag->ct.filename, tag->ct.lineno, tag->ct.modname, 601 - tag->ct.function, counter.bytes); 583 + for (tag = start_tag; tag <= end_tag; tag++) { 584 + struct alloc_tag_counters counter; 585 + 586 + if (!tag->counters) 587 + continue; 588 + 589 + counter = alloc_tag_read(tag); 590 + pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n", 591 + tag->ct.filename, tag->ct.lineno, tag->ct.modname, 592 + tag->ct.function, counter.bytes); 593 + } 602 594 } else { 603 595 used = false; 604 596 } ··· 616 594 mas_store(&mas, NULL); 617 595 out: 618 596 mas_unlock(&mas); 597 + } 598 + 599 + static void load_module(struct module *mod, struct codetag *start, struct codetag *stop) 600 + { 601 + /* Allocate module alloc_tag percpu counters */ 602 + struct alloc_tag *start_tag; 603 + struct alloc_tag *stop_tag; 604 + struct alloc_tag *tag; 605 + 606 + if (!mod) 607 + return; 608 + 609 + start_tag = ct_to_alloc_tag(start); 610 + stop_tag = ct_to_alloc_tag(stop); 611 + for (tag = start_tag; tag < stop_tag; tag++) { 612 + WARN_ON(tag->counters); 613 + tag->counters = alloc_percpu(struct alloc_tag_counters); 614 + if (!tag->counters) { 615 + while (--tag >= start_tag) { 616 + free_percpu(tag->counters); 617 + tag->counters = NULL; 618 + } 619 + shutdown_mem_profiling(true); 620 + pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s. Memory allocation profiling is disabled!\n", 621 + mod->name); 622 + break; 623 + } 624 + } 619 625 } 620 626 621 627 static void replace_module(struct module *mod, struct module *new_mod) ··· 807 757 .needs_section_mem = needs_section_mem, 808 758 .alloc_section_mem = reserve_module_tags, 809 759 .free_section_mem = release_module_tags, 760 + .module_load = load_module, 810 761 .module_replaced = replace_module, 811 762 #endif 812 763 };
+3 -2
lib/codetag.c
··· 194 194 if (err >= 0) { 195 195 cttype->count += range_size(cttype, &range); 196 196 if (cttype->desc.module_load) 197 - cttype->desc.module_load(cttype, cmod); 197 + cttype->desc.module_load(mod, range.start, range.stop); 198 198 } 199 199 up_write(&cttype->mod_lock); 200 200 ··· 333 333 } 334 334 if (found) { 335 335 if (cttype->desc.module_unload) 336 - cttype->desc.module_unload(cttype, cmod); 336 + cttype->desc.module_unload(cmod->mod, 337 + cmod->range.start, cmod->range.stop); 337 338 338 339 cttype->count -= range_size(cttype, &cmod->range); 339 340 idr_remove(&cttype->mod_idr, mod_id);