Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'modules-6.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/modules/linux

Pull modules updates from Petr Pavlu:

- Use RCU instead of RCU-sched

The mix of rcu_read_lock(), rcu_read_lock_sched() and
preempt_disable() in the module code and its users has
been replaced with just rcu_read_lock()

- The rest of the changes are smaller fixes and updates

* tag 'modules-6.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/modules/linux: (32 commits)
MAINTAINERS: Update the MODULE SUPPORT section
module: Remove unnecessary size argument when calling strscpy()
module: Replace deprecated strncpy() with strscpy()
params: Annotate struct module_param_attrs with __counted_by()
bug: Use RCU instead RCU-sched to protect module_bug_list.
static_call: Use RCU in all users of __module_text_address().
kprobes: Use RCU in all users of __module_text_address().
bpf: Use RCU in all users of __module_text_address().
jump_label: Use RCU in all users of __module_text_address().
jump_label: Use RCU in all users of __module_address().
x86: Use RCU in all users of __module_address().
cfi: Use RCU while invoking __module_address().
powerpc/ftrace: Use RCU in all users of __module_text_address().
LoongArch: ftrace: Use RCU in all users of __module_text_address().
LoongArch/orc: Use RCU in all users of __module_address().
arm64: module: Use RCU in all users of __module_text_address().
ARM: module: Use RCU in all users of __module_text_address().
module: Use RCU in all users of __module_text_address().
module: Use RCU in all users of __module_address().
module: Use RCU in search_module_extables().
...

+160 -250
+3 -1
MAINTAINERS
··· 16210 16210 16211 16211 MODULE SUPPORT 16212 16212 M: Luis Chamberlain <mcgrof@kernel.org> 16213 - R: Petr Pavlu <petr.pavlu@suse.com> 16213 + M: Petr Pavlu <petr.pavlu@suse.com> 16214 16214 R: Sami Tolvanen <samitolvanen@google.com> 16215 16215 R: Daniel Gomez <da.gomez@samsung.com> 16216 16216 L: linux-modules@vger.kernel.org ··· 16221 16221 F: include/linux/module*.h 16222 16222 F: kernel/module/ 16223 16223 F: lib/test_kmod.c 16224 + F: lib/tests/module/ 16224 16225 F: scripts/module* 16225 16226 F: tools/testing/selftests/kmod/ 16227 + F: tools/testing/selftests/module/ 16226 16228 16227 16229 MONOLITHIC POWER SYSTEM PMIC DRIVER 16228 16230 M: Saravanan Sekar <sravanhome@gmail.com>
+1 -3
arch/arm/kernel/module-plts.c
··· 285 285 struct module *mod; 286 286 bool ret; 287 287 288 - preempt_disable(); 288 + guard(rcu)(); 289 289 mod = __module_text_address(loc); 290 290 ret = mod && (loc - (u32)mod->arch.core.plt_ent < mod->arch.core.plt_count * PLT_ENT_SIZE || 291 291 loc - (u32)mod->arch.init.plt_ent < mod->arch.init.plt_count * PLT_ENT_SIZE); 292 - preempt_enable(); 293 - 294 292 return ret; 295 293 }
+3 -4
arch/arm64/kernel/ftrace.c
··· 320 320 * dealing with an out-of-range condition, we can assume it 321 321 * is due to a module being loaded far away from the kernel. 322 322 * 323 - * NOTE: __module_text_address() must be called with preemption 324 - * disabled, but we can rely on ftrace_lock to ensure that 'mod' 323 + * NOTE: __module_text_address() must be called within a RCU read 324 + * section, but we can rely on ftrace_lock to ensure that 'mod' 325 325 * retains its validity throughout the remainder of this code. 326 326 */ 327 327 if (!mod) { 328 - preempt_disable(); 328 + guard(rcu)(); 329 329 mod = __module_text_address(pc); 330 - preempt_enable(); 331 330 } 332 331 333 332 if (WARN_ON(!mod))
+4 -5
arch/loongarch/kernel/ftrace_dyn.c
··· 85 85 * dealing with an out-of-range condition, we can assume it 86 86 * is due to a module being loaded far away from the kernel. 87 87 * 88 - * NOTE: __module_text_address() must be called with preemption 89 - * disabled, but we can rely on ftrace_lock to ensure that 'mod' 88 + * NOTE: __module_text_address() must be called within a RCU read 89 + * section, but we can rely on ftrace_lock to ensure that 'mod' 90 90 * retains its validity throughout the remainder of this code. 91 91 */ 92 92 if (!mod) { 93 - preempt_disable(); 94 - mod = __module_text_address(pc); 95 - preempt_enable(); 93 + scoped_guard(rcu) 94 + mod = __module_text_address(pc); 96 95 } 97 96 98 97 if (WARN_ON(!mod))
+1 -3
arch/loongarch/kernel/unwind_orc.c
··· 399 399 return false; 400 400 401 401 /* Don't let modules unload while we're reading their ORC data. */ 402 - preempt_disable(); 402 + guard(rcu)(); 403 403 404 404 if (is_entry_func(state->pc)) 405 405 goto end; ··· 514 514 if (!__kernel_text_address(state->pc)) 515 515 goto err; 516 516 517 - preempt_enable(); 518 517 return true; 519 518 520 519 err: 521 520 state->error = true; 522 521 523 522 end: 524 - preempt_enable(); 525 523 state->stack_info.type = STACK_TYPE_UNKNOWN; 526 524 return false; 527 525 }
+2 -4
arch/powerpc/kernel/trace/ftrace.c
··· 115 115 { 116 116 struct module *mod = NULL; 117 117 118 - preempt_disable(); 119 - mod = __module_text_address(ip); 120 - preempt_enable(); 121 - 118 + scoped_guard(rcu) 119 + mod = __module_text_address(ip); 122 120 if (!mod) 123 121 pr_err("No module loaded at addr=%lx\n", ip); 124 122
+2 -4
arch/powerpc/kernel/trace/ftrace_64_pg.c
··· 120 120 { 121 121 struct module *mod; 122 122 123 - preempt_disable(); 124 - mod = __module_text_address(rec->ip); 125 - preempt_enable(); 126 - 123 + scoped_guard(rcu) 124 + mod = __module_text_address(rec->ip); 127 125 if (!mod) 128 126 pr_err("No module loaded at addr=%lx\n", rec->ip); 129 127
+1 -2
arch/x86/kernel/callthunks.c
··· 98 98 #ifdef CONFIG_MODULES 99 99 struct module *mod; 100 100 101 - preempt_disable(); 101 + guard(rcu)(); 102 102 mod = __module_address((unsigned long)addr); 103 103 if (mod && within_module_core((unsigned long)addr, mod)) 104 104 ret = true; 105 - preempt_enable(); 106 105 #endif 107 106 return ret; 108 107 }
+1 -3
arch/x86/kernel/unwind_orc.c
··· 476 476 return false; 477 477 478 478 /* Don't let modules unload while we're reading their ORC data. */ 479 - preempt_disable(); 479 + guard(rcu)(); 480 480 481 481 /* End-of-stack check for user tasks: */ 482 482 if (state->regs && user_mode(state->regs)) ··· 669 669 goto err; 670 670 } 671 671 672 - preempt_enable(); 673 672 return true; 674 673 675 674 err: 676 675 state->error = true; 677 676 678 677 the_end: 679 - preempt_enable(); 680 678 state->stack_info.type = STACK_TYPE_UNKNOWN; 681 679 return false; 682 680 }
+1 -2
include/linux/kallsyms.h
··· 55 55 if (is_ksym_addr((unsigned long)ptr)) 56 56 return ptr; 57 57 58 - preempt_disable(); 58 + guard(rcu)(); 59 59 mod = __module_address((unsigned long)ptr); 60 60 61 61 if (mod) 62 62 ptr = dereference_module_function_descriptor(mod, ptr); 63 - preempt_enable(); 64 63 #endif 65 64 return ptr; 66 65 }
+1 -1
include/linux/module.h
··· 665 665 return within_module_init(addr, mod) || within_module_core(addr, mod); 666 666 } 667 667 668 - /* Search for module by name: must be in a RCU-sched critical section. */ 668 + /* Search for module by name: must be in a RCU critical section. */ 669 669 struct module *find_module(const char *name); 670 670 671 671 extern void __noreturn __module_put_and_kthread_exit(struct module *mod,
+1 -4
kernel/cfi.c
··· 73 73 struct module *mod; 74 74 bool found = false; 75 75 76 - rcu_read_lock_sched_notrace(); 77 - 76 + guard(rcu)(); 78 77 mod = __module_address(addr); 79 78 if (mod) 80 79 found = is_trap(addr, mod->kcfi_traps, mod->kcfi_traps_end); 81 - 82 - rcu_read_unlock_sched_notrace(); 83 80 84 81 return found; 85 82 }
+15 -16
kernel/jump_label.c
··· 653 653 struct module *mod; 654 654 int ret; 655 655 656 - preempt_disable(); 657 - mod = __module_text_address((unsigned long)start); 658 - WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); 659 - if (!try_module_get(mod)) 660 - mod = NULL; 661 - preempt_enable(); 662 - 656 + scoped_guard(rcu) { 657 + mod = __module_text_address((unsigned long)start); 658 + WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); 659 + if (!try_module_get(mod)) 660 + mod = NULL; 661 + } 663 662 if (!mod) 664 663 return 0; 665 664 ··· 745 746 kfree(jlm); 746 747 return -ENOMEM; 747 748 } 748 - preempt_disable(); 749 - jlm2->mod = __module_address((unsigned long)key); 750 - preempt_enable(); 749 + scoped_guard(rcu) 750 + jlm2->mod = __module_address((unsigned long)key); 751 + 751 752 jlm2->entries = static_key_entries(key); 752 753 jlm2->next = NULL; 753 754 static_key_set_mod(key, jlm2); ··· 905 906 return; 906 907 } 907 908 908 - preempt_disable(); 909 - mod = __module_address((unsigned long)key); 910 - if (mod) { 911 - stop = mod->jump_entries + mod->num_jump_entries; 912 - init = mod->state == MODULE_STATE_COMING; 909 + scoped_guard(rcu) { 910 + mod = __module_address((unsigned long)key); 911 + if (mod) { 912 + stop = mod->jump_entries + mod->num_jump_entries; 913 + init = mod->state == MODULE_STATE_COMING; 914 + } 913 915 } 914 - preempt_enable(); 915 916 #endif 916 917 entry = static_key_entries(key); 917 918 /* if there are no users, entry can be NULL */
+1 -1
kernel/kprobes.c
··· 1547 1547 /* Ensure the address is in a text area, and find a module if exists. */ 1548 1548 *probed_mod = NULL; 1549 1549 if (!core_kernel_text((unsigned long) p->addr)) { 1550 - guard(preempt)(); 1550 + guard(rcu)(); 1551 1551 *probed_mod = __module_text_address((unsigned long) p->addr); 1552 1552 if (!(*probed_mod)) 1553 1553 return -EINVAL;
+1 -3
kernel/livepatch/core.c
··· 59 59 if (!klp_is_module(obj)) 60 60 return; 61 61 62 - rcu_read_lock_sched(); 62 + guard(rcu)(); 63 63 /* 64 64 * We do not want to block removal of patched modules and therefore 65 65 * we do not take a reference here. The patches are removed by ··· 75 75 */ 76 76 if (mod && mod->klp_alive) 77 77 obj->mod = mod; 78 - 79 - rcu_read_unlock_sched(); 80 78 } 81 79 82 80 static bool klp_initialized(void)
-11
kernel/module/internal.h
··· 124 124 #define for_each_modinfo_entry(entry, info, name) \ 125 125 for (entry = get_modinfo(info, name); entry; entry = get_next_modinfo(info, name, entry)) 126 126 127 - static inline void module_assert_mutex_or_preempt(void) 128 - { 129 - #ifdef CONFIG_LOCKDEP 130 - if (unlikely(!debug_locks)) 131 - return; 132 - 133 - WARN_ON_ONCE(!rcu_read_lock_sched_held() && 134 - !lockdep_is_held(&module_mutex)); 135 - #endif 136 - } 137 - 138 127 static inline unsigned long kernel_symbol_value(const struct kernel_symbol *sym) 139 128 { 140 129 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+27 -46
kernel/module/kallsyms.c
··· 177 177 unsigned long strtab_size; 178 178 void *data_base = mod->mem[MOD_DATA].base; 179 179 void *init_data_base = mod->mem[MOD_INIT_DATA].base; 180 + struct mod_kallsyms *kallsyms; 180 181 181 - /* Set up to point into init section. */ 182 - mod->kallsyms = (void __rcu *)init_data_base + 183 - info->mod_kallsyms_init_off; 182 + kallsyms = init_data_base + info->mod_kallsyms_init_off; 184 183 185 - rcu_read_lock(); 186 - /* The following is safe since this pointer cannot change */ 187 - rcu_dereference(mod->kallsyms)->symtab = (void *)symsec->sh_addr; 188 - rcu_dereference(mod->kallsyms)->num_symtab = symsec->sh_size / sizeof(Elf_Sym); 184 + kallsyms->symtab = (void *)symsec->sh_addr; 185 + kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); 189 186 /* Make sure we get permanent strtab: don't use info->strtab. */ 190 - rcu_dereference(mod->kallsyms)->strtab = 191 - (void *)info->sechdrs[info->index.str].sh_addr; 192 - rcu_dereference(mod->kallsyms)->typetab = init_data_base + info->init_typeoffs; 187 + kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; 188 + kallsyms->typetab = init_data_base + info->init_typeoffs; 193 189 194 190 /* 195 191 * Now populate the cut down core kallsyms for after init ··· 195 199 mod->core_kallsyms.strtab = s = data_base + info->stroffs; 196 200 mod->core_kallsyms.typetab = data_base + info->core_typeoffs; 197 201 strtab_size = info->core_typeoffs - info->stroffs; 198 - src = rcu_dereference(mod->kallsyms)->symtab; 199 - for (ndst = i = 0; i < rcu_dereference(mod->kallsyms)->num_symtab; i++) { 200 - rcu_dereference(mod->kallsyms)->typetab[i] = elf_type(src + i, info); 202 + src = kallsyms->symtab; 203 + for (ndst = i = 0; i < kallsyms->num_symtab; i++) { 204 + kallsyms->typetab[i] = elf_type(src + i, info); 201 205 if (i == 0 || is_livepatch_module(mod) || 202 206 is_core_symbol(src + i, info->sechdrs, info->hdr->e_shnum, 203 207 info->index.pcpu)) { 204 208 ssize_t ret; 205 209 206 210 
mod->core_kallsyms.typetab[ndst] = 207 - rcu_dereference(mod->kallsyms)->typetab[i]; 211 + kallsyms->typetab[i]; 208 212 dst[ndst] = src[i]; 209 213 dst[ndst++].st_name = s - mod->core_kallsyms.strtab; 210 - ret = strscpy(s, 211 - &rcu_dereference(mod->kallsyms)->strtab[src[i].st_name], 214 + ret = strscpy(s, &kallsyms->strtab[src[i].st_name], 212 215 strtab_size); 213 216 if (ret < 0) 214 217 break; ··· 215 220 strtab_size -= ret + 1; 216 221 } 217 222 } 218 - rcu_read_unlock(); 223 + 224 + /* Set up to point into init section. */ 225 + rcu_assign_pointer(mod->kallsyms, kallsyms); 219 226 mod->core_kallsyms.num_symtab = ndst; 220 227 } 221 228 ··· 257 260 { 258 261 unsigned int i, best = 0; 259 262 unsigned long nextval, bestval; 260 - struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); 263 + struct mod_kallsyms *kallsyms = rcu_dereference(mod->kallsyms); 261 264 struct module_memory *mod_mem; 262 265 263 266 /* At worse, next value is at end of module */ ··· 316 319 317 320 /* 318 321 * For kallsyms to ask for address resolution. NULL means not found. Careful 319 - * not to lock to avoid deadlock on oopses, simply disable preemption. 322 + * not to lock to avoid deadlock on oopses, RCU is enough. 
320 323 */ 321 324 int module_address_lookup(unsigned long addr, 322 325 unsigned long *size, ··· 329 332 int ret = 0; 330 333 struct module *mod; 331 334 332 - preempt_disable(); 335 + guard(rcu)(); 333 336 mod = __module_address(addr); 334 337 if (mod) { 335 338 if (modname) ··· 347 350 if (sym) 348 351 ret = strscpy(namebuf, sym, KSYM_NAME_LEN); 349 352 } 350 - preempt_enable(); 351 - 352 353 return ret; 353 354 } 354 355 ··· 354 359 { 355 360 struct module *mod; 356 361 357 - preempt_disable(); 362 + guard(rcu)(); 358 363 list_for_each_entry_rcu(mod, &modules, list) { 359 364 if (mod->state == MODULE_STATE_UNFORMED) 360 365 continue; ··· 366 371 goto out; 367 372 368 373 strscpy(symname, sym, KSYM_NAME_LEN); 369 - preempt_enable(); 370 374 return 0; 371 375 } 372 376 } 373 377 out: 374 - preempt_enable(); 375 378 return -ERANGE; 376 379 } 377 380 ··· 378 385 { 379 386 struct module *mod; 380 387 381 - preempt_disable(); 388 + guard(rcu)(); 382 389 list_for_each_entry_rcu(mod, &modules, list) { 383 390 struct mod_kallsyms *kallsyms; 384 391 385 392 if (mod->state == MODULE_STATE_UNFORMED) 386 393 continue; 387 - kallsyms = rcu_dereference_sched(mod->kallsyms); 394 + kallsyms = rcu_dereference(mod->kallsyms); 388 395 if (symnum < kallsyms->num_symtab) { 389 396 const Elf_Sym *sym = &kallsyms->symtab[symnum]; 390 397 ··· 393 400 strscpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN); 394 401 strscpy(module_name, mod->name, MODULE_NAME_LEN); 395 402 *exported = is_exported(name, *value, mod); 396 - preempt_enable(); 397 403 return 0; 398 404 } 399 405 symnum -= kallsyms->num_symtab; 400 406 } 401 - preempt_enable(); 402 407 return -ERANGE; 403 408 } 404 409 ··· 404 413 static unsigned long __find_kallsyms_symbol_value(struct module *mod, const char *name) 405 414 { 406 415 unsigned int i; 407 - struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); 416 + struct mod_kallsyms *kallsyms = rcu_dereference(mod->kallsyms); 408 417 409 418 for 
(i = 0; i < kallsyms->num_symtab; i++) { 410 419 const Elf_Sym *sym = &kallsyms->symtab[i]; ··· 444 453 /* Look for this name: can be of form module:name. */ 445 454 unsigned long module_kallsyms_lookup_name(const char *name) 446 455 { 447 - unsigned long ret; 448 - 449 456 /* Don't lock: we're in enough trouble already. */ 450 - preempt_disable(); 451 - ret = __module_kallsyms_lookup_name(name); 452 - preempt_enable(); 453 - return ret; 457 + guard(rcu)(); 458 + return __module_kallsyms_lookup_name(name); 454 459 } 455 460 456 461 unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name) 457 462 { 458 - unsigned long ret; 459 - 460 - preempt_disable(); 461 - ret = __find_kallsyms_symbol_value(mod, name); 462 - preempt_enable(); 463 - return ret; 463 + guard(rcu)(); 464 + return __find_kallsyms_symbol_value(mod, name); 464 465 } 465 466 466 467 int module_kallsyms_on_each_symbol(const char *modname, ··· 473 490 if (modname && strcmp(modname, mod->name)) 474 491 continue; 475 492 476 - /* Use rcu_dereference_sched() to remain compliant with the sparse tool */ 477 - preempt_disable(); 478 - kallsyms = rcu_dereference_sched(mod->kallsyms); 479 - preempt_enable(); 493 + kallsyms = rcu_dereference_check(mod->kallsyms, 494 + lockdep_is_held(&module_mutex)); 480 495 481 496 for (i = 0; i < kallsyms->num_symtab; i++) { 482 497 const Elf_Sym *sym = &kallsyms->symtab[i];
+39 -70
kernel/module/main.c
··· 67 67 68 68 /* 69 69 * Mutex protects: 70 - * 1) List of modules (also safely readable with preempt_disable), 70 + * 1) List of modules (also safely readable within RCU read section), 71 71 * 2) module_use links, 72 72 * 3) mod_tree.addr_min/mod_tree.addr_max. 73 73 * (delete and add uses RCU list operations). ··· 331 331 332 332 /* 333 333 * Find an exported symbol and return it, along with, (optional) crc and 334 - * (optional) module which owns it. Needs preempt disabled or module_mutex. 334 + * (optional) module which owns it. Needs RCU or module_mutex. 335 335 */ 336 336 bool find_symbol(struct find_symbol_arg *fsa) 337 337 { ··· 344 344 }; 345 345 struct module *mod; 346 346 unsigned int i; 347 - 348 - module_assert_mutex_or_preempt(); 349 347 350 348 for (i = 0; i < ARRAY_SIZE(arr); i++) 351 349 if (find_exported_symbol_in_section(&arr[i], NULL, fsa)) ··· 372 374 } 373 375 374 376 /* 375 - * Search for module by name: must hold module_mutex (or preempt disabled 376 - * for read-only access). 377 + * Search for module by name: must hold module_mutex (or RCU for read-only 378 + * access). 
377 379 */ 378 380 struct module *find_module_all(const char *name, size_t len, 379 381 bool even_unformed) 380 382 { 381 383 struct module *mod; 382 - 383 - module_assert_mutex_or_preempt(); 384 384 385 385 list_for_each_entry_rcu(mod, &modules, list, 386 386 lockdep_is_held(&module_mutex)) { ··· 450 454 struct module *mod; 451 455 unsigned int cpu; 452 456 453 - preempt_disable(); 454 - 457 + guard(rcu)(); 455 458 list_for_each_entry_rcu(mod, &modules, list) { 456 459 if (mod->state == MODULE_STATE_UNFORMED) 457 460 continue; ··· 467 472 per_cpu_ptr(mod->percpu, 468 473 get_boot_cpu_id()); 469 474 } 470 - preempt_enable(); 471 475 return true; 472 476 } 473 477 } 474 478 } 475 - 476 - preempt_enable(); 477 479 return false; 478 480 } 479 481 ··· 787 795 async_synchronize_full(); 788 796 789 797 /* Store the name and taints of the last unloaded module for diagnostic purposes */ 790 - strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name)); 791 - strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints)); 798 + strscpy(last_unloaded_module.name, mod->name); 799 + strscpy(last_unloaded_module.taints, module_flags(mod, buf, false)); 792 800 793 801 free_module(mod); 794 802 /* someone could wait for the module in add_unformed_module() */ ··· 806 814 .gplok = true, 807 815 }; 808 816 809 - preempt_disable(); 817 + guard(rcu)(); 810 818 BUG_ON(!find_symbol(&fsa)); 811 819 module_put(fsa.owner); 812 - preempt_enable(); 813 820 } 814 821 EXPORT_SYMBOL(__symbol_put); 815 822 ··· 823 832 824 833 /* 825 834 * Even though we hold a reference on the module; we still need to 826 - * disable preemption in order to safely traverse the data structure. 835 + * RCU read section in order to safely traverse the data structure. 
827 836 */ 828 - preempt_disable(); 837 + guard(rcu)(); 829 838 modaddr = __module_text_address(a); 830 839 BUG_ON(!modaddr); 831 840 module_put(modaddr); 832 - preempt_enable(); 833 841 } 834 842 EXPORT_SYMBOL_GPL(symbol_put_addr); 835 843 ··· 1179 1189 1180 1190 getname: 1181 1191 /* We must make copy under the lock if we failed to get ref. */ 1182 - strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN); 1192 + strscpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN); 1183 1193 unlock: 1184 1194 mutex_unlock(&module_mutex); 1185 1195 return fsa.sym; ··· 1331 1341 mod_tree_remove(mod); 1332 1342 /* Remove this module from bug list, this uses list_del_rcu */ 1333 1343 module_bug_cleanup(mod); 1334 - /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */ 1344 + /* Wait for RCU synchronizing before releasing mod->list and buglist. */ 1335 1345 synchronize_rcu(); 1336 1346 if (try_add_tainted_module(mod)) 1337 1347 pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n", ··· 1354 1364 .warn = true, 1355 1365 }; 1356 1366 1357 - preempt_disable(); 1358 - if (!find_symbol(&fsa)) 1359 - goto fail; 1360 - if (fsa.license != GPL_ONLY) { 1361 - pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n", 1362 - symbol); 1363 - goto fail; 1367 + scoped_guard(rcu) { 1368 + if (!find_symbol(&fsa)) 1369 + return NULL; 1370 + if (fsa.license != GPL_ONLY) { 1371 + pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n", 1372 + symbol); 1373 + return NULL; 1374 + } 1375 + if (strong_try_module_get(fsa.owner)) 1376 + return NULL; 1364 1377 } 1365 - if (strong_try_module_get(fsa.owner)) 1366 - goto fail; 1367 - preempt_enable(); 1368 1378 return (void *)kernel_symbol_value(fsa.sym); 1369 - fail: 1370 - preempt_enable(); 1371 - return NULL; 1372 1379 } 1373 1380 EXPORT_SYMBOL_GPL(__symbol_get); 1374 1381 ··· 3000 3013 #endif 3001 3014 /* 3002 3015 * We want to free module_init, but be aware that kallsyms may be 3003 - * walking 
this with preempt disabled. In all the failure paths, we 3016 + * walking this within an RCU read section. In all the failure paths, we 3004 3017 * call synchronize_rcu(), but we don't want to slow down the success 3005 3018 * path. execmem_free() cannot be called in an interrupt, so do the 3006 3019 * work and call synchronize_rcu() in a work queue. ··· 3667 3680 /* Given an address, look for it in the module exception tables. */ 3668 3681 const struct exception_table_entry *search_module_extables(unsigned long addr) 3669 3682 { 3670 - const struct exception_table_entry *e = NULL; 3671 3683 struct module *mod; 3672 3684 3673 - preempt_disable(); 3685 + guard(rcu)(); 3674 3686 mod = __module_address(addr); 3675 3687 if (!mod) 3676 - goto out; 3688 + return NULL; 3677 3689 3678 3690 if (!mod->num_exentries) 3679 - goto out; 3680 - 3681 - e = search_extable(mod->extable, 3682 - mod->num_exentries, 3683 - addr); 3684 - out: 3685 - preempt_enable(); 3686 - 3691 + return NULL; 3687 3692 /* 3688 - * Now, if we found one, we are running inside it now, hence 3689 - * we cannot unload the module, hence no refcnt needed. 3693 + * The address passed here belongs to a module that is currently 3694 + * invoked (we are running inside it). Therefore its module::refcnt 3695 + * needs already be >0 to ensure that it is not removed at this stage. 3696 + * All other user need to invoke this function within a RCU read 3697 + * section. 3690 3698 */ 3691 - return e; 3699 + return search_extable(mod->extable, mod->num_exentries, addr); 3692 3700 } 3693 3701 3694 3702 /** ··· 3695 3713 */ 3696 3714 bool is_module_address(unsigned long addr) 3697 3715 { 3698 - bool ret; 3699 - 3700 - preempt_disable(); 3701 - ret = __module_address(addr) != NULL; 3702 - preempt_enable(); 3703 - 3704 - return ret; 3716 + guard(rcu)(); 3717 + return __module_address(addr) != NULL; 3705 3718 } 3706 3719 3707 3720 /** 3708 3721 * __module_address() - get the module which contains an address. 
3709 3722 * @addr: the address. 3710 3723 * 3711 - * Must be called with preempt disabled or module mutex held so that 3724 + * Must be called within RCU read section or module mutex held so that 3712 3725 * module doesn't get freed during this. 3713 3726 */ 3714 3727 struct module *__module_address(unsigned long addr) ··· 3721 3744 return NULL; 3722 3745 3723 3746 lookup: 3724 - module_assert_mutex_or_preempt(); 3725 - 3726 3747 mod = mod_find(addr, &mod_tree); 3727 3748 if (mod) { 3728 3749 BUG_ON(!within_module(addr, mod)); ··· 3740 3765 */ 3741 3766 bool is_module_text_address(unsigned long addr) 3742 3767 { 3743 - bool ret; 3744 - 3745 - preempt_disable(); 3746 - ret = __module_text_address(addr) != NULL; 3747 - preempt_enable(); 3748 - 3749 - return ret; 3768 + guard(rcu)(); 3769 + return __module_text_address(addr) != NULL; 3750 3770 } 3751 3771 3752 3772 /** 3753 3773 * __module_text_address() - get the module whose code contains an address. 3754 3774 * @addr: the address. 3755 3775 * 3756 - * Must be called with preempt disabled or module mutex held so that 3776 + * Must be called within RCU read section or module mutex held so that 3757 3777 * module doesn't get freed during this. 3758 3778 */ 3759 3779 struct module *__module_text_address(unsigned long addr) ··· 3771 3801 3772 3802 printk(KERN_DEFAULT "Modules linked in:"); 3773 3803 /* Most callers should already have preempt disabled, but make sure */ 3774 - preempt_disable(); 3804 + guard(rcu)(); 3775 3805 list_for_each_entry_rcu(mod, &modules, list) { 3776 3806 if (mod->state == MODULE_STATE_UNFORMED) 3777 3807 continue; ··· 3779 3809 } 3780 3810 3781 3811 print_unloaded_tainted_modules(); 3782 - preempt_enable(); 3783 3812 if (last_unloaded_module.name[0]) 3784 3813 pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name, 3785 3814 last_unloaded_module.taints);
-2
kernel/module/tracking.c
··· 21 21 { 22 22 struct mod_unload_taint *mod_taint; 23 23 24 - module_assert_mutex_or_preempt(); 25 - 26 24 if (!mod->taints) 27 25 goto out; 28 26
+4 -4
kernel/module/tree_lookup.c
··· 12 12 13 13 /* 14 14 * Use a latched RB-tree for __module_address(); this allows us to use 15 - * RCU-sched lookups of the address from any context. 15 + * RCU lookups of the address from any context. 16 16 * 17 - * This is conditional on PERF_EVENTS || TRACING because those can really hit 18 - * __module_address() hard by doing a lot of stack unwinding; potentially from 19 - * NMI context. 17 + * This is conditional on PERF_EVENTS || TRACING || CFI_CLANG because those can 18 + * really hit __module_address() hard by doing a lot of stack unwinding; 19 + * potentially from NMI context. 20 20 */ 21 21 22 22 static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
+7 -7
kernel/module/version.c
··· 79 79 .name = "module_layout", 80 80 .gplok = true, 81 81 }; 82 + bool have_symbol; 82 83 83 84 /* 84 85 * Since this should be found in kernel (which can't be removed), no 85 - * locking is necessary -- use preempt_disable() to placate lockdep. 86 + * locking is necessary. Regardless use a RCU read section to keep 87 + * lockdep happy. 86 88 */ 87 - preempt_disable(); 88 - if (!find_symbol(&fsa)) { 89 - preempt_enable(); 90 - BUG(); 91 - } 92 - preempt_enable(); 89 + scoped_guard(rcu) 90 + have_symbol = find_symbol(&fsa); 91 + BUG_ON(!have_symbol); 92 + 93 93 return check_version(info, "module_layout", mod, fsa.crc); 94 94 } 95 95
+13 -16
kernel/params.c
··· 551 551 { 552 552 unsigned int num; 553 553 struct attribute_group grp; 554 - struct param_attribute attrs[]; 554 + struct param_attribute attrs[] __counted_by(num); 555 555 }; 556 556 557 557 #ifdef CONFIG_SYSFS ··· 651 651 } 652 652 653 653 /* Enlarge allocations. */ 654 - new_mp = krealloc(mk->mp, 655 - sizeof(*mk->mp) + 656 - sizeof(mk->mp->attrs[0]) * (mk->mp->num + 1), 654 + new_mp = krealloc(mk->mp, struct_size(mk->mp, attrs, mk->mp->num + 1), 657 655 GFP_KERNEL); 658 656 if (!new_mp) 659 657 return -ENOMEM; 660 658 mk->mp = new_mp; 659 + mk->mp->num++; 661 660 662 661 /* Extra pointer for NULL terminator */ 663 - new_attrs = krealloc(mk->mp->grp.attrs, 664 - sizeof(mk->mp->grp.attrs[0]) * (mk->mp->num + 2), 665 - GFP_KERNEL); 662 + new_attrs = krealloc_array(mk->mp->grp.attrs, mk->mp->num + 1, 663 + sizeof(mk->mp->grp.attrs[0]), GFP_KERNEL); 666 664 if (!new_attrs) 667 665 return -ENOMEM; 668 666 mk->mp->grp.attrs = new_attrs; 669 667 670 668 /* Tack new one on the end. */ 671 - memset(&mk->mp->attrs[mk->mp->num], 0, sizeof(mk->mp->attrs[0])); 672 - sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr); 673 - mk->mp->attrs[mk->mp->num].param = kp; 674 - mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show; 669 + memset(&mk->mp->attrs[mk->mp->num - 1], 0, sizeof(mk->mp->attrs[0])); 670 + sysfs_attr_init(&mk->mp->attrs[mk->mp->num - 1].mattr.attr); 671 + mk->mp->attrs[mk->mp->num - 1].param = kp; 672 + mk->mp->attrs[mk->mp->num - 1].mattr.show = param_attr_show; 675 673 /* Do not allow runtime DAC changes to make param writable. 
*/ 676 674 if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0) 677 - mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store; 675 + mk->mp->attrs[mk->mp->num - 1].mattr.store = param_attr_store; 678 676 else 679 - mk->mp->attrs[mk->mp->num].mattr.store = NULL; 680 - mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name; 681 - mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm; 682 - mk->mp->num++; 677 + mk->mp->attrs[mk->mp->num - 1].mattr.store = NULL; 678 + mk->mp->attrs[mk->mp->num - 1].mattr.attr.name = (char *)name; 679 + mk->mp->attrs[mk->mp->num - 1].mattr.attr.mode = kp->perm; 683 680 684 681 /* Fix up all the pointers, since krealloc can move us */ 685 682 for (i = 0; i < mk->mp->num; i++)
+6 -7
kernel/static_call_inline.c
··· 325 325 struct module *mod; 326 326 int ret; 327 327 328 - preempt_disable(); 329 - mod = __module_text_address((unsigned long)start); 330 - WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); 331 - if (!try_module_get(mod)) 332 - mod = NULL; 333 - preempt_enable(); 334 - 328 + scoped_guard(rcu) { 329 + mod = __module_text_address((unsigned long)start); 330 + WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); 331 + if (!try_module_get(mod)) 332 + mod = NULL; 333 + } 335 334 if (!mod) 336 335 return 0; 337 336
+13 -11
kernel/trace/bpf_trace.c
··· 2338 2338 { 2339 2339 struct module *mod; 2340 2340 2341 - preempt_disable(); 2341 + guard(rcu)(); 2342 2342 mod = __module_address((unsigned long)btp); 2343 2343 module_put(mod); 2344 - preempt_enable(); 2345 2344 } 2346 2345 2347 2346 static __always_inline ··· 2924 2925 u32 i, err = 0; 2925 2926 2926 2927 for (i = 0; i < addrs_cnt; i++) { 2928 + bool skip_add = false; 2927 2929 struct module *mod; 2928 2930 2929 - preempt_disable(); 2930 - mod = __module_address(addrs[i]); 2931 - /* Either no module or we it's already stored */ 2932 - if (!mod || has_module(&arr, mod)) { 2933 - preempt_enable(); 2934 - continue; 2931 + scoped_guard(rcu) { 2932 + mod = __module_address(addrs[i]); 2933 + /* Either no module or it's already stored */ 2934 + if (!mod || has_module(&arr, mod)) { 2935 + skip_add = true; 2936 + break; /* scoped_guard */ 2937 + } 2938 + if (!try_module_get(mod)) 2939 + err = -EINVAL; 2935 2940 } 2936 - if (!try_module_get(mod)) 2937 - err = -EINVAL; 2938 - preempt_enable(); 2941 + if (skip_add) 2942 + continue; 2939 2943 if (err) 2940 2944 break; 2941 2945 err = add_module(&arr, mod);
+3 -6
kernel/trace/trace_kprobe.c
··· 124 124 if (!p) 125 125 return true; 126 126 *p = '\0'; 127 - rcu_read_lock_sched(); 128 - ret = !!find_module(tk->symbol); 129 - rcu_read_unlock_sched(); 127 + scoped_guard(rcu) 128 + ret = !!find_module(tk->symbol); 130 129 *p = ':'; 131 130 132 131 return ret; ··· 795 796 { 796 797 struct module *mod; 797 798 798 - rcu_read_lock_sched(); 799 + guard(rcu)(); 799 800 mod = find_module(name); 800 801 if (mod && !try_module_get(mod)) 801 802 mod = NULL; 802 - rcu_read_unlock_sched(); 803 - 804 803 return mod; 805 804 } 806 805 #else
+9 -13
lib/bug.c
··· 66 66 67 67 static struct bug_entry *module_find_bug(unsigned long bugaddr) 68 68 { 69 + struct bug_entry *bug; 69 70 struct module *mod; 70 - struct bug_entry *bug = NULL; 71 71 72 - rcu_read_lock_sched(); 72 + guard(rcu)(); 73 73 list_for_each_entry_rcu(mod, &module_bug_list, bug_list) { 74 74 unsigned i; 75 75 76 76 bug = mod->bug_table; 77 77 for (i = 0; i < mod->num_bugs; ++i, ++bug) 78 78 if (bugaddr == bug_addr(bug)) 79 - goto out; 79 + return bug; 80 80 } 81 - bug = NULL; 82 - out: 83 - rcu_read_unlock_sched(); 84 - 85 - return bug; 81 + return NULL; 86 82 } 87 83 88 84 void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, ··· 231 235 #ifdef CONFIG_MODULES 232 236 struct module *mod; 233 237 234 - rcu_read_lock_sched(); 235 - list_for_each_entry_rcu(mod, &module_bug_list, bug_list) 236 - clear_once_table(mod->bug_table, 237 - mod->bug_table + mod->num_bugs); 238 - rcu_read_unlock_sched(); 238 + scoped_guard(rcu) { 239 + list_for_each_entry_rcu(mod, &module_bug_list, bug_list) 240 + clear_once_table(mod->bug_table, 241 + mod->bug_table + mod->num_bugs); 242 + } 239 243 #endif 240 244 241 245 clear_once_table(__start___bug_table, __stop___bug_table);
+1 -1
lib/tests/module/gen_test_kallsyms.sh
··· 1 - #!/bin/bash 1 + #!/usr/bin/env bash 2 2 3 3 TARGET=$(basename $1) 4 4 DIR=lib/tests/module