// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 Richard Henderson
 * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
 */

#define INCLUDE_VERMAGIC

#include <linux/export.h>
#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/module_signature.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/buildid.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/set_memory.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/livepatch.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <linux/audit.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
 */
#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete and add uses RCU list operations).
 */
static DEFINE_MUTEX(module_mutex);
static LIST_HEAD(modules);

/* Work queue for freeing init sections in success case */
static void do_free_init(struct work_struct *w);
static DECLARE_WORK(init_free_wq, do_free_init);
static LLIST_HEAD(init_free_list);
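
/*
 * Editorial sketch, not part of the original file: readers that cannot
 * take module_mutex walk the "modules" list under RCU, skipping entries
 * that are not fully formed yet. A hypothetical read-only lookup would
 * follow the same pattern used by mod_find() and find_module_all() below:
 *
 *	preempt_disable();
 *	list_for_each_entry_rcu(mod, &modules, list) {
 *		if (mod->state == MODULE_STATE_UNFORMED)
 *			continue;
 *		... inspect mod ...
 *	}
 *	preempt_enable();
 */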

#ifdef CONFIG_MODULES_TREE_LOOKUP

/*
 * Use a latched RB-tree for __module_address(); this allows us to use
 * RCU-sched lookups of the address from any context.
 *
 * This is conditional on PERF_EVENTS || TRACING because those can really hit
 * __module_address() hard by doing a lot of stack unwinding; potentially from
 * NMI context.
 */

static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
{
	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);

	return (unsigned long)layout->base;
}

static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
{
	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);

	return (unsigned long)layout->size;
}

static __always_inline bool
mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
	return __mod_tree_val(a) < __mod_tree_val(b);
}

static __always_inline int
mod_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long start, end;

	start = __mod_tree_val(n);
	if (val < start)
		return -1;

	end = start + __mod_tree_size(n);
	if (val >= end)
		return 1;

	return 0;
}

static const struct latch_tree_ops mod_tree_ops = {
	.less = mod_tree_less,
	.comp = mod_tree_comp,
};

static struct mod_tree_root {
	struct latch_tree_root root;
	unsigned long addr_min;
	unsigned long addr_max;
} mod_tree __cacheline_aligned = {
	.addr_min = -1UL,
};

#define module_addr_min mod_tree.addr_min
#define module_addr_max mod_tree.addr_max

static noinline void __mod_tree_insert(struct mod_tree_node *node)
{
	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
}

static void __mod_tree_remove(struct mod_tree_node *node)
{
	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
}

/*
 * These modifications: insert, remove_init and remove; are serialized by the
 * module_mutex.
 */
static void mod_tree_insert(struct module *mod)
{
	mod->core_layout.mtn.mod = mod;
	mod->init_layout.mtn.mod = mod;

	__mod_tree_insert(&mod->core_layout.mtn);
	if (mod->init_layout.size)
		__mod_tree_insert(&mod->init_layout.mtn);
}

static void mod_tree_remove_init(struct module *mod)
{
	if (mod->init_layout.size)
		__mod_tree_remove(&mod->init_layout.mtn);
}

static void mod_tree_remove(struct module *mod)
{
	__mod_tree_remove(&mod->core_layout.mtn);
	mod_tree_remove_init(mod);
}

static struct module *mod_find(unsigned long addr)
{
	struct latch_tree_node *ltn;

	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
	if (!ltn)
		return NULL;

	return container_of(ltn, struct mod_tree_node, node)->mod;
}

#else /* MODULES_TREE_LOOKUP */

static unsigned long module_addr_min = -1UL, module_addr_max = 0;

static void mod_tree_insert(struct module *mod) { }
static void mod_tree_remove_init(struct module *mod) { }
static void mod_tree_remove(struct module *mod) { }

static struct module *mod_find(unsigned long addr)
{
	struct module *mod;

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		if (within_module(addr, mod))
			return mod;
	}

	return NULL;
}

#endif /* MODULES_TREE_LOOKUP */
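
/*
 * Worked example (editorial, with made-up numbers): for a layout with
 * base == 0xffffffffc0000000 and size == 0x3000, mod_tree_comp() returns
 *	-1 for keys below 0xffffffffc0000000,
 *	 0 for keys in [0xffffffffc0000000, 0xffffffffc0003000),
 *	 1 for keys at or above the end,
 * so latch_tree_find((void *)addr, ...) lands on the module whose
 * half-open [base, base + size) range contains addr.
 */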

/*
 * Bounds of module text, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(void *base, unsigned int size)
{
	unsigned long min = (unsigned long)base;
	unsigned long max = min + size;

	if (min < module_addr_min)
		module_addr_min = min;
	if (max > module_addr_max)
		module_addr_max = max;
}

static void mod_update_bounds(struct module *mod)
{
	__mod_update_bounds(mod->core_layout.base, mod->core_layout.size);
	if (mod->init_layout.size)
		__mod_update_bounds(mod->init_layout.base, mod->init_layout.size);
}

#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */

static void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
	if (unlikely(!debug_locks))
		return;

	WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
		     !lockdep_is_held(&module_mutex));
#endif
}

#ifdef CONFIG_MODULE_SIG
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);

void set_module_sig_enforced(void)
{
	sig_enforce = true;
}
#else
#define sig_enforce false
#endif

/*
 * Export the sig_enforce kernel cmdline parameter to allow other subsystems
 * to rely on it instead of checking CONFIG_MODULE_SIG_FORCE directly.
 */
bool is_module_sig_enforced(void)
{
	return sig_enforce;
}
EXPORT_SYMBOL(is_module_sig_enforced);

/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);

/*
 * We require a truly strong try_module_get(): 0 means success.
 * Otherwise an error is returned due to ongoing or failed
 * initialization etc.
 */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	set_bit(flag, &mod->taints);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit. nfsd and lockd use this.
 */
void __noreturn __module_put_and_kthread_exit(struct module *mod, long code)
{
	module_put(mod);
	kthread_exit(code);
}
EXPORT_SYMBOL(__module_put_and_kthread_exit);
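
/*
 * Usage sketch (editorial, hypothetical callback names): a subsystem
 * that wants to hear about module state transitions registers a
 * blocking notifier:
 *
 *	static int my_module_event(struct notifier_block *nb,
 *				   unsigned long state, void *data)
 *	{
 *		struct module *mod = data;
 *
 *		if (state == MODULE_STATE_COMING)
 *			...;	// mod is about to run its init function
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_module_event,
 *	};
 *
 *	register_module_notifier(&my_nb);
 */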

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL. Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}

/* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */
static unsigned int find_any_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		if (strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/*
 * Find a module section, or NULL. Fill in number of "objects" in section.
 * Ignores SHF_ALLOC flag.
 */
static __maybe_unused void *any_section_objs(const struct load_info *info,
					     const char *name,
					     size_t object_size,
					     unsigned int *num)
{
	unsigned int sec = find_any_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}

/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const s32 __start___kcrctab[];
extern const s32 __start___kcrctab_gpl[];
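
/*
 * Example of the helpers above (editorial): later in this file the
 * module loader locates per-section object arrays by name, with *num
 * receiving the element count, e.g. the parameter table:
 *
 *	mod->kp = section_objs(info, "__param",
 *			       sizeof(*mod->kp), &mod->num_kp);
 */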

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

struct symsearch {
	const struct kernel_symbol *start, *stop;
	const s32 *crcs;
	enum mod_license {
		NOT_GPL_ONLY,
		GPL_ONLY,
	} license;
};

struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const s32 *crc;
	const struct kernel_symbol *sym;
	enum mod_license license;
};

static bool check_exported_symbol(const struct symsearch *syms,
				  struct module *owner,
				  unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok && syms->license == GPL_ONLY)
		return false;
	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	fsa->license = syms->license;
	return true;
}

static unsigned long kernel_symbol_value(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	return (unsigned long)offset_to_ptr(&sym->value_offset);
#else
	return sym->value;
#endif
}

static const char *kernel_symbol_name(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	return offset_to_ptr(&sym->name_offset);
#else
	return sym->name;
#endif
}

static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	if (!sym->namespace_offset)
		return NULL;
	return offset_to_ptr(&sym->namespace_offset);
#else
	return sym->namespace;
#endif
}

static int cmp_name(const void *name, const void *sym)
{
	return strcmp(name, kernel_symbol_name(sym));
}

static bool find_exported_symbol_in_section(const struct symsearch *syms,
					    struct module *owner,
					    void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
		      sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_exported_symbol(syms, owner,
						 sym - syms->start, data))
		return true;

	return false;
}

/*
 * Find an exported symbol and return it, along with the (optional) crc and
 * the (optional) module which owns it. Needs preempt disabled or module_mutex.
 */
static bool find_symbol(struct find_symbol_arg *fsa)
{
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY },
	};
	struct module *mod;
	unsigned int i;

	module_assert_mutex_or_preempt();

	for (i = 0; i < ARRAY_SIZE(arr); i++)
		if (find_exported_symbol_in_section(&arr[i], NULL, fsa))
			return true;

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY },
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		for (i = 0; i < ARRAY_SIZE(arr); i++)
			if (find_exported_symbol_in_section(&arr[i], mod, fsa))
				return true;
	}

	pr_debug("Failed to find symbol %s\n", fsa->name);
	return false;
}
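
/*
 * Usage sketch (editorial): callers fill a find_symbol_arg with the
 * inputs and read the outputs back on success:
 *
 *	struct find_symbol_arg fsa = {
 *		.name  = "some_symbol",	// hypothetical name
 *		.gplok = true,
 *	};
 *
 *	preempt_disable();
 *	if (find_symbol(&fsa))
 *		... fsa.owner, fsa.sym, fsa.crc and fsa.license are valid ...
 *	preempt_enable();
 *
 * __symbol_get() and __symbol_put() below follow exactly this pattern.
 */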

/*
 * Search for module by name: must hold module_mutex (or preempt disabled
 * for read-only access).
 */
static struct module *find_module_all(const char *name, size_t len,
				      bool even_unformed)
{
	struct module *mod;

	module_assert_mutex_or_preempt();

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	return find_module_all(name, strlen(name), false);
}

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);
			void *va = (void *)addr;

			if (va >= start && va < start + mod->percpu_size) {
				if (can_addr) {
					*can_addr = (unsigned long) (va - start);
					*can_addr += (unsigned long)
						per_cpu_ptr(mod->percpu,
							    get_boot_cpu_id());
				}
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

/**
 * is_module_percpu_address() - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * Return: %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	return __is_module_percpu_address(addr, NULL);
}
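
/*
 * Worked example (editorial, made-up numbers): if some CPU's copy of a
 * module's percpu area starts at 0xf000 and addr == 0xf010, *can_addr
 * becomes 0x10 plus the boot CPU's copy of the same area, i.e. a single
 * canonical address for the object regardless of which CPU's copy was
 * passed in. Callers such as lockdep can use this to treat every CPU's
 * copy of the same static percpu object as one object.
 */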

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)						      \
static void setup_modinfo_##field(struct module *mod, const char *s)	      \
{									      \
	mod->field = kstrdup(s, GFP_KERNEL);				      \
}									      \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,	      \
			struct module_kobject *mk, char *buffer)	      \
{									      \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);	      \
}									      \
static int modinfo_##field##_exists(struct module *mod)		      \
{									      \
	return mod->field != NULL;					      \
}									      \
static void free_modinfo_##field(struct module *mod)			      \
{									      \
	kfree(mod->field);						      \
	mod->field = NULL;						      \
}									      \
static struct module_attribute modinfo_##field = {			      \
	.attr = { .name = __stringify(field), .mode = 0444 },		      \
	.show = show_modinfo_##field,					      \
	.setup = setup_modinfo_##field,					      \
	.test = modinfo_##field##_exists,				      \
	.free = free_modinfo_##field,					      \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);

static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count held by the module loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

	return 0;
}

/* Does module 'a' already use module 'b'? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			pr_debug("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}
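
/*
 * Editorial note with a small example: the dependency graph is stored
 * twice. If module A resolves a symbol from module B, a module_use node
 * is created with use->source == A and use->target == B; it is linked
 * into B->source_list (who uses B) and into A->target_list (what A
 * uses). already_uses(A, B) above therefore scans B->source_list.
 */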

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use)
		return -ENOMEM;

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}

/* Module a uses b: caller needs module_mutex() */
static int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}
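
/*
 * Worked example (editorial): an idle live module has
 * refcnt == MODULE_REF_BASE == 1. try_release_module_ref() subtracts
 * the base, leaving 0, which means unload may proceed. If users still
 * hold references the result is positive, and the base is restored
 * with atomic_add_unless(..., MODULE_REF_BASE, 0), which refuses to
 * resurrect a count that a concurrent module_put() has meanwhile
 * dropped to zero.
 */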

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

	return 0;
}

/**
 * module_refcount() - return the refcount or -1 if unloading
 * @mod:	the module we're checking
 *
 * Return:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	audit_log_kern_module(name);

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
	ftrace_release_mod(mod);

	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	/* someone could wait for the module in add_unformed_module() */
	wake_up_all(&module_wq);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}
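
/*
 * Flag semantics in the syscall above, summarized (editorial): the only
 * flag this implementation consults is O_TRUNC. A module still in use
 * simply fails with -EWOULDBLOCK, which is why rmmod conventionally
 * passes O_NONBLOCK. "rmmod -f" adds O_TRUNC, which try_force_unload()
 * honours only under CONFIG_MODULE_FORCE_UNLOAD, tainting the kernel
 * with TAINT_FORCED_RMMOD.
 */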

static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %i ", module_refcount(mod));

	/*
	 * Always include a trailing , so userspace can differentiate
	 * between this and the old multi-field proc format.
	 */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_puts(m, "[permanent],");
	}

	if (!printed_something)
		seq_puts(m, "-");
}

void __symbol_put(const char *symbol)
{
	struct find_symbol_arg fsa = {
		.name	= symbol,
		.gplok	= true,
	};

	preempt_disable();
	BUG_ON(!find_symbol(&fsa));
	module_put(fsa.owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/*
	 * Even though we hold a reference on the module; we still need to
	 * disable preemption in order to safely traverse the data structure.
	 */
	preempt_disable();
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
	if (module) {
		preempt_disable();
		atomic_inc(&module->refcnt);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();
		/* Note: here, we can fail to get a reference */
		if (likely(module_is_live(module) &&
			   atomic_inc_not_zero(&module->refcnt) != 0))
			trace_module_get(module, _RET_IP_);
		else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);

void module_put(struct module *module)
{
	int ret;

	if (module) {
		preempt_disable();
		ret = atomic_dec_if_positive(&module->refcnt);
		WARN_ON(ret < 0);	/* Failed to put refcount */
		trace_module_put(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);
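
/*
 * Usage sketch (editorial): the canonical pairing for the helpers
 * above, e.g. when calling into code owned by another module:
 *
 *	if (try_module_get(owner)) {
 *		... safe to call into "owner" here ...
 *		module_put(owner);
 *	} else {
 *		... owner is going away; do not call into it ...
 *	}
 */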

#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_puts(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

static int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

static size_t module_flags_taint(struct module *mod, char *buf)
{
	size_t l = 0;
	int i;

	for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
		if (taint_flags[i].module && test_bit(i, &mod->taints))
			buf[l++] = taint_flags[i].c_true;
	}

	return l;
}

static ssize_t show_initstate(struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	default:
		BUG();
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&mk->kobj, buffer, count);
	return rc ? rc : count;
}

struct module_attribute module_uevent =
	__ATTR(uevent, 0200, NULL, store_uevent);

static ssize_t show_coresize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->core_layout.size);
}

static struct module_attribute modinfo_coresize =
	__ATTR(coresize, 0444, show_coresize, NULL);

static ssize_t show_initsize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->init_layout.size);
}

static struct module_attribute modinfo_initsize =
	__ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
{
	size_t l;

	l = module_flags_taint(mk->mod, buffer);
	buffer[l++] = '\n';
	return l;
}

static struct module_attribute modinfo_taint =
	__ATTR(taint, 0444, show_taint, NULL);

static struct module_attribute *modinfo_attrs[] = {
	&module_uevent,
	&modinfo_version,
	&modinfo_srcversion,
	&modinfo_initstate,
	&modinfo_coresize,
	&modinfo_initsize,
	&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
	&modinfo_refcnt,
#endif
	NULL,
};

static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
	return 0;
#else
	return -ENOEXEC;
#endif
}

#ifdef CONFIG_MODVERSIONS

static u32 resolve_rel_crc(const s32 *crc)
{
	return *(u32 *)((void *)crc + *crc);
}
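
/*
 * Worked example (editorial): with CONFIG_MODULE_REL_CRCS the __kcrctab
 * entry holds a signed 32-bit offset rather than the CRC itself. If the
 * entry lives at address p and contains 0x100, the real CRC is the u32
 * stored at p + 0x100; resolve_rel_crc() performs exactly that
 * dereference.
 */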

static int check_version(const struct load_info *info,
			 const char *symname,
			 struct module *mod,
			 const s32 *crc)
{
	Elf_Shdr *sechdrs = info->sechdrs;
	unsigned int versindex = info->index.vers;
	unsigned int i, num_versions;
	struct modversion_info *versions;

	/* Exporting module didn't supply crcs? OK, we're already tainted. */
	if (!crc)
		return 1;

	/* No versions at all? modprobe --force does this. */
	if (versindex == 0)
		return try_to_force_load(mod, symname) == 0;

	versions = (void *) sechdrs[versindex].sh_addr;
	num_versions = sechdrs[versindex].sh_size
		/ sizeof(struct modversion_info);

	for (i = 0; i < num_versions; i++) {
		u32 crcval;

		if (strcmp(versions[i].name, symname) != 0)
			continue;

		if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
			crcval = resolve_rel_crc(crc);
		else
			crcval = *crc;
		if (versions[i].crc == crcval)
			return 1;
		pr_debug("Found checksum %X vs module %lX\n",
			 crcval, versions[i].crc);
		goto bad_version;
	}

	/* Broken toolchain. Warn once, then let it go.. */
	pr_warn_once("%s: no symbol version for %s\n", info->name, symname);
	return 1;

bad_version:
	pr_warn("%s: disagrees about version of symbol %s\n",
		info->name, symname);
	return 0;
}

static inline int check_modstruct_version(const struct load_info *info,
					  struct module *mod)
{
	struct find_symbol_arg fsa = {
		.name	= "module_layout",
		.gplok	= true,
	};

	/*
	 * Since this should be found in kernel (which can't be removed), no
	 * locking is necessary -- use preempt_disable() to placate lockdep.
	 */
	preempt_disable();
	if (!find_symbol(&fsa)) {
		preempt_enable();
		BUG();
	}
	preempt_enable();
	return check_version(info, "module_layout", mod, fsa.crc);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	if (has_crcs) {
		amagic += strcspn(amagic, " ");
		bmagic += strcspn(bmagic, " ");
	}
	return strcmp(amagic, bmagic) == 0;
}
#else
static inline int check_version(const struct load_info *info,
				const char *symname,
				struct module *mod,
				const s32 *crc)
{
	return 1;
}

static inline int check_modstruct_version(const struct load_info *info,
					  struct module *mod)
{
	return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */

static char *get_modinfo(const struct load_info *info, const char *tag);
static char *get_next_modinfo(const struct load_info *info, const char *tag,
			      char *prev);

static int verify_namespace_is_imported(const struct load_info *info,
					const struct kernel_symbol *sym,
					struct module *mod)
{
	const char *namespace;
	char *imported_namespace;

	namespace = kernel_symbol_namespace(sym);
	if (namespace && namespace[0]) {
		imported_namespace = get_modinfo(info, "import_ns");
		while (imported_namespace) {
			if (strcmp(namespace, imported_namespace) == 0)
				return 0;
			imported_namespace = get_next_modinfo(
				info, "import_ns", imported_namespace);
		}
#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
		pr_warn(
#else
		pr_err(
#endif
			"%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
			mod->name, kernel_symbol_name(sym), namespace);
#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
		return -EINVAL;
#endif
	}
	return 0;
}
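
/*
 * Example (editorial, hypothetical names): if a symbol is exported with
 *
 *	EXPORT_SYMBOL_NS_GPL(foo_fn, FOO_NS);
 *
 * then a module calling foo_fn() must carry
 *
 *	MODULE_IMPORT_NS(FOO_NS);
 *
 * which becomes an "import_ns=FOO_NS" modinfo tag that the loop above
 * matches against the symbol's namespace.
 */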

static bool inherit_taint(struct module *mod, struct module *owner)
{
	if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))
		return true;

	if (mod->using_gplonly_symbols) {
		pr_err("%s: module using GPL-only symbols uses symbols from proprietary module %s.\n",
			mod->name, owner->name);
		return false;
	}

	if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
		pr_warn("%s: module uses symbols from proprietary module %s, inheriting taint.\n",
			mod->name, owner->name);
		set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);
	}
	return true;
}

/* Resolve a symbol for this module. I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct find_symbol_arg fsa = {
		.name	= name,
		.gplok	= !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)),
		.warn	= true,
	};
	int err;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	if (!find_symbol(&fsa))
		goto unlock;

	if (fsa.license == GPL_ONLY)
		mod->using_gplonly_symbols = true;

	if (!inherit_taint(mod, fsa.owner)) {
		fsa.sym = NULL;
		goto getname;
	}

	if (!check_version(info, name, mod, fsa.crc)) {
		fsa.sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	err = verify_namespace_is_imported(info, fsa.sym, mod);
	if (err) {
		fsa.sym = ERR_PTR(err);
		goto getname;
	}

	err = ref_module(mod, fsa.owner);
	if (err) {
		fsa.sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make copy under the lock if we failed to get ref. */
	strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN);
unlock:
	mutex_unlock(&module_mutex);
	return fsa.sym;
}

static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
					     30 * HZ) <= 0) {
		pr_warn("%s: gave up waiting for init of module %s.\n",
			mod->name, owner);
	}
	return ksym;
}
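
/*
 * Editorial note: resolve_symbol() yields ERR_PTR(-EBUSY) while the
 * exporting module is still MODULE_STATE_COMING (see
 * strong_try_module_get()). The wait above therefore retries on
 * module_wq wakeups until the exporter finishes init, some other error
 * occurs, or 30 seconds pass; a module needing symbols from a
 * slow-initializing dependency blocks here instead of failing
 * immediately.
 */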

#ifdef CONFIG_KALLSYMS
static inline bool sect_empty(const Elf_Shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}
#endif

/*
 * /sys/module/foo/sections stuff
 * J. Corbet <corbet@lwn.net>
 */
#ifdef CONFIG_SYSFS

#ifdef CONFIG_KALLSYMS
struct module_sect_attr {
	struct bin_attribute battr;
	unsigned long address;
};

struct module_sect_attrs {
	struct attribute_group grp;
	unsigned int nsections;
	struct module_sect_attr attrs[];
};

#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4))
static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
				struct bin_attribute *battr,
				char *buf, loff_t pos, size_t count)
{
	struct module_sect_attr *sattr =
		container_of(battr, struct module_sect_attr, battr);
	char bounce[MODULE_SECT_READ_SIZE + 1];
	size_t wrote;

	if (pos != 0)
		return -EINVAL;

	/*
	 * Since we're a binary read handler, we must account for the
	 * trailing NUL byte that sprintf will write: if "buf" is
	 * too small to hold the NUL, or the NUL is exactly the last
	 * byte, the read will look like it got truncated by one byte.
	 * Since there is no way to ask sprintf nicely to not write
	 * the NUL, we have to use a bounce buffer.
	 */
	wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n",
			  kallsyms_show_value(file->f_cred)
			  ? (void *)sattr->address : NULL);
	count = min(count, wrote);
	memcpy(buf, bounce, count);

	return count;
}

static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
{
	unsigned int section;

	for (section = 0; section < sect_attrs->nsections; section++)
		kfree(sect_attrs->attrs[section].battr.attr.name);
	kfree(sect_attrs);
}

static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int nloaded = 0, i, size[2];
	struct module_sect_attrs *sect_attrs;
	struct module_sect_attr *sattr;
	struct bin_attribute **gattr;

	/* Count loaded sections and allocate structures */
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]))
			nloaded++;
	size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
			sizeof(sect_attrs->grp.bin_attrs[0]));
	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
	if (sect_attrs == NULL)
		return;

	/* Setup section attributes. */
	sect_attrs->grp.name = "sections";
	sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];

	sect_attrs->nsections = 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.bin_attrs[0];
	for (i = 0; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *sec = &info->sechdrs[i];
		if (sect_empty(sec))
			continue;
		sysfs_bin_attr_init(&sattr->battr);
		sattr->address = sec->sh_addr;
		sattr->battr.attr.name =
			kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
		if (sattr->battr.attr.name == NULL)
			goto out;
		sect_attrs->nsections++;
		sattr->battr.read = module_sect_read;
		sattr->battr.size = MODULE_SECT_READ_SIZE;
		sattr->battr.attr.mode = 0400;
		*(gattr++) = &(sattr++)->battr;
	}
	*gattr = NULL;

	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;

	mod->sect_attrs = sect_attrs;
	return;
out:
	free_sect_attrs(sect_attrs);
}

static void remove_sect_attrs(struct module *mod)
{
	if (mod->sect_attrs) {
		sysfs_remove_group(&mod->mkobj.kobj,
				   &mod->sect_attrs->grp);
		/*
		 * We are positive that no one is using any sect attrs
		 * at this point. Deallocate immediately.
		 */
		free_sect_attrs(mod->sect_attrs);
		mod->sect_attrs = NULL;
	}
}
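
/*
 * Example of the resulting interface (editorial, hypothetical module
 * name): with a module "foo" loaded,
 *
 *	# cat /sys/module/foo/sections/.text
 *	0xffffffffc0a00000
 *
 * while a reader whose credentials fail kallsyms_show_value() sees the
 * NULL placeholder (0x0000000000000000) instead of the real address.
 */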

/*
 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
 */

struct module_notes_attrs {
	struct kobject *dir;
	unsigned int notes;
	struct bin_attribute attrs[];
};

static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t pos, size_t count)
{
	/*
	 * The caller checked the pos and count against our size.
	 */
	memcpy(buf, bin_attr->private + pos, count);
	return count;
}

static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
			     unsigned int i)
{
	if (notes_attrs->dir) {
		while (i-- > 0)
			sysfs_remove_bin_file(notes_attrs->dir,
					      &notes_attrs->attrs[i]);
		kobject_put(notes_attrs->dir);
	}
	kfree(notes_attrs);
}

static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int notes, loaded, i;
	struct module_notes_attrs *notes_attrs;
	struct bin_attribute *nattr;

	/* failed to create section attributes, so can't create notes */
	if (!mod->sect_attrs)
		return;

	/* Count notes sections and allocate structures.  */
	notes = 0;
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]) &&
		    (info->sechdrs[i].sh_type == SHT_NOTE))
			++notes;

	if (notes == 0)
		return;

	notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes),
			      GFP_KERNEL);
	if (notes_attrs == NULL)
		return;

	notes_attrs->notes = notes;
	nattr = &notes_attrs->attrs[0];
	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
		if (sect_empty(&info->sechdrs[i]))
			continue;
		if (info->sechdrs[i].sh_type == SHT_NOTE) {
			sysfs_bin_attr_init(nattr);
			nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
			nattr->attr.mode = S_IRUGO;
			nattr->size = info->sechdrs[i].sh_size;
			nattr->private = (void *) info->sechdrs[i].sh_addr;
			nattr->read = module_notes_read;
			++nattr;
		}
		++loaded;
	}

	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
	if (!notes_attrs->dir)
		goto out;

	for (i = 0; i < notes; ++i)
		if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
			goto out;

	mod->notes_attrs = notes_attrs;
	return;

out:
	free_notes_attrs(notes_attrs, i);
}

static void remove_notes_attrs(struct module *mod)
{
	if (mod->notes_attrs)
		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
}

#else

static inline void add_sect_attrs(struct module *mod,
				  const struct load_info *info)
{
}

static inline void remove_sect_attrs(struct module *mod)
{
}

static inline void add_notes_attrs(struct module *mod,
				   const struct load_info *info)
{
}

static inline void remove_notes_attrs(struct module *mod)
{
}
#endif /* CONFIG_KALLSYMS */

static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list)
		sysfs_remove_link(use->target->holders_dir, mod->name);
	mutex_unlock(&module_mutex);
#endif
}

static int add_usage_links(struct module *mod)
{
	int ret = 0;
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list) {
		ret = sysfs_create_link(use->target->holders_dir,
					&mod->mkobj.kobj, mod->name);
		if (ret)
			break;
	}
	mutex_unlock(&module_mutex);
	if (ret)
		del_usage_links(mod);
#endif
	return ret;
}
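
/*
 * Editorial note with an example (hypothetical names): if module "a"
 * uses symbols from module "b", add_usage_links() creates
 * /sys/module/b/holders/a as a symlink to /sys/module/a, mirroring the
 * "Used by" dependency information that /proc/modules reports.
 */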

static void module_remove_modinfo_attrs(struct module *mod, int end);

static int module_add_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	struct module_attribute *temp_attr;
	int error = 0;
	int i;

	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
				      (ARRAY_SIZE(modinfo_attrs) + 1)),
				     GFP_KERNEL);
	if (!mod->modinfo_attrs)
		return -ENOMEM;

	temp_attr = mod->modinfo_attrs;
	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (!attr->test || attr->test(mod)) {
			memcpy(temp_attr, attr, sizeof(*temp_attr));
			sysfs_attr_init(&temp_attr->attr);
			error = sysfs_create_file(&mod->mkobj.kobj,
						  &temp_attr->attr);
			if (error)
				goto error_out;
			++temp_attr;
		}
	}

	return 0;

error_out:
	if (i > 0)
		module_remove_modinfo_attrs(mod, --i);
	else
		kfree(mod->modinfo_attrs);
	return error;
}

static void module_remove_modinfo_attrs(struct module *mod, int end)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
		if (end >= 0 && i > end)
			break;
		/* pick a field to test for end of list */
		if (!attr->attr.name)
			break;
		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
		if (attr->free)
			attr->free(mod);
	}
	kfree(mod->modinfo_attrs);
}

static void mod_kobject_put(struct module *mod)
{
	DECLARE_COMPLETION_ONSTACK(c);
	mod->mkobj.kobj_completion = &c;
	kobject_put(&mod->mkobj.kobj);
	wait_for_completion(&c);
}

static int mod_sysfs_init(struct module *mod)
{
	int err;
	struct kobject *kobj;

	if (!module_sysfs_initialized) {
		pr_err("%s: module sysfs not initialized\n", mod->name);
		err = -EINVAL;
		goto out;
	}

	kobj = kset_find_obj(module_kset, mod->name);
	if (kobj) {
		pr_err("%s: module is already loaded\n", mod->name);
		kobject_put(kobj);
		err = -EINVAL;
		goto out;
	}

	mod->mkobj.mod = mod;

	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
	mod->mkobj.kobj.kset = module_kset;
	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
				   "%s", mod->name);
	if (err)
		mod_kobject_put(mod);

out:
	return err;
}

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	int err;

	err = mod_sysfs_init(mod);
	if (err)
		goto out;

	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
	if (!mod->holders_dir) {
		err = -ENOMEM;
		goto out_unreg;
	}

	err = module_param_sysfs_setup(mod, kparam, num_params);
	if (err)
		goto out_unreg_holders;

	err = module_add_modinfo_attrs(mod);
	if (err)
		goto out_unreg_param;

	err = add_usage_links(mod);
	if (err)
		goto out_unreg_modinfo_attrs;

	add_sect_attrs(mod, info);
	add_notes_attrs(mod, info);

	return 0;

out_unreg_modinfo_attrs:
	module_remove_modinfo_attrs(mod, -1);
out_unreg_param:
	module_param_sysfs_remove(mod);
out_unreg_holders:
	kobject_put(mod->holders_dir);
out_unreg:
	mod_kobject_put(mod);
out:
	return err;
}

static void mod_sysfs_fini(struct module *mod)
{
	remove_notes_attrs(mod);
	remove_sect_attrs(mod);
	mod_kobject_put(mod);
}

static void init_param_lock(struct module *mod)
{
	mutex_init(&mod->param_lock);
}
#else /* !CONFIG_SYSFS */

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	return 0;
}

static void mod_sysfs_fini(struct module *mod)
{
}

static void module_remove_modinfo_attrs(struct module *mod, int end)
{
}

static void del_usage_links(struct module *mod)
{
}

static void init_param_lock(struct module *mod)
{
}
#endif /* CONFIG_SYSFS */

static void mod_sysfs_teardown(struct module *mod)
{
	del_usage_links(mod);
	module_remove_modinfo_attrs(mod, -1);
	module_param_sysfs_remove(mod);
	kobject_put(mod->mkobj.drivers_dir);
	kobject_put(mod->holders_dir);
	mod_sysfs_fini(mod);
}

/*
 * LKM RO/NX protection: protect module's text/ro-data
 * from modification and any data from execution.
 *
 * General layout of module is:
 *          [text] [read-only-data] [ro-after-init] [writable data]
 * text_size -----^                ^               ^               ^
 * ro_size ------------------------|               |               |
 * ro_after_init_size -----------------------------|               |
 * size -----------------------------------------------------------|
 *
 * These values are always page-aligned (as is base)
 */

/*
 * Since some arches are moving towards PAGE_KERNEL module allocations instead
 * of PAGE_KERNEL_EXEC, keep frob_text() and module_enable_x() outside of the
 * CONFIG_STRICT_MODULE_RWX block below because they are needed regardless of
 * whether we are strict.
 */
#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
static void frob_text(const struct module_layout *layout,
		      int (*set_memory)(unsigned long start, int num_pages))
{
	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
	BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
	set_memory((unsigned long)layout->base,
		   layout->text_size >> PAGE_SHIFT);
}

static void module_enable_x(const struct module *mod)
{
	frob_text(&mod->core_layout, set_memory_x);
	frob_text(&mod->init_layout, set_memory_x);
}
#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
static void module_enable_x(const struct module *mod) { }
#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */

#ifdef CONFIG_STRICT_MODULE_RWX
static void frob_rodata(const struct module_layout *layout,
			int (*set_memory)(unsigned long start, int num_pages))
{
	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
	BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
	BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
	set_memory((unsigned long)layout->base + layout->text_size,
		   (layout->ro_size - layout->text_size) >> PAGE_SHIFT);
}

static void frob_ro_after_init(const struct module_layout *layout,
			       int (*set_memory)(unsigned long start, int num_pages))
{
	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
	BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
	BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
	set_memory((unsigned long)layout->base + layout->ro_size,
		   (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT);
}
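
/*
 * Worked example (editorial, made-up sizes): with PAGE_SIZE == 4096 and
 * a core layout of text_size = 0x3000, ro_size = 0x5000,
 * ro_after_init_size = 0x6000, size = 0x8000:
 *
 *	frob_text()                  covers [base,          base + 0x3000)
 *	frob_rodata()                covers [base + 0x3000, base + 0x5000)
 *	frob_ro_after_init()         covers [base + 0x5000, base + 0x6000)
 *	frob_writable_data() (below) covers [base + 0x6000, base + 0x8000)
 */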

static void frob_writable_data(const struct module_layout *layout,
			       int (*set_memory)(unsigned long start, int num_pages))
{
	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
	BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
	BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1));
	set_memory((unsigned long)layout->base + layout->ro_after_init_size,
		   (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
}

static void module_enable_ro(const struct module *mod, bool after_init)
{
	if (!rodata_enabled)
		return;

	set_vm_flush_reset_perms(mod->core_layout.base);
	set_vm_flush_reset_perms(mod->init_layout.base);
	frob_text(&mod->core_layout, set_memory_ro);

	frob_rodata(&mod->core_layout, set_memory_ro);
	frob_text(&mod->init_layout, set_memory_ro);
	frob_rodata(&mod->init_layout, set_memory_ro);

	if (after_init)
		frob_ro_after_init(&mod->core_layout, set_memory_ro);
}

static void module_enable_nx(const struct module *mod)
{
	frob_rodata(&mod->core_layout, set_memory_nx);
	frob_ro_after_init(&mod->core_layout, set_memory_nx);
	frob_writable_data(&mod->core_layout, set_memory_nx);
	frob_rodata(&mod->init_layout, set_memory_nx);
	frob_writable_data(&mod->init_layout, set_memory_nx);
}

static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
				       char *secstrings, struct module *mod)
{
	const unsigned long shf_wx = SHF_WRITE|SHF_EXECINSTR;
	int i;

	for (i = 0; i < hdr->e_shnum; i++) {
		if ((sechdrs[i].sh_flags & shf_wx) == shf_wx) {
			pr_err("%s: section %s (index %d) has invalid WRITE|EXEC flags\n",
				mod->name, secstrings + sechdrs[i].sh_name, i);
			return -ENOEXEC;
		}
	}

	return 0;
}

#else /* !CONFIG_STRICT_MODULE_RWX */
static void module_enable_nx(const struct module *mod) { }
static void module_enable_ro(const struct module *mod, bool after_init) {}
static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
				       char *secstrings, struct module *mod)
{
	return 0;
}
#endif /* CONFIG_STRICT_MODULE_RWX */
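
/*
 * Example (editorial): module_enforce_rwx_sections() implements W^X at
 * load time. An object file containing, say,
 *
 *	.section .evil, "awx"	// SHF_ALLOC | SHF_WRITE | SHF_EXECINSTR
 *
 * is rejected with -ENOEXEC before anything is mapped, since no section
 * may be both writable and executable.
 */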
Adjust sh_addr to point 2104 * to core_kallsyms.symtab since the copy of the symtab in module 2105 * init memory is freed at the end of do_init_module(). 2106 */ 2107 mod->klp_info->sechdrs[symndx].sh_addr = 2108 (unsigned long) mod->core_kallsyms.symtab; 2109 2110 return 0; 2111 2112free_sechdrs: 2113 kfree(mod->klp_info->sechdrs); 2114free_info: 2115 kfree(mod->klp_info); 2116 return ret; 2117} 2118 2119static void free_module_elf(struct module *mod) 2120{ 2121 kfree(mod->klp_info->sechdrs); 2122 kfree(mod->klp_info->secstrings); 2123 kfree(mod->klp_info); 2124} 2125#else /* !CONFIG_LIVEPATCH */ 2126static int copy_module_elf(struct module *mod, struct load_info *info) 2127{ 2128 return 0; 2129} 2130 2131static void free_module_elf(struct module *mod) 2132{ 2133} 2134#endif /* CONFIG_LIVEPATCH */ 2135 2136void __weak module_memfree(void *module_region) 2137{ 2138 /* 2139 * This memory may be RO, and freeing RO memory in an interrupt is not 2140 * supported by vmalloc. 2141 */ 2142 WARN_ON(in_interrupt()); 2143 vfree(module_region); 2144} 2145 2146void __weak module_arch_cleanup(struct module *mod) 2147{ 2148} 2149 2150void __weak module_arch_freeing_init(struct module *mod) 2151{ 2152} 2153 2154static void cfi_cleanup(struct module *mod); 2155 2156/* Free a module, remove from lists, etc. */ 2157static void free_module(struct module *mod) 2158{ 2159 trace_module_free(mod); 2160 2161 mod_sysfs_teardown(mod); 2162 2163 /* 2164 * We leave it in the list to prevent duplicate loads, but make sure 2165 * that no one uses it while it's being deconstructed. 2166 */ 2167 mutex_lock(&module_mutex); 2168 mod->state = MODULE_STATE_UNFORMED; 2169 mutex_unlock(&module_mutex); 2170 2171 /* Remove dynamic debug info */ 2172 ddebug_remove_module(mod->name); 2173 2174 /* Arch-specific cleanup. */ 2175 module_arch_cleanup(mod); 2176 2177 /* Free module unload state (module_use links etc.) */ 2178 module_unload_free(mod); 2179 2180 /* Free any allocated parameters. */ 2181 destroy_params(mod->kp, mod->num_kp); 2182 2183 if (is_livepatch_module(mod)) 2184 free_module_elf(mod); 2185 2186 /* Now we can delete it from the lists */ 2187 mutex_lock(&module_mutex); 2188 /* Unlink carefully: kallsyms could be walking the list. */ 2189 list_del_rcu(&mod->list); 2190 mod_tree_remove(mod); 2191 /* Remove this module from the bug list; this uses list_del_rcu */ 2192 module_bug_cleanup(mod); 2193 /* Wait for RCU-sched synchronization before releasing mod->list and buglist. */ 2194 synchronize_rcu(); 2195 mutex_unlock(&module_mutex); 2196 2197 /* Clean up CFI for the module. */ 2198 cfi_cleanup(mod); 2199 2200 /* This may be empty, but that's OK */ 2201 module_arch_freeing_init(mod); 2202 module_memfree(mod->init_layout.base); 2203 kfree(mod->args); 2204 percpu_modfree(mod); 2205 2206 /* Free lock-classes; relies on the preceding synchronize_rcu(). 
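Lock classes registered from the module's address range must be released before that memory is reused.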
*/ 2207 lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); 2208 2209 /* Finally, free the core (containing the module structure) */ 2210 module_memfree(mod->core_layout.base); 2211} 2212 2213void *__symbol_get(const char *symbol) 2214{ 2215 struct find_symbol_arg fsa = { 2216 .name = symbol, 2217 .gplok = true, 2218 .warn = true, 2219 }; 2220 2221 preempt_disable(); 2222 if (!find_symbol(&fsa) || strong_try_module_get(fsa.owner)) { 2223 preempt_enable(); 2224 return NULL; 2225 } 2226 preempt_enable(); 2227 return (void *)kernel_symbol_value(fsa.sym); 2228} 2229EXPORT_SYMBOL_GPL(__symbol_get); 2230 2231/* 2232 * Ensure that an exported symbol [global namespace] does not already exist 2233 * in the kernel or in some other module's exported symbol table. 2234 * 2235 * You must hold the module_mutex. 2236 */ 2237static int verify_exported_symbols(struct module *mod) 2238{ 2239 unsigned int i; 2240 const struct kernel_symbol *s; 2241 struct { 2242 const struct kernel_symbol *sym; 2243 unsigned int num; 2244 } arr[] = { 2245 { mod->syms, mod->num_syms }, 2246 { mod->gpl_syms, mod->num_gpl_syms }, 2247 }; 2248 2249 for (i = 0; i < ARRAY_SIZE(arr); i++) { 2250 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { 2251 struct find_symbol_arg fsa = { 2252 .name = kernel_symbol_name(s), 2253 .gplok = true, 2254 }; 2255 if (find_symbol(&fsa)) { 2256 pr_err("%s: exports duplicate symbol %s" 2257 " (owned by %s)\n", 2258 mod->name, kernel_symbol_name(s), 2259 module_name(fsa.owner)); 2260 return -ENOEXEC; 2261 } 2262 } 2263 } 2264 return 0; 2265} 2266 2267static bool ignore_undef_symbol(Elf_Half emachine, const char *name) 2268{ 2269 /* 2270 * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as 2271 * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64. 2272 * i386 has a similar problem but may not deserve a fix. 2273 * 2274 * If we ever have to ignore many symbols, consider refactoring the code to 2275 * only warn if referenced by a relocation. 2276 */ 2277 if (emachine == EM_386 || emachine == EM_X86_64) 2278 return !strcmp(name, "_GLOBAL_OFFSET_TABLE_"); 2279 return false; 2280} 2281 2282/* Change all symbols so that st_value encodes the pointer directly. */ 2283static int simplify_symbols(struct module *mod, const struct load_info *info) 2284{ 2285 Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; 2286 Elf_Sym *sym = (void *)symsec->sh_addr; 2287 unsigned long secbase; 2288 unsigned int i; 2289 int ret = 0; 2290 const struct kernel_symbol *ksym; 2291 2292 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) { 2293 const char *name = info->strtab + sym[i].st_name; 2294 2295 switch (sym[i].st_shndx) { 2296 case SHN_COMMON: 2297 /* Ignore common symbols */ 2298 if (!strncmp(name, "__gnu_lto", 9)) 2299 break; 2300 2301 /* 2302 * We compiled with -fno-common. These are not 2303 * supposed to happen. 2304 */ 2305 pr_debug("Common symbol: %s\n", name); 2306 pr_warn("%s: please compile with -fno-common\n", 2307 mod->name); 2308 ret = -ENOEXEC; 2309 break; 2310 2311 case SHN_ABS: 2312 /* Don't need to do anything */ 2313 pr_debug("Absolute symbol: 0x%08lx\n", 2314 (long)sym[i].st_value); 2315 break; 2316 2317 case SHN_LIVEPATCH: 2318 /* Livepatch symbols are resolved by livepatch */ 2319 break; 2320 2321 case SHN_UNDEF: 2322 ksym = resolve_symbol_wait(mod, info, name); 2323 /* Ok if resolved. */ 2324 if (ksym && !IS_ERR(ksym)) { 2325 sym[i].st_value = kernel_symbol_value(ksym); 2326 break; 2327 } 2328 2329 /* Ok if weak or ignored. 
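Weak undefined symbols may legitimately stay unresolved (st_value stays 0); ignore_undef_symbol() filters known toolchain artifacts such as _GLOBAL_OFFSET_TABLE_.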
*/ 2330 if (!ksym && 2331 (ELF_ST_BIND(sym[i].st_info) == STB_WEAK || 2332 ignore_undef_symbol(info->hdr->e_machine, name))) 2333 break; 2334 2335 ret = PTR_ERR(ksym) ?: -ENOENT; 2336 pr_warn("%s: Unknown symbol %s (err %d)\n", 2337 mod->name, name, ret); 2338 break; 2339 2340 default: 2341 /* Divert to percpu allocation if a percpu var. */ 2342 if (sym[i].st_shndx == info->index.pcpu) 2343 secbase = (unsigned long)mod_percpu(mod); 2344 else 2345 secbase = info->sechdrs[sym[i].st_shndx].sh_addr; 2346 sym[i].st_value += secbase; 2347 break; 2348 } 2349 } 2350 2351 return ret; 2352} 2353 2354static int apply_relocations(struct module *mod, const struct load_info *info) 2355{ 2356 unsigned int i; 2357 int err = 0; 2358 2359 /* Now do relocations. */ 2360 for (i = 1; i < info->hdr->e_shnum; i++) { 2361 unsigned int infosec = info->sechdrs[i].sh_info; 2362 2363 /* Not a valid relocation section? */ 2364 if (infosec >= info->hdr->e_shnum) 2365 continue; 2366 2367 /* Don't bother with non-allocated sections */ 2368 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC)) 2369 continue; 2370 2371 if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH) 2372 err = klp_apply_section_relocs(mod, info->sechdrs, 2373 info->secstrings, 2374 info->strtab, 2375 info->index.sym, i, 2376 NULL); 2377 else if (info->sechdrs[i].sh_type == SHT_REL) 2378 err = apply_relocate(info->sechdrs, info->strtab, 2379 info->index.sym, i, mod); 2380 else if (info->sechdrs[i].sh_type == SHT_RELA) 2381 err = apply_relocate_add(info->sechdrs, info->strtab, 2382 info->index.sym, i, mod); 2383 if (err < 0) 2384 break; 2385 } 2386 return err; 2387} 2388 2389/* Additional bytes needed by arch in front of individual sections */ 2390unsigned int __weak arch_mod_section_prepend(struct module *mod, 2391 unsigned int section) 2392{ 2393 /* default implementation just returns zero */ 2394 return 0; 2395} 2396 2397/* Update size with this section: return offset. */ 2398static long get_offset(struct module *mod, unsigned int *size, 2399 Elf_Shdr *sechdr, unsigned int section) 2400{ 2401 long ret; 2402 2403 *size += arch_mod_section_prepend(mod, section); 2404 ret = ALIGN(*size, sechdr->sh_addralign ?: 1); 2405 *size = ret + sechdr->sh_size; 2406 return ret; 2407} 2408 2409static bool module_init_layout_section(const char *sname) 2410{ 2411#ifndef CONFIG_MODULE_UNLOAD 2412 if (module_exit_section(sname)) 2413 return true; 2414#endif 2415 return module_init_section(sname); 2416} 2417 2418/* 2419 * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld 2420 * might -- code, read-only data, read-write data, small data. Tally 2421 * sizes, and place the offsets into sh_entsize fields: high bit means it 2422 * belongs in init. 
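The high bit is INIT_OFFSET_MASK; move_module() masks it off again with ~INIT_OFFSET_MASK when computing each section's destination.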
2423 */ 2424static void layout_sections(struct module *mod, struct load_info *info) 2425{ 2426 static unsigned long const masks[][2] = { 2427 /* 2428 * NOTE: all executable code must be the first section 2429 * in this array; otherwise modify the text_size 2430 * finder in the two loops below 2431 */ 2432 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL }, 2433 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL }, 2434 { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL }, 2435 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL }, 2436 { ARCH_SHF_SMALL | SHF_ALLOC, 0 } 2437 }; 2438 unsigned int m, i; 2439 2440 for (i = 0; i < info->hdr->e_shnum; i++) 2441 info->sechdrs[i].sh_entsize = ~0UL; 2442 2443 pr_debug("Core section allocation order:\n"); 2444 for (m = 0; m < ARRAY_SIZE(masks); ++m) { 2445 for (i = 0; i < info->hdr->e_shnum; ++i) { 2446 Elf_Shdr *s = &info->sechdrs[i]; 2447 const char *sname = info->secstrings + s->sh_name; 2448 2449 if ((s->sh_flags & masks[m][0]) != masks[m][0] 2450 || (s->sh_flags & masks[m][1]) 2451 || s->sh_entsize != ~0UL 2452 || module_init_layout_section(sname)) 2453 continue; 2454 s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i); 2455 pr_debug("\t%s\n", sname); 2456 } 2457 switch (m) { 2458 case 0: /* executable */ 2459 mod->core_layout.size = debug_align(mod->core_layout.size); 2460 mod->core_layout.text_size = mod->core_layout.size; 2461 break; 2462 case 1: /* RO: text and ro-data */ 2463 mod->core_layout.size = debug_align(mod->core_layout.size); 2464 mod->core_layout.ro_size = mod->core_layout.size; 2465 break; 2466 case 2: /* RO after init */ 2467 mod->core_layout.size = debug_align(mod->core_layout.size); 2468 mod->core_layout.ro_after_init_size = mod->core_layout.size; 2469 break; 2470 case 4: /* whole core */ 2471 mod->core_layout.size = debug_align(mod->core_layout.size); 2472 break; 2473 } 2474 } 2475 2476 pr_debug("Init section allocation order:\n"); 2477 for (m = 0; m < ARRAY_SIZE(masks); ++m) { 2478 for (i = 0; i < info->hdr->e_shnum; ++i) { 2479 Elf_Shdr *s = &info->sechdrs[i]; 2480 const char *sname = info->secstrings + s->sh_name; 2481 2482 if ((s->sh_flags & masks[m][0]) != masks[m][0] 2483 || (s->sh_flags & masks[m][1]) 2484 || s->sh_entsize != ~0UL 2485 || !module_init_layout_section(sname)) 2486 continue; 2487 s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i) 2488 | INIT_OFFSET_MASK); 2489 pr_debug("\t%s\n", sname); 2490 } 2491 switch (m) { 2492 case 0: /* executable */ 2493 mod->init_layout.size = debug_align(mod->init_layout.size); 2494 mod->init_layout.text_size = mod->init_layout.size; 2495 break; 2496 case 1: /* RO: text and ro-data */ 2497 mod->init_layout.size = debug_align(mod->init_layout.size); 2498 mod->init_layout.ro_size = mod->init_layout.size; 2499 break; 2500 case 2: 2501 /* 2502 * RO after init doesn't apply to init_layout (only 2503 * core_layout), so it just takes the value of ro_size. 
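Keeping the two equal leaves the region empty, so frob_writable_data() starts the init layout's writable range directly after its rodata.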
2504 */ 2505 mod->init_layout.ro_after_init_size = mod->init_layout.ro_size; 2506 break; 2507 case 4: /* whole init */ 2508 mod->init_layout.size = debug_align(mod->init_layout.size); 2509 break; 2510 } 2511 } 2512} 2513 2514static void set_license(struct module *mod, const char *license) 2515{ 2516 if (!license) 2517 license = "unspecified"; 2518 2519 if (!license_is_gpl_compatible(license)) { 2520 if (!test_taint(TAINT_PROPRIETARY_MODULE)) 2521 pr_warn("%s: module license '%s' taints kernel.\n", 2522 mod->name, license); 2523 add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 2524 LOCKDEP_NOW_UNRELIABLE); 2525 } 2526} 2527 2528/* Parse tag=value strings from .modinfo section */ 2529static char *next_string(char *string, unsigned long *secsize) 2530{ 2531 /* Skip non-zero chars */ 2532 while (string[0]) { 2533 string++; 2534 if ((*secsize)-- <= 1) 2535 return NULL; 2536 } 2537 2538 /* Skip any zero padding. */ 2539 while (!string[0]) { 2540 string++; 2541 if ((*secsize)-- <= 1) 2542 return NULL; 2543 } 2544 return string; 2545} 2546 2547static char *get_next_modinfo(const struct load_info *info, const char *tag, 2548 char *prev) 2549{ 2550 char *p; 2551 unsigned int taglen = strlen(tag); 2552 Elf_Shdr *infosec = &info->sechdrs[info->index.info]; 2553 unsigned long size = infosec->sh_size; 2554 2555 /* 2556 * get_modinfo() calls made before rewrite_section_headers() 2557 * must use sh_offset, as sh_addr isn't set! 2558 */ 2559 char *modinfo = (char *)info->hdr + infosec->sh_offset; 2560 2561 if (prev) { 2562 size -= prev - modinfo; 2563 modinfo = next_string(prev, &size); 2564 } 2565 2566 for (p = modinfo; p; p = next_string(p, &size)) { 2567 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=') 2568 return p + taglen + 1; 2569 } 2570 return NULL; 2571} 2572 2573static char *get_modinfo(const struct load_info *info, const char *tag) 2574{ 2575 return get_next_modinfo(info, tag, NULL); 2576} 2577 2578static void setup_modinfo(struct module *mod, struct load_info *info) 2579{ 2580 struct module_attribute *attr; 2581 int i; 2582 2583 for (i = 0; (attr = modinfo_attrs[i]); i++) { 2584 if (attr->setup) 2585 attr->setup(mod, get_modinfo(info, attr->attr.name)); 2586 } 2587} 2588 2589static void free_modinfo(struct module *mod) 2590{ 2591 struct module_attribute *attr; 2592 int i; 2593 2594 for (i = 0; (attr = modinfo_attrs[i]); i++) { 2595 if (attr->free) 2596 attr->free(mod); 2597 } 2598} 2599 2600#ifdef CONFIG_KALLSYMS 2601 2602/* Lookup exported symbol in given range of kernel_symbols */ 2603static const struct kernel_symbol *lookup_exported_symbol(const char *name, 2604 const struct kernel_symbol *start, 2605 const struct kernel_symbol *stop) 2606{ 2607 return bsearch(name, start, stop - start, 2608 sizeof(struct kernel_symbol), cmp_name); 2609} 2610 2611static int is_exported(const char *name, unsigned long value, 2612 const struct module *mod) 2613{ 2614 const struct kernel_symbol *ks; 2615 if (!mod) 2616 ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab); 2617 else 2618 ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms); 2619 2620 return ks != NULL && kernel_symbol_value(ks) == value; 2621} 2622 2623/* As per nm */ 2624static char elf_type(const Elf_Sym *sym, const struct load_info *info) 2625{ 2626 const Elf_Shdr *sechdrs = info->sechdrs; 2627 2628 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) { 2629 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT) 2630 return 'v'; 2631 else 2632 return 'w'; 2633 } 2634 if (sym->st_shndx == SHN_UNDEF) 2635 return 'U'; 2636 
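/* Percpu symbols are diverted to a separately allocated area, so like SHN_ABS they carry no section-relative address. */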
if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu) 2637 return 'a'; 2638 if (sym->st_shndx >= SHN_LORESERVE) 2639 return '?'; 2640 if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR) 2641 return 't'; 2642 if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC 2643 && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) { 2644 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE)) 2645 return 'r'; 2646 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL) 2647 return 'g'; 2648 else 2649 return 'd'; 2650 } 2651 if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) { 2652 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL) 2653 return 's'; 2654 else 2655 return 'b'; 2656 } 2657 if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name, 2658 ".debug")) { 2659 return 'n'; 2660 } 2661 return '?'; 2662} 2663 2664static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, 2665 unsigned int shnum, unsigned int pcpundx) 2666{ 2667 const Elf_Shdr *sec; 2668 2669 if (src->st_shndx == SHN_UNDEF 2670 || src->st_shndx >= shnum 2671 || !src->st_name) 2672 return false; 2673 2674#ifdef CONFIG_KALLSYMS_ALL 2675 if (src->st_shndx == pcpundx) 2676 return true; 2677#endif 2678 2679 sec = sechdrs + src->st_shndx; 2680 if (!(sec->sh_flags & SHF_ALLOC) 2681#ifndef CONFIG_KALLSYMS_ALL 2682 || !(sec->sh_flags & SHF_EXECINSTR) 2683#endif 2684 || (sec->sh_entsize & INIT_OFFSET_MASK)) 2685 return false; 2686 2687 return true; 2688} 2689 2690/* 2691 * We only allocate and copy the strings needed by the parts of symtab 2692 * we keep. This is simple, but has the effect of making multiple 2693 * copies of duplicates. We could be more sophisticated, see 2694 * linux-kernel thread starting with 2695 * <73defb5e4bca04a6431392cc341112b1@localhost>. 2696 */ 2697static void layout_symtab(struct module *mod, struct load_info *info) 2698{ 2699 Elf_Shdr *symsect = info->sechdrs + info->index.sym; 2700 Elf_Shdr *strsect = info->sechdrs + info->index.str; 2701 const Elf_Sym *src; 2702 unsigned int i, nsrc, ndst, strtab_size = 0; 2703 2704 /* Put symbol section at end of init part of module. */ 2705 symsect->sh_flags |= SHF_ALLOC; 2706 symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect, 2707 info->index.sym) | INIT_OFFSET_MASK; 2708 pr_debug("\t%s\n", info->secstrings + symsect->sh_name); 2709 2710 src = (void *)info->hdr + symsect->sh_offset; 2711 nsrc = symsect->sh_size / sizeof(*src); 2712 2713 /* Compute total space required for the core symbols' strtab. */ 2714 for (ndst = i = 0; i < nsrc; i++) { 2715 if (i == 0 || is_livepatch_module(mod) || 2716 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, 2717 info->index.pcpu)) { 2718 strtab_size += strlen(&info->strtab[src[i].st_name])+1; 2719 ndst++; 2720 } 2721 } 2722 2723 /* Append room for core symbols at end of core part. */ 2724 info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1); 2725 info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym); 2726 mod->core_layout.size += strtab_size; 2727 info->core_typeoffs = mod->core_layout.size; 2728 mod->core_layout.size += ndst * sizeof(char); 2729 mod->core_layout.size = debug_align(mod->core_layout.size); 2730 2731 /* Put string table section at end of init part of module. */ 2732 strsect->sh_flags |= SHF_ALLOC; 2733 strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect, 2734 info->index.str) | INIT_OFFSET_MASK; 2735 pr_debug("\t%s\n", info->secstrings + strsect->sh_name); 2736 2737 /* We'll tack temporary mod_kallsyms on the end. 
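It lands in the init region, so add_kallsyms() points mod->kallsyms at it only until do_init_module() switches over to the trimmed core_kallsyms.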
*/ 2738 mod->init_layout.size = ALIGN(mod->init_layout.size, 2739 __alignof__(struct mod_kallsyms)); 2740 info->mod_kallsyms_init_off = mod->init_layout.size; 2741 mod->init_layout.size += sizeof(struct mod_kallsyms); 2742 info->init_typeoffs = mod->init_layout.size; 2743 mod->init_layout.size += nsrc * sizeof(char); 2744 mod->init_layout.size = debug_align(mod->init_layout.size); 2745} 2746 2747/* 2748 * We use the full symtab and strtab which layout_symtab arranged to 2749 * be appended to the init section. Later we switch to the cut-down 2750 * core-only ones. 2751 */ 2752static void add_kallsyms(struct module *mod, const struct load_info *info) 2753{ 2754 unsigned int i, ndst; 2755 const Elf_Sym *src; 2756 Elf_Sym *dst; 2757 char *s; 2758 Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; 2759 2760 /* Set up to point into init section. */ 2761 mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off; 2762 2763 mod->kallsyms->symtab = (void *)symsec->sh_addr; 2764 mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); 2765 /* Make sure we get permanent strtab: don't use info->strtab. */ 2766 mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; 2767 mod->kallsyms->typetab = mod->init_layout.base + info->init_typeoffs; 2768 2769 /* 2770 * Now populate the cut down core kallsyms for after init 2771 * and set types up while we still have access to sections. 2772 */ 2773 mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs; 2774 mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs; 2775 mod->core_kallsyms.typetab = mod->core_layout.base + info->core_typeoffs; 2776 src = mod->kallsyms->symtab; 2777 for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { 2778 mod->kallsyms->typetab[i] = elf_type(src + i, info); 2779 if (i == 0 || is_livepatch_module(mod) || 2780 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, 2781 info->index.pcpu)) { 2782 mod->core_kallsyms.typetab[ndst] = 2783 mod->kallsyms->typetab[i]; 2784 dst[ndst] = src[i]; 2785 dst[ndst++].st_name = s - mod->core_kallsyms.strtab; 2786 s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], 2787 KSYM_NAME_LEN) + 1; 2788 } 2789 } 2790 mod->core_kallsyms.num_symtab = ndst; 2791} 2792#else 2793static inline void layout_symtab(struct module *mod, struct load_info *info) 2794{ 2795} 2796 2797static void add_kallsyms(struct module *mod, const struct load_info *info) 2798{ 2799} 2800#endif /* CONFIG_KALLSYMS */ 2801 2802#if IS_ENABLED(CONFIG_KALLSYMS) && IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) 2803static void init_build_id(struct module *mod, const struct load_info *info) 2804{ 2805 const Elf_Shdr *sechdr; 2806 unsigned int i; 2807 2808 for (i = 0; i < info->hdr->e_shnum; i++) { 2809 sechdr = &info->sechdrs[i]; 2810 if (!sect_empty(sechdr) && sechdr->sh_type == SHT_NOTE && 2811 !build_id_parse_buf((void *)sechdr->sh_addr, mod->build_id, 2812 sechdr->sh_size)) 2813 break; 2814 } 2815} 2816#else 2817static void init_build_id(struct module *mod, const struct load_info *info) 2818{ 2819} 2820#endif 2821 2822static void dynamic_debug_setup(struct module *mod, struct _ddebug *debug, unsigned int num) 2823{ 2824 if (!debug) 2825 return; 2826 ddebug_add_module(debug, num, mod->name); 2827} 2828 2829static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug) 2830{ 2831 if (debug) 2832 ddebug_remove_module(mod->name); 2833} 2834 2835void * __weak module_alloc(unsigned long size) 2836{ 2837 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, 
2838 GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, 2839 NUMA_NO_NODE, __builtin_return_address(0)); 2840} 2841 2842bool __weak module_init_section(const char *name) 2843{ 2844 return strstarts(name, ".init"); 2845} 2846 2847bool __weak module_exit_section(const char *name) 2848{ 2849 return strstarts(name, ".exit"); 2850} 2851 2852#ifdef CONFIG_DEBUG_KMEMLEAK 2853static void kmemleak_load_module(const struct module *mod, 2854 const struct load_info *info) 2855{ 2856 unsigned int i; 2857 2858 /* only scan the sections containing data */ 2859 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL); 2860 2861 for (i = 1; i < info->hdr->e_shnum; i++) { 2862 /* Scan all writable sections that are not executable */ 2863 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) || 2864 !(info->sechdrs[i].sh_flags & SHF_WRITE) || 2865 (info->sechdrs[i].sh_flags & SHF_EXECINSTR)) 2866 continue; 2867 2868 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr, 2869 info->sechdrs[i].sh_size, GFP_KERNEL); 2870 } 2871} 2872#else 2873static inline void kmemleak_load_module(const struct module *mod, 2874 const struct load_info *info) 2875{ 2876} 2877#endif 2878 2879#ifdef CONFIG_MODULE_SIG 2880static int module_sig_check(struct load_info *info, int flags) 2881{ 2882 int err = -ENODATA; 2883 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; 2884 const char *reason; 2885 const void *mod = info->hdr; 2886 bool mangled_module = flags & (MODULE_INIT_IGNORE_MODVERSIONS | 2887 MODULE_INIT_IGNORE_VERMAGIC); 2888 /* 2889 * Do not allow mangled modules, as a module with version information 2890 * removed is no longer the module that was signed. 2891 */ 2892 if (!mangled_module && 2893 info->len > markerlen && 2894 memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) { 2895 /* We truncate the module to discard the signature */ 2896 info->len -= markerlen; 2897 err = mod_verify_sig(mod, info); 2898 if (!err) { 2899 info->sig_ok = true; 2900 return 0; 2901 } 2902 } 2903 2904 /* 2905 * We don't permit modules to be loaded into a trusted kernel 2906 * without a valid signature on them, but if we're not enforcing, 2907 * certain errors are non-fatal. 2908 */ 2909 switch (err) { 2910 case -ENODATA: 2911 reason = "unsigned module"; 2912 break; 2913 case -ENOPKG: 2914 reason = "module with unsupported crypto"; 2915 break; 2916 case -ENOKEY: 2917 reason = "module with unavailable key"; 2918 break; 2919 2920 default: 2921 /* 2922 * All other errors are fatal, including lack of memory, 2923 * unparseable signatures, and signature check failures -- 2924 * even if signatures aren't required. 2925 */ 2926 return err; 2927 } 2928 2929 if (is_module_sig_enforced()) { 2930 pr_notice("Loading of %s is rejected\n", reason); 2931 return -EKEYREJECTED; 2932 } 2933 2934 return security_locked_down(LOCKDOWN_MODULE_SIGNATURE); 2935} 2936#else /* !CONFIG_MODULE_SIG */ 2937static int module_sig_check(struct load_info *info, int flags) 2938{ 2939 return 0; 2940} 2941#endif /* !CONFIG_MODULE_SIG */ 2942 2943static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr) 2944{ 2945#if defined(CONFIG_64BIT) 2946 unsigned long long secend; 2947#else 2948 unsigned long secend; 2949#endif 2950 2951 /* 2952 * Check for both overflow and offset/size being 2953 * too large. 
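The wrap test below (secend < sh_offset) catches sh_offset + sh_size overflowing the type.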
2954 */ 2955 secend = shdr->sh_offset + shdr->sh_size; 2956 if (secend < shdr->sh_offset || secend > info->len) 2957 return -ENOEXEC; 2958 2959 return 0; 2960} 2961 2962/* 2963 * Sanity checks against invalid binaries, wrong arch, weird elf version. 2964 * 2965 * Also do basic validity checks against section offsets and sizes, the 2966 * section name string table, and the indices used for it (sh_name). 2967 */ 2968static int elf_validity_check(struct load_info *info) 2969{ 2970 unsigned int i; 2971 Elf_Shdr *shdr, *strhdr; 2972 int err; 2973 2974 if (info->len < sizeof(*(info->hdr))) { 2975 pr_err("Invalid ELF header len %lu\n", info->len); 2976 goto no_exec; 2977 } 2978 2979 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) { 2980 pr_err("Invalid ELF header magic: != %s\n", ELFMAG); 2981 goto no_exec; 2982 } 2983 if (info->hdr->e_type != ET_REL) { 2984 pr_err("Invalid ELF header type: %u != %u\n", 2985 info->hdr->e_type, ET_REL); 2986 goto no_exec; 2987 } 2988 if (!elf_check_arch(info->hdr)) { 2989 pr_err("Invalid architecture in ELF header: %u\n", 2990 info->hdr->e_machine); 2991 goto no_exec; 2992 } 2993 if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) { 2994 pr_err("Invalid ELF section header size\n"); 2995 goto no_exec; 2996 } 2997 2998 /* 2999 * e_shnum is 16 bits, and sizeof(Elf_Shdr) is 3000 * known and small. So e_shnum * sizeof(Elf_Shdr) 3001 * will not overflow unsigned long on any platform. 3002 */ 3003 if (info->hdr->e_shoff >= info->len 3004 || (info->hdr->e_shnum * sizeof(Elf_Shdr) > 3005 info->len - info->hdr->e_shoff)) { 3006 pr_err("Invalid ELF section header overflow\n"); 3007 goto no_exec; 3008 } 3009 3010 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; 3011 3012 /* 3013 * Verify if the section name table index is valid. 3014 */ 3015 if (info->hdr->e_shstrndx == SHN_UNDEF 3016 || info->hdr->e_shstrndx >= info->hdr->e_shnum) { 3017 pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n", 3018 info->hdr->e_shstrndx, info->hdr->e_shstrndx, 3019 info->hdr->e_shnum); 3020 goto no_exec; 3021 } 3022 3023 strhdr = &info->sechdrs[info->hdr->e_shstrndx]; 3024 err = validate_section_offset(info, strhdr); 3025 if (err < 0) { 3026 pr_err("Invalid ELF section hdr(type %u)\n", strhdr->sh_type); 3027 return err; 3028 } 3029 3030 /* 3031 * The section name table must be NUL-terminated, as required 3032 * by the spec. This makes strcmp and pr_* calls that access 3033 * strings in the section safe. 3034 */ 3035 info->secstrings = (void *)info->hdr + strhdr->sh_offset; 3036 if (info->secstrings[strhdr->sh_size - 1] != '\0') { 3037 pr_err("ELF Spec violation: section name table isn't null terminated\n"); 3038 goto no_exec; 3039 } 3040 3041 /* 3042 * The code assumes that section 0 has a length of zero and 3043 * an addr of zero, so check for it. 
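load_module() relies on this when it treats index 0 as "no such section" for optional sections.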
3044 */ 3045 if (info->sechdrs[0].sh_type != SHT_NULL 3046 || info->sechdrs[0].sh_size != 0 3047 || info->sechdrs[0].sh_addr != 0) { 3048 pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n", 3049 info->sechdrs[0].sh_type); 3050 goto no_exec; 3051 } 3052 3053 for (i = 1; i < info->hdr->e_shnum; i++) { 3054 shdr = &info->sechdrs[i]; 3055 switch (shdr->sh_type) { 3056 case SHT_NULL: 3057 case SHT_NOBITS: 3058 continue; 3059 case SHT_SYMTAB: 3060 if (shdr->sh_link == SHN_UNDEF 3061 || shdr->sh_link >= info->hdr->e_shnum) { 3062 pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n", 3063 shdr->sh_link, shdr->sh_link, 3064 info->hdr->e_shnum); 3065 goto no_exec; 3066 } 3067 fallthrough; 3068 default: 3069 err = validate_section_offset(info, shdr); 3070 if (err < 0) { 3071 pr_err("Invalid ELF section in module (section %u type %u)\n", 3072 i, shdr->sh_type); 3073 return err; 3074 } 3075 3076 if (shdr->sh_flags & SHF_ALLOC) { 3077 if (shdr->sh_name >= strhdr->sh_size) { 3078 pr_err("Invalid ELF section name in module (section %u type %u)\n", 3079 i, shdr->sh_type); 3080 return -ENOEXEC; 3081 } 3082 } 3083 break; 3084 } 3085 } 3086 3087 return 0; 3088 3089no_exec: 3090 return -ENOEXEC; 3091} 3092 3093#define COPY_CHUNK_SIZE (16*PAGE_SIZE) 3094 3095static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len) 3096{ 3097 do { 3098 unsigned long n = min(len, COPY_CHUNK_SIZE); 3099 3100 if (copy_from_user(dst, usrc, n) != 0) 3101 return -EFAULT; 3102 cond_resched(); 3103 dst += n; 3104 usrc += n; 3105 len -= n; 3106 } while (len); 3107 return 0; 3108} 3109 3110#ifdef CONFIG_LIVEPATCH 3111static int check_modinfo_livepatch(struct module *mod, struct load_info *info) 3112{ 3113 if (get_modinfo(info, "livepatch")) { 3114 mod->klp = true; 3115 add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK); 3116 pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n", 3117 mod->name); 3118 } 3119 3120 return 0; 3121} 3122#else /* !CONFIG_LIVEPATCH */ 3123static int check_modinfo_livepatch(struct module *mod, struct load_info *info) 3124{ 3125 if (get_modinfo(info, "livepatch")) { 3126 pr_err("%s: module is marked as livepatch module, but livepatch support is disabled", 3127 mod->name); 3128 return -ENOEXEC; 3129 } 3130 3131 return 0; 3132} 3133#endif /* CONFIG_LIVEPATCH */ 3134 3135static void check_modinfo_retpoline(struct module *mod, struct load_info *info) 3136{ 3137 if (retpoline_module_ok(get_modinfo(info, "retpoline"))) 3138 return; 3139 3140 pr_warn("%s: loading module not compiled with retpoline compiler.\n", 3141 mod->name); 3142} 3143 3144/* Sets info->hdr and info->len. */ 3145static int copy_module_from_user(const void __user *umod, unsigned long len, 3146 struct load_info *info) 3147{ 3148 int err; 3149 3150 info->len = len; 3151 if (info->len < sizeof(*(info->hdr))) 3152 return -ENOEXEC; 3153 3154 err = security_kernel_load_data(LOADING_MODULE, true); 3155 if (err) 3156 return err; 3157 3158 /* Suck in entire file: we'll want most of it. 
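A private vmalloc copy also means userspace cannot modify the image after it has been checked.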
*/ 3159 info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN); 3160 if (!info->hdr) 3161 return -ENOMEM; 3162 3163 if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) { 3164 err = -EFAULT; 3165 goto out; 3166 } 3167 3168 err = security_kernel_post_load_data((char *)info->hdr, info->len, 3169 LOADING_MODULE, "init_module"); 3170out: 3171 if (err) 3172 vfree(info->hdr); 3173 3174 return err; 3175} 3176 3177static void free_copy(struct load_info *info, int flags) 3178{ 3179 if (flags & MODULE_INIT_COMPRESSED_FILE) 3180 module_decompress_cleanup(info); 3181 else 3182 vfree(info->hdr); 3183} 3184 3185static int rewrite_section_headers(struct load_info *info, int flags) 3186{ 3187 unsigned int i; 3188 3189 /* This should always be true, but let's be sure. */ 3190 info->sechdrs[0].sh_addr = 0; 3191 3192 for (i = 1; i < info->hdr->e_shnum; i++) { 3193 Elf_Shdr *shdr = &info->sechdrs[i]; 3194 3195 /* 3196 * Mark all sections sh_addr with their address in the 3197 * temporary image. 3198 */ 3199 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset; 3200 3201 } 3202 3203 /* Track but don't keep modinfo and version sections. */ 3204 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; 3205 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; 3206 3207 return 0; 3208} 3209 3210/* 3211 * Set up our basic convenience variables (pointers to section headers, 3212 * search for module section index etc), and do some basic section 3213 * verification. 3214 * 3215 * Set info->mod to the temporary copy of the module in info->hdr. The final one 3216 * will be allocated in move_module(). 3217 */ 3218static int setup_load_info(struct load_info *info, int flags) 3219{ 3220 unsigned int i; 3221 3222 /* Try to find a name early so we can log errors with a module name */ 3223 info->index.info = find_sec(info, ".modinfo"); 3224 if (info->index.info) 3225 info->name = get_modinfo(info, "name"); 3226 3227 /* Find internal symbols and strings. */ 3228 for (i = 1; i < info->hdr->e_shnum; i++) { 3229 if (info->sechdrs[i].sh_type == SHT_SYMTAB) { 3230 info->index.sym = i; 3231 info->index.str = info->sechdrs[i].sh_link; 3232 info->strtab = (char *)info->hdr 3233 + info->sechdrs[info->index.str].sh_offset; 3234 break; 3235 } 3236 } 3237 3238 if (info->index.sym == 0) { 3239 pr_warn("%s: module has no symbols (stripped?)\n", 3240 info->name ?: "(missing .modinfo section or name field)"); 3241 return -ENOEXEC; 3242 } 3243 3244 info->index.mod = find_sec(info, ".gnu.linkonce.this_module"); 3245 if (!info->index.mod) { 3246 pr_warn("%s: No module found in object\n", 3247 info->name ?: "(missing .modinfo section or name field)"); 3248 return -ENOEXEC; 3249 } 3250 /* This is temporary: point mod into copy of data. */ 3251 info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset; 3252 3253 /* 3254 * If we didn't load the .modinfo 'name' field earlier, fall back to 3255 * on-disk struct mod 'name' field. 3256 */ 3257 if (!info->name) 3258 info->name = info->mod->name; 3259 3260 if (flags & MODULE_INIT_IGNORE_MODVERSIONS) 3261 info->index.vers = 0; /* Pretend no __versions section! 
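Index 0 is the zero-sized SHT_NULL section, so version lookups simply find nothing.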
*/ 3262 else 3263 info->index.vers = find_sec(info, "__versions"); 3264 3265 info->index.pcpu = find_pcpusec(info); 3266 3267 return 0; 3268} 3269 3270static int check_modinfo(struct module *mod, struct load_info *info, int flags) 3271{ 3272 const char *modmagic = get_modinfo(info, "vermagic"); 3273 int err; 3274 3275 if (flags & MODULE_INIT_IGNORE_VERMAGIC) 3276 modmagic = NULL; 3277 3278 /* This is allowed: modprobe --force will invalidate it. */ 3279 if (!modmagic) { 3280 err = try_to_force_load(mod, "bad vermagic"); 3281 if (err) 3282 return err; 3283 } else if (!same_magic(modmagic, vermagic, info->index.vers)) { 3284 pr_err("%s: version magic '%s' should be '%s'\n", 3285 info->name, modmagic, vermagic); 3286 return -ENOEXEC; 3287 } 3288 3289 if (!get_modinfo(info, "intree")) { 3290 if (!test_taint(TAINT_OOT_MODULE)) 3291 pr_warn("%s: loading out-of-tree module taints kernel.\n", 3292 mod->name); 3293 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); 3294 } 3295 3296 check_modinfo_retpoline(mod, info); 3297 3298 if (get_modinfo(info, "staging")) { 3299 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); 3300 pr_warn("%s: module is from the staging directory, the quality " 3301 "is unknown, you have been warned.\n", mod->name); 3302 } 3303 3304 err = check_modinfo_livepatch(mod, info); 3305 if (err) 3306 return err; 3307 3308 /* Set up license info based on the info section */ 3309 set_license(mod, get_modinfo(info, "license")); 3310 3311 return 0; 3312} 3313 3314static int find_module_sections(struct module *mod, struct load_info *info) 3315{ 3316 mod->kp = section_objs(info, "__param", 3317 sizeof(*mod->kp), &mod->num_kp); 3318 mod->syms = section_objs(info, "__ksymtab", 3319 sizeof(*mod->syms), &mod->num_syms); 3320 mod->crcs = section_addr(info, "__kcrctab"); 3321 mod->gpl_syms = section_objs(info, "__ksymtab_gpl", 3322 sizeof(*mod->gpl_syms), 3323 &mod->num_gpl_syms); 3324 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl"); 3325 3326#ifdef CONFIG_CONSTRUCTORS 3327 mod->ctors = section_objs(info, ".ctors", 3328 sizeof(*mod->ctors), &mod->num_ctors); 3329 if (!mod->ctors) 3330 mod->ctors = section_objs(info, ".init_array", 3331 sizeof(*mod->ctors), &mod->num_ctors); 3332 else if (find_sec(info, ".init_array")) { 3333 /* 3334 * This shouldn't happen with same compiler and binutils 3335 * building all parts of the module. 
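With both present the constructor order would be ambiguous, so the load is refused.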
3336 */ 3337 pr_warn("%s: has both .ctors and .init_array.\n", 3338 mod->name); 3339 return -EINVAL; 3340 } 3341#endif 3342 3343 mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1, 3344 &mod->noinstr_text_size); 3345 3346#ifdef CONFIG_TRACEPOINTS 3347 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs", 3348 sizeof(*mod->tracepoints_ptrs), 3349 &mod->num_tracepoints); 3350#endif 3351#ifdef CONFIG_TREE_SRCU 3352 mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs", 3353 sizeof(*mod->srcu_struct_ptrs), 3354 &mod->num_srcu_structs); 3355#endif 3356#ifdef CONFIG_BPF_EVENTS 3357 mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map", 3358 sizeof(*mod->bpf_raw_events), 3359 &mod->num_bpf_raw_events); 3360#endif 3361#ifdef CONFIG_DEBUG_INFO_BTF_MODULES 3362 mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size); 3363#endif 3364#ifdef CONFIG_JUMP_LABEL 3365 mod->jump_entries = section_objs(info, "__jump_table", 3366 sizeof(*mod->jump_entries), 3367 &mod->num_jump_entries); 3368#endif 3369#ifdef CONFIG_EVENT_TRACING 3370 mod->trace_events = section_objs(info, "_ftrace_events", 3371 sizeof(*mod->trace_events), 3372 &mod->num_trace_events); 3373 mod->trace_evals = section_objs(info, "_ftrace_eval_map", 3374 sizeof(*mod->trace_evals), 3375 &mod->num_trace_evals); 3376#endif 3377#ifdef CONFIG_TRACING 3378 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", 3379 sizeof(*mod->trace_bprintk_fmt_start), 3380 &mod->num_trace_bprintk_fmt); 3381#endif 3382#ifdef CONFIG_FTRACE_MCOUNT_RECORD 3383 /* sechdrs[0].sh_size is always zero */ 3384 mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION, 3385 sizeof(*mod->ftrace_callsites), 3386 &mod->num_ftrace_callsites); 3387#endif 3388#ifdef CONFIG_FUNCTION_ERROR_INJECTION 3389 mod->ei_funcs = section_objs(info, "_error_injection_whitelist", 3390 sizeof(*mod->ei_funcs), 3391 &mod->num_ei_funcs); 3392#endif 3393#ifdef CONFIG_KPROBES 3394 mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1, 3395 &mod->kprobes_text_size); 3396 mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist", 3397 sizeof(unsigned long), 3398 &mod->num_kprobe_blacklist); 3399#endif 3400#ifdef CONFIG_PRINTK_INDEX 3401 mod->printk_index_start = section_objs(info, ".printk_index", 3402 sizeof(*mod->printk_index_start), 3403 &mod->printk_index_size); 3404#endif 3405#ifdef CONFIG_HAVE_STATIC_CALL_INLINE 3406 mod->static_call_sites = section_objs(info, ".static_call_sites", 3407 sizeof(*mod->static_call_sites), 3408 &mod->num_static_call_sites); 3409#endif 3410 mod->extable = section_objs(info, "__ex_table", 3411 sizeof(*mod->extable), &mod->num_exentries); 3412 3413 if (section_addr(info, "__obsparm")) 3414 pr_warn("%s: Ignoring obsolete parameters\n", mod->name); 3415 3416 info->debug = section_objs(info, "__dyndbg", 3417 sizeof(*info->debug), &info->num_debug); 3418 3419 return 0; 3420} 3421 3422static int move_module(struct module *mod, struct load_info *info) 3423{ 3424 int i; 3425 void *ptr; 3426 3427 /* Do the allocs. */ 3428 ptr = module_alloc(mod->core_layout.size); 3429 /* 3430 * The pointer to this block is stored in the module structure 3431 * which is inside the block. Just mark it as not being a 3432 * leak. 
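kmemleak_not_leak() tolerates a NULL pointer, which is why it can be called before the allocation is checked.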
3433 */ 3434 kmemleak_not_leak(ptr); 3435 if (!ptr) 3436 return -ENOMEM; 3437 3438 memset(ptr, 0, mod->core_layout.size); 3439 mod->core_layout.base = ptr; 3440 3441 if (mod->init_layout.size) { 3442 ptr = module_alloc(mod->init_layout.size); 3443 /* 3444 * The pointer to this block is stored in the module structure 3445 * which is inside the block. This block doesn't need to be 3446 * scanned as it contains data and code that will be freed 3447 * after the module is initialized. 3448 */ 3449 kmemleak_ignore(ptr); 3450 if (!ptr) { 3451 module_memfree(mod->core_layout.base); 3452 return -ENOMEM; 3453 } 3454 memset(ptr, 0, mod->init_layout.size); 3455 mod->init_layout.base = ptr; 3456 } else 3457 mod->init_layout.base = NULL; 3458 3459 /* Transfer each section which specifies SHF_ALLOC */ 3460 pr_debug("final section addresses:\n"); 3461 for (i = 0; i < info->hdr->e_shnum; i++) { 3462 void *dest; 3463 Elf_Shdr *shdr = &info->sechdrs[i]; 3464 3465 if (!(shdr->sh_flags & SHF_ALLOC)) 3466 continue; 3467 3468 if (shdr->sh_entsize & INIT_OFFSET_MASK) 3469 dest = mod->init_layout.base 3470 + (shdr->sh_entsize & ~INIT_OFFSET_MASK); 3471 else 3472 dest = mod->core_layout.base + shdr->sh_entsize; 3473 3474 if (shdr->sh_type != SHT_NOBITS) 3475 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); 3476 /* Update sh_addr to point to copy in image. */ 3477 shdr->sh_addr = (unsigned long)dest; 3478 pr_debug("\t0x%lx %s\n", 3479 (long)shdr->sh_addr, info->secstrings + shdr->sh_name); 3480 } 3481 3482 return 0; 3483} 3484 3485static int check_module_license_and_versions(struct module *mod) 3486{ 3487 int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE); 3488 3489 /* 3490 * ndiswrapper is under GPL by itself, but loads proprietary modules. 3491 * Don't use add_taint_module(), as it would prevent ndiswrapper from 3492 * using GPL-only symbols it needs. 3493 */ 3494 if (strcmp(mod->name, "ndiswrapper") == 0) 3495 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); 3496 3497 /* driverloader was caught wrongly pretending to be under GPL */ 3498 if (strcmp(mod->name, "driverloader") == 0) 3499 add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 3500 LOCKDEP_NOW_UNRELIABLE); 3501 3502 /* lve claims to be GPL but upstream won't provide source */ 3503 if (strcmp(mod->name, "lve") == 0) 3504 add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 3505 LOCKDEP_NOW_UNRELIABLE); 3506 3507 if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE)) 3508 pr_warn("%s: module license taints kernel.\n", mod->name); 3509 3510#ifdef CONFIG_MODVERSIONS 3511 if ((mod->num_syms && !mod->crcs) || 3512 (mod->num_gpl_syms && !mod->gpl_crcs)) { 3513 return try_to_force_load(mod, 3514 "no versions for exported symbols"); 3515 } 3516#endif 3517 return 0; 3518} 3519 3520static void flush_module_icache(const struct module *mod) 3521{ 3522 /* 3523 * Flush the instruction cache, since we've played with text. 3524 * Do it before processing of module parameters, so the module 3525 * can provide parameter accessor functions of its own. 
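Parameter setters run from parse_args() before init, so any module text they reach must already be coherent in the instruction cache.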
3526 */ 3527 if (mod->init_layout.base) 3528 flush_icache_range((unsigned long)mod->init_layout.base, 3529 (unsigned long)mod->init_layout.base 3530 + mod->init_layout.size); 3531 flush_icache_range((unsigned long)mod->core_layout.base, 3532 (unsigned long)mod->core_layout.base + mod->core_layout.size); 3533} 3534 3535int __weak module_frob_arch_sections(Elf_Ehdr *hdr, 3536 Elf_Shdr *sechdrs, 3537 char *secstrings, 3538 struct module *mod) 3539{ 3540 return 0; 3541} 3542 3543/* module_blacklist is a comma-separated list of module names */ 3544static char *module_blacklist; 3545static bool blacklisted(const char *module_name) 3546{ 3547 const char *p; 3548 size_t len; 3549 3550 if (!module_blacklist) 3551 return false; 3552 3553 for (p = module_blacklist; *p; p += len) { 3554 len = strcspn(p, ","); 3555 if (strlen(module_name) == len && !memcmp(module_name, p, len)) 3556 return true; 3557 if (p[len] == ',') 3558 len++; 3559 } 3560 return false; 3561} 3562core_param(module_blacklist, module_blacklist, charp, 0400); 3563 3564static struct module *layout_and_allocate(struct load_info *info, int flags) 3565{ 3566 struct module *mod; 3567 unsigned int ndx; 3568 int err; 3569 3570 err = check_modinfo(info->mod, info, flags); 3571 if (err) 3572 return ERR_PTR(err); 3573 3574 /* Allow arches to frob section contents and sizes. */ 3575 err = module_frob_arch_sections(info->hdr, info->sechdrs, 3576 info->secstrings, info->mod); 3577 if (err < 0) 3578 return ERR_PTR(err); 3579 3580 err = module_enforce_rwx_sections(info->hdr, info->sechdrs, 3581 info->secstrings, info->mod); 3582 if (err < 0) 3583 return ERR_PTR(err); 3584 3585 /* We will do a special allocation for per-cpu sections later. */ 3586 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; 3587 3588 /* 3589 * Mark ro_after_init section with SHF_RO_AFTER_INIT so that 3590 * layout_sections() can put it in the right place. 3591 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set. 3592 */ 3593 ndx = find_sec(info, ".data..ro_after_init"); 3594 if (ndx) 3595 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; 3596 /* 3597 * Mark the __jump_table section as ro_after_init as well: these data 3598 * structures are never modified, with the exception of entries that 3599 * refer to code in the __init section, which are annotated as such 3600 * at module load time. 3601 */ 3602 ndx = find_sec(info, "__jump_table"); 3603 if (ndx) 3604 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; 3605 3606 /* 3607 * Determine total sizes, and put offsets in sh_entsize. For now 3608 * this is done generically; there doesn't appear to be any 3609 * special cases for the architectures. 3610 */ 3611 layout_sections(info->mod, info); 3612 layout_symtab(info->mod, info); 3613 3614 /* Allocate and move to the final place */ 3615 err = move_module(info->mod, info); 3616 if (err) 3617 return ERR_PTR(err); 3618 3619 /* Module has been copied to its final place now: return it. */ 3620 mod = (void *)info->sechdrs[info->index.mod].sh_addr; 3621 kmemleak_load_module(mod, info); 3622 return mod; 3623} 3624 3625/* mod is no longer valid after this! 
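The struct module itself lives inside core_layout, which is freed here.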
*/ 3626static void module_deallocate(struct module *mod, struct load_info *info) 3627{ 3628 percpu_modfree(mod); 3629 module_arch_freeing_init(mod); 3630 module_memfree(mod->init_layout.base); 3631 module_memfree(mod->core_layout.base); 3632} 3633 3634int __weak module_finalize(const Elf_Ehdr *hdr, 3635 const Elf_Shdr *sechdrs, 3636 struct module *me) 3637{ 3638 return 0; 3639} 3640 3641static int post_relocation(struct module *mod, const struct load_info *info) 3642{ 3643 /* Sort exception table now relocations are done. */ 3644 sort_extable(mod->extable, mod->extable + mod->num_exentries); 3645 3646 /* Copy relocated percpu area over. */ 3647 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, 3648 info->sechdrs[info->index.pcpu].sh_size); 3649 3650 /* Setup kallsyms-specific fields. */ 3651 add_kallsyms(mod, info); 3652 3653 /* Arch-specific module finalizing. */ 3654 return module_finalize(info->hdr, info->sechdrs, mod); 3655} 3656 3657/* Is this module of this name done loading? No locks held. */ 3658static bool finished_loading(const char *name) 3659{ 3660 struct module *mod; 3661 bool ret; 3662 3663 /* 3664 * The module_mutex should not be a heavily contended lock; 3665 * if we get the occasional sleep here, we'll go an extra iteration 3666 * in the wait_event_interruptible(), which is harmless. 3667 */ 3668 sched_annotate_sleep(); 3669 mutex_lock(&module_mutex); 3670 mod = find_module_all(name, strlen(name), true); 3671 ret = !mod || mod->state == MODULE_STATE_LIVE; 3672 mutex_unlock(&module_mutex); 3673 3674 return ret; 3675} 3676 3677/* Call module constructors. */ 3678static void do_mod_ctors(struct module *mod) 3679{ 3680#ifdef CONFIG_CONSTRUCTORS 3681 unsigned long i; 3682 3683 for (i = 0; i < mod->num_ctors; i++) 3684 mod->ctors[i](); 3685#endif 3686} 3687 3688/* For freeing module_init on success, in case kallsyms traversing */ 3689struct mod_initfree { 3690 struct llist_node node; 3691 void *module_init; 3692}; 3693 3694static void do_free_init(struct work_struct *w) 3695{ 3696 struct llist_node *pos, *n, *list; 3697 struct mod_initfree *initfree; 3698 3699 list = llist_del_all(&init_free_list); 3700 3701 synchronize_rcu(); 3702 3703 llist_for_each_safe(pos, n, list) { 3704 initfree = container_of(pos, struct mod_initfree, node); 3705 module_memfree(initfree->module_init); 3706 kfree(initfree); 3707 } 3708} 3709 3710/* 3711 * This is where the real work happens. 3712 * 3713 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb 3714 * helper command 'lx-symbols'. 3715 */ 3716static noinline int do_init_module(struct module *mod) 3717{ 3718 int ret = 0; 3719 struct mod_initfree *freeinit; 3720 3721 freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL); 3722 if (!freeinit) { 3723 ret = -ENOMEM; 3724 goto fail; 3725 } 3726 freeinit->module_init = mod->init_layout.base; 3727 3728 do_mod_ctors(mod); 3729 /* Start the module */ 3730 if (mod->init != NULL) 3731 ret = do_one_initcall(mod->init); 3732 if (ret < 0) { 3733 goto fail_free_freeinit; 3734 } 3735 if (ret > 0) { 3736 pr_warn("%s: '%s'->init suspiciously returned %d, it should " 3737 "follow 0/-E convention\n" 3738 "%s: loading module anyway...\n", 3739 __func__, mod->name, ret, __func__); 3740 dump_stack(); 3741 } 3742 3743 /* Now it's a first class citizen! 
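Set MODULE_STATE_LIVE before calling the notifier chain so subscribers see a fully initialized module.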
*/ 3744 mod->state = MODULE_STATE_LIVE; 3745 blocking_notifier_call_chain(&module_notify_list, 3746 MODULE_STATE_LIVE, mod); 3747 3748 /* Delay uevent until module has finished its init routine */ 3749 kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); 3750 3751 /* 3752 * We need to finish all async code before the module init sequence 3753 * is done. This has potential to deadlock if synchronous module 3754 * loading is requested from async (which is not allowed!). 3755 * 3756 * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous 3757 * request_module() from async workers") for more details. 3758 */ 3759 if (!mod->async_probe_requested) 3760 async_synchronize_full(); 3761 3762 ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base + 3763 mod->init_layout.size); 3764 mutex_lock(&module_mutex); 3765 /* Drop initial reference. */ 3766 module_put(mod); 3767 trim_init_extable(mod); 3768#ifdef CONFIG_KALLSYMS 3769 /* Switch to core kallsyms now init is done: kallsyms may be walking! */ 3770 rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); 3771#endif 3772 module_enable_ro(mod, true); 3773 mod_tree_remove_init(mod); 3774 module_arch_freeing_init(mod); 3775 mod->init_layout.base = NULL; 3776 mod->init_layout.size = 0; 3777 mod->init_layout.ro_size = 0; 3778 mod->init_layout.ro_after_init_size = 0; 3779 mod->init_layout.text_size = 0; 3780#ifdef CONFIG_DEBUG_INFO_BTF_MODULES 3781 /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointer */ 3782 mod->btf_data = NULL; 3783#endif 3784 /* 3785 * We want to free module_init, but be aware that kallsyms may be 3786 * walking this with preempt disabled. In all the failure paths, we 3787 * call synchronize_rcu(), but we don't want to slow down the success 3788 * path. module_memfree() cannot be called in an interrupt, so do the 3789 * work and call synchronize_rcu() in a work queue. 3790 * 3791 * Note that module_alloc() on most architectures creates W+X page 3792 * mappings which won't be cleaned up until do_free_init() runs. Any 3793 * code such as mark_rodata_ro() which depends on those mappings to 3794 * be cleaned up needs to sync with the queued work - ie 3795 * rcu_barrier() 3796 */ 3797 if (llist_add(&freeinit->node, &init_free_list)) 3798 schedule_work(&init_free_wq); 3799 3800 mutex_unlock(&module_mutex); 3801 wake_up_all(&module_wq); 3802 3803 return 0; 3804 3805fail_free_freeinit: 3806 kfree(freeinit); 3807fail: 3808 /* Try to protect us from buggy refcounters. */ 3809 mod->state = MODULE_STATE_GOING; 3810 synchronize_rcu(); 3811 module_put(mod); 3812 blocking_notifier_call_chain(&module_notify_list, 3813 MODULE_STATE_GOING, mod); 3814 klp_module_going(mod); 3815 ftrace_release_mod(mod); 3816 free_module(mod); 3817 wake_up_all(&module_wq); 3818 return ret; 3819} 3820 3821static int may_init_module(void) 3822{ 3823 if (!capable(CAP_SYS_MODULE) || modules_disabled) 3824 return -EPERM; 3825 3826 return 0; 3827} 3828 3829/* 3830 * We try to place it in the list now to make sure it's unique before 3831 * we dedicate too many resources. In particular, temporary percpu 3832 * memory exhaustion. 3833 */ 3834static int add_unformed_module(struct module *mod) 3835{ 3836 int err; 3837 struct module *old; 3838 3839 mod->state = MODULE_STATE_UNFORMED; 3840 3841again: 3842 mutex_lock(&module_mutex); 3843 old = find_module_all(mod->name, strlen(mod->name), true); 3844 if (old != NULL) { 3845 if (old->state != MODULE_STATE_LIVE) { 3846 /* Wait in case it fails to load. 
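finished_loading() returns true once the old copy is either live or has been removed from the list.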
*/ 3847 mutex_unlock(&module_mutex); 3848 err = wait_event_interruptible(module_wq, 3849 finished_loading(mod->name)); 3850 if (err) 3851 goto out_unlocked; 3852 goto again; 3853 } 3854 err = -EEXIST; 3855 goto out; 3856 } 3857 mod_update_bounds(mod); 3858 list_add_rcu(&mod->list, &modules); 3859 mod_tree_insert(mod); 3860 err = 0; 3861 3862out: 3863 mutex_unlock(&module_mutex); 3864out_unlocked: 3865 return err; 3866} 3867 3868static int complete_formation(struct module *mod, struct load_info *info) 3869{ 3870 int err; 3871 3872 mutex_lock(&module_mutex); 3873 3874 /* Find duplicate symbols (must be called under lock). */ 3875 err = verify_exported_symbols(mod); 3876 if (err < 0) 3877 goto out; 3878 3879 /* This relies on module_mutex for list integrity. */ 3880 module_bug_finalize(info->hdr, info->sechdrs, mod); 3881 3882 module_enable_ro(mod, false); 3883 module_enable_nx(mod); 3884 module_enable_x(mod); 3885 3886 /* 3887 * Mark state as coming so strong_try_module_get() ignores us, 3888 * but kallsyms etc. can see us. 3889 */ 3890 mod->state = MODULE_STATE_COMING; 3891 mutex_unlock(&module_mutex); 3892 3893 return 0; 3894 3895out: 3896 mutex_unlock(&module_mutex); 3897 return err; 3898} 3899 3900static int prepare_coming_module(struct module *mod) 3901{ 3902 int err; 3903 3904 ftrace_module_enable(mod); 3905 err = klp_module_coming(mod); 3906 if (err) 3907 return err; 3908 3909 err = blocking_notifier_call_chain_robust(&module_notify_list, 3910 MODULE_STATE_COMING, MODULE_STATE_GOING, mod); 3911 err = notifier_to_errno(err); 3912 if (err) 3913 klp_module_going(mod); 3914 3915 return err; 3916} 3917 3918static int unknown_module_param_cb(char *param, char *val, const char *modname, 3919 void *arg) 3920{ 3921 struct module *mod = arg; 3922 int ret; 3923 3924 if (strcmp(param, "async_probe") == 0) { 3925 mod->async_probe_requested = true; 3926 return 0; 3927 } 3928 3929 /* Check for magic 'dyndbg' arg */ 3930 ret = ddebug_dyndbg_module_param_cb(param, val, modname); 3931 if (ret != 0) 3932 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param); 3933 return 0; 3934} 3935 3936static void cfi_init(struct module *mod); 3937 3938/* 3939 * Allocate and load the module: note that size of section 0 is always 3940 * zero, and we rely on this for optional sections. 3941 */ 3942static int load_module(struct load_info *info, const char __user *uargs, 3943 int flags) 3944{ 3945 struct module *mod; 3946 long err = 0; 3947 char *after_dashes; 3948 3949 /* 3950 * Do the signature check (if any) first. All that 3951 * the signature check needs is info->len, it does 3952 * not need any of the section info. That can be 3953 * set up later. This will minimize the chances 3954 * of a corrupt module causing problems before 3955 * we even get to the signature check. 3956 * 3957 * The check will also adjust info->len by stripping 3958 * off the sig length at the end of the module, making 3959 * checks against info->len more correct. 3960 */ 3961 err = module_sig_check(info, flags); 3962 if (err) 3963 goto free_copy; 3964 3965 /* 3966 * Do basic sanity checks against the ELF header and 3967 * sections. 3968 */ 3969 err = elf_validity_check(info); 3970 if (err) 3971 goto free_copy; 3972 3973 /* 3974 * Everything checks out, so set up the section info 3975 * in the info structure. 3976 */ 3977 err = setup_load_info(info, flags); 3978 if (err) 3979 goto free_copy; 3980 3981 /* 3982 * Now that we know we have the correct module name, check 3983 * if it's blacklisted. 
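module_blacklist is the comma-separated list taken from the kernel command line and parsed by blacklisted() above.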
static int unknown_module_param_cb(char *param, char *val, const char *modname,
				   void *arg)
{
	struct module *mod = arg;
	int ret;

	if (strcmp(param, "async_probe") == 0) {
		mod->async_probe_requested = true;
		return 0;
	}

	/* Check for magic 'dyndbg' arg */
	ret = ddebug_dyndbg_module_param_cb(param, val, modname);
	if (ret != 0)
		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
	return 0;
}

static void cfi_init(struct module *mod);

/*
 * Allocate and load the module: note that size of section 0 is always
 * zero, and we rely on this for optional sections.
 */
static int load_module(struct load_info *info, const char __user *uargs,
		       int flags)
{
	struct module *mod;
	long err = 0;
	char *after_dashes;

	/*
	 * Do the signature check (if any) first. All that
	 * the signature check needs is info->len, it does
	 * not need any of the section info. That can be
	 * set up later. This will minimize the chances
	 * of a corrupt module causing problems before
	 * we even get to the signature check.
	 *
	 * The check will also adjust info->len by stripping
	 * off the sig length at the end of the module, so that
	 * later checks against info->len are accurate.
	 */
	err = module_sig_check(info, flags);
	if (err)
		goto free_copy;

	/*
	 * Do basic sanity checks against the ELF header and
	 * sections.
	 */
	err = elf_validity_check(info);
	if (err)
		goto free_copy;

	/*
	 * Everything checks out, so set up the section info
	 * in the info structure.
	 */
	err = setup_load_info(info, flags);
	if (err)
		goto free_copy;

	/*
	 * Now that we know we have the correct module name, check
	 * if it's blacklisted.
	 */
	if (blacklisted(info->name)) {
		err = -EPERM;
		pr_err("Module %s is blacklisted\n", info->name);
		goto free_copy;
	}

	err = rewrite_section_headers(info, flags);
	if (err)
		goto free_copy;

	/* Check module struct version now, before we try to use module. */
	if (!check_modstruct_version(info, info->mod)) {
		err = -ENOEXEC;
		goto free_copy;
	}

	/* Figure out module layout, and allocate all the memory. */
	mod = layout_and_allocate(info, flags);
	if (IS_ERR(mod)) {
		err = PTR_ERR(mod);
		goto free_copy;
	}

	audit_log_kern_module(mod->name);

	/* Reserve our place in the list. */
	err = add_unformed_module(mod);
	if (err)
		goto free_module;

#ifdef CONFIG_MODULE_SIG
	mod->sig_ok = info->sig_ok;
	if (!mod->sig_ok) {
		pr_notice_once("%s: module verification failed: signature and/or required key missing - tainting kernel\n",
			       mod->name);
		add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
	}
#endif

	/* To avoid stressing percpu allocator, do this once we're unique. */
	err = percpu_modalloc(mod, info);
	if (err)
		goto unlink_mod;

	/* Now module is in final location, initialize linked lists, etc. */
	err = module_unload_init(mod);
	if (err)
		goto unlink_mod;

	init_param_lock(mod);

	/*
	 * Now we've got everything in the final locations, we can
	 * find optional sections.
	 */
	err = find_module_sections(mod, info);
	if (err)
		goto free_unload;

	err = check_module_license_and_versions(mod);
	if (err)
		goto free_unload;

	/* Set up MODINFO_ATTR fields */
	setup_modinfo(mod, info);

	/* Fix up syms, so that st_value is a pointer to location. */
	err = simplify_symbols(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = apply_relocations(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = post_relocation(mod, info);
	if (err < 0)
		goto free_modinfo;

	flush_module_icache(mod);

	/* Setup CFI for the module. */
	cfi_init(mod);

	/* Now copy in args */
	mod->args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(mod->args)) {
		err = PTR_ERR(mod->args);
		goto free_arch_cleanup;
	}

	init_build_id(mod, info);
	dynamic_debug_setup(mod, info->debug, info->num_debug);

	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
	ftrace_module_init(mod);

	/* Finally it's fully formed, ready to start executing. */
	err = complete_formation(mod, info);
	if (err)
		goto ddebug_cleanup;

	err = prepare_coming_module(mod);
	if (err)
		goto bug_cleanup;

	/* Module is ready to execute: parsing args may do that. */
	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
				  -32768, 32767, mod,
				  unknown_module_param_cb);
	if (IS_ERR(after_dashes)) {
		err = PTR_ERR(after_dashes);
		goto coming_cleanup;
	} else if (after_dashes) {
		pr_warn("%s: parameters '%s' after `--' ignored\n",
			mod->name, after_dashes);
	}
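	/*
	 * The -32768/32767 passed to parse_args() above are the extreme
	 * s16 level bounds, i.e. parameters are accepted at any level;
	 * filtering of unrecognized parameters is left entirely to
	 * unknown_module_param_cb().
	 */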
	/* Link in to sysfs. */
	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
	if (err < 0)
		goto coming_cleanup;

	if (is_livepatch_module(mod)) {
		err = copy_module_elf(mod, info);
		if (err < 0)
			goto sysfs_cleanup;
	}

	/* Get rid of temporary copy. */
	free_copy(info, flags);

	/* Done! */
	trace_module_load(mod);

	return do_init_module(mod);

 sysfs_cleanup:
	mod_sysfs_teardown(mod);
 coming_cleanup:
	mod->state = MODULE_STATE_GOING;
	destroy_params(mod->kp, mod->num_kp);
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
 bug_cleanup:
	mod->state = MODULE_STATE_GOING;
	/* module_bug_cleanup needs module_mutex protection */
	mutex_lock(&module_mutex);
	module_bug_cleanup(mod);
	mutex_unlock(&module_mutex);

 ddebug_cleanup:
	ftrace_release_mod(mod);
	dynamic_debug_remove(mod, info->debug);
	synchronize_rcu();
	kfree(mod->args);
 free_arch_cleanup:
	cfi_cleanup(mod);
	module_arch_cleanup(mod);
 free_modinfo:
	free_modinfo(mod);
 free_unload:
	module_unload_free(mod);
 unlink_mod:
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);
	wake_up_all(&module_wq);
	/* Wait for RCU-sched synchronizing before releasing mod->list. */
	synchronize_rcu();
	mutex_unlock(&module_mutex);
 free_module:
	/* Free lock-classes; relies on the preceding sync_rcu() */
	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);

	module_deallocate(mod, info);
 free_copy:
	free_copy(info, flags);
	return err;
}

SYSCALL_DEFINE3(init_module, void __user *, umod,
		unsigned long, len, const char __user *, uargs)
{
	int err;
	struct load_info info = { };

	err = may_init_module();
	if (err)
		return err;

	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
		 umod, len, uargs);

	err = copy_module_from_user(umod, len, &info);
	if (err)
		return err;

	return load_module(&info, uargs, 0);
}

SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
	struct load_info info = { };
	void *buf = NULL;
	int len;
	int err;

	err = may_init_module();
	if (err)
		return err;

	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);

	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
		      |MODULE_INIT_IGNORE_VERMAGIC
		      |MODULE_INIT_COMPRESSED_FILE))
		return -EINVAL;

	len = kernel_read_file_from_fd(fd, 0, &buf, INT_MAX, NULL,
				       READING_MODULE);
	if (len < 0)
		return len;

	if (flags & MODULE_INIT_COMPRESSED_FILE) {
		err = module_decompress(&info, buf, len);
		vfree(buf); /* compressed data is no longer needed */
		if (err)
			return err;
	} else {
		info.hdr = buf;
		info.len = len;
	}

	return load_module(&info, uargs, flags);
}

static inline int within(unsigned long addr, void *start, unsigned long size)
{
	return ((void *)addr >= start && (void *)addr < start + size);
}
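#if 0	/* userspace example, never compiled */
/*
 * Aside: a minimal userspace sketch of driving the finit_module(2)
 * syscall defined above; ex_load() is illustrative only, with error
 * handling kept to a minimum. flags = 0 means the kernel enforces
 * modversions and vermagic; MODULE_INIT_COMPRESSED_FILE would let a
 * kernel built with CONFIG_MODULE_DECOMPRESS take a compressed .ko.
 */
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int ex_load(const char *path, const char *params)
{
	int fd = open(path, O_RDONLY | O_CLOEXEC);
	int ret;

	if (fd < 0)
		return -1;
	ret = syscall(SYS_finit_module, fd, params, 0);
	close(fd);
	return ret;
}
#endif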
#ifdef CONFIG_KALLSYMS
/*
 * This ignores the intensely annoying "mapping symbols" found
 * in ARM ELF files: $a, $t and $d. It also skips ".L"-prefixed
 * compiler-generated local labels.
 */
static inline bool is_arm_mapping_symbol(const char *str)
{
	if (str[0] == '.' && str[1] == 'L')
		return true;
	return str[0] == '$' && strchr("axtd", str[1])
	       && (str[2] == '\0' || str[2] == '.');
}

static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum)
{
	return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
}

/*
 * Given a module and address, find the corresponding symbol and return its name
 * while providing its size and offset if needed.
 */
static const char *find_kallsyms_symbol(struct module *mod,
					unsigned long addr,
					unsigned long *size,
					unsigned long *offset)
{
	unsigned int i, best = 0;
	unsigned long nextval, bestval;
	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);

	/* At worst, next value is at end of module */
	if (within_module_init(addr, mod))
		nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size;
	else
		nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size;

	bestval = kallsyms_symbol_value(&kallsyms->symtab[best]);

	/*
	 * Scan for closest preceding symbol, and next symbol. (ELF
	 * starts real symbols at 1).
	 */
	for (i = 1; i < kallsyms->num_symtab; i++) {
		const Elf_Sym *sym = &kallsyms->symtab[i];
		unsigned long thisval = kallsyms_symbol_value(sym);

		if (sym->st_shndx == SHN_UNDEF)
			continue;

		/*
		 * We ignore unnamed symbols: they're uninformative
		 * and inserted at a whim.
		 */
		if (*kallsyms_symbol_name(kallsyms, i) == '\0'
		    || is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i)))
			continue;

		if (thisval <= addr && thisval > bestval) {
			best = i;
			bestval = thisval;
		}
		if (thisval > addr && thisval < nextval)
			nextval = thisval;
	}

	if (!best)
		return NULL;

	if (size)
		*size = nextval - bestval;
	if (offset)
		*offset = addr - bestval;

	return kallsyms_symbol_name(kallsyms, best);
}

void * __weak dereference_module_function_descriptor(struct module *mod,
						     void *ptr)
{
	return ptr;
}
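/*
 * Worked example for find_kallsyms_symbol() above (addresses made up):
 * with symbols foo at 0x1000 and bar at 0x1040, looking up 0x1010
 * returns "foo" with *offset = 0x10 and *size = 0x40. Note that size
 * is the distance to the next symbol (or to the end of the text
 * region), not the ELF st_size.
 */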
/*
 * For kallsyms to ask for address resolution. NULL means not found. We
 * must be careful not to take any locks (we may be called from an oops),
 * so we simply disable preemption instead.
 */
const char *module_address_lookup(unsigned long addr,
				  unsigned long *size,
				  unsigned long *offset,
				  char **modname,
				  const unsigned char **modbuildid,
				  char *namebuf)
{
	const char *ret = NULL;
	struct module *mod;

	preempt_disable();
	mod = __module_address(addr);
	if (mod) {
		if (modname)
			*modname = mod->name;
		if (modbuildid) {
#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
			*modbuildid = mod->build_id;
#else
			*modbuildid = NULL;
#endif
		}

		ret = find_kallsyms_symbol(mod, addr, size, offset);
	}
	/* Make a copy in here where it's safe */
	if (ret) {
		strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
		ret = namebuf;
	}
	preempt_enable();

	return ret;
}

int lookup_module_symbol_name(unsigned long addr, char *symname)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = find_kallsyms_symbol(mod, addr, NULL, NULL);
			if (!sym)
				goto out;

			strlcpy(symname, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}

int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
			       unsigned long *offset, char *modname, char *name)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = find_kallsyms_symbol(mod, addr, size, offset);
			if (!sym)
				goto out;
			if (modname)
				strlcpy(modname, mod->name, MODULE_NAME_LEN);
			if (name)
				strlcpy(name, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}

int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		       char *name, char *module_name, int *exported)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		struct mod_kallsyms *kallsyms;

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		kallsyms = rcu_dereference_sched(mod->kallsyms);
		if (symnum < kallsyms->num_symtab) {
			const Elf_Sym *sym = &kallsyms->symtab[symnum];

			*value = kallsyms_symbol_value(sym);
			*type = kallsyms->typetab[symnum];
			strlcpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN);
			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
			*exported = is_exported(name, *value, mod);
			preempt_enable();
			return 0;
		}
		symnum -= kallsyms->num_symtab;
	}
	preempt_enable();
	return -ERANGE;
}

/* Given a module and name of symbol, find and return the symbol's value */
static unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
{
	unsigned int i;
	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);

	for (i = 0; i < kallsyms->num_symtab; i++) {
		const Elf_Sym *sym = &kallsyms->symtab[i];

		if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 &&
		    sym->st_shndx != SHN_UNDEF)
			return kallsyms_symbol_value(sym);
	}
	return 0;
}
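/*
 * Aside: an illustrative caller of module_address_lookup() above; this
 * mirrors what kallsyms_lookup() does for addresses that fall inside a
 * module. ex_resolve() is a sketch, not part of this file; buf must be
 * at least KSYM_NAME_LEN bytes.
 */
static const char *ex_resolve(unsigned long addr, char buf[KSYM_NAME_LEN])
{
	unsigned long size, offset;
	char *modname;

	/* Returns a pointer into buf, or NULL if no symbol was found. */
	return module_address_lookup(addr, &size, &offset, &modname, NULL, buf);
}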
/* Look for this name: it can be of the form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name)
{
	struct module *mod;
	char *colon;
	unsigned long ret = 0;

	/* Don't lock: we're in enough trouble already. */
	preempt_disable();
	if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
		if ((mod = find_module_all(name, colon - name, false)) != NULL)
			ret = find_kallsyms_symbol_value(mod, colon+1);
	} else {
		list_for_each_entry_rcu(mod, &modules, list) {
			if (mod->state == MODULE_STATE_UNFORMED)
				continue;
			if ((ret = find_kallsyms_symbol_value(mod, name)) != 0)
				break;
		}
	}
	preempt_enable();
	return ret;
}
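/*
 * The two name forms accepted above, by example (module and symbol
 * names are illustrative):
 *
 *	module_kallsyms_lookup_name("usbcore:usb_find_device")
 *		searches only the usbcore module;
 *	module_kallsyms_lookup_name("usb_find_device")
 *		scans every live module, returning the first match.
 *
 * Both forms return 0 when nothing is found.
 */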
#ifdef CONFIG_LIVEPATCH
int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data)
{
	struct module *mod;
	unsigned int i;
	int ret = 0;

	mutex_lock(&module_mutex);
	list_for_each_entry(mod, &modules, list) {
		/* We hold module_mutex: no need for rcu_dereference_sched */
		struct mod_kallsyms *kallsyms = mod->kallsyms;

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		for (i = 0; i < kallsyms->num_symtab; i++) {
			const Elf_Sym *sym = &kallsyms->symtab[i];

			if (sym->st_shndx == SHN_UNDEF)
				continue;

			ret = fn(data, kallsyms_symbol_name(kallsyms, i),
				 mod, kallsyms_symbol_value(sym));
			if (ret != 0)
				goto out;

			cond_resched();
		}
	}
out:
	mutex_unlock(&module_mutex);
	return ret;
}
#endif /* CONFIG_LIVEPATCH */
#endif /* CONFIG_KALLSYMS */

static void cfi_init(struct module *mod)
{
#ifdef CONFIG_CFI_CLANG
	initcall_t *init;
	exitcall_t *exit;

	rcu_read_lock_sched();
	mod->cfi_check = (cfi_check_fn)
		find_kallsyms_symbol_value(mod, "__cfi_check");
	init = (initcall_t *)
		find_kallsyms_symbol_value(mod, "__cfi_jt_init_module");
	exit = (exitcall_t *)
		find_kallsyms_symbol_value(mod, "__cfi_jt_cleanup_module");
	rcu_read_unlock_sched();

	/* Fix init/exit functions to point to the CFI jump table */
	if (init)
		mod->init = *init;
#ifdef CONFIG_MODULE_UNLOAD
	if (exit)
		mod->exit = *exit;
#endif

	cfi_module_add(mod, module_addr_min);
#endif
}

static void cfi_cleanup(struct module *mod)
{
#ifdef CONFIG_CFI_CLANG
	cfi_module_remove(mod, module_addr_min);
#endif
}

/* Maximum number of characters written by module_flags() */
#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)

/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
static char *module_flags(struct module *mod, char *buf)
{
	int bx = 0;

	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
	if (mod->taints ||
	    mod->state == MODULE_STATE_GOING ||
	    mod->state == MODULE_STATE_COMING) {
		buf[bx++] = '(';
		bx += module_flags_taint(mod, buf + bx);
		/* Show a - for module-is-being-unloaded */
		if (mod->state == MODULE_STATE_GOING)
			buf[bx++] = '-';
		/* Show a + for module-is-being-loaded */
		if (mod->state == MODULE_STATE_COMING)
			buf[bx++] = '+';
		buf[bx++] = ')';
	}
	buf[bx] = '\0';

	return buf;
}

#ifdef CONFIG_PROC_FS
/* Called by the /proc file system to return a list of modules. */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&module_mutex);
	return seq_list_start(&modules, *pos);
}

static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &modules, pos);
}

static void m_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&module_mutex);
}

static int m_show(struct seq_file *m, void *p)
{
	struct module *mod = list_entry(p, struct module, list);
	char buf[MODULE_FLAGS_BUF_SIZE];
	void *value;

	/* We always ignore unformed modules. */
	if (mod->state == MODULE_STATE_UNFORMED)
		return 0;

	seq_printf(m, "%s %u",
		   mod->name, mod->init_layout.size + mod->core_layout.size);
	print_unload_info(m, mod);

	/* Informative for users. */
	seq_printf(m, " %s",
		   mod->state == MODULE_STATE_GOING ? "Unloading" :
		   mod->state == MODULE_STATE_COMING ? "Loading" :
		   "Live");
	/* Used by oprofile and other similar tools. */
	value = m->private ? NULL : mod->core_layout.base;
	seq_printf(m, " 0x%px", value);

	/* Taints info */
	if (mod->taints)
		seq_printf(m, " %s", module_flags(mod, buf));

	seq_puts(m, "\n");
	return 0;
}

/*
 * Format: modulename size refcount deps address
 *
 * Where refcount is a number or -, and deps is a comma-separated list
 * of depends or -.
 */
static const struct seq_operations modules_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show
};

/*
 * This also sets the "private" pointer to non-NULL if the
 * kernel pointers should be hidden (so you can just test
 * "m->private" to see if you should keep the values private).
 *
 * We use the same logic as for /proc/kallsyms.
 */
static int modules_open(struct inode *inode, struct file *file)
{
	int err = seq_open(file, &modules_op);

	if (!err) {
		struct seq_file *m = file->private_data;
		m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul;
	}

	return err;
}

static const struct proc_ops modules_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= modules_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init proc_modules_init(void)
{
	proc_create("modules", 0, NULL, &modules_proc_ops);
	return 0;
}
module_init(proc_modules_init);
#endif
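/*
 * A sample /proc/modules line in the format documented above (all
 * values illustrative):
 *
 *	ex_mod 16384 2 ex_dep1,ex_dep2, Live 0xffffffffc0a00000 (OE)
 *
 * When kallsyms_show_value() denies the opener, the address column is
 * rendered from a NULL pointer, i.e. 0x0000000000000000.
 */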
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct module *mod;

	preempt_disable();
	mod = __module_address(addr);
	if (!mod)
		goto out;

	if (!mod->num_exentries)
		goto out;

	e = search_extable(mod->extable,
			   mod->num_exentries,
			   addr);
out:
	preempt_enable();

	/*
	 * If we found an entry, the faulting code is running inside this
	 * module right now, so the module cannot be unloaded out from
	 * under us; no refcount is needed.
	 */
	return e;
}

/**
 * is_module_address() - is this address inside a module?
 * @addr: the address to check.
 *
 * See is_module_text_address() if you simply want to see if the address
 * is code (not data).
 */
bool is_module_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_address(addr) != NULL;
	preempt_enable();

	return ret;
}

/**
 * __module_address() - get the module which contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_address(unsigned long addr)
{
	struct module *mod;

	if (addr < module_addr_min || addr > module_addr_max)
		return NULL;

	module_assert_mutex_or_preempt();

	mod = mod_find(addr);
	if (mod) {
		BUG_ON(!within_module(addr, mod));
		if (mod->state == MODULE_STATE_UNFORMED)
			mod = NULL;
	}
	return mod;
}

/**
 * is_module_text_address() - is this address inside module code?
 * @addr: the address to check.
 *
 * See is_module_address() if you simply want to see if the address is
 * anywhere in a module. See kernel_text_address() for testing if an
 * address corresponds to kernel or module code.
 */
bool is_module_text_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_text_address(addr) != NULL;
	preempt_enable();

	return ret;
}

/**
 * __module_text_address() - get the module whose code contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_text_address(unsigned long addr)
{
	struct module *mod = __module_address(addr);
	if (mod) {
		/* Make sure it's within the text section. */
		if (!within(addr, mod->init_layout.base, mod->init_layout.text_size)
		    && !within(addr, mod->core_layout.base, mod->core_layout.text_size))
			mod = NULL;
	}
	return mod;
}

/* Don't grab lock, we're oopsing. */
void print_modules(void)
{
	struct module *mod;
	char buf[MODULE_FLAGS_BUF_SIZE];

	printk(KERN_DEFAULT "Modules linked in:");
	/* Most callers should already have preempt disabled, but make sure */
	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		pr_cont(" %s%s", mod->name, module_flags(mod, buf));
	}
	preempt_enable();
	if (last_unloaded_module[0])
		pr_cont(" [last unloaded: %s]", last_unloaded_module);
	pr_cont("\n");
}

#ifdef CONFIG_MODVERSIONS
/*
 * Generate the signature for all relevant module structures here.
 * If these change, we don't want to try to parse the module.
 */
void module_layout(struct module *mod,
		   struct modversion_info *ver,
		   struct kernel_param *kp,
		   struct kernel_symbol *ks,
		   struct tracepoint * const *tp)
{
}
EXPORT_SYMBOL(module_layout);
#endif
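/*
 * Aside: a typical guarded use of __module_text_address(), similar in
 * shape to callers such as ftrace and kprobes; ex_* is a sketch only.
 * Preemption must be disabled so the module cannot be freed while the
 * returned pointer is in use.
 */
static __maybe_unused bool ex_in_module_init_text(unsigned long addr)
{
	struct module *mod;
	bool ret = false;

	preempt_disable();
	mod = __module_text_address(addr);
	if (mod && within_module_init(addr, mod))
		ret = true;
	preempt_enable();

	return ret;
}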