at v5.1-rc2 116 kB view raw
1/* 2 Copyright (C) 2002 Richard Henderson 3 Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM. 4 5 This program is free software; you can redistribute it and/or modify 6 it under the terms of the GNU General Public License as published by 7 the Free Software Foundation; either version 2 of the License, or 8 (at your option) any later version. 9 10 This program is distributed in the hope that it will be useful, 11 but WITHOUT ANY WARRANTY; without even the implied warranty of 12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 GNU General Public License for more details. 14 15 You should have received a copy of the GNU General Public License 16 along with this program; if not, write to the Free Software 17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18*/ 19#include <linux/export.h> 20#include <linux/extable.h> 21#include <linux/moduleloader.h> 22#include <linux/trace_events.h> 23#include <linux/init.h> 24#include <linux/kallsyms.h> 25#include <linux/file.h> 26#include <linux/fs.h> 27#include <linux/sysfs.h> 28#include <linux/kernel.h> 29#include <linux/slab.h> 30#include <linux/vmalloc.h> 31#include <linux/elf.h> 32#include <linux/proc_fs.h> 33#include <linux/security.h> 34#include <linux/seq_file.h> 35#include <linux/syscalls.h> 36#include <linux/fcntl.h> 37#include <linux/rcupdate.h> 38#include <linux/capability.h> 39#include <linux/cpu.h> 40#include <linux/moduleparam.h> 41#include <linux/errno.h> 42#include <linux/err.h> 43#include <linux/vermagic.h> 44#include <linux/notifier.h> 45#include <linux/sched.h> 46#include <linux/device.h> 47#include <linux/string.h> 48#include <linux/mutex.h> 49#include <linux/rculist.h> 50#include <linux/uaccess.h> 51#include <asm/cacheflush.h> 52#include <linux/set_memory.h> 53#include <asm/mmu_context.h> 54#include <linux/license.h> 55#include <asm/sections.h> 56#include <linux/tracepoint.h> 57#include <linux/ftrace.h> 58#include <linux/livepatch.h> 59#include <linux/async.h> 60#include <linux/percpu.h> 61#include <linux/kmemleak.h> 62#include <linux/jump_label.h> 63#include <linux/pfn.h> 64#include <linux/bsearch.h> 65#include <linux/dynamic_debug.h> 66#include <linux/audit.h> 67#include <uapi/linux/module.h> 68#include "module-internal.h" 69 70#define CREATE_TRACE_POINTS 71#include <trace/events/module.h> 72 73#ifndef ARCH_SHF_SMALL 74#define ARCH_SHF_SMALL 0 75#endif 76 77/* 78 * Modules' sections will be aligned on page boundaries 79 * to ensure complete separation of code and data, but 80 * only when CONFIG_STRICT_MODULE_RWX=y 81 */ 82#ifdef CONFIG_STRICT_MODULE_RWX 83# define debug_align(X) ALIGN(X, PAGE_SIZE) 84#else 85# define debug_align(X) (X) 86#endif 87 88/* If this is set, the section belongs in the init part of the module */ 89#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) 90 91/* 92 * Mutex protects: 93 * 1) List of modules (also safely readable with preempt_disable), 94 * 2) module_use links, 95 * 3) module_addr_min/module_addr_max. 96 * (delete and add uses RCU list operations). */ 97DEFINE_MUTEX(module_mutex); 98EXPORT_SYMBOL_GPL(module_mutex); 99static LIST_HEAD(modules); 100 101#ifdef CONFIG_MODULES_TREE_LOOKUP 102 103/* 104 * Use a latched RB-tree for __module_address(); this allows us to use 105 * RCU-sched lookups of the address from any context. 106 * 107 * This is conditional on PERF_EVENTS || TRACING because those can really hit 108 * __module_address() hard by doing a lot of stack unwinding; potentially from 109 * NMI context. 110 */ 111 112static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n) 113{ 114 struct module_layout *layout = container_of(n, struct module_layout, mtn.node); 115 116 return (unsigned long)layout->base; 117} 118 119static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n) 120{ 121 struct module_layout *layout = container_of(n, struct module_layout, mtn.node); 122 123 return (unsigned long)layout->size; 124} 125 126static __always_inline bool 127mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b) 128{ 129 return __mod_tree_val(a) < __mod_tree_val(b); 130} 131 132static __always_inline int 133mod_tree_comp(void *key, struct latch_tree_node *n) 134{ 135 unsigned long val = (unsigned long)key; 136 unsigned long start, end; 137 138 start = __mod_tree_val(n); 139 if (val < start) 140 return -1; 141 142 end = start + __mod_tree_size(n); 143 if (val >= end) 144 return 1; 145 146 return 0; 147} 148 149static const struct latch_tree_ops mod_tree_ops = { 150 .less = mod_tree_less, 151 .comp = mod_tree_comp, 152}; 153 154static struct mod_tree_root { 155 struct latch_tree_root root; 156 unsigned long addr_min; 157 unsigned long addr_max; 158} mod_tree __cacheline_aligned = { 159 .addr_min = -1UL, 160}; 161 162#define module_addr_min mod_tree.addr_min 163#define module_addr_max mod_tree.addr_max 164 165static noinline void __mod_tree_insert(struct mod_tree_node *node) 166{ 167 latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops); 168} 169 170static void __mod_tree_remove(struct mod_tree_node *node) 171{ 172 latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops); 173} 174 175/* 176 * These modifications: insert, remove_init and remove; are serialized by the 177 * module_mutex. 178 */ 179static void mod_tree_insert(struct module *mod) 180{ 181 mod->core_layout.mtn.mod = mod; 182 mod->init_layout.mtn.mod = mod; 183 184 __mod_tree_insert(&mod->core_layout.mtn); 185 if (mod->init_layout.size) 186 __mod_tree_insert(&mod->init_layout.mtn); 187} 188 189static void mod_tree_remove_init(struct module *mod) 190{ 191 if (mod->init_layout.size) 192 __mod_tree_remove(&mod->init_layout.mtn); 193} 194 195static void mod_tree_remove(struct module *mod) 196{ 197 __mod_tree_remove(&mod->core_layout.mtn); 198 mod_tree_remove_init(mod); 199} 200 201static struct module *mod_find(unsigned long addr) 202{ 203 struct latch_tree_node *ltn; 204 205 ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops); 206 if (!ltn) 207 return NULL; 208 209 return container_of(ltn, struct mod_tree_node, node)->mod; 210} 211 212#else /* MODULES_TREE_LOOKUP */ 213 214static unsigned long module_addr_min = -1UL, module_addr_max = 0; 215 216static void mod_tree_insert(struct module *mod) { } 217static void mod_tree_remove_init(struct module *mod) { } 218static void mod_tree_remove(struct module *mod) { } 219 220static struct module *mod_find(unsigned long addr) 221{ 222 struct module *mod; 223 224 list_for_each_entry_rcu(mod, &modules, list) { 225 if (within_module(addr, mod)) 226 return mod; 227 } 228 229 return NULL; 230} 231 232#endif /* MODULES_TREE_LOOKUP */ 233 234/* 235 * Bounds of module text, for speeding up __module_address. 236 * Protected by module_mutex. 237 */ 238static void __mod_update_bounds(void *base, unsigned int size) 239{ 240 unsigned long min = (unsigned long)base; 241 unsigned long max = min + size; 242 243 if (min < module_addr_min) 244 module_addr_min = min; 245 if (max > module_addr_max) 246 module_addr_max = max; 247} 248 249static void mod_update_bounds(struct module *mod) 250{ 251 __mod_update_bounds(mod->core_layout.base, mod->core_layout.size); 252 if (mod->init_layout.size) 253 __mod_update_bounds(mod->init_layout.base, mod->init_layout.size); 254} 255 256#ifdef CONFIG_KGDB_KDB 257struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */ 258#endif /* CONFIG_KGDB_KDB */ 259 260static void module_assert_mutex(void) 261{ 262 lockdep_assert_held(&module_mutex); 263} 264 265static void module_assert_mutex_or_preempt(void) 266{ 267#ifdef CONFIG_LOCKDEP 268 if (unlikely(!debug_locks)) 269 return; 270 271 WARN_ON_ONCE(!rcu_read_lock_sched_held() && 272 !lockdep_is_held(&module_mutex)); 273#endif 274} 275 276static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE); 277module_param(sig_enforce, bool_enable_only, 0644); 278 279/* 280 * Export sig_enforce kernel cmdline parameter to allow other subsystems rely 281 * on that instead of directly to CONFIG_MODULE_SIG_FORCE config. 282 */ 283bool is_module_sig_enforced(void) 284{ 285 return sig_enforce; 286} 287EXPORT_SYMBOL(is_module_sig_enforced); 288 289/* Block module loading/unloading? */ 290int modules_disabled = 0; 291core_param(nomodule, modules_disabled, bint, 0); 292 293/* Waiting for a module to finish initializing? */ 294static DECLARE_WAIT_QUEUE_HEAD(module_wq); 295 296static BLOCKING_NOTIFIER_HEAD(module_notify_list); 297 298int register_module_notifier(struct notifier_block *nb) 299{ 300 return blocking_notifier_chain_register(&module_notify_list, nb); 301} 302EXPORT_SYMBOL(register_module_notifier); 303 304int unregister_module_notifier(struct notifier_block *nb) 305{ 306 return blocking_notifier_chain_unregister(&module_notify_list, nb); 307} 308EXPORT_SYMBOL(unregister_module_notifier); 309 310/* 311 * We require a truly strong try_module_get(): 0 means success. 312 * Otherwise an error is returned due to ongoing or failed 313 * initialization etc. 314 */ 315static inline int strong_try_module_get(struct module *mod) 316{ 317 BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED); 318 if (mod && mod->state == MODULE_STATE_COMING) 319 return -EBUSY; 320 if (try_module_get(mod)) 321 return 0; 322 else 323 return -ENOENT; 324} 325 326static inline void add_taint_module(struct module *mod, unsigned flag, 327 enum lockdep_ok lockdep_ok) 328{ 329 add_taint(flag, lockdep_ok); 330 set_bit(flag, &mod->taints); 331} 332 333/* 334 * A thread that wants to hold a reference to a module only while it 335 * is running can call this to safely exit. nfsd and lockd use this. 336 */ 337void __noreturn __module_put_and_exit(struct module *mod, long code) 338{ 339 module_put(mod); 340 do_exit(code); 341} 342EXPORT_SYMBOL(__module_put_and_exit); 343 344/* Find a module section: 0 means not found. */ 345static unsigned int find_sec(const struct load_info *info, const char *name) 346{ 347 unsigned int i; 348 349 for (i = 1; i < info->hdr->e_shnum; i++) { 350 Elf_Shdr *shdr = &info->sechdrs[i]; 351 /* Alloc bit cleared means "ignore it." */ 352 if ((shdr->sh_flags & SHF_ALLOC) 353 && strcmp(info->secstrings + shdr->sh_name, name) == 0) 354 return i; 355 } 356 return 0; 357} 358 359/* Find a module section, or NULL. */ 360static void *section_addr(const struct load_info *info, const char *name) 361{ 362 /* Section 0 has sh_addr 0. */ 363 return (void *)info->sechdrs[find_sec(info, name)].sh_addr; 364} 365 366/* Find a module section, or NULL. Fill in number of "objects" in section. */ 367static void *section_objs(const struct load_info *info, 368 const char *name, 369 size_t object_size, 370 unsigned int *num) 371{ 372 unsigned int sec = find_sec(info, name); 373 374 /* Section 0 has sh_addr 0 and sh_size 0. */ 375 *num = info->sechdrs[sec].sh_size / object_size; 376 return (void *)info->sechdrs[sec].sh_addr; 377} 378 379/* Provided by the linker */ 380extern const struct kernel_symbol __start___ksymtab[]; 381extern const struct kernel_symbol __stop___ksymtab[]; 382extern const struct kernel_symbol __start___ksymtab_gpl[]; 383extern const struct kernel_symbol __stop___ksymtab_gpl[]; 384extern const struct kernel_symbol __start___ksymtab_gpl_future[]; 385extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; 386extern const s32 __start___kcrctab[]; 387extern const s32 __start___kcrctab_gpl[]; 388extern const s32 __start___kcrctab_gpl_future[]; 389#ifdef CONFIG_UNUSED_SYMBOLS 390extern const struct kernel_symbol __start___ksymtab_unused[]; 391extern const struct kernel_symbol __stop___ksymtab_unused[]; 392extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; 393extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; 394extern const s32 __start___kcrctab_unused[]; 395extern const s32 __start___kcrctab_unused_gpl[]; 396#endif 397 398#ifndef CONFIG_MODVERSIONS 399#define symversion(base, idx) NULL 400#else 401#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL) 402#endif 403 404static bool each_symbol_in_section(const struct symsearch *arr, 405 unsigned int arrsize, 406 struct module *owner, 407 bool (*fn)(const struct symsearch *syms, 408 struct module *owner, 409 void *data), 410 void *data) 411{ 412 unsigned int j; 413 414 for (j = 0; j < arrsize; j++) { 415 if (fn(&arr[j], owner, data)) 416 return true; 417 } 418 419 return false; 420} 421 422/* Returns true as soon as fn returns true, otherwise false. */ 423bool each_symbol_section(bool (*fn)(const struct symsearch *arr, 424 struct module *owner, 425 void *data), 426 void *data) 427{ 428 struct module *mod; 429 static const struct symsearch arr[] = { 430 { __start___ksymtab, __stop___ksymtab, __start___kcrctab, 431 NOT_GPL_ONLY, false }, 432 { __start___ksymtab_gpl, __stop___ksymtab_gpl, 433 __start___kcrctab_gpl, 434 GPL_ONLY, false }, 435 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future, 436 __start___kcrctab_gpl_future, 437 WILL_BE_GPL_ONLY, false }, 438#ifdef CONFIG_UNUSED_SYMBOLS 439 { __start___ksymtab_unused, __stop___ksymtab_unused, 440 __start___kcrctab_unused, 441 NOT_GPL_ONLY, true }, 442 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl, 443 __start___kcrctab_unused_gpl, 444 GPL_ONLY, true }, 445#endif 446 }; 447 448 module_assert_mutex_or_preempt(); 449 450 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data)) 451 return true; 452 453 list_for_each_entry_rcu(mod, &modules, list) { 454 struct symsearch arr[] = { 455 { mod->syms, mod->syms + mod->num_syms, mod->crcs, 456 NOT_GPL_ONLY, false }, 457 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, 458 mod->gpl_crcs, 459 GPL_ONLY, false }, 460 { mod->gpl_future_syms, 461 mod->gpl_future_syms + mod->num_gpl_future_syms, 462 mod->gpl_future_crcs, 463 WILL_BE_GPL_ONLY, false }, 464#ifdef CONFIG_UNUSED_SYMBOLS 465 { mod->unused_syms, 466 mod->unused_syms + mod->num_unused_syms, 467 mod->unused_crcs, 468 NOT_GPL_ONLY, true }, 469 { mod->unused_gpl_syms, 470 mod->unused_gpl_syms + mod->num_unused_gpl_syms, 471 mod->unused_gpl_crcs, 472 GPL_ONLY, true }, 473#endif 474 }; 475 476 if (mod->state == MODULE_STATE_UNFORMED) 477 continue; 478 479 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) 480 return true; 481 } 482 return false; 483} 484EXPORT_SYMBOL_GPL(each_symbol_section); 485 486struct find_symbol_arg { 487 /* Input */ 488 const char *name; 489 bool gplok; 490 bool warn; 491 492 /* Output */ 493 struct module *owner; 494 const s32 *crc; 495 const struct kernel_symbol *sym; 496}; 497 498static bool check_exported_symbol(const struct symsearch *syms, 499 struct module *owner, 500 unsigned int symnum, void *data) 501{ 502 struct find_symbol_arg *fsa = data; 503 504 if (!fsa->gplok) { 505 if (syms->licence == GPL_ONLY) 506 return false; 507 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) { 508 pr_warn("Symbol %s is being used by a non-GPL module, " 509 "which will not be allowed in the future\n", 510 fsa->name); 511 } 512 } 513 514#ifdef CONFIG_UNUSED_SYMBOLS 515 if (syms->unused && fsa->warn) { 516 pr_warn("Symbol %s is marked as UNUSED, however this module is " 517 "using it.\n", fsa->name); 518 pr_warn("This symbol will go away in the future.\n"); 519 pr_warn("Please evaluate if this is the right api to use and " 520 "if it really is, submit a report to the linux kernel " 521 "mailing list together with submitting your code for " 522 "inclusion.\n"); 523 } 524#endif 525 526 fsa->owner = owner; 527 fsa->crc = symversion(syms->crcs, symnum); 528 fsa->sym = &syms->start[symnum]; 529 return true; 530} 531 532static unsigned long kernel_symbol_value(const struct kernel_symbol *sym) 533{ 534#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS 535 return (unsigned long)offset_to_ptr(&sym->value_offset); 536#else 537 return sym->value; 538#endif 539} 540 541static const char *kernel_symbol_name(const struct kernel_symbol *sym) 542{ 543#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS 544 return offset_to_ptr(&sym->name_offset); 545#else 546 return sym->name; 547#endif 548} 549 550static int cmp_name(const void *va, const void *vb) 551{ 552 const char *a; 553 const struct kernel_symbol *b; 554 a = va; b = vb; 555 return strcmp(a, kernel_symbol_name(b)); 556} 557 558static bool find_exported_symbol_in_section(const struct symsearch *syms, 559 struct module *owner, 560 void *data) 561{ 562 struct find_symbol_arg *fsa = data; 563 struct kernel_symbol *sym; 564 565 sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, 566 sizeof(struct kernel_symbol), cmp_name); 567 568 if (sym != NULL && check_exported_symbol(syms, owner, 569 sym - syms->start, data)) 570 return true; 571 572 return false; 573} 574 575/* Find an exported symbol and return it, along with, (optional) crc and 576 * (optional) module which owns it. Needs preempt disabled or module_mutex. */ 577const struct kernel_symbol *find_symbol(const char *name, 578 struct module **owner, 579 const s32 **crc, 580 bool gplok, 581 bool warn) 582{ 583 struct find_symbol_arg fsa; 584 585 fsa.name = name; 586 fsa.gplok = gplok; 587 fsa.warn = warn; 588 589 if (each_symbol_section(find_exported_symbol_in_section, &fsa)) { 590 if (owner) 591 *owner = fsa.owner; 592 if (crc) 593 *crc = fsa.crc; 594 return fsa.sym; 595 } 596 597 pr_debug("Failed to find symbol %s\n", name); 598 return NULL; 599} 600EXPORT_SYMBOL_GPL(find_symbol); 601 602/* 603 * Search for module by name: must hold module_mutex (or preempt disabled 604 * for read-only access). 605 */ 606static struct module *find_module_all(const char *name, size_t len, 607 bool even_unformed) 608{ 609 struct module *mod; 610 611 module_assert_mutex_or_preempt(); 612 613 list_for_each_entry_rcu(mod, &modules, list) { 614 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) 615 continue; 616 if (strlen(mod->name) == len && !memcmp(mod->name, name, len)) 617 return mod; 618 } 619 return NULL; 620} 621 622struct module *find_module(const char *name) 623{ 624 module_assert_mutex(); 625 return find_module_all(name, strlen(name), false); 626} 627EXPORT_SYMBOL_GPL(find_module); 628 629#ifdef CONFIG_SMP 630 631static inline void __percpu *mod_percpu(struct module *mod) 632{ 633 return mod->percpu; 634} 635 636static int percpu_modalloc(struct module *mod, struct load_info *info) 637{ 638 Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu]; 639 unsigned long align = pcpusec->sh_addralign; 640 641 if (!pcpusec->sh_size) 642 return 0; 643 644 if (align > PAGE_SIZE) { 645 pr_warn("%s: per-cpu alignment %li > %li\n", 646 mod->name, align, PAGE_SIZE); 647 align = PAGE_SIZE; 648 } 649 650 mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align); 651 if (!mod->percpu) { 652 pr_warn("%s: Could not allocate %lu bytes percpu data\n", 653 mod->name, (unsigned long)pcpusec->sh_size); 654 return -ENOMEM; 655 } 656 mod->percpu_size = pcpusec->sh_size; 657 return 0; 658} 659 660static void percpu_modfree(struct module *mod) 661{ 662 free_percpu(mod->percpu); 663} 664 665static unsigned int find_pcpusec(struct load_info *info) 666{ 667 return find_sec(info, ".data..percpu"); 668} 669 670static void percpu_modcopy(struct module *mod, 671 const void *from, unsigned long size) 672{ 673 int cpu; 674 675 for_each_possible_cpu(cpu) 676 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); 677} 678 679bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) 680{ 681 struct module *mod; 682 unsigned int cpu; 683 684 preempt_disable(); 685 686 list_for_each_entry_rcu(mod, &modules, list) { 687 if (mod->state == MODULE_STATE_UNFORMED) 688 continue; 689 if (!mod->percpu_size) 690 continue; 691 for_each_possible_cpu(cpu) { 692 void *start = per_cpu_ptr(mod->percpu, cpu); 693 void *va = (void *)addr; 694 695 if (va >= start && va < start + mod->percpu_size) { 696 if (can_addr) { 697 *can_addr = (unsigned long) (va - start); 698 *can_addr += (unsigned long) 699 per_cpu_ptr(mod->percpu, 700 get_boot_cpu_id()); 701 } 702 preempt_enable(); 703 return true; 704 } 705 } 706 } 707 708 preempt_enable(); 709 return false; 710} 711 712/** 713 * is_module_percpu_address - test whether address is from module static percpu 714 * @addr: address to test 715 * 716 * Test whether @addr belongs to module static percpu area. 717 * 718 * RETURNS: 719 * %true if @addr is from module static percpu area 720 */ 721bool is_module_percpu_address(unsigned long addr) 722{ 723 return __is_module_percpu_address(addr, NULL); 724} 725 726#else /* ... !CONFIG_SMP */ 727 728static inline void __percpu *mod_percpu(struct module *mod) 729{ 730 return NULL; 731} 732static int percpu_modalloc(struct module *mod, struct load_info *info) 733{ 734 /* UP modules shouldn't have this section: ENOMEM isn't quite right */ 735 if (info->sechdrs[info->index.pcpu].sh_size != 0) 736 return -ENOMEM; 737 return 0; 738} 739static inline void percpu_modfree(struct module *mod) 740{ 741} 742static unsigned int find_pcpusec(struct load_info *info) 743{ 744 return 0; 745} 746static inline void percpu_modcopy(struct module *mod, 747 const void *from, unsigned long size) 748{ 749 /* pcpusec should be 0, and size of that section should be 0. */ 750 BUG_ON(size != 0); 751} 752bool is_module_percpu_address(unsigned long addr) 753{ 754 return false; 755} 756 757bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) 758{ 759 return false; 760} 761 762#endif /* CONFIG_SMP */ 763 764#define MODINFO_ATTR(field) \ 765static void setup_modinfo_##field(struct module *mod, const char *s) \ 766{ \ 767 mod->field = kstrdup(s, GFP_KERNEL); \ 768} \ 769static ssize_t show_modinfo_##field(struct module_attribute *mattr, \ 770 struct module_kobject *mk, char *buffer) \ 771{ \ 772 return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \ 773} \ 774static int modinfo_##field##_exists(struct module *mod) \ 775{ \ 776 return mod->field != NULL; \ 777} \ 778static void free_modinfo_##field(struct module *mod) \ 779{ \ 780 kfree(mod->field); \ 781 mod->field = NULL; \ 782} \ 783static struct module_attribute modinfo_##field = { \ 784 .attr = { .name = __stringify(field), .mode = 0444 }, \ 785 .show = show_modinfo_##field, \ 786 .setup = setup_modinfo_##field, \ 787 .test = modinfo_##field##_exists, \ 788 .free = free_modinfo_##field, \ 789}; 790 791MODINFO_ATTR(version); 792MODINFO_ATTR(srcversion); 793 794static char last_unloaded_module[MODULE_NAME_LEN+1]; 795 796#ifdef CONFIG_MODULE_UNLOAD 797 798EXPORT_TRACEPOINT_SYMBOL(module_get); 799 800/* MODULE_REF_BASE is the base reference count by kmodule loader. */ 801#define MODULE_REF_BASE 1 802 803/* Init the unload section of the module. */ 804static int module_unload_init(struct module *mod) 805{ 806 /* 807 * Initialize reference counter to MODULE_REF_BASE. 808 * refcnt == 0 means module is going. 809 */ 810 atomic_set(&mod->refcnt, MODULE_REF_BASE); 811 812 INIT_LIST_HEAD(&mod->source_list); 813 INIT_LIST_HEAD(&mod->target_list); 814 815 /* Hold reference count during initialization. */ 816 atomic_inc(&mod->refcnt); 817 818 return 0; 819} 820 821/* Does a already use b? */ 822static int already_uses(struct module *a, struct module *b) 823{ 824 struct module_use *use; 825 826 list_for_each_entry(use, &b->source_list, source_list) { 827 if (use->source == a) { 828 pr_debug("%s uses %s!\n", a->name, b->name); 829 return 1; 830 } 831 } 832 pr_debug("%s does not use %s!\n", a->name, b->name); 833 return 0; 834} 835 836/* 837 * Module a uses b 838 * - we add 'a' as a "source", 'b' as a "target" of module use 839 * - the module_use is added to the list of 'b' sources (so 840 * 'b' can walk the list to see who sourced them), and of 'a' 841 * targets (so 'a' can see what modules it targets). 842 */ 843static int add_module_usage(struct module *a, struct module *b) 844{ 845 struct module_use *use; 846 847 pr_debug("Allocating new usage for %s.\n", a->name); 848 use = kmalloc(sizeof(*use), GFP_ATOMIC); 849 if (!use) 850 return -ENOMEM; 851 852 use->source = a; 853 use->target = b; 854 list_add(&use->source_list, &b->source_list); 855 list_add(&use->target_list, &a->target_list); 856 return 0; 857} 858 859/* Module a uses b: caller needs module_mutex() */ 860int ref_module(struct module *a, struct module *b) 861{ 862 int err; 863 864 if (b == NULL || already_uses(a, b)) 865 return 0; 866 867 /* If module isn't available, we fail. */ 868 err = strong_try_module_get(b); 869 if (err) 870 return err; 871 872 err = add_module_usage(a, b); 873 if (err) { 874 module_put(b); 875 return err; 876 } 877 return 0; 878} 879EXPORT_SYMBOL_GPL(ref_module); 880 881/* Clear the unload stuff of the module. */ 882static void module_unload_free(struct module *mod) 883{ 884 struct module_use *use, *tmp; 885 886 mutex_lock(&module_mutex); 887 list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) { 888 struct module *i = use->target; 889 pr_debug("%s unusing %s\n", mod->name, i->name); 890 module_put(i); 891 list_del(&use->source_list); 892 list_del(&use->target_list); 893 kfree(use); 894 } 895 mutex_unlock(&module_mutex); 896} 897 898#ifdef CONFIG_MODULE_FORCE_UNLOAD 899static inline int try_force_unload(unsigned int flags) 900{ 901 int ret = (flags & O_TRUNC); 902 if (ret) 903 add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE); 904 return ret; 905} 906#else 907static inline int try_force_unload(unsigned int flags) 908{ 909 return 0; 910} 911#endif /* CONFIG_MODULE_FORCE_UNLOAD */ 912 913/* Try to release refcount of module, 0 means success. */ 914static int try_release_module_ref(struct module *mod) 915{ 916 int ret; 917 918 /* Try to decrement refcnt which we set at loading */ 919 ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt); 920 BUG_ON(ret < 0); 921 if (ret) 922 /* Someone can put this right now, recover with checking */ 923 ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0); 924 925 return ret; 926} 927 928static int try_stop_module(struct module *mod, int flags, int *forced) 929{ 930 /* If it's not unused, quit unless we're forcing. */ 931 if (try_release_module_ref(mod) != 0) { 932 *forced = try_force_unload(flags); 933 if (!(*forced)) 934 return -EWOULDBLOCK; 935 } 936 937 /* Mark it as dying. */ 938 mod->state = MODULE_STATE_GOING; 939 940 return 0; 941} 942 943/** 944 * module_refcount - return the refcount or -1 if unloading 945 * 946 * @mod: the module we're checking 947 * 948 * Returns: 949 * -1 if the module is in the process of unloading 950 * otherwise the number of references in the kernel to the module 951 */ 952int module_refcount(struct module *mod) 953{ 954 return atomic_read(&mod->refcnt) - MODULE_REF_BASE; 955} 956EXPORT_SYMBOL(module_refcount); 957 958/* This exists whether we can unload or not */ 959static void free_module(struct module *mod); 960 961SYSCALL_DEFINE2(delete_module, const char __user *, name_user, 962 unsigned int, flags) 963{ 964 struct module *mod; 965 char name[MODULE_NAME_LEN]; 966 int ret, forced = 0; 967 968 if (!capable(CAP_SYS_MODULE) || modules_disabled) 969 return -EPERM; 970 971 if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0) 972 return -EFAULT; 973 name[MODULE_NAME_LEN-1] = '\0'; 974 975 audit_log_kern_module(name); 976 977 if (mutex_lock_interruptible(&module_mutex) != 0) 978 return -EINTR; 979 980 mod = find_module(name); 981 if (!mod) { 982 ret = -ENOENT; 983 goto out; 984 } 985 986 if (!list_empty(&mod->source_list)) { 987 /* Other modules depend on us: get rid of them first. */ 988 ret = -EWOULDBLOCK; 989 goto out; 990 } 991 992 /* Doing init or already dying? */ 993 if (mod->state != MODULE_STATE_LIVE) { 994 /* FIXME: if (force), slam module count damn the torpedoes */ 995 pr_debug("%s already dying\n", mod->name); 996 ret = -EBUSY; 997 goto out; 998 } 999 1000 /* If it has an init func, it must have an exit func to unload */ 1001 if (mod->init && !mod->exit) { 1002 forced = try_force_unload(flags); 1003 if (!forced) { 1004 /* This module can't be removed */ 1005 ret = -EBUSY; 1006 goto out; 1007 } 1008 } 1009 1010 /* Stop the machine so refcounts can't move and disable module. */ 1011 ret = try_stop_module(mod, flags, &forced); 1012 if (ret != 0) 1013 goto out; 1014 1015 mutex_unlock(&module_mutex); 1016 /* Final destruction now no one is using it. */ 1017 if (mod->exit != NULL) 1018 mod->exit(); 1019 blocking_notifier_call_chain(&module_notify_list, 1020 MODULE_STATE_GOING, mod); 1021 klp_module_going(mod); 1022 ftrace_release_mod(mod); 1023 1024 async_synchronize_full(); 1025 1026 /* Store the name of the last unloaded module for diagnostic purposes */ 1027 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module)); 1028 1029 free_module(mod); 1030 return 0; 1031out: 1032 mutex_unlock(&module_mutex); 1033 return ret; 1034} 1035 1036static inline void print_unload_info(struct seq_file *m, struct module *mod) 1037{ 1038 struct module_use *use; 1039 int printed_something = 0; 1040 1041 seq_printf(m, " %i ", module_refcount(mod)); 1042 1043 /* 1044 * Always include a trailing , so userspace can differentiate 1045 * between this and the old multi-field proc format. 1046 */ 1047 list_for_each_entry(use, &mod->source_list, source_list) { 1048 printed_something = 1; 1049 seq_printf(m, "%s,", use->source->name); 1050 } 1051 1052 if (mod->init != NULL && mod->exit == NULL) { 1053 printed_something = 1; 1054 seq_puts(m, "[permanent],"); 1055 } 1056 1057 if (!printed_something) 1058 seq_puts(m, "-"); 1059} 1060 1061void __symbol_put(const char *symbol) 1062{ 1063 struct module *owner; 1064 1065 preempt_disable(); 1066 if (!find_symbol(symbol, &owner, NULL, true, false)) 1067 BUG(); 1068 module_put(owner); 1069 preempt_enable(); 1070} 1071EXPORT_SYMBOL(__symbol_put); 1072 1073/* Note this assumes addr is a function, which it currently always is. */ 1074void symbol_put_addr(void *addr) 1075{ 1076 struct module *modaddr; 1077 unsigned long a = (unsigned long)dereference_function_descriptor(addr); 1078 1079 if (core_kernel_text(a)) 1080 return; 1081 1082 /* 1083 * Even though we hold a reference on the module; we still need to 1084 * disable preemption in order to safely traverse the data structure. 1085 */ 1086 preempt_disable(); 1087 modaddr = __module_text_address(a); 1088 BUG_ON(!modaddr); 1089 module_put(modaddr); 1090 preempt_enable(); 1091} 1092EXPORT_SYMBOL_GPL(symbol_put_addr); 1093 1094static ssize_t show_refcnt(struct module_attribute *mattr, 1095 struct module_kobject *mk, char *buffer) 1096{ 1097 return sprintf(buffer, "%i\n", module_refcount(mk->mod)); 1098} 1099 1100static struct module_attribute modinfo_refcnt = 1101 __ATTR(refcnt, 0444, show_refcnt, NULL); 1102 1103void __module_get(struct module *module) 1104{ 1105 if (module) { 1106 preempt_disable(); 1107 atomic_inc(&module->refcnt); 1108 trace_module_get(module, _RET_IP_); 1109 preempt_enable(); 1110 } 1111} 1112EXPORT_SYMBOL(__module_get); 1113 1114bool try_module_get(struct module *module) 1115{ 1116 bool ret = true; 1117 1118 if (module) { 1119 preempt_disable(); 1120 /* Note: here, we can fail to get a reference */ 1121 if (likely(module_is_live(module) && 1122 atomic_inc_not_zero(&module->refcnt) != 0)) 1123 trace_module_get(module, _RET_IP_); 1124 else 1125 ret = false; 1126 1127 preempt_enable(); 1128 } 1129 return ret; 1130} 1131EXPORT_SYMBOL(try_module_get); 1132 1133void module_put(struct module *module) 1134{ 1135 int ret; 1136 1137 if (module) { 1138 preempt_disable(); 1139 ret = atomic_dec_if_positive(&module->refcnt); 1140 WARN_ON(ret < 0); /* Failed to put refcount */ 1141 trace_module_put(module, _RET_IP_); 1142 preempt_enable(); 1143 } 1144} 1145EXPORT_SYMBOL(module_put); 1146 1147#else /* !CONFIG_MODULE_UNLOAD */ 1148static inline void print_unload_info(struct seq_file *m, struct module *mod) 1149{ 1150 /* We don't know the usage count, or what modules are using. */ 1151 seq_puts(m, " - -"); 1152} 1153 1154static inline void module_unload_free(struct module *mod) 1155{ 1156} 1157 1158int ref_module(struct module *a, struct module *b) 1159{ 1160 return strong_try_module_get(b); 1161} 1162EXPORT_SYMBOL_GPL(ref_module); 1163 1164static inline int module_unload_init(struct module *mod) 1165{ 1166 return 0; 1167} 1168#endif /* CONFIG_MODULE_UNLOAD */ 1169 1170static size_t module_flags_taint(struct module *mod, char *buf) 1171{ 1172 size_t l = 0; 1173 int i; 1174 1175 for (i = 0; i < TAINT_FLAGS_COUNT; i++) { 1176 if (taint_flags[i].module && test_bit(i, &mod->taints)) 1177 buf[l++] = taint_flags[i].c_true; 1178 } 1179 1180 return l; 1181} 1182 1183static ssize_t show_initstate(struct module_attribute *mattr, 1184 struct module_kobject *mk, char *buffer) 1185{ 1186 const char *state = "unknown"; 1187 1188 switch (mk->mod->state) { 1189 case MODULE_STATE_LIVE: 1190 state = "live"; 1191 break; 1192 case MODULE_STATE_COMING: 1193 state = "coming"; 1194 break; 1195 case MODULE_STATE_GOING: 1196 state = "going"; 1197 break; 1198 default: 1199 BUG(); 1200 } 1201 return sprintf(buffer, "%s\n", state); 1202} 1203 1204static struct module_attribute modinfo_initstate = 1205 __ATTR(initstate, 0444, show_initstate, NULL); 1206 1207static ssize_t store_uevent(struct module_attribute *mattr, 1208 struct module_kobject *mk, 1209 const char *buffer, size_t count) 1210{ 1211 int rc; 1212 1213 rc = kobject_synth_uevent(&mk->kobj, buffer, count); 1214 return rc ? rc : count; 1215} 1216 1217struct module_attribute module_uevent = 1218 __ATTR(uevent, 0200, NULL, store_uevent); 1219 1220static ssize_t show_coresize(struct module_attribute *mattr, 1221 struct module_kobject *mk, char *buffer) 1222{ 1223 return sprintf(buffer, "%u\n", mk->mod->core_layout.size); 1224} 1225 1226static struct module_attribute modinfo_coresize = 1227 __ATTR(coresize, 0444, show_coresize, NULL); 1228 1229static ssize_t show_initsize(struct module_attribute *mattr, 1230 struct module_kobject *mk, char *buffer) 1231{ 1232 return sprintf(buffer, "%u\n", mk->mod->init_layout.size); 1233} 1234 1235static struct module_attribute modinfo_initsize = 1236 __ATTR(initsize, 0444, show_initsize, NULL); 1237 1238static ssize_t show_taint(struct module_attribute *mattr, 1239 struct module_kobject *mk, char *buffer) 1240{ 1241 size_t l; 1242 1243 l = module_flags_taint(mk->mod, buffer); 1244 buffer[l++] = '\n'; 1245 return l; 1246} 1247 1248static struct module_attribute modinfo_taint = 1249 __ATTR(taint, 0444, show_taint, NULL); 1250 1251static struct module_attribute *modinfo_attrs[] = { 1252 &module_uevent, 1253 &modinfo_version, 1254 &modinfo_srcversion, 1255 &modinfo_initstate, 1256 &modinfo_coresize, 1257 &modinfo_initsize, 1258 &modinfo_taint, 1259#ifdef CONFIG_MODULE_UNLOAD 1260 &modinfo_refcnt, 1261#endif 1262 NULL, 1263}; 1264 1265static const char vermagic[] = VERMAGIC_STRING; 1266 1267static int try_to_force_load(struct module *mod, const char *reason) 1268{ 1269#ifdef CONFIG_MODULE_FORCE_LOAD 1270 if (!test_taint(TAINT_FORCED_MODULE)) 1271 pr_warn("%s: %s: kernel tainted.\n", mod->name, reason); 1272 add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE); 1273 return 0; 1274#else 1275 return -ENOEXEC; 1276#endif 1277} 1278 1279#ifdef CONFIG_MODVERSIONS 1280 1281static u32 resolve_rel_crc(const s32 *crc) 1282{ 1283 return *(u32 *)((void *)crc + *crc); 1284} 1285 1286static int check_version(const struct load_info *info, 1287 const char *symname, 1288 struct module *mod, 1289 const s32 *crc) 1290{ 1291 Elf_Shdr *sechdrs = info->sechdrs; 1292 unsigned int versindex = info->index.vers; 1293 unsigned int i, num_versions; 1294 struct modversion_info *versions; 1295 1296 /* Exporting module didn't supply crcs? OK, we're already tainted. */ 1297 if (!crc) 1298 return 1; 1299 1300 /* No versions at all? modprobe --force does this. */ 1301 if (versindex == 0) 1302 return try_to_force_load(mod, symname) == 0; 1303 1304 versions = (void *) sechdrs[versindex].sh_addr; 1305 num_versions = sechdrs[versindex].sh_size 1306 / sizeof(struct modversion_info); 1307 1308 for (i = 0; i < num_versions; i++) { 1309 u32 crcval; 1310 1311 if (strcmp(versions[i].name, symname) != 0) 1312 continue; 1313 1314 if (IS_ENABLED(CONFIG_MODULE_REL_CRCS)) 1315 crcval = resolve_rel_crc(crc); 1316 else 1317 crcval = *crc; 1318 if (versions[i].crc == crcval) 1319 return 1; 1320 pr_debug("Found checksum %X vs module %lX\n", 1321 crcval, versions[i].crc); 1322 goto bad_version; 1323 } 1324 1325 /* Broken toolchain. Warn once, then let it go.. */ 1326 pr_warn_once("%s: no symbol version for %s\n", info->name, symname); 1327 return 1; 1328 1329bad_version: 1330 pr_warn("%s: disagrees about version of symbol %s\n", 1331 info->name, symname); 1332 return 0; 1333} 1334 1335static inline int check_modstruct_version(const struct load_info *info, 1336 struct module *mod) 1337{ 1338 const s32 *crc; 1339 1340 /* 1341 * Since this should be found in kernel (which can't be removed), no 1342 * locking is necessary -- use preempt_disable() to placate lockdep. 1343 */ 1344 preempt_disable(); 1345 if (!find_symbol("module_layout", NULL, &crc, true, false)) { 1346 preempt_enable(); 1347 BUG(); 1348 } 1349 preempt_enable(); 1350 return check_version(info, "module_layout", mod, crc); 1351} 1352 1353/* First part is kernel version, which we ignore if module has crcs. */ 1354static inline int same_magic(const char *amagic, const char *bmagic, 1355 bool has_crcs) 1356{ 1357 if (has_crcs) { 1358 amagic += strcspn(amagic, " "); 1359 bmagic += strcspn(bmagic, " "); 1360 } 1361 return strcmp(amagic, bmagic) == 0; 1362} 1363#else 1364static inline int check_version(const struct load_info *info, 1365 const char *symname, 1366 struct module *mod, 1367 const s32 *crc) 1368{ 1369 return 1; 1370} 1371 1372static inline int check_modstruct_version(const struct load_info *info, 1373 struct module *mod) 1374{ 1375 return 1; 1376} 1377 1378static inline int same_magic(const char *amagic, const char *bmagic, 1379 bool has_crcs) 1380{ 1381 return strcmp(amagic, bmagic) == 0; 1382} 1383#endif /* CONFIG_MODVERSIONS */ 1384 1385/* Resolve a symbol for this module. I.e. if we find one, record usage. */ 1386static const struct kernel_symbol *resolve_symbol(struct module *mod, 1387 const struct load_info *info, 1388 const char *name, 1389 char ownername[]) 1390{ 1391 struct module *owner; 1392 const struct kernel_symbol *sym; 1393 const s32 *crc; 1394 int err; 1395 1396 /* 1397 * The module_mutex should not be a heavily contended lock; 1398 * if we get the occasional sleep here, we'll go an extra iteration 1399 * in the wait_event_interruptible(), which is harmless. 1400 */ 1401 sched_annotate_sleep(); 1402 mutex_lock(&module_mutex); 1403 sym = find_symbol(name, &owner, &crc, 1404 !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true); 1405 if (!sym) 1406 goto unlock; 1407 1408 if (!check_version(info, name, mod, crc)) { 1409 sym = ERR_PTR(-EINVAL); 1410 goto getname; 1411 } 1412 1413 err = ref_module(mod, owner); 1414 if (err) { 1415 sym = ERR_PTR(err); 1416 goto getname; 1417 } 1418 1419getname: 1420 /* We must make copy under the lock if we failed to get ref. */ 1421 strncpy(ownername, module_name(owner), MODULE_NAME_LEN); 1422unlock: 1423 mutex_unlock(&module_mutex); 1424 return sym; 1425} 1426 1427static const struct kernel_symbol * 1428resolve_symbol_wait(struct module *mod, 1429 const struct load_info *info, 1430 const char *name) 1431{ 1432 const struct kernel_symbol *ksym; 1433 char owner[MODULE_NAME_LEN]; 1434 1435 if (wait_event_interruptible_timeout(module_wq, 1436 !IS_ERR(ksym = resolve_symbol(mod, info, name, owner)) 1437 || PTR_ERR(ksym) != -EBUSY, 1438 30 * HZ) <= 0) { 1439 pr_warn("%s: gave up waiting for init of module %s.\n", 1440 mod->name, owner); 1441 } 1442 return ksym; 1443} 1444 1445/* 1446 * /sys/module/foo/sections stuff 1447 * J. Corbet <corbet@lwn.net> 1448 */ 1449#ifdef CONFIG_SYSFS 1450 1451#ifdef CONFIG_KALLSYMS 1452static inline bool sect_empty(const Elf_Shdr *sect) 1453{ 1454 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; 1455} 1456 1457struct module_sect_attr { 1458 struct module_attribute mattr; 1459 char *name; 1460 unsigned long address; 1461}; 1462 1463struct module_sect_attrs { 1464 struct attribute_group grp; 1465 unsigned int nsections; 1466 struct module_sect_attr attrs[0]; 1467}; 1468 1469static ssize_t module_sect_show(struct module_attribute *mattr, 1470 struct module_kobject *mk, char *buf) 1471{ 1472 struct module_sect_attr *sattr = 1473 container_of(mattr, struct module_sect_attr, mattr); 1474 return sprintf(buf, "0x%px\n", kptr_restrict < 2 ? 1475 (void *)sattr->address : NULL); 1476} 1477 1478static void free_sect_attrs(struct module_sect_attrs *sect_attrs) 1479{ 1480 unsigned int section; 1481 1482 for (section = 0; section < sect_attrs->nsections; section++) 1483 kfree(sect_attrs->attrs[section].name); 1484 kfree(sect_attrs); 1485} 1486 1487static void add_sect_attrs(struct module *mod, const struct load_info *info) 1488{ 1489 unsigned int nloaded = 0, i, size[2]; 1490 struct module_sect_attrs *sect_attrs; 1491 struct module_sect_attr *sattr; 1492 struct attribute **gattr; 1493 1494 /* Count loaded sections and allocate structures */ 1495 for (i = 0; i < info->hdr->e_shnum; i++) 1496 if (!sect_empty(&info->sechdrs[i])) 1497 nloaded++; 1498 size[0] = ALIGN(sizeof(*sect_attrs) 1499 + nloaded * sizeof(sect_attrs->attrs[0]), 1500 sizeof(sect_attrs->grp.attrs[0])); 1501 size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]); 1502 sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL); 1503 if (sect_attrs == NULL) 1504 return; 1505 1506 /* Setup section attributes. */ 1507 sect_attrs->grp.name = "sections"; 1508 sect_attrs->grp.attrs = (void *)sect_attrs + size[0]; 1509 1510 sect_attrs->nsections = 0; 1511 sattr = &sect_attrs->attrs[0]; 1512 gattr = &sect_attrs->grp.attrs[0]; 1513 for (i = 0; i < info->hdr->e_shnum; i++) { 1514 Elf_Shdr *sec = &info->sechdrs[i]; 1515 if (sect_empty(sec)) 1516 continue; 1517 sattr->address = sec->sh_addr; 1518 sattr->name = kstrdup(info->secstrings + sec->sh_name, 1519 GFP_KERNEL); 1520 if (sattr->name == NULL) 1521 goto out; 1522 sect_attrs->nsections++; 1523 sysfs_attr_init(&sattr->mattr.attr); 1524 sattr->mattr.show = module_sect_show; 1525 sattr->mattr.store = NULL; 1526 sattr->mattr.attr.name = sattr->name; 1527 sattr->mattr.attr.mode = S_IRUSR; 1528 *(gattr++) = &(sattr++)->mattr.attr; 1529 } 1530 *gattr = NULL; 1531 1532 if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp)) 1533 goto out; 1534 1535 mod->sect_attrs = sect_attrs; 1536 return; 1537 out: 1538 free_sect_attrs(sect_attrs); 1539} 1540 1541static void remove_sect_attrs(struct module *mod) 1542{ 1543 if (mod->sect_attrs) { 1544 sysfs_remove_group(&mod->mkobj.kobj, 1545 &mod->sect_attrs->grp); 1546 /* We are positive that no one is using any sect attrs 1547 * at this point. Deallocate immediately. */ 1548 free_sect_attrs(mod->sect_attrs); 1549 mod->sect_attrs = NULL; 1550 } 1551} 1552 1553/* 1554 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections. 1555 */ 1556 1557struct module_notes_attrs { 1558 struct kobject *dir; 1559 unsigned int notes; 1560 struct bin_attribute attrs[0]; 1561}; 1562 1563static ssize_t module_notes_read(struct file *filp, struct kobject *kobj, 1564 struct bin_attribute *bin_attr, 1565 char *buf, loff_t pos, size_t count) 1566{ 1567 /* 1568 * The caller checked the pos and count against our size. 1569 */ 1570 memcpy(buf, bin_attr->private + pos, count); 1571 return count; 1572} 1573 1574static void free_notes_attrs(struct module_notes_attrs *notes_attrs, 1575 unsigned int i) 1576{ 1577 if (notes_attrs->dir) { 1578 while (i-- > 0) 1579 sysfs_remove_bin_file(notes_attrs->dir, 1580 &notes_attrs->attrs[i]); 1581 kobject_put(notes_attrs->dir); 1582 } 1583 kfree(notes_attrs); 1584} 1585 1586static void add_notes_attrs(struct module *mod, const struct load_info *info) 1587{ 1588 unsigned int notes, loaded, i; 1589 struct module_notes_attrs *notes_attrs; 1590 struct bin_attribute *nattr; 1591 1592 /* failed to create section attributes, so can't create notes */ 1593 if (!mod->sect_attrs) 1594 return; 1595 1596 /* Count notes sections and allocate structures. */ 1597 notes = 0; 1598 for (i = 0; i < info->hdr->e_shnum; i++) 1599 if (!sect_empty(&info->sechdrs[i]) && 1600 (info->sechdrs[i].sh_type == SHT_NOTE)) 1601 ++notes; 1602 1603 if (notes == 0) 1604 return; 1605 1606 notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes), 1607 GFP_KERNEL); 1608 if (notes_attrs == NULL) 1609 return; 1610 1611 notes_attrs->notes = notes; 1612 nattr = &notes_attrs->attrs[0]; 1613 for (loaded = i = 0; i < info->hdr->e_shnum; ++i) { 1614 if (sect_empty(&info->sechdrs[i])) 1615 continue; 1616 if (info->sechdrs[i].sh_type == SHT_NOTE) { 1617 sysfs_bin_attr_init(nattr); 1618 nattr->attr.name = mod->sect_attrs->attrs[loaded].name; 1619 nattr->attr.mode = S_IRUGO; 1620 nattr->size = info->sechdrs[i].sh_size; 1621 nattr->private = (void *) info->sechdrs[i].sh_addr; 1622 nattr->read = module_notes_read; 1623 ++nattr; 1624 } 1625 ++loaded; 1626 } 1627 1628 notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj); 1629 if (!notes_attrs->dir) 1630 goto out; 1631 1632 for (i = 0; i < notes; ++i) 1633 if (sysfs_create_bin_file(notes_attrs->dir, 1634 &notes_attrs->attrs[i])) 1635 goto out; 1636 1637 mod->notes_attrs = notes_attrs; 1638 return; 1639 1640 out: 1641 free_notes_attrs(notes_attrs, i); 1642} 1643 1644static void remove_notes_attrs(struct module *mod) 1645{ 1646 if (mod->notes_attrs) 1647 free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes); 1648} 1649 1650#else 1651 1652static inline void add_sect_attrs(struct module *mod, 1653 const struct load_info *info) 1654{ 1655} 1656 1657static inline void remove_sect_attrs(struct module *mod) 1658{ 1659} 1660 1661static inline void add_notes_attrs(struct module *mod, 1662 const struct load_info *info) 1663{ 1664} 1665 1666static inline void remove_notes_attrs(struct module *mod) 1667{ 1668} 1669#endif /* CONFIG_KALLSYMS */ 1670 1671static void del_usage_links(struct module *mod) 1672{ 1673#ifdef CONFIG_MODULE_UNLOAD 1674 struct module_use *use; 1675 1676 mutex_lock(&module_mutex); 1677 list_for_each_entry(use, &mod->target_list, target_list) 1678 sysfs_remove_link(use->target->holders_dir, mod->name); 1679 mutex_unlock(&module_mutex); 1680#endif 1681} 1682 1683static int add_usage_links(struct module *mod) 1684{ 1685 int ret = 0; 1686#ifdef CONFIG_MODULE_UNLOAD 1687 struct module_use *use; 1688 1689 mutex_lock(&module_mutex); 1690 list_for_each_entry(use, &mod->target_list, target_list) { 1691 ret = sysfs_create_link(use->target->holders_dir, 1692 &mod->mkobj.kobj, mod->name); 1693 if (ret) 1694 break; 1695 } 1696 mutex_unlock(&module_mutex); 1697 if (ret) 1698 del_usage_links(mod); 1699#endif 1700 return ret; 1701} 1702 1703static int module_add_modinfo_attrs(struct module *mod) 1704{ 1705 struct module_attribute *attr; 1706 struct module_attribute *temp_attr; 1707 int error = 0; 1708 int i; 1709 1710 mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) * 1711 (ARRAY_SIZE(modinfo_attrs) + 1)), 1712 GFP_KERNEL); 1713 if (!mod->modinfo_attrs) 1714 return -ENOMEM; 1715 1716 temp_attr = mod->modinfo_attrs; 1717 for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) { 1718 if (!attr->test || attr->test(mod)) { 1719 memcpy(temp_attr, attr, sizeof(*temp_attr)); 1720 sysfs_attr_init(&temp_attr->attr); 1721 error = sysfs_create_file(&mod->mkobj.kobj, 1722 &temp_attr->attr); 1723 ++temp_attr; 1724 } 1725 } 1726 return error; 1727} 1728 1729static void module_remove_modinfo_attrs(struct module *mod) 1730{ 1731 struct module_attribute *attr; 1732 int i; 1733 1734 for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) { 1735 /* pick a field to test for end of list */ 1736 if (!attr->attr.name) 1737 break; 1738 sysfs_remove_file(&mod->mkobj.kobj, &attr->attr); 1739 if (attr->free) 1740 attr->free(mod); 1741 } 1742 kfree(mod->modinfo_attrs); 1743} 1744 1745static void mod_kobject_put(struct module *mod) 1746{ 1747 DECLARE_COMPLETION_ONSTACK(c); 1748 mod->mkobj.kobj_completion = &c; 1749 kobject_put(&mod->mkobj.kobj); 1750 wait_for_completion(&c); 1751} 1752 1753static int mod_sysfs_init(struct module *mod) 1754{ 1755 int err; 1756 struct kobject *kobj; 1757 1758 if (!module_sysfs_initialized) { 1759 pr_err("%s: module sysfs not initialized\n", mod->name); 1760 err = -EINVAL; 1761 goto out; 1762 } 1763 1764 kobj = kset_find_obj(module_kset, mod->name); 1765 if (kobj) { 1766 pr_err("%s: module is already loaded\n", mod->name); 1767 kobject_put(kobj); 1768 err = -EINVAL; 1769 goto out; 1770 } 1771 1772 mod->mkobj.mod = mod; 1773 1774 memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj)); 1775 mod->mkobj.kobj.kset = module_kset; 1776 err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL, 1777 "%s", mod->name); 1778 if (err) 1779 mod_kobject_put(mod); 1780 1781 /* delay uevent until full sysfs population */ 1782out: 1783 return err; 1784} 1785 1786static int mod_sysfs_setup(struct module *mod, 1787 const struct load_info *info, 1788 struct kernel_param *kparam, 1789 unsigned int num_params) 1790{ 1791 int err; 1792 1793 err = mod_sysfs_init(mod); 1794 if (err) 1795 goto out; 1796 1797 mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj); 1798 if (!mod->holders_dir) { 1799 err = -ENOMEM; 1800 goto out_unreg; 1801 } 1802 1803 err = module_param_sysfs_setup(mod, kparam, num_params); 1804 if (err) 1805 goto out_unreg_holders; 1806 1807 err = module_add_modinfo_attrs(mod); 1808 if (err) 1809 goto out_unreg_param; 1810 1811 err = add_usage_links(mod); 1812 if (err) 1813 goto out_unreg_modinfo_attrs; 1814 1815 add_sect_attrs(mod, info); 1816 add_notes_attrs(mod, info); 1817 1818 kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); 1819 return 0; 1820 1821out_unreg_modinfo_attrs: 1822 module_remove_modinfo_attrs(mod); 1823out_unreg_param: 1824 module_param_sysfs_remove(mod); 1825out_unreg_holders: 1826 kobject_put(mod->holders_dir); 1827out_unreg: 1828 mod_kobject_put(mod); 1829out: 1830 return err; 1831} 1832 1833static void mod_sysfs_fini(struct module *mod) 1834{ 1835 remove_notes_attrs(mod); 1836 remove_sect_attrs(mod); 1837 mod_kobject_put(mod); 1838} 1839 1840static void init_param_lock(struct module *mod) 1841{ 1842 mutex_init(&mod->param_lock); 1843} 1844#else /* !CONFIG_SYSFS */ 1845 1846static int mod_sysfs_setup(struct module *mod, 1847 const struct load_info *info, 1848 struct kernel_param *kparam, 1849 unsigned int num_params) 1850{ 1851 return 0; 1852} 1853 1854static void mod_sysfs_fini(struct module *mod) 1855{ 1856} 1857 1858static void module_remove_modinfo_attrs(struct module *mod) 1859{ 1860} 1861 1862static void del_usage_links(struct module *mod) 1863{ 1864} 1865 1866static void init_param_lock(struct module *mod) 1867{ 1868} 1869#endif /* CONFIG_SYSFS */ 1870 1871static void mod_sysfs_teardown(struct module *mod) 1872{ 1873 del_usage_links(mod); 1874 module_remove_modinfo_attrs(mod); 1875 module_param_sysfs_remove(mod); 1876 kobject_put(mod->mkobj.drivers_dir); 1877 kobject_put(mod->holders_dir); 1878 mod_sysfs_fini(mod); 1879} 1880 1881#ifdef CONFIG_STRICT_MODULE_RWX 1882/* 1883 * LKM RO/NX protection: protect module's text/ro-data 1884 * from modification and any data from execution. 1885 * 1886 * General layout of module is: 1887 * [text] [read-only-data] [ro-after-init] [writable data] 1888 * text_size -----^ ^ ^ ^ 1889 * ro_size ------------------------| | | 1890 * ro_after_init_size -----------------------------| | 1891 * size -----------------------------------------------------------| 1892 * 1893 * These values are always page-aligned (as is base) 1894 */ 1895static void frob_text(const struct module_layout *layout, 1896 int (*set_memory)(unsigned long start, int num_pages)) 1897{ 1898 BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); 1899 BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1)); 1900 set_memory((unsigned long)layout->base, 1901 layout->text_size >> PAGE_SHIFT); 1902} 1903 1904static void frob_rodata(const struct module_layout *layout, 1905 int (*set_memory)(unsigned long start, int num_pages)) 1906{ 1907 BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); 1908 BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1)); 1909 BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1)); 1910 set_memory((unsigned long)layout->base + layout->text_size, 1911 (layout->ro_size - layout->text_size) >> PAGE_SHIFT); 1912} 1913 1914static void frob_ro_after_init(const struct module_layout *layout, 1915 int (*set_memory)(unsigned long start, int num_pages)) 1916{ 1917 BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); 1918 BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1)); 1919 BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1)); 1920 set_memory((unsigned long)layout->base + layout->ro_size, 1921 (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT); 1922} 1923 1924static void frob_writable_data(const struct module_layout *layout, 1925 int (*set_memory)(unsigned long start, int num_pages)) 1926{ 1927 BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); 1928 BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1)); 1929 BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1)); 1930 set_memory((unsigned long)layout->base + layout->ro_after_init_size, 1931 (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT); 1932} 1933 1934/* livepatching wants to disable read-only so it can frob module. */ 1935void module_disable_ro(const struct module *mod) 1936{ 1937 if (!rodata_enabled) 1938 return; 1939 1940 frob_text(&mod->core_layout, set_memory_rw); 1941 frob_rodata(&mod->core_layout, set_memory_rw); 1942 frob_ro_after_init(&mod->core_layout, set_memory_rw); 1943 frob_text(&mod->init_layout, set_memory_rw); 1944 frob_rodata(&mod->init_layout, set_memory_rw); 1945} 1946 1947void module_enable_ro(const struct module *mod, bool after_init) 1948{ 1949 if (!rodata_enabled) 1950 return; 1951 1952 frob_text(&mod->core_layout, set_memory_ro); 1953 frob_rodata(&mod->core_layout, set_memory_ro); 1954 frob_text(&mod->init_layout, set_memory_ro); 1955 frob_rodata(&mod->init_layout, set_memory_ro); 1956 1957 if (after_init) 1958 frob_ro_after_init(&mod->core_layout, set_memory_ro); 1959} 1960 1961static void module_enable_nx(const struct module *mod) 1962{ 1963 frob_rodata(&mod->core_layout, set_memory_nx); 1964 frob_ro_after_init(&mod->core_layout, set_memory_nx); 1965 frob_writable_data(&mod->core_layout, set_memory_nx); 1966 frob_rodata(&mod->init_layout, set_memory_nx); 1967 frob_writable_data(&mod->init_layout, set_memory_nx); 1968} 1969 1970static void module_disable_nx(const struct module *mod) 1971{ 1972 frob_rodata(&mod->core_layout, set_memory_x); 1973 frob_ro_after_init(&mod->core_layout, set_memory_x); 1974 frob_writable_data(&mod->core_layout, set_memory_x); 1975 frob_rodata(&mod->init_layout, set_memory_x); 1976 frob_writable_data(&mod->init_layout, set_memory_x); 1977} 1978 1979/* Iterate through all modules and set each module's text as RW */ 1980void set_all_modules_text_rw(void) 1981{ 1982 struct module *mod; 1983 1984 if (!rodata_enabled) 1985 return; 1986 1987 mutex_lock(&module_mutex); 1988 list_for_each_entry_rcu(mod, &modules, list) { 1989 if (mod->state == MODULE_STATE_UNFORMED) 1990 continue; 1991 1992 frob_text(&mod->core_layout, set_memory_rw); 1993 frob_text(&mod->init_layout, set_memory_rw); 1994 } 1995 mutex_unlock(&module_mutex); 1996} 1997 1998/* Iterate through all modules and set each module's text as RO */ 1999void set_all_modules_text_ro(void) 2000{ 2001 struct module *mod; 2002 2003 if (!rodata_enabled) 2004 return; 2005 2006 mutex_lock(&module_mutex); 2007 list_for_each_entry_rcu(mod, &modules, list) { 2008 /* 2009 * Ignore going modules since it's possible that ro 2010 * protection has already been disabled, otherwise we'll 2011 * run into protection faults at module deallocation. 2012 */ 2013 if (mod->state == MODULE_STATE_UNFORMED || 2014 mod->state == MODULE_STATE_GOING) 2015 continue; 2016 2017 frob_text(&mod->core_layout, set_memory_ro); 2018 frob_text(&mod->init_layout, set_memory_ro); 2019 } 2020 mutex_unlock(&module_mutex); 2021} 2022 2023static void disable_ro_nx(const struct module_layout *layout) 2024{ 2025 if (rodata_enabled) { 2026 frob_text(layout, set_memory_rw); 2027 frob_rodata(layout, set_memory_rw); 2028 frob_ro_after_init(layout, set_memory_rw); 2029 } 2030 frob_rodata(layout, set_memory_x); 2031 frob_ro_after_init(layout, set_memory_x); 2032 frob_writable_data(layout, set_memory_x); 2033} 2034 2035#else 2036static void disable_ro_nx(const struct module_layout *layout) { } 2037static void module_enable_nx(const struct module *mod) { } 2038static void module_disable_nx(const struct module *mod) { } 2039#endif 2040 2041#ifdef CONFIG_LIVEPATCH 2042/* 2043 * Persist Elf information about a module. Copy the Elf header, 2044 * section header table, section string table, and symtab section 2045 * index from info to mod->klp_info. 2046 */ 2047static int copy_module_elf(struct module *mod, struct load_info *info) 2048{ 2049 unsigned int size, symndx; 2050 int ret; 2051 2052 size = sizeof(*mod->klp_info); 2053 mod->klp_info = kmalloc(size, GFP_KERNEL); 2054 if (mod->klp_info == NULL) 2055 return -ENOMEM; 2056 2057 /* Elf header */ 2058 size = sizeof(mod->klp_info->hdr); 2059 memcpy(&mod->klp_info->hdr, info->hdr, size); 2060 2061 /* Elf section header table */ 2062 size = sizeof(*info->sechdrs) * info->hdr->e_shnum; 2063 mod->klp_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL); 2064 if (mod->klp_info->sechdrs == NULL) { 2065 ret = -ENOMEM; 2066 goto free_info; 2067 } 2068 2069 /* Elf section name string table */ 2070 size = info->sechdrs[info->hdr->e_shstrndx].sh_size; 2071 mod->klp_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL); 2072 if (mod->klp_info->secstrings == NULL) { 2073 ret = -ENOMEM; 2074 goto free_sechdrs; 2075 } 2076 2077 /* Elf symbol section index */ 2078 symndx = info->index.sym; 2079 mod->klp_info->symndx = symndx; 2080 2081 /* 2082 * For livepatch modules, core_kallsyms.symtab is a complete 2083 * copy of the original symbol table. Adjust sh_addr to point 2084 * to core_kallsyms.symtab since the copy of the symtab in module 2085 * init memory is freed at the end of do_init_module(). 2086 */ 2087 mod->klp_info->sechdrs[symndx].sh_addr = \ 2088 (unsigned long) mod->core_kallsyms.symtab; 2089 2090 return 0; 2091 2092free_sechdrs: 2093 kfree(mod->klp_info->sechdrs); 2094free_info: 2095 kfree(mod->klp_info); 2096 return ret; 2097} 2098 2099static void free_module_elf(struct module *mod) 2100{ 2101 kfree(mod->klp_info->sechdrs); 2102 kfree(mod->klp_info->secstrings); 2103 kfree(mod->klp_info); 2104} 2105#else /* !CONFIG_LIVEPATCH */ 2106static int copy_module_elf(struct module *mod, struct load_info *info) 2107{ 2108 return 0; 2109} 2110 2111static void free_module_elf(struct module *mod) 2112{ 2113} 2114#endif /* CONFIG_LIVEPATCH */ 2115 2116void __weak module_memfree(void *module_region) 2117{ 2118 vfree(module_region); 2119} 2120 2121void __weak module_arch_cleanup(struct module *mod) 2122{ 2123} 2124 2125void __weak module_arch_freeing_init(struct module *mod) 2126{ 2127} 2128 2129/* Free a module, remove from lists, etc. */ 2130static void free_module(struct module *mod) 2131{ 2132 trace_module_free(mod); 2133 2134 mod_sysfs_teardown(mod); 2135 2136 /* We leave it in list to prevent duplicate loads, but make sure 2137 * that noone uses it while it's being deconstructed. */ 2138 mutex_lock(&module_mutex); 2139 mod->state = MODULE_STATE_UNFORMED; 2140 mutex_unlock(&module_mutex); 2141 2142 /* Remove dynamic debug info */ 2143 ddebug_remove_module(mod->name); 2144 2145 /* Arch-specific cleanup. */ 2146 module_arch_cleanup(mod); 2147 2148 /* Module unload stuff */ 2149 module_unload_free(mod); 2150 2151 /* Free any allocated parameters. */ 2152 destroy_params(mod->kp, mod->num_kp); 2153 2154 if (is_livepatch_module(mod)) 2155 free_module_elf(mod); 2156 2157 /* Now we can delete it from the lists */ 2158 mutex_lock(&module_mutex); 2159 /* Unlink carefully: kallsyms could be walking list. */ 2160 list_del_rcu(&mod->list); 2161 mod_tree_remove(mod); 2162 /* Remove this module from bug list, this uses list_del_rcu */ 2163 module_bug_cleanup(mod); 2164 /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */ 2165 synchronize_rcu(); 2166 mutex_unlock(&module_mutex); 2167 2168 /* This may be empty, but that's OK */ 2169 disable_ro_nx(&mod->init_layout); 2170 module_arch_freeing_init(mod); 2171 module_memfree(mod->init_layout.base); 2172 kfree(mod->args); 2173 percpu_modfree(mod); 2174 2175 /* Free lock-classes; relies on the preceding sync_rcu(). */ 2176 lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); 2177 2178 /* Finally, free the core (containing the module structure) */ 2179 disable_ro_nx(&mod->core_layout); 2180 module_memfree(mod->core_layout.base); 2181} 2182 2183void *__symbol_get(const char *symbol) 2184{ 2185 struct module *owner; 2186 const struct kernel_symbol *sym; 2187 2188 preempt_disable(); 2189 sym = find_symbol(symbol, &owner, NULL, true, true); 2190 if (sym && strong_try_module_get(owner)) 2191 sym = NULL; 2192 preempt_enable(); 2193 2194 return sym ? (void *)kernel_symbol_value(sym) : NULL; 2195} 2196EXPORT_SYMBOL_GPL(__symbol_get); 2197 2198/* 2199 * Ensure that an exported symbol [global namespace] does not already exist 2200 * in the kernel or in some other module's exported symbol table. 2201 * 2202 * You must hold the module_mutex. 2203 */ 2204static int verify_exported_symbols(struct module *mod) 2205{ 2206 unsigned int i; 2207 struct module *owner; 2208 const struct kernel_symbol *s; 2209 struct { 2210 const struct kernel_symbol *sym; 2211 unsigned int num; 2212 } arr[] = { 2213 { mod->syms, mod->num_syms }, 2214 { mod->gpl_syms, mod->num_gpl_syms }, 2215 { mod->gpl_future_syms, mod->num_gpl_future_syms }, 2216#ifdef CONFIG_UNUSED_SYMBOLS 2217 { mod->unused_syms, mod->num_unused_syms }, 2218 { mod->unused_gpl_syms, mod->num_unused_gpl_syms }, 2219#endif 2220 }; 2221 2222 for (i = 0; i < ARRAY_SIZE(arr); i++) { 2223 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { 2224 if (find_symbol(kernel_symbol_name(s), &owner, NULL, 2225 true, false)) { 2226 pr_err("%s: exports duplicate symbol %s" 2227 " (owned by %s)\n", 2228 mod->name, kernel_symbol_name(s), 2229 module_name(owner)); 2230 return -ENOEXEC; 2231 } 2232 } 2233 } 2234 return 0; 2235} 2236 2237/* Change all symbols so that st_value encodes the pointer directly. */ 2238static int simplify_symbols(struct module *mod, const struct load_info *info) 2239{ 2240 Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; 2241 Elf_Sym *sym = (void *)symsec->sh_addr; 2242 unsigned long secbase; 2243 unsigned int i; 2244 int ret = 0; 2245 const struct kernel_symbol *ksym; 2246 2247 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) { 2248 const char *name = info->strtab + sym[i].st_name; 2249 2250 switch (sym[i].st_shndx) { 2251 case SHN_COMMON: 2252 /* Ignore common symbols */ 2253 if (!strncmp(name, "__gnu_lto", 9)) 2254 break; 2255 2256 /* We compiled with -fno-common. These are not 2257 supposed to happen. */ 2258 pr_debug("Common symbol: %s\n", name); 2259 pr_warn("%s: please compile with -fno-common\n", 2260 mod->name); 2261 ret = -ENOEXEC; 2262 break; 2263 2264 case SHN_ABS: 2265 /* Don't need to do anything */ 2266 pr_debug("Absolute symbol: 0x%08lx\n", 2267 (long)sym[i].st_value); 2268 break; 2269 2270 case SHN_LIVEPATCH: 2271 /* Livepatch symbols are resolved by livepatch */ 2272 break; 2273 2274 case SHN_UNDEF: 2275 ksym = resolve_symbol_wait(mod, info, name); 2276 /* Ok if resolved. */ 2277 if (ksym && !IS_ERR(ksym)) { 2278 sym[i].st_value = kernel_symbol_value(ksym); 2279 break; 2280 } 2281 2282 /* Ok if weak. */ 2283 if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK) 2284 break; 2285 2286 ret = PTR_ERR(ksym) ?: -ENOENT; 2287 pr_warn("%s: Unknown symbol %s (err %d)\n", 2288 mod->name, name, ret); 2289 break; 2290 2291 default: 2292 /* Divert to percpu allocation if a percpu var. */ 2293 if (sym[i].st_shndx == info->index.pcpu) 2294 secbase = (unsigned long)mod_percpu(mod); 2295 else 2296 secbase = info->sechdrs[sym[i].st_shndx].sh_addr; 2297 sym[i].st_value += secbase; 2298 break; 2299 } 2300 } 2301 2302 return ret; 2303} 2304 2305static int apply_relocations(struct module *mod, const struct load_info *info) 2306{ 2307 unsigned int i; 2308 int err = 0; 2309 2310 /* Now do relocations. */ 2311 for (i = 1; i < info->hdr->e_shnum; i++) { 2312 unsigned int infosec = info->sechdrs[i].sh_info; 2313 2314 /* Not a valid relocation section? */ 2315 if (infosec >= info->hdr->e_shnum) 2316 continue; 2317 2318 /* Don't bother with non-allocated sections */ 2319 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC)) 2320 continue; 2321 2322 /* Livepatch relocation sections are applied by livepatch */ 2323 if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH) 2324 continue; 2325 2326 if (info->sechdrs[i].sh_type == SHT_REL) 2327 err = apply_relocate(info->sechdrs, info->strtab, 2328 info->index.sym, i, mod); 2329 else if (info->sechdrs[i].sh_type == SHT_RELA) 2330 err = apply_relocate_add(info->sechdrs, info->strtab, 2331 info->index.sym, i, mod); 2332 if (err < 0) 2333 break; 2334 } 2335 return err; 2336} 2337 2338/* Additional bytes needed by arch in front of individual sections */ 2339unsigned int __weak arch_mod_section_prepend(struct module *mod, 2340 unsigned int section) 2341{ 2342 /* default implementation just returns zero */ 2343 return 0; 2344} 2345 2346/* Update size with this section: return offset. */ 2347static long get_offset(struct module *mod, unsigned int *size, 2348 Elf_Shdr *sechdr, unsigned int section) 2349{ 2350 long ret; 2351 2352 *size += arch_mod_section_prepend(mod, section); 2353 ret = ALIGN(*size, sechdr->sh_addralign ?: 1); 2354 *size = ret + sechdr->sh_size; 2355 return ret; 2356} 2357 2358/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld 2359 might -- code, read-only data, read-write data, small data. Tally 2360 sizes, and place the offsets into sh_entsize fields: high bit means it 2361 belongs in init. */ 2362static void layout_sections(struct module *mod, struct load_info *info) 2363{ 2364 static unsigned long const masks[][2] = { 2365 /* NOTE: all executable code must be the first section 2366 * in this array; otherwise modify the text_size 2367 * finder in the two loops below */ 2368 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL }, 2369 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL }, 2370 { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL }, 2371 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL }, 2372 { ARCH_SHF_SMALL | SHF_ALLOC, 0 } 2373 }; 2374 unsigned int m, i; 2375 2376 for (i = 0; i < info->hdr->e_shnum; i++) 2377 info->sechdrs[i].sh_entsize = ~0UL; 2378 2379 pr_debug("Core section allocation order:\n"); 2380 for (m = 0; m < ARRAY_SIZE(masks); ++m) { 2381 for (i = 0; i < info->hdr->e_shnum; ++i) { 2382 Elf_Shdr *s = &info->sechdrs[i]; 2383 const char *sname = info->secstrings + s->sh_name; 2384 2385 if ((s->sh_flags & masks[m][0]) != masks[m][0] 2386 || (s->sh_flags & masks[m][1]) 2387 || s->sh_entsize != ~0UL 2388 || strstarts(sname, ".init")) 2389 continue; 2390 s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i); 2391 pr_debug("\t%s\n", sname); 2392 } 2393 switch (m) { 2394 case 0: /* executable */ 2395 mod->core_layout.size = debug_align(mod->core_layout.size); 2396 mod->core_layout.text_size = mod->core_layout.size; 2397 break; 2398 case 1: /* RO: text and ro-data */ 2399 mod->core_layout.size = debug_align(mod->core_layout.size); 2400 mod->core_layout.ro_size = mod->core_layout.size; 2401 break; 2402 case 2: /* RO after init */ 2403 mod->core_layout.size = debug_align(mod->core_layout.size); 2404 mod->core_layout.ro_after_init_size = mod->core_layout.size; 2405 break; 2406 case 4: /* whole core */ 2407 mod->core_layout.size = debug_align(mod->core_layout.size); 2408 break; 2409 } 2410 } 2411 2412 pr_debug("Init section allocation order:\n"); 2413 for (m = 0; m < ARRAY_SIZE(masks); ++m) { 2414 for (i = 0; i < info->hdr->e_shnum; ++i) { 2415 Elf_Shdr *s = &info->sechdrs[i]; 2416 const char *sname = info->secstrings + s->sh_name; 2417 2418 if ((s->sh_flags & masks[m][0]) != masks[m][0] 2419 || (s->sh_flags & masks[m][1]) 2420 || s->sh_entsize != ~0UL 2421 || !strstarts(sname, ".init")) 2422 continue; 2423 s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i) 2424 | INIT_OFFSET_MASK); 2425 pr_debug("\t%s\n", sname); 2426 } 2427 switch (m) { 2428 case 0: /* executable */ 2429 mod->init_layout.size = debug_align(mod->init_layout.size); 2430 mod->init_layout.text_size = mod->init_layout.size; 2431 break; 2432 case 1: /* RO: text and ro-data */ 2433 mod->init_layout.size = debug_align(mod->init_layout.size); 2434 mod->init_layout.ro_size = mod->init_layout.size; 2435 break; 2436 case 2: 2437 /* 2438 * RO after init doesn't apply to init_layout (only 2439 * core_layout), so it just takes the value of ro_size. 2440 */ 2441 mod->init_layout.ro_after_init_size = mod->init_layout.ro_size; 2442 break; 2443 case 4: /* whole init */ 2444 mod->init_layout.size = debug_align(mod->init_layout.size); 2445 break; 2446 } 2447 } 2448} 2449 2450static void set_license(struct module *mod, const char *license) 2451{ 2452 if (!license) 2453 license = "unspecified"; 2454 2455 if (!license_is_gpl_compatible(license)) { 2456 if (!test_taint(TAINT_PROPRIETARY_MODULE)) 2457 pr_warn("%s: module license '%s' taints kernel.\n", 2458 mod->name, license); 2459 add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 2460 LOCKDEP_NOW_UNRELIABLE); 2461 } 2462} 2463 2464/* Parse tag=value strings from .modinfo section */ 2465static char *next_string(char *string, unsigned long *secsize) 2466{ 2467 /* Skip non-zero chars */ 2468 while (string[0]) { 2469 string++; 2470 if ((*secsize)-- <= 1) 2471 return NULL; 2472 } 2473 2474 /* Skip any zero padding. */ 2475 while (!string[0]) { 2476 string++; 2477 if ((*secsize)-- <= 1) 2478 return NULL; 2479 } 2480 return string; 2481} 2482 2483static char *get_modinfo(struct load_info *info, const char *tag) 2484{ 2485 char *p; 2486 unsigned int taglen = strlen(tag); 2487 Elf_Shdr *infosec = &info->sechdrs[info->index.info]; 2488 unsigned long size = infosec->sh_size; 2489 2490 /* 2491 * get_modinfo() calls made before rewrite_section_headers() 2492 * must use sh_offset, as sh_addr isn't set! 2493 */ 2494 for (p = (char *)info->hdr + infosec->sh_offset; p; p = next_string(p, &size)) { 2495 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=') 2496 return p + taglen + 1; 2497 } 2498 return NULL; 2499} 2500 2501static void setup_modinfo(struct module *mod, struct load_info *info) 2502{ 2503 struct module_attribute *attr; 2504 int i; 2505 2506 for (i = 0; (attr = modinfo_attrs[i]); i++) { 2507 if (attr->setup) 2508 attr->setup(mod, get_modinfo(info, attr->attr.name)); 2509 } 2510} 2511 2512static void free_modinfo(struct module *mod) 2513{ 2514 struct module_attribute *attr; 2515 int i; 2516 2517 for (i = 0; (attr = modinfo_attrs[i]); i++) { 2518 if (attr->free) 2519 attr->free(mod); 2520 } 2521} 2522 2523#ifdef CONFIG_KALLSYMS 2524 2525/* Lookup exported symbol in given range of kernel_symbols */ 2526static const struct kernel_symbol *lookup_exported_symbol(const char *name, 2527 const struct kernel_symbol *start, 2528 const struct kernel_symbol *stop) 2529{ 2530 return bsearch(name, start, stop - start, 2531 sizeof(struct kernel_symbol), cmp_name); 2532} 2533 2534static int is_exported(const char *name, unsigned long value, 2535 const struct module *mod) 2536{ 2537 const struct kernel_symbol *ks; 2538 if (!mod) 2539 ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab); 2540 else 2541 ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms); 2542 2543 return ks != NULL && kernel_symbol_value(ks) == value; 2544} 2545 2546/* As per nm */ 2547static char elf_type(const Elf_Sym *sym, const struct load_info *info) 2548{ 2549 const Elf_Shdr *sechdrs = info->sechdrs; 2550 2551 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) { 2552 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT) 2553 return 'v'; 2554 else 2555 return 'w'; 2556 } 2557 if (sym->st_shndx == SHN_UNDEF) 2558 return 'U'; 2559 if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu) 2560 return 'a'; 2561 if (sym->st_shndx >= SHN_LORESERVE) 2562 return '?'; 2563 if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR) 2564 return 't'; 2565 if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC 2566 && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) { 2567 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE)) 2568 return 'r'; 2569 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL) 2570 return 'g'; 2571 else 2572 return 'd'; 2573 } 2574 if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) { 2575 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL) 2576 return 's'; 2577 else 2578 return 'b'; 2579 } 2580 if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name, 2581 ".debug")) { 2582 return 'n'; 2583 } 2584 return '?'; 2585} 2586 2587static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, 2588 unsigned int shnum, unsigned int pcpundx) 2589{ 2590 const Elf_Shdr *sec; 2591 2592 if (src->st_shndx == SHN_UNDEF 2593 || src->st_shndx >= shnum 2594 || !src->st_name) 2595 return false; 2596 2597#ifdef CONFIG_KALLSYMS_ALL 2598 if (src->st_shndx == pcpundx) 2599 return true; 2600#endif 2601 2602 sec = sechdrs + src->st_shndx; 2603 if (!(sec->sh_flags & SHF_ALLOC) 2604#ifndef CONFIG_KALLSYMS_ALL 2605 || !(sec->sh_flags & SHF_EXECINSTR) 2606#endif 2607 || (sec->sh_entsize & INIT_OFFSET_MASK)) 2608 return false; 2609 2610 return true; 2611} 2612 2613/* 2614 * We only allocate and copy the strings needed by the parts of symtab 2615 * we keep. This is simple, but has the effect of making multiple 2616 * copies of duplicates. We could be more sophisticated, see 2617 * linux-kernel thread starting with 2618 * <73defb5e4bca04a6431392cc341112b1@localhost>. 2619 */ 2620static void layout_symtab(struct module *mod, struct load_info *info) 2621{ 2622 Elf_Shdr *symsect = info->sechdrs + info->index.sym; 2623 Elf_Shdr *strsect = info->sechdrs + info->index.str; 2624 const Elf_Sym *src; 2625 unsigned int i, nsrc, ndst, strtab_size = 0; 2626 2627 /* Put symbol section at end of init part of module. */ 2628 symsect->sh_flags |= SHF_ALLOC; 2629 symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect, 2630 info->index.sym) | INIT_OFFSET_MASK; 2631 pr_debug("\t%s\n", info->secstrings + symsect->sh_name); 2632 2633 src = (void *)info->hdr + symsect->sh_offset; 2634 nsrc = symsect->sh_size / sizeof(*src); 2635 2636 /* Compute total space required for the core symbols' strtab. */ 2637 for (ndst = i = 0; i < nsrc; i++) { 2638 if (i == 0 || is_livepatch_module(mod) || 2639 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, 2640 info->index.pcpu)) { 2641 strtab_size += strlen(&info->strtab[src[i].st_name])+1; 2642 ndst++; 2643 } 2644 } 2645 2646 /* Append room for core symbols at end of core part. */ 2647 info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1); 2648 info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym); 2649 mod->core_layout.size += strtab_size; 2650 mod->core_layout.size = debug_align(mod->core_layout.size); 2651 2652 /* Put string table section at end of init part of module. */ 2653 strsect->sh_flags |= SHF_ALLOC; 2654 strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect, 2655 info->index.str) | INIT_OFFSET_MASK; 2656 pr_debug("\t%s\n", info->secstrings + strsect->sh_name); 2657 2658 /* We'll tack temporary mod_kallsyms on the end. */ 2659 mod->init_layout.size = ALIGN(mod->init_layout.size, 2660 __alignof__(struct mod_kallsyms)); 2661 info->mod_kallsyms_init_off = mod->init_layout.size; 2662 mod->init_layout.size += sizeof(struct mod_kallsyms); 2663 mod->init_layout.size = debug_align(mod->init_layout.size); 2664} 2665 2666/* 2667 * We use the full symtab and strtab which layout_symtab arranged to 2668 * be appended to the init section. Later we switch to the cut-down 2669 * core-only ones. 2670 */ 2671static void add_kallsyms(struct module *mod, const struct load_info *info) 2672{ 2673 unsigned int i, ndst; 2674 const Elf_Sym *src; 2675 Elf_Sym *dst; 2676 char *s; 2677 Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; 2678 2679 /* Set up to point into init section. */ 2680 mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off; 2681 2682 mod->kallsyms->symtab = (void *)symsec->sh_addr; 2683 mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); 2684 /* Make sure we get permanent strtab: don't use info->strtab. */ 2685 mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; 2686 2687 /* Set types up while we still have access to sections. */ 2688 for (i = 0; i < mod->kallsyms->num_symtab; i++) 2689 mod->kallsyms->symtab[i].st_size 2690 = elf_type(&mod->kallsyms->symtab[i], info); 2691 2692 /* Now populate the cut down core kallsyms for after init. */ 2693 mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs; 2694 mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs; 2695 src = mod->kallsyms->symtab; 2696 for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { 2697 if (i == 0 || is_livepatch_module(mod) || 2698 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, 2699 info->index.pcpu)) { 2700 dst[ndst] = src[i]; 2701 dst[ndst++].st_name = s - mod->core_kallsyms.strtab; 2702 s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], 2703 KSYM_NAME_LEN) + 1; 2704 } 2705 } 2706 mod->core_kallsyms.num_symtab = ndst; 2707} 2708#else 2709static inline void layout_symtab(struct module *mod, struct load_info *info) 2710{ 2711} 2712 2713static void add_kallsyms(struct module *mod, const struct load_info *info) 2714{ 2715} 2716#endif /* CONFIG_KALLSYMS */ 2717 2718static void dynamic_debug_setup(struct module *mod, struct _ddebug *debug, unsigned int num) 2719{ 2720 if (!debug) 2721 return; 2722 ddebug_add_module(debug, num, mod->name); 2723} 2724 2725static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug) 2726{ 2727 if (debug) 2728 ddebug_remove_module(mod->name); 2729} 2730 2731void * __weak module_alloc(unsigned long size) 2732{ 2733 return vmalloc_exec(size); 2734} 2735 2736#ifdef CONFIG_DEBUG_KMEMLEAK 2737static void kmemleak_load_module(const struct module *mod, 2738 const struct load_info *info) 2739{ 2740 unsigned int i; 2741 2742 /* only scan the sections containing data */ 2743 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL); 2744 2745 for (i = 1; i < info->hdr->e_shnum; i++) { 2746 /* Scan all writable sections that's not executable */ 2747 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) || 2748 !(info->sechdrs[i].sh_flags & SHF_WRITE) || 2749 (info->sechdrs[i].sh_flags & SHF_EXECINSTR)) 2750 continue; 2751 2752 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr, 2753 info->sechdrs[i].sh_size, GFP_KERNEL); 2754 } 2755} 2756#else 2757static inline void kmemleak_load_module(const struct module *mod, 2758 const struct load_info *info) 2759{ 2760} 2761#endif 2762 2763#ifdef CONFIG_MODULE_SIG 2764static int module_sig_check(struct load_info *info, int flags) 2765{ 2766 int err = -ENOKEY; 2767 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; 2768 const void *mod = info->hdr; 2769 2770 /* 2771 * Require flags == 0, as a module with version information 2772 * removed is no longer the module that was signed 2773 */ 2774 if (flags == 0 && 2775 info->len > markerlen && 2776 memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) { 2777 /* We truncate the module to discard the signature */ 2778 info->len -= markerlen; 2779 err = mod_verify_sig(mod, info); 2780 } 2781 2782 if (!err) { 2783 info->sig_ok = true; 2784 return 0; 2785 } 2786 2787 /* Not having a signature is only an error if we're strict. */ 2788 if (err == -ENOKEY && !is_module_sig_enforced()) 2789 err = 0; 2790 2791 return err; 2792} 2793#else /* !CONFIG_MODULE_SIG */ 2794static int module_sig_check(struct load_info *info, int flags) 2795{ 2796 return 0; 2797} 2798#endif /* !CONFIG_MODULE_SIG */ 2799 2800/* Sanity checks against invalid binaries, wrong arch, weird elf version. */ 2801static int elf_header_check(struct load_info *info) 2802{ 2803 if (info->len < sizeof(*(info->hdr))) 2804 return -ENOEXEC; 2805 2806 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0 2807 || info->hdr->e_type != ET_REL 2808 || !elf_check_arch(info->hdr) 2809 || info->hdr->e_shentsize != sizeof(Elf_Shdr)) 2810 return -ENOEXEC; 2811 2812 if (info->hdr->e_shoff >= info->len 2813 || (info->hdr->e_shnum * sizeof(Elf_Shdr) > 2814 info->len - info->hdr->e_shoff)) 2815 return -ENOEXEC; 2816 2817 return 0; 2818} 2819 2820#define COPY_CHUNK_SIZE (16*PAGE_SIZE) 2821 2822static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len) 2823{ 2824 do { 2825 unsigned long n = min(len, COPY_CHUNK_SIZE); 2826 2827 if (copy_from_user(dst, usrc, n) != 0) 2828 return -EFAULT; 2829 cond_resched(); 2830 dst += n; 2831 usrc += n; 2832 len -= n; 2833 } while (len); 2834 return 0; 2835} 2836 2837#ifdef CONFIG_LIVEPATCH 2838static int check_modinfo_livepatch(struct module *mod, struct load_info *info) 2839{ 2840 if (get_modinfo(info, "livepatch")) { 2841 mod->klp = true; 2842 add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK); 2843 pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n", 2844 mod->name); 2845 } 2846 2847 return 0; 2848} 2849#else /* !CONFIG_LIVEPATCH */ 2850static int check_modinfo_livepatch(struct module *mod, struct load_info *info) 2851{ 2852 if (get_modinfo(info, "livepatch")) { 2853 pr_err("%s: module is marked as livepatch module, but livepatch support is disabled", 2854 mod->name); 2855 return -ENOEXEC; 2856 } 2857 2858 return 0; 2859} 2860#endif /* CONFIG_LIVEPATCH */ 2861 2862static void check_modinfo_retpoline(struct module *mod, struct load_info *info) 2863{ 2864 if (retpoline_module_ok(get_modinfo(info, "retpoline"))) 2865 return; 2866 2867 pr_warn("%s: loading module not compiled with retpoline compiler.\n", 2868 mod->name); 2869} 2870 2871/* Sets info->hdr and info->len. */ 2872static int copy_module_from_user(const void __user *umod, unsigned long len, 2873 struct load_info *info) 2874{ 2875 int err; 2876 2877 info->len = len; 2878 if (info->len < sizeof(*(info->hdr))) 2879 return -ENOEXEC; 2880 2881 err = security_kernel_load_data(LOADING_MODULE); 2882 if (err) 2883 return err; 2884 2885 /* Suck in entire file: we'll want most of it. */ 2886 info->hdr = __vmalloc(info->len, 2887 GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL); 2888 if (!info->hdr) 2889 return -ENOMEM; 2890 2891 if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) { 2892 vfree(info->hdr); 2893 return -EFAULT; 2894 } 2895 2896 return 0; 2897} 2898 2899static void free_copy(struct load_info *info) 2900{ 2901 vfree(info->hdr); 2902} 2903 2904static int rewrite_section_headers(struct load_info *info, int flags) 2905{ 2906 unsigned int i; 2907 2908 /* This should always be true, but let's be sure. */ 2909 info->sechdrs[0].sh_addr = 0; 2910 2911 for (i = 1; i < info->hdr->e_shnum; i++) { 2912 Elf_Shdr *shdr = &info->sechdrs[i]; 2913 if (shdr->sh_type != SHT_NOBITS 2914 && info->len < shdr->sh_offset + shdr->sh_size) { 2915 pr_err("Module len %lu truncated\n", info->len); 2916 return -ENOEXEC; 2917 } 2918 2919 /* Mark all sections sh_addr with their address in the 2920 temporary image. */ 2921 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset; 2922 2923#ifndef CONFIG_MODULE_UNLOAD 2924 /* Don't load .exit sections */ 2925 if (strstarts(info->secstrings+shdr->sh_name, ".exit")) 2926 shdr->sh_flags &= ~(unsigned long)SHF_ALLOC; 2927#endif 2928 } 2929 2930 /* Track but don't keep modinfo and version sections. */ 2931 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; 2932 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC; 2933 2934 return 0; 2935} 2936 2937/* 2938 * Set up our basic convenience variables (pointers to section headers, 2939 * search for module section index etc), and do some basic section 2940 * verification. 2941 * 2942 * Set info->mod to the temporary copy of the module in info->hdr. The final one 2943 * will be allocated in move_module(). 2944 */ 2945static int setup_load_info(struct load_info *info, int flags) 2946{ 2947 unsigned int i; 2948 2949 /* Set up the convenience variables */ 2950 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; 2951 info->secstrings = (void *)info->hdr 2952 + info->sechdrs[info->hdr->e_shstrndx].sh_offset; 2953 2954 /* Try to find a name early so we can log errors with a module name */ 2955 info->index.info = find_sec(info, ".modinfo"); 2956 if (!info->index.info) 2957 info->name = "(missing .modinfo section)"; 2958 else 2959 info->name = get_modinfo(info, "name"); 2960 2961 /* Find internal symbols and strings. */ 2962 for (i = 1; i < info->hdr->e_shnum; i++) { 2963 if (info->sechdrs[i].sh_type == SHT_SYMTAB) { 2964 info->index.sym = i; 2965 info->index.str = info->sechdrs[i].sh_link; 2966 info->strtab = (char *)info->hdr 2967 + info->sechdrs[info->index.str].sh_offset; 2968 break; 2969 } 2970 } 2971 2972 if (info->index.sym == 0) { 2973 pr_warn("%s: module has no symbols (stripped?)\n", info->name); 2974 return -ENOEXEC; 2975 } 2976 2977 info->index.mod = find_sec(info, ".gnu.linkonce.this_module"); 2978 if (!info->index.mod) { 2979 pr_warn("%s: No module found in object\n", 2980 info->name ?: "(missing .modinfo name field)"); 2981 return -ENOEXEC; 2982 } 2983 /* This is temporary: point mod into copy of data. */ 2984 info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset; 2985 2986 /* 2987 * If we didn't load the .modinfo 'name' field earlier, fall back to 2988 * on-disk struct mod 'name' field. 2989 */ 2990 if (!info->name) 2991 info->name = info->mod->name; 2992 2993 if (flags & MODULE_INIT_IGNORE_MODVERSIONS) 2994 info->index.vers = 0; /* Pretend no __versions section! */ 2995 else 2996 info->index.vers = find_sec(info, "__versions"); 2997 2998 info->index.pcpu = find_pcpusec(info); 2999 3000 return 0; 3001} 3002 3003static int check_modinfo(struct module *mod, struct load_info *info, int flags) 3004{ 3005 const char *modmagic = get_modinfo(info, "vermagic"); 3006 int err; 3007 3008 if (flags & MODULE_INIT_IGNORE_VERMAGIC) 3009 modmagic = NULL; 3010 3011 /* This is allowed: modprobe --force will invalidate it. */ 3012 if (!modmagic) { 3013 err = try_to_force_load(mod, "bad vermagic"); 3014 if (err) 3015 return err; 3016 } else if (!same_magic(modmagic, vermagic, info->index.vers)) { 3017 pr_err("%s: version magic '%s' should be '%s'\n", 3018 info->name, modmagic, vermagic); 3019 return -ENOEXEC; 3020 } 3021 3022 if (!get_modinfo(info, "intree")) { 3023 if (!test_taint(TAINT_OOT_MODULE)) 3024 pr_warn("%s: loading out-of-tree module taints kernel.\n", 3025 mod->name); 3026 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); 3027 } 3028 3029 check_modinfo_retpoline(mod, info); 3030 3031 if (get_modinfo(info, "staging")) { 3032 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK); 3033 pr_warn("%s: module is from the staging directory, the quality " 3034 "is unknown, you have been warned.\n", mod->name); 3035 } 3036 3037 err = check_modinfo_livepatch(mod, info); 3038 if (err) 3039 return err; 3040 3041 /* Set up license info based on the info section */ 3042 set_license(mod, get_modinfo(info, "license")); 3043 3044 return 0; 3045} 3046 3047static int find_module_sections(struct module *mod, struct load_info *info) 3048{ 3049 mod->kp = section_objs(info, "__param", 3050 sizeof(*mod->kp), &mod->num_kp); 3051 mod->syms = section_objs(info, "__ksymtab", 3052 sizeof(*mod->syms), &mod->num_syms); 3053 mod->crcs = section_addr(info, "__kcrctab"); 3054 mod->gpl_syms = section_objs(info, "__ksymtab_gpl", 3055 sizeof(*mod->gpl_syms), 3056 &mod->num_gpl_syms); 3057 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl"); 3058 mod->gpl_future_syms = section_objs(info, 3059 "__ksymtab_gpl_future", 3060 sizeof(*mod->gpl_future_syms), 3061 &mod->num_gpl_future_syms); 3062 mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future"); 3063 3064#ifdef CONFIG_UNUSED_SYMBOLS 3065 mod->unused_syms = section_objs(info, "__ksymtab_unused", 3066 sizeof(*mod->unused_syms), 3067 &mod->num_unused_syms); 3068 mod->unused_crcs = section_addr(info, "__kcrctab_unused"); 3069 mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl", 3070 sizeof(*mod->unused_gpl_syms), 3071 &mod->num_unused_gpl_syms); 3072 mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl"); 3073#endif 3074#ifdef CONFIG_CONSTRUCTORS 3075 mod->ctors = section_objs(info, ".ctors", 3076 sizeof(*mod->ctors), &mod->num_ctors); 3077 if (!mod->ctors) 3078 mod->ctors = section_objs(info, ".init_array", 3079 sizeof(*mod->ctors), &mod->num_ctors); 3080 else if (find_sec(info, ".init_array")) { 3081 /* 3082 * This shouldn't happen with same compiler and binutils 3083 * building all parts of the module. 3084 */ 3085 pr_warn("%s: has both .ctors and .init_array.\n", 3086 mod->name); 3087 return -EINVAL; 3088 } 3089#endif 3090 3091#ifdef CONFIG_TRACEPOINTS 3092 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs", 3093 sizeof(*mod->tracepoints_ptrs), 3094 &mod->num_tracepoints); 3095#endif 3096#ifdef CONFIG_BPF_EVENTS 3097 mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map", 3098 sizeof(*mod->bpf_raw_events), 3099 &mod->num_bpf_raw_events); 3100#endif 3101#ifdef CONFIG_JUMP_LABEL 3102 mod->jump_entries = section_objs(info, "__jump_table", 3103 sizeof(*mod->jump_entries), 3104 &mod->num_jump_entries); 3105#endif 3106#ifdef CONFIG_EVENT_TRACING 3107 mod->trace_events = section_objs(info, "_ftrace_events", 3108 sizeof(*mod->trace_events), 3109 &mod->num_trace_events); 3110 mod->trace_evals = section_objs(info, "_ftrace_eval_map", 3111 sizeof(*mod->trace_evals), 3112 &mod->num_trace_evals); 3113#endif 3114#ifdef CONFIG_TRACING 3115 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", 3116 sizeof(*mod->trace_bprintk_fmt_start), 3117 &mod->num_trace_bprintk_fmt); 3118#endif 3119#ifdef CONFIG_FTRACE_MCOUNT_RECORD 3120 /* sechdrs[0].sh_size is always zero */ 3121 mod->ftrace_callsites = section_objs(info, "__mcount_loc", 3122 sizeof(*mod->ftrace_callsites), 3123 &mod->num_ftrace_callsites); 3124#endif 3125#ifdef CONFIG_FUNCTION_ERROR_INJECTION 3126 mod->ei_funcs = section_objs(info, "_error_injection_whitelist", 3127 sizeof(*mod->ei_funcs), 3128 &mod->num_ei_funcs); 3129#endif 3130 mod->extable = section_objs(info, "__ex_table", 3131 sizeof(*mod->extable), &mod->num_exentries); 3132 3133 if (section_addr(info, "__obsparm")) 3134 pr_warn("%s: Ignoring obsolete parameters\n", mod->name); 3135 3136 info->debug = section_objs(info, "__verbose", 3137 sizeof(*info->debug), &info->num_debug); 3138 3139 return 0; 3140} 3141 3142static int move_module(struct module *mod, struct load_info *info) 3143{ 3144 int i; 3145 void *ptr; 3146 3147 /* Do the allocs. */ 3148 ptr = module_alloc(mod->core_layout.size); 3149 /* 3150 * The pointer to this block is stored in the module structure 3151 * which is inside the block. Just mark it as not being a 3152 * leak. 3153 */ 3154 kmemleak_not_leak(ptr); 3155 if (!ptr) 3156 return -ENOMEM; 3157 3158 memset(ptr, 0, mod->core_layout.size); 3159 mod->core_layout.base = ptr; 3160 3161 if (mod->init_layout.size) { 3162 ptr = module_alloc(mod->init_layout.size); 3163 /* 3164 * The pointer to this block is stored in the module structure 3165 * which is inside the block. This block doesn't need to be 3166 * scanned as it contains data and code that will be freed 3167 * after the module is initialized. 3168 */ 3169 kmemleak_ignore(ptr); 3170 if (!ptr) { 3171 module_memfree(mod->core_layout.base); 3172 return -ENOMEM; 3173 } 3174 memset(ptr, 0, mod->init_layout.size); 3175 mod->init_layout.base = ptr; 3176 } else 3177 mod->init_layout.base = NULL; 3178 3179 /* Transfer each section which specifies SHF_ALLOC */ 3180 pr_debug("final section addresses:\n"); 3181 for (i = 0; i < info->hdr->e_shnum; i++) { 3182 void *dest; 3183 Elf_Shdr *shdr = &info->sechdrs[i]; 3184 3185 if (!(shdr->sh_flags & SHF_ALLOC)) 3186 continue; 3187 3188 if (shdr->sh_entsize & INIT_OFFSET_MASK) 3189 dest = mod->init_layout.base 3190 + (shdr->sh_entsize & ~INIT_OFFSET_MASK); 3191 else 3192 dest = mod->core_layout.base + shdr->sh_entsize; 3193 3194 if (shdr->sh_type != SHT_NOBITS) 3195 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); 3196 /* Update sh_addr to point to copy in image. */ 3197 shdr->sh_addr = (unsigned long)dest; 3198 pr_debug("\t0x%lx %s\n", 3199 (long)shdr->sh_addr, info->secstrings + shdr->sh_name); 3200 } 3201 3202 return 0; 3203} 3204 3205static int check_module_license_and_versions(struct module *mod) 3206{ 3207 int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE); 3208 3209 /* 3210 * ndiswrapper is under GPL by itself, but loads proprietary modules. 3211 * Don't use add_taint_module(), as it would prevent ndiswrapper from 3212 * using GPL-only symbols it needs. 3213 */ 3214 if (strcmp(mod->name, "ndiswrapper") == 0) 3215 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE); 3216 3217 /* driverloader was caught wrongly pretending to be under GPL */ 3218 if (strcmp(mod->name, "driverloader") == 0) 3219 add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 3220 LOCKDEP_NOW_UNRELIABLE); 3221 3222 /* lve claims to be GPL but upstream won't provide source */ 3223 if (strcmp(mod->name, "lve") == 0) 3224 add_taint_module(mod, TAINT_PROPRIETARY_MODULE, 3225 LOCKDEP_NOW_UNRELIABLE); 3226 3227 if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE)) 3228 pr_warn("%s: module license taints kernel.\n", mod->name); 3229 3230#ifdef CONFIG_MODVERSIONS 3231 if ((mod->num_syms && !mod->crcs) 3232 || (mod->num_gpl_syms && !mod->gpl_crcs) 3233 || (mod->num_gpl_future_syms && !mod->gpl_future_crcs) 3234#ifdef CONFIG_UNUSED_SYMBOLS 3235 || (mod->num_unused_syms && !mod->unused_crcs) 3236 || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs) 3237#endif 3238 ) { 3239 return try_to_force_load(mod, 3240 "no versions for exported symbols"); 3241 } 3242#endif 3243 return 0; 3244} 3245 3246static void flush_module_icache(const struct module *mod) 3247{ 3248 mm_segment_t old_fs; 3249 3250 /* flush the icache in correct context */ 3251 old_fs = get_fs(); 3252 set_fs(KERNEL_DS); 3253 3254 /* 3255 * Flush the instruction cache, since we've played with text. 3256 * Do it before processing of module parameters, so the module 3257 * can provide parameter accessor functions of its own. 3258 */ 3259 if (mod->init_layout.base) 3260 flush_icache_range((unsigned long)mod->init_layout.base, 3261 (unsigned long)mod->init_layout.base 3262 + mod->init_layout.size); 3263 flush_icache_range((unsigned long)mod->core_layout.base, 3264 (unsigned long)mod->core_layout.base + mod->core_layout.size); 3265 3266 set_fs(old_fs); 3267} 3268 3269int __weak module_frob_arch_sections(Elf_Ehdr *hdr, 3270 Elf_Shdr *sechdrs, 3271 char *secstrings, 3272 struct module *mod) 3273{ 3274 return 0; 3275} 3276 3277/* module_blacklist is a comma-separated list of module names */ 3278static char *module_blacklist; 3279static bool blacklisted(const char *module_name) 3280{ 3281 const char *p; 3282 size_t len; 3283 3284 if (!module_blacklist) 3285 return false; 3286 3287 for (p = module_blacklist; *p; p += len) { 3288 len = strcspn(p, ","); 3289 if (strlen(module_name) == len && !memcmp(module_name, p, len)) 3290 return true; 3291 if (p[len] == ',') 3292 len++; 3293 } 3294 return false; 3295} 3296core_param(module_blacklist, module_blacklist, charp, 0400); 3297 3298static struct module *layout_and_allocate(struct load_info *info, int flags) 3299{ 3300 struct module *mod; 3301 unsigned int ndx; 3302 int err; 3303 3304 err = check_modinfo(info->mod, info, flags); 3305 if (err) 3306 return ERR_PTR(err); 3307 3308 /* Allow arches to frob section contents and sizes. */ 3309 err = module_frob_arch_sections(info->hdr, info->sechdrs, 3310 info->secstrings, info->mod); 3311 if (err < 0) 3312 return ERR_PTR(err); 3313 3314 /* We will do a special allocation for per-cpu sections later. */ 3315 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; 3316 3317 /* 3318 * Mark ro_after_init section with SHF_RO_AFTER_INIT so that 3319 * layout_sections() can put it in the right place. 3320 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set. 3321 */ 3322 ndx = find_sec(info, ".data..ro_after_init"); 3323 if (ndx) 3324 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; 3325 /* 3326 * Mark the __jump_table section as ro_after_init as well: these data 3327 * structures are never modified, with the exception of entries that 3328 * refer to code in the __init section, which are annotated as such 3329 * at module load time. 3330 */ 3331 ndx = find_sec(info, "__jump_table"); 3332 if (ndx) 3333 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; 3334 3335 /* Determine total sizes, and put offsets in sh_entsize. For now 3336 this is done generically; there doesn't appear to be any 3337 special cases for the architectures. */ 3338 layout_sections(info->mod, info); 3339 layout_symtab(info->mod, info); 3340 3341 /* Allocate and move to the final place */ 3342 err = move_module(info->mod, info); 3343 if (err) 3344 return ERR_PTR(err); 3345 3346 /* Module has been copied to its final place now: return it. */ 3347 mod = (void *)info->sechdrs[info->index.mod].sh_addr; 3348 kmemleak_load_module(mod, info); 3349 return mod; 3350} 3351 3352/* mod is no longer valid after this! */ 3353static void module_deallocate(struct module *mod, struct load_info *info) 3354{ 3355 percpu_modfree(mod); 3356 module_arch_freeing_init(mod); 3357 module_memfree(mod->init_layout.base); 3358 module_memfree(mod->core_layout.base); 3359} 3360 3361int __weak module_finalize(const Elf_Ehdr *hdr, 3362 const Elf_Shdr *sechdrs, 3363 struct module *me) 3364{ 3365 return 0; 3366} 3367 3368static int post_relocation(struct module *mod, const struct load_info *info) 3369{ 3370 /* Sort exception table now relocations are done. */ 3371 sort_extable(mod->extable, mod->extable + mod->num_exentries); 3372 3373 /* Copy relocated percpu area over. */ 3374 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr, 3375 info->sechdrs[info->index.pcpu].sh_size); 3376 3377 /* Setup kallsyms-specific fields. */ 3378 add_kallsyms(mod, info); 3379 3380 /* Arch-specific module finalizing. */ 3381 return module_finalize(info->hdr, info->sechdrs, mod); 3382} 3383 3384/* Is this module of this name done loading? No locks held. */ 3385static bool finished_loading(const char *name) 3386{ 3387 struct module *mod; 3388 bool ret; 3389 3390 /* 3391 * The module_mutex should not be a heavily contended lock; 3392 * if we get the occasional sleep here, we'll go an extra iteration 3393 * in the wait_event_interruptible(), which is harmless. 3394 */ 3395 sched_annotate_sleep(); 3396 mutex_lock(&module_mutex); 3397 mod = find_module_all(name, strlen(name), true); 3398 ret = !mod || mod->state == MODULE_STATE_LIVE 3399 || mod->state == MODULE_STATE_GOING; 3400 mutex_unlock(&module_mutex); 3401 3402 return ret; 3403} 3404 3405/* Call module constructors. */ 3406static void do_mod_ctors(struct module *mod) 3407{ 3408#ifdef CONFIG_CONSTRUCTORS 3409 unsigned long i; 3410 3411 for (i = 0; i < mod->num_ctors; i++) 3412 mod->ctors[i](); 3413#endif 3414} 3415 3416/* For freeing module_init on success, in case kallsyms traversing */ 3417struct mod_initfree { 3418 struct rcu_head rcu; 3419 void *module_init; 3420}; 3421 3422static void do_free_init(struct rcu_head *head) 3423{ 3424 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu); 3425 module_memfree(m->module_init); 3426 kfree(m); 3427} 3428 3429/* 3430 * This is where the real work happens. 3431 * 3432 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb 3433 * helper command 'lx-symbols'. 3434 */ 3435static noinline int do_init_module(struct module *mod) 3436{ 3437 int ret = 0; 3438 struct mod_initfree *freeinit; 3439 3440 freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL); 3441 if (!freeinit) { 3442 ret = -ENOMEM; 3443 goto fail; 3444 } 3445 freeinit->module_init = mod->init_layout.base; 3446 3447 /* 3448 * We want to find out whether @mod uses async during init. Clear 3449 * PF_USED_ASYNC. async_schedule*() will set it. 3450 */ 3451 current->flags &= ~PF_USED_ASYNC; 3452 3453 do_mod_ctors(mod); 3454 /* Start the module */ 3455 if (mod->init != NULL) 3456 ret = do_one_initcall(mod->init); 3457 if (ret < 0) { 3458 goto fail_free_freeinit; 3459 } 3460 if (ret > 0) { 3461 pr_warn("%s: '%s'->init suspiciously returned %d, it should " 3462 "follow 0/-E convention\n" 3463 "%s: loading module anyway...\n", 3464 __func__, mod->name, ret, __func__); 3465 dump_stack(); 3466 } 3467 3468 /* Now it's a first class citizen! */ 3469 mod->state = MODULE_STATE_LIVE; 3470 blocking_notifier_call_chain(&module_notify_list, 3471 MODULE_STATE_LIVE, mod); 3472 3473 /* 3474 * We need to finish all async code before the module init sequence 3475 * is done. This has potential to deadlock. For example, a newly 3476 * detected block device can trigger request_module() of the 3477 * default iosched from async probing task. Once userland helper 3478 * reaches here, async_synchronize_full() will wait on the async 3479 * task waiting on request_module() and deadlock. 3480 * 3481 * This deadlock is avoided by perfomring async_synchronize_full() 3482 * iff module init queued any async jobs. This isn't a full 3483 * solution as it will deadlock the same if module loading from 3484 * async jobs nests more than once; however, due to the various 3485 * constraints, this hack seems to be the best option for now. 3486 * Please refer to the following thread for details. 3487 * 3488 * http://thread.gmane.org/gmane.linux.kernel/1420814 3489 */ 3490 if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC)) 3491 async_synchronize_full(); 3492 3493 ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base + 3494 mod->init_layout.size); 3495 mutex_lock(&module_mutex); 3496 /* Drop initial reference. */ 3497 module_put(mod); 3498 trim_init_extable(mod); 3499#ifdef CONFIG_KALLSYMS 3500 /* Switch to core kallsyms now init is done: kallsyms may be walking! */ 3501 rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); 3502#endif 3503 module_enable_ro(mod, true); 3504 mod_tree_remove_init(mod); 3505 disable_ro_nx(&mod->init_layout); 3506 module_arch_freeing_init(mod); 3507 mod->init_layout.base = NULL; 3508 mod->init_layout.size = 0; 3509 mod->init_layout.ro_size = 0; 3510 mod->init_layout.ro_after_init_size = 0; 3511 mod->init_layout.text_size = 0; 3512 /* 3513 * We want to free module_init, but be aware that kallsyms may be 3514 * walking this with preempt disabled. In all the failure paths, we 3515 * call synchronize_rcu(), but we don't want to slow down the success 3516 * path, so use actual RCU here. 3517 * Note that module_alloc() on most architectures creates W+X page 3518 * mappings which won't be cleaned up until do_free_init() runs. Any 3519 * code such as mark_rodata_ro() which depends on those mappings to 3520 * be cleaned up needs to sync with the queued work - ie 3521 * rcu_barrier() 3522 */ 3523 call_rcu(&freeinit->rcu, do_free_init); 3524 mutex_unlock(&module_mutex); 3525 wake_up_all(&module_wq); 3526 3527 return 0; 3528 3529fail_free_freeinit: 3530 kfree(freeinit); 3531fail: 3532 /* Try to protect us from buggy refcounters. */ 3533 mod->state = MODULE_STATE_GOING; 3534 synchronize_rcu(); 3535 module_put(mod); 3536 blocking_notifier_call_chain(&module_notify_list, 3537 MODULE_STATE_GOING, mod); 3538 klp_module_going(mod); 3539 ftrace_release_mod(mod); 3540 free_module(mod); 3541 wake_up_all(&module_wq); 3542 return ret; 3543} 3544 3545static int may_init_module(void) 3546{ 3547 if (!capable(CAP_SYS_MODULE) || modules_disabled) 3548 return -EPERM; 3549 3550 return 0; 3551} 3552 3553/* 3554 * We try to place it in the list now to make sure it's unique before 3555 * we dedicate too many resources. In particular, temporary percpu 3556 * memory exhaustion. 3557 */ 3558static int add_unformed_module(struct module *mod) 3559{ 3560 int err; 3561 struct module *old; 3562 3563 mod->state = MODULE_STATE_UNFORMED; 3564 3565again: 3566 mutex_lock(&module_mutex); 3567 old = find_module_all(mod->name, strlen(mod->name), true); 3568 if (old != NULL) { 3569 if (old->state == MODULE_STATE_COMING 3570 || old->state == MODULE_STATE_UNFORMED) { 3571 /* Wait in case it fails to load. */ 3572 mutex_unlock(&module_mutex); 3573 err = wait_event_interruptible(module_wq, 3574 finished_loading(mod->name)); 3575 if (err) 3576 goto out_unlocked; 3577 goto again; 3578 } 3579 err = -EEXIST; 3580 goto out; 3581 } 3582 mod_update_bounds(mod); 3583 list_add_rcu(&mod->list, &modules); 3584 mod_tree_insert(mod); 3585 err = 0; 3586 3587out: 3588 mutex_unlock(&module_mutex); 3589out_unlocked: 3590 return err; 3591} 3592 3593static int complete_formation(struct module *mod, struct load_info *info) 3594{ 3595 int err; 3596 3597 mutex_lock(&module_mutex); 3598 3599 /* Find duplicate symbols (must be called under lock). */ 3600 err = verify_exported_symbols(mod); 3601 if (err < 0) 3602 goto out; 3603 3604 /* This relies on module_mutex for list integrity. */ 3605 module_bug_finalize(info->hdr, info->sechdrs, mod); 3606 3607 module_enable_ro(mod, false); 3608 module_enable_nx(mod); 3609 3610 /* Mark state as coming so strong_try_module_get() ignores us, 3611 * but kallsyms etc. can see us. */ 3612 mod->state = MODULE_STATE_COMING; 3613 mutex_unlock(&module_mutex); 3614 3615 return 0; 3616 3617out: 3618 mutex_unlock(&module_mutex); 3619 return err; 3620} 3621 3622static int prepare_coming_module(struct module *mod) 3623{ 3624 int err; 3625 3626 ftrace_module_enable(mod); 3627 err = klp_module_coming(mod); 3628 if (err) 3629 return err; 3630 3631 blocking_notifier_call_chain(&module_notify_list, 3632 MODULE_STATE_COMING, mod); 3633 return 0; 3634} 3635 3636static int unknown_module_param_cb(char *param, char *val, const char *modname, 3637 void *arg) 3638{ 3639 struct module *mod = arg; 3640 int ret; 3641 3642 if (strcmp(param, "async_probe") == 0) { 3643 mod->async_probe_requested = true; 3644 return 0; 3645 } 3646 3647 /* Check for magic 'dyndbg' arg */ 3648 ret = ddebug_dyndbg_module_param_cb(param, val, modname); 3649 if (ret != 0) 3650 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param); 3651 return 0; 3652} 3653 3654/* Allocate and load the module: note that size of section 0 is always 3655 zero, and we rely on this for optional sections. */ 3656static int load_module(struct load_info *info, const char __user *uargs, 3657 int flags) 3658{ 3659 struct module *mod; 3660 long err = 0; 3661 char *after_dashes; 3662 3663 err = elf_header_check(info); 3664 if (err) 3665 goto free_copy; 3666 3667 err = setup_load_info(info, flags); 3668 if (err) 3669 goto free_copy; 3670 3671 if (blacklisted(info->name)) { 3672 err = -EPERM; 3673 goto free_copy; 3674 } 3675 3676 err = module_sig_check(info, flags); 3677 if (err) 3678 goto free_copy; 3679 3680 err = rewrite_section_headers(info, flags); 3681 if (err) 3682 goto free_copy; 3683 3684 /* Check module struct version now, before we try to use module. */ 3685 if (!check_modstruct_version(info, info->mod)) { 3686 err = -ENOEXEC; 3687 goto free_copy; 3688 } 3689 3690 /* Figure out module layout, and allocate all the memory. */ 3691 mod = layout_and_allocate(info, flags); 3692 if (IS_ERR(mod)) { 3693 err = PTR_ERR(mod); 3694 goto free_copy; 3695 } 3696 3697 audit_log_kern_module(mod->name); 3698 3699 /* Reserve our place in the list. */ 3700 err = add_unformed_module(mod); 3701 if (err) 3702 goto free_module; 3703 3704#ifdef CONFIG_MODULE_SIG 3705 mod->sig_ok = info->sig_ok; 3706 if (!mod->sig_ok) { 3707 pr_notice_once("%s: module verification failed: signature " 3708 "and/or required key missing - tainting " 3709 "kernel\n", mod->name); 3710 add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK); 3711 } 3712#endif 3713 3714 /* To avoid stressing percpu allocator, do this once we're unique. */ 3715 err = percpu_modalloc(mod, info); 3716 if (err) 3717 goto unlink_mod; 3718 3719 /* Now module is in final location, initialize linked lists, etc. */ 3720 err = module_unload_init(mod); 3721 if (err) 3722 goto unlink_mod; 3723 3724 init_param_lock(mod); 3725 3726 /* Now we've got everything in the final locations, we can 3727 * find optional sections. */ 3728 err = find_module_sections(mod, info); 3729 if (err) 3730 goto free_unload; 3731 3732 err = check_module_license_and_versions(mod); 3733 if (err) 3734 goto free_unload; 3735 3736 /* Set up MODINFO_ATTR fields */ 3737 setup_modinfo(mod, info); 3738 3739 /* Fix up syms, so that st_value is a pointer to location. */ 3740 err = simplify_symbols(mod, info); 3741 if (err < 0) 3742 goto free_modinfo; 3743 3744 err = apply_relocations(mod, info); 3745 if (err < 0) 3746 goto free_modinfo; 3747 3748 err = post_relocation(mod, info); 3749 if (err < 0) 3750 goto free_modinfo; 3751 3752 flush_module_icache(mod); 3753 3754 /* Now copy in args */ 3755 mod->args = strndup_user(uargs, ~0UL >> 1); 3756 if (IS_ERR(mod->args)) { 3757 err = PTR_ERR(mod->args); 3758 goto free_arch_cleanup; 3759 } 3760 3761 dynamic_debug_setup(mod, info->debug, info->num_debug); 3762 3763 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */ 3764 ftrace_module_init(mod); 3765 3766 /* Finally it's fully formed, ready to start executing. */ 3767 err = complete_formation(mod, info); 3768 if (err) 3769 goto ddebug_cleanup; 3770 3771 err = prepare_coming_module(mod); 3772 if (err) 3773 goto bug_cleanup; 3774 3775 /* Module is ready to execute: parsing args may do that. */ 3776 after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, 3777 -32768, 32767, mod, 3778 unknown_module_param_cb); 3779 if (IS_ERR(after_dashes)) { 3780 err = PTR_ERR(after_dashes); 3781 goto coming_cleanup; 3782 } else if (after_dashes) { 3783 pr_warn("%s: parameters '%s' after `--' ignored\n", 3784 mod->name, after_dashes); 3785 } 3786 3787 /* Link in to sysfs. */ 3788 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp); 3789 if (err < 0) 3790 goto coming_cleanup; 3791 3792 if (is_livepatch_module(mod)) { 3793 err = copy_module_elf(mod, info); 3794 if (err < 0) 3795 goto sysfs_cleanup; 3796 } 3797 3798 /* Get rid of temporary copy. */ 3799 free_copy(info); 3800 3801 /* Done! */ 3802 trace_module_load(mod); 3803 3804 return do_init_module(mod); 3805 3806 sysfs_cleanup: 3807 mod_sysfs_teardown(mod); 3808 coming_cleanup: 3809 mod->state = MODULE_STATE_GOING; 3810 destroy_params(mod->kp, mod->num_kp); 3811 blocking_notifier_call_chain(&module_notify_list, 3812 MODULE_STATE_GOING, mod); 3813 klp_module_going(mod); 3814 bug_cleanup: 3815 /* module_bug_cleanup needs module_mutex protection */ 3816 mutex_lock(&module_mutex); 3817 module_bug_cleanup(mod); 3818 mutex_unlock(&module_mutex); 3819 3820 /* we can't deallocate the module until we clear memory protection */ 3821 module_disable_ro(mod); 3822 module_disable_nx(mod); 3823 3824 ddebug_cleanup: 3825 ftrace_release_mod(mod); 3826 dynamic_debug_remove(mod, info->debug); 3827 synchronize_rcu(); 3828 kfree(mod->args); 3829 free_arch_cleanup: 3830 module_arch_cleanup(mod); 3831 free_modinfo: 3832 free_modinfo(mod); 3833 free_unload: 3834 module_unload_free(mod); 3835 unlink_mod: 3836 mutex_lock(&module_mutex); 3837 /* Unlink carefully: kallsyms could be walking list. */ 3838 list_del_rcu(&mod->list); 3839 mod_tree_remove(mod); 3840 wake_up_all(&module_wq); 3841 /* Wait for RCU-sched synchronizing before releasing mod->list. */ 3842 synchronize_rcu(); 3843 mutex_unlock(&module_mutex); 3844 free_module: 3845 /* Free lock-classes; relies on the preceding sync_rcu() */ 3846 lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); 3847 3848 module_deallocate(mod, info); 3849 free_copy: 3850 free_copy(info); 3851 return err; 3852} 3853 3854SYSCALL_DEFINE3(init_module, void __user *, umod, 3855 unsigned long, len, const char __user *, uargs) 3856{ 3857 int err; 3858 struct load_info info = { }; 3859 3860 err = may_init_module(); 3861 if (err) 3862 return err; 3863 3864 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n", 3865 umod, len, uargs); 3866 3867 err = copy_module_from_user(umod, len, &info); 3868 if (err) 3869 return err; 3870 3871 return load_module(&info, uargs, 0); 3872} 3873 3874SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) 3875{ 3876 struct load_info info = { }; 3877 loff_t size; 3878 void *hdr; 3879 int err; 3880 3881 err = may_init_module(); 3882 if (err) 3883 return err; 3884 3885 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); 3886 3887 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS 3888 |MODULE_INIT_IGNORE_VERMAGIC)) 3889 return -EINVAL; 3890 3891 err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX, 3892 READING_MODULE); 3893 if (err) 3894 return err; 3895 info.hdr = hdr; 3896 info.len = size; 3897 3898 return load_module(&info, uargs, flags); 3899} 3900 3901static inline int within(unsigned long addr, void *start, unsigned long size) 3902{ 3903 return ((void *)addr >= start && (void *)addr < start + size); 3904} 3905 3906#ifdef CONFIG_KALLSYMS 3907/* 3908 * This ignores the intensely annoying "mapping symbols" found 3909 * in ARM ELF files: $a, $t and $d. 3910 */ 3911static inline int is_arm_mapping_symbol(const char *str) 3912{ 3913 if (str[0] == '.' && str[1] == 'L') 3914 return true; 3915 return str[0] == '$' && strchr("axtd", str[1]) 3916 && (str[2] == '\0' || str[2] == '.'); 3917} 3918 3919static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum) 3920{ 3921 return kallsyms->strtab + kallsyms->symtab[symnum].st_name; 3922} 3923 3924/* 3925 * Given a module and address, find the corresponding symbol and return its name 3926 * while providing its size and offset if needed. 3927 */ 3928static const char *find_kallsyms_symbol(struct module *mod, 3929 unsigned long addr, 3930 unsigned long *size, 3931 unsigned long *offset) 3932{ 3933 unsigned int i, best = 0; 3934 unsigned long nextval, bestval; 3935 struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); 3936 3937 /* At worse, next value is at end of module */ 3938 if (within_module_init(addr, mod)) 3939 nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size; 3940 else 3941 nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size; 3942 3943 bestval = kallsyms_symbol_value(&kallsyms->symtab[best]); 3944 3945 /* Scan for closest preceding symbol, and next symbol. (ELF 3946 starts real symbols at 1). */ 3947 for (i = 1; i < kallsyms->num_symtab; i++) { 3948 const Elf_Sym *sym = &kallsyms->symtab[i]; 3949 unsigned long thisval = kallsyms_symbol_value(sym); 3950 3951 if (sym->st_shndx == SHN_UNDEF) 3952 continue; 3953 3954 /* We ignore unnamed symbols: they're uninformative 3955 * and inserted at a whim. */ 3956 if (*kallsyms_symbol_name(kallsyms, i) == '\0' 3957 || is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i))) 3958 continue; 3959 3960 if (thisval <= addr && thisval > bestval) { 3961 best = i; 3962 bestval = thisval; 3963 } 3964 if (thisval > addr && thisval < nextval) 3965 nextval = thisval; 3966 } 3967 3968 if (!best) 3969 return NULL; 3970 3971 if (size) 3972 *size = nextval - bestval; 3973 if (offset) 3974 *offset = addr - bestval; 3975 3976 return kallsyms_symbol_name(kallsyms, best); 3977} 3978 3979void * __weak dereference_module_function_descriptor(struct module *mod, 3980 void *ptr) 3981{ 3982 return ptr; 3983} 3984 3985/* For kallsyms to ask for address resolution. NULL means not found. Careful 3986 * not to lock to avoid deadlock on oopses, simply disable preemption. */ 3987const char *module_address_lookup(unsigned long addr, 3988 unsigned long *size, 3989 unsigned long *offset, 3990 char **modname, 3991 char *namebuf) 3992{ 3993 const char *ret = NULL; 3994 struct module *mod; 3995 3996 preempt_disable(); 3997 mod = __module_address(addr); 3998 if (mod) { 3999 if (modname) 4000 *modname = mod->name; 4001 4002 ret = find_kallsyms_symbol(mod, addr, size, offset); 4003 } 4004 /* Make a copy in here where it's safe */ 4005 if (ret) { 4006 strncpy(namebuf, ret, KSYM_NAME_LEN - 1); 4007 ret = namebuf; 4008 } 4009 preempt_enable(); 4010 4011 return ret; 4012} 4013 4014int lookup_module_symbol_name(unsigned long addr, char *symname) 4015{ 4016 struct module *mod; 4017 4018 preempt_disable(); 4019 list_for_each_entry_rcu(mod, &modules, list) { 4020 if (mod->state == MODULE_STATE_UNFORMED) 4021 continue; 4022 if (within_module(addr, mod)) { 4023 const char *sym; 4024 4025 sym = find_kallsyms_symbol(mod, addr, NULL, NULL); 4026 if (!sym) 4027 goto out; 4028 4029 strlcpy(symname, sym, KSYM_NAME_LEN); 4030 preempt_enable(); 4031 return 0; 4032 } 4033 } 4034out: 4035 preempt_enable(); 4036 return -ERANGE; 4037} 4038 4039int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, 4040 unsigned long *offset, char *modname, char *name) 4041{ 4042 struct module *mod; 4043 4044 preempt_disable(); 4045 list_for_each_entry_rcu(mod, &modules, list) { 4046 if (mod->state == MODULE_STATE_UNFORMED) 4047 continue; 4048 if (within_module(addr, mod)) { 4049 const char *sym; 4050 4051 sym = find_kallsyms_symbol(mod, addr, size, offset); 4052 if (!sym) 4053 goto out; 4054 if (modname) 4055 strlcpy(modname, mod->name, MODULE_NAME_LEN); 4056 if (name) 4057 strlcpy(name, sym, KSYM_NAME_LEN); 4058 preempt_enable(); 4059 return 0; 4060 } 4061 } 4062out: 4063 preempt_enable(); 4064 return -ERANGE; 4065} 4066 4067int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, 4068 char *name, char *module_name, int *exported) 4069{ 4070 struct module *mod; 4071 4072 preempt_disable(); 4073 list_for_each_entry_rcu(mod, &modules, list) { 4074 struct mod_kallsyms *kallsyms; 4075 4076 if (mod->state == MODULE_STATE_UNFORMED) 4077 continue; 4078 kallsyms = rcu_dereference_sched(mod->kallsyms); 4079 if (symnum < kallsyms->num_symtab) { 4080 const Elf_Sym *sym = &kallsyms->symtab[symnum]; 4081 4082 *value = kallsyms_symbol_value(sym); 4083 *type = sym->st_size; 4084 strlcpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN); 4085 strlcpy(module_name, mod->name, MODULE_NAME_LEN); 4086 *exported = is_exported(name, *value, mod); 4087 preempt_enable(); 4088 return 0; 4089 } 4090 symnum -= kallsyms->num_symtab; 4091 } 4092 preempt_enable(); 4093 return -ERANGE; 4094} 4095 4096/* Given a module and name of symbol, find and return the symbol's value */ 4097static unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name) 4098{ 4099 unsigned int i; 4100 struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); 4101 4102 for (i = 0; i < kallsyms->num_symtab; i++) { 4103 const Elf_Sym *sym = &kallsyms->symtab[i]; 4104 4105 if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 && 4106 sym->st_shndx != SHN_UNDEF) 4107 return kallsyms_symbol_value(sym); 4108 } 4109 return 0; 4110} 4111 4112/* Look for this name: can be of form module:name. */ 4113unsigned long module_kallsyms_lookup_name(const char *name) 4114{ 4115 struct module *mod; 4116 char *colon; 4117 unsigned long ret = 0; 4118 4119 /* Don't lock: we're in enough trouble already. */ 4120 preempt_disable(); 4121 if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) { 4122 if ((mod = find_module_all(name, colon - name, false)) != NULL) 4123 ret = find_kallsyms_symbol_value(mod, colon+1); 4124 } else { 4125 list_for_each_entry_rcu(mod, &modules, list) { 4126 if (mod->state == MODULE_STATE_UNFORMED) 4127 continue; 4128 if ((ret = find_kallsyms_symbol_value(mod, name)) != 0) 4129 break; 4130 } 4131 } 4132 preempt_enable(); 4133 return ret; 4134} 4135 4136int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, 4137 struct module *, unsigned long), 4138 void *data) 4139{ 4140 struct module *mod; 4141 unsigned int i; 4142 int ret; 4143 4144 module_assert_mutex(); 4145 4146 list_for_each_entry(mod, &modules, list) { 4147 /* We hold module_mutex: no need for rcu_dereference_sched */ 4148 struct mod_kallsyms *kallsyms = mod->kallsyms; 4149 4150 if (mod->state == MODULE_STATE_UNFORMED) 4151 continue; 4152 for (i = 0; i < kallsyms->num_symtab; i++) { 4153 const Elf_Sym *sym = &kallsyms->symtab[i]; 4154 4155 if (sym->st_shndx == SHN_UNDEF) 4156 continue; 4157 4158 ret = fn(data, kallsyms_symbol_name(kallsyms, i), 4159 mod, kallsyms_symbol_value(sym)); 4160 if (ret != 0) 4161 return ret; 4162 } 4163 } 4164 return 0; 4165} 4166#endif /* CONFIG_KALLSYMS */ 4167 4168/* Maximum number of characters written by module_flags() */ 4169#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4) 4170 4171/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */ 4172static char *module_flags(struct module *mod, char *buf) 4173{ 4174 int bx = 0; 4175 4176 BUG_ON(mod->state == MODULE_STATE_UNFORMED); 4177 if (mod->taints || 4178 mod->state == MODULE_STATE_GOING || 4179 mod->state == MODULE_STATE_COMING) { 4180 buf[bx++] = '('; 4181 bx += module_flags_taint(mod, buf + bx); 4182 /* Show a - for module-is-being-unloaded */ 4183 if (mod->state == MODULE_STATE_GOING) 4184 buf[bx++] = '-'; 4185 /* Show a + for module-is-being-loaded */ 4186 if (mod->state == MODULE_STATE_COMING) 4187 buf[bx++] = '+'; 4188 buf[bx++] = ')'; 4189 } 4190 buf[bx] = '\0'; 4191 4192 return buf; 4193} 4194 4195#ifdef CONFIG_PROC_FS 4196/* Called by the /proc file system to return a list of modules. */ 4197static void *m_start(struct seq_file *m, loff_t *pos) 4198{ 4199 mutex_lock(&module_mutex); 4200 return seq_list_start(&modules, *pos); 4201} 4202 4203static void *m_next(struct seq_file *m, void *p, loff_t *pos) 4204{ 4205 return seq_list_next(p, &modules, pos); 4206} 4207 4208static void m_stop(struct seq_file *m, void *p) 4209{ 4210 mutex_unlock(&module_mutex); 4211} 4212 4213static int m_show(struct seq_file *m, void *p) 4214{ 4215 struct module *mod = list_entry(p, struct module, list); 4216 char buf[MODULE_FLAGS_BUF_SIZE]; 4217 void *value; 4218 4219 /* We always ignore unformed modules. */ 4220 if (mod->state == MODULE_STATE_UNFORMED) 4221 return 0; 4222 4223 seq_printf(m, "%s %u", 4224 mod->name, mod->init_layout.size + mod->core_layout.size); 4225 print_unload_info(m, mod); 4226 4227 /* Informative for users. */ 4228 seq_printf(m, " %s", 4229 mod->state == MODULE_STATE_GOING ? "Unloading" : 4230 mod->state == MODULE_STATE_COMING ? "Loading" : 4231 "Live"); 4232 /* Used by oprofile and other similar tools. */ 4233 value = m->private ? NULL : mod->core_layout.base; 4234 seq_printf(m, " 0x%px", value); 4235 4236 /* Taints info */ 4237 if (mod->taints) 4238 seq_printf(m, " %s", module_flags(mod, buf)); 4239 4240 seq_puts(m, "\n"); 4241 return 0; 4242} 4243 4244/* Format: modulename size refcount deps address 4245 4246 Where refcount is a number or -, and deps is a comma-separated list 4247 of depends or -. 4248*/ 4249static const struct seq_operations modules_op = { 4250 .start = m_start, 4251 .next = m_next, 4252 .stop = m_stop, 4253 .show = m_show 4254}; 4255 4256/* 4257 * This also sets the "private" pointer to non-NULL if the 4258 * kernel pointers should be hidden (so you can just test 4259 * "m->private" to see if you should keep the values private). 4260 * 4261 * We use the same logic as for /proc/kallsyms. 4262 */ 4263static int modules_open(struct inode *inode, struct file *file) 4264{ 4265 int err = seq_open(file, &modules_op); 4266 4267 if (!err) { 4268 struct seq_file *m = file->private_data; 4269 m->private = kallsyms_show_value() ? NULL : (void *)8ul; 4270 } 4271 4272 return err; 4273} 4274 4275static const struct file_operations proc_modules_operations = { 4276 .open = modules_open, 4277 .read = seq_read, 4278 .llseek = seq_lseek, 4279 .release = seq_release, 4280}; 4281 4282static int __init proc_modules_init(void) 4283{ 4284 proc_create("modules", 0, NULL, &proc_modules_operations); 4285 return 0; 4286} 4287module_init(proc_modules_init); 4288#endif 4289 4290/* Given an address, look for it in the module exception tables. */ 4291const struct exception_table_entry *search_module_extables(unsigned long addr) 4292{ 4293 const struct exception_table_entry *e = NULL; 4294 struct module *mod; 4295 4296 preempt_disable(); 4297 mod = __module_address(addr); 4298 if (!mod) 4299 goto out; 4300 4301 if (!mod->num_exentries) 4302 goto out; 4303 4304 e = search_extable(mod->extable, 4305 mod->num_exentries, 4306 addr); 4307out: 4308 preempt_enable(); 4309 4310 /* 4311 * Now, if we found one, we are running inside it now, hence 4312 * we cannot unload the module, hence no refcnt needed. 4313 */ 4314 return e; 4315} 4316 4317/* 4318 * is_module_address - is this address inside a module? 4319 * @addr: the address to check. 4320 * 4321 * See is_module_text_address() if you simply want to see if the address 4322 * is code (not data). 4323 */ 4324bool is_module_address(unsigned long addr) 4325{ 4326 bool ret; 4327 4328 preempt_disable(); 4329 ret = __module_address(addr) != NULL; 4330 preempt_enable(); 4331 4332 return ret; 4333} 4334 4335/* 4336 * __module_address - get the module which contains an address. 4337 * @addr: the address. 4338 * 4339 * Must be called with preempt disabled or module mutex held so that 4340 * module doesn't get freed during this. 4341 */ 4342struct module *__module_address(unsigned long addr) 4343{ 4344 struct module *mod; 4345 4346 if (addr < module_addr_min || addr > module_addr_max) 4347 return NULL; 4348 4349 module_assert_mutex_or_preempt(); 4350 4351 mod = mod_find(addr); 4352 if (mod) { 4353 BUG_ON(!within_module(addr, mod)); 4354 if (mod->state == MODULE_STATE_UNFORMED) 4355 mod = NULL; 4356 } 4357 return mod; 4358} 4359EXPORT_SYMBOL_GPL(__module_address); 4360 4361/* 4362 * is_module_text_address - is this address inside module code? 4363 * @addr: the address to check. 4364 * 4365 * See is_module_address() if you simply want to see if the address is 4366 * anywhere in a module. See kernel_text_address() for testing if an 4367 * address corresponds to kernel or module code. 4368 */ 4369bool is_module_text_address(unsigned long addr) 4370{ 4371 bool ret; 4372 4373 preempt_disable(); 4374 ret = __module_text_address(addr) != NULL; 4375 preempt_enable(); 4376 4377 return ret; 4378} 4379 4380/* 4381 * __module_text_address - get the module whose code contains an address. 4382 * @addr: the address. 4383 * 4384 * Must be called with preempt disabled or module mutex held so that 4385 * module doesn't get freed during this. 4386 */ 4387struct module *__module_text_address(unsigned long addr) 4388{ 4389 struct module *mod = __module_address(addr); 4390 if (mod) { 4391 /* Make sure it's within the text section. */ 4392 if (!within(addr, mod->init_layout.base, mod->init_layout.text_size) 4393 && !within(addr, mod->core_layout.base, mod->core_layout.text_size)) 4394 mod = NULL; 4395 } 4396 return mod; 4397} 4398EXPORT_SYMBOL_GPL(__module_text_address); 4399 4400/* Don't grab lock, we're oopsing. */ 4401void print_modules(void) 4402{ 4403 struct module *mod; 4404 char buf[MODULE_FLAGS_BUF_SIZE]; 4405 4406 printk(KERN_DEFAULT "Modules linked in:"); 4407 /* Most callers should already have preempt disabled, but make sure */ 4408 preempt_disable(); 4409 list_for_each_entry_rcu(mod, &modules, list) { 4410 if (mod->state == MODULE_STATE_UNFORMED) 4411 continue; 4412 pr_cont(" %s%s", mod->name, module_flags(mod, buf)); 4413 } 4414 preempt_enable(); 4415 if (last_unloaded_module[0]) 4416 pr_cont(" [last unloaded: %s]", last_unloaded_module); 4417 pr_cont("\n"); 4418} 4419 4420#ifdef CONFIG_MODVERSIONS 4421/* Generate the signature for all relevant module structures here. 4422 * If these change, we don't want to try to parse the module. */ 4423void module_layout(struct module *mod, 4424 struct modversion_info *ver, 4425 struct kernel_param *kp, 4426 struct kernel_symbol *ks, 4427 struct tracepoint * const *tp) 4428{ 4429} 4430EXPORT_SYMBOL(module_layout); 4431#endif