Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus

* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
module: convert to stop_machine_create/destroy.
stop_machine: introduce stop_machine_create/destroy.
parisc: fix module loading failure of large kernel modules
module: fix module loading failure of large kernel modules for parisc
module: fix warning of unused function when !CONFIG_PROC_FS
kernel/module.c: compare symbol values when marking symbols as exported in /proc/kallsyms.
remove CONFIG_KMOD

+258 -115
+4 -2
arch/parisc/include/asm/module.h
···
 {
 	unsigned long got_offset, got_count, got_max;
 	unsigned long fdesc_offset, fdesc_count, fdesc_max;
-	unsigned long stub_offset, stub_count, stub_max;
-	unsigned long init_stub_offset, init_stub_count, init_stub_max;
+	struct {
+		unsigned long stub_offset;
+		unsigned int stub_entries;
+	} *section;
 	int unwind_section;
 	struct unwind_table *unwind;
 };
+135 -81
arch/parisc/kernel/module.c
···
  *
  * Linux/PA-RISC Project (http://www.parisc-linux.org/)
  * Copyright (C) 2003 Randolph Chung <tausq at debian . org>
+ * Copyright (C) 2008 Helge Deller <deller@gmx.de>
  *
  *
  * This program is free software; you can redistribute it and/or modify
···
  *
  *
  * Notes:
+ *  - PLT stub handling
+ *    On 32bit (and sometimes 64bit) and with big kernel modules like xfs or
+ *    ipv6 the relocation types R_PARISC_PCREL17F and R_PARISC_PCREL22F may
+ *    fail to reach their PLT stub if we only create one big stub array for
+ *    all sections at the beginning of the core or init section.
+ *    Instead we now insert individual PLT stub entries directly in front
+ *    of the code sections where the stubs are actually called.
+ *    This reduces the distance between the PCREL location and the stub entry
+ *    so that the relocations can be fulfilled.
+ *    While calculating the final layout of the kernel module in memory, the
+ *    kernel module loader calls arch_mod_section_prepend() to request the
+ *    amount of memory to be reserved in front of each individual section.
+ *
  *  - SEGREL32 handling
  *    We are not doing SEGREL32 handling correctly. According to the ABI, we
  *    should do a value offset, like this:
···
 #define DEBUGP(fmt...)
 #endif
 
+#define RELOC_REACHABLE(val, bits) \
+	(( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 )  ||	\
+	   ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) ?	\
+	0 : 1)
+
 #define CHECK_RELOC(val, bits) \
-	if ( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 )  ||	\
-	     ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) { \
+	if (!RELOC_REACHABLE(val, bits)) { \
 		printk(KERN_ERR "module %s relocation of symbol %s is out of range (0x%lx in %d bits)\n", \
 		me->name, strtab + sym->st_name, (unsigned long)val, bits); \
 		return -ENOEXEC; \
···
 {
 	return in_init(me, loc) || in_core(me, loc);
 }
-
-static inline int in_local_section(struct module *me, void *loc, void *dot)
-{
-	return (in_init(me, loc) && in_init(me, dot)) ||
-	       (in_core(me, loc) && in_core(me, dot));
-}
-
 
 #ifndef CONFIG_64BIT
 struct got_entry {
···
 /* Free memory returned from module_alloc */
 void module_free(struct module *mod, void *module_region)
 {
+	kfree(mod->arch.section);
+	mod->arch.section = NULL;
+
 	vfree(module_region);
 	/* FIXME: If module_region == mod->init_region, trim exception
 	   table entries. */
 }
+
+/* Additional bytes needed in front of individual sections */
+unsigned int arch_mod_section_prepend(struct module *mod,
+				      unsigned int section)
+{
+	/* size needed for all stubs of this section (including
+	 * one additional for correct alignment of the stubs) */
+	return (mod->arch.section[section].stub_entries + 1)
+		* sizeof(struct stub_entry);
+}
 
 #define CONST
···
 			     CONST char *secstrings,
 			     struct module *me)
 {
-	unsigned long gots = 0, fdescs = 0, stubs = 0, init_stubs = 0;
+	unsigned long gots = 0, fdescs = 0, len;
 	unsigned int i;
 
+	len = hdr->e_shnum * sizeof(me->arch.section[0]);
+	me->arch.section = kzalloc(len, GFP_KERNEL);
+	if (!me->arch.section)
+		return -ENOMEM;
+
 	for (i = 1; i < hdr->e_shnum; i++) {
-		const Elf_Rela *rels = (void *)hdr + sechdrs[i].sh_offset;
+		const Elf_Rela *rels = (void *)sechdrs[i].sh_addr;
 		unsigned long nrels = sechdrs[i].sh_size / sizeof(*rels);
+		unsigned int count, s;
 
 		if (strncmp(secstrings + sechdrs[i].sh_name,
 			    ".PARISC.unwind", 14) == 0)
···
 		 */
 		gots += count_gots(rels, nrels);
 		fdescs += count_fdescs(rels, nrels);
-		if(strncmp(secstrings + sechdrs[i].sh_name,
-			   ".rela.init", 10) == 0)
-			init_stubs += count_stubs(rels, nrels);
-		else
-			stubs += count_stubs(rels, nrels);
+
+		/* XXX: By sorting the relocs and finding duplicate entries
+		 * we could reduce the number of necessary stubs and save
+		 * some memory. */
+		count = count_stubs(rels, nrels);
+		if (!count)
+			continue;
+
+		/* so we need relocation stubs.  reserve necessary memory. */
+		/* sh_info gives the section for which we need to add stubs. */
+		s = sechdrs[i].sh_info;
+
+		/* each code section should only have one relocation section */
+		WARN_ON(me->arch.section[s].stub_entries);
+
+		/* store number of stubs we need for this section */
+		me->arch.section[s].stub_entries += count;
 	}
 
 	/* align things a bit */
···
 	me->arch.fdesc_offset = me->core_size;
 	me->core_size += fdescs * sizeof(Elf_Fdesc);
 
-	me->core_size = ALIGN(me->core_size, 16);
-	me->arch.stub_offset = me->core_size;
-	me->core_size += stubs * sizeof(struct stub_entry);
-
-	me->init_size = ALIGN(me->init_size, 16);
-	me->arch.init_stub_offset = me->init_size;
-	me->init_size += init_stubs * sizeof(struct stub_entry);
-
 	me->arch.got_max = gots;
 	me->arch.fdesc_max = fdescs;
-	me->arch.stub_max = stubs;
-	me->arch.init_stub_max = init_stubs;
 
 	return 0;
 }
···
 };
 
 static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
-	enum elf_stub_type stub_type, int init_section)
+	enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec)
 {
-	unsigned long i;
 	struct stub_entry *stub;
 
-	if(init_section) {
-		i = me->arch.init_stub_count++;
-		BUG_ON(me->arch.init_stub_count > me->arch.init_stub_max);
-		stub = me->module_init + me->arch.init_stub_offset +
-			i * sizeof(struct stub_entry);
-	} else {
-		i = me->arch.stub_count++;
-		BUG_ON(me->arch.stub_count > me->arch.stub_max);
-		stub = me->module_core + me->arch.stub_offset +
-			i * sizeof(struct stub_entry);
+	/* initialize stub_offset to point in front of the section */
+	if (!me->arch.section[targetsec].stub_offset) {
+		loc0 -= (me->arch.section[targetsec].stub_entries + 1) *
+				sizeof(struct stub_entry);
+		/* get correct alignment for the stubs */
+		loc0 = ALIGN(loc0, sizeof(struct stub_entry));
+		me->arch.section[targetsec].stub_offset = loc0;
 	}
+
+	/* get address of stub entry */
+	stub = (void *) me->arch.section[targetsec].stub_offset;
+	me->arch.section[targetsec].stub_offset += sizeof(struct stub_entry);
+
+	/* do not write outside available stub area */
+	BUG_ON(0 == me->arch.section[targetsec].stub_entries--);
+
 
 #ifndef CONFIG_64BIT
 	/* for 32-bit the stub looks like this:
···
 	Elf32_Addr val;
 	Elf32_Sword addend;
 	Elf32_Addr dot;
+	Elf_Addr loc0;
+	unsigned int targetsec = sechdrs[relsec].sh_info;
 	//unsigned long dp = (unsigned long)$global$;
 	register unsigned long dp asm ("r27");
 
 	DEBUGP("Applying relocate section %u to %u\n", relsec,
-	       sechdrs[relsec].sh_info);
+	       targetsec);
 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
 		/* This is where to make the change */
-		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+		loc = (void *)sechdrs[targetsec].sh_addr
 		      + rel[i].r_offset;
+		/* This is the start of the target section */
+		loc0 = sechdrs[targetsec].sh_addr;
 		/* This is the symbol it is referring to */
 		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
 			+ ELF32_R_SYM(rel[i].r_info);
···
 			break;
 		case R_PARISC_PCREL17F:
 			/* 17-bit PC relative address */
-			val = get_stub(me, val, addend, ELF_STUB_GOT, in_init(me, loc));
+			/* calculate direct call offset */
+			val += addend;
 			val = (val - dot - 8)/4;
-			CHECK_RELOC(val, 17)
+			if (!RELOC_REACHABLE(val, 17)) {
+				/* direct distance too far, create
+				 * stub entry instead */
+				val = get_stub(me, sym->st_value, addend,
+					ELF_STUB_DIRECT, loc0, targetsec);
+				val = (val - dot - 8)/4;
+				CHECK_RELOC(val, 17);
+			}
 			*loc = (*loc & ~0x1f1ffd) | reassemble_17(val);
 			break;
 		case R_PARISC_PCREL22F:
 			/* 22-bit PC relative address; only defined for pa20 */
-			val = get_stub(me, val, addend, ELF_STUB_GOT, in_init(me, loc));
-			DEBUGP("STUB FOR %s loc %lx+%lx at %lx\n",
-			       strtab + sym->st_name, (unsigned long)loc, addend,
-			       val)
+			/* calculate direct call offset */
+			val += addend;
 			val = (val - dot - 8)/4;
-			CHECK_RELOC(val, 22);
+			if (!RELOC_REACHABLE(val, 22)) {
+				/* direct distance too far, create
+				 * stub entry instead */
+				val = get_stub(me, sym->st_value, addend,
+					ELF_STUB_DIRECT, loc0, targetsec);
+				val = (val - dot - 8)/4;
+				CHECK_RELOC(val, 22);
+			}
 			*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
 			break;
···
 	Elf64_Addr val;
 	Elf64_Sxword addend;
 	Elf64_Addr dot;
+	Elf_Addr loc0;
+	unsigned int targetsec = sechdrs[relsec].sh_info;
 
 	DEBUGP("Applying relocate section %u to %u\n", relsec,
-	       sechdrs[relsec].sh_info);
+	       targetsec);
 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
 		/* This is where to make the change */
-		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+		loc = (void *)sechdrs[targetsec].sh_addr
 		      + rel[i].r_offset;
+		/* This is the start of the target section */
+		loc0 = sechdrs[targetsec].sh_addr;
 		/* This is the symbol it is referring to */
 		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
 			+ ELF64_R_SYM(rel[i].r_info);
···
 			DEBUGP("PCREL22F Symbol %s loc %p val %lx\n",
 			       strtab + sym->st_name,
 			       loc, val);
+			val += addend;
 			/* can we reach it locally? */
-			if(!in_local_section(me, (void *)val, (void *)dot)) {
-
-				if (in_local(me, (void *)val))
-					/* this is the case where the
-					 * symbol is local to the
-					 * module, but in a different
-					 * section, so stub the jump
-					 * in case it's more than 22
-					 * bits away */
-					val = get_stub(me, val, addend, ELF_STUB_DIRECT,
-						       in_init(me, loc));
-				else if (strncmp(strtab + sym->st_name, "$$", 2)
+			if (in_local(me, (void *)val)) {
+				/* this is the case where the symbol is local
+				 * to the module, but in a different section,
+				 * so stub the jump in case it's more than 22
+				 * bits away */
+				val = (val - dot - 8)/4;
+				if (!RELOC_REACHABLE(val, 22)) {
+					/* direct distance too far, create
+					 * stub entry instead */
+					val = get_stub(me, sym->st_value,
+						addend, ELF_STUB_DIRECT,
+						loc0, targetsec);
+				} else {
+					/* Ok, we can reach it directly. */
+					val = sym->st_value;
+					val += addend;
+				}
+			} else {
+				val = sym->st_value;
+				if (strncmp(strtab + sym->st_name, "$$", 2)
 				    == 0)
 					val = get_stub(me, val, addend, ELF_STUB_MILLI,
-						       in_init(me, loc));
+						       loc0, targetsec);
 				else
 					val = get_stub(me, val, addend, ELF_STUB_GOT,
-						       in_init(me, loc));
+						       loc0, targetsec);
 			}
 			DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n",
 			       strtab + sym->st_name, loc, sym->st_value,
 			       addend, val);
-			/* FIXME: local symbols work as long as the
-			 * core and init pieces aren't separated too
-			 * far.  If this is ever broken, you will trip
-			 * the check below.  The way to fix it would
-			 * be to generate local stubs to go between init
-			 * and core */
-			if((Elf64_Sxword)(val - dot - 8) > 0x800000 -1 ||
-			   (Elf64_Sxword)(val - dot - 8) < -0x800000) {
-				printk(KERN_ERR "Module %s, symbol %s is out of range for PCREL22F relocation\n",
-				       me->name, strtab + sym->st_name);
-				return -ENOEXEC;
-			}
 			val = (val - dot - 8)/4;
+			CHECK_RELOC(val, 22);
 			*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
 			break;
 		case R_PARISC_DIR64:
···
 	addr = (u32 *)entry->addr;
 	printk("INSNS: %x %x %x %x\n",
 	       addr[0], addr[1], addr[2], addr[3]);
-	printk("stubs used %ld, stubs max %ld\n"
-	       "init_stubs used %ld, init stubs max %ld\n"
-	       "got entries used %ld, gots max %ld\n"
+	printk("got entries used %ld, gots max %ld\n"
 	       "fdescs used %ld, fdescs max %ld\n",
-	       me->arch.stub_count, me->arch.stub_max,
-	       me->arch.init_stub_count, me->arch.init_stub_max,
 	       me->arch.got_count, me->arch.got_max,
 	       me->arch.fdesc_count, me->arch.fdesc_max);
 #endif
···
 		       me->name, me->arch.got_count, MAX_GOTS);
 		return -EINVAL;
 	}
-	
+
+	kfree(me->arch.section);
+	me->arch.section = NULL;
+
 	/* no symbol table */
 	if(symhdr == NULL)
 		return 0;
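The fallback logic above is the heart of the fix: a PCREL17F/22F branch is first resolved directly, and a per-section stub slot is consumed only when the displacement does not fit the branch field. Below is a minimal userspace sketch of that strategy; reloc_fits() is a simplified signed-range stand-in for the kernel's RELOC_REACHABLE() macro, and resolve_pcrel() is an illustrative name, not a kernel function.

#include <stdint.h>
#include <stdio.h>

/* Does the word offset 'off' fit in a signed 'bits'-wide branch field?
 * (Simplified stand-in for the kernel's RELOC_REACHABLE() macro.) */
static int reloc_fits(int64_t off, int bits)
{
	return off >= -((int64_t)1 << (bits - 1)) &&
	       off <   ((int64_t)1 << (bits - 1));
}

/* Try the direct branch first; fall back to the per-section stub only
 * when the target is out of range. */
static uint64_t resolve_pcrel(uint64_t target, uint64_t dot,
			      uint64_t stub, int bits)
{
	/* PA-RISC branch offsets are in words, relative to dot + 8 */
	int64_t off = ((int64_t)(target - dot) - 8) / 4;

	return reloc_fits(off, bits) ? target : stub;
}

int main(void)
{
	/* a 17-bit word-offset field reaches roughly +/-256 KiB,
	 * a 22-bit field roughly +/-8 MiB */
	printf("%#lx\n", (unsigned long)
	       resolve_pcrel(0x401000, 0x400000, 0x400ff0, 17)); /* direct */
	printf("%#lx\n", (unsigned long)
	       resolve_pcrel(0x800000, 0x400000, 0x400ff0, 17)); /* stub   */
	return 0;
}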
+3
include/linux/moduleloader.h
···
 			     char *secstrings,
 			     struct module *mod);
 
+/* Additional bytes needed by arch in front of individual sections */
+unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
+
 /* Allocator used for allocating struct module, core sections and init
    sections.  Returns NULL on failure. */
 void *module_alloc(unsigned long size);
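The new hook is consumed by get_offset() in kernel/module.c (see below): the prepend request is added to the running section size before alignment, so the reserved bytes land directly in front of the section they belong to. A small, self-contained sketch of that layout arithmetic, where prepend_bytes() is a made-up stand-in for an arch's arch_mod_section_prepend():

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Made-up arch hook: reserve 64 bytes of stub space per section. */
static unsigned int prepend_bytes(unsigned int section)
{
	(void)section;		/* unused in this sketch */
	return 64;
}

/* Mirrors the patched get_offset() below: grow the running size by the
 * arch's prepend request, then align, then place the section. */
static long get_offset(unsigned int *size, unsigned int align,
		       unsigned int sh_size, unsigned int section)
{
	long ret;

	*size += prepend_bytes(section);
	ret = ALIGN(*size, align ? align : 1);
	*size = ret + sh_size;
	return ret;
}

int main(void)
{
	unsigned int core_size = 0;

	/* two 100-byte sections, 16-byte aligned, with 64 bytes
	 * reserved in front of each */
	printf("section 0 at offset %ld\n", get_offset(&core_size, 16, 100, 0));
	printf("section 1 at offset %ld\n", get_offset(&core_size, 16, 100, 1));
	printf("total core size %u\n", core_size);
	return 0;
}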
+22
include/linux/stop_machine.h
···
  * won't come or go while it's being called.  Used by hotplug cpu.
  */
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
+
+/**
+ * stop_machine_create: create all stop_machine threads
+ *
+ * Description: This causes all stop_machine threads to be created before
+ * stop_machine actually gets called.  This can be used by subsystems that
+ * need a non-failing stop_machine infrastructure.
+ */
+int stop_machine_create(void);
+
+/**
+ * stop_machine_destroy: destroy all stop_machine threads
+ *
+ * Description: This causes all stop_machine threads which were created with
+ * stop_machine_create to be destroyed again.
+ */
+void stop_machine_destroy(void);
+
 #else
 
 static inline int stop_machine(int (*fn)(void *), void *data,
···
 	local_irq_enable();
 	return ret;
 }
+
+static inline int stop_machine_create(void) { return 0; }
+static inline void stop_machine_destroy(void) { }
+
 #endif /* CONFIG_SMP */
 #endif /* _LINUX_STOP_MACHINE */
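A hedged usage sketch of the new pair, following the pattern the callers below (cpu_down(), delete_module(), load_module()) adopt; my_critical_update() and my_update() are placeholders for whatever must run with all CPUs stopped:

#include <linux/stop_machine.h>

static int my_critical_update(void *data)
{
	/* runs on one CPU while all others spin with IRQs disabled */
	return 0;
}

static int my_update(void *data)
{
	int err;

	/* Take a reference on the stop_machine threads up front, while
	 * failure can still be reported cleanly; holding the reference
	 * guarantees the stop_machine() call below cannot fail for
	 * lack of memory. */
	err = stop_machine_create();
	if (err)
		return err;

	err = stop_machine(my_critical_update, data, NULL);

	/* drop the reference; the threads go away with the last user */
	stop_machine_destroy();
	return err;
}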
-6
init/Kconfig
···
 	  the version).  With this option, such a "srcversion" field
 	  will be created for all modules.  If unsure, say N.
 
-config KMOD
-	def_bool y
-	help
-	  This is being removed soon.  These days, CONFIG_MODULES
-	  implies CONFIG_KMOD, so use that instead.
-
 endif # MODULES
 
 config INIT_ALL_POSSIBLE
+5 -1
kernel/cpu.c
···
 
 int __ref cpu_down(unsigned int cpu)
 {
-	int err = 0;
+	int err;
 
+	err = stop_machine_create();
+	if (err)
+		return err;
 	cpu_maps_update_begin();
 
 	if (cpu_hotplug_disabled) {
···
 
 out:
 	cpu_maps_update_done();
+	stop_machine_destroy();
 	return err;
 }
 EXPORT_SYMBOL(cpu_down);
+44 -15
kernel/module.c
···
 		return -EFAULT;
 	name[MODULE_NAME_LEN-1] = '\0';
 
-	if (mutex_lock_interruptible(&module_mutex) != 0)
-		return -EINTR;
+	/* Create stop_machine threads since free_module relies on
+	 * a non-failing stop_machine call. */
+	ret = stop_machine_create();
+	if (ret)
+		return ret;
+
+	if (mutex_lock_interruptible(&module_mutex) != 0) {
+		ret = -EINTR;
+		goto out_stop;
+	}
 
 	mod = find_module(name);
 	if (!mod) {
···
 
 out:
 	mutex_unlock(&module_mutex);
+out_stop:
+	stop_machine_destroy();
 	return ret;
 }
 
-static void print_unload_info(struct seq_file *m, struct module *mod)
+static inline void print_unload_info(struct seq_file *m, struct module *mod)
 {
 	struct module_use *use;
 	int printed_something = 0;
···
 EXPORT_SYMBOL(module_put);
 
 #else /* !CONFIG_MODULE_UNLOAD */
-static void print_unload_info(struct seq_file *m, struct module *mod)
+static inline void print_unload_info(struct seq_file *m, struct module *mod)
 {
 	/* We don't know the usage count, or what modules are using. */
 	seq_printf(m, " - -");
···
 	return ret;
 }
 
+/* Additional bytes needed by arch in front of individual sections */
+unsigned int __weak arch_mod_section_prepend(struct module *mod,
+					     unsigned int section)
+{
+	/* default implementation just returns zero */
+	return 0;
+}
+
 /* Update size with this section: return offset. */
-static long get_offset(unsigned int *size, Elf_Shdr *sechdr)
+static long get_offset(struct module *mod, unsigned int *size,
+		       Elf_Shdr *sechdr, unsigned int section)
 {
 	long ret;
 
+	*size += arch_mod_section_prepend(mod, section);
 	ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
 	*size = ret + sechdr->sh_size;
 	return ret;
···
 			    || strncmp(secstrings + s->sh_name,
 				       ".init", 5) == 0)
 				continue;
-			s->sh_entsize = get_offset(&mod->core_size, s);
+			s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
 			DEBUGP("\t%s\n", secstrings + s->sh_name);
 		}
 		if (m == 0)
···
 			    || strncmp(secstrings + s->sh_name,
 				       ".init", 5) != 0)
 				continue;
-			s->sh_entsize = (get_offset(&mod->init_size, s)
+			s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
 					 | INIT_OFFSET_MASK);
 			DEBUGP("\t%s\n", secstrings + s->sh_name);
 		}
···
 	return NULL;
 }
 
-static int is_exported(const char *name, const struct module *mod)
+static int is_exported(const char *name, unsigned long value,
+		       const struct module *mod)
 {
-	if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab))
-		return 1;
+	const struct kernel_symbol *ks;
+	if (!mod)
+		ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
 	else
-		if (mod && lookup_symbol(name, mod->syms, mod->syms + mod->num_syms))
-			return 1;
-		else
-			return 0;
+		ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
+	return ks != NULL && ks->value == value;
 }
 
 /* As per nm */
···
 	/* vmalloc barfs on "unusual" numbers.  Check here */
 	if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
 		return ERR_PTR(-ENOMEM);
+
+	/* Create stop_machine threads since the error path relies on
+	 * a non-failing stop_machine call. */
+	err = stop_machine_create();
+	if (err)
+		goto free_hdr;
+
 	if (copy_from_user(hdr, umod, len) != 0) {
 		err = -EFAULT;
 		goto free_hdr;
···
 	/* Get rid of temporary copy */
 	vfree(hdr);
 
+	stop_machine_destroy();
 	/* Done! */
 	return mod;
 
···
 	kfree(args);
 free_hdr:
 	vfree(hdr);
+	stop_machine_destroy();
 	return ERR_PTR(err);
 
 truncated:
···
 	strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
 		KSYM_NAME_LEN);
 	strlcpy(module_name, mod->name, MODULE_NAME_LEN);
-	*exported = is_exported(name, mod);
+	*exported = is_exported(name, *value, mod);
 	preempt_enable();
 	return 0;
 }
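The last two hunks fix the /proc/kallsyms annotation: previously a bare name match marked a module symbol as exported, so a module-local symbol that merely shared a name with some export was mislabelled. Comparing the symbol's value as well restricts the match to the real export. A minimal userspace sketch of the post-patch logic, with a mocked-up one-entry export table (lookup_symbol() here is a simplified stand-in for the kernel helper):

#include <stdio.h>
#include <string.h>

/* simplified kernel_symbol: an exported name and its address */
struct kernel_symbol {
	unsigned long value;
	const char *name;
};

static const struct kernel_symbol ksymtab[] = {
	{ 0xc0001000UL, "foo_init" },
};

/* simplified stand-in for the kernel's lookup_symbol() */
static const struct kernel_symbol *lookup_symbol(const char *name,
		const struct kernel_symbol *start,
		const struct kernel_symbol *stop)
{
	const struct kernel_symbol *ks;

	for (ks = start; ks < stop; ks++)
		if (strcmp(ks->name, name) == 0)
			return ks;
	return NULL;
}

/* post-patch logic: exported only if name *and* address match */
static int is_exported(const char *name, unsigned long value)
{
	const struct kernel_symbol *ks =
		lookup_symbol(name, ksymtab, ksymtab + 1);

	return ks != NULL && ks->value == value;
}

int main(void)
{
	printf("%d\n", is_exported("foo_init", 0xc0001000UL)); /* 1 */
	/* a module-local "foo_init" at another address no longer matches */
	printf("%d\n", is_exported("foo_init", 0xc0ffee00UL)); /* 0 */
	return 0;
}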
+45 -10
kernel/stop_machine.c
···
 static unsigned int num_threads;
 static atomic_t thread_ack;
 static DEFINE_MUTEX(lock);
-
+/* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */
+static DEFINE_MUTEX(setup_lock);
+/* Users of stop_machine. */
+static int refcount;
 static struct workqueue_struct *stop_machine_wq;
 static struct stop_machine_data active, idle;
 static const cpumask_t *active_cpus;
···
 	return 0;
 }
 
+int stop_machine_create(void)
+{
+	mutex_lock(&setup_lock);
+	if (refcount)
+		goto done;
+	stop_machine_wq = create_rt_workqueue("kstop");
+	if (!stop_machine_wq)
+		goto err_out;
+	stop_machine_work = alloc_percpu(struct work_struct);
+	if (!stop_machine_work)
+		goto err_out;
+done:
+	refcount++;
+	mutex_unlock(&setup_lock);
+	return 0;
+
+err_out:
+	if (stop_machine_wq)
+		destroy_workqueue(stop_machine_wq);
+	mutex_unlock(&setup_lock);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(stop_machine_create);
+
+void stop_machine_destroy(void)
+{
+	mutex_lock(&setup_lock);
+	refcount--;
+	if (refcount)
+		goto done;
+	destroy_workqueue(stop_machine_wq);
+	free_percpu(stop_machine_work);
+done:
+	mutex_unlock(&setup_lock);
+}
+EXPORT_SYMBOL_GPL(stop_machine_destroy);
+
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 {
 	struct work_struct *sm_work;
···
 {
 	int ret;
 
+	ret = stop_machine_create();
+	if (ret)
+		return ret;
 	/* No CPUs can come up or down during this. */
 	get_online_cpus();
 	ret = __stop_machine(fn, data, cpus);
 	put_online_cpus();
-
+	stop_machine_destroy();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(stop_machine);
-
-static int __init stop_machine_init(void)
-{
-	stop_machine_wq = create_rt_workqueue("kstop");
-	stop_machine_work = alloc_percpu(struct work_struct);
-	return 0;
-}
-core_initcall(stop_machine_init);