/* Kernel module help for x86.
   Copyright (C) 2001 Rusty Russell.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/setup.h>

#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif

#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				(get_random_int() % 1024 + 1) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif

void *module_alloc(unsigned long size)
{
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				 MODULES_VADDR + get_module_load_offset(),
				 MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
				 PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			*(u64 *)loc = val;
			break;
		case R_X86_64_32:
			*(u32 *)loc = val;
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			*(s32 *)loc = val;
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
			val -= (u64)loc;
			*(u32 *)loc = val;
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}
#endif

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
		*para = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".text", secstrings + s->sh_name))
			text = s;
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
	}

	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (locks && text) {
		void *lseg = (void *)locks->sh_addr;
		void *tseg = (void *)text->sh_addr;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    tseg, tseg + text->sh_size);
	}

	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}

	/* make jump label nops */
	jump_label_apply_nops(me);

	return 0;
}

void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}