Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

LoongArch: Add ELF and module support

Add ELF-related definition and module relocation code for basic
LoongArch support.

Cc: Jessica Yu <jeyu@kernel.org>
Reviewed-by: WANG Xuerui <git@xen0n.name>
Reviewed-by: Luis Chamberlain <mcgrof@kernel.org>
Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>

+1043
+24
arch/loongarch/include/asm/cpufeature.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * CPU feature definitions for module loading, used by
 * module_cpu_feature_match(), see uapi/asm/hwcap.h for LoongArch CPU features.
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

#include <uapi/asm/hwcap.h>
#include <asm/elf.h>

/* One candidate feature per bit of elf_hwcap. */
#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))

/* Map a HWCAP_<x> single-bit mask to its bit index. */
#define cpu_feature(x) ilog2(HWCAP_ ## x)

/* Test whether hwcap bit @num is advertised in elf_hwcap. */
static inline bool cpu_have_feature(unsigned int num)
{
	return elf_hwcap & (1UL << num);
}

#endif /* __ASM_CPUFEATURE_H */
+301
arch/loongarch/include/asm/elf.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_ELF_H
#define _ASM_ELF_H

#include <linux/auxvec.h>
#include <linux/fs.h>
#include <uapi/linux/elf.h>

#include <asm/current.h>
#include <asm/vdso.h>

/* The ABI of a file. */
#define EF_LOONGARCH_ABI_LP64_SOFT_FLOAT	0x1
#define EF_LOONGARCH_ABI_LP64_SINGLE_FLOAT	0x2
#define EF_LOONGARCH_ABI_LP64_DOUBLE_FLOAT	0x3

#define EF_LOONGARCH_ABI_ILP32_SOFT_FLOAT	0x5
#define EF_LOONGARCH_ABI_ILP32_SINGLE_FLOAT	0x6
#define EF_LOONGARCH_ABI_ILP32_DOUBLE_FLOAT	0x7

/* LoongArch relocation types used by the dynamic linker */
#define R_LARCH_NONE				0
#define R_LARCH_32				1
#define R_LARCH_64				2
#define R_LARCH_RELATIVE			3
#define R_LARCH_COPY				4
#define R_LARCH_JUMP_SLOT			5
#define R_LARCH_TLS_DTPMOD32			6
#define R_LARCH_TLS_DTPMOD64			7
#define R_LARCH_TLS_DTPREL32			8
#define R_LARCH_TLS_DTPREL64			9
#define R_LARCH_TLS_TPREL32			10
#define R_LARCH_TLS_TPREL64			11
#define R_LARCH_IRELATIVE			12
#define R_LARCH_MARK_LA				20
#define R_LARCH_MARK_PCREL			21
#define R_LARCH_SOP_PUSH_PCREL			22
#define R_LARCH_SOP_PUSH_ABSOLUTE		23
#define R_LARCH_SOP_PUSH_DUP			24
#define R_LARCH_SOP_PUSH_GPREL			25
#define R_LARCH_SOP_PUSH_TLS_TPREL		26
#define R_LARCH_SOP_PUSH_TLS_GOT		27
#define R_LARCH_SOP_PUSH_TLS_GD		28
#define R_LARCH_SOP_PUSH_PLT_PCREL		29
#define R_LARCH_SOP_ASSERT			30
#define R_LARCH_SOP_NOT				31
#define R_LARCH_SOP_SUB				32
#define R_LARCH_SOP_SL				33
#define R_LARCH_SOP_SR				34
#define R_LARCH_SOP_ADD				35
#define R_LARCH_SOP_AND				36
#define R_LARCH_SOP_IF_ELSE			37
#define R_LARCH_SOP_POP_32_S_10_5		38
#define R_LARCH_SOP_POP_32_U_10_12		39
#define R_LARCH_SOP_POP_32_S_10_12		40
#define R_LARCH_SOP_POP_32_S_10_16		41
#define R_LARCH_SOP_POP_32_S_10_16_S2		42
#define R_LARCH_SOP_POP_32_S_5_20		43
#define R_LARCH_SOP_POP_32_S_0_5_10_16_S2	44
#define R_LARCH_SOP_POP_32_S_0_10_10_16_S2	45
#define R_LARCH_SOP_POP_32_U			46
#define R_LARCH_ADD8				47
#define R_LARCH_ADD16				48
#define R_LARCH_ADD24				49
#define R_LARCH_ADD32				50
#define R_LARCH_ADD64				51
#define R_LARCH_SUB8				52
#define R_LARCH_SUB16				53
#define R_LARCH_SUB24				54
#define R_LARCH_SUB32				55
#define R_LARCH_SUB64				56
#define R_LARCH_GNU_VTINHERIT			57
#define R_LARCH_GNU_VTENTRY			58

#ifndef ELF_ARCH

/* ELF register definitions */

/*
 * General purpose have the following registers:
 *	Register	Number
 *	GPRs		32
 *	ORIG_A0		1
 *	ERA		1
 *	BADVADDR	1
 *	CRMD		1
 *	PRMD		1
 *	EUEN		1
 *	ECFG		1
 *	ESTAT		1
 *	Reserved	5
 */
#define ELF_NGREG	45

/*
 * Floating point have the following registers:
 *	Register	Number
 *	FPR		32
 *	FCC		1
 *	FCSR		1
 */
#define ELF_NFPREG	34

typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];

typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs);

#ifdef CONFIG_32BIT
/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch elf32_check_arch

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS	ELFCLASS32

#define ELF_CORE_COPY_REGS(dest, regs) \
	loongarch_dump_regs32((u32 *)&(dest), (regs));

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT
/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch elf64_check_arch

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS	ELFCLASS64

#define ELF_CORE_COPY_REGS(dest, regs) \
	loongarch_dump_regs64((u64 *)&(dest), (regs));

#endif /* CONFIG_64BIT */

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_DATA	ELFDATA2LSB
#define ELF_ARCH	EM_LOONGARCH

#endif /* !defined(ELF_ARCH) */

#define loongarch_elf_check_machine(x) ((x)->e_machine == EM_LOONGARCH)

#define vmcore_elf32_check_arch loongarch_elf_check_machine
#define vmcore_elf64_check_arch loongarch_elf_check_machine

/*
 * Return non-zero if HDR identifies an 32bit ELF binary.
 */
#define elf32_check_arch(hdr)						\
({									\
	int __res = 1;							\
	struct elfhdr *__h = (hdr);					\
									\
	if (!loongarch_elf_check_machine(__h))				\
		__res = 0;						\
	if (__h->e_ident[EI_CLASS] != ELFCLASS32)			\
		__res = 0;						\
									\
	__res;								\
})

/*
 * Return non-zero if HDR identifies an 64bit ELF binary.
 */
#define elf64_check_arch(hdr)						\
({									\
	int __res = 1;							\
	struct elfhdr *__h = (hdr);					\
									\
	if (!loongarch_elf_check_machine(__h))				\
		__res = 0;						\
	if (__h->e_ident[EI_CLASS] != ELFCLASS64)			\
		__res = 0;						\
									\
	__res;								\
})

#ifdef CONFIG_32BIT

#define SET_PERSONALITY2(ex, state)					\
do {									\
	current->thread.vdso = &vdso_info;				\
									\
	loongarch_set_personality_fcsr(state);				\
									\
	if (personality(current->personality) != PER_LINUX)		\
		set_personality(PER_LINUX);				\
} while (0)

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

#define SET_PERSONALITY2(ex, state)					\
do {									\
	unsigned int p;							\
									\
	clear_thread_flag(TIF_32BIT_REGS);				\
	clear_thread_flag(TIF_32BIT_ADDR);				\
									\
	current->thread.vdso = &vdso_info;				\
	loongarch_set_personality_fcsr(state);				\
									\
	p = personality(current->personality);				\
	if (p != PER_LINUX32 && p != PER_LINUX)				\
		set_personality(PER_LINUX);				\
} while (0)

#endif /* CONFIG_64BIT */

#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE	PAGE_SIZE

/*
 * This yields a mask that user programs can use to figure out what
 * instruction set this cpu supports. This could be done in userspace,
 * but it's not easy, and we've already done it here.
 */

#define ELF_HWCAP	(elf_hwcap)
extern unsigned int elf_hwcap;
#include <asm/hwcap.h>

/*
 * This yields a string that ld.so will use to load implementation
 * specific libraries for optimization. This is more specific in
 * intent than poking at uname or /proc/cpuinfo.
 */

#define ELF_PLATFORM  __elf_platform
extern const char *__elf_platform;

/* Zero all caller-visible GPRs (r1..r31) at exec; r0 is hardwired zero. */
#define ELF_PLAT_INIT(_r, load_addr)	do { \
	_r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0;	\
	_r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0;	\
	_r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0;	\
	_r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0;	\
	_r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0;	\
	_r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0;	\
	_r->regs[25] = _r->regs[26] = _r->regs[27] = _r->regs[28] = 0;	\
	_r->regs[29] = _r->regs[30] = _r->regs[31] = 0;			\
} while (0)

/*
 * This is the location that an ET_DYN program is loaded if exec'ed. Typical
 * use of this is to invoke "./ld.so someprog" to test out a new version of
 * the loader. We need to make sure that it is out of the way of the program
 * that it will "exec", and that there is sufficient room for the brk.
 */

#define ELF_ET_DYN_BASE	(TASK_SIZE / 3 * 2)

/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO							\
do {									\
	NEW_AUX_ENT(AT_SYSINFO_EHDR,					\
		    (unsigned long)current->mm->context.vdso);		\
} while (0)

#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp);

/* FP ABI requirements gathered while parsing program/interp headers. */
struct arch_elf_state {
	int fp_abi;
	int interp_fp_abi;
};

#define LOONGARCH_ABI_FP_ANY	(0)

#define INIT_ARCH_ELF_STATE {			\
	.fp_abi = LOONGARCH_ABI_FP_ANY,		\
	.interp_fp_abi = LOONGARCH_ABI_FP_ANY,	\
}

#define elf_read_implies_exec(ex, exec_stk) (exec_stk == EXSTACK_DEFAULT)

extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
			    bool is_interp, struct arch_elf_state *state);

extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
			  struct arch_elf_state *state);

extern void loongarch_set_personality_fcsr(struct arch_elf_state *state);

#endif /* _ASM_ELF_H */
+10
arch/loongarch/include/asm/exec.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_EXEC_H
#define _ASM_EXEC_H

/* Randomize/align the initial user stack pointer; defined in process code. */
extern unsigned long arch_align_stack(unsigned long sp);

#endif /* _ASM_EXEC_H */
+80
arch/loongarch/include/asm/module.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_MODULE_H
#define _ASM_MODULE_H

#include <asm/inst.h>
#include <asm-generic/module.h>

/* Depth of the software operand stack used by stack-based relocations. */
#define RELA_STACK_DEPTH 16

/* Bookkeeping for one module-private section (.plt or .plt.idx). */
struct mod_section {
	Elf_Shdr *shdr;
	int num_entries;
	int max_entries;
};

struct mod_arch_specific {
	struct mod_section plt;
	struct mod_section plt_idx;
};

/*
 * One PLT slot: lu12i.w + lu32i.d + lu52i.d materialize the 64-bit target
 * in $t1, then jirl jumps to it (low 12 bits folded into the jirl offset).
 */
struct plt_entry {
	u32 inst_lu12iw;
	u32 inst_lu32id;
	u32 inst_lu52id;
	u32 inst_jirl;
};

/* Parallel index entry: target address a PLT slot was emitted for. */
struct plt_idx_entry {
	unsigned long symbol_addr;
};

Elf_Addr module_emit_plt_entry(struct module *mod, unsigned long val);

/* Encode the 4-instruction trampoline that jumps to @val. */
static inline struct plt_entry emit_plt_entry(unsigned long val)
{
	u32 lu12iw, lu32id, lu52id, jirl;

	lu12iw = (lu12iw_op << 25 | (((val >> 12) & 0xfffff) << 5) | LOONGARCH_GPR_T1);
	lu32id = larch_insn_gen_lu32id(LOONGARCH_GPR_T1, ADDR_IMM(val, LU32ID));
	lu52id = larch_insn_gen_lu52id(LOONGARCH_GPR_T1, LOONGARCH_GPR_T1, ADDR_IMM(val, LU52ID));
	jirl = larch_insn_gen_jirl(0, LOONGARCH_GPR_T1, 0, (val & 0xfff));

	return (struct plt_entry) { lu12iw, lu32id, lu52id, jirl };
}

static inline struct plt_idx_entry emit_plt_idx_entry(unsigned long val)
{
	return (struct plt_idx_entry) { val };
}

/* Linear search .plt.idx for @val; returns slot index or -1 if absent. */
static inline int get_plt_idx(unsigned long val, const struct mod_section *sec)
{
	int i;
	struct plt_idx_entry *plt_idx = (struct plt_idx_entry *)sec->shdr->sh_addr;

	for (i = 0; i < sec->num_entries; i++) {
		if (plt_idx[i].symbol_addr == val)
			return i;
	}

	return -1;
}

/* Return the existing PLT slot for @val, or NULL if none was emitted yet. */
static inline struct plt_entry *get_plt_entry(unsigned long val,
					      const struct mod_section *sec_plt,
					      const struct mod_section *sec_plt_idx)
{
	int plt_idx = get_plt_idx(val, sec_plt_idx);
	struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;

	if (plt_idx < 0)
		return NULL;

	return plt + plt_idx;
}

#endif /* _ASM_MODULE_H */
+7
arch/loongarch/include/asm/module.lds.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */
/*
 * Placeholder .plt/.plt.idx sections for modules; the single BYTE keeps the
 * linker from discarding them, and module_frob_arch_sections() resizes them.
 */
SECTIONS {
	. = ALIGN(4);
	.plt : { BYTE(0) }
	.plt.idx : { BYTE(0) }
}
+19
arch/loongarch/include/asm/vermagic.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_VERMAGIC_H
#define _ASM_VERMAGIC_H

#define MODULE_PROC_FAMILY "LOONGARCH "

/* Word size is part of vermagic so 32/64-bit modules cannot be mixed. */
#ifdef CONFIG_32BIT
#define MODULE_KERNEL_TYPE "32BIT "
#elif defined CONFIG_64BIT
#define MODULE_KERNEL_TYPE "64BIT "
#endif

#define MODULE_ARCH_VERMAGIC \
	MODULE_PROC_FAMILY MODULE_KERNEL_TYPE

#endif /* _ASM_VERMAGIC_H */
+17
arch/loongarch/include/uapi/asm/auxvec.h
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
 * Author: Hanlu Li <lihanlu@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#ifndef __ASM_AUXVEC_H
#define __ASM_AUXVEC_H

/* Location of VDSO image. */
#define AT_SYSINFO_EHDR		33

#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */

#endif /* __ASM_AUXVEC_H */
+20
arch/loongarch/include/uapi/asm/hwcap.h
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_ASM_HWCAP_H
#define _UAPI_ASM_HWCAP_H

/* HWCAP flags (exported to userspace via AT_HWCAP; values are ABI). */
#define HWCAP_LOONGARCH_CPUCFG		(1 << 0)
#define HWCAP_LOONGARCH_LAM		(1 << 1)
#define HWCAP_LOONGARCH_UAL		(1 << 2)
#define HWCAP_LOONGARCH_FPU		(1 << 3)
#define HWCAP_LOONGARCH_LSX		(1 << 4)
#define HWCAP_LOONGARCH_LASX		(1 << 5)
#define HWCAP_LOONGARCH_CRC32		(1 << 6)
#define HWCAP_LOONGARCH_COMPLEX	(1 << 7)
#define HWCAP_LOONGARCH_CRYPTO		(1 << 8)
#define HWCAP_LOONGARCH_LVZ		(1 << 9)
#define HWCAP_LOONGARCH_LBT_X86	(1 << 10)
#define HWCAP_LOONGARCH_LBT_ARM	(1 << 11)
#define HWCAP_LOONGARCH_LBT_MIPS	(1 << 12)

#endif /* _UAPI_ASM_HWCAP_H */
+30
arch/loongarch/kernel/elf.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>

#include <asm/cpu-features.h>
#include <asm/cpu-info.h>

/* No per-program-header checks needed yet; accept everything. */
int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
		     bool is_interp, struct arch_elf_state *state)
{
	return 0;
}

/* No extra ELF validation needed yet; accept everything. */
int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
		   struct arch_elf_state *state)
{
	return 0;
}

/* Reset the new task's FCSR to the boot CPU's default at exec time. */
void loongarch_set_personality_fcsr(struct arch_elf_state *state)
{
	current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
}
+40
arch/loongarch/kernel/inst.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <asm/inst.h>

/* Encode "lu32i.d rd, imm" (sets bits 51:32 of rd). */
u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm)
{
	union loongarch_instruction insn;

	insn.reg1i20_format.opcode = lu32id_op;
	insn.reg1i20_format.rd = rd;
	insn.reg1i20_format.immediate = imm;

	return insn.word;
}

/* Encode "lu52i.d rd, rj, imm" (sets bits 63:52). */
u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
{
	union loongarch_instruction insn;

	insn.reg2i12_format.opcode = lu52id_op;
	insn.reg2i12_format.rd = rd;
	insn.reg2i12_format.rj = rj;
	insn.reg2i12_format.immediate = imm;

	return insn.word;
}

/* Encode "jirl rd, rj, offset"; the 16-bit immediate is (dest - pc) >> 2. */
u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest)
{
	union loongarch_instruction insn;

	insn.reg2i16_format.opcode = jirl_op;
	insn.reg2i16_format.rd = rd;
	insn.reg2i16_format.rj = rj;
	insn.reg2i16_format.immediate = (dest - pc) >> 2;

	return insn.word;
}
+121
arch/loongarch/kernel/module-sections.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>

/*
 * Return the address of a PLT trampoline for @val, emitting a new slot in
 * .plt (and recording @val in .plt.idx) if one does not exist yet.
 */
Elf_Addr module_emit_plt_entry(struct module *mod, unsigned long val)
{
	int nr;
	struct mod_section *plt_sec = &mod->arch.plt;
	struct mod_section *plt_idx_sec = &mod->arch.plt_idx;
	struct plt_entry *plt = get_plt_entry(val, plt_sec, plt_idx_sec);
	struct plt_idx_entry *plt_idx;

	if (plt)
		return (Elf_Addr)plt;

	nr = plt_sec->num_entries;

	/* There is no duplicate entry, create a new one */
	plt = (struct plt_entry *)plt_sec->shdr->sh_addr;
	plt[nr] = emit_plt_entry(val);
	plt_idx = (struct plt_idx_entry *)plt_idx_sec->shdr->sh_addr;
	plt_idx[nr] = emit_plt_idx_entry(val);

	plt_sec->num_entries++;
	plt_idx_sec->num_entries++;
	BUG_ON(plt_sec->num_entries > plt_sec->max_entries);

	return (Elf_Addr)&plt[nr];
}

static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
{
	return x->r_info == y->r_info && x->r_addend == y->r_addend;
}

/* True if rela[idx] duplicates an earlier entry (would share a PLT slot). */
static bool duplicate_rela(const Elf_Rela *rela, int idx)
{
	int i;

	for (i = 0; i < idx; i++) {
		if (is_rela_equal(&rela[i], &rela[idx]))
			return true;
	}

	return false;
}

/* Count distinct PLT-requiring relocations in one RELA section. */
static void count_max_entries(Elf_Rela *relas, int num, unsigned int *plts)
{
	unsigned int i, type;

	for (i = 0; i < num; i++) {
		type = ELF_R_TYPE(relas[i].r_info);
		if (type == R_LARCH_SOP_PUSH_PLT_PCREL) {
			if (!duplicate_rela(relas, i))
				(*plts)++;
		}
	}
}

/*
 * Locate the placeholder .plt/.plt.idx sections and resize them to hold the
 * worst-case number of PLT entries before the module sections are laid out.
 */
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned int i, num_plts = 0;

	/*
	 * Find the empty .plt sections.
	 */
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
			mod->arch.plt.shdr = sechdrs + i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt.idx"))
			mod->arch.plt_idx.shdr = sechdrs + i;
	}

	if (!mod->arch.plt.shdr) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!mod->arch.plt_idx.shdr) {
		pr_err("%s: module PLT.IDX section(s) missing\n", mod->name);
		return -ENOEXEC;
	}

	/* Calculate the maximum number of entries */
	for (i = 0; i < ehdr->e_shnum; i++) {
		int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela);
		Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
		Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;

		if (sechdrs[i].sh_type != SHT_RELA)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dst_sec->sh_flags & SHF_EXECINSTR))
			continue;

		count_max_entries(relas, num_rela, &num_plts);
	}

	/* Size as num_plts + 1 so the placeholder BYTE(0) stays covered. */
	mod->arch.plt.shdr->sh_type = SHT_NOBITS;
	mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES;
	mod->arch.plt.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_entry);
	mod->arch.plt.num_entries = 0;
	mod->arch.plt.max_entries = num_plts;

	mod->arch.plt_idx.shdr->sh_type = SHT_NOBITS;
	mod->arch.plt_idx.shdr->sh_flags = SHF_ALLOC;
	mod->arch.plt_idx.shdr->sh_addralign = L1_CACHE_BYTES;
	mod->arch.plt_idx.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_idx_entry);
	mod->arch.plt_idx.num_entries = 0;
	mod->arch.plt_idx.max_entries = num_plts;

	return 0;
}
+374
arch/loongarch/kernel/module.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Author: Hanlu Li <lihanlu@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#define pr_fmt(fmt) "kmod: " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>

/* True if @val fits in a @bit-wide signed immediate. */
static inline bool signed_imm_check(long val, unsigned int bit)
{
	return -(1L << (bit - 1)) <= val && val < (1L << (bit - 1));
}

/* True if @val fits in a @bit-wide unsigned immediate. */
static inline bool unsigned_imm_check(unsigned long val, unsigned int bit)
{
	return val < (1UL << bit);
}

/*
 * The R_LARCH_SOP_* relocations form a small stack machine: PUSH relocs
 * push operands, operator relocs combine them, and POP relocs write the
 * result into an instruction's immediate field.
 */
static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
{
	if (*rela_stack_top >= RELA_STACK_DEPTH)
		return -ENOEXEC;

	rela_stack[(*rela_stack_top)++] = stack_value;
	pr_debug("%s stack_value = 0x%llx\n", __func__, stack_value);

	return 0;
}

static int rela_stack_pop(s64 *stack_value, s64 *rela_stack, size_t *rela_stack_top)
{
	if (*rela_stack_top == 0)
		return -ENOEXEC;

	*stack_value = rela_stack[--(*rela_stack_top)];
	pr_debug("%s stack_value = 0x%llx\n", __func__, *stack_value);

	return 0;
}

static int apply_r_larch_none(struct module *mod, u32 *location, Elf_Addr v,
			s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
	return 0;
}

static int apply_r_larch_error(struct module *me, u32 *location, Elf_Addr v,
			s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
	pr_err("%s: Unsupport relocation type %u, please add its support.\n", me->name, type);
	return -EINVAL;
}

static int apply_r_larch_32(struct module *mod, u32 *location, Elf_Addr v,
			s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
	*location = v;
	return 0;
}

static int apply_r_larch_64(struct module *mod, u32 *location, Elf_Addr v,
			s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
	*(Elf_Addr *)location = v;
	return 0;
}

static int apply_r_larch_sop_push_pcrel(struct module *mod, u32 *location, Elf_Addr v,
			s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
	return rela_stack_push(v - (u64)location, rela_stack, rela_stack_top);
}

static int apply_r_larch_sop_push_absolute(struct module *mod, u32 *location, Elf_Addr v,
			s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
	return rela_stack_push(v, rela_stack, rela_stack_top);
}

/* Duplicate the top-of-stack operand (pop once, push twice). */
static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Addr v,
			s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
	int err = 0;
	s64 opr1;

	err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
	if (err)
		return err;
	err = rela_stack_push(opr1, rela_stack, rela_stack_top);
	if (err)
		return err;
	err = rela_stack_push(opr1, rela_stack, rela_stack_top);
	if (err)
		return err;

	return 0;
}

/* If the target is outside the ±128M pc-relative range, go via a PLT slot. */
static int apply_r_larch_sop_push_plt_pcrel(struct module *mod, u32 *location, Elf_Addr v,
			s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
	ptrdiff_t offset = (void *)v - (void *)location;

	if (offset >= SZ_128M)
		v = module_emit_plt_entry(mod, v);

	if (offset < -SZ_128M)
		v = module_emit_plt_entry(mod, v);

	return apply_r_larch_sop_push_pcrel(mod, location, v, rela_stack, rela_stack_top, type);
}

/* Binary/ternary stack operators: pop operands, push the result. */
static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v,
			s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
	int err = 0;
	s64 opr1, opr2, opr3;

	if (type == R_LARCH_SOP_IF_ELSE) {
		err = rela_stack_pop(&opr3, rela_stack, rela_stack_top);
		if (err)
			return err;
	}

	err = rela_stack_pop(&opr2, rela_stack, rela_stack_top);
	if (err)
		return err;
	err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
	if (err)
		return err;

	switch (type) {
	case R_LARCH_SOP_AND:
		err = rela_stack_push(opr1 & opr2, rela_stack, rela_stack_top);
		break;
	case R_LARCH_SOP_ADD:
		err = rela_stack_push(opr1 + opr2, rela_stack, rela_stack_top);
		break;
	case R_LARCH_SOP_SUB:
		err = rela_stack_push(opr1 - opr2, rela_stack, rela_stack_top);
		break;
	case R_LARCH_SOP_SL:
		err = rela_stack_push(opr1 << opr2, rela_stack, rela_stack_top);
		break;
	case R_LARCH_SOP_SR:
		err = rela_stack_push(opr1 >> opr2, rela_stack, rela_stack_top);
		break;
	case R_LARCH_SOP_IF_ELSE:
		err = rela_stack_push(opr1 ? opr2 : opr3, rela_stack, rela_stack_top);
		break;
	default:
		pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
		return -EINVAL;
	}

	return err;
}

/* Pop the final value and patch it into the instruction's immediate field. */
static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Addr v,
			s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
	int err = 0;
	s64 opr1;
	union loongarch_instruction *insn = (union loongarch_instruction *)location;

	err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
	if (err)
		return err;

	switch (type) {
	case R_LARCH_SOP_POP_32_U_10_12:
		if (!unsigned_imm_check(opr1, 12))
			goto overflow;

		/* (*(uint32_t *) PC) [21 ... 10] = opr [11 ... 0] */
		insn->reg2i12_format.immediate = opr1 & 0xfff;
		return 0;
	case R_LARCH_SOP_POP_32_S_10_12:
		if (!signed_imm_check(opr1, 12))
			goto overflow;

		insn->reg2i12_format.immediate = opr1 & 0xfff;
		return 0;
	case R_LARCH_SOP_POP_32_S_10_16:
		if (!signed_imm_check(opr1, 16))
			goto overflow;

		insn->reg2i16_format.immediate = opr1 & 0xffff;
		return 0;
	case R_LARCH_SOP_POP_32_S_10_16_S2:
		/* branch targets must be 4-byte aligned */
		if (opr1 % 4)
			goto unaligned;

		if (!signed_imm_check(opr1, 18))
			goto overflow;

		insn->reg2i16_format.immediate = (opr1 >> 2) & 0xffff;
		return 0;
	case R_LARCH_SOP_POP_32_S_5_20:
		if (!signed_imm_check(opr1, 20))
			goto overflow;

		insn->reg1i20_format.immediate = (opr1) & 0xfffff;
		return 0;
	case R_LARCH_SOP_POP_32_S_0_5_10_16_S2:
		if (opr1 % 4)
			goto unaligned;

		if (!signed_imm_check(opr1, 23))
			goto overflow;

		opr1 >>= 2;
		insn->reg1i21_format.immediate_l = opr1 & 0xffff;
		insn->reg1i21_format.immediate_h = (opr1 >> 16) & 0x1f;
		return 0;
	case R_LARCH_SOP_POP_32_S_0_10_10_16_S2:
		if (opr1 % 4)
			goto unaligned;

		if (!signed_imm_check(opr1, 28))
			goto overflow;

		opr1 >>= 2;
		insn->reg0i26_format.immediate_l = opr1 & 0xffff;
		insn->reg0i26_format.immediate_h = (opr1 >> 16) & 0x3ff;
		return 0;
	case R_LARCH_SOP_POP_32_U:
		if (!unsigned_imm_check(opr1, 32))
			goto overflow;

		/* (*(uint32_t *) PC) = opr */
		*location = (u32)opr1;
		return 0;
	default:
		pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
		return -EINVAL;
	}

overflow:
	pr_err("module %s: opr1 = 0x%llx overflow! dangerous %s (%u) relocation\n",
		mod->name, opr1, __func__, type);
	return -ENOEXEC;

unaligned:
	pr_err("module %s: opr1 = 0x%llx unaligned! dangerous %s (%u) relocation\n",
		mod->name, opr1, __func__, type);
	return -ENOEXEC;
}

static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
			s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
	switch (type) {
	case R_LARCH_ADD32:
		*(s32 *)location += v;
		return 0;
	case R_LARCH_ADD64:
		*(s64 *)location += v;
		return 0;
	case R_LARCH_SUB32:
		*(s32 *)location -= v;
		return 0;
	case R_LARCH_SUB64:
		*(s64 *)location -= v;
		return 0;
	default:
		pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
		return -EINVAL;
	}
}

/*
 * reloc_handlers_rela() - Apply a particular relocation to a module
 * @mod: the module to apply the reloc to
 * @location: the address at which the reloc is to be applied
 * @v: the value of the reloc, with addend for RELA-style
 * @rela_stack: the stack used for store relocation info, LOCAL to THIS module
 * @rela_stack_top: where the stack operation(pop/push) applies to
 *
 * Return: 0 upon success, else -ERRNO
 */
typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
			s64 *rela_stack, size_t *rela_stack_top, unsigned int type);

/* The handlers for known reloc types */
static reloc_rela_handler reloc_rela_handlers[] = {
	/* default to "unsupported" so every in-range type has a handler */
	[R_LARCH_NONE ... R_LARCH_SUB64]		     = apply_r_larch_error,

	[R_LARCH_NONE]					     = apply_r_larch_none,
	[R_LARCH_32]					     = apply_r_larch_32,
	[R_LARCH_64]					     = apply_r_larch_64,
	[R_LARCH_MARK_LA]				     = apply_r_larch_none,
	[R_LARCH_MARK_PCREL]				     = apply_r_larch_none,
	[R_LARCH_SOP_PUSH_PCREL]			     = apply_r_larch_sop_push_pcrel,
	[R_LARCH_SOP_PUSH_ABSOLUTE]			     = apply_r_larch_sop_push_absolute,
	[R_LARCH_SOP_PUSH_DUP]				     = apply_r_larch_sop_push_dup,
	[R_LARCH_SOP_PUSH_PLT_PCREL]			     = apply_r_larch_sop_push_plt_pcrel,
	[R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE]	     = apply_r_larch_sop,
	[R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
	[R_LARCH_ADD32 ... R_LARCH_SUB64]		     = apply_r_larch_add_sub,
};

/* Resolve and apply one RELA relocation section of a module. */
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
		       unsigned int symindex, unsigned int relsec,
		       struct module *mod)
{
	int i, err;
	unsigned int type;
	s64 rela_stack[RELA_STACK_DEPTH];
	size_t rela_stack_top = 0;
	reloc_rela_handler handler;
	void *location;
	Elf_Addr v;
	Elf_Sym *sym;
	Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;

	pr_debug("%s: Applying relocate section %u to %u\n", __func__, relsec,
	       sechdrs[relsec].sh_info);

	rela_stack_top = 0;
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_R_SYM(rel[i].r_info);
		if (IS_ERR_VALUE(sym->st_value)) {
			/* Ignore unresolved weak symbol */
			if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
				continue;
			pr_warn("%s: Unknown symbol %s\n", mod->name, strtab + sym->st_name);
			return -ENOENT;
		}

		type = ELF_R_TYPE(rel[i].r_info);

		if (type < ARRAY_SIZE(reloc_rela_handlers))
			handler = reloc_rela_handlers[type];
		else
			handler = NULL;

		if (!handler) {
			pr_err("%s: Unknown relocation type %u\n", mod->name, type);
			return -EINVAL;
		}

		pr_debug("type %d st_value %llx r_addend %llx loc %llx\n",
		       (int)ELF_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)location);

		v = sym->st_value + rel[i].r_addend;
		err = handler(mod, location, v, rela_stack, &rela_stack_top, type);
		if (err)
			return err;
	}

	return 0;
}

/* Allocate module memory from the dedicated module VA range. */
void *module_alloc(unsigned long size)
{
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
			GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0));
}