Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'objtool/core' of https://git.kernel.org/pub/scm/linux/kernel/git/jpoimboe/linux

This series introduces new objtool features and a klp-build script to
generate livepatch modules using a source .patch as input.

This builds on concepts from the longstanding out-of-tree kpatch [1]
project which began in 2012 and has been used for many years to generate
livepatch modules for production kernels. However, this is a complete
rewrite which incorporates hard-earned lessons from 12+ years of
maintaining kpatch.

Key improvements compared to kpatch-build:

- Integrated with objtool: Leverages objtool's existing control-flow
graph analysis to help detect changed functions.

- Works on vmlinux.o: Supports late-linked objects, making it
compatible with LTO, IBT, and similar.

- Simplified code base: ~3k fewer lines of code.

- Upstream: No more out-of-tree #ifdef hacks, far less cruft.

- Cleaner internals: Vastly simplified logic for symbol/section/reloc
inclusion and special section extraction.

- Robust __LINE__ macro handling: Avoids false positive binary diffs
caused by the __LINE__ macro by introducing a fix-patch-lines script
which injects #line directives into the source .patch to preserve
the original line numbers at compile time.

The primary user interface is the klp-build script which does the
following:

- Builds an original kernel with -ffunction-sections and
-fdata-sections, plus objtool function checksumming.

- Applies the .patch file and rebuilds the kernel using the same
options.

- Runs 'objtool klp diff' to detect changed functions and generate
intermediate binary diff objects.

- Builds a kernel module which links the diff objects with some
livepatch module init code (scripts/livepatch/init.c).

- Finalizes the livepatch module (aka work around linker wreckage)
using 'objtool klp post-link'.

I've tested with a variety of patches on defconfig and Fedora-config
kernels with both GCC and Clang.

+5195 -899
+2 -1
MAINTAINERS
··· 14439 14439 F: Documentation/ABI/testing/sysfs-kernel-livepatch 14440 14440 F: Documentation/livepatch/ 14441 14441 F: arch/powerpc/include/asm/livepatch.h 14442 - F: include/linux/livepatch.h 14442 + F: include/linux/livepatch*.h 14443 14443 F: kernel/livepatch/ 14444 14444 F: kernel/module/livepatch.c 14445 14445 F: samples/livepatch/ 14446 + F: scripts/livepatch/ 14446 14447 F: tools/testing/selftests/livepatch/ 14447 14448 14448 14449 LLC (802.2)
+1 -1
arch/s390/include/asm/nospec-insn.h
··· 19 19 #ifdef CONFIG_EXPOLINE_EXTERN 20 20 SYM_CODE_START(\name) 21 21 #else 22 - .pushsection .text.\name,"axG",@progbits,\name,comdat 22 + .pushsection .text..\name,"axG",@progbits,\name,comdat 23 23 .globl \name 24 24 .hidden \name 25 25 .type \name,@function
+1 -1
arch/s390/kernel/vmlinux.lds.S
··· 51 51 IRQENTRY_TEXT 52 52 SOFTIRQENTRY_TEXT 53 53 FTRACE_HOTPATCH_TRAMPOLINES_TEXT 54 - *(.text.*_indirect_*) 54 + *(.text..*_indirect_*) 55 55 *(.gnu.warning) 56 56 . = ALIGN(PAGE_SIZE); 57 57 _etext = .; /* End of text section */
+1
arch/x86/Kconfig
··· 261 261 select HAVE_FUNCTION_ERROR_INJECTION 262 262 select HAVE_KRETPROBES 263 263 select HAVE_RETHOOK 264 + select HAVE_KLP_BUILD if X86_64 264 265 select HAVE_LIVEPATCH if X86_64 265 266 select HAVE_MIXED_BREAKPOINTS_REGS 266 267 select HAVE_MOD_ARCH_SPECIFIC
+4
arch/x86/include/asm/alternative.h
··· 198 198 199 199 #define ALTINSTR_ENTRY(ft_flags) \ 200 200 ".pushsection .altinstructions,\"a\"\n" \ 201 + ANNOTATE_DATA_SPECIAL \ 201 202 " .long 771b - .\n" /* label */ \ 202 203 " .long 774f - .\n" /* new instruction */ \ 203 204 " .4byte " __stringify(ft_flags) "\n" /* feature + flags */ \ ··· 208 207 209 208 #define ALTINSTR_REPLACEMENT(newinstr) /* replacement */ \ 210 209 ".pushsection .altinstr_replacement, \"ax\"\n" \ 210 + ANNOTATE_DATA_SPECIAL \ 211 211 "# ALT: replacement\n" \ 212 212 "774:\n\t" newinstr "\n775:\n" \ 213 213 ".popsection\n" ··· 339 337 * instruction. See apply_alternatives(). 340 338 */ 341 339 .macro altinstr_entry orig alt ft_flags orig_len alt_len 340 + ANNOTATE_DATA_SPECIAL 342 341 .long \orig - . 343 342 .long \alt - . 344 343 .4byte \ft_flags ··· 368 365 .popsection ; \ 369 366 .pushsection .altinstr_replacement,"ax" ; \ 370 367 743: \ 368 + ANNOTATE_DATA_SPECIAL ; \ 371 369 newinst ; \ 372 370 744: \ 373 371 .popsection ;
+5
arch/x86/include/asm/asm.h
··· 2 2 #ifndef _ASM_X86_ASM_H 3 3 #define _ASM_X86_ASM_H 4 4 5 + #include <linux/annotate.h> 6 + 5 7 #ifdef __ASSEMBLER__ 6 8 # define __ASM_FORM(x, ...) x,## __VA_ARGS__ 7 9 # define __ASM_FORM_RAW(x, ...) x,## __VA_ARGS__ ··· 134 132 # define _ASM_EXTABLE_TYPE(from, to, type) \ 135 133 .pushsection "__ex_table","a" ; \ 136 134 .balign 4 ; \ 135 + ANNOTATE_DATA_SPECIAL ; \ 137 136 .long (from) - . ; \ 138 137 .long (to) - . ; \ 139 138 .long type ; \ ··· 182 179 # define _ASM_EXTABLE_TYPE(from, to, type) \ 183 180 " .pushsection \"__ex_table\",\"a\"\n" \ 184 181 " .balign 4\n" \ 182 + ANNOTATE_DATA_SPECIAL \ 185 183 " .long (" #from ") - .\n" \ 186 184 " .long (" #to ") - .\n" \ 187 185 " .long " __stringify(type) " \n" \ ··· 191 187 # define _ASM_EXTABLE_TYPE_REG(from, to, type, reg) \ 192 188 " .pushsection \"__ex_table\",\"a\"\n" \ 193 189 " .balign 4\n" \ 190 + ANNOTATE_DATA_SPECIAL \ 194 191 " .long (" #from ") - .\n" \ 195 192 " .long (" #to ") - .\n" \ 196 193 DEFINE_EXTABLE_TYPE_REG \
+1
arch/x86/include/asm/bug.h
··· 57 57 #define _BUG_FLAGS_ASM(ins, file, line, flags, size, extra) \ 58 58 "1:\t" ins "\n" \ 59 59 ".pushsection __bug_table,\"aw\"\n" \ 60 + ANNOTATE_DATA_SPECIAL \ 60 61 __BUG_ENTRY(file, line, flags) \ 61 62 "\t.org 2b + " size "\n" \ 62 63 ".popsection\n" \
+1
arch/x86/include/asm/cpufeature.h
··· 101 101 asm goto(ALTERNATIVE_TERNARY("jmp 6f", %c[feature], "", "jmp %l[t_no]") 102 102 ".pushsection .altinstr_aux,\"ax\"\n" 103 103 "6:\n" 104 + ANNOTATE_DATA_SPECIAL 104 105 " testb %[bitnum], %a[cap_byte]\n" 105 106 " jnz %l[t_yes]\n" 106 107 " jmp %l[t_no]\n"
+1
arch/x86/include/asm/jump_label.h
··· 15 15 #define JUMP_TABLE_ENTRY(key, label) \ 16 16 ".pushsection __jump_table, \"aw\" \n\t" \ 17 17 _ASM_ALIGN "\n\t" \ 18 + ANNOTATE_DATA_SPECIAL \ 18 19 ".long 1b - . \n\t" \ 19 20 ".long " label " - . \n\t" \ 20 21 _ASM_PTR " " key " - . \n\t" \
+28 -23
arch/x86/kernel/alternative.c
··· 2244 2244 * See entry_{32,64}.S for more details. 2245 2245 */ 2246 2246 2247 - /* 2248 - * We define the int3_magic() function in assembly to control the calling 2249 - * convention such that we can 'call' it from assembly. 2250 - */ 2251 - 2252 - extern void int3_magic(unsigned int *ptr); /* defined in asm */ 2247 + extern void int3_selftest_asm(unsigned int *ptr); 2253 2248 2254 2249 asm ( 2255 2250 " .pushsection .init.text, \"ax\", @progbits\n" 2256 - " .type int3_magic, @function\n" 2257 - "int3_magic:\n" 2251 + " .type int3_selftest_asm, @function\n" 2252 + "int3_selftest_asm:\n" 2258 2253 ANNOTATE_NOENDBR 2259 - " movl $1, (%" _ASM_ARG1 ")\n" 2254 + /* 2255 + * INT3 padded with NOP to CALL_INSN_SIZE. The INT3 triggers an 2256 + * exception, then the int3_exception_nb notifier emulates a call to 2257 + * int3_selftest_callee(). 2258 + */ 2259 + " int3; nop; nop; nop; nop\n" 2260 2260 ASM_RET 2261 - " .size int3_magic, .-int3_magic\n" 2261 + " .size int3_selftest_asm, . - int3_selftest_asm\n" 2262 + " .popsection\n" 2263 + ); 2264 + 2265 + extern void int3_selftest_callee(unsigned int *ptr); 2266 + 2267 + asm ( 2268 + " .pushsection .init.text, \"ax\", @progbits\n" 2269 + " .type int3_selftest_callee, @function\n" 2270 + "int3_selftest_callee:\n" 2271 + ANNOTATE_NOENDBR 2272 + " movl $0x1234, (%" _ASM_ARG1 ")\n" 2273 + ASM_RET 2274 + " .size int3_selftest_callee, . 
- int3_selftest_callee\n" 2262 2275 " .popsection\n" 2263 2276 ); 2264 2277 ··· 2280 2267 static int __init 2281 2268 int3_exception_notify(struct notifier_block *self, unsigned long val, void *data) 2282 2269 { 2283 - unsigned long selftest = (unsigned long)&int3_selftest_ip; 2270 + unsigned long selftest = (unsigned long)&int3_selftest_asm; 2284 2271 struct die_args *args = data; 2285 2272 struct pt_regs *regs = args->regs; 2286 2273 ··· 2295 2282 if (regs->ip - INT3_INSN_SIZE != selftest) 2296 2283 return NOTIFY_DONE; 2297 2284 2298 - int3_emulate_call(regs, (unsigned long)&int3_magic); 2285 + int3_emulate_call(regs, (unsigned long)&int3_selftest_callee); 2299 2286 return NOTIFY_STOP; 2300 2287 } 2301 2288 ··· 2311 2298 BUG_ON(register_die_notifier(&int3_exception_nb)); 2312 2299 2313 2300 /* 2314 - * Basically: int3_magic(&val); but really complicated :-) 2315 - * 2316 - * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb 2317 - * notifier above will emulate CALL for us. 2301 + * Basically: int3_selftest_callee(&val); but really complicated :-) 2318 2302 */ 2319 - asm volatile ("int3_selftest_ip:\n\t" 2320 - ANNOTATE_NOENDBR 2321 - " int3; nop; nop; nop; nop\n\t" 2322 - : ASM_CALL_CONSTRAINT 2323 - : __ASM_SEL_RAW(a, D) (&val) 2324 - : "memory"); 2303 + int3_selftest_asm(&val); 2325 2304 2326 - BUG_ON(val != 1); 2305 + BUG_ON(val != 0x1234); 2327 2306 2328 2307 unregister_die_notifier(&int3_exception_nb); 2329 2308 }
-4
arch/x86/kernel/kprobes/opt.c
··· 103 103 104 104 asm ( 105 105 ".pushsection .rodata\n" 106 - "optprobe_template_func:\n" 107 106 ".global optprobe_template_entry\n" 108 107 "optprobe_template_entry:\n" 109 108 #ifdef CONFIG_X86_64 ··· 158 159 ".global optprobe_template_end\n" 159 160 "optprobe_template_end:\n" 160 161 ".popsection\n"); 161 - 162 - void optprobe_template_func(void); 163 - STACK_FRAME_NON_STANDARD(optprobe_template_func); 164 162 165 163 #define TMPL_CLAC_IDX \ 166 164 ((long)optprobe_template_clac - (long)optprobe_template_entry)
+9 -6
arch/x86/kernel/module.c
··· 97 97 DEBUGP("%s relocate section %u to %u\n", 98 98 apply ? "Applying" : "Clearing", 99 99 relsec, sechdrs[relsec].sh_info); 100 + 100 101 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 101 102 size_t size; 102 103 ··· 163 162 164 163 if (apply) { 165 164 if (memcmp(loc, &zero, size)) { 166 - pr_err("x86/modules: Invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n", 167 - (int)ELF64_R_TYPE(rel[i].r_info), loc, val); 165 + pr_err("x86/modules: Invalid relocation target, existing value is nonzero for sec %u, idx %u, type %d, loc %lx, val %llx\n", 166 + relsec, i, (int)ELF64_R_TYPE(rel[i].r_info), 167 + (unsigned long)loc, val); 168 168 return -ENOEXEC; 169 169 } 170 170 write(loc, &val, size); 171 171 } else { 172 172 if (memcmp(loc, &val, size)) { 173 - pr_warn("x86/modules: Invalid relocation target, existing value does not match expected value for type %d, loc %p, val %Lx\n", 174 - (int)ELF64_R_TYPE(rel[i].r_info), loc, val); 173 + pr_warn("x86/modules: Invalid relocation target, existing value does not match expected value for sec %u, idx %u, type %d, loc %lx, val %llx\n", 174 + relsec, i, (int)ELF64_R_TYPE(rel[i].r_info), 175 + (unsigned long)loc, val); 175 176 return -ENOEXEC; 176 177 } 177 178 write(loc, &zero, size); ··· 182 179 return 0; 183 180 184 181 overflow: 185 - pr_err("overflow in relocation type %d val %Lx\n", 186 - (int)ELF64_R_TYPE(rel[i].r_info), val); 182 + pr_err("overflow in relocation type %d val %llx sec %u idx %d\n", 183 + (int)ELF64_R_TYPE(rel[i].r_info), val, relsec, i); 187 184 pr_err("`%s' likely not compiled with -mcmodel=kernel\n", 188 185 me->name); 189 186 return -ENOEXEC;
+4
drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
··· 53 53 usnic_uiom_interval_tree_remove(struct usnic_uiom_interval_node *node, 54 54 struct rb_root_cached *root); 55 55 extern struct usnic_uiom_interval_node * 56 + usnic_uiom_interval_tree_subtree_search(struct usnic_uiom_interval_node *node, 57 + unsigned long start, 58 + unsigned long last); 59 + extern struct usnic_uiom_interval_node * 56 60 usnic_uiom_interval_tree_iter_first(struct rb_root_cached *root, 57 61 unsigned long start, 58 62 unsigned long last);
+12 -28
include/asm-generic/vmlinux.lds.h
··· 87 87 #define ALIGN_FUNCTION() . = ALIGN(CONFIG_FUNCTION_ALIGNMENT) 88 88 89 89 /* 90 - * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which 91 - * generates .data.identifier sections, which need to be pulled in with 92 - * .data. We don't want to pull in .data..other sections, which Linux 93 - * has defined. Same for text and bss. 90 + * Support -ffunction-sections by matching .text and .text.*, 91 + * but exclude '.text..*'. 94 92 * 95 - * With LTO_CLANG, the linker also splits sections by default, so we need 96 - * these macros to combine the sections during the final link. 97 - * 98 - * With AUTOFDO_CLANG and PROPELLER_CLANG, by default, the linker splits 99 - * text sections and regroups functions into subsections. 100 - * 101 - * RODATA_MAIN is not used because existing code already defines .rodata.x 102 - * sections to be brought in with rodata. 93 + * Special .text.* sections that are typically grouped separately, such as 94 + * .text.unlikely or .text.hot, must be matched explicitly before using 95 + * TEXT_MAIN. 103 96 */ 104 - #if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) || \ 105 - defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) 106 97 #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* 107 - #else 108 - #define TEXT_MAIN .text 109 - #endif 110 - #if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) 98 + 99 + /* 100 + * Support -fdata-sections by matching .data, .data.*, and others, 101 + * but exclude '.data..*'. 
102 + */ 111 103 #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data.rel.* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L* 112 104 #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* 113 105 #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L* 114 106 #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral* 115 107 #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* 116 - #else 117 - #define DATA_MAIN .data .data.rel .data.rel.local 118 - #define SDATA_MAIN .sdata 119 - #define RODATA_MAIN .rodata 120 - #define BSS_MAIN .bss 121 - #define SBSS_MAIN .sbss 122 - #endif 123 108 124 109 /* 125 110 * GCC 4.5 and later have a 32 bytes section alignment for structures. ··· 566 581 * during second ld run in second ld pass when generating System.map 567 582 * 568 583 * TEXT_MAIN here will match symbols with a fixed pattern (for example, 569 - * .text.hot or .text.unlikely) if dead code elimination or 570 - * function-section is enabled. Match these symbols first before 571 - * TEXT_MAIN to ensure they are grouped together. 584 + * .text.hot or .text.unlikely). Match those before TEXT_MAIN to ensure 585 + * they get grouped together. 572 586 * 573 587 * Also placing .text.hot section at the beginning of a page, this 574 588 * would help the TLB performance.
+134
include/linux/annotate.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_ANNOTATE_H 3 + #define _LINUX_ANNOTATE_H 4 + 5 + #include <linux/objtool_types.h> 6 + 7 + #ifdef CONFIG_OBJTOOL 8 + 9 + #ifndef __ASSEMBLY__ 10 + 11 + #define __ASM_ANNOTATE(section, label, type) \ 12 + ".pushsection " section ",\"M\", @progbits, 8\n\t" \ 13 + ".long " __stringify(label) " - .\n\t" \ 14 + ".long " __stringify(type) "\n\t" \ 15 + ".popsection\n\t" 16 + 17 + #define ASM_ANNOTATE_LABEL(label, type) \ 18 + __ASM_ANNOTATE(".discard.annotate_insn", label, type) 19 + 20 + #define ASM_ANNOTATE(type) \ 21 + "911:\n\t" \ 22 + ASM_ANNOTATE_LABEL(911b, type) 23 + 24 + #define ASM_ANNOTATE_DATA(type) \ 25 + "912:\n\t" \ 26 + __ASM_ANNOTATE(".discard.annotate_data", 912b, type) 27 + 28 + #else /* __ASSEMBLY__ */ 29 + 30 + .macro __ANNOTATE section, type 31 + .Lhere_\@: 32 + .pushsection \section, "M", @progbits, 8 33 + .long .Lhere_\@ - . 34 + .long \type 35 + .popsection 36 + .endm 37 + 38 + .macro ANNOTATE type 39 + __ANNOTATE ".discard.annotate_insn", \type 40 + .endm 41 + 42 + .macro ANNOTATE_DATA type 43 + __ANNOTATE ".discard.annotate_data", \type 44 + .endm 45 + 46 + #endif /* __ASSEMBLY__ */ 47 + 48 + #else /* !CONFIG_OBJTOOL */ 49 + #ifndef __ASSEMBLY__ 50 + #define ASM_ANNOTATE_LABEL(label, type) "" 51 + #define ASM_ANNOTATE(type) 52 + #define ASM_ANNOTATE_DATA(type) 53 + #else /* __ASSEMBLY__ */ 54 + .macro ANNOTATE type 55 + .endm 56 + .macro ANNOTATE_DATA type 57 + .endm 58 + #endif /* __ASSEMBLY__ */ 59 + #endif /* !CONFIG_OBJTOOL */ 60 + 61 + #ifndef __ASSEMBLY__ 62 + 63 + /* 64 + * Annotate away the various 'relocation to !ENDBR` complaints; knowing that 65 + * these relocations will never be used for indirect calls. 66 + */ 67 + #define ANNOTATE_NOENDBR ASM_ANNOTATE(ANNOTYPE_NOENDBR) 68 + #define ANNOTATE_NOENDBR_SYM(sym) asm(ASM_ANNOTATE_LABEL(sym, ANNOTYPE_NOENDBR)) 69 + 70 + /* 71 + * This should be used immediately before an indirect jump/call. 
It tells 72 + * objtool the subsequent indirect jump/call is vouched safe for retpoline 73 + * builds. 74 + */ 75 + #define ANNOTATE_RETPOLINE_SAFE ASM_ANNOTATE(ANNOTYPE_RETPOLINE_SAFE) 76 + /* 77 + * See linux/instrumentation.h 78 + */ 79 + #define ANNOTATE_INSTR_BEGIN(label) ASM_ANNOTATE_LABEL(label, ANNOTYPE_INSTR_BEGIN) 80 + #define ANNOTATE_INSTR_END(label) ASM_ANNOTATE_LABEL(label, ANNOTYPE_INSTR_END) 81 + /* 82 + * objtool annotation to ignore the alternatives and only consider the original 83 + * instruction(s). 84 + */ 85 + #define ANNOTATE_IGNORE_ALTERNATIVE ASM_ANNOTATE(ANNOTYPE_IGNORE_ALTS) 86 + /* 87 + * This macro indicates that the following intra-function call is valid. 88 + * Any non-annotated intra-function call will cause objtool to issue a warning. 89 + */ 90 + #define ANNOTATE_INTRA_FUNCTION_CALL ASM_ANNOTATE(ANNOTYPE_INTRA_FUNCTION_CALL) 91 + /* 92 + * Use objtool to validate the entry requirement that all code paths do 93 + * VALIDATE_UNRET_END before RET. 94 + * 95 + * NOTE: The macro must be used at the beginning of a global symbol, otherwise 96 + * it will be ignored. 97 + */ 98 + #define ANNOTATE_UNRET_BEGIN ASM_ANNOTATE(ANNOTYPE_UNRET_BEGIN) 99 + /* 100 + * This should be used to refer to an instruction that is considered 101 + * terminating, like a noreturn CALL or UD2 when we know they are not -- eg 102 + * WARN using UD2. 103 + */ 104 + #define ANNOTATE_REACHABLE(label) ASM_ANNOTATE_LABEL(label, ANNOTYPE_REACHABLE) 105 + /* 106 + * This should not be used; it annotates away CFI violations. There are a few 107 + * valid use cases like kexec handover to the next kernel image, and there is 108 + * no security concern there. 109 + * 110 + * There are also a few real issues annotated away, like EFI because we can't 111 + * control the EFI code. 112 + */ 113 + #define ANNOTATE_NOCFI_SYM(sym) asm(ASM_ANNOTATE_LABEL(sym, ANNOTYPE_NOCFI)) 114 + 115 + /* 116 + * Annotate a special section entry. 
This enables livepatch module generation 117 + to find and extract individual special section entries as needed. 118 + */ 119 + #define ANNOTATE_DATA_SPECIAL ASM_ANNOTATE_DATA(ANNOTYPE_DATA_SPECIAL) 120 + 121 + #else /* __ASSEMBLY__ */ 122 + #define ANNOTATE_NOENDBR ANNOTATE type=ANNOTYPE_NOENDBR 123 + #define ANNOTATE_RETPOLINE_SAFE ANNOTATE type=ANNOTYPE_RETPOLINE_SAFE 124 + /* ANNOTATE_INSTR_BEGIN ANNOTATE type=ANNOTYPE_INSTR_BEGIN */ 125 + /* ANNOTATE_INSTR_END ANNOTATE type=ANNOTYPE_INSTR_END */ 126 + #define ANNOTATE_IGNORE_ALTERNATIVE ANNOTATE type=ANNOTYPE_IGNORE_ALTS 127 + #define ANNOTATE_INTRA_FUNCTION_CALL ANNOTATE type=ANNOTYPE_INTRA_FUNCTION_CALL 128 + #define ANNOTATE_UNRET_BEGIN ANNOTATE type=ANNOTYPE_UNRET_BEGIN 129 + #define ANNOTATE_REACHABLE ANNOTATE type=ANNOTYPE_REACHABLE 130 + #define ANNOTATE_NOCFI_SYM ANNOTATE type=ANNOTYPE_NOCFI 131 + #define ANNOTATE_DATA_SPECIAL ANNOTATE_DATA type=ANNOTYPE_DATA_SPECIAL 132 + #endif /* __ASSEMBLY__ */ 133 + 134 + #endif /* _LINUX_ANNOTATE_H */
+6 -2
include/linux/compiler.h
··· 163 163 __asm__ ("" : "=r" (var) : "0" (var)) 164 164 #endif 165 165 166 - #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) 166 + /* Format: __UNIQUE_ID_<name>_<__COUNTER__> */ 167 + #define __UNIQUE_ID(name) \ 168 + __PASTE(__UNIQUE_ID_, \ 169 + __PASTE(name, \ 170 + __PASTE(_, __COUNTER__))) 167 171 168 172 /** 169 173 * data_race - mark an expression as containing intentional data races ··· 287 283 */ 288 284 #define ___ADDRESSABLE(sym, __attrs) \ 289 285 static void * __used __attrs \ 290 - __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)(uintptr_t)&sym; 286 + __UNIQUE_ID(__PASTE(addressable_, sym)) = (void *)(uintptr_t)&sym; 291 287 292 288 #define __ADDRESSABLE(sym) \ 293 289 ___ADDRESSABLE(sym, __section(".discard.addressable"))
+5 -8
include/linux/elfnote.h
··· 60 60 61 61 #else /* !__ASSEMBLER__ */ 62 62 #include <uapi/linux/elf.h> 63 + #include <linux/compiler.h> 63 64 /* 64 65 * Use an anonymous structure which matches the shape of 65 66 * Elf{32,64}_Nhdr, but includes the name and desc data. The size and 66 67 * type of name and desc depend on the macro arguments. "name" must 67 - * be a literal string, and "desc" must be passed by value. You may 68 - * only define one note per line, since __LINE__ is used to generate 69 - * unique symbols. 68 + * be a literal string, and "desc" must be passed by value. 70 69 */ 71 - #define _ELFNOTE_PASTE(a,b) a##b 72 - #define _ELFNOTE(size, name, unique, type, desc) \ 70 + #define ELFNOTE(size, name, type, desc) \ 73 71 static const struct { \ 74 72 struct elf##size##_note _nhdr; \ 75 73 unsigned char _name[sizeof(name)] \ 76 74 __attribute__((aligned(sizeof(Elf##size##_Word)))); \ 77 75 typeof(desc) _desc \ 78 76 __attribute__((aligned(sizeof(Elf##size##_Word)))); \ 79 - } _ELFNOTE_PASTE(_note_, unique) \ 77 + } __UNIQUE_ID(note) \ 80 78 __used \ 81 79 __attribute__((section(".note." name), \ 82 80 aligned(sizeof(Elf##size##_Word)), \ ··· 87 89 name, \ 88 90 desc \ 89 91 } 90 - #define ELFNOTE(size, name, type, desc) \ 91 - _ELFNOTE(size, name, __LINE__, type, desc) 92 92 93 93 #define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc) 94 94 #define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc) 95 + 95 96 #endif /* __ASSEMBLER__ */ 96 97 97 98 #endif /* _LINUX_ELFNOTE_H */
+2 -1
include/linux/init.h
··· 200 200 201 201 /* Format: <modname>__<counter>_<line>_<fn> */ 202 202 #define __initcall_id(fn) \ 203 + __PASTE(kmod_, \ 203 204 __PASTE(__KBUILD_MODNAME, \ 204 205 __PASTE(__, \ 205 206 __PASTE(__COUNTER__, \ 206 207 __PASTE(_, \ 207 208 __PASTE(__LINE__, \ 208 - __PASTE(_, fn)))))) 209 + __PASTE(_, fn))))))) 209 210 210 211 /* Format: __<prefix>__<iid><id> */ 211 212 #define __initcall_name(prefix, __iid, id) \
+4
include/linux/interval_tree.h
··· 20 20 struct rb_root_cached *root); 21 21 22 22 extern struct interval_tree_node * 23 + interval_tree_subtree_search(struct interval_tree_node *node, 24 + unsigned long start, unsigned long last); 25 + 26 + extern struct interval_tree_node * 23 27 interval_tree_iter_first(struct rb_root_cached *root, 24 28 unsigned long start, unsigned long last); 25 29
+1 -1
include/linux/interval_tree_generic.h
··· 77 77 * Cond2: start <= ITLAST(node) \ 78 78 */ \ 79 79 \ 80 - static ITSTRUCT * \ 80 + ITSTATIC ITSTRUCT * \ 81 81 ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \ 82 82 { \ 83 83 while (true) { \
+1 -24
include/linux/livepatch.h
··· 13 13 #include <linux/ftrace.h> 14 14 #include <linux/completion.h> 15 15 #include <linux/list.h> 16 + #include <linux/livepatch_external.h> 16 17 #include <linux/livepatch_sched.h> 17 18 18 19 #if IS_ENABLED(CONFIG_LIVEPATCH) ··· 76 75 bool nop; 77 76 bool patched; 78 77 bool transition; 79 - }; 80 - 81 - struct klp_object; 82 - 83 - /** 84 - * struct klp_callbacks - pre/post live-(un)patch callback structure 85 - * @pre_patch: executed before code patching 86 - * @post_patch: executed after code patching 87 - * @pre_unpatch: executed before code unpatching 88 - * @post_unpatch: executed after code unpatching 89 - * @post_unpatch_enabled: flag indicating if post-unpatch callback 90 - * should run 91 - * 92 - * All callbacks are optional. Only the pre-patch callback, if provided, 93 - * will be unconditionally executed. If the parent klp_object fails to 94 - * patch for any reason, including a non-zero error status returned from 95 - * the pre-patch callback, no further callbacks will be executed. 96 - */ 97 - struct klp_callbacks { 98 - int (*pre_patch)(struct klp_object *obj); 99 - void (*post_patch)(struct klp_object *obj); 100 - void (*pre_unpatch)(struct klp_object *obj); 101 - void (*post_unpatch)(struct klp_object *obj); 102 - bool post_unpatch_enabled; 103 78 }; 104 79 105 80 /**
+76
include/linux/livepatch_external.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * External livepatch interfaces for patch creation tooling 4 + */ 5 + 6 + #ifndef _LINUX_LIVEPATCH_EXTERNAL_H_ 7 + #define _LINUX_LIVEPATCH_EXTERNAL_H_ 8 + 9 + #include <linux/types.h> 10 + 11 + #define KLP_RELOC_SEC_PREFIX ".klp.rela." 12 + #define KLP_SYM_PREFIX ".klp.sym." 13 + 14 + #define __KLP_PRE_PATCH_PREFIX __klp_pre_patch_callback_ 15 + #define __KLP_POST_PATCH_PREFIX __klp_post_patch_callback_ 16 + #define __KLP_PRE_UNPATCH_PREFIX __klp_pre_unpatch_callback_ 17 + #define __KLP_POST_UNPATCH_PREFIX __klp_post_unpatch_callback_ 18 + 19 + #define KLP_PRE_PATCH_PREFIX __stringify(__KLP_PRE_PATCH_PREFIX) 20 + #define KLP_POST_PATCH_PREFIX __stringify(__KLP_POST_PATCH_PREFIX) 21 + #define KLP_PRE_UNPATCH_PREFIX __stringify(__KLP_PRE_UNPATCH_PREFIX) 22 + #define KLP_POST_UNPATCH_PREFIX __stringify(__KLP_POST_UNPATCH_PREFIX) 23 + 24 + struct klp_object; 25 + 26 + typedef int (*klp_pre_patch_t)(struct klp_object *obj); 27 + typedef void (*klp_post_patch_t)(struct klp_object *obj); 28 + typedef void (*klp_pre_unpatch_t)(struct klp_object *obj); 29 + typedef void (*klp_post_unpatch_t)(struct klp_object *obj); 30 + 31 + /** 32 + * struct klp_callbacks - pre/post live-(un)patch callback structure 33 + * @pre_patch: executed before code patching 34 + * @post_patch: executed after code patching 35 + * @pre_unpatch: executed before code unpatching 36 + * @post_unpatch: executed after code unpatching 37 + * @post_unpatch_enabled: flag indicating if post-unpatch callback 38 + * should run 39 + * 40 + * All callbacks are optional. Only the pre-patch callback, if provided, 41 + * will be unconditionally executed. If the parent klp_object fails to 42 + * patch for any reason, including a non-zero error status returned from 43 + * the pre-patch callback, no further callbacks will be executed. 
44 + */ 45 + struct klp_callbacks { 46 + klp_pre_patch_t pre_patch; 47 + klp_post_patch_t post_patch; 48 + klp_pre_unpatch_t pre_unpatch; 49 + klp_post_unpatch_t post_unpatch; 50 + bool post_unpatch_enabled; 51 + }; 52 + 53 + /* 54 + * 'struct klp_{func,object}_ext' are compact "external" representations of 55 + * 'struct klp_{func,object}'. They are used by objtool for livepatch 56 + * generation. The structs are then read by the livepatch module and converted 57 + * to the real structs before calling klp_enable_patch(). 58 + * 59 + * TODO make these the official API for klp_enable_patch(). That should 60 + * simplify livepatch's interface as well as its data structure lifetime 61 + * management. 62 + */ 63 + struct klp_func_ext { 64 + const char *old_name; 65 + void *new_func; 66 + unsigned long sympos; 67 + }; 68 + 69 + struct klp_object_ext { 70 + const char *name; 71 + struct klp_func_ext *funcs; 72 + struct klp_callbacks callbacks; 73 + unsigned int nr_funcs; 74 + }; 75 + 76 + #endif /* _LINUX_LIVEPATCH_EXTERNAL_H_ */
+77
include/linux/livepatch_helpers.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_LIVEPATCH_HELPERS_H 3 + #define _LINUX_LIVEPATCH_HELPERS_H 4 + 5 + /* 6 + * Interfaces for use by livepatch patches 7 + */ 8 + 9 + #include <linux/syscalls.h> 10 + #include <linux/livepatch.h> 11 + 12 + #ifdef MODULE 13 + #define KLP_OBJNAME __KBUILD_MODNAME 14 + #else 15 + #define KLP_OBJNAME vmlinux 16 + #endif 17 + 18 + /* Livepatch callback registration */ 19 + 20 + #define KLP_CALLBACK_PTRS ".discard.klp_callback_ptrs" 21 + 22 + #define KLP_PRE_PATCH_CALLBACK(func) \ 23 + klp_pre_patch_t __used __section(KLP_CALLBACK_PTRS) \ 24 + __PASTE(__KLP_PRE_PATCH_PREFIX, KLP_OBJNAME) = func 25 + 26 + #define KLP_POST_PATCH_CALLBACK(func) \ 27 + klp_post_patch_t __used __section(KLP_CALLBACK_PTRS) \ 28 + __PASTE(__KLP_POST_PATCH_PREFIX, KLP_OBJNAME) = func 29 + 30 + #define KLP_PRE_UNPATCH_CALLBACK(func) \ 31 + klp_pre_unpatch_t __used __section(KLP_CALLBACK_PTRS) \ 32 + __PASTE(__KLP_PRE_UNPATCH_PREFIX, KLP_OBJNAME) = func 33 + 34 + #define KLP_POST_UNPATCH_CALLBACK(func) \ 35 + klp_post_unpatch_t __used __section(KLP_CALLBACK_PTRS) \ 36 + __PASTE(__KLP_POST_UNPATCH_PREFIX, KLP_OBJNAME) = func 37 + 38 + /* 39 + * Replace static_call() usage with this macro when create-diff-object 40 + * recommends it due to the original static call key living in a module. 41 + * 42 + * This converts the static call to a regular indirect call. 43 + */ 44 + #define KLP_STATIC_CALL(name) \ 45 + ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func)) 46 + 47 + /* Syscall patching */ 48 + 49 + #define KLP_SYSCALL_DEFINE1(name, ...) KLP_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) 50 + #define KLP_SYSCALL_DEFINE2(name, ...) KLP_SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) 51 + #define KLP_SYSCALL_DEFINE3(name, ...) KLP_SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) 52 + #define KLP_SYSCALL_DEFINE4(name, ...) KLP_SYSCALL_DEFINEx(4, _##name, __VA_ARGS__) 53 + #define KLP_SYSCALL_DEFINE5(name, ...) 
KLP_SYSCALL_DEFINEx(5, _##name, __VA_ARGS__) 54 + #define KLP_SYSCALL_DEFINE6(name, ...) KLP_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__) 55 + 56 + #define KLP_SYSCALL_DEFINEx(x, sname, ...) \ 57 + __KLP_SYSCALL_DEFINEx(x, sname, __VA_ARGS__) 58 + 59 + #ifdef CONFIG_X86_64 60 + // TODO move this to arch/x86/include/asm/syscall_wrapper.h and share code 61 + #define __KLP_SYSCALL_DEFINEx(x, name, ...) \ 62 + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ 63 + static inline long __klp_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ 64 + __X64_SYS_STUBx(x, name, __VA_ARGS__) \ 65 + __IA32_SYS_STUBx(x, name, __VA_ARGS__) \ 66 + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ 67 + { \ 68 + long ret = __klp_do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__));\ 69 + __MAP(x,__SC_TEST,__VA_ARGS__); \ 70 + __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ 71 + return ret; \ 72 + } \ 73 + static inline long __klp_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) 74 + 75 + #endif 76 + 77 + #endif /* _LINUX_LIVEPATCH_HELPERS_H */
+2
include/linux/mm.h
··· 3369 3369 struct rb_root_cached *root); 3370 3370 void vma_interval_tree_remove(struct vm_area_struct *node, 3371 3371 struct rb_root_cached *root); 3372 + struct vm_area_struct *vma_interval_tree_subtree_search(struct vm_area_struct *node, 3373 + unsigned long start, unsigned long last); 3372 3374 struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root, 3373 3375 unsigned long start, unsigned long last); 3374 3376 struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
+5 -91
include/linux/objtool.h
··· 3 3 #define _LINUX_OBJTOOL_H 4 4 5 5 #include <linux/objtool_types.h> 6 + #include <linux/annotate.h> 6 7 7 8 #ifdef CONFIG_OBJTOOL 8 9 9 - #include <asm/asm.h> 10 - 11 10 #ifndef __ASSEMBLY__ 12 11 13 - #define UNWIND_HINT(type, sp_reg, sp_offset, signal) \ 12 + #define UNWIND_HINT(type, sp_reg, sp_offset, signal) \ 14 13 "987: \n\t" \ 15 14 ".pushsection .discard.unwind_hints\n\t" \ 15 + ANNOTATE_DATA_SPECIAL \ 16 16 /* struct unwind_hint */ \ 17 17 ".long 987b - .\n\t" \ 18 18 ".short " __stringify(sp_offset) "\n\t" \ ··· 53 53 54 54 #define __ASM_BREF(label) label ## b 55 55 56 - #define __ASM_ANNOTATE(label, type) \ 57 - ".pushsection .discard.annotate_insn,\"M\",@progbits,8\n\t" \ 58 - ".long " __stringify(label) " - .\n\t" \ 59 - ".long " __stringify(type) "\n\t" \ 60 - ".popsection\n\t" 61 - 62 - #define ASM_ANNOTATE(type) \ 63 - "911:\n\t" \ 64 - __ASM_ANNOTATE(911b, type) 65 - 66 56 #else /* __ASSEMBLY__ */ 67 57 68 58 /* ··· 79 89 .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 80 90 .Lhere_\@: 81 91 .pushsection .discard.unwind_hints 92 + ANNOTATE_DATA_SPECIAL 82 93 /* struct unwind_hint */ 83 94 .long .Lhere_\@ - . 84 95 .short \sp_offset ··· 92 101 93 102 .macro STACK_FRAME_NON_STANDARD func:req 94 103 .pushsection .discard.func_stack_frame_non_standard, "aw" 95 - .long \func - . 104 + .quad \func 96 105 .popsection 97 106 .endm 98 107 ··· 100 109 #ifdef CONFIG_FRAME_POINTER 101 110 STACK_FRAME_NON_STANDARD \func 102 111 #endif 103 - .endm 104 - 105 - .macro ANNOTATE type:req 106 - .Lhere_\@: 107 - .pushsection .discard.annotate_insn,"M",@progbits,8 108 - .long .Lhere_\@ - . 
109 - .long \type 110 - .popsection 111 112 .endm 112 113 113 114 #endif /* __ASSEMBLY__ */ ··· 111 128 #define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t" 112 129 #define STACK_FRAME_NON_STANDARD(func) 113 130 #define STACK_FRAME_NON_STANDARD_FP(func) 114 - #define __ASM_ANNOTATE(label, type) "" 115 - #define ASM_ANNOTATE(type) 116 131 #else 117 132 .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 118 133 .endm 119 134 .macro STACK_FRAME_NON_STANDARD func:req 120 135 .endm 121 - .macro ANNOTATE type:req 122 - .endm 123 136 #endif 124 137 125 138 #endif /* CONFIG_OBJTOOL */ 126 - 127 - #ifndef __ASSEMBLY__ 128 - /* 129 - * Annotate away the various 'relocation to !ENDBR` complaints; knowing that 130 - * these relocations will never be used for indirect calls. 131 - */ 132 - #define ANNOTATE_NOENDBR ASM_ANNOTATE(ANNOTYPE_NOENDBR) 133 - #define ANNOTATE_NOENDBR_SYM(sym) asm(__ASM_ANNOTATE(sym, ANNOTYPE_NOENDBR)) 134 - 135 - /* 136 - * This should be used immediately before an indirect jump/call. It tells 137 - * objtool the subsequent indirect jump/call is vouched safe for retpoline 138 - * builds. 139 - */ 140 - #define ANNOTATE_RETPOLINE_SAFE ASM_ANNOTATE(ANNOTYPE_RETPOLINE_SAFE) 141 - /* 142 - * See linux/instrumentation.h 143 - */ 144 - #define ANNOTATE_INSTR_BEGIN(label) __ASM_ANNOTATE(label, ANNOTYPE_INSTR_BEGIN) 145 - #define ANNOTATE_INSTR_END(label) __ASM_ANNOTATE(label, ANNOTYPE_INSTR_END) 146 - /* 147 - * objtool annotation to ignore the alternatives and only consider the original 148 - * instruction(s). 149 - */ 150 - #define ANNOTATE_IGNORE_ALTERNATIVE ASM_ANNOTATE(ANNOTYPE_IGNORE_ALTS) 151 - /* 152 - * This macro indicates that the following intra-function call is valid. 153 - * Any non-annotated intra-function call will cause objtool to issue a warning. 
154 - */ 155 - #define ANNOTATE_INTRA_FUNCTION_CALL ASM_ANNOTATE(ANNOTYPE_INTRA_FUNCTION_CALL) 156 - /* 157 - * Use objtool to validate the entry requirement that all code paths do 158 - * VALIDATE_UNRET_END before RET. 159 - * 160 - * NOTE: The macro must be used at the beginning of a global symbol, otherwise 161 - * it will be ignored. 162 - */ 163 - #define ANNOTATE_UNRET_BEGIN ASM_ANNOTATE(ANNOTYPE_UNRET_BEGIN) 164 - /* 165 - * This should be used to refer to an instruction that is considered 166 - * terminating, like a noreturn CALL or UD2 when we know they are not -- eg 167 - * WARN using UD2. 168 - */ 169 - #define ANNOTATE_REACHABLE(label) __ASM_ANNOTATE(label, ANNOTYPE_REACHABLE) 170 - /* 171 - * This should not be used; it annotates away CFI violations. There are a few 172 - * valid use cases like kexec handover to the next kernel image, and there is 173 - * no security concern there. 174 - * 175 - * There are also a few real issues annotated away, like EFI because we can't 176 - * control the EFI code. 177 - */ 178 - #define ANNOTATE_NOCFI_SYM(sym) asm(__ASM_ANNOTATE(sym, ANNOTYPE_NOCFI)) 179 - 180 - #else 181 - #define ANNOTATE_NOENDBR ANNOTATE type=ANNOTYPE_NOENDBR 182 - #define ANNOTATE_RETPOLINE_SAFE ANNOTATE type=ANNOTYPE_RETPOLINE_SAFE 183 - /* ANNOTATE_INSTR_BEGIN ANNOTATE type=ANNOTYPE_INSTR_BEGIN */ 184 - /* ANNOTATE_INSTR_END ANNOTATE type=ANNOTYPE_INSTR_END */ 185 - #define ANNOTATE_IGNORE_ALTERNATIVE ANNOTATE type=ANNOTYPE_IGNORE_ALTS 186 - #define ANNOTATE_INTRA_FUNCTION_CALL ANNOTATE type=ANNOTYPE_INTRA_FUNCTION_CALL 187 - #define ANNOTATE_UNRET_BEGIN ANNOTATE type=ANNOTYPE_UNRET_BEGIN 188 - #define ANNOTATE_REACHABLE ANNOTATE type=ANNOTYPE_REACHABLE 189 - #define ANNOTATE_NOCFI_SYM ANNOTATE type=ANNOTYPE_NOCFI 190 - #endif 191 139 192 140 #if defined(CONFIG_NOINSTR_VALIDATION) && \ 193 141 (defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO))
+2
include/linux/objtool_types.h
··· 67 67 #define ANNOTYPE_REACHABLE 8 68 68 #define ANNOTYPE_NOCFI 9 69 69 70 + #define ANNOTYPE_DATA_SPECIAL 1 71 + 70 72 #endif /* _LINUX_OBJTOOL_TYPES_H */
+12
kernel/livepatch/Kconfig
··· 18 18 module uses the interface provided by this option to register 19 19 a patch, causing calls to patched functions to be redirected 20 20 to new function code contained in the patch module. 21 + 22 + config HAVE_KLP_BUILD 23 + bool 24 + help 25 + Arch supports klp-build 26 + 27 + config KLP_BUILD 28 + def_bool y 29 + depends on LIVEPATCH && HAVE_KLP_BUILD 30 + select OBJTOOL 31 + help 32 + Enable klp-build support
+4 -4
kernel/livepatch/core.c
··· 217 217 for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) { 218 218 sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info); 219 219 if (sym->st_shndx != SHN_LIVEPATCH) { 220 - pr_err("symbol %s is not marked as a livepatch symbol\n", 221 - strtab + sym->st_name); 220 + pr_err("symbol %s at rela sec %u idx %d is not marked as a livepatch symbol\n", 221 + strtab + sym->st_name, symndx, i); 222 222 return -EINVAL; 223 223 } 224 224 225 225 /* Format: .klp.sym.sym_objname.sym_name,sympos */ 226 226 cnt = sscanf(strtab + sym->st_name, 227 - ".klp.sym.%55[^.].%511[^,],%lu", 227 + KLP_SYM_PREFIX "%55[^.].%511[^,],%lu", 228 228 sym_objname, sym_name, &sympos); 229 229 if (cnt != 3) { 230 230 pr_err("symbol %s has an incorrectly formatted name\n", ··· 303 303 * See comment in klp_resolve_symbols() for an explanation 304 304 * of the selected field width value. 305 305 */ 306 - cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]", 306 + cnt = sscanf(shstrtab + sec->sh_name, KLP_RELOC_SEC_PREFIX "%55[^.]", 307 307 sec_objname); 308 308 if (cnt != 1) { 309 309 pr_err("section %s has an incorrectly formatted name\n",
+1
lib/interval_tree.c
··· 13 13 14 14 EXPORT_SYMBOL_GPL(interval_tree_insert); 15 15 EXPORT_SYMBOL_GPL(interval_tree_remove); 16 + EXPORT_SYMBOL_GPL(interval_tree_subtree_search); 16 17 EXPORT_SYMBOL_GPL(interval_tree_iter_first); 17 18 EXPORT_SYMBOL_GPL(interval_tree_iter_next); 18 19
+4 -3
scripts/Makefile.lib
··· 20 20 name-fix = $(call stringify,$(call name-fix-token,$1)) 21 21 basename_flags = -DKBUILD_BASENAME=$(call name-fix,$(basetarget)) 22 22 modname_flags = -DKBUILD_MODNAME=$(call name-fix,$(modname)) \ 23 - -D__KBUILD_MODNAME=kmod_$(call name-fix-token,$(modname)) 23 + -D__KBUILD_MODNAME=$(call name-fix-token,$(modname)) 24 24 modfile_flags = -DKBUILD_MODFILE=$(call stringify,$(modfile)) 25 25 26 26 _c_flags = $(filter-out $(CFLAGS_REMOVE_$(target-stem).o), \ ··· 173 173 174 174 objtool := $(objtree)/tools/objtool/objtool 175 175 176 + objtool-args-$(CONFIG_KLP_BUILD) += --checksum 176 177 objtool-args-$(CONFIG_HAVE_JUMP_LABEL_HACK) += --hacks=jump_label 177 178 objtool-args-$(CONFIG_HAVE_NOINSTR_HACK) += --hacks=noinstr 178 179 objtool-args-$(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) += --hacks=skylake ··· 192 191 objtool-args-$(CONFIG_HAVE_UACCESS_VALIDATION) += --uaccess 193 192 objtool-args-$(or $(CONFIG_GCOV_KERNEL),$(CONFIG_KCOV)) += --no-unreachable 194 193 objtool-args-$(CONFIG_PREFIX_SYMBOLS) += --prefix=$(CONFIG_FUNCTION_PADDING_BYTES) 195 - objtool-args-$(CONFIG_OBJTOOL_WERROR) += --Werror 194 + objtool-args-$(CONFIG_OBJTOOL_WERROR) += --werror 196 195 197 196 objtool-args = $(objtool-args-y) \ 198 197 $(if $(delay-objtool), --link) \ 199 198 $(if $(part-of-module), --module) 200 199 201 - delay-objtool := $(or $(CONFIG_LTO_CLANG),$(CONFIG_X86_KERNEL_IBT)) 200 + delay-objtool := $(or $(CONFIG_LTO_CLANG),$(CONFIG_X86_KERNEL_IBT),$(CONFIG_KLP_BUILD)) 202 201 203 202 cmd_objtool = $(if $(objtool-enabled), ; $(objtool) $(objtool-args) $@) 204 203 cmd_gen_objtooldep = $(if $(objtool-enabled), { echo ; echo '$@: $$(wildcard $(objtool))' ; } >> $(dot-target).cmd)
+1 -1
scripts/Makefile.vmlinux_o
··· 41 41 ifeq ($(delay-objtool),y) 42 42 vmlinux-objtool-args-y += $(objtool-args-y) 43 43 else 44 - vmlinux-objtool-args-$(CONFIG_OBJTOOL_WERROR) += --Werror 44 + vmlinux-objtool-args-$(CONFIG_OBJTOOL_WERROR) += --werror 45 45 endif 46 46 47 47 vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr \
+14 -5
scripts/faddr2line
··· 1 - #!/bin/bash 1 + #!/usr/bin/env bash 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 # 4 4 # Translate stack dump function offsets. ··· 76 76 AWK="awk" 77 77 GREP="grep" 78 78 79 + # Enforce ASCII-only output from tools like readelf 80 + # ensuring sed processes strings correctly. 81 + export LANG=C 82 + 79 83 command -v ${AWK} >/dev/null 2>&1 || die "${AWK} isn't installed" 80 84 command -v ${READELF} >/dev/null 2>&1 || die "${READELF} isn't installed" 81 85 command -v ${ADDR2LINE} >/dev/null 2>&1 || die "${ADDR2LINE} isn't installed" ··· 111 107 112 108 run_readelf() { 113 109 local objfile=$1 114 - local out=$(${READELF} --file-header --section-headers --symbols --wide $objfile) 110 + local tmpfile 111 + tmpfile=$(mktemp) 112 + 113 + ${READELF} --file-header --section-headers --symbols --wide "$objfile" > "$tmpfile" 115 114 116 115 # This assumes that readelf first prints the file header, then the section headers, then the symbols. 117 116 # Note: It seems that GNU readelf does not prefix section headers with the "There are X section headers" 118 117 # line when multiple options are given, so let's also match with the "Section Headers:" line. 
119 - ELF_FILEHEADER=$(echo "${out}" | sed -n '/There are [0-9]* section headers, starting at offset\|Section Headers:/q;p') 120 - ELF_SECHEADERS=$(echo "${out}" | sed -n '/There are [0-9]* section headers, starting at offset\|Section Headers:/,$p' | sed -n '/Symbol table .* contains [0-9]* entries:/q;p') 121 - ELF_SYMS=$(echo "${out}" | sed -n '/Symbol table .* contains [0-9]* entries:/,$p') 118 + ELF_FILEHEADER=$(sed -n '/There are [0-9]* section headers, starting at offset\|Section Headers:/q;p' "$tmpfile") 119 + ELF_SECHEADERS=$(sed -n '/There are [0-9]* section headers, starting at offset\|Section Headers:/,$p' "$tmpfile" | sed -n '/Symbol table .* contains [0-9]* entries:/q;p') 120 + ELF_SYMS=$(sed -n '/Symbol table .* contains [0-9]* entries:/,$p' "$tmpfile") 121 + 122 + rm -f -- "$tmpfile" 122 123 } 123 124 124 125 check_vmlinux() {
+79
scripts/livepatch/fix-patch-lines
#!/usr/bin/gawk -f
# SPDX-License-Identifier: GPL-2.0
#
# Use #line directives to preserve original __LINE__ numbers across patches to
# avoid unwanted compilation changes.
#
# NOTE: this script requires gawk: the three-argument match() used below is a
# gawk extension (mawk/busybox awk would fail to parse it).

BEGIN {
	in_hunk = 0
	skip = 0
}

# Old-file header of a file diff: only C sources/headers can take #line
# directives, so skip everything else (Makefiles, scripts, ...).
/^--- / {
	skip = $2 !~ /\.(c|h)$/
	print
	next
}

# Hunk header: parse the old-file start line and line count.
/^@@/ {
	if (skip) {
		print
		next
	}

	in_hunk = 1

	# @@ -1,3 +1,4 @@:
	#   1: line number in old file
	#   3: how many lines the hunk covers in old file
	#   1: line number in new file
	#   4: how many lines the hunk covers in new file

	match($0, /^@@ -([0-9]+)(,([0-9]+))? \+([0-9]+)(,([0-9]+))? @@/, m)

	# Set 'cur' to the old file's line number at the start of the hunk.  It
	# gets incremented for every context line and every line removal, so
	# that it always represents the old file's current line number.
	cur = m[1]

	# last = last line number of current hunk
	# (a missing ",count" in the header means a single-line hunk)
	last = cur + (m[3] ? m[3] : 1) - 1

	need_line_directive = 0

	print
	next
}

{
	if (skip || !in_hunk || $0 ~ /^\\ No newline at end of file/) {
		print
		next
	}

	# change line
	if ($0 ~ /^[+-]/) {
		# inject #line after this group of changes
		need_line_directive = 1

		# only removals advance the old file's line counter
		if ($0 ~ /^-/)
			cur++

		print
		next
	}

	# If this is the first context line after a group of changes, inject
	# the #line directive to force the compiler to correct the line
	# numbering to match the original file.
	if (need_line_directive) {
		print "+#line " cur
		need_line_directive = 0
	}

	if (cur == last)
		in_hunk = 0

	cur++
	print
}
+108
scripts/livepatch/init.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Init code for a livepatch kernel module 4 + */ 5 + 6 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 + 8 + #include <linux/kernel.h> 9 + #include <linux/slab.h> 10 + #include <linux/livepatch.h> 11 + 12 + extern struct klp_object_ext __start_klp_objects[]; 13 + extern struct klp_object_ext __stop_klp_objects[]; 14 + 15 + static struct klp_patch *patch; 16 + 17 + static int __init livepatch_mod_init(void) 18 + { 19 + struct klp_object *objs; 20 + unsigned int nr_objs; 21 + int ret; 22 + 23 + nr_objs = __stop_klp_objects - __start_klp_objects; 24 + 25 + if (!nr_objs) { 26 + pr_err("nothing to patch!\n"); 27 + ret = -EINVAL; 28 + goto err; 29 + } 30 + 31 + patch = kzalloc(sizeof(*patch), GFP_KERNEL); 32 + if (!patch) { 33 + ret = -ENOMEM; 34 + goto err; 35 + } 36 + 37 + objs = kzalloc(sizeof(struct klp_object) * (nr_objs + 1), GFP_KERNEL); 38 + if (!objs) { 39 + ret = -ENOMEM; 40 + goto err_free_patch; 41 + } 42 + 43 + for (int i = 0; i < nr_objs; i++) { 44 + struct klp_object_ext *obj_ext = __start_klp_objects + i; 45 + struct klp_func_ext *funcs_ext = obj_ext->funcs; 46 + unsigned int nr_funcs = obj_ext->nr_funcs; 47 + struct klp_func *funcs = objs[i].funcs; 48 + struct klp_object *obj = objs + i; 49 + 50 + funcs = kzalloc(sizeof(struct klp_func) * (nr_funcs + 1), GFP_KERNEL); 51 + if (!funcs) { 52 + ret = -ENOMEM; 53 + for (int j = 0; j < i; j++) 54 + kfree(objs[i].funcs); 55 + goto err_free_objs; 56 + } 57 + 58 + for (int j = 0; j < nr_funcs; j++) { 59 + funcs[j].old_name = funcs_ext[j].old_name; 60 + funcs[j].new_func = funcs_ext[j].new_func; 61 + funcs[j].old_sympos = funcs_ext[j].sympos; 62 + } 63 + 64 + obj->name = obj_ext->name; 65 + obj->funcs = funcs; 66 + 67 + memcpy(&obj->callbacks, &obj_ext->callbacks, sizeof(struct klp_callbacks)); 68 + } 69 + 70 + patch->mod = THIS_MODULE; 71 + patch->objs = objs; 72 + 73 + /* TODO patch->states */ 74 + 75 + #ifdef KLP_NO_REPLACE 76 + patch->replace = false; 77 + 
#else 78 + patch->replace = true; 79 + #endif 80 + 81 + return klp_enable_patch(patch); 82 + 83 + err_free_objs: 84 + kfree(objs); 85 + err_free_patch: 86 + kfree(patch); 87 + err: 88 + return ret; 89 + } 90 + 91 + static void __exit livepatch_mod_exit(void) 92 + { 93 + unsigned int nr_objs; 94 + 95 + nr_objs = __stop_klp_objects - __start_klp_objects; 96 + 97 + for (int i = 0; i < nr_objs; i++) 98 + kfree(patch->objs[i].funcs); 99 + 100 + kfree(patch->objs); 101 + kfree(patch); 102 + } 103 + 104 + module_init(livepatch_mod_init); 105 + module_exit(livepatch_mod_exit); 106 + MODULE_LICENSE("GPL"); 107 + MODULE_INFO(livepatch, "Y"); 108 + MODULE_DESCRIPTION("Livepatch module");
+827
scripts/livepatch/klp-build
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Build a livepatch module from one or more source .patch files:
# build orig kernel -> build patched kernel -> 'objtool klp diff' the changed
# objects -> link the diffs with init.c into a module -> 'objtool klp
# post-link' to finalize it.

# shellcheck disable=SC1090,SC2155

# mapfile -d etc. and ${var@Q}-era features need bash 4.4+
if (( BASH_VERSINFO[0] < 4 || \
      (BASH_VERSINFO[0] == 4 && BASH_VERSINFO[1] < 4) )); then
	echo "error: this script requires bash 4.4+" >&2
	exit 1
fi

set -o errexit
set -o errtrace
set -o pipefail
set -o nounset

# Allow doing 'cmd | mapfile -t array' instead of 'mapfile -t array < <(cmd)'.
# This helps keep execution in pipes so pipefail+errexit can catch errors.
shopt -s lastpipe

unset DEBUG_CLONE DIFF_CHECKSUM SKIP_CLEANUP XTRACE

REPLACE=1
SHORT_CIRCUIT=0
JOBS="$(getconf _NPROCESSORS_ONLN)"
VERBOSE="-s"
shopt -o xtrace | grep -q 'on' && XTRACE=1

# Avoid removing the previous $TMP_DIR until args have been fully processed.
KEEP_TMP=1

SCRIPT="$(basename "$0")"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
FIX_PATCH_LINES="$SCRIPT_DIR/fix-patch-lines"

SRC="$(pwd)"
OBJ="$(pwd)"

CONFIG="$OBJ/.config"
TMP_DIR="$OBJ/klp-tmp"

ORIG_DIR="$TMP_DIR/orig"
PATCHED_DIR="$TMP_DIR/patched"
DIFF_DIR="$TMP_DIR/diff"
KMOD_DIR="$TMP_DIR/kmod"

STASH_DIR="$TMP_DIR/stash"
TIMESTAMP="$TMP_DIR/timestamp"
PATCH_TMP_DIR="$TMP_DIR/tmp"

KLP_DIFF_LOG="$DIFF_DIR/diff.log"

# grep that doesn't trip errexit on "no match"
grep0() {
	command grep "$@" || true
}

status() {
	echo "$*"
}

warn() {
	echo "error: $SCRIPT: $*" >&2
}

die() {
	warn "$@"
	exit 1
}

declare -a STASHED_FILES

# Save a pristine copy of a source file we're about to modify so cleanup() can
# restore it.
stash_file() {
	local file="$1"
	local rel_file="${file#"$SRC"/}"

	[[ ! -e "$file" ]] && die "no file to stash: $file"

	mkdir -p "$STASH_DIR/$(dirname "$rel_file")"
	cp -f "$file" "$STASH_DIR/$rel_file"

	STASHED_FILES+=("$rel_file")
}

restore_files() {
	local file

	for file in "${STASHED_FILES[@]}"; do
		mv -f "$STASH_DIR/$file" "$SRC/$file" || warn "can't restore file: $file"
	done

	STASHED_FILES=()
}

cleanup() {
	set +o nounset
	revert_patches "--recount"
	restore_files
	[[ "$KEEP_TMP" -eq 0 ]] && rm -rf "$TMP_DIR"
	return 0
}

trap_err() {
	warn "line ${BASH_LINENO[0]}: '$BASH_COMMAND'"
}

trap cleanup EXIT INT TERM HUP
trap trap_err ERR

__usage() {
	cat <<EOF
Usage: $SCRIPT [OPTIONS] PATCH_FILE(s)
Generate a livepatch module.

Options:
  -f, --show-first-changed	Show address of first changed instruction
  -j, --jobs=<jobs>		Build jobs to run simultaneously [default: $JOBS]
  -o, --output=<file.ko>	Output file [default: livepatch-<patch-name>.ko]
      --no-replace		Disable livepatch atomic replace
  -v, --verbose			Pass V=1 to kernel/module builds

Advanced Options:
  -d, --debug			Show symbol/reloc cloning decisions
  -S, --short-circuit=STEP	Start at build step (requires prior --keep-tmp)
				  1|orig	Build original kernel (default)
				  2|patched	Build patched kernel
				  3|diff	Diff objects
				  4|kmod	Build patch module
  -T, --keep-tmp		Preserve tmp dir on exit

EOF
}

usage() {
	__usage >&2
}

process_args() {
	local keep_tmp=0
	local short
	local long
	local args

	short="hfj:o:vdS:T"
	long="help,show-first-changed,jobs:,output:,no-replace,verbose,debug,short-circuit:,keep-tmp"

	args=$(getopt --options "$short" --longoptions "$long" -- "$@") || {
		echo; usage; exit
	}
	eval set -- "$args"

	while true; do
		case "$1" in
		-h | --help)
			usage
			exit 0
			;;
		-f | --show-first-changed)
			DIFF_CHECKSUM=1
			shift
			;;
		-j | --jobs)
			JOBS="$2"
			shift 2
			;;
		-o | --output)
			[[ "$2" != *.ko ]] && die "output filename should end with .ko"
			OUTFILE="$2"
			NAME="$(basename "$OUTFILE")"
			NAME="${NAME%.ko}"
			NAME="$(module_name_string "$NAME")"
			shift 2
			;;
		--no-replace)
			REPLACE=0
			shift
			;;
		-v | --verbose)
			VERBOSE="V=1"
			shift
			;;
		-d | --debug)
			DEBUG_CLONE=1
			keep_tmp=1
			shift
			;;
		-S | --short-circuit)
			[[ ! -d "$TMP_DIR" ]] && die "--short-circuit requires preserved klp-tmp dir"
			keep_tmp=1
			# Accept the documented "kmod" name for step 4 (and
			# keep "mod" as an undocumented alias).
			case "$2" in
			1 | orig)	SHORT_CIRCUIT=1 ;;
			2 | patched)	SHORT_CIRCUIT=2 ;;
			3 | diff)	SHORT_CIRCUIT=3 ;;
			4 | kmod | mod)	SHORT_CIRCUIT=4 ;;
			*) die "invalid short-circuit step '$2'" ;;
			esac
			shift 2
			;;
		-T | --keep-tmp)
			keep_tmp=1
			shift
			;;
		--)
			shift
			break
			;;
		*)
			usage
			exit 1
			;;
		esac
	done

	if [[ $# -eq 0 ]]; then
		usage
		exit 1
	fi

	KEEP_TMP="$keep_tmp"
	PATCHES=("$@")
}

# temporarily disable xtrace for especially verbose code
xtrace_save() {
	[[ -v XTRACE ]] && set +x
	return 0
}

xtrace_restore() {
	[[ -v XTRACE ]] && set -x
	return 0
}

validate_config() {
	xtrace_save "reading .config"
	source "$CONFIG" || die "no .config file in $(dirname "$CONFIG")"
	xtrace_restore

	[[ -v CONFIG_LIVEPATCH ]] || \
		die "CONFIG_LIVEPATCH not enabled"

	[[ -v CONFIG_KLP_BUILD ]] || \
		die "CONFIG_KLP_BUILD not enabled"

	[[ -v CONFIG_GCC_PLUGIN_LATENT_ENTROPY ]] && \
		die "kernel option 'CONFIG_GCC_PLUGIN_LATENT_ENTROPY' not supported"

	[[ -v CONFIG_GCC_PLUGIN_RANDSTRUCT ]] && \
		die "kernel option 'CONFIG_GCC_PLUGIN_RANDSTRUCT' not supported"

	return 0
}

# Only allow alphanumerics and '_' and '-' in the module name.  Everything
# else is replaced with '-'.  Also truncate to 55 chars so the full name + NUL
# terminator fits in the kernel's 56-byte module name array.
module_name_string() {
	echo "${1//[^a-zA-Z0-9_-]/-}" | cut -c 1-55
}

# If the module name wasn't specified on the cmdline with --output, give it a
# name based on the patch name.
set_module_name() {
	[[ -v NAME ]] && return 0

	if [[ "${#PATCHES[@]}" -eq 1 ]]; then
		NAME="$(basename "${PATCHES[0]}")"
		NAME="${NAME%.*}"
	else
		NAME="patch"
	fi

	NAME="livepatch-$NAME"
	NAME="$(module_name_string "$NAME")"

	OUTFILE="$NAME.ko"
}

# Hardcode the value printed by the localversion script to prevent patch
# application from appending it with '+' due to a dirty git working tree.
set_kernelversion() {
	local file="$SRC/scripts/setlocalversion"
	local localversion

	stash_file "$file"

	localversion="$(cd "$SRC" && make --no-print-directory kernelversion)"
	localversion="$(cd "$SRC" && KERNELVERSION="$localversion" ./scripts/setlocalversion)"
	[[ -z "$localversion" ]] && die "setlocalversion failed"

	sed -i "2i echo $localversion; exit 0" scripts/setlocalversion
}

# Print the sorted, de-duped list of files touched by a patch.
get_patch_files() {
	local patch="$1"

	grep0 -E '^(--- |\+\+\+ )' "$patch" \
		| gawk '{print $2}' \
		| sed 's|^[^/]*/||' \
		| sort -u
}

# Make sure git re-stats the changed files
git_refresh() {
	local patch="$1"
	local files=()

	[[ ! -e "$SRC/.git" ]] && return

	get_patch_files "$patch" | mapfile -t files

	(
		cd "$SRC"
		git update-index -q --refresh -- "${files[@]}"
	)
}

check_unsupported_patches() {
	local patch

	for patch in "${PATCHES[@]}"; do
		local files=()

		get_patch_files "$patch" | mapfile -t files

		for file in "${files[@]}"; do
			case "$file" in
			lib/*|*.S)
				die "unsupported patch to $file"
				;;
			esac
		done
	done
}

apply_patch() {
	local patch="$1"
	shift
	local extra_args=("$@")

	[[ ! -f "$patch" ]] && die "$patch doesn't exist"

	(
		cd "$SRC"

		# The sed strips the version signature from 'git format-patch',
		# otherwise 'git apply --recount' warns.
		sed -n '/^-- /q;p' "$patch" |
			git apply "${extra_args[@]}"
	)

	APPLIED_PATCHES+=("$patch")
}

revert_patch() {
	local patch="$1"
	shift
	local extra_args=("$@")
	local tmp=()

	(
		cd "$SRC"

		sed -n '/^-- /q;p' "$patch" |
			git apply --reverse "${extra_args[@]}"
	)
	git_refresh "$patch"

	# Remove $patch from APPLIED_PATCHES
	for p in "${APPLIED_PATCHES[@]}"; do
		[[ "$p" == "$patch" ]] && continue
		tmp+=("$p")
	done

	APPLIED_PATCHES=("${tmp[@]}")
}

apply_patches() {
	local patch

	for patch in "${PATCHES[@]}"; do
		apply_patch "$patch"
	done
}

revert_patches() {
	local extra_args=("$@")
	local patches=("${APPLIED_PATCHES[@]}")

	# Revert in reverse order of application
	for (( i=${#patches[@]}-1 ; i>=0 ; i-- )) ; do
		revert_patch "${patches[$i]}" "${extra_args[@]}"
	done

	APPLIED_PATCHES=()
}

validate_patches() {
	check_unsupported_patches
	apply_patches
	revert_patches
}

do_init() {
	# We're not yet smart enough to handle anything other than in-tree
	# builds in pwd.
	[[ ! "$SRC" -ef "$SCRIPT_DIR/../.." ]] && die "please run from the kernel root directory"
	[[ ! "$OBJ" -ef "$SCRIPT_DIR/../.." ]] && die "please run from the kernel root directory"

	(( SHORT_CIRCUIT <= 1 )) && rm -rf "$TMP_DIR"
	mkdir -p "$TMP_DIR"

	APPLIED_PATCHES=()

	[[ -x "$FIX_PATCH_LINES" ]] || die "can't find fix-patch-lines"

	validate_config
	set_module_name
	set_kernelversion
}

# Refresh the patch hunk headers, specifically the line numbers and counts.
refresh_patch() {
	local patch="$1"
	local tmpdir="$PATCH_TMP_DIR"
	local files=()

	rm -rf "$tmpdir"
	mkdir -p "$tmpdir/a"
	mkdir -p "$tmpdir/b"

	# Get all source files affected by the patch
	get_patch_files "$patch" | mapfile -t files

	# Copy orig source files to 'a'
	( cd "$SRC" && echo "${files[@]}" | xargs cp --parents --target-directory="$tmpdir/a" )

	# Copy patched source files to 'b'
	apply_patch "$patch" --recount
	( cd "$SRC" && echo "${files[@]}" | xargs cp --parents --target-directory="$tmpdir/b" )
	revert_patch "$patch" --recount

	# Diff 'a' and 'b' to make a clean patch
	( cd "$tmpdir" && git diff --no-index --no-prefix a b > "$patch" ) || true
}

# Copy the patches to a temporary directory, fix their lines so as not to
# affect the __LINE__ macro for otherwise unchanged functions further down the
# file, and update $PATCHES to point to the fixed patches.
fix_patches() {
	local idx
	local i

	rm -f "$TMP_DIR"/*.patch

	idx=0001
	for i in "${!PATCHES[@]}"; do
		local old_patch="${PATCHES[$i]}"
		local tmp_patch="$TMP_DIR/tmp.patch"
		local patch="${PATCHES[$i]}"
		local new_patch

		new_patch="$TMP_DIR/$idx-fixed-$(basename "$patch")"

		cp -f "$old_patch" "$tmp_patch"
		refresh_patch "$tmp_patch"
		"$FIX_PATCH_LINES" "$tmp_patch" > "$new_patch"
		refresh_patch "$new_patch"

		PATCHES[i]="$new_patch"

		rm -f "$tmp_patch"
		idx=$(printf "%04d" $(( 10#$idx + 1 )))
	done
}

clean_kernel() {
	local cmd=()

	cmd=("make")
	cmd+=("--silent")
	cmd+=("-j$JOBS")
	cmd+=("clean")

	(
		cd "$SRC"
		"${cmd[@]}"
	)
}

build_kernel() {
	local log="$TMP_DIR/build.log"
	local cmd=()

	cmd=("make")

	# When a patch to a kernel module references a newly created unexported
	# symbol which lives in vmlinux or another kernel module, the patched
	# kernel build fails with the following error:
	#
	#   ERROR: modpost: "klp_string" [fs/xfs/xfs.ko] undefined!
	#
	# The undefined symbols are working as designed in that case.  They get
	# resolved later when the livepatch module build link pulls all the
	# disparate objects together into the same kernel module.
	#
	# It would be good to have a way to tell modpost to skip checking for
	# undefined symbols altogether.  For now, just convert the error to a
	# warning with KBUILD_MODPOST_WARN, and grep out the warning to avoid
	# confusing the user.
	#
	cmd+=("KBUILD_MODPOST_WARN=1")

	cmd+=("$VERBOSE")
	cmd+=("-j$JOBS")
	cmd+=("KCFLAGS=-ffunction-sections -fdata-sections")
	cmd+=("vmlinux")
	cmd+=("modules")

	(
		cd "$SRC"
		"${cmd[@]}" \
			1> >(tee -a "$log") \
			2> >(tee -a "$log" | grep0 -v "modpost.*undefined!" >&2)
	)
}

find_objects() {
	local opts=("$@")

	# Find root-level vmlinux.o and non-root-level .ko files,
	# excluding klp-tmp/ and .git/
	find "$OBJ" \( -path "$TMP_DIR" -o -path "$OBJ/.git" -o -regex "$OBJ/[^/][^/]*\.ko" \) -prune -o \
		-type f "${opts[@]}" \
		\( -name "*.ko" -o -path "$OBJ/vmlinux.o" \) \
		-printf '%P\n'
}

# Copy all .o archives to $ORIG_DIR
copy_orig_objects() {
	local files=()

	rm -rf "$ORIG_DIR"
	mkdir -p "$ORIG_DIR"

	find_objects | mapfile -t files

	xtrace_save "copying orig objects"
	for _file in "${files[@]}"; do
		local rel_file="${_file/.ko/.o}"
		local file="$OBJ/$rel_file"
		local file_dir="$(dirname "$file")"
		local orig_file="$ORIG_DIR/$rel_file"
		local orig_dir="$(dirname "$orig_file")"
		local cmd_file="$file_dir/.$(basename "$file").cmd"

		[[ ! -f "$file" ]] && die "missing $(basename "$file") for $_file"

		mkdir -p "$orig_dir"
		cp -f "$file" "$orig_dir"
		[[ -e "$cmd_file" ]] && cp -f "$cmd_file" "$orig_dir"
	done
	xtrace_restore

	mv -f "$TMP_DIR/build.log" "$ORIG_DIR"
	touch "$TIMESTAMP"
}

# Copy all changed objects to $PATCHED_DIR
copy_patched_objects() {
	local files=()
	local opts=()
	local found=0

	rm -rf "$PATCHED_DIR"
	mkdir -p "$PATCHED_DIR"

	# Note this doesn't work with some configs, thus the 'cmp' below.
	opts=("-newer")
	opts+=("$TIMESTAMP")

	find_objects "${opts[@]}" | mapfile -t files

	xtrace_save "copying changed objects"
	for _file in "${files[@]}"; do
		local rel_file="${_file/.ko/.o}"
		local file="$OBJ/$rel_file"
		local orig_file="$ORIG_DIR/$rel_file"
		local patched_file="$PATCHED_DIR/$rel_file"
		local patched_dir="$(dirname "$patched_file")"

		[[ ! -f "$file" ]] && die "missing $(basename "$file") for $_file"

		cmp -s "$orig_file" "$file" && continue

		mkdir -p "$patched_dir"
		cp -f "$file" "$patched_dir"
		found=1
	done
	xtrace_restore

	(( found == 0 )) && die "no changes detected"

	mv -f "$TMP_DIR/build.log" "$PATCHED_DIR"
}

# Diff changed objects, writing output object to $DIFF_DIR
diff_objects() {
	local log="$KLP_DIFF_LOG"
	local files=()
	local opts=()

	rm -rf "$DIFF_DIR"
	mkdir -p "$DIFF_DIR"

	find "$PATCHED_DIR" -type f -name "*.o" | mapfile -t files
	[[ ${#files[@]} -eq 0 ]] && die "no changes detected"

	[[ -v DEBUG_CLONE ]] && opts=("--debug")

	# Diff all changed objects
	for file in "${files[@]}"; do
		local rel_file="${file#"$PATCHED_DIR"/}"
		local orig_file="$rel_file"
		local patched_file="$PATCHED_DIR/$rel_file"
		local out_file="$DIFF_DIR/$rel_file"
		local filter=()
		local cmd=()

		mkdir -p "$(dirname "$out_file")"

		cmd=("$SRC/tools/objtool/objtool")
		cmd+=("klp")
		cmd+=("diff")
		(( ${#opts[@]} > 0 )) && cmd+=("${opts[@]}")
		cmd+=("$orig_file")
		cmd+=("$patched_file")
		cmd+=("$out_file")

		if [[ -v DIFF_CHECKSUM ]]; then
			filter=("grep0")
			filter+=("-Ev")
			filter+=("DEBUG: .*checksum: ")
		else
			filter=("cat")
		fi

		(
			cd "$ORIG_DIR"
			"${cmd[@]}" \
				1> >(tee -a "$log") \
				2> >(tee -a "$log" | "${filter[@]}" >&2) || \
				die "objtool klp diff failed"
		)
	done
}

# For each changed object, run objtool with --debug-checksum to get the
# per-instruction checksums, and then diff those to find the first changed
# instruction for each function.
diff_checksums() {
	local orig_log="$ORIG_DIR/checksum.log"
	local patched_log="$PATCHED_DIR/checksum.log"
	local -A funcs
	local cmd=()
	local line
	local file
	local func

	gawk '/\.o: changed function: / {
		sub(/:$/, "", $1)
		print $1, $NF
	}' "$KLP_DIFF_LOG" | mapfile -t lines

	for line in "${lines[@]}"; do
		read -r file func <<< "$line"
		if [[ ! -v funcs["$file"] ]]; then
			funcs["$file"]="$func"
		else
			funcs["$file"]+=" $func"
		fi
	done

	cmd=("$SRC/tools/objtool/objtool")
	cmd+=("--checksum")
	cmd+=("--link")
	cmd+=("--dry-run")

	for file in "${!funcs[@]}"; do
		local opt="--debug-checksum=${funcs[$file]// /,}"

		(
			cd "$ORIG_DIR"
			"${cmd[@]}" "$opt" "$file" &> "$orig_log" || \
				( cat "$orig_log" >&2; die "objtool --debug-checksum failed" )

			cd "$PATCHED_DIR"
			"${cmd[@]}" "$opt" "$file" &> "$patched_log" || \
				( cat "$patched_log" >&2; die "objtool --debug-checksum failed" )
		)

		for func in ${funcs[$file]}; do
			diff <( grep0 -E "^DEBUG: .*checksum: $func " "$orig_log"    | sed "s|$ORIG_DIR/||") \
			     <( grep0 -E "^DEBUG: .*checksum: $func " "$patched_log" | sed "s|$PATCHED_DIR/||") \
				| gawk '/^< DEBUG: / {
					gsub(/:/, "")
					printf "%s: %s: %s\n", $3, $5, $6
					exit
				}' || true
		done
	done
}

# Build and post-process livepatch module in $KMOD_DIR
build_patch_module() {
	local makefile="$KMOD_DIR/Kbuild"
	local log="$KMOD_DIR/build.log"
	local kmod_file
	local cflags=()
	local files=()
	local cmd=()

	rm -rf "$KMOD_DIR"
	mkdir -p "$KMOD_DIR"

	cp -f "$SRC/scripts/livepatch/init.c" "$KMOD_DIR"

	echo "obj-m := $NAME.o" > "$makefile"
	echo -n "$NAME-y := init.o" >> "$makefile"

	find "$DIFF_DIR" -type f -name "*.o" | mapfile -t files
	[[ ${#files[@]} -eq 0 ]] && die "no changes detected"

	for file in "${files[@]}"; do
		local rel_file="${file#"$DIFF_DIR"/}"
		local orig_file="$ORIG_DIR/$rel_file"
		local orig_dir="$(dirname "$orig_file")"
		local kmod_file="$KMOD_DIR/$rel_file"
		local kmod_dir="$(dirname "$kmod_file")"
		local cmd_file="$orig_dir/.$(basename "$file").cmd"

		mkdir -p "$kmod_dir"
		cp -f "$file" "$kmod_dir"
		[[ -e "$cmd_file" ]] && cp -f "$cmd_file" "$kmod_dir"

		# Tell kbuild this is a prebuilt object
		cp -f "$file" "${kmod_file}_shipped"

		echo -n " $rel_file" >> "$makefile"
	done

	echo >> "$makefile"

	cflags=("-ffunction-sections")
	cflags+=("-fdata-sections")
	[[ $REPLACE -eq 0 ]] && cflags+=("-DKLP_NO_REPLACE")

	cmd=("make")
	cmd+=("$VERBOSE")
	cmd+=("-j$JOBS")
	cmd+=("--directory=.")
	cmd+=("M=$KMOD_DIR")
	cmd+=("KCFLAGS=${cflags[*]}")

	# Build a "normal" kernel module with init.c and the diffed objects
	(
		cd "$SRC"
		"${cmd[@]}" \
			1> >(tee -a "$log") \
			2> >(tee -a "$log" >&2)
	)

	kmod_file="$KMOD_DIR/$NAME.ko"

	# Save off the intermediate binary for debugging
	cp -f "$kmod_file" "$kmod_file.orig"

	# Work around issue where slight .config change makes corrupt BTF
	objcopy --remove-section=.BTF "$kmod_file"

	# Fix (and work around) linker wreckage for klp syms / relocs
	"$SRC/tools/objtool/objtool" klp post-link "$kmod_file" || die "objtool klp post-link failed"

	cp -f "$kmod_file" "$OUTFILE"
}


################################################################################

process_args "$@"
do_init

if (( SHORT_CIRCUIT <= 1 )); then
	status "Validating patch(es)"
	validate_patches
	status "Building original kernel"
	clean_kernel
	build_kernel
	status "Copying original object files"
	copy_orig_objects
fi

if (( SHORT_CIRCUIT <= 2 )); then
	status "Fixing patch(es)"
	fix_patches
	apply_patches
	status "Building patched kernel"
	build_kernel
	revert_patches
	status "Copying patched object files"
	copy_patched_objects
fi

if (( SHORT_CIRCUIT <= 3 )); then
	status "Diffing objects"
	diff_objects
	if [[ -v DIFF_CHECKSUM ]]; then
		status "Finding first changed instructions"
		diff_checksums
	fi
fi

if (( SHORT_CIRCUIT <= 4 )); then
	status "Building patch module: $OUTFILE"
	build_patch_module
fi

status "SUCCESS"
+5
scripts/mod/modpost.c
··· 606 606 strstarts(symname, "_savevr_") || 607 607 strcmp(symname, ".TOC.") == 0) 608 608 return 1; 609 + 610 + /* ignore linker-created section bounds variables */ 611 + if (strstarts(symname, "__start_") || strstarts(symname, "__stop_")) 612 + return 1; 613 + 609 614 /* Do not ignore this symbol */ 610 615 return 0; 611 616 }
+14 -8
scripts/module.lds.S
··· 34 34 35 35 __patchable_function_entries : { *(__patchable_function_entries) } 36 36 37 + __klp_funcs 0: ALIGN(8) { KEEP(*(__klp_funcs)) } 38 + 39 + __klp_objects 0: ALIGN(8) { 40 + __start_klp_objects = .; 41 + KEEP(*(__klp_objects)) 42 + __stop_klp_objects = .; 43 + } 44 + 37 45 #ifdef CONFIG_ARCH_USES_CFI_TRAPS 38 - __kcfi_traps : { KEEP(*(.kcfi_traps)) } 46 + __kcfi_traps : { KEEP(*(.kcfi_traps)) } 39 47 #endif 40 48 41 - #ifdef CONFIG_LTO_CLANG 42 - /* 43 - * With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and 44 - * -ffunction-sections, which increases the size of the final module. 45 - * Merge the split sections in the final binary. 46 - */ 49 + .text : { 50 + *(.text .text.[0-9a-zA-Z_]*) 51 + } 52 + 47 53 .bss : { 48 54 *(.bss .bss.[0-9a-zA-Z_]*) 49 55 *(.bss..L*) ··· 64 58 *(.rodata .rodata.[0-9a-zA-Z_]*) 65 59 *(.rodata..L*) 66 60 } 67 - #endif 61 + 68 62 MOD_SEPARATE_CODETAG_SECTIONS() 69 63 } 70 64
+2
tools/build/Build
··· 1 + hostprogs := fixdep 2 + fixdep-y := fixdep.o
+21 -2
tools/build/Makefile
··· 37 37 $(Q)$(MAKE) -C feature OUTPUT=$(TMP_O) clean >/dev/null 38 38 endif 39 39 40 - $(OUTPUT)fixdep: $(srctree)/tools/build/fixdep.c 41 - $(QUIET_CC)$(HOSTCC) $(KBUILD_HOSTCFLAGS) $(KBUILD_HOSTLDFLAGS) -o $@ $< 40 + include $(srctree)/tools/build/Makefile.include 41 + 42 + FIXDEP := $(OUTPUT)fixdep 43 + FIXDEP_IN := $(OUTPUT)fixdep-in.o 44 + 45 + # To track fixdep's dependencies properly, fixdep needs to run on itself. 46 + # Build it twice the first time. 47 + $(FIXDEP_IN): FORCE 48 + $(Q)if [ ! -f $(FIXDEP) ]; then \ 49 + $(MAKE) $(build)=fixdep HOSTCFLAGS="$(KBUILD_HOSTCFLAGS)"; \ 50 + rm -f $(FIXDEP).o; \ 51 + fi 52 + $(Q)$(MAKE) $(build)=fixdep HOSTCFLAGS="$(KBUILD_HOSTCFLAGS)" 53 + 54 + 55 + $(FIXDEP): $(FIXDEP_IN) 56 + $(QUIET_LINK)$(HOSTCC) $(FIXDEP_IN) $(KBUILD_HOSTLDFLAGS) -o $@ 57 + 58 + FORCE: 59 + 60 + .PHONY: FORCE
+3 -7
tools/include/linux/interval_tree_generic.h
··· 77 77 * Cond2: start <= ITLAST(node) \ 78 78 */ \ 79 79 \ 80 - static ITSTRUCT * \ 80 + ITSTATIC ITSTRUCT * \ 81 81 ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \ 82 82 { \ 83 83 while (true) { \ ··· 104 104 if (ITSTART(node) <= last) { /* Cond1 */ \ 105 105 if (start <= ITLAST(node)) /* Cond2 */ \ 106 106 return node; /* node is leftmost match */ \ 107 - if (node->ITRB.rb_right) { \ 108 - node = rb_entry(node->ITRB.rb_right, \ 109 - ITSTRUCT, ITRB); \ 110 - if (start <= node->ITSUBTREE) \ 111 - continue; \ 112 - } \ 107 + node = rb_entry(node->ITRB.rb_right, ITSTRUCT, ITRB); \ 108 + continue; \ 113 109 } \ 114 110 return NULL; /* No match */ \ 115 111 } \
+76
tools/include/linux/livepatch_external.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * External livepatch interfaces for patch creation tooling 4 + */ 5 + 6 + #ifndef _LINUX_LIVEPATCH_EXTERNAL_H_ 7 + #define _LINUX_LIVEPATCH_EXTERNAL_H_ 8 + 9 + #include <linux/types.h> 10 + 11 + #define KLP_RELOC_SEC_PREFIX ".klp.rela." 12 + #define KLP_SYM_PREFIX ".klp.sym." 13 + 14 + #define __KLP_PRE_PATCH_PREFIX __klp_pre_patch_callback_ 15 + #define __KLP_POST_PATCH_PREFIX __klp_post_patch_callback_ 16 + #define __KLP_PRE_UNPATCH_PREFIX __klp_pre_unpatch_callback_ 17 + #define __KLP_POST_UNPATCH_PREFIX __klp_post_unpatch_callback_ 18 + 19 + #define KLP_PRE_PATCH_PREFIX __stringify(__KLP_PRE_PATCH_PREFIX) 20 + #define KLP_POST_PATCH_PREFIX __stringify(__KLP_POST_PATCH_PREFIX) 21 + #define KLP_PRE_UNPATCH_PREFIX __stringify(__KLP_PRE_UNPATCH_PREFIX) 22 + #define KLP_POST_UNPATCH_PREFIX __stringify(__KLP_POST_UNPATCH_PREFIX) 23 + 24 + struct klp_object; 25 + 26 + typedef int (*klp_pre_patch_t)(struct klp_object *obj); 27 + typedef void (*klp_post_patch_t)(struct klp_object *obj); 28 + typedef void (*klp_pre_unpatch_t)(struct klp_object *obj); 29 + typedef void (*klp_post_unpatch_t)(struct klp_object *obj); 30 + 31 + /** 32 + * struct klp_callbacks - pre/post live-(un)patch callback structure 33 + * @pre_patch: executed before code patching 34 + * @post_patch: executed after code patching 35 + * @pre_unpatch: executed before code unpatching 36 + * @post_unpatch: executed after code unpatching 37 + * @post_unpatch_enabled: flag indicating if post-unpatch callback 38 + * should run 39 + * 40 + * All callbacks are optional. Only the pre-patch callback, if provided, 41 + * will be unconditionally executed. If the parent klp_object fails to 42 + * patch for any reason, including a non-zero error status returned from 43 + * the pre-patch callback, no further callbacks will be executed. 
44 + */ 45 + struct klp_callbacks { 46 + klp_pre_patch_t pre_patch; 47 + klp_post_patch_t post_patch; 48 + klp_pre_unpatch_t pre_unpatch; 49 + klp_post_unpatch_t post_unpatch; 50 + bool post_unpatch_enabled; 51 + }; 52 + 53 + /* 54 + * 'struct klp_{func,object}_ext' are compact "external" representations of 55 + * 'struct klp_{func,object}'. They are used by objtool for livepatch 56 + * generation. The structs are then read by the livepatch module and converted 57 + * to the real structs before calling klp_enable_patch(). 58 + * 59 + * TODO make these the official API for klp_enable_patch(). That should 60 + * simplify livepatch's interface as well as its data structure lifetime 61 + * management. 62 + */ 63 + struct klp_func_ext { 64 + const char *old_name; 65 + void *new_func; 66 + unsigned long sympos; 67 + }; 68 + 69 + struct klp_object_ext { 70 + const char *name; 71 + struct klp_func_ext *funcs; 72 + struct klp_callbacks callbacks; 73 + unsigned int nr_funcs; 74 + }; 75 + 76 + #endif /* _LINUX_LIVEPATCH_EXTERNAL_H_ */
+2
tools/include/linux/objtool_types.h
··· 67 67 #define ANNOTYPE_REACHABLE 8 68 68 #define ANNOTYPE_NOCFI 9 69 69 70 + #define ANNOTYPE_DATA_SPECIAL 1 71 + 70 72 #endif /* _LINUX_OBJTOOL_TYPES_H */
+14
tools/include/linux/string.h
··· 44 44 return strncmp(str, prefix, strlen(prefix)) == 0; 45 45 } 46 46 47 + /* 48 + * Checks if a string ends with another. 49 + */ 50 + static inline bool str_ends_with(const char *str, const char *substr) 51 + { 52 + size_t len = strlen(str); 53 + size_t sublen = strlen(substr); 54 + 55 + if (sublen > len) 56 + return false; 57 + 58 + return !strcmp(str + len - sublen, substr); 59 + } 60 + 47 61 extern char * __must_check skip_spaces(const char *); 48 62 49 63 extern char *strim(char *);
+2 -2
tools/objtool/Build
··· 8 8 objtool-y += elf.o 9 9 objtool-y += objtool.o 10 10 11 - objtool-$(BUILD_ORC) += orc_gen.o 12 - objtool-$(BUILD_ORC) += orc_dump.o 11 + objtool-$(BUILD_ORC) += orc_gen.o orc_dump.o 12 + objtool-$(BUILD_KLP) += builtin-klp.o klp-diff.o klp-post-link.o 13 13 14 14 objtool-y += libstring.o 15 15 objtool-y += libctype.o
+32 -16
tools/objtool/Makefile
··· 2 2 include ../scripts/Makefile.include 3 3 include ../scripts/Makefile.arch 4 4 5 + ifeq ($(SRCARCH),x86) 6 + BUILD_ORC := y 7 + ARCH_HAS_KLP := y 8 + endif 9 + 10 + ifeq ($(SRCARCH),loongarch) 11 + BUILD_ORC := y 12 + endif 13 + 14 + ifeq ($(ARCH_HAS_KLP),y) 15 + HAVE_XXHASH = $(shell echo "int main() {}" | \ 16 + $(HOSTCC) -xc - -o /dev/null -lxxhash 2> /dev/null && echo y || echo n) 17 + ifeq ($(HAVE_XXHASH),y) 18 + BUILD_KLP := y 19 + LIBXXHASH_CFLAGS := $(shell $(HOSTPKG_CONFIG) libxxhash --cflags 2>/dev/null) \ 20 + -DBUILD_KLP 21 + LIBXXHASH_LIBS := $(shell $(HOSTPKG_CONFIG) libxxhash --libs 2>/dev/null || echo -lxxhash) 22 + endif 23 + endif 24 + 25 + export BUILD_ORC BUILD_KLP 26 + 5 27 ifeq ($(srctree),) 6 28 srctree := $(patsubst %/,%,$(dir $(CURDIR))) 7 29 srctree := $(patsubst %/,%,$(dir $(srctree))) ··· 45 23 46 24 all: $(OBJTOOL) 47 25 26 + WARNINGS := -Werror -Wall -Wextra -Wmissing-prototypes \ 27 + -Wmissing-declarations -Wwrite-strings \ 28 + -Wno-implicit-fallthrough -Wno-sign-compare \ 29 + -Wno-unused-parameter 30 + 48 31 INCLUDES := -I$(srctree)/tools/include \ 49 32 -I$(srctree)/tools/include/uapi \ 50 33 -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ ··· 57 30 -I$(srctree)/tools/objtool/include \ 58 31 -I$(srctree)/tools/objtool/arch/$(SRCARCH)/include \ 59 32 -I$(LIBSUBCMD_OUTPUT)/include 60 - # Note, EXTRA_WARNINGS here was determined for CC and not HOSTCC, it 61 - # is passed here to match a legacy behavior. 
62 - WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed -Wno-nested-externs 63 - OBJTOOL_CFLAGS := -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS) 64 - OBJTOOL_LDFLAGS := $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS) 33 + 34 + OBJTOOL_CFLAGS := -std=gnu11 -fomit-frame-pointer -O2 -g $(WARNINGS) \ 35 + $(INCLUDES) $(LIBELF_FLAGS) $(LIBXXHASH_CFLAGS) $(HOSTCFLAGS) 36 + 37 + OBJTOOL_LDFLAGS := $(LIBSUBCMD) $(LIBELF_LIBS) $(LIBXXHASH_LIBS) $(HOSTLDFLAGS) 65 38 66 39 # Allow old libelf to be used: 67 40 elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(HOSTCC) $(OBJTOOL_CFLAGS) -x c -E - 2>/dev/null | grep elf_getshdr) ··· 73 46 AWK = awk 74 47 MKDIR = mkdir 75 48 76 - BUILD_ORC := n 77 - 78 - ifeq ($(SRCARCH),x86) 79 - BUILD_ORC := y 80 - endif 81 - 82 - ifeq ($(SRCARCH),loongarch) 83 - BUILD_ORC := y 84 - endif 85 - 86 - export BUILD_ORC 87 49 export srctree OUTPUT CFLAGS SRCARCH AWK 88 50 include $(srctree)/tools/build/Makefile.include 89 51
+3 -3
tools/objtool/arch/loongarch/decode.c
··· 7 7 #include <linux/objtool_types.h> 8 8 #include <arch/elf.h> 9 9 10 - int arch_ftrace_match(char *name) 10 + int arch_ftrace_match(const char *name) 11 11 { 12 12 return !strcmp(name, "_mcount"); 13 13 } ··· 17 17 return insn->offset + (insn->immediate << 2); 18 18 } 19 19 20 - unsigned long arch_dest_reloc_offset(int addend) 20 + s64 arch_insn_adjusted_addend(struct instruction *insn, struct reloc *reloc) 21 21 { 22 - return addend; 22 + return reloc_addend(reloc); 23 23 } 24 24 25 25 bool arch_pc_relative_reloc(struct reloc *reloc)
-1
tools/objtool/arch/loongarch/orc.c
··· 5 5 #include <objtool/check.h> 6 6 #include <objtool/orc.h> 7 7 #include <objtool/warn.h> 8 - #include <objtool/endianness.h> 9 8 10 9 int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn) 11 10 {
+3 -4
tools/objtool/arch/powerpc/decode.c
··· 7 7 #include <objtool/arch.h> 8 8 #include <objtool/warn.h> 9 9 #include <objtool/builtin.h> 10 - #include <objtool/endianness.h> 11 10 12 - int arch_ftrace_match(char *name) 11 + int arch_ftrace_match(const char *name) 13 12 { 14 13 return !strcmp(name, "_mcount"); 15 14 } 16 15 17 - unsigned long arch_dest_reloc_offset(int addend) 16 + s64 arch_insn_adjusted_addend(struct instruction *insn, struct reloc *reloc) 18 17 { 19 - return addend; 18 + return reloc_addend(reloc); 20 19 } 21 20 22 21 bool arch_callee_saved_reg(unsigned char reg)
+59 -4
tools/objtool/arch/x86/decode.c
··· 19 19 #include <objtool/elf.h> 20 20 #include <objtool/arch.h> 21 21 #include <objtool/warn.h> 22 - #include <objtool/endianness.h> 23 22 #include <objtool/builtin.h> 24 23 #include <arch/elf.h> 25 24 26 - int arch_ftrace_match(char *name) 25 + int arch_ftrace_match(const char *name) 27 26 { 28 27 return !strcmp(name, "__fentry__"); 29 28 } ··· 67 68 } 68 69 } 69 70 70 - unsigned long arch_dest_reloc_offset(int addend) 71 + /* Undo the effects of __pa_symbol() if necessary */ 72 + static unsigned long phys_to_virt(unsigned long pa) 71 73 { 72 - return addend + 4; 74 + s64 va = pa; 75 + 76 + if (va > 0) 77 + va &= ~(0x80000000); 78 + 79 + return va; 80 + } 81 + 82 + s64 arch_insn_adjusted_addend(struct instruction *insn, struct reloc *reloc) 83 + { 84 + s64 addend = reloc_addend(reloc); 85 + 86 + if (arch_pc_relative_reloc(reloc)) 87 + addend += insn->offset + insn->len - reloc_offset(reloc); 88 + 89 + return phys_to_virt(addend); 90 + } 91 + 92 + static void scan_for_insn(struct section *sec, unsigned long offset, 93 + unsigned long *insn_off, unsigned int *insn_len) 94 + { 95 + unsigned long o = 0; 96 + struct insn insn; 97 + 98 + while (1) { 99 + 100 + insn_decode(&insn, sec->data->d_buf + o, sec_size(sec) - o, 101 + INSN_MODE_64); 102 + 103 + if (o + insn.length > offset) { 104 + *insn_off = o; 105 + *insn_len = insn.length; 106 + return; 107 + } 108 + 109 + o += insn.length; 110 + } 111 + } 112 + 113 + u64 arch_adjusted_addend(struct reloc *reloc) 114 + { 115 + unsigned int type = reloc_type(reloc); 116 + s64 addend = reloc_addend(reloc); 117 + unsigned long insn_off; 118 + unsigned int insn_len; 119 + 120 + if (type == R_X86_64_PLT32) 121 + return addend + 4; 122 + 123 + if (type != R_X86_64_PC32 || !is_text_sec(reloc->sec->base)) 124 + return addend; 125 + 126 + scan_for_insn(reloc->sec->base, reloc_offset(reloc), 127 + &insn_off, &insn_len); 128 + 129 + return addend + insn_off + insn_len - reloc_offset(reloc); 73 130 } 74 131 75 132 unsigned long 
arch_jump_destination(struct instruction *insn)
-1
tools/objtool/arch/x86/orc.c
··· 5 5 #include <objtool/check.h> 6 6 #include <objtool/orc.h> 7 7 #include <objtool/warn.h> 8 - #include <objtool/endianness.h> 9 8 10 9 int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn) 11 10 {
+1 -1
tools/objtool/arch/x86/special.c
··· 89 89 /* look for a relocation which references .rodata */ 90 90 text_reloc = find_reloc_by_dest_range(file->elf, insn->sec, 91 91 insn->offset, insn->len); 92 - if (!text_reloc || text_reloc->sym->type != STT_SECTION || 92 + if (!text_reloc || !is_sec_sym(text_reloc->sym) || 93 93 !text_reloc->sym->sec->rodata) 94 94 return NULL; 95 95
+55 -43
tools/objtool/builtin-check.c
··· 73 73 74 74 static const struct option check_options[] = { 75 75 OPT_GROUP("Actions:"), 76 + OPT_BOOLEAN(0, "checksum", &opts.checksum, "generate per-function checksums"), 77 + OPT_BOOLEAN(0, "cfi", &opts.cfi, "annotate kernel control flow integrity (kCFI) function preambles"), 76 78 OPT_CALLBACK_OPTARG('h', "hacks", NULL, NULL, "jump_label,noinstr,skylake", "patch toolchain bugs/limitations", parse_hacks), 77 - OPT_BOOLEAN('i', "ibt", &opts.ibt, "validate and annotate IBT"), 78 - OPT_BOOLEAN('m', "mcount", &opts.mcount, "annotate mcount/fentry calls for ftrace"), 79 - OPT_BOOLEAN('n', "noinstr", &opts.noinstr, "validate noinstr rules"), 80 - OPT_BOOLEAN(0, "orc", &opts.orc, "generate ORC metadata"), 81 - OPT_BOOLEAN('r', "retpoline", &opts.retpoline, "validate and annotate retpoline usage"), 82 - OPT_BOOLEAN(0, "rethunk", &opts.rethunk, "validate and annotate rethunk usage"), 83 - OPT_BOOLEAN(0, "unret", &opts.unret, "validate entry unret placement"), 84 - OPT_INTEGER(0, "prefix", &opts.prefix, "generate prefix symbols"), 85 - OPT_BOOLEAN('l', "sls", &opts.sls, "validate straight-line-speculation mitigations"), 86 - OPT_BOOLEAN('s', "stackval", &opts.stackval, "validate frame pointer rules"), 87 - OPT_BOOLEAN('t', "static-call", &opts.static_call, "annotate static calls"), 88 - OPT_BOOLEAN('u', "uaccess", &opts.uaccess, "validate uaccess rules for SMAP"), 89 - OPT_BOOLEAN(0 , "cfi", &opts.cfi, "annotate kernel control flow integrity (kCFI) function preambles"), 90 - OPT_BOOLEAN(0 , "noabs", &opts.noabs, "reject absolute references in allocatable sections"), 91 - OPT_CALLBACK_OPTARG(0, "dump", NULL, NULL, "orc", "dump metadata", parse_dump), 79 + OPT_BOOLEAN('i', "ibt", &opts.ibt, "validate and annotate IBT"), 80 + OPT_BOOLEAN('m', "mcount", &opts.mcount, "annotate mcount/fentry calls for ftrace"), 81 + OPT_BOOLEAN(0, "noabs", &opts.noabs, "reject absolute references in allocatable sections"), 82 + OPT_BOOLEAN('n', "noinstr", &opts.noinstr, "validate noinstr 
rules"), 83 + OPT_BOOLEAN(0, "orc", &opts.orc, "generate ORC metadata"), 84 + OPT_BOOLEAN('r', "retpoline", &opts.retpoline, "validate and annotate retpoline usage"), 85 + OPT_BOOLEAN(0, "rethunk", &opts.rethunk, "validate and annotate rethunk usage"), 86 + OPT_BOOLEAN(0, "unret", &opts.unret, "validate entry unret placement"), 87 + OPT_INTEGER(0, "prefix", &opts.prefix, "generate prefix symbols"), 88 + OPT_BOOLEAN('l', "sls", &opts.sls, "validate straight-line-speculation mitigations"), 89 + OPT_BOOLEAN('s', "stackval", &opts.stackval, "validate frame pointer rules"), 90 + OPT_BOOLEAN('t', "static-call", &opts.static_call, "annotate static calls"), 91 + OPT_BOOLEAN('u', "uaccess", &opts.uaccess, "validate uaccess rules for SMAP"), 92 + OPT_CALLBACK_OPTARG(0, "dump", NULL, NULL, "orc", "dump metadata", parse_dump), 92 93 93 94 OPT_GROUP("Options:"), 94 - OPT_BOOLEAN(0, "backtrace", &opts.backtrace, "unwind on error"), 95 - OPT_BOOLEAN(0, "dry-run", &opts.dryrun, "don't write modifications"), 96 - OPT_BOOLEAN(0, "link", &opts.link, "object is a linked object"), 97 - OPT_BOOLEAN(0, "module", &opts.module, "object is part of a kernel module"), 98 - OPT_BOOLEAN(0, "mnop", &opts.mnop, "nop out mcount call sites"), 99 - OPT_BOOLEAN(0, "no-unreachable", &opts.no_unreachable, "skip 'unreachable instruction' warnings"), 100 - OPT_STRING('o', "output", &opts.output, "file", "output file name"), 101 - OPT_BOOLEAN(0, "sec-address", &opts.sec_address, "print section addresses in warnings"), 102 - OPT_BOOLEAN(0, "stats", &opts.stats, "print statistics"), 103 - OPT_BOOLEAN('v', "verbose", &opts.verbose, "verbose warnings"), 104 - OPT_BOOLEAN(0, "Werror", &opts.werror, "return error on warnings"), 95 + OPT_BOOLEAN(0, "backtrace", &opts.backtrace, "unwind on error"), 96 + OPT_BOOLEAN(0, "backup", &opts.backup, "create backup (.orig) file on warning/error"), 97 + OPT_STRING(0, "debug-checksum", &opts.debug_checksum, "funcs", "enable checksum debug output"), 98 + OPT_BOOLEAN(0, 
"dry-run", &opts.dryrun, "don't write modifications"), 99 + OPT_BOOLEAN(0, "link", &opts.link, "object is a linked object"), 100 + OPT_BOOLEAN(0, "module", &opts.module, "object is part of a kernel module"), 101 + OPT_BOOLEAN(0, "mnop", &opts.mnop, "nop out mcount call sites"), 102 + OPT_BOOLEAN(0, "no-unreachable", &opts.no_unreachable, "skip 'unreachable instruction' warnings"), 103 + OPT_STRING('o', "output", &opts.output, "file", "output file name"), 104 + OPT_BOOLEAN(0, "sec-address", &opts.sec_address, "print section addresses in warnings"), 105 + OPT_BOOLEAN(0, "stats", &opts.stats, "print statistics"), 106 + OPT_BOOLEAN('v', "verbose", &opts.verbose, "verbose warnings"), 107 + OPT_BOOLEAN(0, "werror", &opts.werror, "return error on warnings"), 105 108 106 109 OPT_END(), 107 110 }; ··· 162 159 return false; 163 160 } 164 161 165 - if (opts.hack_jump_label || 162 + #ifndef BUILD_KLP 163 + if (opts.checksum) { 164 + ERROR("--checksum not supported; install xxhash-devel and recompile"); 165 + return false; 166 + } 167 + #endif 168 + 169 + if (opts.debug_checksum && !opts.checksum) { 170 + ERROR("--debug-checksum requires --checksum"); 171 + return false; 172 + } 173 + 174 + if (opts.checksum || 175 + opts.hack_jump_label || 166 176 opts.hack_noinstr || 167 177 opts.ibt || 168 178 opts.mcount || ··· 259 243 ERROR_GLIBC("strdup(%s)", argv[i]); 260 244 exit(1); 261 245 } 262 - }; 246 + } 263 247 } 264 248 265 - void print_args(void) 249 + int make_backup(void) 266 250 { 267 - char *backup = NULL; 268 - 269 - if (opts.output || opts.dryrun) 270 - goto print; 251 + char *backup; 271 252 272 253 /* 273 254 * Make a backup before kbuild deletes the file so the error ··· 273 260 backup = malloc(strlen(objname) + strlen(ORIG_SUFFIX) + 1); 274 261 if (!backup) { 275 262 ERROR_GLIBC("malloc"); 276 - goto print; 263 + return 1; 277 264 } 278 265 279 266 strcpy(backup, objname); 280 267 strcat(backup, ORIG_SUFFIX); 281 - if (copy_file(objname, backup)) { 282 - backup = 
NULL; 283 - goto print; 284 - } 268 + if (copy_file(objname, backup)) 269 + return 1; 285 270 286 - print: 287 271 /* 288 - * Print the cmdline args to make it easier to recreate. If '--output' 289 - * wasn't used, add it to the printed args with the backup as input. 272 + * Print the cmdline args to make it easier to recreate. 290 273 */ 274 + 291 275 fprintf(stderr, "%s", orig_argv[0]); 292 276 293 277 for (int i = 1; i < orig_argc; i++) { 294 278 char *arg = orig_argv[i]; 295 279 296 - if (backup && !strcmp(arg, objname)) 280 + /* Modify the printed args to use the backup */ 281 + if (!opts.output && !strcmp(arg, objname)) 297 282 fprintf(stderr, " %s -o %s", backup, objname); 298 283 else 299 284 fprintf(stderr, " %s", arg); 300 285 } 301 286 302 287 fprintf(stderr, "\n"); 288 + return 0; 303 289 } 304 290 305 291 int objtool_run(int argc, const char **argv) ··· 344 332 if (!opts.dryrun && file->elf->changed && elf_write(file->elf)) 345 333 return 1; 346 334 347 - return 0; 335 + return elf_close(file->elf); 348 336 }
+53
tools/objtool/builtin-klp.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + #include <subcmd/parse-options.h> 3 + #include <string.h> 4 + #include <stdlib.h> 5 + #include <objtool/builtin.h> 6 + #include <objtool/objtool.h> 7 + #include <objtool/klp.h> 8 + 9 + struct subcmd { 10 + const char *name; 11 + const char *description; 12 + int (*fn)(int, const char **); 13 + }; 14 + 15 + static struct subcmd subcmds[] = { 16 + { "diff", "Generate binary diff of two object files", cmd_klp_diff, }, 17 + { "post-link", "Finalize klp symbols/relocs after module linking", cmd_klp_post_link, }, 18 + }; 19 + 20 + static void cmd_klp_usage(void) 21 + { 22 + fprintf(stderr, "usage: objtool klp <subcommand> [<options>]\n\n"); 23 + fprintf(stderr, "Subcommands:\n"); 24 + 25 + for (int i = 0; i < ARRAY_SIZE(subcmds); i++) { 26 + struct subcmd *cmd = &subcmds[i]; 27 + 28 + fprintf(stderr, " %s\t%s\n", cmd->name, cmd->description); 29 + } 30 + 31 + exit(1); 32 + } 33 + 34 + int cmd_klp(int argc, const char **argv) 35 + { 36 + argc--; 37 + argv++; 38 + 39 + if (!argc) 40 + cmd_klp_usage(); 41 + 42 + if (argc) { 43 + for (int i = 0; i < ARRAY_SIZE(subcmds); i++) { 44 + struct subcmd *cmd = &subcmds[i]; 45 + 46 + if (!strcmp(cmd->name, argv[0])) 47 + return cmd->fn(argc, argv); 48 + } 49 + } 50 + 51 + cmd_klp_usage(); 52 + return 0; 53 + }
+519 -356
tools/objtool/check.c
··· 3 3 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com> 4 4 */ 5 5 6 + #define _GNU_SOURCE /* memmem() */ 6 7 #include <string.h> 7 8 #include <stdlib.h> 8 9 #include <inttypes.h> ··· 15 14 #include <objtool/check.h> 16 15 #include <objtool/special.h> 17 16 #include <objtool/warn.h> 18 - #include <objtool/endianness.h> 17 + #include <objtool/checksum.h> 18 + #include <objtool/util.h> 19 19 20 20 #include <linux/objtool_types.h> 21 21 #include <linux/hashtable.h> ··· 108 106 #define for_each_insn(file, insn) \ 109 107 for (struct section *__sec, *__fake = (struct section *)1; \ 110 108 __fake; __fake = NULL) \ 111 - for_each_sec(file, __sec) \ 109 + for_each_sec(file->elf, __sec) \ 112 110 sec_for_each_insn(file, __sec, insn) 113 111 114 112 #define func_for_each_insn(file, func, insn) \ ··· 188 186 } 189 187 190 188 /* 191 - * Checks if a string ends with another. 192 - */ 193 - static bool str_ends_with(const char *s, const char *sub) 194 - { 195 - const int slen = strlen(s); 196 - const int sublen = strlen(sub); 197 - 198 - if (sublen > slen) 199 - return 0; 200 - 201 - return !memcmp(s + slen - sublen, sub, sublen); 202 - } 203 - 204 - /* 205 189 * Checks if a function is a Rust "noreturn" one. 
206 190 */ 207 191 static bool is_rust_noreturn(const struct symbol *func) ··· 249 261 if (!func) 250 262 return false; 251 263 252 - if (func->bind == STB_GLOBAL || func->bind == STB_WEAK) { 264 + if (!is_local_sym(func)) { 253 265 if (is_rust_noreturn(func)) 254 266 return true; 255 267 ··· 258 270 return true; 259 271 } 260 272 261 - if (func->bind == STB_WEAK) 273 + if (is_weak_sym(func)) 262 274 return false; 263 275 264 276 if (!func->len) ··· 418 430 struct symbol *func; 419 431 unsigned long offset; 420 432 struct instruction *insn; 421 - int ret; 422 433 423 - for_each_sec(file, sec) { 434 + for_each_sec(file->elf, sec) { 424 435 struct instruction *insns = NULL; 425 436 u8 prev_len = 0; 426 437 u8 idx = 0; 427 438 428 - if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 439 + if (!is_text_sec(sec)) 429 440 continue; 430 441 431 442 if (strcmp(sec->name, ".altinstr_replacement") && ··· 447 460 if (!strcmp(sec->name, ".init.text") && !opts.module) 448 461 sec->init = true; 449 462 450 - for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { 463 + for (offset = 0; offset < sec_size(sec); offset += insn->len) { 451 464 if (!insns || idx == INSN_CHUNK_MAX) { 452 - insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE); 465 + insns = calloc(INSN_CHUNK_SIZE, sizeof(*insn)); 453 466 if (!insns) { 454 467 ERROR_GLIBC("calloc"); 455 468 return -1; ··· 466 479 insn->offset = offset; 467 480 insn->prev_len = prev_len; 468 481 469 - ret = arch_decode_instruction(file, sec, offset, 470 - sec->sh.sh_size - offset, 471 - insn); 472 - if (ret) 473 - return ret; 482 + if (arch_decode_instruction(file, sec, offset, sec_size(sec) - offset, insn)) 483 + return -1; 474 484 475 485 prev_len = insn->len; 476 486 ··· 484 500 } 485 501 486 502 sec_for_each_sym(sec, func) { 487 - if (func->type != STT_NOTYPE && func->type != STT_FUNC) 503 + if (!is_notype_sym(func) && !is_func_sym(func)) 488 504 continue; 489 505 490 - if (func->offset == sec->sh.sh_size) { 506 + if (func->offset == 
sec_size(sec)) { 491 507 /* Heuristic: likely an "end" symbol */ 492 - if (func->type == STT_NOTYPE) 508 + if (is_notype_sym(func)) 493 509 continue; 494 510 ERROR("%s(): STT_FUNC at end of section", func->name); 495 511 return -1; ··· 505 521 506 522 sym_for_each_insn(file, func, insn) { 507 523 insn->sym = func; 508 - if (func->type == STT_FUNC && 524 + if (is_func_sym(func) && 509 525 insn->type == INSN_ENDBR && 510 526 list_empty(&insn->call_node)) { 511 527 if (insn->offset == func->offset) { ··· 549 565 idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long); 550 566 551 567 func = reloc->sym; 552 - if (func->type == STT_SECTION) 568 + if (is_sec_sym(func)) 553 569 func = find_symbol_by_offset(reloc->sym->sec, 554 570 reloc_addend(reloc)); 555 571 if (!func) { ··· 583 599 }; 584 600 const char *pv_ops; 585 601 struct symbol *sym; 586 - int idx, nr, ret; 602 + int idx, nr; 587 603 588 604 if (!opts.noinstr) 589 605 return 0; ··· 595 611 return 0; 596 612 597 613 nr = sym->len / sizeof(unsigned long); 598 - file->pv_ops = calloc(sizeof(struct pv_state), nr); 614 + file->pv_ops = calloc(nr, sizeof(struct pv_state)); 599 615 if (!file->pv_ops) { 600 616 ERROR_GLIBC("calloc"); 601 617 return -1; ··· 605 621 INIT_LIST_HEAD(&file->pv_ops[idx].targets); 606 622 607 623 for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++) { 608 - ret = add_pv_ops(file, pv_ops); 609 - if (ret) 610 - return ret; 624 + if (add_pv_ops(file, pv_ops)) 625 + return -1; 611 626 } 612 627 613 628 return 0; 629 + } 630 + 631 + static bool is_livepatch_module(struct objtool_file *file) 632 + { 633 + struct section *sec; 634 + 635 + if (!opts.module) 636 + return false; 637 + 638 + sec = find_section_by_name(file->elf, ".modinfo"); 639 + if (!sec) 640 + return false; 641 + 642 + return memmem(sec->data->d_buf, sec_size(sec), "\0livepatch=Y", 12); 614 643 } 615 644 616 645 static int create_static_call_sections(struct objtool_file *file) ··· 637 640 638 641 sec = 
find_section_by_name(file->elf, ".static_call_sites"); 639 642 if (sec) { 640 - INIT_LIST_HEAD(&file->static_call_list); 641 - WARN("file already has .static_call_sites section, skipping"); 643 + /* 644 + * Livepatch modules may have already extracted the static call 645 + * site entries to take advantage of vmlinux static call 646 + * privileges. 647 + */ 648 + if (!file->klp) 649 + WARN("file already has .static_call_sites section, skipping"); 650 + 642 651 return 0; 643 652 } 644 653 ··· 688 685 689 686 key_sym = find_symbol_by_name(file->elf, tmp); 690 687 if (!key_sym) { 691 - if (!opts.module) { 688 + if (!opts.module || file->klp) { 692 689 ERROR("static_call: can't find static_call_key symbol: %s", tmp); 693 690 return -1; 694 691 } ··· 831 828 struct symbol *sym = insn->sym; 832 829 *site = 0; 833 830 834 - if (opts.module && sym && sym->type == STT_FUNC && 831 + if (opts.module && sym && is_func_sym(sym) && 835 832 insn->offset == sym->offset && 836 833 (!strcmp(sym->name, "init_module") || 837 834 !strcmp(sym->name, "cleanup_module"))) { ··· 859 856 860 857 sec = find_section_by_name(file->elf, ".cfi_sites"); 861 858 if (sec) { 862 - INIT_LIST_HEAD(&file->call_list); 863 859 WARN("file already has .cfi_sites section, skipping"); 864 860 return 0; 865 861 } 866 862 867 863 idx = 0; 868 - for_each_sym(file, sym) { 869 - if (sym->type != STT_FUNC) 864 + for_each_sym(file->elf, sym) { 865 + if (!is_func_sym(sym)) 870 866 continue; 871 867 872 868 if (strncmp(sym->name, "__cfi_", 6)) ··· 880 878 return -1; 881 879 882 880 idx = 0; 883 - for_each_sym(file, sym) { 884 - if (sym->type != STT_FUNC) 881 + for_each_sym(file->elf, sym) { 882 + if (!is_func_sym(sym)) 885 883 continue; 886 884 887 885 if (strncmp(sym->name, "__cfi_", 6)) ··· 907 905 908 906 sec = find_section_by_name(file->elf, "__mcount_loc"); 909 907 if (sec) { 910 - INIT_LIST_HEAD(&file->mcount_loc_list); 911 - WARN("file already has __mcount_loc section, skipping"); 908 + /* 909 + * Livepatch 
modules have already extracted their __mcount_loc 910 + * entries to cover the !CONFIG_FTRACE_MCOUNT_USE_OBJTOOL case. 911 + */ 912 + if (!file->klp) 913 + WARN("file already has __mcount_loc section, skipping"); 914 + 912 915 return 0; 913 916 } 914 917 ··· 957 950 958 951 sec = find_section_by_name(file->elf, ".call_sites"); 959 952 if (sec) { 960 - INIT_LIST_HEAD(&file->call_list); 961 953 WARN("file already has .call_sites section, skipping"); 962 954 return 0; 963 955 } ··· 986 980 987 981 return 0; 988 982 } 983 + 984 + #ifdef BUILD_KLP 985 + static int create_sym_checksum_section(struct objtool_file *file) 986 + { 987 + struct section *sec; 988 + struct symbol *sym; 989 + unsigned int idx = 0; 990 + struct sym_checksum *checksum; 991 + size_t entsize = sizeof(struct sym_checksum); 992 + 993 + sec = find_section_by_name(file->elf, ".discard.sym_checksum"); 994 + if (sec) { 995 + if (!opts.dryrun) 996 + WARN("file already has .discard.sym_checksum section, skipping"); 997 + 998 + return 0; 999 + } 1000 + 1001 + for_each_sym(file->elf, sym) 1002 + if (sym->csum.checksum) 1003 + idx++; 1004 + 1005 + if (!idx) 1006 + return 0; 1007 + 1008 + sec = elf_create_section_pair(file->elf, ".discard.sym_checksum", entsize, 1009 + idx, idx); 1010 + if (!sec) 1011 + return -1; 1012 + 1013 + idx = 0; 1014 + for_each_sym(file->elf, sym) { 1015 + if (!sym->csum.checksum) 1016 + continue; 1017 + 1018 + if (!elf_init_reloc(file->elf, sec->rsec, idx, idx * entsize, 1019 + sym, 0, R_TEXT64)) 1020 + return -1; 1021 + 1022 + checksum = (struct sym_checksum *)sec->data->d_buf + idx; 1023 + checksum->addr = 0; /* reloc */ 1024 + checksum->checksum = sym->csum.checksum; 1025 + 1026 + mark_sec_changed(file->elf, sec, true); 1027 + 1028 + idx++; 1029 + } 1030 + 1031 + return 0; 1032 + } 1033 + #else 1034 + static int create_sym_checksum_section(struct objtool_file *file) { return -EINVAL; } 1035 + #endif 989 1036 990 1037 /* 991 1038 * Warnings shouldn't be reported for ignored 
functions. ··· 1491 1432 } 1492 1433 1493 1434 static bool is_first_func_insn(struct objtool_file *file, 1494 - struct instruction *insn, struct symbol *sym) 1435 + struct instruction *insn) 1495 1436 { 1496 - if (insn->offset == sym->offset) 1437 + struct symbol *func = insn_func(insn); 1438 + 1439 + if (!func) 1440 + return false; 1441 + 1442 + if (insn->offset == func->offset) 1497 1443 return true; 1498 1444 1499 1445 /* Allow direct CALL/JMP past ENDBR */ ··· 1506 1442 struct instruction *prev = prev_insn_same_sym(file, insn); 1507 1443 1508 1444 if (prev && prev->type == INSN_ENDBR && 1509 - insn->offset == sym->offset + prev->len) 1445 + insn->offset == func->offset + prev->len) 1510 1446 return true; 1511 1447 } 1512 1448 ··· 1514 1450 } 1515 1451 1516 1452 /* 1517 - * A sibling call is a tail-call to another symbol -- to differentiate from a 1518 - * recursive tail-call which is to the same symbol. 1519 - */ 1520 - static bool jump_is_sibling_call(struct objtool_file *file, 1521 - struct instruction *from, struct instruction *to) 1522 - { 1523 - struct symbol *fs = from->sym; 1524 - struct symbol *ts = to->sym; 1525 - 1526 - /* Not a sibling call if from/to a symbol hole */ 1527 - if (!fs || !ts) 1528 - return false; 1529 - 1530 - /* Not a sibling call if not targeting the start of a symbol. */ 1531 - if (!is_first_func_insn(file, to, ts)) 1532 - return false; 1533 - 1534 - /* Disallow sibling calls into STT_NOTYPE */ 1535 - if (ts->type == STT_NOTYPE) 1536 - return false; 1537 - 1538 - /* Must not be self to be a sibling */ 1539 - return fs->pfunc != ts->pfunc; 1540 - } 1541 - 1542 - /* 1543 1453 * Find the destination instructions for all jumps. 
1544 1454 */ 1545 1455 static int add_jump_destinations(struct objtool_file *file) 1546 1456 { 1547 - struct instruction *insn, *jump_dest; 1457 + struct instruction *insn; 1548 1458 struct reloc *reloc; 1549 - struct section *dest_sec; 1550 - unsigned long dest_off; 1551 - int ret; 1552 1459 1553 1460 for_each_insn(file, insn) { 1554 1461 struct symbol *func = insn_func(insn); 1462 + struct instruction *dest_insn; 1463 + struct section *dest_sec; 1464 + struct symbol *dest_sym; 1465 + unsigned long dest_off; 1466 + 1467 + if (!is_static_jump(insn)) 1468 + continue; 1555 1469 1556 1470 if (insn->jump_dest) { 1557 1471 /* ··· 1538 1496 */ 1539 1497 continue; 1540 1498 } 1541 - if (!is_static_jump(insn)) 1542 - continue; 1543 1499 1544 1500 reloc = insn_reloc(file, insn); 1545 1501 if (!reloc) { 1546 1502 dest_sec = insn->sec; 1547 1503 dest_off = arch_jump_destination(insn); 1548 - } else if (reloc->sym->type == STT_SECTION) { 1549 - dest_sec = reloc->sym->sec; 1550 - dest_off = arch_dest_reloc_offset(reloc_addend(reloc)); 1551 - } else if (reloc->sym->retpoline_thunk) { 1552 - ret = add_retpoline_call(file, insn); 1553 - if (ret) 1554 - return ret; 1555 - continue; 1556 - } else if (reloc->sym->return_thunk) { 1557 - add_return_call(file, insn, true); 1558 - continue; 1559 - } else if (func) { 1560 - /* 1561 - * External sibling call or internal sibling call with 1562 - * STT_FUNC reloc. 
1563 - */ 1564 - ret = add_call_dest(file, insn, reloc->sym, true); 1565 - if (ret) 1566 - return ret; 1567 - continue; 1568 - } else if (reloc->sym->sec->idx) { 1569 - dest_sec = reloc->sym->sec; 1570 - dest_off = reloc->sym->sym.st_value + 1571 - arch_dest_reloc_offset(reloc_addend(reloc)); 1504 + dest_sym = dest_sec->sym; 1572 1505 } else { 1573 - /* non-func asm code jumping to another file */ 1574 - continue; 1506 + dest_sym = reloc->sym; 1507 + if (is_undef_sym(dest_sym)) { 1508 + if (dest_sym->retpoline_thunk) { 1509 + if (add_retpoline_call(file, insn)) 1510 + return -1; 1511 + continue; 1512 + } 1513 + 1514 + if (dest_sym->return_thunk) { 1515 + add_return_call(file, insn, true); 1516 + continue; 1517 + } 1518 + 1519 + /* External symbol */ 1520 + if (func) { 1521 + /* External sibling call */ 1522 + if (add_call_dest(file, insn, dest_sym, true)) 1523 + return -1; 1524 + continue; 1525 + } 1526 + 1527 + /* Non-func asm code jumping to external symbol */ 1528 + continue; 1529 + } 1530 + 1531 + dest_sec = dest_sym->sec; 1532 + dest_off = dest_sym->offset + arch_insn_adjusted_addend(insn, reloc); 1575 1533 } 1576 1534 1577 - jump_dest = find_insn(file, dest_sec, dest_off); 1578 - if (!jump_dest) { 1535 + dest_insn = find_insn(file, dest_sec, dest_off); 1536 + if (!dest_insn) { 1579 1537 struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off); 1580 1538 1581 1539 /* 1582 - * This is a special case for retbleed_untrain_ret(). 1583 - * It jumps to __x86_return_thunk(), but objtool 1584 - * can't find the thunk's starting RET 1585 - * instruction, because the RET is also in the 1586 - * middle of another instruction. Objtool only 1587 - * knows about the outer instruction. 1540 + * retbleed_untrain_ret() jumps to 1541 + * __x86_return_thunk(), but objtool can't find 1542 + * the thunk's starting RET instruction, 1543 + * because the RET is also in the middle of 1544 + * another instruction. Objtool only knows 1545 + * about the outer instruction. 
1588 1546 */ 1589 1547 if (sym && sym->embedded_insn) { 1590 1548 add_return_call(file, insn, false); ··· 1592 1550 } 1593 1551 1594 1552 /* 1595 - * GCOV/KCOV dead code can jump to the end of the 1596 - * function/section. 1553 + * GCOV/KCOV dead code can jump to the end of 1554 + * the function/section. 1597 1555 */ 1598 1556 if (file->ignore_unreachables && func && 1599 1557 dest_sec == insn->sec && 1600 1558 dest_off == func->offset + func->len) 1601 1559 continue; 1602 1560 1603 - ERROR_INSN(insn, "can't find jump dest instruction at %s+0x%lx", 1604 - dest_sec->name, dest_off); 1561 + ERROR_INSN(insn, "can't find jump dest instruction at %s", 1562 + offstr(dest_sec, dest_off)); 1605 1563 return -1; 1606 1564 } 1607 1565 1608 - /* 1609 - * An intra-TU jump in retpoline.o might not have a relocation 1610 - * for its jump dest, in which case the above 1611 - * add_{retpoline,return}_call() didn't happen. 1612 - */ 1613 - if (jump_dest->sym && jump_dest->offset == jump_dest->sym->offset) { 1614 - if (jump_dest->sym->retpoline_thunk) { 1615 - ret = add_retpoline_call(file, insn); 1616 - if (ret) 1617 - return ret; 1618 - continue; 1619 - } 1620 - if (jump_dest->sym->return_thunk) { 1621 - add_return_call(file, insn, true); 1622 - continue; 1623 - } 1566 + if (!dest_sym || is_sec_sym(dest_sym)) { 1567 + dest_sym = dest_insn->sym; 1568 + if (!dest_sym) 1569 + goto set_jump_dest; 1624 1570 } 1625 1571 1626 - /* 1627 - * Cross-function jump. 1628 - */ 1629 - if (func && insn_func(jump_dest) && func != insn_func(jump_dest)) { 1630 - 1631 - /* 1632 - * For GCC 8+, create parent/child links for any cold 1633 - * subfunctions. This is _mostly_ redundant with a 1634 - * similar initialization in read_symbols(). 1635 - * 1636 - * If a function has aliases, we want the *first* such 1637 - * function in the symbol table to be the subfunction's 1638 - * parent. In that case we overwrite the 1639 - * initialization done in read_symbols(). 
1640 - * 1641 - * However this code can't completely replace the 1642 - * read_symbols() code because this doesn't detect the 1643 - * case where the parent function's only reference to a 1644 - * subfunction is through a jump table. 1645 - */ 1646 - if (!strstr(func->name, ".cold") && 1647 - strstr(insn_func(jump_dest)->name, ".cold")) { 1648 - func->cfunc = insn_func(jump_dest); 1649 - insn_func(jump_dest)->pfunc = func; 1650 - } 1651 - } 1652 - 1653 - if (jump_is_sibling_call(file, insn, jump_dest)) { 1654 - /* 1655 - * Internal sibling call without reloc or with 1656 - * STT_SECTION reloc. 1657 - */ 1658 - ret = add_call_dest(file, insn, insn_func(jump_dest), true); 1659 - if (ret) 1660 - return ret; 1572 + if (dest_sym->retpoline_thunk && dest_insn->offset == dest_sym->offset) { 1573 + if (add_retpoline_call(file, insn)) 1574 + return -1; 1661 1575 continue; 1662 1576 } 1663 1577 1664 - insn->jump_dest = jump_dest; 1578 + if (dest_sym->return_thunk && dest_insn->offset == dest_sym->offset) { 1579 + add_return_call(file, insn, true); 1580 + continue; 1581 + } 1582 + 1583 + if (!insn->sym || insn->sym == dest_insn->sym) 1584 + goto set_jump_dest; 1585 + 1586 + /* 1587 + * Internal cross-function jump. 1588 + */ 1589 + 1590 + /* 1591 + * For GCC 8+, create parent/child links for any cold 1592 + * subfunctions. This is _mostly_ redundant with a 1593 + * similar initialization in read_symbols(). 1594 + * 1595 + * If a function has aliases, we want the *first* such 1596 + * function in the symbol table to be the subfunction's 1597 + * parent. In that case we overwrite the 1598 + * initialization done in read_symbols(). 1599 + * 1600 + * However this code can't completely replace the 1601 + * read_symbols() code because this doesn't detect the 1602 + * case where the parent function's only reference to a 1603 + * subfunction is through a jump table. 
1604 + */ 1605 + if (func && dest_sym->cold) { 1606 + func->cfunc = dest_sym; 1607 + dest_sym->pfunc = func; 1608 + goto set_jump_dest; 1609 + } 1610 + 1611 + if (is_first_func_insn(file, dest_insn)) { 1612 + /* Internal sibling call */ 1613 + if (add_call_dest(file, insn, dest_sym, true)) 1614 + return -1; 1615 + continue; 1616 + } 1617 + 1618 + set_jump_dest: 1619 + insn->jump_dest = dest_insn; 1665 1620 } 1666 1621 1667 1622 return 0; ··· 1684 1645 unsigned long dest_off; 1685 1646 struct symbol *dest; 1686 1647 struct reloc *reloc; 1687 - int ret; 1688 1648 1689 1649 for_each_insn(file, insn) { 1690 1650 struct symbol *func = insn_func(insn); ··· 1695 1657 dest_off = arch_jump_destination(insn); 1696 1658 dest = find_call_destination(insn->sec, dest_off); 1697 1659 1698 - ret = add_call_dest(file, insn, dest, false); 1699 - if (ret) 1700 - return ret; 1660 + if (add_call_dest(file, insn, dest, false)) 1661 + return -1; 1701 1662 1702 1663 if (func && func->ignore) 1703 1664 continue; ··· 1706 1669 return -1; 1707 1670 } 1708 1671 1709 - if (func && insn_call_dest(insn)->type != STT_FUNC) { 1672 + if (func && !is_func_sym(insn_call_dest(insn))) { 1710 1673 ERROR_INSN(insn, "unsupported call to non-function"); 1711 1674 return -1; 1712 1675 } 1713 1676 1714 - } else if (reloc->sym->type == STT_SECTION) { 1715 - dest_off = arch_dest_reloc_offset(reloc_addend(reloc)); 1677 + } else if (is_sec_sym(reloc->sym)) { 1678 + dest_off = arch_insn_adjusted_addend(insn, reloc); 1716 1679 dest = find_call_destination(reloc->sym->sec, dest_off); 1717 1680 if (!dest) { 1718 1681 ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx", ··· 1720 1683 return -1; 1721 1684 } 1722 1685 1723 - ret = add_call_dest(file, insn, dest, false); 1724 - if (ret) 1725 - return ret; 1686 + if (add_call_dest(file, insn, dest, false)) 1687 + return -1; 1726 1688 1727 1689 } else if (reloc->sym->retpoline_thunk) { 1728 - ret = add_retpoline_call(file, insn); 1729 - if (ret) 1730 - return ret; 
1690 + if (add_retpoline_call(file, insn)) 1691 + return -1; 1731 1692 1732 1693 } else { 1733 - ret = add_call_dest(file, insn, reloc->sym, false); 1734 - if (ret) 1735 - return ret; 1694 + if (add_call_dest(file, insn, reloc->sym, false)) 1695 + return -1; 1736 1696 } 1737 1697 } 1738 1698 ··· 1817 1783 nop->type = INSN_NOP; 1818 1784 nop->sym = orig_insn->sym; 1819 1785 nop->alt_group = new_alt_group; 1786 + nop->fake = 1; 1820 1787 } 1821 1788 1822 1789 if (!special_alt->new_len) { ··· 1947 1912 struct instruction *orig_insn, *new_insn; 1948 1913 struct special_alt *special_alt, *tmp; 1949 1914 struct alternative *alt; 1950 - int ret; 1951 1915 1952 1916 if (special_get_alts(file->elf, &special_alts)) 1953 1917 return -1; ··· 1978 1944 continue; 1979 1945 } 1980 1946 1981 - ret = handle_group_alt(file, special_alt, orig_insn, 1982 - &new_insn); 1983 - if (ret) 1984 - return ret; 1947 + if (handle_group_alt(file, special_alt, orig_insn, &new_insn)) 1948 + return -1; 1985 1949 1986 1950 } else if (special_alt->jump_or_nop) { 1987 - ret = handle_jump_alt(file, special_alt, orig_insn, 1988 - &new_insn); 1989 - if (ret) 1990 - return ret; 1951 + if (handle_jump_alt(file, special_alt, orig_insn, &new_insn)) 1952 + return -1; 1991 1953 } 1992 1954 1993 1955 alt = calloc(1, sizeof(*alt)); ··· 2171 2141 struct symbol *func) 2172 2142 { 2173 2143 struct instruction *insn; 2174 - int ret; 2175 2144 2176 2145 func_for_each_insn(file, func, insn) { 2177 2146 if (!insn_jump_table(insn)) 2178 2147 continue; 2179 2148 2180 - ret = add_jump_table(file, insn); 2181 - if (ret) 2182 - return ret; 2149 + if (add_jump_table(file, insn)) 2150 + return -1; 2183 2151 } 2184 2152 2185 2153 return 0; ··· 2191 2163 static int add_jump_table_alts(struct objtool_file *file) 2192 2164 { 2193 2165 struct symbol *func; 2194 - int ret; 2195 2166 2196 2167 if (!file->rodata) 2197 2168 return 0; 2198 2169 2199 - for_each_sym(file, func) { 2200 - if (func->type != STT_FUNC) 2170 + 
for_each_sym(file->elf, func) { 2171 + if (!is_func_sym(func)) 2201 2172 continue; 2202 2173 2203 2174 mark_func_jump_tables(file, func); 2204 - ret = add_func_jump_tables(file, func); 2205 - if (ret) 2206 - return ret; 2175 + if (add_func_jump_tables(file, func)) 2176 + return -1; 2207 2177 } 2208 2178 2209 2179 return 0; ··· 2235 2209 return -1; 2236 2210 } 2237 2211 2238 - if (sec->sh.sh_size % sizeof(struct unwind_hint)) { 2212 + if (sec_size(sec) % sizeof(struct unwind_hint)) { 2239 2213 ERROR("struct unwind_hint size mismatch"); 2240 2214 return -1; 2241 2215 } 2242 2216 2243 2217 file->hints = true; 2244 2218 2245 - for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) { 2219 + for (i = 0; i < sec_size(sec) / sizeof(struct unwind_hint); i++) { 2246 2220 hint = (struct unwind_hint *)sec->data->d_buf + i; 2247 2221 2248 2222 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); ··· 2251 2225 return -1; 2252 2226 } 2253 2227 2254 - if (reloc->sym->type == STT_SECTION) { 2255 - offset = reloc_addend(reloc); 2256 - } else if (reloc->sym->local_label) { 2257 - offset = reloc->sym->offset; 2258 - } else { 2259 - ERROR("unexpected relocation symbol type in %s", sec->rsec->name); 2260 - return -1; 2261 - } 2228 + offset = reloc->sym->offset + reloc_addend(reloc); 2262 2229 2263 2230 insn = find_insn(file, reloc->sym->sec, offset); 2264 2231 if (!insn) { ··· 2280 2261 if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) { 2281 2262 struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); 2282 2263 2283 - if (sym && sym->bind == STB_GLOBAL) { 2264 + if (sym && is_global_sym(sym)) { 2284 2265 if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) { 2285 2266 ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR"); 2286 2267 return -1; ··· 2318 2299 struct instruction *insn; 2319 2300 struct reloc *reloc; 2320 2301 uint64_t offset; 2321 - int type, ret; 2302 + int type; 2322 2303 2323 2304 sec = find_section_by_name(file->elf, 
".discard.annotate_insn"); 2324 2305 if (!sec) ··· 2336 2317 sec->sh.sh_entsize = 8; 2337 2318 } 2338 2319 2339 - for_each_reloc(sec->rsec, reloc) { 2340 - type = *(u32 *)(sec->data->d_buf + (reloc_idx(reloc) * sec->sh.sh_entsize) + 4); 2341 - type = bswap_if_needed(file->elf, type); 2320 + if (sec_num_entries(sec) != sec_num_entries(sec->rsec)) { 2321 + ERROR("bad .discard.annotate_insn section: missing relocs"); 2322 + return -1; 2323 + } 2342 2324 2325 + for_each_reloc(sec->rsec, reloc) { 2326 + type = annotype(file->elf, sec, reloc); 2343 2327 offset = reloc->sym->offset + reloc_addend(reloc); 2344 2328 insn = find_insn(file, reloc->sym->sec, offset); 2345 2329 ··· 2351 2329 return -1; 2352 2330 } 2353 2331 2354 - ret = func(file, type, insn); 2355 - if (ret < 0) 2356 - return ret; 2332 + if (func(file, type, insn)) 2333 + return -1; 2357 2334 } 2358 2335 2359 2336 return 0; ··· 2492 2471 { 2493 2472 struct symbol *func; 2494 2473 2495 - for_each_sym(file, func) { 2496 - if (func->type == STT_NOTYPE && strstarts(func->name, ".L")) 2474 + for_each_sym(file->elf, func) { 2475 + if (is_notype_sym(func) && strstarts(func->name, ".L")) 2497 2476 func->local_label = true; 2498 2477 2499 - if (func->bind != STB_GLOBAL) 2478 + if (!is_global_sym(func)) 2500 2479 continue; 2501 2480 2502 2481 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR, ··· 2537 2516 * 2538 2517 * .rodata.str1.* sections are ignored; they don't contain jump tables. 
2539 2518 */ 2540 - for_each_sec(file, sec) { 2519 + for_each_sec(file->elf, sec) { 2541 2520 if ((!strncmp(sec->name, ".rodata", 7) && 2542 2521 !strstr(sec->name, ".str1.")) || 2543 2522 !strncmp(sec->name, ".data.rel.ro", 12)) { ··· 2549 2528 file->rodata = found; 2550 2529 } 2551 2530 2531 + static void mark_holes(struct objtool_file *file) 2532 + { 2533 + struct instruction *insn; 2534 + bool in_hole = false; 2535 + 2536 + if (!opts.link) 2537 + return; 2538 + 2539 + /* 2540 + * Whole archive runs might encounter dead code from weak symbols. 2541 + * This is where the linker will have dropped the weak symbol in 2542 + * favour of a regular symbol, but leaves the code in place. 2543 + */ 2544 + for_each_insn(file, insn) { 2545 + if (insn->sym || !find_symbol_hole_containing(insn->sec, insn->offset)) { 2546 + in_hole = false; 2547 + continue; 2548 + } 2549 + 2550 + /* Skip function padding and pfx code */ 2551 + if (!in_hole && insn->type == INSN_NOP) 2552 + continue; 2553 + 2554 + in_hole = true; 2555 + insn->hole = 1; 2556 + 2557 + /* 2558 + * If this hole jumps to a .cold function, mark it ignore. 2559 + */ 2560 + if (insn->jump_dest) { 2561 + struct symbol *dest_func = insn_func(insn->jump_dest); 2562 + 2563 + if (dest_func && dest_func->cold) 2564 + dest_func->ignore = true; 2565 + } 2566 + } 2567 + } 2568 + 2569 + static bool validate_branch_enabled(void) 2570 + { 2571 + return opts.stackval || 2572 + opts.orc || 2573 + opts.uaccess || 2574 + opts.checksum; 2575 + } 2576 + 2552 2577 static int decode_sections(struct objtool_file *file) 2553 2578 { 2554 - int ret; 2579 + file->klp = is_livepatch_module(file); 2555 2580 2556 2581 mark_rodata(file); 2557 2582 2558 - ret = init_pv_ops(file); 2559 - if (ret) 2560 - return ret; 2583 + if (init_pv_ops(file)) 2584 + return -1; 2561 2585 2562 2586 /* 2563 2587 * Must be before add_{jump_call}_destination. 
2564 2588 */ 2565 - ret = classify_symbols(file); 2566 - if (ret) 2567 - return ret; 2589 + if (classify_symbols(file)) 2590 + return -1; 2568 2591 2569 - ret = decode_instructions(file); 2570 - if (ret) 2571 - return ret; 2592 + if (decode_instructions(file)) 2593 + return -1; 2572 2594 2573 - ret = add_ignores(file); 2574 - if (ret) 2575 - return ret; 2595 + if (add_ignores(file)) 2596 + return -1; 2576 2597 2577 2598 add_uaccess_safe(file); 2578 2599 2579 - ret = read_annotate(file, __annotate_early); 2580 - if (ret) 2581 - return ret; 2600 + if (read_annotate(file, __annotate_early)) 2601 + return -1; 2582 2602 2583 2603 /* 2584 2604 * Must be before add_jump_destinations(), which depends on 'func' 2585 2605 * being set for alternatives, to enable proper sibling call detection. 2586 2606 */ 2587 - if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) { 2588 - ret = add_special_section_alts(file); 2589 - if (ret) 2590 - return ret; 2607 + if (validate_branch_enabled() || opts.noinstr || opts.hack_jump_label) { 2608 + if (add_special_section_alts(file)) 2609 + return -1; 2591 2610 } 2592 2611 2593 - ret = add_jump_destinations(file); 2594 - if (ret) 2595 - return ret; 2612 + if (add_jump_destinations(file)) 2613 + return -1; 2596 2614 2597 2615 /* 2598 2616 * Must be before add_call_destination(); it changes INSN_CALL to 2599 2617 * INSN_JUMP. 
2600 2618 */ 2601 - ret = read_annotate(file, __annotate_ifc); 2602 - if (ret) 2603 - return ret; 2619 + if (read_annotate(file, __annotate_ifc)) 2620 + return -1; 2604 2621 2605 - ret = add_call_destinations(file); 2606 - if (ret) 2607 - return ret; 2622 + if (add_call_destinations(file)) 2623 + return -1; 2608 2624 2609 - ret = add_jump_table_alts(file); 2610 - if (ret) 2611 - return ret; 2625 + if (add_jump_table_alts(file)) 2626 + return -1; 2612 2627 2613 - ret = read_unwind_hints(file); 2614 - if (ret) 2615 - return ret; 2628 + if (read_unwind_hints(file)) 2629 + return -1; 2630 + 2631 + /* Must be after add_jump_destinations() */ 2632 + mark_holes(file); 2616 2633 2617 2634 /* 2618 2635 * Must be after add_call_destinations() such that it can override 2619 2636 * dead_end_function() marks. 2620 2637 */ 2621 - ret = read_annotate(file, __annotate_late); 2622 - if (ret) 2623 - return ret; 2638 + if (read_annotate(file, __annotate_late)) 2639 + return -1; 2624 2640 2625 2641 return 0; 2626 2642 } ··· 3411 3353 if (!reloc || strcmp(reloc->sym->name, "pv_ops")) 3412 3354 return false; 3413 3355 3414 - idx = (arch_dest_reloc_offset(reloc_addend(reloc)) / sizeof(void *)); 3356 + idx = arch_insn_adjusted_addend(insn, reloc) / sizeof(void *); 3415 3357 3416 3358 if (file->pv_ops[idx].clean) 3417 3359 return true; ··· 3597 3539 return alt_insn->type == INSN_CLAC || alt_insn->type == INSN_STAC; 3598 3540 } 3599 3541 3542 + static int checksum_debug_init(struct objtool_file *file) 3543 + { 3544 + char *dup, *s; 3545 + 3546 + if (!opts.debug_checksum) 3547 + return 0; 3548 + 3549 + dup = strdup(opts.debug_checksum); 3550 + if (!dup) { 3551 + ERROR_GLIBC("strdup"); 3552 + return -1; 3553 + } 3554 + 3555 + s = dup; 3556 + while (*s) { 3557 + struct symbol *func; 3558 + char *comma; 3559 + 3560 + comma = strchr(s, ','); 3561 + if (comma) 3562 + *comma = '\0'; 3563 + 3564 + func = find_symbol_by_name(file->elf, s); 3565 + if (!func || !is_func_sym(func)) 3566 + 
WARN("--debug-checksum: can't find '%s'", s); 3567 + else 3568 + func->debug_checksum = 1; 3569 + 3570 + if (!comma) 3571 + break; 3572 + 3573 + s = comma + 1; 3574 + } 3575 + 3576 + free(dup); 3577 + return 0; 3578 + } 3579 + 3580 + static void checksum_update_insn(struct objtool_file *file, struct symbol *func, 3581 + struct instruction *insn) 3582 + { 3583 + struct reloc *reloc = insn_reloc(file, insn); 3584 + unsigned long offset; 3585 + struct symbol *sym; 3586 + 3587 + if (insn->fake) 3588 + return; 3589 + 3590 + checksum_update(func, insn, insn->sec->data->d_buf + insn->offset, insn->len); 3591 + 3592 + if (!reloc) { 3593 + struct symbol *call_dest = insn_call_dest(insn); 3594 + 3595 + if (call_dest) 3596 + checksum_update(func, insn, call_dest->demangled_name, 3597 + strlen(call_dest->demangled_name)); 3598 + return; 3599 + } 3600 + 3601 + sym = reloc->sym; 3602 + offset = arch_insn_adjusted_addend(insn, reloc); 3603 + 3604 + if (is_string_sec(sym->sec)) { 3605 + char *str; 3606 + 3607 + str = sym->sec->data->d_buf + sym->offset + offset; 3608 + checksum_update(func, insn, str, strlen(str)); 3609 + return; 3610 + } 3611 + 3612 + if (is_sec_sym(sym)) { 3613 + sym = find_symbol_containing(reloc->sym->sec, offset); 3614 + if (!sym) 3615 + return; 3616 + 3617 + offset -= sym->offset; 3618 + } 3619 + 3620 + checksum_update(func, insn, sym->demangled_name, strlen(sym->demangled_name)); 3621 + checksum_update(func, insn, &offset, sizeof(offset)); 3622 + } 3623 + 3600 3624 /* 3601 3625 * Follow the branch starting at the given instruction, and recursively follow 3602 3626 * any other branches (jumps). 
Meanwhile, track the frame pointer state at ··· 3690 3550 { 3691 3551 struct alternative *alt; 3692 3552 struct instruction *next_insn, *prev_insn = NULL; 3693 - struct section *sec; 3694 3553 u8 visited; 3695 3554 int ret; 3696 3555 3697 3556 if (func && func->ignore) 3698 3557 return 0; 3699 3558 3700 - sec = insn->sec; 3701 - 3702 3559 while (1) { 3703 3560 next_insn = next_insn_to_validate(file, insn); 3704 3561 3562 + if (opts.checksum && func && insn->sec) 3563 + checksum_update_insn(file, func, insn); 3564 + 3705 3565 if (func && insn_func(insn) && func != insn_func(insn)->pfunc) { 3706 3566 /* Ignore KCFI type preambles, which always fall through */ 3707 - if (!strncmp(func->name, "__cfi_", 6) || 3708 - !strncmp(func->name, "__pfx_", 6) || 3709 - !strncmp(func->name, "__pi___cfi_", 11) || 3710 - !strncmp(func->name, "__pi___pfx_", 11)) 3567 + if (is_prefix_func(func)) 3711 3568 return 0; 3712 3569 3713 3570 if (file->ignore_unreachables) ··· 3935 3798 3936 3799 WARN("%s%sunexpected end of section %s", 3937 3800 func ? func->name : "", func ? 
"(): " : "", 3938 - sec->name); 3801 + insn->sec->name); 3939 3802 return 1; 3940 3803 } 3941 3804 ··· 3951 3814 struct insn_state *state) 3952 3815 { 3953 3816 if (insn->hint && !insn->visited) { 3954 - int ret = validate_branch(file, insn_func(insn), insn, *state); 3817 + struct symbol *func = insn_func(insn); 3818 + int ret; 3819 + 3820 + if (opts.checksum) 3821 + checksum_init(func); 3822 + 3823 + ret = validate_branch(file, func, insn, *state); 3955 3824 if (ret) 3956 3825 BT_INSN(insn, "<=== (hint)"); 3957 3826 return ret; ··· 4201 4058 struct instruction *prev_insn; 4202 4059 int i; 4203 4060 4204 - if (insn->type == INSN_NOP || insn->type == INSN_TRAP || (func && func->ignore)) 4061 + if (insn->type == INSN_NOP || insn->type == INSN_TRAP || 4062 + insn->hole || (func && func->ignore)) 4205 4063 return true; 4206 4064 4207 4065 /* ··· 4212 4068 if (!strcmp(insn->sec->name, ".altinstr_replacement") || 4213 4069 !strcmp(insn->sec->name, ".altinstr_aux")) 4214 4070 return true; 4215 - 4216 - /* 4217 - * Whole archive runs might encounter dead code from weak symbols. 4218 - * This is where the linker will have dropped the weak symbol in 4219 - * favour of a regular symbol, but leaves the code in place. 4220 - * 4221 - * In this case we'll find a piece of code (whole function) that is not 4222 - * covered by a !section symbol. Ignore them. 4223 - */ 4224 - if (opts.link && !func) { 4225 - int size = find_symbol_hole_containing(insn->sec, insn->offset); 4226 - unsigned long end = insn->offset + size; 4227 - 4228 - if (!size) /* not a hole */ 4229 - return false; 4230 - 4231 - if (size < 0) /* hole until the end */ 4232 - return true; 4233 - 4234 - sec_for_each_insn_continue(file, insn) { 4235 - /* 4236 - * If we reach a visited instruction at or before the 4237 - * end of the hole, ignore the unreachable. 
4238 - */ 4239 - if (insn->visited) 4240 - return true; 4241 - 4242 - if (insn->offset >= end) 4243 - break; 4244 - 4245 - /* 4246 - * If this hole jumps to a .cold function, mark it ignore too. 4247 - */ 4248 - if (insn->jump_dest && insn_func(insn->jump_dest) && 4249 - strstr(insn_func(insn->jump_dest)->name, ".cold")) { 4250 - insn_func(insn->jump_dest)->ignore = true; 4251 - } 4252 - } 4253 - 4254 - return false; 4255 - } 4256 4071 4257 4072 if (!func) 4258 4073 return false; ··· 4264 4161 return false; 4265 4162 } 4266 4163 4267 - static int add_prefix_symbol(struct objtool_file *file, struct symbol *func) 4164 + /* 4165 + * For FineIBT or kCFI, a certain number of bytes preceding the function may be 4166 + * NOPs. Those NOPs may be rewritten at runtime and executed, so give them a 4167 + * proper function name: __pfx_<func>. 4168 + * 4169 + * The NOPs may not exist for the following cases: 4170 + * 4171 + * - compiler cloned functions (*.cold, *.part0, etc) 4172 + * - asm functions created with inline asm or without SYM_FUNC_START() 4173 + * 4174 + * Also, the function may already have a prefix from a previous objtool run 4175 + * (livepatch extracted functions, or manually running objtool multiple times). 4176 + * 4177 + * So return 0 if the NOPs are missing or the function already has a prefix 4178 + * symbol. 
4179 + */ 4180 + static int create_prefix_symbol(struct objtool_file *file, struct symbol *func) 4268 4181 { 4269 4182 struct instruction *insn, *prev; 4183 + char name[SYM_NAME_LEN]; 4270 4184 struct cfi_state *cfi; 4271 4185 4272 - insn = find_insn(file, func->sec, func->offset); 4273 - if (!insn) 4186 + if (!is_func_sym(func) || is_prefix_func(func) || 4187 + func->cold || func->static_call_tramp) 4188 + return 0; 4189 + 4190 + if ((strlen(func->name) + sizeof("__pfx_") > SYM_NAME_LEN)) { 4191 + WARN("%s: symbol name too long, can't create __pfx_ symbol", 4192 + func->name); 4193 + return 0; 4194 + } 4195 + 4196 + if (snprintf_check(name, SYM_NAME_LEN, "__pfx_%s", func->name)) 4274 4197 return -1; 4198 + 4199 + if (file->klp) { 4200 + struct symbol *pfx; 4201 + 4202 + pfx = find_symbol_by_offset(func->sec, func->offset - opts.prefix); 4203 + if (pfx && is_prefix_func(pfx) && !strcmp(pfx->name, name)) 4204 + return 0; 4205 + } 4206 + 4207 + insn = find_insn(file, func->sec, func->offset); 4208 + if (!insn) { 4209 + WARN("%s: can't find starting instruction", func->name); 4210 + return -1; 4211 + } 4275 4212 4276 4213 for (prev = prev_insn_same_sec(file, insn); 4277 4214 prev; ··· 4319 4176 u64 offset; 4320 4177 4321 4178 if (prev->type != INSN_NOP) 4322 - return -1; 4179 + return 0; 4323 4180 4324 4181 offset = func->offset - prev->offset; 4325 4182 4326 4183 if (offset > opts.prefix) 4327 - return -1; 4184 + return 0; 4328 4185 4329 4186 if (offset < opts.prefix) 4330 4187 continue; 4331 4188 4332 - elf_create_prefix_symbol(file->elf, func, opts.prefix); 4189 + if (!elf_create_symbol(file->elf, name, func->sec, 4190 + GELF_ST_BIND(func->sym.st_info), 4191 + GELF_ST_TYPE(func->sym.st_info), 4192 + prev->offset, opts.prefix)) 4193 + return -1; 4194 + 4333 4195 break; 4334 4196 } 4335 4197 4336 4198 if (!prev) 4337 - return -1; 4199 + return 0; 4338 4200 4339 4201 if (!insn->cfi) { 4340 4202 /* ··· 4357 4209 return 0; 4358 4210 } 4359 4211 4360 - static int 
add_prefix_symbols(struct objtool_file *file) 4212 + static int create_prefix_symbols(struct objtool_file *file) 4361 4213 { 4362 4214 struct section *sec; 4363 4215 struct symbol *func; 4364 4216 4365 - for_each_sec(file, sec) { 4366 - if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 4217 + for_each_sec(file->elf, sec) { 4218 + if (!is_text_sec(sec)) 4367 4219 continue; 4368 4220 4369 4221 sec_for_each_sym(sec, func) { 4370 - if (func->type != STT_FUNC) 4371 - continue; 4372 - 4373 - add_prefix_symbol(file, func); 4222 + if (create_prefix_symbol(file, func)) 4223 + return -1; 4374 4224 } 4375 4225 } 4376 4226 ··· 4379 4233 struct symbol *sym, struct insn_state *state) 4380 4234 { 4381 4235 struct instruction *insn; 4236 + struct symbol *func; 4382 4237 int ret; 4383 4238 4384 4239 if (!sym->len) { ··· 4397 4250 if (opts.uaccess) 4398 4251 state->uaccess = sym->uaccess_safe; 4399 4252 4400 - ret = validate_branch(file, insn_func(insn), insn, *state); 4253 + func = insn_func(insn); 4254 + 4255 + if (opts.checksum) 4256 + checksum_init(func); 4257 + 4258 + ret = validate_branch(file, func, insn, *state); 4401 4259 if (ret) 4402 4260 BT_INSN(insn, "<=== (sym)"); 4261 + 4262 + if (opts.checksum) 4263 + checksum_finish(func); 4264 + 4403 4265 return ret; 4404 4266 } 4405 4267 ··· 4419 4263 int warnings = 0; 4420 4264 4421 4265 sec_for_each_sym(sec, func) { 4422 - if (func->type != STT_FUNC) 4266 + if (!is_func_sym(func)) 4423 4267 continue; 4424 4268 4425 4269 init_insn_state(file, &state, sec); ··· 4462 4306 struct section *sec; 4463 4307 int warnings = 0; 4464 4308 4465 - for_each_sec(file, sec) { 4466 - if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 4309 + for_each_sec(file->elf, sec) { 4310 + if (!is_text_sec(sec)) 4467 4311 continue; 4468 4312 4469 4313 warnings += validate_section(file, sec); ··· 4590 4434 reloc_offset(reloc) + 1, 4591 4435 (insn->offset + insn->len) - (reloc_offset(reloc) + 1))) { 4592 4436 4593 - off = reloc->sym->offset; 4594 - if (reloc_type(reloc) == 
R_X86_64_PC32 || 4595 - reloc_type(reloc) == R_X86_64_PLT32) 4596 - off += arch_dest_reloc_offset(reloc_addend(reloc)); 4597 - else 4598 - off += reloc_addend(reloc); 4437 + off = reloc->sym->offset + arch_insn_adjusted_addend(insn, reloc); 4599 4438 4600 4439 dest = find_insn(file, reloc->sym->sec, off); 4601 4440 if (!dest) ··· 4641 4490 for_each_insn(file, insn) 4642 4491 warnings += validate_ibt_insn(file, insn); 4643 4492 4644 - for_each_sec(file, sec) { 4493 + for_each_sec(file->elf, sec) { 4645 4494 4646 4495 /* Already done by validate_ibt_insn() */ 4647 - if (sec->sh.sh_flags & SHF_EXECINSTR) 4496 + if (is_text_sec(sec)) 4648 4497 continue; 4649 4498 4650 4499 if (!sec->rsec) ··· 4659 4508 !strncmp(sec->name, ".debug", 6) || 4660 4509 !strcmp(sec->name, ".altinstructions") || 4661 4510 !strcmp(sec->name, ".ibt_endbr_seal") || 4511 + !strcmp(sec->name, ".kcfi_traps") || 4662 4512 !strcmp(sec->name, ".orc_unwind_ip") || 4663 - !strcmp(sec->name, ".parainstructions") || 4664 4513 !strcmp(sec->name, ".retpoline_sites") || 4665 4514 !strcmp(sec->name, ".smp_locks") || 4666 4515 !strcmp(sec->name, ".static_call_sites") || ··· 4669 4518 !strcmp(sec->name, "__bug_table") || 4670 4519 !strcmp(sec->name, "__ex_table") || 4671 4520 !strcmp(sec->name, "__jump_table") || 4521 + !strcmp(sec->name, "__klp_funcs") || 4672 4522 !strcmp(sec->name, "__mcount_loc") || 4673 - !strcmp(sec->name, ".kcfi_traps") || 4674 4523 !strcmp(sec->name, ".llvm.call-graph-profile") || 4675 4524 !strcmp(sec->name, ".llvm_bb_addr_map") || 4676 4525 !strcmp(sec->name, "__tracepoints") || 4677 - strstr(sec->name, "__patchable_function_entries")) 4526 + !strcmp(sec->name, "__patchable_function_entries")) 4678 4527 continue; 4679 4528 4680 4529 for_each_reloc(sec->rsec, reloc) ··· 4803 4652 struct symbol *sym; 4804 4653 char *funcs = NULL, *tmp; 4805 4654 4806 - for_each_sym(file, sym) { 4655 + for_each_sym(file->elf, sym) { 4807 4656 if (sym->warned) { 4808 4657 if (!funcs) { 4809 4658 funcs = 
malloc(strlen(sym->name) + 1); ··· 4843 4692 struct reloc *reloc; 4844 4693 int ret = 0; 4845 4694 4846 - for_each_sec(file, sec) { 4695 + for_each_sec(file->elf, sec) { 4847 4696 /* absolute references in non-loadable sections are fine */ 4848 4697 if (!(sec->sh.sh_flags & SHF_ALLOC)) 4849 4698 continue; ··· 4917 4766 cfi_hash_add(&init_cfi); 4918 4767 cfi_hash_add(&func_cfi); 4919 4768 4769 + ret = checksum_debug_init(file); 4770 + if (ret) 4771 + goto out; 4772 + 4920 4773 ret = decode_sections(file); 4921 4774 if (ret) 4922 4775 goto out; ··· 4931 4776 if (opts.retpoline) 4932 4777 warnings += validate_retpoline(file); 4933 4778 4934 - if (opts.stackval || opts.orc || opts.uaccess) { 4779 + if (validate_branch_enabled()) { 4935 4780 int w = 0; 4936 4781 4937 4782 w += validate_functions(file); ··· 4996 4841 } 4997 4842 4998 4843 if (opts.prefix) { 4999 - ret = add_prefix_symbols(file); 4844 + ret = create_prefix_symbols(file); 5000 4845 if (ret) 5001 4846 goto out; 5002 4847 } ··· 5009 4854 5010 4855 if (opts.noabs) 5011 4856 warnings += check_abs_references(file); 4857 + 4858 + if (opts.checksum) { 4859 + ret = create_sym_checksum_section(file); 4860 + if (ret) 4861 + goto out; 4862 + } 5012 4863 5013 4864 if (opts.orc && nr_insns) { 5014 4865 ret = orc_create(file); ··· 5041 4880 if (opts.verbose) { 5042 4881 if (opts.werror && warnings) 5043 4882 WARN("%d warning(s) upgraded to errors", warnings); 5044 - print_args(); 5045 4883 disas_warned_funcs(file); 5046 4884 } 4885 + 4886 + if (opts.backup && make_backup()) 4887 + return 1; 5047 4888 5048 4889 return ret; 5049 4890 }
+585 -196
tools/objtool/elf.c
··· 16 16 #include <string.h> 17 17 #include <unistd.h> 18 18 #include <errno.h> 19 + #include <libgen.h> 20 + #include <ctype.h> 19 21 #include <linux/interval_tree_generic.h> 20 22 #include <objtool/builtin.h> 21 - 22 23 #include <objtool/elf.h> 23 24 #include <objtool/warn.h> 25 + 26 + #define ALIGN_UP(x, align_to) (((x) + ((align_to)-1)) & ~((align_to)-1)) 27 + #define ALIGN_UP_POW2(x) (1U << ((8 * sizeof(x)) - __builtin_clz((x) - 1U))) 28 + #define MAX(a, b) ((a) > (b) ? (a) : (b)) 24 29 25 30 static inline u32 str_hash(const char *str) 26 31 { ··· 97 92 98 93 static inline unsigned long __sym_last(struct symbol *s) 99 94 { 100 - return s->offset + s->len - 1; 95 + return s->offset + (s->len ? s->len - 1 : 0); 101 96 } 102 97 103 98 INTERVAL_TREE_DEFINE(struct symbol, node, unsigned long, __subtree_last, 104 - __sym_start, __sym_last, static, __sym) 99 + __sym_start, __sym_last, static inline __maybe_unused, 100 + __sym) 105 101 106 102 #define __sym_for_each(_iter, _tree, _start, _end) \ 107 103 for (_iter = __sym_iter_first((_tree), (_start), (_end)); \ ··· 114 108 }; 115 109 116 110 /* 117 - * Find !section symbol where @offset is after it. 111 + * Find the last symbol before @offset. 
118 112 */ 119 113 static int symbol_hole_by_offset(const void *key, const struct rb_node *node) 120 114 { ··· 125 119 return -1; 126 120 127 121 if (sh->key >= s->offset + s->len) { 128 - if (s->type != STT_SECTION) 129 - sh->sym = s; 122 + sh->sym = s; 130 123 return 1; 131 124 } 132 125 ··· 175 170 struct symbol *iter; 176 171 177 172 __sym_for_each(iter, tree, offset, offset) { 178 - if (iter->offset == offset && iter->type != STT_SECTION) 173 + if (iter->offset == offset && !is_sec_sym(iter)) 179 174 return iter; 180 175 } 181 176 ··· 188 183 struct symbol *iter; 189 184 190 185 __sym_for_each(iter, tree, offset, offset) { 191 - if (iter->offset == offset && iter->type == STT_FUNC) 186 + if (iter->offset == offset && is_func_sym(iter)) 192 187 return iter; 193 188 } 194 189 ··· 198 193 struct symbol *find_symbol_containing(const struct section *sec, unsigned long offset) 199 194 { 200 195 struct rb_root_cached *tree = (struct rb_root_cached *)&sec->symbol_tree; 201 - struct symbol *iter; 196 + struct symbol *sym = NULL, *tmp; 202 197 203 - __sym_for_each(iter, tree, offset, offset) { 204 - if (iter->type != STT_SECTION) 205 - return iter; 198 + __sym_for_each(tmp, tree, offset, offset) { 199 + if (tmp->len) { 200 + if (!sym) { 201 + sym = tmp; 202 + continue; 203 + } 204 + 205 + if (sym->offset != tmp->offset || sym->len != tmp->len) { 206 + /* 207 + * In the rare case of overlapping symbols, 208 + * pick the smaller one. 
209 + * 210 + * TODO: outlaw overlapping symbols 211 + */ 212 + if (tmp->len < sym->len) 213 + sym = tmp; 214 + } 215 + } 206 216 } 207 217 208 - return NULL; 218 + return sym; 209 219 } 210 220 211 221 /* ··· 269 249 struct symbol *iter; 270 250 271 251 __sym_for_each(iter, tree, offset, offset) { 272 - if (iter->type == STT_FUNC) 252 + if (is_func_sym(iter)) 273 253 return iter; 274 254 } 275 255 ··· 282 262 283 263 elf_hash_for_each_possible(symbol_name, sym, name_hash, str_hash(name)) { 284 264 if (!strcmp(sym->name, name)) 265 + return sym; 266 + } 267 + 268 + return NULL; 269 + } 270 + 271 + struct symbol *find_global_symbol_by_name(const struct elf *elf, const char *name) 272 + { 273 + struct symbol *sym; 274 + 275 + elf_hash_for_each_possible(symbol_name, sym, name_hash, str_hash(name)) { 276 + if (!strcmp(sym->name, name) && !is_local_sym(sym)) 285 277 return sym; 286 278 } 287 279 ··· 390 358 return -1; 391 359 } 392 360 393 - if (sec->sh.sh_size != 0 && !is_dwarf_section(sec)) { 361 + if (sec_size(sec) != 0 && !is_dwarf_section(sec)) { 394 362 sec->data = elf_getdata(s, NULL); 395 363 if (!sec->data) { 396 364 ERROR_ELF("elf_getdata"); 397 365 return -1; 398 366 } 399 367 if (sec->data->d_off != 0 || 400 - sec->data->d_size != sec->sh.sh_size) { 368 + sec->data->d_size != sec_size(sec)) { 401 369 ERROR("unexpected data attributes for %s", sec->name); 402 370 return -1; 403 371 } ··· 425 393 return 0; 426 394 } 427 395 428 - static void elf_add_symbol(struct elf *elf, struct symbol *sym) 396 + static const char *demangle_name(struct symbol *sym) 397 + { 398 + char *str; 399 + 400 + if (!is_local_sym(sym)) 401 + return sym->name; 402 + 403 + if (!is_func_sym(sym) && !is_object_sym(sym)) 404 + return sym->name; 405 + 406 + if (!strstarts(sym->name, "__UNIQUE_ID_") && !strchr(sym->name, '.')) 407 + return sym->name; 408 + 409 + str = strdup(sym->name); 410 + if (!str) { 411 + ERROR_GLIBC("strdup"); 412 + return NULL; 413 + } 414 + 415 + for (int i = 
strlen(str) - 1; i >= 0; i--) { 416 + char c = str[i]; 417 + 418 + if (!isdigit(c) && c != '.') { 419 + str[i + 1] = '\0'; 420 + break; 421 + } 422 + }; 423 + 424 + return str; 425 + } 426 + 427 + static int elf_add_symbol(struct elf *elf, struct symbol *sym) 429 428 { 430 429 struct list_head *entry; 431 430 struct rb_node *pnode; ··· 468 405 sym->type = GELF_ST_TYPE(sym->sym.st_info); 469 406 sym->bind = GELF_ST_BIND(sym->sym.st_info); 470 407 471 - if (sym->type == STT_FILE) 408 + if (is_file_sym(sym)) 472 409 elf->num_files++; 473 410 474 411 sym->offset = sym->sym.st_value; 475 412 sym->len = sym->sym.st_size; 476 413 477 414 __sym_for_each(iter, &sym->sec->symbol_tree, sym->offset, sym->offset) { 478 - if (iter->offset == sym->offset && iter->type == sym->type) 415 + if (iter->offset == sym->offset && iter->type == sym->type && 416 + iter->len == sym->len) 479 417 iter->alias = sym; 480 418 } 481 419 ··· 487 423 else 488 424 entry = &sym->sec->symbol_list; 489 425 list_add(&sym->list, entry); 426 + 427 + list_add_tail(&sym->global_list, &elf->symbols); 490 428 elf_hash_add(symbol, &sym->hash, sym->idx); 491 429 elf_hash_add(symbol_name, &sym->name_hash, str_hash(sym->name)); 492 430 493 - /* 494 - * Don't store empty STT_NOTYPE symbols in the rbtree. They 495 - * can exist within a function, confusing the sorting. 
496 - */ 497 - if (!sym->len) 498 - __sym_remove(sym, &sym->sec->symbol_tree); 431 + if (is_func_sym(sym) && 432 + (strstarts(sym->name, "__pfx_") || 433 + strstarts(sym->name, "__cfi_") || 434 + strstarts(sym->name, "__pi___pfx_") || 435 + strstarts(sym->name, "__pi___cfi_"))) 436 + sym->prefix = 1; 437 + 438 + if (strstarts(sym->name, ".klp.sym")) 439 + sym->klp = 1; 440 + 441 + if (!sym->klp && is_func_sym(sym) && strstr(sym->name, ".cold")) 442 + sym->cold = 1; 443 + sym->pfunc = sym->cfunc = sym; 444 + 445 + sym->demangled_name = demangle_name(sym); 446 + if (!sym->demangled_name) 447 + return -1; 448 + 449 + return 0; 499 450 } 500 451 501 452 static int read_symbols(struct elf *elf) ··· 548 469 ERROR_GLIBC("calloc"); 549 470 return -1; 550 471 } 472 + 473 + INIT_LIST_HEAD(&elf->symbols); 474 + 551 475 for (i = 0; i < symbols_nr; i++) { 552 476 sym = &elf->symbol_data[i]; 553 477 ··· 559 477 if (!gelf_getsymshndx(symtab->data, shndx_data, i, &sym->sym, 560 478 &shndx)) { 561 479 ERROR_ELF("gelf_getsymshndx"); 562 - goto err; 480 + return -1; 563 481 } 564 482 565 483 sym->name = elf_strptr(elf->elf, symtab->sh.sh_link, 566 484 sym->sym.st_name); 567 485 if (!sym->name) { 568 486 ERROR_ELF("elf_strptr"); 569 - goto err; 487 + return -1; 570 488 } 571 489 572 490 if ((sym->sym.st_shndx > SHN_UNDEF && ··· 578 496 sym->sec = find_section_by_index(elf, shndx); 579 497 if (!sym->sec) { 580 498 ERROR("couldn't find section for symbol %s", sym->name); 581 - goto err; 499 + return -1; 582 500 } 583 501 if (GELF_ST_TYPE(sym->sym.st_info) == STT_SECTION) { 584 502 sym->name = sym->sec->name; ··· 587 505 } else 588 506 sym->sec = find_section_by_index(elf, 0); 589 507 590 - elf_add_symbol(elf, sym); 508 + if (elf_add_symbol(elf, sym)) 509 + return -1; 591 510 } 592 511 593 512 if (opts.stats) { ··· 601 518 sec_for_each_sym(sec, sym) { 602 519 char *pname; 603 520 size_t pnamelen; 604 - if (sym->type != STT_FUNC) 521 + 522 + if (!sym->cold) 605 523 continue; 606 - 607 - 
if (sym->pfunc == NULL) 608 - sym->pfunc = sym; 609 - 610 - if (sym->cfunc == NULL) 611 - sym->cfunc = sym; 612 524 613 525 coldstr = strstr(sym->name, ".cold"); 614 - if (!coldstr) 615 - continue; 526 + if (!coldstr) { 527 + ERROR("%s(): cold subfunction without \".cold\"?", sym->name); 528 + return -1; 529 + } 616 530 617 531 pnamelen = coldstr - sym->name; 618 532 pname = strndup(sym->name, pnamelen); ··· 646 566 } 647 567 648 568 return 0; 649 - 650 - err: 651 - free(sym); 652 - return -1; 653 569 } 654 570 655 571 static int mark_group_syms(struct elf *elf) ··· 659 583 return -1; 660 584 } 661 585 662 - list_for_each_entry(sec, &elf->sections, list) { 586 + for_each_sec(elf, sec) { 663 587 if (sec->sh.sh_type == SHT_GROUP && 664 588 sec->sh.sh_link == symtab->idx) { 665 589 sym = find_symbol_by_index(elf, sec->sh.sh_info); ··· 700 624 static int elf_update_symbol(struct elf *elf, struct section *symtab, 701 625 struct section *symtab_shndx, struct symbol *sym) 702 626 { 703 - Elf32_Word shndx = sym->sec ? sym->sec->idx : SHN_UNDEF; 627 + Elf32_Word shndx; 704 628 Elf_Data *symtab_data = NULL, *shndx_data = NULL; 705 629 Elf64_Xword entsize = symtab->sh.sh_entsize; 706 630 int max_idx, idx = sym->idx; ··· 708 632 bool is_special_shndx = sym->sym.st_shndx >= SHN_LORESERVE && 709 633 sym->sym.st_shndx != SHN_XINDEX; 710 634 711 - if (is_special_shndx) 712 - shndx = sym->sym.st_shndx; 635 + shndx = is_special_shndx ? 
sym->sym.st_shndx : sym->sec->idx; 713 636 714 637 s = elf_getscn(elf->elf, symtab->idx); 715 638 if (!s) { ··· 806 731 } 807 732 808 733 /* setup extended section index magic and write the symbol */ 809 - if ((shndx >= SHN_UNDEF && shndx < SHN_LORESERVE) || is_special_shndx) { 734 + if (shndx < SHN_LORESERVE || is_special_shndx) { 810 735 sym->sym.st_shndx = shndx; 811 736 if (!shndx_data) 812 737 shndx = 0; ··· 826 751 return 0; 827 752 } 828 753 829 - static struct symbol * 830 - __elf_create_symbol(struct elf *elf, struct symbol *sym) 754 + struct symbol *elf_create_symbol(struct elf *elf, const char *name, 755 + struct section *sec, unsigned int bind, 756 + unsigned int type, unsigned long offset, 757 + size_t size) 831 758 { 832 759 struct section *symtab, *symtab_shndx; 833 760 Elf32_Word first_non_local, new_idx; 834 - struct symbol *old; 761 + struct symbol *old, *sym; 762 + 763 + sym = calloc(1, sizeof(*sym)); 764 + if (!sym) { 765 + ERROR_GLIBC("calloc"); 766 + return NULL; 767 + } 768 + 769 + sym->name = strdup(name); 770 + if (!sym->name) { 771 + ERROR_GLIBC("strdup"); 772 + return NULL; 773 + } 774 + 775 + if (type != STT_SECTION) { 776 + sym->sym.st_name = elf_add_string(elf, NULL, sym->name); 777 + if (sym->sym.st_name == -1) 778 + return NULL; 779 + } 780 + 781 + if (sec) { 782 + sym->sec = sec; 783 + } else { 784 + sym->sec = find_section_by_index(elf, 0); 785 + if (!sym->sec) { 786 + ERROR("no NULL section"); 787 + return NULL; 788 + } 789 + } 790 + 791 + sym->sym.st_info = GELF_ST_INFO(bind, type); 792 + sym->sym.st_value = offset; 793 + sym->sym.st_size = size; 835 794 836 795 symtab = find_section_by_name(elf, ".symtab"); 837 - if (symtab) { 838 - symtab_shndx = find_section_by_name(elf, ".symtab_shndx"); 839 - } else { 796 + if (!symtab) { 840 797 ERROR("no .symtab"); 841 798 return NULL; 842 799 } 843 800 801 + symtab_shndx = find_section_by_name(elf, ".symtab_shndx"); 802 + 844 803 new_idx = sec_num_entries(symtab); 845 804 846 - if 
(GELF_ST_BIND(sym->sym.st_info) != STB_LOCAL) 805 + if (bind != STB_LOCAL) 847 806 goto non_local; 848 807 849 808 /* ··· 915 806 916 807 non_local: 917 808 sym->idx = new_idx; 918 - if (elf_update_symbol(elf, symtab, symtab_shndx, sym)) { 919 - ERROR("elf_update_symbol"); 809 + if (sym->idx && elf_update_symbol(elf, symtab, symtab_shndx, sym)) 920 810 return NULL; 921 - } 922 811 923 812 symtab->sh.sh_size += symtab->sh.sh_entsize; 924 813 mark_sec_changed(elf, symtab, true); ··· 926 819 mark_sec_changed(elf, symtab_shndx, true); 927 820 } 928 821 822 + if (elf_add_symbol(elf, sym)) 823 + return NULL; 824 + 929 825 return sym; 930 826 } 931 827 932 - static struct symbol * 933 - elf_create_section_symbol(struct elf *elf, struct section *sec) 828 + struct symbol *elf_create_section_symbol(struct elf *elf, struct section *sec) 934 829 { 935 830 struct symbol *sym = calloc(1, sizeof(*sym)); 936 831 937 - if (!sym) { 938 - ERROR_GLIBC("malloc"); 832 + sym = elf_create_symbol(elf, sec->name, sec, STB_LOCAL, STT_SECTION, 0, 0); 833 + if (!sym) 939 834 return NULL; 940 - } 941 835 942 - sym->name = sec->name; 943 - sym->sec = sec; 944 - 945 - // st_name 0 946 - sym->sym.st_info = GELF_ST_INFO(STB_LOCAL, STT_SECTION); 947 - // st_other 0 948 - // st_value 0 949 - // st_size 0 950 - 951 - sym = __elf_create_symbol(elf, sym); 952 - if (sym) 953 - elf_add_symbol(elf, sym); 836 + sec->sym = sym; 954 837 955 838 return sym; 956 839 } 957 840 958 - static int elf_add_string(struct elf *elf, struct section *strtab, char *str); 959 - 960 - struct symbol * 961 - elf_create_prefix_symbol(struct elf *elf, struct symbol *orig, long size) 962 - { 963 - struct symbol *sym = calloc(1, sizeof(*sym)); 964 - size_t namelen = strlen(orig->name) + sizeof("__pfx_"); 965 - char *name = malloc(namelen); 966 - 967 - if (!sym || !name) { 968 - ERROR_GLIBC("malloc"); 969 - return NULL; 970 - } 971 - 972 - snprintf(name, namelen, "__pfx_%s", orig->name); 973 - 974 - sym->name = name; 975 - sym->sec 
= orig->sec; 976 - 977 - sym->sym.st_name = elf_add_string(elf, NULL, name); 978 - sym->sym.st_info = orig->sym.st_info; 979 - sym->sym.st_value = orig->sym.st_value - size; 980 - sym->sym.st_size = size; 981 - 982 - sym = __elf_create_symbol(elf, sym); 983 - if (sym) 984 - elf_add_symbol(elf, sym); 985 - 986 - return sym; 987 - } 988 - 989 - static struct reloc *elf_init_reloc(struct elf *elf, struct section *rsec, 990 - unsigned int reloc_idx, 991 - unsigned long offset, struct symbol *sym, 992 - s64 addend, unsigned int type) 841 + struct reloc *elf_init_reloc(struct elf *elf, struct section *rsec, 842 + unsigned int reloc_idx, unsigned long offset, 843 + struct symbol *sym, s64 addend, unsigned int type) 993 844 { 994 845 struct reloc *reloc, empty = { 0 }; 995 846 ··· 987 922 unsigned long insn_off) 988 923 { 989 924 struct symbol *sym = insn_sec->sym; 990 - int addend = insn_off; 925 + s64 addend = insn_off; 991 926 992 - if (!(insn_sec->sh.sh_flags & SHF_EXECINSTR)) { 927 + if (!is_text_sec(insn_sec)) { 993 928 ERROR("bad call to %s() for data symbol %s", __func__, sym->name); 994 929 return NULL; 995 930 } ··· 1004 939 sym = elf_create_section_symbol(elf, insn_sec); 1005 940 if (!sym) 1006 941 return NULL; 1007 - 1008 - insn_sec->sym = sym; 1009 942 } 1010 943 1011 944 return elf_init_reloc(elf, sec->rsec, reloc_idx, offset, sym, addend, ··· 1016 953 struct symbol *sym, 1017 954 s64 addend) 1018 955 { 1019 - if (sym->sec && (sec->sh.sh_flags & SHF_EXECINSTR)) { 956 + if (is_text_sec(sec)) { 1020 957 ERROR("bad call to %s() for text symbol %s", __func__, sym->name); 1021 958 return NULL; 1022 959 } ··· 1049 986 1050 987 rsec->base->rsec = rsec; 1051 988 1052 - nr_reloc = 0; 989 + /* nr_alloc_relocs=0: libelf owns d_buf */ 990 + rsec->nr_alloc_relocs = 0; 991 + 1053 992 rsec->relocs = calloc(sec_num_entries(rsec), sizeof(*reloc)); 1054 993 if (!rsec->relocs) { 1055 994 ERROR_GLIBC("calloc"); 1056 995 return -1; 1057 996 } 997 + 998 + nr_reloc = 0; 1058 999 
for (i = 0; i < sec_num_entries(rsec); i++) { 1059 1000 reloc = &rsec->relocs[i]; 1060 1001 ··· 1111 1044 goto err; 1112 1045 } 1113 1046 1047 + elf->name = strdup(name); 1048 + if (!elf->name) { 1049 + ERROR_GLIBC("strdup"); 1050 + return NULL; 1051 + } 1052 + 1114 1053 if ((flags & O_ACCMODE) == O_RDONLY) 1115 1054 cmd = ELF_C_READ_MMAP; 1116 1055 else if ((flags & O_ACCMODE) == O_RDWR) ··· 1154 1081 return NULL; 1155 1082 } 1156 1083 1157 - static int elf_add_string(struct elf *elf, struct section *strtab, char *str) 1084 + struct elf *elf_create_file(GElf_Ehdr *ehdr, const char *name) 1158 1085 { 1159 - Elf_Data *data; 1160 - Elf_Scn *s; 1161 - int len; 1086 + struct section *null, *symtab, *strtab, *shstrtab; 1087 + char *dir, *base, *tmp_name; 1088 + struct symbol *sym; 1089 + struct elf *elf; 1090 + 1091 + elf_version(EV_CURRENT); 1092 + 1093 + elf = calloc(1, sizeof(*elf)); 1094 + if (!elf) { 1095 + ERROR_GLIBC("calloc"); 1096 + return NULL; 1097 + } 1098 + 1099 + INIT_LIST_HEAD(&elf->sections); 1100 + 1101 + dir = strdup(name); 1102 + if (!dir) { 1103 + ERROR_GLIBC("strdup"); 1104 + return NULL; 1105 + } 1106 + 1107 + dir = dirname(dir); 1108 + 1109 + base = strdup(name); 1110 + if (!base) { 1111 + ERROR_GLIBC("strdup"); 1112 + return NULL; 1113 + } 1114 + 1115 + base = basename(base); 1116 + 1117 + tmp_name = malloc(256); 1118 + if (!tmp_name) { 1119 + ERROR_GLIBC("malloc"); 1120 + return NULL; 1121 + } 1122 + 1123 + snprintf(tmp_name, 256, "%s/%s.XXXXXX", dir, base); 1124 + 1125 + elf->fd = mkstemp(tmp_name); 1126 + if (elf->fd == -1) { 1127 + ERROR_GLIBC("can't create tmp file"); 1128 + exit(1); 1129 + } 1130 + 1131 + elf->tmp_name = tmp_name; 1132 + 1133 + elf->name = strdup(name); 1134 + if (!elf->name) { 1135 + ERROR_GLIBC("strdup"); 1136 + return NULL; 1137 + } 1138 + 1139 + elf->elf = elf_begin(elf->fd, ELF_C_WRITE, NULL); 1140 + if (!elf->elf) { 1141 + ERROR_ELF("elf_begin"); 1142 + return NULL; 1143 + } 1144 + 1145 + if (!gelf_newehdr(elf->elf, 
ELFCLASS64)) { 1146 + ERROR_ELF("gelf_newehdr"); 1147 + return NULL; 1148 + } 1149 + 1150 + memcpy(&elf->ehdr, ehdr, sizeof(elf->ehdr)); 1151 + 1152 + if (!gelf_update_ehdr(elf->elf, &elf->ehdr)) { 1153 + ERROR_ELF("gelf_update_ehdr"); 1154 + return NULL; 1155 + } 1156 + 1157 + INIT_LIST_HEAD(&elf->symbols); 1158 + 1159 + if (!elf_alloc_hash(section, 1000) || 1160 + !elf_alloc_hash(section_name, 1000) || 1161 + !elf_alloc_hash(symbol, 10000) || 1162 + !elf_alloc_hash(symbol_name, 10000) || 1163 + !elf_alloc_hash(reloc, 100000)) 1164 + return NULL; 1165 + 1166 + null = elf_create_section(elf, NULL, 0, 0, SHT_NULL, 0, 0); 1167 + shstrtab = elf_create_section(elf, NULL, 0, 0, SHT_STRTAB, 1, 0); 1168 + strtab = elf_create_section(elf, NULL, 0, 0, SHT_STRTAB, 1, 0); 1169 + 1170 + if (!null || !shstrtab || !strtab) 1171 + return NULL; 1172 + 1173 + null->name = ""; 1174 + shstrtab->name = ".shstrtab"; 1175 + strtab->name = ".strtab"; 1176 + 1177 + null->sh.sh_name = elf_add_string(elf, shstrtab, null->name); 1178 + shstrtab->sh.sh_name = elf_add_string(elf, shstrtab, shstrtab->name); 1179 + strtab->sh.sh_name = elf_add_string(elf, shstrtab, strtab->name); 1180 + 1181 + if (null->sh.sh_name == -1 || shstrtab->sh.sh_name == -1 || strtab->sh.sh_name == -1) 1182 + return NULL; 1183 + 1184 + elf_hash_add(section_name, &null->name_hash, str_hash(null->name)); 1185 + elf_hash_add(section_name, &strtab->name_hash, str_hash(strtab->name)); 1186 + elf_hash_add(section_name, &shstrtab->name_hash, str_hash(shstrtab->name)); 1187 + 1188 + if (elf_add_string(elf, strtab, "") == -1) 1189 + return NULL; 1190 + 1191 + symtab = elf_create_section(elf, ".symtab", 0x18, 0x18, SHT_SYMTAB, 0x8, 0); 1192 + if (!symtab) 1193 + return NULL; 1194 + 1195 + symtab->sh.sh_link = strtab->idx; 1196 + symtab->sh.sh_info = 1; 1197 + 1198 + elf->ehdr.e_shstrndx = shstrtab->idx; 1199 + if (!gelf_update_ehdr(elf->elf, &elf->ehdr)) { 1200 + ERROR_ELF("gelf_update_ehdr"); 1201 + return NULL; 1202 + } 1203 + 
1204 + sym = calloc(1, sizeof(*sym)); 1205 + if (!sym) { 1206 + ERROR_GLIBC("calloc"); 1207 + return NULL; 1208 + } 1209 + 1210 + sym->name = ""; 1211 + sym->sec = null; 1212 + elf_add_symbol(elf, sym); 1213 + 1214 + return elf; 1215 + } 1216 + 1217 + unsigned int elf_add_string(struct elf *elf, struct section *strtab, const char *str) 1218 + { 1219 + unsigned int offset; 1162 1220 1163 1221 if (!strtab) 1164 1222 strtab = find_section_by_name(elf, ".strtab"); ··· 1298 1094 return -1; 1299 1095 } 1300 1096 1301 - s = elf_getscn(elf->elf, strtab->idx); 1302 - if (!s) { 1303 - ERROR_ELF("elf_getscn"); 1097 + if (!strtab->sh.sh_addralign) { 1098 + ERROR("'%s': invalid sh_addralign", strtab->name); 1304 1099 return -1; 1305 1100 } 1306 1101 1307 - data = elf_newdata(s); 1308 - if (!data) { 1309 - ERROR_ELF("elf_newdata"); 1102 + offset = ALIGN_UP(strtab->sh.sh_size, strtab->sh.sh_addralign); 1103 + 1104 + if (!elf_add_data(elf, strtab, str, strlen(str) + 1)) 1310 1105 return -1; 1311 - } 1312 1106 1313 - data->d_buf = str; 1314 - data->d_size = strlen(str) + 1; 1315 - data->d_align = 1; 1316 - 1317 - len = strtab->sh.sh_size; 1318 - strtab->sh.sh_size += data->d_size; 1319 - 1320 - mark_sec_changed(elf, strtab, true); 1321 - 1322 - return len; 1107 + return offset; 1323 1108 } 1324 1109 1325 - struct section *elf_create_section(struct elf *elf, const char *name, 1326 - size_t entsize, unsigned int nr) 1110 + void *elf_add_data(struct elf *elf, struct section *sec, const void *data, size_t size) 1327 1111 { 1328 - struct section *sec, *shstrtab; 1329 - size_t size = entsize * nr; 1112 + unsigned long offset; 1330 1113 Elf_Scn *s; 1331 1114 1332 - sec = malloc(sizeof(*sec)); 1333 - if (!sec) { 1334 - ERROR_GLIBC("malloc"); 1115 + if (!sec->sh.sh_addralign) { 1116 + ERROR("'%s': invalid sh_addralign", sec->name); 1335 1117 return NULL; 1336 1118 } 1337 - memset(sec, 0, sizeof(*sec)); 1338 1119 1339 - INIT_LIST_HEAD(&sec->symbol_list); 1340 - 1341 - s = 
elf_newscn(elf->elf); 1120 + s = elf_getscn(elf->elf, sec->idx); 1342 1121 if (!s) { 1343 - ERROR_ELF("elf_newscn"); 1122 + ERROR_ELF("elf_getscn"); 1344 1123 return NULL; 1345 1124 } 1346 - 1347 - sec->name = strdup(name); 1348 - if (!sec->name) { 1349 - ERROR_GLIBC("strdup"); 1350 - return NULL; 1351 - } 1352 - 1353 - sec->idx = elf_ndxscn(s); 1354 1125 1355 1126 sec->data = elf_newdata(s); 1356 1127 if (!sec->data) { ··· 1333 1154 return NULL; 1334 1155 } 1335 1156 1157 + sec->data->d_buf = calloc(1, size); 1158 + if (!sec->data->d_buf) { 1159 + ERROR_GLIBC("calloc"); 1160 + return NULL; 1161 + } 1162 + 1163 + if (data) 1164 + memcpy(sec->data->d_buf, data, size); 1165 + 1336 1166 sec->data->d_size = size; 1337 1167 sec->data->d_align = 1; 1338 1168 1169 + offset = ALIGN_UP(sec->sh.sh_size, sec->sh.sh_addralign); 1170 + sec->sh.sh_size = offset + size; 1171 + 1172 + mark_sec_changed(elf, sec, true); 1173 + 1174 + return sec->data->d_buf; 1175 + } 1176 + 1177 + struct section *elf_create_section(struct elf *elf, const char *name, 1178 + size_t size, size_t entsize, 1179 + unsigned int type, unsigned int align, 1180 + unsigned int flags) 1181 + { 1182 + struct section *sec, *shstrtab; 1183 + Elf_Scn *s; 1184 + 1185 + if (name && find_section_by_name(elf, name)) { 1186 + ERROR("section '%s' already exists", name); 1187 + return NULL; 1188 + } 1189 + 1190 + sec = calloc(1, sizeof(*sec)); 1191 + if (!sec) { 1192 + ERROR_GLIBC("calloc"); 1193 + return NULL; 1194 + } 1195 + 1196 + INIT_LIST_HEAD(&sec->symbol_list); 1197 + 1198 + /* don't actually create the section, just the data structures */ 1199 + if (type == SHT_NULL) 1200 + goto add; 1201 + 1202 + s = elf_newscn(elf->elf); 1203 + if (!s) { 1204 + ERROR_ELF("elf_newscn"); 1205 + return NULL; 1206 + } 1207 + 1208 + sec->idx = elf_ndxscn(s); 1209 + 1339 1210 if (size) { 1340 - sec->data->d_buf = malloc(size); 1341 - if (!sec->data->d_buf) { 1342 - ERROR_GLIBC("malloc"); 1211 + sec->data = elf_newdata(s); 1212 + if 
(!sec->data) { 1213 + ERROR_ELF("elf_newdata"); 1343 1214 return NULL; 1344 1215 } 1345 - memset(sec->data->d_buf, 0, size); 1216 + 1217 + sec->data->d_size = size; 1218 + sec->data->d_align = 1; 1219 + 1220 + sec->data->d_buf = calloc(1, size); 1221 + if (!sec->data->d_buf) { 1222 + ERROR_GLIBC("calloc"); 1223 + return NULL; 1224 + } 1346 1225 } 1347 1226 1348 1227 if (!gelf_getshdr(s, &sec->sh)) { ··· 1410 1173 1411 1174 sec->sh.sh_size = size; 1412 1175 sec->sh.sh_entsize = entsize; 1413 - sec->sh.sh_type = SHT_PROGBITS; 1414 - sec->sh.sh_addralign = 1; 1415 - sec->sh.sh_flags = SHF_ALLOC; 1176 + sec->sh.sh_type = type; 1177 + sec->sh.sh_addralign = align; 1178 + sec->sh.sh_flags = flags; 1416 1179 1417 - /* Add section name to .shstrtab (or .strtab for Clang) */ 1418 - shstrtab = find_section_by_name(elf, ".shstrtab"); 1419 - if (!shstrtab) 1420 - shstrtab = find_section_by_name(elf, ".strtab"); 1421 - if (!shstrtab) { 1422 - ERROR("can't find .shstrtab or .strtab section"); 1423 - return NULL; 1180 + if (name) { 1181 + sec->name = strdup(name); 1182 + if (!sec->name) { 1183 + ERROR("strdup"); 1184 + return NULL; 1185 + } 1186 + 1187 + /* Add section name to .shstrtab (or .strtab for Clang) */ 1188 + shstrtab = find_section_by_name(elf, ".shstrtab"); 1189 + if (!shstrtab) { 1190 + shstrtab = find_section_by_name(elf, ".strtab"); 1191 + if (!shstrtab) { 1192 + ERROR("can't find .shstrtab or .strtab"); 1193 + return NULL; 1194 + } 1195 + } 1196 + sec->sh.sh_name = elf_add_string(elf, shstrtab, sec->name); 1197 + if (sec->sh.sh_name == -1) 1198 + return NULL; 1199 + 1200 + elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name)); 1424 1201 } 1425 - sec->sh.sh_name = elf_add_string(elf, shstrtab, sec->name); 1426 - if (sec->sh.sh_name == -1) 1427 - return NULL; 1428 1202 1203 + add: 1429 1204 list_add_tail(&sec->list, &elf->sections); 1430 1205 elf_hash_add(section, &sec->hash, sec->idx); 1431 - elf_hash_add(section_name, &sec->name_hash, 
str_hash(sec->name)); 1432 1206 1433 1207 mark_sec_changed(elf, sec, true); 1434 1208 1435 1209 return sec; 1436 1210 } 1437 1211 1438 - static struct section *elf_create_rela_section(struct elf *elf, 1439 - struct section *sec, 1440 - unsigned int reloc_nr) 1212 + static int elf_alloc_reloc(struct elf *elf, struct section *rsec) 1213 + { 1214 + struct reloc *old_relocs, *old_relocs_end, *new_relocs; 1215 + unsigned int nr_relocs_old = sec_num_entries(rsec); 1216 + unsigned int nr_relocs_new = nr_relocs_old + 1; 1217 + unsigned long nr_alloc; 1218 + struct symbol *sym; 1219 + 1220 + if (!rsec->data) { 1221 + rsec->data = elf_newdata(elf_getscn(elf->elf, rsec->idx)); 1222 + if (!rsec->data) { 1223 + ERROR_ELF("elf_newdata"); 1224 + return -1; 1225 + } 1226 + 1227 + rsec->data->d_align = 1; 1228 + rsec->data->d_type = ELF_T_RELA; 1229 + rsec->data->d_buf = NULL; 1230 + } 1231 + 1232 + rsec->data->d_size = nr_relocs_new * elf_rela_size(elf); 1233 + rsec->sh.sh_size = rsec->data->d_size; 1234 + 1235 + nr_alloc = MAX(64, ALIGN_UP_POW2(nr_relocs_new)); 1236 + if (nr_alloc <= rsec->nr_alloc_relocs) 1237 + return 0; 1238 + 1239 + if (rsec->data->d_buf && !rsec->nr_alloc_relocs) { 1240 + void *orig_buf = rsec->data->d_buf; 1241 + 1242 + /* 1243 + * The original d_buf is owned by libelf so it can't be 1244 + * realloced. 
1245 + */ 1246 + rsec->data->d_buf = malloc(nr_alloc * elf_rela_size(elf)); 1247 + if (!rsec->data->d_buf) { 1248 + ERROR_GLIBC("malloc"); 1249 + return -1; 1250 + } 1251 + memcpy(rsec->data->d_buf, orig_buf, 1252 + nr_relocs_old * elf_rela_size(elf)); 1253 + } else { 1254 + rsec->data->d_buf = realloc(rsec->data->d_buf, 1255 + nr_alloc * elf_rela_size(elf)); 1256 + if (!rsec->data->d_buf) { 1257 + ERROR_GLIBC("realloc"); 1258 + return -1; 1259 + } 1260 + } 1261 + 1262 + rsec->nr_alloc_relocs = nr_alloc; 1263 + 1264 + old_relocs = rsec->relocs; 1265 + new_relocs = calloc(nr_alloc, sizeof(struct reloc)); 1266 + if (!new_relocs) { 1267 + ERROR_GLIBC("calloc"); 1268 + return -1; 1269 + } 1270 + 1271 + if (!old_relocs) 1272 + goto done; 1273 + 1274 + /* 1275 + * The struct reloc's address has changed. Update all the symbols and 1276 + * relocs which reference it. 1277 + */ 1278 + 1279 + old_relocs_end = &old_relocs[nr_relocs_old]; 1280 + for_each_sym(elf, sym) { 1281 + struct reloc *reloc; 1282 + 1283 + reloc = sym->relocs; 1284 + if (!reloc) 1285 + continue; 1286 + 1287 + if (reloc >= old_relocs && reloc < old_relocs_end) 1288 + sym->relocs = &new_relocs[reloc - old_relocs]; 1289 + 1290 + while (1) { 1291 + struct reloc *next_reloc = sym_next_reloc(reloc); 1292 + 1293 + if (!next_reloc) 1294 + break; 1295 + 1296 + if (next_reloc >= old_relocs && next_reloc < old_relocs_end) 1297 + set_sym_next_reloc(reloc, &new_relocs[next_reloc - old_relocs]); 1298 + 1299 + reloc = next_reloc; 1300 + } 1301 + } 1302 + 1303 + memcpy(new_relocs, old_relocs, nr_relocs_old * sizeof(struct reloc)); 1304 + 1305 + for (int i = 0; i < nr_relocs_old; i++) { 1306 + struct reloc *old = &old_relocs[i]; 1307 + struct reloc *new = &new_relocs[i]; 1308 + u32 key = reloc_hash(old); 1309 + 1310 + elf_hash_del(reloc, &old->hash, key); 1311 + elf_hash_add(reloc, &new->hash, key); 1312 + } 1313 + 1314 + free(old_relocs); 1315 + done: 1316 + rsec->relocs = new_relocs; 1317 + return 0; 1318 + } 1319 + 
1320 + struct section *elf_create_rela_section(struct elf *elf, struct section *sec, 1321 + unsigned int nr_relocs) 1441 1322 { 1442 1323 struct section *rsec; 1443 1324 char *rsec_name; ··· 1568 1213 strcpy(rsec_name, ".rela"); 1569 1214 strcat(rsec_name, sec->name); 1570 1215 1571 - rsec = elf_create_section(elf, rsec_name, elf_rela_size(elf), reloc_nr); 1216 + rsec = elf_create_section(elf, rsec_name, nr_relocs * elf_rela_size(elf), 1217 + elf_rela_size(elf), SHT_RELA, elf_addr_size(elf), 1218 + SHF_INFO_LINK); 1572 1219 free(rsec_name); 1573 1220 if (!rsec) 1574 1221 return NULL; 1575 1222 1576 - rsec->data->d_type = ELF_T_RELA; 1577 - rsec->sh.sh_type = SHT_RELA; 1578 - rsec->sh.sh_addralign = elf_addr_size(elf); 1223 + if (nr_relocs) { 1224 + rsec->data->d_type = ELF_T_RELA; 1225 + 1226 + rsec->nr_alloc_relocs = nr_relocs; 1227 + rsec->relocs = calloc(nr_relocs, sizeof(struct reloc)); 1228 + if (!rsec->relocs) { 1229 + ERROR_GLIBC("calloc"); 1230 + return NULL; 1231 + } 1232 + } 1233 + 1579 1234 rsec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx; 1580 1235 rsec->sh.sh_info = sec->idx; 1581 - rsec->sh.sh_flags = SHF_INFO_LINK; 1582 - 1583 - rsec->relocs = calloc(sec_num_entries(rsec), sizeof(struct reloc)); 1584 - if (!rsec->relocs) { 1585 - ERROR_GLIBC("calloc"); 1586 - return NULL; 1587 - } 1588 1236 1589 1237 sec->rsec = rsec; 1590 1238 rsec->base = sec; ··· 1595 1237 return rsec; 1596 1238 } 1597 1239 1240 + struct reloc *elf_create_reloc(struct elf *elf, struct section *sec, 1241 + unsigned long offset, 1242 + struct symbol *sym, s64 addend, 1243 + unsigned int type) 1244 + { 1245 + struct section *rsec = sec->rsec; 1246 + 1247 + if (!rsec) { 1248 + rsec = elf_create_rela_section(elf, sec, 0); 1249 + if (!rsec) 1250 + return NULL; 1251 + } 1252 + 1253 + if (find_reloc_by_dest(elf, sec, offset)) { 1254 + ERROR_FUNC(sec, offset, "duplicate reloc"); 1255 + return NULL; 1256 + } 1257 + 1258 + if (elf_alloc_reloc(elf, rsec)) 1259 + return NULL; 1260 
+ 1261 + mark_sec_changed(elf, rsec, true); 1262 + 1263 + return elf_init_reloc(elf, rsec, sec_num_entries(rsec) - 1, offset, sym, 1264 + addend, type); 1265 + } 1266 + 1598 1267 struct section *elf_create_section_pair(struct elf *elf, const char *name, 1599 1268 size_t entsize, unsigned int nr, 1600 - unsigned int reloc_nr) 1269 + unsigned int nr_relocs) 1601 1270 { 1602 1271 struct section *sec; 1603 1272 1604 - sec = elf_create_section(elf, name, entsize, nr); 1273 + sec = elf_create_section(elf, name, nr * entsize, entsize, 1274 + SHT_PROGBITS, 1, SHF_ALLOC); 1605 1275 if (!sec) 1606 1276 return NULL; 1607 1277 1608 - if (!elf_create_rela_section(elf, sec, reloc_nr)) 1278 + if (!elf_create_rela_section(elf, sec, nr_relocs)) 1609 1279 return NULL; 1610 1280 1611 1281 return sec; ··· 1668 1282 */ 1669 1283 static int elf_truncate_section(struct elf *elf, struct section *sec) 1670 1284 { 1671 - u64 size = sec->sh.sh_size; 1285 + u64 size = sec_size(sec); 1672 1286 bool truncated = false; 1673 1287 Elf_Data *data = NULL; 1674 1288 Elf_Scn *s; ··· 1682 1296 for (;;) { 1683 1297 /* get next data descriptor for the relevant section */ 1684 1298 data = elf_getdata(s, data); 1685 - 1686 1299 if (!data) { 1687 1300 if (size) { 1688 1301 ERROR("end of section data but non-zero size left\n"); ··· 1717 1332 1718 1333 /* Update changed relocation sections and section headers: */ 1719 1334 list_for_each_entry(sec, &elf->sections, list) { 1720 - if (sec->truncate) 1721 - elf_truncate_section(elf, sec); 1335 + if (sec->truncate && elf_truncate_section(elf, sec)) 1336 + return -1; 1722 1337 1723 1338 if (sec_changed(sec)) { 1724 1339 s = elf_getscn(elf->elf, sec->idx); ··· 1751 1366 return 0; 1752 1367 } 1753 1368 1754 - void elf_close(struct elf *elf) 1369 + int elf_close(struct elf *elf) 1755 1370 { 1756 1371 if (elf->elf) 1757 1372 elf_end(elf->elf); ··· 1759 1374 if (elf->fd > 0) 1760 1375 close(elf->fd); 1761 1376 1377 + if (elf->tmp_name && rename(elf->tmp_name, 
elf->name)) 1378 + return -1; 1379 + 1762 1380 /* 1763 1381 * NOTE: All remaining allocations are leaked on purpose. Objtool is 1764 1382 * about to exit anyway. 1765 1383 */ 1384 + return 0; 1766 1385 }
+3 -2
tools/objtool/include/objtool/arch.h
··· 71 71 72 72 struct instruction; 73 73 74 - int arch_ftrace_match(char *name); 74 + int arch_ftrace_match(const char *name); 75 75 76 76 void arch_initial_func_cfi_state(struct cfi_init_state *state); 77 77 ··· 83 83 84 84 unsigned long arch_jump_destination(struct instruction *insn); 85 85 86 - unsigned long arch_dest_reloc_offset(int addend); 86 + s64 arch_insn_adjusted_addend(struct instruction *insn, struct reloc *reloc); 87 + u64 arch_adjusted_addend(struct reloc *reloc); 87 88 88 89 const char *arch_nop_insn(int len); 89 90 const char *arch_ret_insn(int len);
+8 -3
tools/objtool/include/objtool/builtin.h
··· 9 9 10 10 struct opts { 11 11 /* actions: */ 12 + bool cfi; 13 + bool checksum; 12 14 bool dump_orc; 13 15 bool hack_jump_label; 14 16 bool hack_noinstr; 15 17 bool hack_skylake; 16 18 bool ibt; 17 19 bool mcount; 20 + bool noabs; 18 21 bool noinstr; 19 22 bool orc; 20 23 bool retpoline; ··· 28 25 bool static_call; 29 26 bool uaccess; 30 27 int prefix; 31 - bool cfi; 32 - bool noabs; 33 28 34 29 /* options: */ 35 30 bool backtrace; 31 + bool backup; 32 + const char *debug_checksum; 36 33 bool dryrun; 37 34 bool link; 38 35 bool mnop; ··· 51 48 52 49 int objtool_run(int argc, const char **argv); 53 50 54 - void print_args(void); 51 + int make_backup(void); 52 + 53 + int cmd_klp(int argc, const char **argv); 55 54 56 55 #endif /* _BUILTIN_H */
+4 -2
tools/objtool/include/objtool/check.h
··· 64 64 noendbr : 1, 65 65 unret : 1, 66 66 visited : 4, 67 - no_reloc : 1; 68 - /* 10 bit hole */ 67 + no_reloc : 1, 68 + hole : 1, 69 + fake : 1; 70 + /* 9 bit hole */ 69 71 70 72 struct alt_group *alt_group; 71 73 struct instruction *jump_dest;
+43
tools/objtool/include/objtool/checksum.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + #ifndef _OBJTOOL_CHECKSUM_H 3 + #define _OBJTOOL_CHECKSUM_H 4 + 5 + #include <objtool/elf.h> 6 + 7 + #ifdef BUILD_KLP 8 + 9 + static inline void checksum_init(struct symbol *func) 10 + { 11 + if (func && !func->csum.state) { 12 + func->csum.state = XXH3_createState(); 13 + XXH3_64bits_reset(func->csum.state); 14 + } 15 + } 16 + 17 + static inline void checksum_update(struct symbol *func, 18 + struct instruction *insn, 19 + const void *data, size_t size) 20 + { 21 + XXH3_64bits_update(func->csum.state, data, size); 22 + dbg_checksum(func, insn, XXH3_64bits_digest(func->csum.state)); 23 + } 24 + 25 + static inline void checksum_finish(struct symbol *func) 26 + { 27 + if (func && func->csum.state) { 28 + func->csum.checksum = XXH3_64bits_digest(func->csum.state); 29 + func->csum.state = NULL; 30 + } 31 + } 32 + 33 + #else /* !BUILD_KLP */ 34 + 35 + static inline void checksum_init(struct symbol *func) {} 36 + static inline void checksum_update(struct symbol *func, 37 + struct instruction *insn, 38 + const void *data, size_t size) {} 39 + static inline void checksum_finish(struct symbol *func) {} 40 + 41 + #endif /* !BUILD_KLP */ 42 + 43 + #endif /* _OBJTOOL_CHECKSUM_H */
+25
tools/objtool/include/objtool/checksum_types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _OBJTOOL_CHECKSUM_TYPES_H 3 + #define _OBJTOOL_CHECKSUM_TYPES_H 4 + 5 + struct sym_checksum { 6 + u64 addr; 7 + u64 checksum; 8 + }; 9 + 10 + #ifdef BUILD_KLP 11 + 12 + #include <xxhash.h> 13 + 14 + struct checksum { 15 + XXH3_state_t *state; 16 + XXH64_hash_t checksum; 17 + }; 18 + 19 + #else /* !BUILD_KLP */ 20 + 21 + struct checksum {}; 22 + 23 + #endif /* !BUILD_KLP */ 24 + 25 + #endif /* _OBJTOOL_CHECKSUM_TYPES_H */
+173 -23
tools/objtool/include/objtool/elf.h
··· 8 8 9 9 #include <stdio.h> 10 10 #include <gelf.h> 11 + #include <linux/string.h> 11 12 #include <linux/list.h> 12 13 #include <linux/hashtable.h> 13 14 #include <linux/rbtree.h> 14 15 #include <linux/jhash.h> 16 + 17 + #include <objtool/endianness.h> 18 + #include <objtool/checksum_types.h> 15 19 #include <arch/elf.h> 20 + 21 + #define SEC_NAME_LEN 1024 22 + #define SYM_NAME_LEN 512 23 + 24 + #define bswap_if_needed(elf, val) __bswap_if_needed(&elf->ehdr, val) 16 25 17 26 #ifdef LIBELF_USE_DEPRECATED 18 27 # define elf_getshdrnum elf_getshnum ··· 49 40 struct section *base, *rsec; 50 41 struct symbol *sym; 51 42 Elf_Data *data; 52 - char *name; 43 + const char *name; 53 44 int idx; 54 45 bool _changed, text, rodata, noinstr, init, truncate; 55 46 struct reloc *relocs; 47 + unsigned long nr_alloc_relocs; 48 + struct section *twin; 56 49 }; 57 50 58 51 struct symbol { 59 52 struct list_head list; 53 + struct list_head global_list; 60 54 struct rb_node node; 61 55 struct elf_hash_node hash; 62 56 struct elf_hash_node name_hash; 63 57 GElf_Sym sym; 64 58 struct section *sec; 65 - char *name; 59 + const char *name, *demangled_name; 66 60 unsigned int idx, len; 67 61 unsigned long offset; 68 62 unsigned long __subtree_last; ··· 83 71 u8 frame_pointer : 1; 84 72 u8 ignore : 1; 85 73 u8 nocfi : 1; 74 + u8 cold : 1; 75 + u8 prefix : 1; 76 + u8 debug_checksum : 1; 77 + u8 changed : 1; 78 + u8 included : 1; 79 + u8 klp : 1; 86 80 struct list_head pv_target; 87 81 struct reloc *relocs; 88 82 struct section *group_sec; 83 + struct checksum csum; 84 + struct symbol *twin, *clone; 89 85 }; 90 86 91 87 struct reloc { ··· 108 88 GElf_Ehdr ehdr; 109 89 int fd; 110 90 bool changed; 111 - char *name; 91 + const char *name, *tmp_name; 112 92 unsigned int num_files; 113 93 struct list_head sections; 94 + struct list_head symbols; 114 95 unsigned long num_relocs; 115 96 116 97 int symbol_bits; ··· 131 110 }; 132 111 133 112 struct elf *elf_open_read(const char *name, int flags); 113 
+ struct elf *elf_create_file(GElf_Ehdr *ehdr, const char *name); 134 114 135 115 struct section *elf_create_section(struct elf *elf, const char *name, 136 - size_t entsize, unsigned int nr); 116 + size_t size, size_t entsize, 117 + unsigned int type, unsigned int align, 118 + unsigned int flags); 137 119 struct section *elf_create_section_pair(struct elf *elf, const char *name, 138 120 size_t entsize, unsigned int nr, 139 121 unsigned int reloc_nr); 140 122 141 - struct symbol *elf_create_prefix_symbol(struct elf *elf, struct symbol *orig, long size); 123 + struct section *elf_create_rela_section(struct elf *elf, struct section *sec, 124 + unsigned int reloc_nr); 125 + 126 + struct symbol *elf_create_symbol(struct elf *elf, const char *name, 127 + struct section *sec, unsigned int bind, 128 + unsigned int type, unsigned long offset, 129 + size_t size); 130 + struct symbol *elf_create_section_symbol(struct elf *elf, struct section *sec); 131 + 132 + void *elf_add_data(struct elf *elf, struct section *sec, const void *data, 133 + size_t size); 134 + 135 + unsigned int elf_add_string(struct elf *elf, struct section *strtab, const char *str); 136 + 137 + struct reloc *elf_create_reloc(struct elf *elf, struct section *sec, 138 + unsigned long offset, struct symbol *sym, 139 + s64 addend, unsigned int type); 140 + 141 + struct reloc *elf_init_reloc(struct elf *elf, struct section *rsec, 142 + unsigned int reloc_idx, unsigned long offset, 143 + struct symbol *sym, s64 addend, unsigned int type); 142 144 143 145 struct reloc *elf_init_reloc_text_sym(struct elf *elf, struct section *sec, 144 146 unsigned long offset, ··· 175 131 struct symbol *sym, 176 132 s64 addend); 177 133 178 - int elf_write_insn(struct elf *elf, struct section *sec, 179 - unsigned long offset, unsigned int len, 180 - const char *insn); 134 + int elf_write_insn(struct elf *elf, struct section *sec, unsigned long offset, 135 + unsigned int len, const char *insn); 136 + 181 137 int elf_write(struct elf 
*elf); 182 - void elf_close(struct elf *elf); 138 + int elf_close(struct elf *elf); 183 139 184 140 struct section *find_section_by_name(const struct elf *elf, const char *name); 185 141 struct symbol *find_func_by_offset(struct section *sec, unsigned long offset); 186 142 struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset); 187 143 struct symbol *find_symbol_by_name(const struct elf *elf, const char *name); 144 + struct symbol *find_global_symbol_by_name(const struct elf *elf, const char *name); 188 145 struct symbol *find_symbol_containing(const struct section *sec, unsigned long offset); 189 146 int find_symbol_hole_containing(const struct section *sec, unsigned long offset); 190 147 struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, unsigned long offset); ··· 223 178 return elf_addr_size(elf) == 4 ? R_TEXT32 : R_TEXT64; 224 179 } 225 180 181 + static inline bool is_undef_sym(struct symbol *sym) 182 + { 183 + return !sym->sec->idx; 184 + } 185 + 186 + static inline bool is_null_sym(struct symbol *sym) 187 + { 188 + return !sym->idx; 189 + } 190 + 191 + static inline bool is_sec_sym(struct symbol *sym) 192 + { 193 + return sym->type == STT_SECTION; 194 + } 195 + 196 + static inline bool is_object_sym(struct symbol *sym) 197 + { 198 + return sym->type == STT_OBJECT; 199 + } 200 + 201 + static inline bool is_func_sym(struct symbol *sym) 202 + { 203 + return sym->type == STT_FUNC; 204 + } 205 + 206 + static inline bool is_file_sym(struct symbol *sym) 207 + { 208 + return sym->type == STT_FILE; 209 + } 210 + 211 + static inline bool is_notype_sym(struct symbol *sym) 212 + { 213 + return sym->type == STT_NOTYPE; 214 + } 215 + 216 + static inline bool is_global_sym(struct symbol *sym) 217 + { 218 + return sym->bind == STB_GLOBAL; 219 + } 220 + 221 + static inline bool is_weak_sym(struct symbol *sym) 222 + { 223 + return sym->bind == STB_WEAK; 224 + } 225 + 226 + static inline bool is_local_sym(struct symbol *sym) 
227 + { 228 + return sym->bind == STB_LOCAL; 229 + } 230 + 231 + static inline bool is_prefix_func(struct symbol *sym) 232 + { 233 + return sym->prefix; 234 + } 235 + 226 236 static inline bool is_reloc_sec(struct section *sec) 227 237 { 228 238 return sec->sh.sh_type == SHT_RELA || sec->sh.sh_type == SHT_REL; 239 + } 240 + 241 + static inline bool is_string_sec(struct section *sec) 242 + { 243 + return sec->sh.sh_flags & SHF_STRINGS; 244 + } 245 + 246 + static inline bool is_text_sec(struct section *sec) 247 + { 248 + return sec->sh.sh_flags & SHF_EXECINSTR; 229 249 } 230 250 231 251 static inline bool sec_changed(struct section *sec) ··· 331 221 * Elf64_Rela: 24 bytes 332 222 */ 333 223 return reloc->sec->sh.sh_entsize < 16; 224 + } 225 + 226 + static inline unsigned long sec_size(struct section *sec) 227 + { 228 + return sec->sh.sh_size; 334 229 } 335 230 336 231 #define __get_reloc_field(reloc, field) \ ··· 415 300 mark_sec_changed(elf, reloc->sec, true); 416 301 } 417 302 303 + static inline unsigned int annotype(struct elf *elf, struct section *sec, 304 + struct reloc *reloc) 305 + { 306 + unsigned int type; 307 + 308 + type = *(u32 *)(sec->data->d_buf + (reloc_idx(reloc) * 8) + 4); 309 + return bswap_if_needed(elf, type); 310 + } 311 + 418 312 #define RELOC_JUMP_TABLE_BIT 1UL 419 313 420 314 /* Does reloc mark the beginning of a jump table? 
*/ ··· 449 325 reloc->_sym_next_reloc = (unsigned long)next | bit; 450 326 } 451 327 452 - #define for_each_sec(file, sec) \ 453 - list_for_each_entry(sec, &file->elf->sections, list) 328 + #define for_each_sec(elf, sec) \ 329 + list_for_each_entry(sec, &elf->sections, list) 454 330 455 331 #define sec_for_each_sym(sec, sym) \ 456 332 list_for_each_entry(sym, &sec->symbol_list, list) 457 333 458 - #define for_each_sym(file, sym) \ 459 - for (struct section *__sec, *__fake = (struct section *)1; \ 460 - __fake; __fake = NULL) \ 461 - for_each_sec(file, __sec) \ 462 - sec_for_each_sym(__sec, sym) 334 + #define sec_prev_sym(sym) \ 335 + sym->sec && sym->list.prev != &sym->sec->symbol_list ? \ 336 + list_prev_entry(sym, list) : NULL 337 + 338 + #define for_each_sym(elf, sym) \ 339 + list_for_each_entry(sym, &elf->symbols, global_list) 340 + 341 + #define for_each_sym_continue(elf, sym) \ 342 + list_for_each_entry_continue(sym, &elf->symbols, global_list) 343 + 344 + #define rsec_next_reloc(rsec, reloc) \ 345 + reloc_idx(reloc) < sec_num_entries(rsec) - 1 ? 
reloc + 1 : NULL 463 346 464 347 #define for_each_reloc(rsec, reloc) \ 465 - for (int __i = 0, __fake = 1; __fake; __fake = 0) \ 466 - for (reloc = rsec->relocs; \ 467 - __i < sec_num_entries(rsec); \ 468 - __i++, reloc++) 348 + for (reloc = rsec->relocs; reloc; reloc = rsec_next_reloc(rsec, reloc)) 469 349 470 350 #define for_each_reloc_from(rsec, reloc) \ 471 - for (int __i = reloc_idx(reloc); \ 472 - __i < sec_num_entries(rsec); \ 473 - __i++, reloc++) 351 + for (; reloc; reloc = rsec_next_reloc(rsec, reloc)) 352 + 353 + #define for_each_reloc_continue(rsec, reloc) \ 354 + for (reloc = rsec_next_reloc(rsec, reloc); reloc; \ 355 + reloc = rsec_next_reloc(rsec, reloc)) 356 + 357 + #define sym_for_each_reloc(elf, sym, reloc) \ 358 + for (reloc = find_reloc_by_dest_range(elf, sym->sec, \ 359 + sym->offset, sym->len); \ 360 + reloc && reloc_offset(reloc) < sym->offset + sym->len; \ 361 + reloc = rsec_next_reloc(sym->sec->rsec, reloc)) 362 + 363 + static inline struct symbol *get_func_prefix(struct symbol *func) 364 + { 365 + struct symbol *prev; 366 + 367 + if (!is_func_sym(func)) 368 + return NULL; 369 + 370 + prev = sec_prev_sym(func); 371 + if (prev && is_prefix_func(prev)) 372 + return prev; 373 + 374 + return NULL; 375 + } 474 376 475 377 #define OFFSET_STRIDE_BITS 4 476 378 #define OFFSET_STRIDE (1UL << OFFSET_STRIDE_BITS)
+4 -5
tools/objtool/include/objtool/endianness.h
··· 4 4 5 5 #include <linux/kernel.h> 6 6 #include <endian.h> 7 - #include <objtool/elf.h> 8 7 9 8 /* 10 9 * Does a byte swap if target file endianness doesn't match the host, i.e. cross ··· 11 12 * To be used for multi-byte values conversion, which are read from / about 12 13 * to be written to a target native endianness ELF file. 13 14 */ 14 - static inline bool need_bswap(struct elf *elf) 15 + static inline bool need_bswap(GElf_Ehdr *ehdr) 15 16 { 16 17 return (__BYTE_ORDER == __LITTLE_ENDIAN) ^ 17 - (elf->ehdr.e_ident[EI_DATA] == ELFDATA2LSB); 18 + (ehdr->e_ident[EI_DATA] == ELFDATA2LSB); 18 19 } 19 20 20 - #define bswap_if_needed(elf, val) \ 21 + #define __bswap_if_needed(ehdr, val) \ 21 22 ({ \ 22 23 __typeof__(val) __ret; \ 23 - bool __need_bswap = need_bswap(elf); \ 24 + bool __need_bswap = need_bswap(ehdr); \ 24 25 switch (sizeof(val)) { \ 25 26 case 8: \ 26 27 __ret = __need_bswap ? bswap_64(val) : (val); break; \
+35
tools/objtool/include/objtool/klp.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + #ifndef _OBJTOOL_KLP_H 3 + #define _OBJTOOL_KLP_H 4 + 5 + #define SHF_RELA_LIVEPATCH 0x00100000 6 + #define SHN_LIVEPATCH 0xff20 7 + 8 + /* 9 + * __klp_objects and __klp_funcs are created by klp diff and used by the patch 10 + * module init code to build the klp_patch, klp_object and klp_func structs 11 + * needed by the livepatch API. 12 + */ 13 + #define KLP_OBJECTS_SEC "__klp_objects" 14 + #define KLP_FUNCS_SEC "__klp_funcs" 15 + 16 + /* 17 + * __klp_relocs is an intermediate section which are created by klp diff and 18 + * converted into KLP symbols/relas by "objtool klp post-link". This is needed 19 + * to work around the linker, which doesn't preserve SHN_LIVEPATCH or 20 + * SHF_RELA_LIVEPATCH, nor does it support having two RELA sections for a 21 + * single PROGBITS section. 22 + */ 23 + #define KLP_RELOCS_SEC "__klp_relocs" 24 + #define KLP_STRINGS_SEC ".rodata.klp.str1.1" 25 + 26 + struct klp_reloc { 27 + void *offset; 28 + void *sym; 29 + u32 type; 30 + }; 31 + 32 + int cmd_klp_diff(int argc, const char **argv); 33 + int cmd_klp_post_link(int argc, const char **argv); 34 + 35 + #endif /* _OBJTOOL_KLP_H */
+3 -1
tools/objtool/include/objtool/objtool.h
··· 28 28 struct list_head mcount_loc_list; 29 29 struct list_head endbr_list; 30 30 struct list_head call_list; 31 - bool ignore_unreachables, hints, rodata; 31 + bool ignore_unreachables, hints, rodata, klp; 32 32 33 33 unsigned int nr_endbr; 34 34 unsigned int nr_endbr_int; ··· 38 38 39 39 struct pv_state *pv_ops; 40 40 }; 41 + 42 + char *top_level_dir(const char *file); 41 43 42 44 struct objtool_file *objtool_open_read(const char *_objname); 43 45
+19
tools/objtool/include/objtool/util.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + #ifndef _UTIL_H 3 + #define _UTIL_H 4 + 5 + #include <objtool/warn.h> 6 + 7 + #define snprintf_check(str, size, format, args...) \ 8 + ({ \ 9 + int __ret = snprintf(str, size, format, args); \ 10 + if (__ret < 0) \ 11 + ERROR_GLIBC("snprintf"); \ 12 + else if (__ret >= size) \ 13 + ERROR("snprintf() failed for '" format "'", args); \ 14 + else \ 15 + __ret = 0; \ 16 + __ret; \ 17 + }) 18 + 19 + #endif /* _UTIL_H */
+40
tools/objtool/include/objtool/warn.h
··· 102 102 #define ERROR_FUNC(sec, offset, format, ...) __WARN_FUNC(ERROR_STR, sec, offset, format, ##__VA_ARGS__) 103 103 #define ERROR_INSN(insn, format, ...) WARN_FUNC(insn->sec, insn->offset, format, ##__VA_ARGS__) 104 104 105 + extern bool debug; 106 + extern int indent; 107 + 108 + static inline void unindent(int *unused) { indent--; } 109 + 110 + #define __dbg(format, ...) \ 111 + fprintf(stderr, \ 112 + "DEBUG: %s%s" format "\n", \ 113 + objname ?: "", \ 114 + objname ? ": " : "", \ 115 + ##__VA_ARGS__) 116 + 117 + #define dbg(args...) \ 118 + ({ \ 119 + if (unlikely(debug)) \ 120 + __dbg(args); \ 121 + }) 122 + 123 + #define __dbg_indent(format, ...) \ 124 + ({ \ 125 + if (unlikely(debug)) \ 126 + __dbg("%*s" format, indent * 8, "", ##__VA_ARGS__); \ 127 + }) 128 + 129 + #define dbg_indent(args...) \ 130 + int __attribute__((cleanup(unindent))) __dummy_##__COUNTER__; \ 131 + __dbg_indent(args); \ 132 + indent++ 133 + 134 + #define dbg_checksum(func, insn, checksum) \ 135 + ({ \ 136 + if (unlikely(insn->sym && insn->sym->pfunc && \ 137 + insn->sym->pfunc->debug_checksum)) { \ 138 + char *insn_off = offstr(insn->sec, insn->offset); \ 139 + __dbg("checksum: %s %s %016lx", \ 140 + func->name, insn_off, checksum); \ 141 + free(insn_off); \ 142 + } \ 143 + }) 144 + 105 145 #endif /* _WARN_H */
+1723
tools/objtool/klp-diff.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + #define _GNU_SOURCE /* memmem() */ 3 + #include <subcmd/parse-options.h> 4 + #include <stdlib.h> 5 + #include <string.h> 6 + #include <libgen.h> 7 + #include <stdio.h> 8 + #include <ctype.h> 9 + 10 + #include <objtool/objtool.h> 11 + #include <objtool/warn.h> 12 + #include <objtool/arch.h> 13 + #include <objtool/klp.h> 14 + #include <objtool/util.h> 15 + #include <arch/special.h> 16 + 17 + #include <linux/objtool_types.h> 18 + #include <linux/livepatch_external.h> 19 + #include <linux/stringify.h> 20 + #include <linux/string.h> 21 + #include <linux/jhash.h> 22 + 23 + #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) 24 + 25 + struct elfs { 26 + struct elf *orig, *patched, *out; 27 + const char *modname; 28 + }; 29 + 30 + struct export { 31 + struct hlist_node hash; 32 + char *mod, *sym; 33 + }; 34 + 35 + static const char * const klp_diff_usage[] = { 36 + "objtool klp diff [<options>] <in1.o> <in2.o> <out.o>", 37 + NULL, 38 + }; 39 + 40 + static const struct option klp_diff_options[] = { 41 + OPT_GROUP("Options:"), 42 + OPT_BOOLEAN('d', "debug", &debug, "enable debug output"), 43 + OPT_END(), 44 + }; 45 + 46 + static DEFINE_HASHTABLE(exports, 15); 47 + 48 + static inline u32 str_hash(const char *str) 49 + { 50 + return jhash(str, strlen(str), 0); 51 + } 52 + 53 + static char *escape_str(const char *orig) 54 + { 55 + size_t len = 0; 56 + const char *a; 57 + char *b, *new; 58 + 59 + for (a = orig; *a; a++) { 60 + switch (*a) { 61 + case '\001': len += 5; break; 62 + case '\n': 63 + case '\t': len += 2; break; 64 + default: len++; 65 + } 66 + } 67 + 68 + new = malloc(len + 1); 69 + if (!new) 70 + return NULL; 71 + 72 + for (a = orig, b = new; *a; a++) { 73 + switch (*a) { 74 + case '\001': memcpy(b, "<SOH>", 5); b += 5; break; 75 + case '\n': *b++ = '\\'; *b++ = 'n'; break; 76 + case '\t': *b++ = '\\'; *b++ = 't'; break; 77 + default: *b++ = *a; 78 + } 79 + } 80 + 81 + *b = '\0'; 82 + return new; 
83 + } 84 + 85 + static int read_exports(void) 86 + { 87 + const char *symvers = "Module.symvers"; 88 + char line[1024], *path = NULL; 89 + unsigned int line_num = 1; 90 + FILE *file; 91 + 92 + file = fopen(symvers, "r"); 93 + if (!file) { 94 + path = top_level_dir(symvers); 95 + if (!path) { 96 + ERROR("can't open '%s', \"objtool diff\" should be run from the kernel tree", symvers); 97 + return -1; 98 + } 99 + 100 + file = fopen(path, "r"); 101 + if (!file) { 102 + ERROR_GLIBC("fopen"); 103 + return -1; 104 + } 105 + } 106 + 107 + while (fgets(line, 1024, file)) { 108 + char *sym, *mod, *type; 109 + struct export *export; 110 + 111 + sym = strchr(line, '\t'); 112 + if (!sym) { 113 + ERROR("malformed Module.symvers (sym) at line %d", line_num); 114 + return -1; 115 + } 116 + 117 + *sym++ = '\0'; 118 + 119 + mod = strchr(sym, '\t'); 120 + if (!mod) { 121 + ERROR("malformed Module.symvers (mod) at line %d", line_num); 122 + return -1; 123 + } 124 + 125 + *mod++ = '\0'; 126 + 127 + type = strchr(mod, '\t'); 128 + if (!type) { 129 + ERROR("malformed Module.symvers (type) at line %d", line_num); 130 + return -1; 131 + } 132 + 133 + *type++ = '\0'; 134 + 135 + if (*sym == '\0' || *mod == '\0') { 136 + ERROR("malformed Module.symvers at line %d", line_num); 137 + return -1; 138 + } 139 + 140 + export = calloc(1, sizeof(*export)); 141 + if (!export) { 142 + ERROR_GLIBC("calloc"); 143 + return -1; 144 + } 145 + 146 + export->mod = strdup(mod); 147 + if (!export->mod) { 148 + ERROR_GLIBC("strdup"); 149 + return -1; 150 + } 151 + 152 + export->sym = strdup(sym); 153 + if (!export->sym) { 154 + ERROR_GLIBC("strdup"); 155 + return -1; 156 + } 157 + 158 + hash_add(exports, &export->hash, str_hash(sym)); 159 + } 160 + 161 + free(path); 162 + fclose(file); 163 + 164 + return 0; 165 + } 166 + 167 + static int read_sym_checksums(struct elf *elf) 168 + { 169 + struct section *sec; 170 + 171 + sec = find_section_by_name(elf, ".discard.sym_checksum"); 172 + if (!sec) { 173 + 
ERROR("'%s' missing .discard.sym_checksum section, file not processed by 'objtool --checksum'?", 174 + elf->name); 175 + return -1; 176 + } 177 + 178 + if (!sec->rsec) { 179 + ERROR("missing reloc section for .discard.sym_checksum"); 180 + return -1; 181 + } 182 + 183 + if (sec_size(sec) % sizeof(struct sym_checksum)) { 184 + ERROR("struct sym_checksum size mismatch"); 185 + return -1; 186 + } 187 + 188 + for (int i = 0; i < sec_size(sec) / sizeof(struct sym_checksum); i++) { 189 + struct sym_checksum *sym_checksum; 190 + struct reloc *reloc; 191 + struct symbol *sym; 192 + 193 + sym_checksum = (struct sym_checksum *)sec->data->d_buf + i; 194 + 195 + reloc = find_reloc_by_dest(elf, sec, i * sizeof(*sym_checksum)); 196 + if (!reloc) { 197 + ERROR("can't find reloc for sym_checksum[%d]", i); 198 + return -1; 199 + } 200 + 201 + sym = reloc->sym; 202 + 203 + if (is_sec_sym(sym)) { 204 + ERROR("not sure how to handle section %s", sym->name); 205 + return -1; 206 + } 207 + 208 + if (is_func_sym(sym)) 209 + sym->csum.checksum = sym_checksum->checksum; 210 + } 211 + 212 + return 0; 213 + } 214 + 215 + static struct symbol *first_file_symbol(struct elf *elf) 216 + { 217 + struct symbol *sym; 218 + 219 + for_each_sym(elf, sym) { 220 + if (is_file_sym(sym)) 221 + return sym; 222 + } 223 + 224 + return NULL; 225 + } 226 + 227 + static struct symbol *next_file_symbol(struct elf *elf, struct symbol *sym) 228 + { 229 + for_each_sym_continue(elf, sym) { 230 + if (is_file_sym(sym)) 231 + return sym; 232 + } 233 + 234 + return NULL; 235 + } 236 + 237 + /* 238 + * Certain static local variables should never be correlated. They will be 239 + * used in place rather than referencing the originals. 
240 + */ 241 + static bool is_uncorrelated_static_local(struct symbol *sym) 242 + { 243 + static const char * const vars[] = { 244 + "__already_done.", 245 + "__func__.", 246 + "__key.", 247 + "__warned.", 248 + "_entry.", 249 + "_entry_ptr.", 250 + "_rs.", 251 + "descriptor.", 252 + "CSWTCH.", 253 + }; 254 + 255 + if (!is_object_sym(sym) || !is_local_sym(sym)) 256 + return false; 257 + 258 + if (!strcmp(sym->sec->name, ".data.once")) 259 + return true; 260 + 261 + for (int i = 0; i < ARRAY_SIZE(vars); i++) { 262 + if (strstarts(sym->name, vars[i])) 263 + return true; 264 + } 265 + 266 + return false; 267 + } 268 + 269 + /* 270 + * Clang emits several useless .Ltmp_* code labels. 271 + */ 272 + static bool is_clang_tmp_label(struct symbol *sym) 273 + { 274 + return sym->type == STT_NOTYPE && 275 + is_text_sec(sym->sec) && 276 + strstarts(sym->name, ".Ltmp") && 277 + isdigit(sym->name[5]); 278 + } 279 + 280 + static bool is_special_section(struct section *sec) 281 + { 282 + static const char * const specials[] = { 283 + ".altinstructions", 284 + ".smp_locks", 285 + "__bug_table", 286 + "__ex_table", 287 + "__jump_table", 288 + "__mcount_loc", 289 + 290 + /* 291 + * Extract .static_call_sites here to inherit non-module 292 + * preferential treatment. The later static call processing 293 + * during klp module build will be skipped when it sees this 294 + * section already exists. 
295 + */ 296 + ".static_call_sites", 297 + }; 298 + 299 + static const char * const non_special_discards[] = { 300 + ".discard.addressable", 301 + ".discard.sym_checksum", 302 + }; 303 + 304 + if (is_text_sec(sec)) 305 + return false; 306 + 307 + for (int i = 0; i < ARRAY_SIZE(specials); i++) { 308 + if (!strcmp(sec->name, specials[i])) 309 + return true; 310 + } 311 + 312 + /* Most .discard data sections are special */ 313 + for (int i = 0; i < ARRAY_SIZE(non_special_discards); i++) { 314 + if (!strcmp(sec->name, non_special_discards[i])) 315 + return false; 316 + } 317 + 318 + return strstarts(sec->name, ".discard."); 319 + } 320 + 321 + /* 322 + * These sections are referenced by special sections but aren't considered 323 + * special sections themselves. 324 + */ 325 + static bool is_special_section_aux(struct section *sec) 326 + { 327 + static const char * const specials_aux[] = { 328 + ".altinstr_replacement", 329 + ".altinstr_aux", 330 + }; 331 + 332 + for (int i = 0; i < ARRAY_SIZE(specials_aux); i++) { 333 + if (!strcmp(sec->name, specials_aux[i])) 334 + return true; 335 + } 336 + 337 + return false; 338 + } 339 + 340 + /* 341 + * These symbols should never be correlated, so their local patched versions 342 + * are used instead of linking to the originals. 343 + */ 344 + static bool dont_correlate(struct symbol *sym) 345 + { 346 + return is_file_sym(sym) || 347 + is_null_sym(sym) || 348 + is_sec_sym(sym) || 349 + is_prefix_func(sym) || 350 + is_uncorrelated_static_local(sym) || 351 + is_clang_tmp_label(sym) || 352 + is_string_sec(sym->sec) || 353 + is_special_section(sym->sec) || 354 + is_special_section_aux(sym->sec) || 355 + strstarts(sym->name, "__initcall__"); 356 + } 357 + 358 + /* 359 + * For each symbol in the original kernel, find its corresponding "twin" in the 360 + * patched kernel. 
361 + */ 362 + static int correlate_symbols(struct elfs *e) 363 + { 364 + struct symbol *file1_sym, *file2_sym; 365 + struct symbol *sym1, *sym2; 366 + 367 + /* Correlate locals */ 368 + for (file1_sym = first_file_symbol(e->orig), 369 + file2_sym = first_file_symbol(e->patched); ; 370 + file1_sym = next_file_symbol(e->orig, file1_sym), 371 + file2_sym = next_file_symbol(e->patched, file2_sym)) { 372 + 373 + if (!file1_sym && file2_sym) { 374 + ERROR("FILE symbol mismatch: NULL != %s", file2_sym->name); 375 + return -1; 376 + } 377 + 378 + if (file1_sym && !file2_sym) { 379 + ERROR("FILE symbol mismatch: %s != NULL", file1_sym->name); 380 + return -1; 381 + } 382 + 383 + if (!file1_sym) 384 + break; 385 + 386 + if (strcmp(file1_sym->name, file2_sym->name)) { 387 + ERROR("FILE symbol mismatch: %s != %s", file1_sym->name, file2_sym->name); 388 + return -1; 389 + } 390 + 391 + file1_sym->twin = file2_sym; 392 + file2_sym->twin = file1_sym; 393 + 394 + sym1 = file1_sym; 395 + 396 + for_each_sym_continue(e->orig, sym1) { 397 + if (is_file_sym(sym1) || !is_local_sym(sym1)) 398 + break; 399 + 400 + if (dont_correlate(sym1)) 401 + continue; 402 + 403 + sym2 = file2_sym; 404 + for_each_sym_continue(e->patched, sym2) { 405 + if (is_file_sym(sym2) || !is_local_sym(sym2)) 406 + break; 407 + 408 + if (sym2->twin || dont_correlate(sym2)) 409 + continue; 410 + 411 + if (strcmp(sym1->demangled_name, sym2->demangled_name)) 412 + continue; 413 + 414 + sym1->twin = sym2; 415 + sym2->twin = sym1; 416 + break; 417 + } 418 + } 419 + } 420 + 421 + /* Correlate globals */ 422 + for_each_sym(e->orig, sym1) { 423 + if (sym1->bind == STB_LOCAL) 424 + continue; 425 + 426 + sym2 = find_global_symbol_by_name(e->patched, sym1->name); 427 + 428 + if (sym2 && !sym2->twin && !strcmp(sym1->name, sym2->name)) { 429 + sym1->twin = sym2; 430 + sym2->twin = sym1; 431 + } 432 + } 433 + 434 + for_each_sym(e->orig, sym1) { 435 + if (sym1->twin || dont_correlate(sym1)) 436 + continue; 437 + WARN("no 
correlation: %s", sym1->name); 438 + } 439 + 440 + return 0; 441 + } 442 + 443 + /* "sympos" is used by livepatch to disambiguate duplicate symbol names */ 444 + static unsigned long find_sympos(struct elf *elf, struct symbol *sym) 445 + { 446 + bool vmlinux = str_ends_with(objname, "vmlinux.o"); 447 + unsigned long sympos = 0, nr_matches = 0; 448 + bool has_dup = false; 449 + struct symbol *s; 450 + 451 + if (sym->bind != STB_LOCAL) 452 + return 0; 453 + 454 + if (vmlinux && sym->type == STT_FUNC) { 455 + /* 456 + * HACK: Unfortunately, symbol ordering can differ between 457 + * vmlinux.o and vmlinux due to the linker script emitting 458 + * .text.unlikely* before .text*. Count .text.unlikely* first. 459 + * 460 + * TODO: Disambiguate symbols more reliably (checksums?) 461 + */ 462 + for_each_sym(elf, s) { 463 + if (strstarts(s->sec->name, ".text.unlikely") && 464 + !strcmp(s->name, sym->name)) { 465 + nr_matches++; 466 + if (s == sym) 467 + sympos = nr_matches; 468 + else 469 + has_dup = true; 470 + } 471 + } 472 + for_each_sym(elf, s) { 473 + if (!strstarts(s->sec->name, ".text.unlikely") && 474 + !strcmp(s->name, sym->name)) { 475 + nr_matches++; 476 + if (s == sym) 477 + sympos = nr_matches; 478 + else 479 + has_dup = true; 480 + } 481 + } 482 + } else { 483 + for_each_sym(elf, s) { 484 + if (!strcmp(s->name, sym->name)) { 485 + nr_matches++; 486 + if (s == sym) 487 + sympos = nr_matches; 488 + else 489 + has_dup = true; 490 + } 491 + } 492 + } 493 + 494 + if (!sympos) { 495 + ERROR("can't find sympos for %s", sym->name); 496 + return ULONG_MAX; 497 + } 498 + 499 + return has_dup ? 
sympos : 0; 500 + } 501 + 502 + static int clone_sym_relocs(struct elfs *e, struct symbol *patched_sym); 503 + 504 + static struct symbol *__clone_symbol(struct elf *elf, struct symbol *patched_sym, 505 + bool data_too) 506 + { 507 + struct section *out_sec = NULL; 508 + unsigned long offset = 0; 509 + struct symbol *out_sym; 510 + 511 + if (data_too && !is_undef_sym(patched_sym)) { 512 + struct section *patched_sec = patched_sym->sec; 513 + 514 + out_sec = find_section_by_name(elf, patched_sec->name); 515 + if (!out_sec) { 516 + out_sec = elf_create_section(elf, patched_sec->name, 0, 517 + patched_sec->sh.sh_entsize, 518 + patched_sec->sh.sh_type, 519 + patched_sec->sh.sh_addralign, 520 + patched_sec->sh.sh_flags); 521 + if (!out_sec) 522 + return NULL; 523 + } 524 + 525 + if (is_string_sec(patched_sym->sec)) { 526 + out_sym = elf_create_section_symbol(elf, out_sec); 527 + if (!out_sym) 528 + return NULL; 529 + 530 + goto sym_created; 531 + } 532 + 533 + if (!is_sec_sym(patched_sym)) 534 + offset = sec_size(out_sec); 535 + 536 + if (patched_sym->len || is_sec_sym(patched_sym)) { 537 + void *data = NULL; 538 + size_t size; 539 + 540 + /* bss doesn't have data */ 541 + if (patched_sym->sec->data->d_buf) 542 + data = patched_sym->sec->data->d_buf + patched_sym->offset; 543 + 544 + if (is_sec_sym(patched_sym)) 545 + size = sec_size(patched_sym->sec); 546 + else 547 + size = patched_sym->len; 548 + 549 + if (!elf_add_data(elf, out_sec, data, size)) 550 + return NULL; 551 + } 552 + } 553 + 554 + out_sym = elf_create_symbol(elf, patched_sym->name, out_sec, 555 + patched_sym->bind, patched_sym->type, 556 + offset, patched_sym->len); 557 + if (!out_sym) 558 + return NULL; 559 + 560 + sym_created: 561 + patched_sym->clone = out_sym; 562 + out_sym->clone = patched_sym; 563 + 564 + return out_sym; 565 + } 566 + 567 + static const char *sym_type(struct symbol *sym) 568 + { 569 + switch (sym->type) { 570 + case STT_NOTYPE: return "NOTYPE"; 571 + case STT_OBJECT: return 
"OBJECT"; 572 + case STT_FUNC: return "FUNC"; 573 + case STT_SECTION: return "SECTION"; 574 + case STT_FILE: return "FILE"; 575 + default: return "UNKNOWN"; 576 + } 577 + } 578 + 579 + static const char *sym_bind(struct symbol *sym) 580 + { 581 + switch (sym->bind) { 582 + case STB_LOCAL: return "LOCAL"; 583 + case STB_GLOBAL: return "GLOBAL"; 584 + case STB_WEAK: return "WEAK"; 585 + default: return "UNKNOWN"; 586 + } 587 + } 588 + 589 + /* 590 + * Copy a symbol to the output object, optionally including its data and 591 + * relocations. 592 + */ 593 + static struct symbol *clone_symbol(struct elfs *e, struct symbol *patched_sym, 594 + bool data_too) 595 + { 596 + struct symbol *pfx; 597 + 598 + if (patched_sym->clone) 599 + return patched_sym->clone; 600 + 601 + dbg_indent("%s%s", patched_sym->name, data_too ? " [+DATA]" : ""); 602 + 603 + /* Make sure the prefix gets cloned first */ 604 + if (is_func_sym(patched_sym) && data_too) { 605 + pfx = get_func_prefix(patched_sym); 606 + if (pfx) 607 + clone_symbol(e, pfx, true); 608 + } 609 + 610 + if (!__clone_symbol(e->out, patched_sym, data_too)) 611 + return NULL; 612 + 613 + if (data_too && clone_sym_relocs(e, patched_sym)) 614 + return NULL; 615 + 616 + return patched_sym->clone; 617 + } 618 + 619 + static void mark_included_function(struct symbol *func) 620 + { 621 + struct symbol *pfx; 622 + 623 + func->included = 1; 624 + 625 + /* Include prefix function */ 626 + pfx = get_func_prefix(func); 627 + if (pfx) 628 + pfx->included = 1; 629 + 630 + /* Make sure .cold parent+child always stay together */ 631 + if (func->cfunc && func->cfunc != func) 632 + func->cfunc->included = 1; 633 + if (func->pfunc && func->pfunc != func) 634 + func->pfunc->included = 1; 635 + } 636 + 637 + /* 638 + * Copy all changed functions (and their dependencies) from the patched object 639 + * to the output object. 
640 + */ 641 + static int mark_changed_functions(struct elfs *e) 642 + { 643 + struct symbol *sym_orig, *patched_sym; 644 + bool changed = false; 645 + 646 + /* Find changed functions */ 647 + for_each_sym(e->orig, sym_orig) { 648 + if (!is_func_sym(sym_orig) || is_prefix_func(sym_orig)) 649 + continue; 650 + 651 + patched_sym = sym_orig->twin; 652 + if (!patched_sym) 653 + continue; 654 + 655 + if (sym_orig->csum.checksum != patched_sym->csum.checksum) { 656 + patched_sym->changed = 1; 657 + mark_included_function(patched_sym); 658 + changed = true; 659 + } 660 + } 661 + 662 + /* Find added functions and print them */ 663 + for_each_sym(e->patched, patched_sym) { 664 + if (!is_func_sym(patched_sym) || is_prefix_func(patched_sym)) 665 + continue; 666 + 667 + if (!patched_sym->twin) { 668 + printf("%s: new function: %s\n", objname, patched_sym->name); 669 + mark_included_function(patched_sym); 670 + changed = true; 671 + } 672 + } 673 + 674 + /* Print changed functions */ 675 + for_each_sym(e->patched, patched_sym) { 676 + if (patched_sym->changed) 677 + printf("%s: changed function: %s\n", objname, patched_sym->name); 678 + } 679 + 680 + return !changed ? -1 : 0; 681 + } 682 + 683 + static int clone_included_functions(struct elfs *e) 684 + { 685 + struct symbol *patched_sym; 686 + 687 + for_each_sym(e->patched, patched_sym) { 688 + if (patched_sym->included) { 689 + if (!clone_symbol(e, patched_sym, true)) 690 + return -1; 691 + } 692 + } 693 + 694 + return 0; 695 + } 696 + 697 + /* 698 + * Determine whether a relocation should reference the section rather than the 699 + * underlying symbol. 700 + */ 701 + static bool section_reference_needed(struct section *sec) 702 + { 703 + /* 704 + * String symbols are zero-length and uncorrelated. It's easier to 705 + * deal with them as section symbols. 
706 + */ 707 + if (is_string_sec(sec)) 708 + return true; 709 + 710 + /* 711 + * .rodata has mostly anonymous data so there's no way to determine the 712 + * length of a needed reference. just copy the whole section if needed. 713 + */ 714 + if (strstarts(sec->name, ".rodata")) 715 + return true; 716 + 717 + /* UBSAN anonymous data */ 718 + if (strstarts(sec->name, ".data..Lubsan") || /* GCC */ 719 + strstarts(sec->name, ".data..L__unnamed_")) /* Clang */ 720 + return true; 721 + 722 + return false; 723 + } 724 + 725 + static bool is_reloc_allowed(struct reloc *reloc) 726 + { 727 + return section_reference_needed(reloc->sym->sec) == is_sec_sym(reloc->sym); 728 + } 729 + 730 + static struct export *find_export(struct symbol *sym) 731 + { 732 + struct export *export; 733 + 734 + hash_for_each_possible(exports, export, hash, str_hash(sym->name)) { 735 + if (!strcmp(export->sym, sym->name)) 736 + return export; 737 + } 738 + 739 + return NULL; 740 + } 741 + 742 + static const char *__find_modname(struct elfs *e) 743 + { 744 + struct section *sec; 745 + char *name; 746 + 747 + sec = find_section_by_name(e->orig, ".modinfo"); 748 + if (!sec) { 749 + ERROR("missing .modinfo section"); 750 + return NULL; 751 + } 752 + 753 + name = memmem(sec->data->d_buf, sec_size(sec), "\0name=", 6); 754 + if (name) 755 + return name + 6; 756 + 757 + name = strdup(e->orig->name); 758 + if (!name) { 759 + ERROR_GLIBC("strdup"); 760 + return NULL; 761 + } 762 + 763 + for (char *c = name; *c; c++) { 764 + if (*c == '/') 765 + name = c + 1; 766 + else if (*c == '-') 767 + *c = '_'; 768 + else if (*c == '.') { 769 + *c = '\0'; 770 + break; 771 + } 772 + } 773 + 774 + return name; 775 + } 776 + 777 + /* Get the object's module name as defined by the kernel (and klp_object) */ 778 + static const char *find_modname(struct elfs *e) 779 + { 780 + const char *modname; 781 + 782 + if (e->modname) 783 + return e->modname; 784 + 785 + modname = __find_modname(e); 786 + e->modname = modname; 787 + 
return modname; 788 + } 789 + 790 + /* 791 + * Copying a function from its native compiled environment to a kernel module 792 + * removes its natural access to local functions/variables and unexported 793 + * globals. References to such symbols need to be converted to KLP relocs so 794 + * the kernel arch relocation code knows to apply them and where to find the 795 + * symbols. Particularly, duplicate static symbols need to be disambiguated. 796 + */ 797 + static bool klp_reloc_needed(struct reloc *patched_reloc) 798 + { 799 + struct symbol *patched_sym = patched_reloc->sym; 800 + struct export *export; 801 + 802 + /* no external symbol to reference */ 803 + if (dont_correlate(patched_sym)) 804 + return false; 805 + 806 + /* For included functions, a regular reloc will do. */ 807 + if (patched_sym->included) 808 + return false; 809 + 810 + /* 811 + * If exported by a module, it has to be a klp reloc. Thanks to the 812 + * clusterfunk that is late module patching, the patch module is 813 + * allowed to be loaded before any modules it depends on. 814 + * 815 + * If exported by vmlinux, a normal reloc will do. 816 + */ 817 + export = find_export(patched_sym); 818 + if (export) 819 + return strcmp(export->mod, "vmlinux"); 820 + 821 + if (!patched_sym->twin) { 822 + /* 823 + * Presumably the symbol and its reference were added by the 824 + * patch. The symbol could be defined in this .o or in another 825 + * .o in the patch module. 826 + * 827 + * This check needs to be *after* the export check due to the 828 + * possibility of the patch adding a new UNDEF reference to an 829 + * exported symbol. 830 + */ 831 + return false; 832 + } 833 + 834 + /* Unexported symbol which lives in the original vmlinux or module. 
*/ 835 + return true; 836 + } 837 + 838 + static int convert_reloc_sym_to_secsym(struct elf *elf, struct reloc *reloc) 839 + { 840 + struct symbol *sym = reloc->sym; 841 + struct section *sec = sym->sec; 842 + 843 + if (!sec->sym && !elf_create_section_symbol(elf, sec)) 844 + return -1; 845 + 846 + reloc->sym = sec->sym; 847 + set_reloc_sym(elf, reloc, sym->idx); 848 + set_reloc_addend(elf, reloc, sym->offset + reloc_addend(reloc)); 849 + return 0; 850 + } 851 + 852 + static int convert_reloc_secsym_to_sym(struct elf *elf, struct reloc *reloc) 853 + { 854 + struct symbol *sym = reloc->sym; 855 + struct section *sec = sym->sec; 856 + 857 + /* If the symbol has a dedicated section, it's easy to find */ 858 + sym = find_symbol_by_offset(sec, 0); 859 + if (sym && sym->len == sec_size(sec)) 860 + goto found_sym; 861 + 862 + /* No dedicated section; find the symbol manually */ 863 + sym = find_symbol_containing(sec, arch_adjusted_addend(reloc)); 864 + if (!sym) { 865 + /* 866 + * This can happen for special section references to weak code 867 + * whose symbol has been stripped by the linker. 868 + */ 869 + return -1; 870 + } 871 + 872 + found_sym: 873 + reloc->sym = sym; 874 + set_reloc_sym(elf, reloc, sym->idx); 875 + set_reloc_addend(elf, reloc, reloc_addend(reloc) - sym->offset); 876 + return 0; 877 + } 878 + 879 + /* 880 + * Convert a relocation symbol reference to the needed format: either a section 881 + * symbol or the underlying symbol itself. 882 + */ 883 + static int convert_reloc_sym(struct elf *elf, struct reloc *reloc) 884 + { 885 + if (is_reloc_allowed(reloc)) 886 + return 0; 887 + 888 + if (section_reference_needed(reloc->sym->sec)) 889 + return convert_reloc_sym_to_secsym(elf, reloc); 890 + else 891 + return convert_reloc_secsym_to_sym(elf, reloc); 892 + } 893 + 894 + /* 895 + * Convert a regular relocation to a klp relocation (sort of). 
896 + */ 897 + static int clone_reloc_klp(struct elfs *e, struct reloc *patched_reloc, 898 + struct section *sec, unsigned long offset, 899 + struct export *export) 900 + { 901 + struct symbol *patched_sym = patched_reloc->sym; 902 + s64 addend = reloc_addend(patched_reloc); 903 + const char *sym_modname, *sym_orig_name; 904 + static struct section *klp_relocs; 905 + struct symbol *sym, *klp_sym; 906 + unsigned long klp_reloc_off; 907 + char sym_name[SYM_NAME_LEN]; 908 + struct klp_reloc klp_reloc; 909 + unsigned long sympos; 910 + 911 + if (!patched_sym->twin) { 912 + ERROR("unexpected klp reloc for new symbol %s", patched_sym->name); 913 + return -1; 914 + } 915 + 916 + /* 917 + * Keep the original reloc intact for now to avoid breaking objtool run 918 + * which relies on proper relocations for many of its features. This 919 + * will be disabled later by "objtool klp post-link". 920 + * 921 + * Convert it to UNDEF (and WEAK to avoid modpost warnings). 922 + */ 923 + 924 + sym = patched_sym->clone; 925 + if (!sym) { 926 + /* STB_WEAK: avoid modpost undefined symbol warnings */ 927 + sym = elf_create_symbol(e->out, patched_sym->name, NULL, 928 + STB_WEAK, patched_sym->type, 0, 0); 929 + if (!sym) 930 + return -1; 931 + 932 + patched_sym->clone = sym; 933 + sym->clone = patched_sym; 934 + } 935 + 936 + if (!elf_create_reloc(e->out, sec, offset, sym, addend, reloc_type(patched_reloc))) 937 + return -1; 938 + 939 + /* 940 + * Create the KLP symbol. 
941 + */ 942 + 943 + if (export) { 944 + sym_modname = export->mod; 945 + sym_orig_name = export->sym; 946 + sympos = 0; 947 + } else { 948 + sym_modname = find_modname(e); 949 + if (!sym_modname) 950 + return -1; 951 + 952 + sym_orig_name = patched_sym->twin->name; 953 + sympos = find_sympos(e->orig, patched_sym->twin); 954 + if (sympos == ULONG_MAX) 955 + return -1; 956 + } 957 + 958 + /* symbol format: .klp.sym.modname.sym_name,sympos */ 959 + if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_SYM_PREFIX "%s.%s,%ld", 960 + sym_modname, sym_orig_name, sympos)) 961 + return -1; 962 + 963 + klp_sym = find_symbol_by_name(e->out, sym_name); 964 + if (!klp_sym) { 965 + __dbg_indent("%s", sym_name); 966 + 967 + /* STB_WEAK: avoid modpost undefined symbol warnings */ 968 + klp_sym = elf_create_symbol(e->out, sym_name, NULL, 969 + STB_WEAK, patched_sym->type, 0, 0); 970 + if (!klp_sym) 971 + return -1; 972 + } 973 + 974 + /* 975 + * Create the __klp_relocs entry. This will be converted to an actual 976 + * KLP rela by "objtool klp post-link". 977 + * 978 + * This intermediate step is necessary to prevent corruption by the 979 + * linker, which doesn't know how to properly handle two rela sections 980 + * applying to the same base section. 
981 + */ 982 + 983 + if (!klp_relocs) { 984 + klp_relocs = elf_create_section(e->out, KLP_RELOCS_SEC, 0, 985 + 0, SHT_PROGBITS, 8, SHF_ALLOC); 986 + if (!klp_relocs) 987 + return -1; 988 + } 989 + 990 + klp_reloc_off = sec_size(klp_relocs); 991 + memset(&klp_reloc, 0, sizeof(klp_reloc)); 992 + 993 + klp_reloc.type = reloc_type(patched_reloc); 994 + if (!elf_add_data(e->out, klp_relocs, &klp_reloc, sizeof(klp_reloc))) 995 + return -1; 996 + 997 + /* klp_reloc.offset */ 998 + if (!sec->sym && !elf_create_section_symbol(e->out, sec)) 999 + return -1; 1000 + 1001 + if (!elf_create_reloc(e->out, klp_relocs, 1002 + klp_reloc_off + offsetof(struct klp_reloc, offset), 1003 + sec->sym, offset, R_ABS64)) 1004 + return -1; 1005 + 1006 + /* klp_reloc.sym */ 1007 + if (!elf_create_reloc(e->out, klp_relocs, 1008 + klp_reloc_off + offsetof(struct klp_reloc, sym), 1009 + klp_sym, addend, R_ABS64)) 1010 + return -1; 1011 + 1012 + return 0; 1013 + } 1014 + 1015 + #define dbg_clone_reloc(sec, offset, patched_sym, addend, export, klp) \ 1016 + dbg_indent("%s+0x%lx: %s%s0x%lx [%s%s%s%s%s%s]", \ 1017 + sec->name, offset, patched_sym->name, \ 1018 + addend >= 0 ? "+" : "-", labs(addend), \ 1019 + sym_type(patched_sym), \ 1020 + patched_sym->type == STT_SECTION ? "" : " ", \ 1021 + patched_sym->type == STT_SECTION ? "" : sym_bind(patched_sym), \ 1022 + is_undef_sym(patched_sym) ? " UNDEF" : "", \ 1023 + export ? " EXPORTED" : "", \ 1024 + klp ? 
" KLP" : "") 1025 + 1026 + /* Copy a reloc and its symbol to the output object */ 1027 + static int clone_reloc(struct elfs *e, struct reloc *patched_reloc, 1028 + struct section *sec, unsigned long offset) 1029 + { 1030 + struct symbol *patched_sym = patched_reloc->sym; 1031 + struct export *export = find_export(patched_sym); 1032 + long addend = reloc_addend(patched_reloc); 1033 + struct symbol *out_sym; 1034 + bool klp; 1035 + 1036 + if (!is_reloc_allowed(patched_reloc)) { 1037 + ERROR_FUNC(patched_reloc->sec->base, reloc_offset(patched_reloc), 1038 + "missing symbol for reference to %s+%ld", 1039 + patched_sym->name, addend); 1040 + return -1; 1041 + } 1042 + 1043 + klp = klp_reloc_needed(patched_reloc); 1044 + 1045 + dbg_clone_reloc(sec, offset, patched_sym, addend, export, klp); 1046 + 1047 + if (klp) { 1048 + if (clone_reloc_klp(e, patched_reloc, sec, offset, export)) 1049 + return -1; 1050 + 1051 + return 0; 1052 + } 1053 + 1054 + /* 1055 + * Why !export sets 'data_too': 1056 + * 1057 + * Unexported non-klp symbols need to live in the patch module, 1058 + * otherwise there will be unresolved symbols. Notably, this includes: 1059 + * 1060 + * - New functions/data 1061 + * - String sections 1062 + * - Special section entries 1063 + * - Uncorrelated static local variables 1064 + * - UBSAN sections 1065 + */ 1066 + out_sym = clone_symbol(e, patched_sym, patched_sym->included || !export); 1067 + if (!out_sym) 1068 + return -1; 1069 + 1070 + /* 1071 + * For strings, all references use section symbols, thanks to 1072 + * section_reference_needed(). clone_symbol() has cloned an empty 1073 + * version of the string section. Now copy the string itself. 
1074 + */ 1075 + if (is_string_sec(patched_sym->sec)) { 1076 + const char *str = patched_sym->sec->data->d_buf + addend; 1077 + 1078 + __dbg_indent("\"%s\"", escape_str(str)); 1079 + 1080 + addend = elf_add_string(e->out, out_sym->sec, str); 1081 + if (addend == -1) 1082 + return -1; 1083 + } 1084 + 1085 + if (!elf_create_reloc(e->out, sec, offset, out_sym, addend, 1086 + reloc_type(patched_reloc))) 1087 + return -1; 1088 + 1089 + return 0; 1090 + } 1091 + 1092 + /* Copy all relocs needed for a symbol's contents */ 1093 + static int clone_sym_relocs(struct elfs *e, struct symbol *patched_sym) 1094 + { 1095 + struct section *patched_rsec = patched_sym->sec->rsec; 1096 + struct reloc *patched_reloc; 1097 + unsigned long start, end; 1098 + struct symbol *out_sym; 1099 + 1100 + out_sym = patched_sym->clone; 1101 + if (!out_sym) { 1102 + ERROR("no clone for %s", patched_sym->name); 1103 + return -1; 1104 + } 1105 + 1106 + if (!patched_rsec) 1107 + return 0; 1108 + 1109 + if (!is_sec_sym(patched_sym) && !patched_sym->len) 1110 + return 0; 1111 + 1112 + if (is_string_sec(patched_sym->sec)) 1113 + return 0; 1114 + 1115 + if (is_sec_sym(patched_sym)) { 1116 + start = 0; 1117 + end = sec_size(patched_sym->sec); 1118 + } else { 1119 + start = patched_sym->offset; 1120 + end = start + patched_sym->len; 1121 + } 1122 + 1123 + for_each_reloc(patched_rsec, patched_reloc) { 1124 + unsigned long offset; 1125 + 1126 + if (reloc_offset(patched_reloc) < start || 1127 + reloc_offset(patched_reloc) >= end) 1128 + continue; 1129 + 1130 + /* 1131 + * Skip any reloc referencing .altinstr_aux. Its code is 1132 + * always patched by alternatives. See ALTERNATIVE_TERNARY(). 
1133 + */ 1134 + if (patched_reloc->sym->sec && 1135 + !strcmp(patched_reloc->sym->sec->name, ".altinstr_aux")) 1136 + continue; 1137 + 1138 + if (convert_reloc_sym(e->patched, patched_reloc)) { 1139 + ERROR_FUNC(patched_rsec->base, reloc_offset(patched_reloc), 1140 + "failed to convert reloc sym '%s' to its proper format", 1141 + patched_reloc->sym->name); 1142 + return -1; 1143 + } 1144 + 1145 + offset = out_sym->offset + (reloc_offset(patched_reloc) - patched_sym->offset); 1146 + 1147 + if (clone_reloc(e, patched_reloc, out_sym->sec, offset)) 1148 + return -1; 1149 + } 1150 + return 0; 1151 + 1152 + } 1153 + 1154 + static int create_fake_symbol(struct elf *elf, struct section *sec, 1155 + unsigned long offset, size_t size) 1156 + { 1157 + char name[SYM_NAME_LEN]; 1158 + unsigned int type; 1159 + static int ctr; 1160 + char *c; 1161 + 1162 + if (snprintf_check(name, SYM_NAME_LEN, "%s_%d", sec->name, ctr++)) 1163 + return -1; 1164 + 1165 + for (c = name; *c; c++) 1166 + if (*c == '.') 1167 + *c = '_'; 1168 + 1169 + /* 1170 + * STT_NOTYPE: Prevent objtool from validating .altinstr_replacement 1171 + * while still allowing objdump to disassemble it. 1172 + */ 1173 + type = is_text_sec(sec) ? STT_NOTYPE : STT_OBJECT; 1174 + return elf_create_symbol(elf, name, sec, STB_LOCAL, type, offset, size) ? 0 : -1; 1175 + } 1176 + 1177 + /* 1178 + * Special sections (alternatives, etc) are basically arrays of structs. 1179 + * For all the special sections, create a symbol for each struct entry. This 1180 + * is a bit cumbersome, but it makes the extracting of the individual entries 1181 + * much more straightforward. 1182 + * 1183 + * There are three ways to identify the entry sizes for a special section: 1184 + * 1185 + * 1) ELF section header sh_entsize: Ideally this would be used almost 1186 + * everywhere. But unfortunately the toolchains make it difficult. The 1187 + * assembler .[push]section directive syntax only takes entsize when 1188 + * combined with SHF_MERGE. 
But Clang disallows combining SHF_MERGE with 1189 + * SHF_WRITE. And some special sections do need to be writable. 1190 + * 1191 + * Another place this wouldn't work is .altinstr_replacement, whose entries 1192 + * don't have a fixed size. 1193 + * 1194 + * 2) ANNOTATE_DATA_SPECIAL: This is a lightweight objtool annotation which 1195 + * points to the beginning of each entry. The size of the entry is then 1196 + * inferred by the location of the subsequent annotation (or end of 1197 + * section). 1198 + * 1199 + * 3) Simple array of pointers: If the special section is just a basic array of 1200 + * pointers, the entry size can be inferred by the number of relocations. 1201 + * No annotations needed. 1202 + * 1203 + * Note I also tried to create per-entry symbols at the time of creation, in 1204 + * the original [inline] asm. Unfortunately, creating uniquely named symbols 1205 + * is trickier than one might think, especially with Clang inline asm. I 1206 + * eventually just gave up trying to make that work, in favor of using 1207 + * ANNOTATE_DATA_SPECIAL and creating the symbols here after the fact. 
1208 + */ 1209 + static int create_fake_symbols(struct elf *elf) 1210 + { 1211 + struct section *sec; 1212 + struct reloc *reloc; 1213 + 1214 + /* 1215 + * 1) Make symbols for all the ANNOTATE_DATA_SPECIAL entries: 1216 + */ 1217 + 1218 + sec = find_section_by_name(elf, ".discard.annotate_data"); 1219 + if (!sec || !sec->rsec) 1220 + return 0; 1221 + 1222 + for_each_reloc(sec->rsec, reloc) { 1223 + unsigned long offset, size; 1224 + struct reloc *next_reloc; 1225 + 1226 + if (annotype(elf, sec, reloc) != ANNOTYPE_DATA_SPECIAL) 1227 + continue; 1228 + 1229 + offset = reloc_addend(reloc); 1230 + 1231 + size = 0; 1232 + next_reloc = reloc; 1233 + for_each_reloc_continue(sec->rsec, next_reloc) { 1234 + if (annotype(elf, sec, next_reloc) != ANNOTYPE_DATA_SPECIAL || 1235 + next_reloc->sym->sec != reloc->sym->sec) 1236 + continue; 1237 + 1238 + size = reloc_addend(next_reloc) - offset; 1239 + break; 1240 + } 1241 + 1242 + if (!size) 1243 + size = sec_size(reloc->sym->sec) - offset; 1244 + 1245 + if (create_fake_symbol(elf, reloc->sym->sec, offset, size)) 1246 + return -1; 1247 + } 1248 + 1249 + /* 1250 + * 2) Make symbols for sh_entsize, and simple arrays of pointers: 1251 + */ 1252 + 1253 + for_each_sec(elf, sec) { 1254 + unsigned int entry_size; 1255 + unsigned long offset; 1256 + 1257 + if (!is_special_section(sec) || find_symbol_by_offset(sec, 0)) 1258 + continue; 1259 + 1260 + if (!sec->rsec) { 1261 + ERROR("%s: missing special section relocations", sec->name); 1262 + return -1; 1263 + } 1264 + 1265 + entry_size = sec->sh.sh_entsize; 1266 + if (!entry_size) { 1267 + entry_size = arch_reloc_size(sec->rsec->relocs); 1268 + if (sec_size(sec) != entry_size * sec_num_entries(sec->rsec)) { 1269 + ERROR("%s: missing special section entsize or annotations", sec->name); 1270 + return -1; 1271 + } 1272 + } 1273 + 1274 + for (offset = 0; offset < sec_size(sec); offset += entry_size) { 1275 + if (create_fake_symbol(elf, sec, offset, entry_size)) 1276 + return -1; 1277 + } 1278 + 
} 1279 + 1280 + return 0; 1281 + } 1282 + 1283 + /* Keep a special section entry if it references an included function */ 1284 + static bool should_keep_special_sym(struct elf *elf, struct symbol *sym) 1285 + { 1286 + struct reloc *reloc; 1287 + 1288 + if (is_sec_sym(sym) || !sym->sec->rsec) 1289 + return false; 1290 + 1291 + sym_for_each_reloc(elf, sym, reloc) { 1292 + if (convert_reloc_sym(elf, reloc)) 1293 + continue; 1294 + 1295 + if (is_func_sym(reloc->sym) && reloc->sym->included) 1296 + return true; 1297 + } 1298 + 1299 + return false; 1300 + } 1301 + 1302 + /* 1303 + * Klp relocations aren't allowed for __jump_table and .static_call_sites if 1304 + * the referenced symbol lives in a kernel module, because such klp relocs may 1305 + * be applied after static branch/call init, resulting in code corruption. 1306 + * 1307 + * Validate a special section entry to avoid that. Note that an inert 1308 + * tracepoint is harmless enough, in that case just skip the entry and print a 1309 + * warning. Otherwise, return an error. 1310 + * 1311 + * This is only a temporary limitation which will be fixed when livepatch adds 1312 + * support for submodules: fully self-contained modules which are embedded in 1313 + * the top-level livepatch module's data and which can be loaded on demand when 1314 + * their corresponding to-be-patched module gets loaded. Then klp relocs can 1315 + * be retired. 
1316 + * 1317 + * Return: 1318 + * -1: error: validation failed 1319 + * 1: warning: tracepoint skipped 1320 + * 0: success 1321 + */ 1322 + static int validate_special_section_klp_reloc(struct elfs *e, struct symbol *sym) 1323 + { 1324 + bool static_branch = !strcmp(sym->sec->name, "__jump_table"); 1325 + bool static_call = !strcmp(sym->sec->name, ".static_call_sites"); 1326 + struct symbol *code_sym = NULL; 1327 + unsigned long code_offset = 0; 1328 + struct reloc *reloc; 1329 + int ret = 0; 1330 + 1331 + if (!static_branch && !static_call) 1332 + return 0; 1333 + 1334 + sym_for_each_reloc(e->patched, sym, reloc) { 1335 + const char *sym_modname; 1336 + struct export *export; 1337 + 1338 + /* Static branch/call keys are always STT_OBJECT */ 1339 + if (reloc->sym->type != STT_OBJECT) { 1340 + 1341 + /* Save code location which can be printed below */ 1342 + if (reloc->sym->type == STT_FUNC && !code_sym) { 1343 + code_sym = reloc->sym; 1344 + code_offset = reloc_addend(reloc); 1345 + } 1346 + 1347 + continue; 1348 + } 1349 + 1350 + if (!klp_reloc_needed(reloc)) 1351 + continue; 1352 + 1353 + export = find_export(reloc->sym); 1354 + if (export) { 1355 + sym_modname = export->mod; 1356 + } else { 1357 + sym_modname = find_modname(e); 1358 + if (!sym_modname) 1359 + return -1; 1360 + } 1361 + 1362 + /* vmlinux keys are ok */ 1363 + if (!strcmp(sym_modname, "vmlinux")) 1364 + continue; 1365 + 1366 + if (static_branch) { 1367 + if (strstarts(reloc->sym->name, "__tracepoint_")) { 1368 + WARN("%s: disabling unsupported tracepoint %s", 1369 + code_sym->name, reloc->sym->name + 13); 1370 + ret = 1; 1371 + continue; 1372 + } 1373 + 1374 + ERROR("%s+0x%lx: unsupported static branch key %s. 
Use static_key_enabled() instead", 1375 + code_sym->name, code_offset, reloc->sym->name); 1376 + return -1; 1377 + } 1378 + 1379 + /* static call */ 1380 + if (strstarts(reloc->sym->name, "__SCK__tp_func_")) { 1381 + ret = 1; 1382 + continue; 1383 + } 1384 + 1385 + ERROR("%s()+0x%lx: unsupported static call key %s. Use KLP_STATIC_CALL() instead", 1386 + code_sym->name, code_offset, reloc->sym->name); 1387 + return -1; 1388 + } 1389 + 1390 + return ret; 1391 + } 1392 + 1393 + static int clone_special_section(struct elfs *e, struct section *patched_sec) 1394 + { 1395 + struct symbol *patched_sym; 1396 + 1397 + /* 1398 + * Extract all special section symbols (and their dependencies) which 1399 + * reference included functions. 1400 + */ 1401 + sec_for_each_sym(patched_sec, patched_sym) { 1402 + int ret; 1403 + 1404 + if (!is_object_sym(patched_sym)) 1405 + continue; 1406 + 1407 + if (!should_keep_special_sym(e->patched, patched_sym)) 1408 + continue; 1409 + 1410 + ret = validate_special_section_klp_reloc(e, patched_sym); 1411 + if (ret < 0) 1412 + return -1; 1413 + if (ret > 0) 1414 + continue; 1415 + 1416 + if (!clone_symbol(e, patched_sym, true)) 1417 + return -1; 1418 + } 1419 + 1420 + return 0; 1421 + } 1422 + 1423 + /* Extract only the needed bits from special sections */ 1424 + static int clone_special_sections(struct elfs *e) 1425 + { 1426 + struct section *patched_sec; 1427 + 1428 + if (create_fake_symbols(e->patched)) 1429 + return -1; 1430 + 1431 + for_each_sec(e->patched, patched_sec) { 1432 + if (is_special_section(patched_sec)) { 1433 + if (clone_special_section(e, patched_sec)) 1434 + return -1; 1435 + } 1436 + } 1437 + 1438 + return 0; 1439 + } 1440 + 1441 + /* 1442 + * Create __klp_objects and __klp_funcs sections which are intermediate 1443 + * sections provided as input to the patch module's init code for building the 1444 + * klp_patch, klp_object and klp_func structs for the livepatch API. 
1445 + */ 1446 + static int create_klp_sections(struct elfs *e) 1447 + { 1448 + size_t obj_size = sizeof(struct klp_object_ext); 1449 + size_t func_size = sizeof(struct klp_func_ext); 1450 + struct section *obj_sec, *funcs_sec, *str_sec; 1451 + struct symbol *funcs_sym, *str_sym, *sym; 1452 + char sym_name[SYM_NAME_LEN]; 1453 + unsigned int nr_funcs = 0; 1454 + const char *modname; 1455 + void *obj_data; 1456 + s64 addend; 1457 + 1458 + obj_sec = elf_create_section_pair(e->out, KLP_OBJECTS_SEC, obj_size, 0, 0); 1459 + if (!obj_sec) 1460 + return -1; 1461 + 1462 + funcs_sec = elf_create_section_pair(e->out, KLP_FUNCS_SEC, func_size, 0, 0); 1463 + if (!funcs_sec) 1464 + return -1; 1465 + 1466 + funcs_sym = elf_create_section_symbol(e->out, funcs_sec); 1467 + if (!funcs_sym) 1468 + return -1; 1469 + 1470 + str_sec = elf_create_section(e->out, KLP_STRINGS_SEC, 0, 0, 1471 + SHT_PROGBITS, 1, 1472 + SHF_ALLOC | SHF_STRINGS | SHF_MERGE); 1473 + if (!str_sec) 1474 + return -1; 1475 + 1476 + if (elf_add_string(e->out, str_sec, "") == -1) 1477 + return -1; 1478 + 1479 + str_sym = elf_create_section_symbol(e->out, str_sec); 1480 + if (!str_sym) 1481 + return -1; 1482 + 1483 + /* allocate klp_object_ext */ 1484 + obj_data = elf_add_data(e->out, obj_sec, NULL, obj_size); 1485 + if (!obj_data) 1486 + return -1; 1487 + 1488 + modname = find_modname(e); 1489 + if (!modname) 1490 + return -1; 1491 + 1492 + /* klp_object_ext.name */ 1493 + if (strcmp(modname, "vmlinux")) { 1494 + addend = elf_add_string(e->out, str_sec, modname); 1495 + if (addend == -1) 1496 + return -1; 1497 + 1498 + if (!elf_create_reloc(e->out, obj_sec, 1499 + offsetof(struct klp_object_ext, name), 1500 + str_sym, addend, R_ABS64)) 1501 + return -1; 1502 + } 1503 + 1504 + /* klp_object_ext.funcs */ 1505 + if (!elf_create_reloc(e->out, obj_sec, offsetof(struct klp_object_ext, funcs), 1506 + funcs_sym, 0, R_ABS64)) 1507 + return -1; 1508 + 1509 + for_each_sym(e->out, sym) { 1510 + unsigned long offset = nr_funcs * 
func_size; 1511 + unsigned long sympos; 1512 + void *func_data; 1513 + 1514 + if (!is_func_sym(sym) || sym->cold || !sym->clone || !sym->clone->changed) 1515 + continue; 1516 + 1517 + /* allocate klp_func_ext */ 1518 + func_data = elf_add_data(e->out, funcs_sec, NULL, func_size); 1519 + if (!func_data) 1520 + return -1; 1521 + 1522 + /* klp_func_ext.old_name */ 1523 + addend = elf_add_string(e->out, str_sec, sym->clone->twin->name); 1524 + if (addend == -1) 1525 + return -1; 1526 + 1527 + if (!elf_create_reloc(e->out, funcs_sec, 1528 + offset + offsetof(struct klp_func_ext, old_name), 1529 + str_sym, addend, R_ABS64)) 1530 + return -1; 1531 + 1532 + /* klp_func_ext.new_func */ 1533 + if (!elf_create_reloc(e->out, funcs_sec, 1534 + offset + offsetof(struct klp_func_ext, new_func), 1535 + sym, 0, R_ABS64)) 1536 + return -1; 1537 + 1538 + /* klp_func_ext.sympos */ 1539 + BUILD_BUG_ON(sizeof(sympos) != sizeof_field(struct klp_func_ext, sympos)); 1540 + sympos = find_sympos(e->orig, sym->clone->twin); 1541 + if (sympos == ULONG_MAX) 1542 + return -1; 1543 + memcpy(func_data + offsetof(struct klp_func_ext, sympos), &sympos, 1544 + sizeof_field(struct klp_func_ext, sympos)); 1545 + 1546 + nr_funcs++; 1547 + } 1548 + 1549 + /* klp_object_ext.nr_funcs */ 1550 + BUILD_BUG_ON(sizeof(nr_funcs) != sizeof_field(struct klp_object_ext, nr_funcs)); 1551 + memcpy(obj_data + offsetof(struct klp_object_ext, nr_funcs), &nr_funcs, 1552 + sizeof_field(struct klp_object_ext, nr_funcs)); 1553 + 1554 + /* 1555 + * Find callback pointers created by KLP_PRE_PATCH_CALLBACK() and 1556 + * friends, and add them to the klp object. 
1557 + */ 1558 + 1559 + if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_PRE_PATCH_PREFIX "%s", modname)) 1560 + return -1; 1561 + 1562 + sym = find_symbol_by_name(e->out, sym_name); 1563 + if (sym) { 1564 + struct reloc *reloc; 1565 + 1566 + reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset); 1567 + 1568 + if (!elf_create_reloc(e->out, obj_sec, 1569 + offsetof(struct klp_object_ext, callbacks) + 1570 + offsetof(struct klp_callbacks, pre_patch), 1571 + reloc->sym, reloc_addend(reloc), R_ABS64)) 1572 + return -1; 1573 + } 1574 + 1575 + if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_POST_PATCH_PREFIX "%s", modname)) 1576 + return -1; 1577 + 1578 + sym = find_symbol_by_name(e->out, sym_name); 1579 + if (sym) { 1580 + struct reloc *reloc; 1581 + 1582 + reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset); 1583 + 1584 + if (!elf_create_reloc(e->out, obj_sec, 1585 + offsetof(struct klp_object_ext, callbacks) + 1586 + offsetof(struct klp_callbacks, post_patch), 1587 + reloc->sym, reloc_addend(reloc), R_ABS64)) 1588 + return -1; 1589 + } 1590 + 1591 + if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_PRE_UNPATCH_PREFIX "%s", modname)) 1592 + return -1; 1593 + 1594 + sym = find_symbol_by_name(e->out, sym_name); 1595 + if (sym) { 1596 + struct reloc *reloc; 1597 + 1598 + reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset); 1599 + 1600 + if (!elf_create_reloc(e->out, obj_sec, 1601 + offsetof(struct klp_object_ext, callbacks) + 1602 + offsetof(struct klp_callbacks, pre_unpatch), 1603 + reloc->sym, reloc_addend(reloc), R_ABS64)) 1604 + return -1; 1605 + } 1606 + 1607 + if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_POST_UNPATCH_PREFIX "%s", modname)) 1608 + return -1; 1609 + 1610 + sym = find_symbol_by_name(e->out, sym_name); 1611 + if (sym) { 1612 + struct reloc *reloc; 1613 + 1614 + reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset); 1615 + 1616 + if (!elf_create_reloc(e->out, obj_sec, 1617 + offsetof(struct klp_object_ext, callbacks) + 1618 + 
offsetof(struct klp_callbacks, post_unpatch), 1619 + reloc->sym, reloc_addend(reloc), R_ABS64)) 1620 + return -1; 1621 + } 1622 + 1623 + return 0; 1624 + } 1625 + 1626 + /* 1627 + * Copy all .modinfo import_ns= tags to ensure all namespaced exported symbols 1628 + * can be accessed via normal relocs. 1629 + */ 1630 + static int copy_import_ns(struct elfs *e) 1631 + { 1632 + struct section *patched_sec, *out_sec = NULL; 1633 + char *import_ns, *data_end; 1634 + 1635 + patched_sec = find_section_by_name(e->patched, ".modinfo"); 1636 + if (!patched_sec) 1637 + return 0; 1638 + 1639 + import_ns = patched_sec->data->d_buf; 1640 + if (!import_ns) 1641 + return 0; 1642 + 1643 + for (data_end = import_ns + sec_size(patched_sec); 1644 + import_ns < data_end; 1645 + import_ns += strlen(import_ns) + 1) { 1646 + 1647 + import_ns = memmem(import_ns, data_end - import_ns, "import_ns=", 10); 1648 + if (!import_ns) 1649 + return 0; 1650 + 1651 + if (!out_sec) { 1652 + out_sec = find_section_by_name(e->out, ".modinfo"); 1653 + if (!out_sec) { 1654 + out_sec = elf_create_section(e->out, ".modinfo", 0, 1655 + patched_sec->sh.sh_entsize, 1656 + patched_sec->sh.sh_type, 1657 + patched_sec->sh.sh_addralign, 1658 + patched_sec->sh.sh_flags); 1659 + if (!out_sec) 1660 + return -1; 1661 + } 1662 + } 1663 + 1664 + if (!elf_add_data(e->out, out_sec, import_ns, strlen(import_ns) + 1)) 1665 + return -1; 1666 + } 1667 + 1668 + return 0; 1669 + } 1670 + 1671 + int cmd_klp_diff(int argc, const char **argv) 1672 + { 1673 + struct elfs e = {0}; 1674 + 1675 + argc = parse_options(argc, argv, klp_diff_options, klp_diff_usage, 0); 1676 + if (argc != 3) 1677 + usage_with_options(klp_diff_usage, klp_diff_options); 1678 + 1679 + objname = argv[0]; 1680 + 1681 + e.orig = elf_open_read(argv[0], O_RDONLY); 1682 + e.patched = elf_open_read(argv[1], O_RDONLY); 1683 + e.out = NULL; 1684 + 1685 + if (!e.orig || !e.patched) 1686 + return -1; 1687 + 1688 + if (read_exports()) 1689 + return -1; 1690 + 1691 + if 
(read_sym_checksums(e.orig)) 1692 + return -1; 1693 + 1694 + if (read_sym_checksums(e.patched)) 1695 + return -1; 1696 + 1697 + if (correlate_symbols(&e)) 1698 + return -1; 1699 + 1700 + if (mark_changed_functions(&e)) 1701 + return 0; 1702 + 1703 + e.out = elf_create_file(&e.orig->ehdr, argv[2]); 1704 + if (!e.out) 1705 + return -1; 1706 + 1707 + if (clone_included_functions(&e)) 1708 + return -1; 1709 + 1710 + if (clone_special_sections(&e)) 1711 + return -1; 1712 + 1713 + if (create_klp_sections(&e)) 1714 + return -1; 1715 + 1716 + if (copy_import_ns(&e)) 1717 + return -1; 1718 + 1719 + if (elf_write(e.out)) 1720 + return -1; 1721 + 1722 + return elf_close(e.out); 1723 + }
+168
tools/objtool/klp-post-link.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + /* 3 + * Read the intermediate KLP reloc/symbol representations created by klp diff 4 + * and convert them to the proper format required by livepatch. This needs to 5 + * run last to avoid linker wreckage. Linkers don't tend to handle the "two 6 + * rela sections for a single base section" case very well, nor do they like 7 + * SHN_LIVEPATCH. 8 + * 9 + * This is the final tool in the livepatch module generation pipeline: 10 + * 11 + * kernel builds -> objtool klp diff -> module link -> objtool klp post-link 12 + */ 13 + 14 + #include <fcntl.h> 15 + #include <gelf.h> 16 + #include <objtool/objtool.h> 17 + #include <objtool/warn.h> 18 + #include <objtool/klp.h> 19 + #include <objtool/util.h> 20 + #include <linux/livepatch_external.h> 21 + 22 + static int fix_klp_relocs(struct elf *elf) 23 + { 24 + struct section *symtab, *klp_relocs; 25 + 26 + klp_relocs = find_section_by_name(elf, KLP_RELOCS_SEC); 27 + if (!klp_relocs) 28 + return 0; 29 + 30 + symtab = find_section_by_name(elf, ".symtab"); 31 + if (!symtab) { 32 + ERROR("missing .symtab"); 33 + return -1; 34 + } 35 + 36 + for (int i = 0; i < sec_size(klp_relocs) / sizeof(struct klp_reloc); i++) { 37 + struct klp_reloc *klp_reloc; 38 + unsigned long klp_reloc_off; 39 + struct section *sec, *tmp, *klp_rsec; 40 + unsigned long offset; 41 + struct reloc *reloc; 42 + char sym_modname[64]; 43 + char rsec_name[SEC_NAME_LEN]; 44 + u64 addend; 45 + struct symbol *sym, *klp_sym; 46 + 47 + klp_reloc_off = i * sizeof(*klp_reloc); 48 + klp_reloc = klp_relocs->data->d_buf + klp_reloc_off; 49 + 50 + /* 51 + * Read __klp_relocs[i]: 52 + */ 53 + 54 + /* klp_reloc.sec_offset */ 55 + reloc = find_reloc_by_dest(elf, klp_relocs, 56 + klp_reloc_off + offsetof(struct klp_reloc, offset)); 57 + if (!reloc) { 58 + ERROR("malformed " KLP_RELOCS_SEC " section"); 59 + return -1; 60 + } 61 + 62 + sec = reloc->sym->sec; 63 + offset = reloc_addend(reloc); 64 + 65 + /* klp_reloc.sym */ 66 + 
reloc = find_reloc_by_dest(elf, klp_relocs, 67 + klp_reloc_off + offsetof(struct klp_reloc, sym)); 68 + if (!reloc) { 69 + ERROR("malformed " KLP_RELOCS_SEC " section"); 70 + return -1; 71 + } 72 + 73 + klp_sym = reloc->sym; 74 + addend = reloc_addend(reloc); 75 + 76 + /* symbol format: .klp.sym.modname.sym_name,sympos */ 77 + if (sscanf(klp_sym->name + strlen(KLP_SYM_PREFIX), "%55[^.]", sym_modname) != 1) 78 + ERROR("can't find modname in klp symbol '%s'", klp_sym->name); 79 + 80 + /* 81 + * Create the KLP rela: 82 + */ 83 + 84 + /* section format: .klp.rela.sec_objname.section_name */ 85 + if (snprintf_check(rsec_name, SEC_NAME_LEN, 86 + KLP_RELOC_SEC_PREFIX "%s.%s", 87 + sym_modname, sec->name)) 88 + return -1; 89 + 90 + klp_rsec = find_section_by_name(elf, rsec_name); 91 + if (!klp_rsec) { 92 + klp_rsec = elf_create_section(elf, rsec_name, 0, 93 + elf_rela_size(elf), 94 + SHT_RELA, elf_addr_size(elf), 95 + SHF_ALLOC | SHF_INFO_LINK | SHF_RELA_LIVEPATCH); 96 + if (!klp_rsec) 97 + return -1; 98 + 99 + klp_rsec->sh.sh_link = symtab->idx; 100 + klp_rsec->sh.sh_info = sec->idx; 101 + klp_rsec->base = sec; 102 + } 103 + 104 + tmp = sec->rsec; 105 + sec->rsec = klp_rsec; 106 + if (!elf_create_reloc(elf, sec, offset, klp_sym, addend, klp_reloc->type)) 107 + return -1; 108 + sec->rsec = tmp; 109 + 110 + /* 111 + * Fix up the corresponding KLP symbol: 112 + */ 113 + 114 + klp_sym->sym.st_shndx = SHN_LIVEPATCH; 115 + if (!gelf_update_sym(symtab->data, klp_sym->idx, &klp_sym->sym)) { 116 + ERROR_ELF("gelf_update_sym"); 117 + return -1; 118 + } 119 + 120 + /* 121 + * Disable the original non-KLP reloc by converting it to R_*_NONE: 122 + */ 123 + 124 + reloc = find_reloc_by_dest(elf, sec, offset); 125 + sym = reloc->sym; 126 + sym->sym.st_shndx = SHN_LIVEPATCH; 127 + set_reloc_type(elf, reloc, 0); 128 + if (!gelf_update_sym(symtab->data, sym->idx, &sym->sym)) { 129 + ERROR_ELF("gelf_update_sym"); 130 + return -1; 131 + } 132 + } 133 + 134 + return 0; 135 + } 136 + 137 + /* 
/*
 * This runs on the livepatch module after all other linking has been done.  It
 * converts the intermediate __klp_relocs section into proper KLP relocs to be
 * processed by livepatch.  This needs to run last to avoid linker wreckage.
 * Linkers don't tend to handle the "two rela sections for a single base
 * section" case very well, nor do they appreciate SHN_LIVEPATCH.
 */
int cmd_klp_post_link(int argc, const char **argv)
{
	struct elf *elf;

	/* skip the subcommand name */
	argc--;
	argv++;

	if (argc != 1) {
		/*
		 * FIX: dropped the leftover debug print of argc and corrected
		 * the usage string ("objtool link" -> "objtool klp post-link").
		 */
		fprintf(stderr, "usage: objtool klp post-link <file.ko>\n");
		return -1;
	}

	/* open read-write: the relocs and symbols are fixed up in place */
	elf = elf_open_read(argv[0], O_RDWR);
	if (!elf)
		return -1;

	if (fix_klp_relocs(elf))
		return -1;

	if (elf_write(elf))
		return -1;

	return elf_close(elf);
}
+41 -1
tools/objtool/objtool.c
/* NOTE(review): rendered diff hunk of tools/objtool/objtool.c — replaces the 'help' global with 'debug'/'indent', adds a top_level_dir() helper (derives a path 3 components above the objtool binary), and dispatches the 'klp' subcommand in main(); context elided by '···' markers, hunk kept verbatim. */
··· 16 16 #include <objtool/objtool.h> 17 17 #include <objtool/warn.h> 18 18 19 - bool help; 19 + bool debug; 20 + int indent; 20 21 21 22 static struct objtool_file file; 22 23 ··· 72 71 return 0; 73 72 } 74 73 74 + char *top_level_dir(const char *file) 75 + { 76 + ssize_t len, self_len, file_len; 77 + char self[PATH_MAX], *str; 78 + int i; 79 + 80 + len = readlink("/proc/self/exe", self, sizeof(self) - 1); 81 + if (len <= 0) 82 + return NULL; 83 + self[len] = '\0'; 84 + 85 + for (i = 0; i < 3; i++) { 86 + char *s = strrchr(self, '/'); 87 + if (!s) 88 + return NULL; 89 + *s = '\0'; 90 + } 91 + 92 + self_len = strlen(self); 93 + file_len = strlen(file); 94 + 95 + str = malloc(self_len + file_len + 2); 96 + if (!str) 97 + return NULL; 98 + 99 + memcpy(str, self, self_len); 100 + str[self_len] = '/'; 101 + strcpy(str + self_len + 1, file); 102 + 103 + return str; 104 + } 105 + 106 + 75 107 int main(int argc, const char **argv) 76 108 { 77 109 static const char *UNUSED = "OBJTOOL_NOT_IMPLEMENTED"; ··· 112 78 /* libsubcmd init */ 113 79 exec_cmd_init("objtool", UNUSED, UNUSED, UNUSED); 114 80 pager_init(UNUSED); 81 + 82 + if (argc > 1 && !strcmp(argv[1], "klp")) { 83 + argc--; 84 + argv++; 85 + return cmd_klp(argc, argv); 86 + } 115 87 116 88 return objtool_run(argc, argv); 117 89 }
-1
tools/objtool/orc_dump.c
/* NOTE(review): rendered diff hunk of tools/objtool/orc_dump.c — drops the <objtool/endianness.h> include; orc_dump()'s body is elided by '···', hunk kept verbatim. */
··· 8 8 #include <objtool/objtool.h> 9 9 #include <objtool/orc.h> 10 10 #include <objtool/warn.h> 11 - #include <objtool/endianness.h> 12 11 13 12 int orc_dump(const char *filename) 14 13 {
+6 -3
tools/objtool/orc_gen.c
/* NOTE(review): rendered diff hunk of tools/objtool/orc_gen.c — drops <objtool/endianness.h>, changes for_each_sec() to take file->elf, and adapts the .orc_unwind creation to the extended elf_create_section() signature (size, entsize, type, align, flags); enclosing function bodies elided by '···', hunk kept verbatim. */
··· 12 12 #include <objtool/check.h> 13 13 #include <objtool/orc.h> 14 14 #include <objtool/warn.h> 15 - #include <objtool/endianness.h> 16 15 17 16 struct orc_list_entry { 18 17 struct list_head list; ··· 56 57 57 58 /* Build a deduplicated list of ORC entries: */ 58 59 INIT_LIST_HEAD(&orc_list); 59 - for_each_sec(file, sec) { 60 + for_each_sec(file->elf, sec) { 60 61 struct orc_entry orc, prev_orc = {0}; 61 62 struct instruction *insn; 62 63 bool empty = true; ··· 126 127 return -1; 127 128 } 128 129 orc_sec = elf_create_section(file->elf, ".orc_unwind", 129 - sizeof(struct orc_entry), nr); 130 + nr * sizeof(struct orc_entry), 131 + sizeof(struct orc_entry), 132 + SHT_PROGBITS, 133 + 1, 134 + SHF_ALLOC); 130 135 if (!orc_sec) 131 136 return -1; 132 137
+5 -9
tools/objtool/special.c
/* NOTE(review): rendered diff hunk of tools/objtool/special.c — drops <objtool/endianness.h>, switches sh_size reads to the sec_size() helper, and simplifies get_alt_entry() handling (the 'skip this entry' positive-return case is removed; any nonzero now fails) — behavior change to confirm against get_alt_entry()'s new contract; surrounding function elided by '···', hunk kept verbatim. */
··· 15 15 #include <objtool/builtin.h> 16 16 #include <objtool/special.h> 17 17 #include <objtool/warn.h> 18 - #include <objtool/endianness.h> 19 18 20 19 struct special_entry { 21 20 const char *sec; ··· 132 133 struct section *sec; 133 134 unsigned int nr_entries; 134 135 struct special_alt *alt; 135 - int idx, ret; 136 + int idx; 136 137 137 138 INIT_LIST_HEAD(alts); 138 139 ··· 141 142 if (!sec) 142 143 continue; 143 144 144 - if (sec->sh.sh_size % entry->size != 0) { 145 + if (sec_size(sec) % entry->size != 0) { 145 146 ERROR("%s size not a multiple of %d", sec->name, entry->size); 146 147 return -1; 147 148 } 148 149 149 - nr_entries = sec->sh.sh_size / entry->size; 150 + nr_entries = sec_size(sec) / entry->size; 150 151 151 152 for (idx = 0; idx < nr_entries; idx++) { 152 153 alt = malloc(sizeof(*alt)); ··· 156 157 } 157 158 memset(alt, 0, sizeof(*alt)); 158 159 159 - ret = get_alt_entry(elf, entry, sec, idx, alt); 160 - if (ret > 0) 161 - continue; 162 - if (ret < 0) 163 - return ret; 160 + if (get_alt_entry(elf, entry, sec, idx, alt)) 161 + return -1; 164 162 165 163 list_add_tail(&alt->list, alts); 166 164 }
+2
tools/objtool/sync-check.sh
# NOTE(review): rendered diff hunk of tools/objtool/sync-check.sh — adds interval_tree_generic.h and livepatch_external.h to the list of kernel headers checked for drift against their tools/ copies; list context elided by '···', hunk kept verbatim.
··· 16 16 arch/x86/include/asm/emulate_prefix.h 17 17 arch/x86/lib/x86-opcode-map.txt 18 18 arch/x86/tools/gen-insn-attr-x86.awk 19 + include/linux/interval_tree_generic.h 20 + include/linux/livepatch_external.h 19 21 include/linux/static_call_types.h 20 22 " 21 23
+7
tools/objtool/weak.c
/* NOTE(review): rendered diff hunk of tools/objtool/weak.c — adds arch.h/builtin.h includes and a __weak cmd_klp() stub so builds without klp support report "klp unsupported" via the UNSUPPORTED() macro (whose definition is elided by '···'); hunk kept verbatim. */
··· 8 8 #include <stdbool.h> 9 9 #include <errno.h> 10 10 #include <objtool/objtool.h> 11 + #include <objtool/arch.h> 12 + #include <objtool/builtin.h> 11 13 12 14 #define UNSUPPORTED(name) \ 13 15 ({ \ ··· 25 23 int __weak orc_create(struct objtool_file *file) 26 24 { 27 25 UNSUPPORTED("ORC"); 26 + } 27 + 28 + int __weak cmd_klp(int argc, const char **argv) 29 + { 30 + UNSUPPORTED("klp"); 28 31 }