/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TEXT_PATCHING_H
#define _ASM_X86_TEXT_PATCHING_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

/*
 * Currently, the max observed size in the kernel code is
 * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, both of which are 5 bytes.
 * Raise it if needed.
 */
#define POKE_MAX_OPCODE_SIZE	5

struct text_poke_loc {
	void *addr;
	int len;
	s32 rel32;
	u8 opcode;
	const u8 text[POKE_MAX_OPCODE_SIZE];
};

extern void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side effect: any interrupt handler running between save and restore will
 * also be able to write to read-only pages.
 *
 * Warning:
 * In the UP case, code patching is safe only if NMI and MCE handlers are
 * stopped and no thread can be preempted inside the instructions being
 * modified (so no iret to an invalid instruction is possible), or if the
 * instructions are changed atomically from one consistent state to another.
 * On the local CPU you must still be protected against NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);
extern void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries);
extern void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
			       const void *opcode, size_t len, const void *emulate);
extern int after_bootmem;
extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;

#ifndef CONFIG_UML_X86
static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

#define INT3_INSN_SIZE		1
#define INT3_INSN_OPCODE	0xCC

#define CALL_INSN_SIZE		5
#define CALL_INSN_OPCODE	0xE8

#define JMP32_INSN_SIZE		5
#define JMP32_INSN_OPCODE	0xE9

#define JMP8_INSN_SIZE		2
#define JMP8_INSN_OPCODE	0xEB

static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * The int3 handler in entry_64.S adds a gap between the stack
	 * where the breakpoint happened and the saving of pt_regs.
	 * We can extend the original stack because of this gap.
	 * See the idtentry macro's create_gap option.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	/*
	 * regs->ip points just past the INT3 byte, so the patched
	 * instruction starts at regs->ip - INT3_INSN_SIZE.  Push the
	 * return address the emulated 5-byte CALL would have pushed,
	 * then branch to the call target.
	 */
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}
#endif /* !CONFIG_UML_X86 */

#endif /* _ASM_X86_TEXT_PATCHING_H */
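
/*
 * Illustrative usage sketch, not part of the upstream header: how a caller
 * such as the jump label code might drive text_poke_bp() at v5.5 to turn a
 * live 5-byte patch site into a near jump while other CPUs may be executing
 * it.  The function example_poke_jmp32() and its site/target parameters are
 * hypothetical names for this sketch; a real caller must hold text_mutex.
 * Passing emulate == NULL makes the INT3 handler emulate the opcode being
 * written while the transition is in flight.
 */
#include <linux/string.h>	/* memcpy() */

static void __maybe_unused example_poke_jmp32(void *site, void *target)
{
	u8 insn[JMP32_INSN_SIZE];
	s32 rel32 = (s32)((long)target - ((long)site + JMP32_INSN_SIZE));

	insn[0] = JMP32_INSN_OPCODE;			/* 0xE9: JMP rel32 */
	memcpy(&insn[1], &rel32, sizeof(rel32));	/* displacement */

	/*
	 * text_poke_bp() writes INT3 over the first byte, syncs all CPUs,
	 * writes the remaining bytes, syncs again, then restores the first
	 * byte.  A CPU that hits the temporary INT3 lands in
	 * poke_int3_handler(), which uses int3_emulate_jmp() to take the
	 * new branch as if the final instruction were already in place.
	 */
	text_poke_bp(site, insn, JMP32_INSN_SIZE, NULL);
}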
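
/*
 * A second sketch, equally hypothetical: patching several sites with a
 * single INT3/sync/fixup cycle via the batch interface, serialized by
 * text_mutex like the sketch above.  The names, the fixed tp[] capacity,
 * and the assumption that sites[] arrives in ascending address order
 * (poke_int3_handler() binary-searches the vector when more than one entry
 * is in flight) are all part of the sketch, not guarantees of this header.
 */
#include <linux/bug.h>		/* WARN_ON() */

#define EXAMPLE_MAX_SITES	8	/* arbitrary sketch capacity */

static void __maybe_unused
example_poke_jmp32_batch(void **sites, void **targets, unsigned int n)
{
	static struct text_poke_loc tp[EXAMPLE_MAX_SITES];
	unsigned int i;

	if (WARN_ON(n > EXAMPLE_MAX_SITES))
		return;

	for (i = 0; i < n; i++) {
		u8 insn[JMP32_INSN_SIZE];
		s32 rel32 = (s32)((long)targets[i] -
				  ((long)sites[i] + JMP32_INSN_SIZE));

		insn[0] = JMP32_INSN_OPCODE;
		memcpy(&insn[1], &rel32, sizeof(rel32));
		text_poke_loc_init(&tp[i], sites[i], insn, JMP32_INSN_SIZE, NULL);
	}

	/* One breakpoint/sync/fixup sequence covers all n sites. */
	text_poke_bp_batch(tp, n);
}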