/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CODE_PATCHING_H
#define _ASM_POWERPC_CODE_PATCHING_H

/*
 * Copyright 2008, Michael Ellerman, IBM Corporation.
 */

#include <asm/types.h>
#include <asm/ppc-opcode.h>
#include <linux/string.h>
#include <linux/kallsyms.h>
#include <asm/asm-compat.h>
#include <asm/inst.h>

/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK	0x1
#define BRANCH_ABSOLUTE	0x2

bool is_offset_in_branch_range(long offset);
bool is_offset_in_cond_branch_range(long offset);
int create_branch(ppc_inst_t *instr, const u32 *addr,
		  unsigned long target, int flags);
int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
		       unsigned long target, int flags);
int patch_branch(u32 *addr, unsigned long target, int flags);
int patch_instruction(u32 *addr, ppc_inst_t instr);
int raw_patch_instruction(u32 *addr, ppc_inst_t instr);

/* A patch site holds a self-relative s32 offset to the instruction to patch */
static inline unsigned long patch_site_addr(s32 *site)
{
	return (unsigned long)site + *site;
}

static inline int patch_instruction_site(s32 *site, ppc_inst_t instr)
{
	return patch_instruction((u32 *)patch_site_addr(site), instr);
}

static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
	return patch_branch((u32 *)patch_site_addr(site), target, flags);
}

static inline int modify_instruction(unsigned int *addr, unsigned int clr,
				     unsigned int set)
{
	return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
}

static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
{
	return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}

static inline unsigned int branch_opcode(ppc_inst_t instr)
{
	return ppc_inst_primary_opcode(instr) & 0x3F;
}

static inline int instr_is_branch_iform(ppc_inst_t instr)
{
	return branch_opcode(instr) == 18;
}

static inline int instr_is_branch_bform(ppc_inst_t instr)
{
	return branch_opcode(instr) == 16;
}

int instr_is_relative_branch(ppc_inst_t instr);
int instr_is_relative_link_branch(ppc_inst_t instr);
unsigned long branch_target(const u32 *instr);
int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src);
bool is_conditional_branch(ppc_inst_t instr);
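/*
 * A minimal illustrative sketch, not part of the upstream interface; the
 * helper name below is hypothetical. It shows how the declarations above fit
 * together: patch_branch() behaves roughly like building the branch with
 * create_branch() and then writing it with patch_instruction().
 */
static inline int example_patch_branch(u32 *addr, unsigned long target, int flags)
{
	ppc_inst_t instr;
	int err;

	/* Encode b/ba/bl/bla to target; fails if target is out of branch range */
	err = create_branch(&instr, addr, target, flags);
	if (err)
		return err;

	/* Safely write the new instruction over the one at addr */
	return patch_instruction(addr, instr);
}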
#define OP_RT_RA_MASK	0xffff0000UL
#define LIS_R2		(PPC_RAW_LIS(_R2, 0))
#define ADDIS_R2_R12	(PPC_RAW_ADDIS(_R2, _R12, 0))
#define ADDI_R2_R2	(PPC_RAW_ADDI(_R2, _R2, 0))

static inline unsigned long ppc_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
	u32 *insn = func;

	/*
	 * A PPC64 ABIv2 function may have a local and a global entry
	 * point. We need to use the local entry point when patching
	 * functions, so identify and step over the global entry point
	 * sequence.
	 *
	 * The global entry point sequence is always of the form:
	 *
	 *	addis	r2,r12,XXXX
	 *	addi	r2,r2,XXXX
	 *
	 * A linker optimisation may convert the addis to lis:
	 *
	 *	lis	r2,XXXX
	 *	addi	r2,r2,XXXX
	 */
	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
	    ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
		return (unsigned long)(insn + 2);
	else
		return (unsigned long)func;
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * On PPC64 ABIv1 the function pointer actually points to the
	 * function's descriptor. The first entry in the descriptor is the
	 * address of the function text.
	 */
	return ((struct func_desc *)func)->addr;
#else
	return (unsigned long)func;
#endif
}

static inline unsigned long ppc_global_function_entry(void *func)
{
#ifdef PPC64_ELF_ABI_v2
	/* On PPC64 ABIv2 the global entry point is the function address itself */
	return (unsigned long)func;
#else
	/* In all other cases there is no change vs ppc_function_entry() */
	return ppc_function_entry(func);
#endif
}

/*
 * Wrapper around kallsyms_lookup_name() to return the function entry address:
 * - For ABIv1, we lookup the dot variant.
 * - For ABIv2, we return the local entry point.
 */
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
	unsigned long addr;
#ifdef PPC64_ELF_ABI_v1
	/* check for dot variant */
	char dot_name[1 + KSYM_NAME_LEN];
	bool dot_appended = false;

	if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
		return 0;

	if (name[0] != '.') {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
		dot_appended = true;
	} else {
		dot_name[0] = '\0';
		strlcat(dot_name, name, sizeof(dot_name));
	}
	addr = kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended)
		/* Let's try the original non-dot symbol lookup */
		addr = kallsyms_lookup_name(name);
#elif defined(PPC64_ELF_ABI_v2)
	addr = kallsyms_lookup_name(name);
	if (addr)
		addr = ppc_function_entry((void *)addr);
#else
	addr = kallsyms_lookup_name(name);
#endif
	return addr;
}

#ifdef CONFIG_PPC64
/*
 * Some instruction encodings commonly used in dynamic ftracing
 * and function live patching.
 */

/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
#ifdef PPC64_ELF_ABI_v2
#define R2_STACK_OFFSET		24
#else
#define R2_STACK_OFFSET		40
#endif

#define PPC_INST_LD_TOC		PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)

/* usually preceded by a mflr r0 */
#define PPC_INST_STD_LR		PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
#endif /* CONFIG_PPC64 */

#endif /* _ASM_POWERPC_CODE_PATCHING_H */
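/*
 * A minimal usage sketch, not part of the header above; the symbol and
 * variable names are hypothetical, and such code would normally live in a .c
 * file. It shows how the helpers are meant to be combined when redirecting a
 * call site: resolve the symbol to a patchable entry point (the local entry
 * point on ELF ABIv2, the dot symbol's text address on ABIv1), then patch a
 * "bl" to it:
 *
 *	unsigned long entry = ppc_kallsyms_lookup_name("my_target_func");
 *
 *	if (entry)
 *		err = patch_branch(call_site, entry, BRANCH_SET_LINK);
 */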