Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Add prefixed instructions to instruction data type

For powerpc64, redefine the ppc_inst type so both word and prefixed
instructions can be represented. On powerpc32 the type will remain the
same. Update places that had assumed instructions to be 4 bytes long.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Reviewed-by: Alistair Popple <alistair@popple.id.au>
[mpe: Rework the get_user_inst() macros to be parameterised, and don't
assign to the dest if an error occurred. Use CONFIG_PPC64 not
__powerpc64__ in a few places. Address other comments from
Christophe. Fix some sparse complaints.]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200506034050.24806-24-jniethe5@gmail.com

authored by

Jordan Niethe and committed by
Michael Ellerman
650b55b7 7a8818e0

+176 -18
+65 -7
arch/powerpc/include/asm/inst.h
··· 2 2 #ifndef _ASM_POWERPC_INST_H 3 3 #define _ASM_POWERPC_INST_H 4 4 5 + #include <asm/ppc-opcode.h> 6 + 5 7 /* 6 8 * Instruction data type for POWER 7 9 */ 8 10 9 11 struct ppc_inst { 10 12 u32 val; 13 + #ifdef CONFIG_PPC64 14 + u32 suffix; 15 + #endif 11 16 } __packed; 12 - 13 - #define ppc_inst(x) ((struct ppc_inst){ .val = x }) 14 17 15 18 static inline u32 ppc_inst_val(struct ppc_inst x) 16 19 { 17 20 return x.val; 18 21 } 19 22 20 - static inline int ppc_inst_len(struct ppc_inst x) 21 - { 22 - return sizeof(struct ppc_inst); 23 - } 24 - 25 23 static inline int ppc_inst_primary_opcode(struct ppc_inst x) 26 24 { 27 25 return ppc_inst_val(x) >> 26; 26 + } 27 + 28 + #ifdef CONFIG_PPC64 29 + #define ppc_inst(x) ((struct ppc_inst){ .val = (x), .suffix = 0xff }) 30 + 31 + #define ppc_inst_prefix(x, y) ((struct ppc_inst){ .val = (x), .suffix = (y) }) 32 + 33 + static inline u32 ppc_inst_suffix(struct ppc_inst x) 34 + { 35 + return x.suffix; 36 + } 37 + 38 + static inline bool ppc_inst_prefixed(struct ppc_inst x) 39 + { 40 + return (ppc_inst_primary_opcode(x) == 1) && ppc_inst_suffix(x) != 0xff; 41 + } 42 + 43 + static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x) 44 + { 45 + return ppc_inst_prefix(swab32(ppc_inst_val(x)), 46 + swab32(ppc_inst_suffix(x))); 47 + } 48 + 49 + static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr) 50 + { 51 + u32 val, suffix; 52 + 53 + val = *(u32 *)ptr; 54 + if ((val >> 26) == OP_PREFIX) { 55 + suffix = *((u32 *)ptr + 1); 56 + return ppc_inst_prefix(val, suffix); 57 + } else { 58 + return ppc_inst(val); 59 + } 60 + } 61 + 62 + static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y) 63 + { 64 + return *(u64 *)&x == *(u64 *)&y; 65 + } 66 + 67 + #else 68 + 69 + #define ppc_inst(x) ((struct ppc_inst){ .val = x }) 70 + 71 + static inline bool ppc_inst_prefixed(struct ppc_inst x) 72 + { 73 + return false; 74 + } 75 + 76 + static inline u32 ppc_inst_suffix(struct ppc_inst x) 77 + { 78 + return 0; 28 
79 } 29 80 30 81 static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x) ··· 91 40 static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y) 92 41 { 93 42 return ppc_inst_val(x) == ppc_inst_val(y); 43 + } 44 + 45 + #endif /* CONFIG_PPC64 */ 46 + 47 + static inline int ppc_inst_len(struct ppc_inst x) 48 + { 49 + return ppc_inst_prefixed(x) ? 8 : 4; 94 50 } 95 51 96 52 int probe_user_read_inst(struct ppc_inst *inst,
+1 -1
arch/powerpc/include/asm/kprobes.h
··· 43 43 extern kprobe_opcode_t optprobe_template_end[]; 44 44 45 45 /* Fixed instruction size for powerpc */ 46 - #define MAX_INSN_SIZE 1 46 + #define MAX_INSN_SIZE 2 47 47 #define MAX_OPTIMIZED_LENGTH sizeof(kprobe_opcode_t) /* 4 bytes */ 48 48 #define MAX_OPTINSN_SIZE (optprobe_template_end - optprobe_template_entry) 49 49 #define RELATIVEJUMP_SIZE sizeof(kprobe_opcode_t) /* 4 bytes */
+3
arch/powerpc/include/asm/ppc-opcode.h
··· 158 158 /* VMX Vector Store Instructions */ 159 159 #define OP_31_XOP_STVX 231 160 160 161 + /* Prefixed Instructions */ 162 + #define OP_PREFIX 1 163 + 161 164 #define OP_31 31 162 165 #define OP_LWZ 32 163 166 #define OP_STFS 52
+36
arch/powerpc/include/asm/uaccess.h
··· 105 105 #define __put_user_inatomic(x, ptr) \ 106 106 __put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) 107 107 108 + #ifdef CONFIG_PPC64 109 + 110 + #define ___get_user_instr(gu_op, dest, ptr) \ 111 + ({ \ 112 + long __gui_ret = 0; \ 113 + unsigned long __gui_ptr = (unsigned long)ptr; \ 114 + struct ppc_inst __gui_inst; \ 115 + unsigned int __prefix, __suffix; \ 116 + __gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr); \ 117 + if (__gui_ret == 0) { \ 118 + if ((__prefix >> 26) == OP_PREFIX) { \ 119 + __gui_ret = gu_op(__suffix, \ 120 + (unsigned int __user *)__gui_ptr + 1); \ 121 + __gui_inst = ppc_inst_prefix(__prefix, \ 122 + __suffix); \ 123 + } else { \ 124 + __gui_inst = ppc_inst(__prefix); \ 125 + } \ 126 + if (__gui_ret == 0) \ 127 + (dest) = __gui_inst; \ 128 + } \ 129 + __gui_ret; \ 130 + }) 131 + 132 + #define get_user_instr(x, ptr) \ 133 + ___get_user_instr(get_user, x, ptr) 134 + 135 + #define __get_user_instr(x, ptr) \ 136 + ___get_user_instr(__get_user, x, ptr) 137 + 138 + #define __get_user_instr_inatomic(x, ptr) \ 139 + ___get_user_instr(__get_user_inatomic, x, ptr) 140 + 141 + #else /* !CONFIG_PPC64 */ 108 142 #define get_user_instr(x, ptr) \ 109 143 get_user((x).val, (u32 __user *)(ptr)) 110 144 ··· 147 113 148 114 #define __get_user_instr_inatomic(x, ptr) \ 149 115 __get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32)) 116 + 117 + #endif /* CONFIG_PPC64 */ 150 118 151 119 extern long __put_user_bad(void); 152 120
+1 -1
arch/powerpc/include/asm/uprobes.h
··· 15 15 16 16 typedef ppc_opcode_t uprobe_opcode_t; 17 17 18 - #define MAX_UINSN_BYTES 4 18 + #define MAX_UINSN_BYTES 8 19 19 #define UPROBE_XOL_SLOT_BYTES (MAX_UINSN_BYTES) 20 20 21 21 /* The following alias is needed for reference from arch-agnostic code */
+1 -1
arch/powerpc/kernel/crash_dump.c
··· 46 46 * two instructions it doesn't require any registers. 47 47 */ 48 48 patch_instruction(p, ppc_inst(PPC_INST_NOP)); 49 - patch_branch(++p, addr + PHYSICAL_START, 0); 49 + patch_branch((void *)p + 4, addr + PHYSICAL_START, 0); 50 50 } 51 51 52 52 void __init setup_kdump_trampoline(void)
+4 -2
arch/powerpc/kernel/optprobes.c
··· 198 198 199 199 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) 200 200 { 201 - struct ppc_inst branch_op_callback, branch_emulate_step; 201 + struct ppc_inst branch_op_callback, branch_emulate_step, temp; 202 202 kprobe_opcode_t *op_callback_addr, *emulate_step_addr, *buff; 203 203 long b_offset; 204 204 unsigned long nip, size; ··· 282 282 /* 283 283 * 3. load instruction to be emulated into relevant register, and 284 284 */ 285 - patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX); 285 + temp = ppc_inst_read((struct ppc_inst *)p->ainsn.insn); 286 + patch_imm64_load_insns(ppc_inst_val(temp) | ((u64)ppc_inst_suffix(temp) << 32), 287 + 4, buff + TMPL_INSN_IDX); 286 288 287 289 /* 288 290 * 4. branch back from trampoline
+3
arch/powerpc/kernel/optprobes_head.S
··· 94 94 /* 2, Pass instruction to be emulated in r4 */ 95 95 nop 96 96 nop 97 + nop 98 + nop 99 + nop 97 100 98 101 .global optprobe_template_call_emulate 99 102 optprobe_template_call_emulate:
+12 -1
arch/powerpc/lib/code-patching.c
··· 24 24 { 25 25 int err = 0; 26 26 27 - __put_user_asm(ppc_inst_val(instr), patch_addr, err, "stw"); 27 + if (!ppc_inst_prefixed(instr)) { 28 + __put_user_asm(ppc_inst_val(instr), patch_addr, err, "stw"); 29 + } else { 30 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 31 + __put_user_asm((u64)ppc_inst_suffix(instr) << 32 | 32 + ppc_inst_val(instr), patch_addr, err, "std"); 33 + #else 34 + __put_user_asm((u64)ppc_inst_val(instr) << 32 | 35 + ppc_inst_suffix(instr), patch_addr, err, "std"); 36 + #endif 37 + } 38 + 28 39 if (err) 29 40 return err; 30 41
+3 -2
arch/powerpc/lib/feature-fixups.c
··· 84 84 src = alt_start; 85 85 dest = start; 86 86 87 - for (; src < alt_end; src++, dest++) { 87 + for (; src < alt_end; src = (void *)src + ppc_inst_len(ppc_inst_read(src)), 88 + (dest = (void *)dest + ppc_inst_len(ppc_inst_read(dest)))) { 88 89 if (patch_alt_instruction(src, dest, alt_start, alt_end)) 89 90 return 1; 90 91 } 91 92 92 - for (; dest < end; dest++) 93 + for (; dest < end; dest = (void *)dest + ppc_inst_len(ppc_inst(PPC_INST_NOP))) 93 94 raw_patch_instruction(dest, ppc_inst(PPC_INST_NOP)); 94 95 95 96 return 0;
+40
arch/powerpc/lib/inst.c
··· 4 4 */ 5 5 6 6 #include <linux/uaccess.h> 7 + #include <asm/disassemble.h> 7 8 #include <asm/inst.h> 9 + #include <asm/ppc-opcode.h> 8 10 11 + #ifdef CONFIG_PPC64 12 + int probe_user_read_inst(struct ppc_inst *inst, 13 + struct ppc_inst __user *nip) 14 + { 15 + unsigned int val, suffix; 16 + int err; 17 + 18 + err = probe_user_read(&val, nip, sizeof(val)); 19 + if (err) 20 + return err; 21 + if (get_op(val) == OP_PREFIX) { 22 + err = probe_user_read(&suffix, (void __user *)nip + 4, 4); 23 + *inst = ppc_inst_prefix(val, suffix); 24 + } else { 25 + *inst = ppc_inst(val); 26 + } 27 + return err; 28 + } 29 + 30 + int probe_kernel_read_inst(struct ppc_inst *inst, 31 + struct ppc_inst *src) 32 + { 33 + unsigned int val, suffix; 34 + int err; 35 + 36 + err = probe_kernel_read(&val, src, sizeof(val)); 37 + if (err) 38 + return err; 39 + if (get_op(val) == OP_PREFIX) { 40 + err = probe_kernel_read(&suffix, (void *)src + 4, 4); 41 + *inst = ppc_inst_prefix(val, suffix); 42 + } else { 43 + *inst = ppc_inst(val); 44 + } 45 + return err; 46 + } 47 + #else /* !CONFIG_PPC64 */ 9 48 int probe_user_read_inst(struct ppc_inst *inst, 10 49 struct ppc_inst __user *nip) 11 50 { ··· 70 31 71 32 return err; 72 33 } 34 + #endif /* CONFIG_PPC64 */
+3 -1
arch/powerpc/lib/sstep.c
··· 1169 1169 unsigned long int imm; 1170 1170 unsigned long int val, val2; 1171 1171 unsigned int mb, me, sh; 1172 - unsigned int word; 1172 + unsigned int word, suffix; 1173 1173 long ival; 1174 1174 1175 1175 word = ppc_inst_val(instr); 1176 + suffix = ppc_inst_suffix(instr); 1177 + 1176 1178 op->type = COMPUTE; 1177 1179 1178 1180 opcode = ppc_inst_primary_opcode(instr);
+2 -2
arch/powerpc/xmon/xmon.c
··· 758 758 759 759 /* Are we at the trap at bp->instr[1] for some bp? */ 760 760 bp = in_breakpoint_table(regs->nip, &offset); 761 - if (bp != NULL && offset == 4) { 762 - regs->nip = bp->address + 4; 761 + if (bp != NULL && (offset == 4 || offset == 8)) { 762 + regs->nip = bp->address + offset; 763 763 atomic_dec(&bp->ref_count); 764 764 return 1; 765 765 }
+2
arch/powerpc/xmon/xmon_bpts.S
··· 4 4 #include <asm/asm-offsets.h> 5 5 #include "xmon_bpts.h" 6 6 7 + /* Prefixed instructions can not cross 64 byte boundaries */ 8 + .align 6 7 9 .global bpt_table 8 10 bpt_table: 9 11 .space NBPTS * BPT_SIZE