Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/inst: Define ppc_inst_t

In order to stop using 'struct ppc_inst' on PPC32,
define a ppc_inst_t typedef.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/fe5baa2c66fea9db05a8b300b3e8d2880a42596c.1638208156.git.christophe.leroy@csgroup.eu

Authored by Christophe Leroy; committed by Michael Ellerman.
c545b9f0 3261d99a

+112 -112
+9 -9
arch/powerpc/include/asm/code-patching.h
··· 24 24 25 25 bool is_offset_in_branch_range(long offset); 26 26 bool is_offset_in_cond_branch_range(long offset); 27 - int create_branch(struct ppc_inst *instr, const u32 *addr, 27 + int create_branch(ppc_inst_t *instr, const u32 *addr, 28 28 unsigned long target, int flags); 29 - int create_cond_branch(struct ppc_inst *instr, const u32 *addr, 29 + int create_cond_branch(ppc_inst_t *instr, const u32 *addr, 30 30 unsigned long target, int flags); 31 31 int patch_branch(u32 *addr, unsigned long target, int flags); 32 - int patch_instruction(u32 *addr, struct ppc_inst instr); 33 - int raw_patch_instruction(u32 *addr, struct ppc_inst instr); 32 + int patch_instruction(u32 *addr, ppc_inst_t instr); 33 + int raw_patch_instruction(u32 *addr, ppc_inst_t instr); 34 34 35 35 static inline unsigned long patch_site_addr(s32 *site) 36 36 { 37 37 return (unsigned long)site + *site; 38 38 } 39 39 40 - static inline int patch_instruction_site(s32 *site, struct ppc_inst instr) 40 + static inline int patch_instruction_site(s32 *site, ppc_inst_t instr) 41 41 { 42 42 return patch_instruction((u32 *)patch_site_addr(site), instr); 43 43 } ··· 58 58 return modify_instruction((unsigned int *)patch_site_addr(site), clr, set); 59 59 } 60 60 61 - int instr_is_relative_branch(struct ppc_inst instr); 62 - int instr_is_relative_link_branch(struct ppc_inst instr); 61 + int instr_is_relative_branch(ppc_inst_t instr); 62 + int instr_is_relative_link_branch(ppc_inst_t instr); 63 63 unsigned long branch_target(const u32 *instr); 64 - int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src); 65 - extern bool is_conditional_branch(struct ppc_inst instr); 64 + int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src); 65 + bool is_conditional_branch(ppc_inst_t instr); 66 66 #ifdef CONFIG_PPC_BOOK3E_64 67 67 void __patch_exception(int exc, unsigned long addr); 68 68 #define patch_exception(exc, name) do { \
+2 -2
arch/powerpc/include/asm/hw_breakpoint.h
··· 56 56 return cpu_has_feature(CPU_FTR_DAWR1) ? 2 : 1; 57 57 } 58 58 59 - bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr, 59 + bool wp_check_constraints(struct pt_regs *regs, ppc_inst_t instr, 60 60 unsigned long ea, int type, int size, 61 61 struct arch_hw_breakpoint *info); 62 62 63 - void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr, 63 + void wp_get_instr_detail(struct pt_regs *regs, ppc_inst_t *instr, 64 64 int *type, int *size, unsigned long *ea); 65 65 66 66 #ifdef CONFIG_HAVE_HW_BREAKPOINT
+18 -18
arch/powerpc/include/asm/inst.h
··· 8 8 ({ \ 9 9 long __gui_ret; \ 10 10 u32 __user *__gui_ptr = (u32 __user *)ptr; \ 11 - struct ppc_inst __gui_inst; \ 11 + ppc_inst_t __gui_inst; \ 12 12 unsigned int __prefix, __suffix; \ 13 13 \ 14 14 __chk_user_ptr(ptr); \ ··· 34 34 * Instruction data type for POWER 35 35 */ 36 36 37 - struct ppc_inst { 37 + typedef struct { 38 38 u32 val; 39 39 #ifdef CONFIG_PPC64 40 40 u32 suffix; 41 41 #endif 42 - } __packed; 42 + } __packed ppc_inst_t; 43 43 44 - static inline u32 ppc_inst_val(struct ppc_inst x) 44 + static inline u32 ppc_inst_val(ppc_inst_t x) 45 45 { 46 46 return x.val; 47 47 } 48 48 49 - static inline int ppc_inst_primary_opcode(struct ppc_inst x) 49 + static inline int ppc_inst_primary_opcode(ppc_inst_t x) 50 50 { 51 51 return ppc_inst_val(x) >> 26; 52 52 } 53 53 54 - #define ppc_inst(x) ((struct ppc_inst){ .val = (x) }) 54 + #define ppc_inst(x) ((ppc_inst_t){ .val = (x) }) 55 55 56 56 #ifdef CONFIG_PPC64 57 - #define ppc_inst_prefix(x, y) ((struct ppc_inst){ .val = (x), .suffix = (y) }) 57 + #define ppc_inst_prefix(x, y) ((ppc_inst_t){ .val = (x), .suffix = (y) }) 58 58 59 - static inline u32 ppc_inst_suffix(struct ppc_inst x) 59 + static inline u32 ppc_inst_suffix(ppc_inst_t x) 60 60 { 61 61 return x.suffix; 62 62 } ··· 64 64 #else 65 65 #define ppc_inst_prefix(x, y) ((void)y, ppc_inst(x)) 66 66 67 - static inline u32 ppc_inst_suffix(struct ppc_inst x) 67 + static inline u32 ppc_inst_suffix(ppc_inst_t x) 68 68 { 69 69 return 0; 70 70 } 71 71 72 72 #endif /* CONFIG_PPC64 */ 73 73 74 - static inline struct ppc_inst ppc_inst_read(const u32 *ptr) 74 + static inline ppc_inst_t ppc_inst_read(const u32 *ptr) 75 75 { 76 76 if (IS_ENABLED(CONFIG_PPC64) && (*ptr >> 26) == OP_PREFIX) 77 77 return ppc_inst_prefix(*ptr, *(ptr + 1)); ··· 79 79 return ppc_inst(*ptr); 80 80 } 81 81 82 - static inline bool ppc_inst_prefixed(struct ppc_inst x) 82 + static inline bool ppc_inst_prefixed(ppc_inst_t x) 83 83 { 84 84 return IS_ENABLED(CONFIG_PPC64) && 
ppc_inst_primary_opcode(x) == OP_PREFIX; 85 85 } 86 86 87 - static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x) 87 + static inline ppc_inst_t ppc_inst_swab(ppc_inst_t x) 88 88 { 89 89 return ppc_inst_prefix(swab32(ppc_inst_val(x)), swab32(ppc_inst_suffix(x))); 90 90 } 91 91 92 - static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y) 92 + static inline bool ppc_inst_equal(ppc_inst_t x, ppc_inst_t y) 93 93 { 94 94 if (ppc_inst_val(x) != ppc_inst_val(y)) 95 95 return false; ··· 98 98 return ppc_inst_suffix(x) == ppc_inst_suffix(y); 99 99 } 100 100 101 - static inline int ppc_inst_len(struct ppc_inst x) 101 + static inline int ppc_inst_len(ppc_inst_t x) 102 102 { 103 103 return ppc_inst_prefixed(x) ? 8 : 4; 104 104 } ··· 109 109 */ 110 110 static inline u32 *ppc_inst_next(u32 *location, u32 *value) 111 111 { 112 - struct ppc_inst tmp; 112 + ppc_inst_t tmp; 113 113 114 114 tmp = ppc_inst_read(value); 115 115 116 116 return (void *)location + ppc_inst_len(tmp); 117 117 } 118 118 119 - static inline unsigned long ppc_inst_as_ulong(struct ppc_inst x) 119 + static inline unsigned long ppc_inst_as_ulong(ppc_inst_t x) 120 120 { 121 121 if (IS_ENABLED(CONFIG_PPC32)) 122 122 return ppc_inst_val(x); ··· 128 128 129 129 #define PPC_INST_STR_LEN sizeof("00000000 00000000") 130 130 131 - static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], struct ppc_inst x) 131 + static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], ppc_inst_t x) 132 132 { 133 133 if (ppc_inst_prefixed(x)) 134 134 sprintf(str, "%08x %08x", ppc_inst_val(x), ppc_inst_suffix(x)); ··· 145 145 __str; \ 146 146 }) 147 147 148 - int copy_inst_from_kernel_nofault(struct ppc_inst *inst, u32 *src); 148 + int copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src); 149 149 150 150 #endif /* _ASM_POWERPC_INST_H */
+2 -2
arch/powerpc/include/asm/sstep.h
··· 145 145 * otherwise. 146 146 */ 147 147 extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, 148 - struct ppc_inst instr); 148 + ppc_inst_t instr); 149 149 150 150 /* 151 151 * Emulate an instruction that can be executed just by updating ··· 162 162 * 0 if it could not be emulated, or -1 for an instruction that 163 163 * should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.). 164 164 */ 165 - extern int emulate_step(struct pt_regs *regs, struct ppc_inst instr); 165 + int emulate_step(struct pt_regs *regs, ppc_inst_t instr); 166 166 167 167 /* 168 168 * Emulate a load or store instruction by reading/writing the
+2 -2
arch/powerpc/kernel/align.c
··· 105 105 * so we don't need the address swizzling. 106 106 */ 107 107 static int emulate_spe(struct pt_regs *regs, unsigned int reg, 108 - struct ppc_inst ppc_instr) 108 + ppc_inst_t ppc_instr) 109 109 { 110 110 union { 111 111 u64 ll; ··· 300 300 301 301 int fix_alignment(struct pt_regs *regs) 302 302 { 303 - struct ppc_inst instr; 303 + ppc_inst_t instr; 304 304 struct instruction_op op; 305 305 int r, type; 306 306
+1 -1
arch/powerpc/kernel/epapr_paravirt.c
··· 37 37 return -1; 38 38 39 39 for (i = 0; i < (len / 4); i++) { 40 - struct ppc_inst inst = ppc_inst(be32_to_cpu(insts[i])); 40 + ppc_inst_t inst = ppc_inst(be32_to_cpu(insts[i])); 41 41 patch_instruction(epapr_hypercall_start + i, inst); 42 42 #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) 43 43 patch_instruction(epapr_ev_idle_start + i, inst);
+2 -2
arch/powerpc/kernel/hw_breakpoint.c
··· 523 523 524 524 static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp, 525 525 struct arch_hw_breakpoint **info, int *hit, 526 - struct ppc_inst instr) 526 + ppc_inst_t instr) 527 527 { 528 528 int i; 529 529 int stepped; ··· 616 616 int hit[HBP_NUM_MAX] = {0}; 617 617 int nr_hit = 0; 618 618 bool ptrace_bp = false; 619 - struct ppc_inst instr = ppc_inst(0); 619 + ppc_inst_t instr = ppc_inst(0); 620 620 int type = 0; 621 621 int size = 0; 622 622 unsigned long ea;
+2 -2
arch/powerpc/kernel/hw_breakpoint_constraints.c
··· 80 80 * Return true if the event is valid wrt dawr configuration, 81 81 * including extraneous exception. Otherwise return false. 82 82 */ 83 - bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr, 83 + bool wp_check_constraints(struct pt_regs *regs, ppc_inst_t instr, 84 84 unsigned long ea, int type, int size, 85 85 struct arch_hw_breakpoint *info) 86 86 { ··· 127 127 return false; 128 128 } 129 129 130 - void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr, 130 + void wp_get_instr_detail(struct pt_regs *regs, ppc_inst_t *instr, 131 131 int *type, int *size, unsigned long *ea) 132 132 { 133 133 struct instruction_op op;
+2 -2
arch/powerpc/kernel/kprobes.c
··· 124 124 { 125 125 int ret = 0; 126 126 struct kprobe *prev; 127 - struct ppc_inst insn = ppc_inst_read(p->addr); 127 + ppc_inst_t insn = ppc_inst_read(p->addr); 128 128 129 129 if ((unsigned long)p->addr & 0x03) { 130 130 printk("Attempt to register kprobe at an unaligned address\n"); ··· 244 244 static int try_to_emulate(struct kprobe *p, struct pt_regs *regs) 245 245 { 246 246 int ret; 247 - struct ppc_inst insn = ppc_inst_read(p->ainsn.insn); 247 + ppc_inst_t insn = ppc_inst_read(p->ainsn.insn); 248 248 249 249 /* regs->nip is also adjusted if emulate_step returns 1 */ 250 250 ret = emulate_step(regs, insn);
+1 -1
arch/powerpc/kernel/mce_power.c
··· 455 455 * in real-mode is tricky and can lead to recursive 456 456 * faults 457 457 */ 458 - struct ppc_inst instr; 458 + ppc_inst_t instr; 459 459 unsigned long pfn, instr_addr; 460 460 struct instruction_op op; 461 461 struct pt_regs tmp = *regs;
+2 -2
arch/powerpc/kernel/optprobes.c
··· 153 153 154 154 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) 155 155 { 156 - struct ppc_inst branch_op_callback, branch_emulate_step, temp; 156 + ppc_inst_t branch_op_callback, branch_emulate_step, temp; 157 157 unsigned long op_callback_addr, emulate_step_addr; 158 158 kprobe_opcode_t *buff; 159 159 long b_offset; ··· 269 269 270 270 void arch_optimize_kprobes(struct list_head *oplist) 271 271 { 272 - struct ppc_inst instr; 272 + ppc_inst_t instr; 273 273 struct optimized_kprobe *op; 274 274 struct optimized_kprobe *tmp; 275 275
+1 -1
arch/powerpc/kernel/process.c
··· 628 628 { 629 629 struct arch_hw_breakpoint null_brk = {0}; 630 630 struct arch_hw_breakpoint *info; 631 - struct ppc_inst instr = ppc_inst(0); 631 + ppc_inst_t instr = ppc_inst(0); 632 632 int type = 0; 633 633 int size = 0; 634 634 unsigned long ea;
+1 -1
arch/powerpc/kernel/setup_32.c
··· 75 75 notrace void __init machine_init(u64 dt_ptr) 76 76 { 77 77 u32 *addr = (u32 *)patch_site_addr(&patch__memset_nocache); 78 - struct ppc_inst insn; 78 + ppc_inst_t insn; 79 79 80 80 /* Configure static keys first, now that we're relocated. */ 81 81 setup_feature_keys();
+27 -27
arch/powerpc/kernel/trace/ftrace.c
··· 41 41 #define NUM_FTRACE_TRAMPS 8 42 42 static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS]; 43 43 44 - static struct ppc_inst 44 + static ppc_inst_t 45 45 ftrace_call_replace(unsigned long ip, unsigned long addr, int link) 46 46 { 47 - struct ppc_inst op; 47 + ppc_inst_t op; 48 48 49 49 addr = ppc_function_entry((void *)addr); 50 50 ··· 55 55 } 56 56 57 57 static int 58 - ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new) 58 + ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new) 59 59 { 60 - struct ppc_inst replaced; 60 + ppc_inst_t replaced; 61 61 62 62 /* 63 63 * Note: ··· 90 90 */ 91 91 static int test_24bit_addr(unsigned long ip, unsigned long addr) 92 92 { 93 - struct ppc_inst op; 93 + ppc_inst_t op; 94 94 addr = ppc_function_entry((void *)addr); 95 95 96 96 /* use the create_branch to verify that this offset can be branched */ 97 97 return create_branch(&op, (u32 *)ip, addr, 0) == 0; 98 98 } 99 99 100 - static int is_bl_op(struct ppc_inst op) 100 + static int is_bl_op(ppc_inst_t op) 101 101 { 102 102 return (ppc_inst_val(op) & 0xfc000003) == 0x48000001; 103 103 } 104 104 105 - static int is_b_op(struct ppc_inst op) 105 + static int is_b_op(ppc_inst_t op) 106 106 { 107 107 return (ppc_inst_val(op) & 0xfc000003) == 0x48000000; 108 108 } 109 109 110 - static unsigned long find_bl_target(unsigned long ip, struct ppc_inst op) 110 + static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op) 111 111 { 112 112 int offset; 113 113 ··· 127 127 { 128 128 unsigned long entry, ptr, tramp; 129 129 unsigned long ip = rec->ip; 130 - struct ppc_inst op, pop; 130 + ppc_inst_t op, pop; 131 131 132 132 /* read where this goes */ 133 133 if (copy_inst_from_kernel_nofault(&op, (void *)ip)) { ··· 221 221 __ftrace_make_nop(struct module *mod, 222 222 struct dyn_ftrace *rec, unsigned long addr) 223 223 { 224 - struct ppc_inst op; 224 + ppc_inst_t op; 225 225 unsigned long ip = rec->ip; 226 226 unsigned long tramp, ptr; 227 
227 ··· 262 262 static unsigned long find_ftrace_tramp(unsigned long ip) 263 263 { 264 264 int i; 265 - struct ppc_inst instr; 265 + ppc_inst_t instr; 266 266 267 267 /* 268 268 * We have the compiler generated long_branch tramps at the end ··· 300 300 static int setup_mcount_compiler_tramp(unsigned long tramp) 301 301 { 302 302 int i; 303 - struct ppc_inst op; 303 + ppc_inst_t op; 304 304 unsigned long ptr; 305 - struct ppc_inst instr; 305 + ppc_inst_t instr; 306 306 static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS]; 307 307 308 308 /* Is this a known long jump tramp? */ ··· 367 367 static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr) 368 368 { 369 369 unsigned long tramp, ip = rec->ip; 370 - struct ppc_inst op; 370 + ppc_inst_t op; 371 371 372 372 /* Read where this goes */ 373 373 if (copy_inst_from_kernel_nofault(&op, (void *)ip)) { ··· 407 407 struct dyn_ftrace *rec, unsigned long addr) 408 408 { 409 409 unsigned long ip = rec->ip; 410 - struct ppc_inst old, new; 410 + ppc_inst_t old, new; 411 411 412 412 /* 413 413 * If the calling address is more that 24 bits away, ··· 460 460 */ 461 461 #ifndef CONFIG_MPROFILE_KERNEL 462 462 static int 463 - expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1) 463 + expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1) 464 464 { 465 465 /* 466 466 * We expect to see: ··· 478 478 } 479 479 #else 480 480 static int 481 - expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1) 481 + expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1) 482 482 { 483 483 /* look for patched "NOP" on ppc64 with -mprofile-kernel */ 484 484 if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()))) ··· 490 490 static int 491 491 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 492 492 { 493 - struct ppc_inst op[2]; 494 - struct ppc_inst instr; 493 + ppc_inst_t op[2]; 494 + ppc_inst_t instr; 495 495 void *ip = (void *)rec->ip; 496 496 unsigned long 
entry, ptr, tramp; 497 497 struct module *mod = rec->arch.mod; ··· 559 559 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 560 560 { 561 561 int err; 562 - struct ppc_inst op; 562 + ppc_inst_t op; 563 563 u32 *ip = (u32 *)rec->ip; 564 564 struct module *mod = rec->arch.mod; 565 565 unsigned long tramp; ··· 609 609 610 610 static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) 611 611 { 612 - struct ppc_inst op; 612 + ppc_inst_t op; 613 613 void *ip = (void *)rec->ip; 614 614 unsigned long tramp, entry, ptr; 615 615 ··· 657 657 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 658 658 { 659 659 unsigned long ip = rec->ip; 660 - struct ppc_inst old, new; 660 + ppc_inst_t old, new; 661 661 662 662 /* 663 663 * If the calling address is more that 24 bits away, ··· 696 696 __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 697 697 unsigned long addr) 698 698 { 699 - struct ppc_inst op; 699 + ppc_inst_t op; 700 700 unsigned long ip = rec->ip; 701 701 unsigned long entry, ptr, tramp; 702 702 struct module *mod = rec->arch.mod; ··· 790 790 unsigned long addr) 791 791 { 792 792 unsigned long ip = rec->ip; 793 - struct ppc_inst old, new; 793 + ppc_inst_t old, new; 794 794 795 795 /* 796 796 * If the calling address is more that 24 bits away, ··· 830 830 int ftrace_update_ftrace_func(ftrace_func_t func) 831 831 { 832 832 unsigned long ip = (unsigned long)(&ftrace_call); 833 - struct ppc_inst old, new; 833 + ppc_inst_t old, new; 834 834 int ret; 835 835 836 836 old = ppc_inst_read((u32 *)&ftrace_call); ··· 915 915 unsigned long ip = (unsigned long)(&ftrace_graph_call); 916 916 unsigned long addr = (unsigned long)(&ftrace_graph_caller); 917 917 unsigned long stub = (unsigned long)(&ftrace_graph_stub); 918 - struct ppc_inst old, new; 918 + ppc_inst_t old, new; 919 919 920 920 old = ftrace_call_replace(ip, stub, 0); 921 921 new = ftrace_call_replace(ip, addr, 0); ··· 928 928 unsigned long ip = (unsigned 
long)(&ftrace_graph_call); 929 929 unsigned long addr = (unsigned long)(&ftrace_graph_caller); 930 930 unsigned long stub = (unsigned long)(&ftrace_graph_stub); 931 - struct ppc_inst old, new; 931 + ppc_inst_t old, new; 932 932 933 933 old = ftrace_call_replace(ip, addr, 0); 934 934 new = ftrace_call_replace(ip, stub, 0);
+1 -1
arch/powerpc/kernel/vecemu.c
··· 261 261 262 262 int emulate_altivec(struct pt_regs *regs) 263 263 { 264 - struct ppc_inst instr; 264 + ppc_inst_t instr; 265 265 unsigned int i, word; 266 266 unsigned int va, vb, vc, vd; 267 267 vector128 *vrs;
+19 -19
arch/powerpc/lib/code-patching.c
··· 18 18 #include <asm/setup.h> 19 19 #include <asm/inst.h> 20 20 21 - static int __patch_instruction(u32 *exec_addr, struct ppc_inst instr, u32 *patch_addr) 21 + static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr) 22 22 { 23 23 if (!ppc_inst_prefixed(instr)) { 24 24 u32 val = ppc_inst_val(instr); ··· 39 39 return -EFAULT; 40 40 } 41 41 42 - int raw_patch_instruction(u32 *addr, struct ppc_inst instr) 42 + int raw_patch_instruction(u32 *addr, ppc_inst_t instr) 43 43 { 44 44 return __patch_instruction(addr, instr, addr); 45 45 } ··· 141 141 return 0; 142 142 } 143 143 144 - static int do_patch_instruction(u32 *addr, struct ppc_inst instr) 144 + static int do_patch_instruction(u32 *addr, ppc_inst_t instr) 145 145 { 146 146 int err; 147 147 u32 *patch_addr = NULL; ··· 180 180 } 181 181 #else /* !CONFIG_STRICT_KERNEL_RWX */ 182 182 183 - static int do_patch_instruction(u32 *addr, struct ppc_inst instr) 183 + static int do_patch_instruction(u32 *addr, ppc_inst_t instr) 184 184 { 185 185 return raw_patch_instruction(addr, instr); 186 186 } 187 187 188 188 #endif /* CONFIG_STRICT_KERNEL_RWX */ 189 189 190 - int patch_instruction(u32 *addr, struct ppc_inst instr) 190 + int patch_instruction(u32 *addr, ppc_inst_t instr) 191 191 { 192 192 /* Make sure we aren't patching a freed init section */ 193 193 if (init_mem_is_free && init_section_contains(addr, 4)) { ··· 200 200 201 201 int patch_branch(u32 *addr, unsigned long target, int flags) 202 202 { 203 - struct ppc_inst instr; 203 + ppc_inst_t instr; 204 204 205 205 create_branch(&instr, addr, target, flags); 206 206 return patch_instruction(addr, instr); ··· 237 237 * Helper to check if a given instruction is a conditional branch 238 238 * Derived from the conditional checks in analyse_instr() 239 239 */ 240 - bool is_conditional_branch(struct ppc_inst instr) 240 + bool is_conditional_branch(ppc_inst_t instr) 241 241 { 242 242 unsigned int opcode = ppc_inst_primary_opcode(instr); 243 243 ··· 255 
255 } 256 256 NOKPROBE_SYMBOL(is_conditional_branch); 257 257 258 - int create_branch(struct ppc_inst *instr, const u32 *addr, 258 + int create_branch(ppc_inst_t *instr, const u32 *addr, 259 259 unsigned long target, int flags) 260 260 { 261 261 long offset; ··· 275 275 return 0; 276 276 } 277 277 278 - int create_cond_branch(struct ppc_inst *instr, const u32 *addr, 278 + int create_cond_branch(ppc_inst_t *instr, const u32 *addr, 279 279 unsigned long target, int flags) 280 280 { 281 281 long offset; ··· 294 294 return 0; 295 295 } 296 296 297 - static unsigned int branch_opcode(struct ppc_inst instr) 297 + static unsigned int branch_opcode(ppc_inst_t instr) 298 298 { 299 299 return ppc_inst_primary_opcode(instr) & 0x3F; 300 300 } 301 301 302 - static int instr_is_branch_iform(struct ppc_inst instr) 302 + static int instr_is_branch_iform(ppc_inst_t instr) 303 303 { 304 304 return branch_opcode(instr) == 18; 305 305 } 306 306 307 - static int instr_is_branch_bform(struct ppc_inst instr) 307 + static int instr_is_branch_bform(ppc_inst_t instr) 308 308 { 309 309 return branch_opcode(instr) == 16; 310 310 } 311 311 312 - int instr_is_relative_branch(struct ppc_inst instr) 312 + int instr_is_relative_branch(ppc_inst_t instr) 313 313 { 314 314 if (ppc_inst_val(instr) & BRANCH_ABSOLUTE) 315 315 return 0; ··· 317 317 return instr_is_branch_iform(instr) || instr_is_branch_bform(instr); 318 318 } 319 319 320 - int instr_is_relative_link_branch(struct ppc_inst instr) 320 + int instr_is_relative_link_branch(ppc_inst_t instr) 321 321 { 322 322 return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK); 323 323 } ··· 364 364 return 0; 365 365 } 366 366 367 - int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src) 367 + int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src) 368 368 { 369 369 unsigned long target; 370 370 target = branch_target(src); ··· 417 417 static void __init test_branch_iform(void) 418 418 { 419 
419 int err; 420 - struct ppc_inst instr; 420 + ppc_inst_t instr; 421 421 u32 tmp[2]; 422 422 u32 *iptr = tmp; 423 423 unsigned long addr = (unsigned long)tmp; ··· 499 499 { 500 500 u32 *iptr; 501 501 unsigned long dest; 502 - struct ppc_inst instr; 502 + ppc_inst_t instr; 503 503 504 504 /* Check we can create a function call */ 505 505 iptr = (u32 *)ppc_function_entry(test_trampoline); ··· 513 513 { 514 514 int err; 515 515 unsigned long addr; 516 - struct ppc_inst instr; 516 + ppc_inst_t instr; 517 517 u32 tmp[2]; 518 518 u32 *iptr = tmp; 519 519 unsigned int flags; ··· 591 591 { 592 592 unsigned long addr; 593 593 void *p, *q; 594 - struct ppc_inst instr; 594 + ppc_inst_t instr; 595 595 void *buf; 596 596 597 597 buf = vmalloc(PAGE_ALIGN(0x2000000 + 1));
+2 -2
arch/powerpc/lib/feature-fixups.c
··· 47 47 static int patch_alt_instruction(u32 *src, u32 *dest, u32 *alt_start, u32 *alt_end) 48 48 { 49 49 int err; 50 - struct ppc_inst instr; 50 + ppc_inst_t instr; 51 51 52 52 instr = ppc_inst_read(src); 53 53 ··· 624 624 static void do_final_fixups(void) 625 625 { 626 626 #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE) 627 - struct ppc_inst inst; 627 + ppc_inst_t inst; 628 628 u32 *src, *dest, *end; 629 629 630 630 if (PHYSICAL_START == 0)
+2 -2
arch/powerpc/lib/sstep.c
··· 1354 1354 * otherwise. 1355 1355 */ 1356 1356 int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, 1357 - struct ppc_inst instr) 1357 + ppc_inst_t instr) 1358 1358 { 1359 1359 #ifdef CONFIG_PPC64 1360 1360 unsigned int suffixopcode, prefixtype, prefix_r; ··· 3578 3578 * or -1 if the instruction is one that should not be stepped, 3579 3579 * such as an rfid, or a mtmsrd that would clear MSR_RI. 3580 3580 */ 3581 - int emulate_step(struct pt_regs *regs, struct ppc_inst instr) 3581 + int emulate_step(struct pt_regs *regs, ppc_inst_t instr) 3582 3582 { 3583 3583 struct instruction_op op; 3584 3584 int r, err, type;
+5 -5
arch/powerpc/lib/test_emulate_step.c
··· 792 792 #ifdef CONFIG_VSX 793 793 static void __init test_plxvp_pstxvp(void) 794 794 { 795 - struct ppc_inst instr; 795 + ppc_inst_t instr; 796 796 struct pt_regs regs; 797 797 union { 798 798 vector128 a; ··· 906 906 struct { 907 907 char *descr; 908 908 unsigned long flags; 909 - struct ppc_inst instr; 909 + ppc_inst_t instr; 910 910 struct pt_regs regs; 911 911 } subtests[MAX_SUBTESTS + 1]; 912 912 }; ··· 1600 1600 }; 1601 1601 1602 1602 static int __init emulate_compute_instr(struct pt_regs *regs, 1603 - struct ppc_inst instr, 1603 + ppc_inst_t instr, 1604 1604 bool negative) 1605 1605 { 1606 1606 int analysed; ··· 1627 1627 } 1628 1628 1629 1629 static int __init execute_compute_instr(struct pt_regs *regs, 1630 - struct ppc_inst instr) 1630 + ppc_inst_t instr) 1631 1631 { 1632 1632 extern int exec_instr(struct pt_regs *regs); 1633 1633 ··· 1658 1658 struct compute_test *test; 1659 1659 struct pt_regs *regs, exp, got; 1660 1660 unsigned int i, j, k; 1661 - struct ppc_inst instr; 1661 + ppc_inst_t instr; 1662 1662 bool ignore_gpr, ignore_xer, ignore_ccr, passed, rc, negative; 1663 1663 1664 1664 for (i = 0; i < ARRAY_SIZE(compute_tests); i++) {
+1 -1
arch/powerpc/mm/maccess.c
··· 12 12 return is_kernel_addr((unsigned long)unsafe_src); 13 13 } 14 14 15 - int copy_inst_from_kernel_nofault(struct ppc_inst *inst, u32 *src) 15 + int copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src) 16 16 { 17 17 unsigned int val, suffix; 18 18 int err;
+1 -1
arch/powerpc/perf/8xx-pmu.c
··· 153 153 154 154 static void mpc8xx_pmu_del(struct perf_event *event, int flags) 155 155 { 156 - struct ppc_inst insn = ppc_inst(PPC_RAW_MFSPR(10, SPRN_SPRG_SCRATCH2)); 156 + ppc_inst_t insn = ppc_inst(PPC_RAW_MFSPR(10, SPRN_SPRG_SCRATCH2)); 157 157 158 158 mpc8xx_pmu_read(event); 159 159
+7 -7
arch/powerpc/xmon/xmon.c
··· 125 125 static int cmds(struct pt_regs *); 126 126 static int mread(unsigned long, void *, int); 127 127 static int mwrite(unsigned long, void *, int); 128 - static int mread_instr(unsigned long, struct ppc_inst *); 128 + static int mread_instr(unsigned long, ppc_inst_t *); 129 129 static int handle_fault(struct pt_regs *); 130 130 static void byterev(unsigned char *, int); 131 131 static void memex(void); ··· 908 908 static void insert_bpts(void) 909 909 { 910 910 int i; 911 - struct ppc_inst instr, instr2; 911 + ppc_inst_t instr, instr2; 912 912 struct bpt *bp, *bp2; 913 913 914 914 bp = bpts; ··· 988 988 { 989 989 int i; 990 990 struct bpt *bp; 991 - struct ppc_inst instr; 991 + ppc_inst_t instr; 992 992 993 993 bp = bpts; 994 994 for (i = 0; i < NBPTS; ++i, ++bp) { ··· 1204 1204 */ 1205 1205 static int do_step(struct pt_regs *regs) 1206 1206 { 1207 - struct ppc_inst instr; 1207 + ppc_inst_t instr; 1208 1208 int stepped; 1209 1209 1210 1210 force_enable_xmon(); ··· 1459 1459 */ 1460 1460 static long check_bp_loc(unsigned long addr) 1461 1461 { 1462 - struct ppc_inst instr; 1462 + ppc_inst_t instr; 1463 1463 1464 1464 addr &= ~3; 1465 1465 if (!is_kernel_addr(addr)) { ··· 2306 2306 } 2307 2307 2308 2308 static int 2309 - mread_instr(unsigned long adrs, struct ppc_inst *instr) 2309 + mread_instr(unsigned long adrs, ppc_inst_t *instr) 2310 2310 { 2311 2311 volatile int n; 2312 2312 ··· 3028 3028 { 3029 3029 int nr, dotted; 3030 3030 unsigned long first_adr; 3031 - struct ppc_inst inst, last_inst = ppc_inst(0); 3031 + ppc_inst_t inst, last_inst = ppc_inst(0); 3032 3032 3033 3033 dotted = 0; 3034 3034 for (first_adr = adr; count > 0; --count, adr += ppc_inst_len(inst)) {
+2 -2
arch/powerpc/xmon/xmon_bpts.h
··· 5 5 #define NBPTS 256 6 6 #ifndef __ASSEMBLY__ 7 7 #include <asm/inst.h> 8 - #define BPT_SIZE (sizeof(struct ppc_inst) * 2) 9 - #define BPT_WORDS (BPT_SIZE / sizeof(struct ppc_inst)) 8 + #define BPT_SIZE (sizeof(ppc_inst_t) * 2) 9 + #define BPT_WORDS (BPT_SIZE / sizeof(ppc_inst_t)) 10 10 11 11 extern unsigned int bpt_table[NBPTS * BPT_WORDS]; 12 12 #endif /* __ASSEMBLY__ */