Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: probes: Cleanup kprobes endianness conversions

The core kprobes code uses kprobe_opcode_t for the in-memory
representation of an instruction, using 'kprobe_opcode_t *' for XOL
slots. As arm64 instructions are always little-endian 32-bit values,
kprobe_opcode_t should be __le32, but at the moment kprobe_opcode_t
is typedef'd to u32.

Today there is no functional issue as we convert values via
cpu_to_le32() and le32_to_cpu() where necessary, but these conversions
are inconsistent with the types used, causing sparse warnings:

| CHECK arch/arm64/kernel/probes/kprobes.c
| arch/arm64/kernel/probes/kprobes.c:102:21: warning: cast to restricted __le32
| CHECK arch/arm64/kernel/probes/decode-insn.c
| arch/arm64/kernel/probes/decode-insn.c:122:46: warning: cast to restricted __le32
| arch/arm64/kernel/probes/decode-insn.c:124:50: warning: cast to restricted __le32
| arch/arm64/kernel/probes/decode-insn.c:136:31: warning: cast to restricted __le32

Improve this by making kprobe_opcode_t a typedef for __le32 and
consistently using this for pointers to executable instructions. With
this change we can rely on the type system to tell us where conversions
are necessary.

Since kprobe::opcode is changed from u32 to __le32, the existing
le32_to_cpu() conversion moves from the point this is initialized (in
arch_prepare_kprobe()) to the points this is consumed when passed to
a handler or text patching function. As kprobe::opcode isn't altered or
consumed elsewhere, this shouldn't result in a functional change.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20241008155851.801546-6-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

authored by

Mark Rutland and committed by
Catalin Marinas
dd0eb50e 6105c5d4

+8 -7
+2 -2
arch/arm64/include/asm/probes.h
··· 16 16 probes_handler_t *handler; 17 17 }; 18 18 #ifdef CONFIG_KPROBES 19 - typedef u32 kprobe_opcode_t; 19 + typedef __le32 kprobe_opcode_t; 20 20 struct arch_specific_insn { 21 21 struct arch_probe_insn api; 22 - probe_opcode_t *xol_insn; 22 + kprobe_opcode_t *xol_insn; 23 23 /* restore address after step xol */ 24 24 unsigned long xol_restore; 25 25 };
+1 -1
arch/arm64/kernel/probes/decode-insn.c
··· 134 134 { 135 135 enum probe_insn decoded; 136 136 probe_opcode_t insn = le32_to_cpu(*addr); 137 - probe_opcode_t *scan_end = NULL; 137 + kprobe_opcode_t *scan_end = NULL; 138 138 unsigned long size = 0, offset = 0; 139 139 struct arch_probe_insn *api = &asi->api; 140 140
+5 -4
arch/arm64/kernel/probes/kprobes.c
··· 64 64 * the BRK exception handler, so it is unnecessary to generate 65 65 * Contex-Synchronization-Event via ISB again. 66 66 */ 67 - aarch64_insn_patch_text_nosync(addr, p->opcode); 67 + aarch64_insn_patch_text_nosync(addr, le32_to_cpu(p->opcode)); 68 68 aarch64_insn_patch_text_nosync(addr + 1, BRK64_OPCODE_KPROBES_SS); 69 69 70 70 /* ··· 85 85 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 86 86 87 87 if (p->ainsn.api.handler) 88 - p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs); 88 + p->ainsn.api.handler(le32_to_cpu(p->opcode), (long)p->addr, regs); 89 89 90 90 /* single step simulated, now go for post processing */ 91 91 post_kprobe_handler(p, kcb, regs); ··· 99 99 return -EINVAL; 100 100 101 101 /* copy instruction */ 102 - p->opcode = le32_to_cpu(*p->addr); 102 + p->opcode = *p->addr; 103 103 104 104 if (search_exception_tables(probe_addr)) 105 105 return -EINVAL; ··· 142 142 void __kprobes arch_disarm_kprobe(struct kprobe *p) 143 143 { 144 144 void *addr = p->addr; 145 + u32 insn = le32_to_cpu(p->opcode); 145 146 146 - aarch64_insn_patch_text(&addr, &p->opcode, 1); 147 + aarch64_insn_patch_text(&addr, &insn, 1); 147 148 } 148 149 149 150 void __kprobes arch_remove_kprobe(struct kprobe *p)