Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: Move patching utilities out of instruction encoding/decoding

Files insn.[c|h] contain some functions used for instruction patching.
In order to reuse the instruction encoder/decoder, move the patching
utilities to their own file.

Signed-off-by: Julien Thierry <jthierry@redhat.com>
Link: https://lore.kernel.org/r/20210303170536.1838032-2-jthierry@redhat.com
[will: Include patching.h in insn.h to fix header mess; add __ASSEMBLY__ guards]
Signed-off-by: Will Deacon <will@kernel.org>

Signed-off-by: Will Deacon <will@kernel.org>

authored by

Julien Thierry and committed by
Will Deacon
5f154c4e c4681547

+168 -152
+1 -5
arch/arm64/include/asm/insn.h
··· 11 11 #include <linux/types.h> 12 12 13 13 #include <asm/alternative.h> 14 + #include <asm/patching.h> 14 15 15 16 #ifndef __ASSEMBLY__ 16 17 /* ··· 380 379 return aarch64_insn_is_adr(insn) || aarch64_insn_is_adrp(insn); 381 380 } 382 381 383 - int aarch64_insn_read(void *addr, u32 *insnp); 384 - int aarch64_insn_write(void *addr, u32 insn); 385 382 enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); 386 383 bool aarch64_insn_uses_literal(u32 insn); 387 384 bool aarch64_insn_is_branch(u32 insn); ··· 485 486 enum aarch64_insn_prfm_policy policy); 486 487 s32 aarch64_get_branch_offset(u32 insn); 487 488 u32 aarch64_set_branch_offset(u32 insn, s32 offset); 488 - 489 - int aarch64_insn_patch_text_nosync(void *addr, u32 insn); 490 - int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt); 491 489 492 490 s32 aarch64_insn_adrp_get_offset(u32 insn); 493 491 u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset);
+15
arch/arm64/include/asm/patching.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + #ifndef __ASM_PATCHING_H 3 + #define __ASM_PATCHING_H 4 + 5 + #include <linux/types.h> 6 + 7 + #ifndef __ASSEMBLY__ 8 + int aarch64_insn_read(void *addr, u32 *insnp); 9 + int aarch64_insn_write(void *addr, u32 insn); 10 + 11 + int aarch64_insn_patch_text_nosync(void *addr, u32 insn); 12 + int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt); 13 + #endif /* __ASSEMBLY__ */ 14 + 15 + #endif /* __ASM_PATCHING_H */
+1 -1
arch/arm64/kernel/Makefile
··· 22 22 return_address.o cpuinfo.o cpu_errata.o \ 23 23 cpufeature.o alternative.o cacheinfo.o \ 24 24 smp.o smp_spin_table.o topology.o smccc-call.o \ 25 - syscall.o proton-pack.o idreg-override.o 25 + syscall.o proton-pack.o idreg-override.o patching.o 26 26 27 27 targets += efi-entry.o 28 28
+3 -146
arch/arm64/kernel/insn.c
··· 7 7 */ 8 8 #include <linux/bitops.h> 9 9 #include <linux/bug.h> 10 - #include <linux/compiler.h> 11 - #include <linux/kernel.h> 12 - #include <linux/mm.h> 13 - #include <linux/smp.h> 14 - #include <linux/spinlock.h> 15 - #include <linux/stop_machine.h> 10 + #include <linux/printk.h> 11 + #include <linux/sizes.h> 16 12 #include <linux/types.h> 17 - #include <linux/uaccess.h> 18 13 19 - #include <asm/cacheflush.h> 20 14 #include <asm/debug-monitors.h> 21 - #include <asm/fixmap.h> 15 + #include <asm/errno.h> 22 16 #include <asm/insn.h> 23 17 #include <asm/kprobes.h> 24 - #include <asm/sections.h> 25 18 26 19 #define AARCH64_INSN_SF_BIT BIT(31) 27 20 #define AARCH64_INSN_N_BIT BIT(22) ··· 76 83 aarch64_insn_is_bcond(insn)); 77 84 } 78 85 79 - static DEFINE_RAW_SPINLOCK(patch_lock); 80 - 81 - static bool is_exit_text(unsigned long addr) 82 - { 83 - /* discarded with init text/data */ 84 - return system_state < SYSTEM_RUNNING && 85 - addr >= (unsigned long)__exittext_begin && 86 - addr < (unsigned long)__exittext_end; 87 - } 88 - 89 - static bool is_image_text(unsigned long addr) 90 - { 91 - return core_kernel_text(addr) || is_exit_text(addr); 92 - } 93 - 94 - static void __kprobes *patch_map(void *addr, int fixmap) 95 - { 96 - unsigned long uintaddr = (uintptr_t) addr; 97 - bool image = is_image_text(uintaddr); 98 - struct page *page; 99 - 100 - if (image) 101 - page = phys_to_page(__pa_symbol(addr)); 102 - else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) 103 - page = vmalloc_to_page(addr); 104 - else 105 - return addr; 106 - 107 - BUG_ON(!page); 108 - return (void *)set_fixmap_offset(fixmap, page_to_phys(page) + 109 - (uintaddr & ~PAGE_MASK)); 110 - } 111 - 112 - static void __kprobes patch_unmap(int fixmap) 113 - { 114 - clear_fixmap(fixmap); 115 - } 116 - /* 117 - * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always 118 - * little-endian. 
119 - */ 120 - int __kprobes aarch64_insn_read(void *addr, u32 *insnp) 121 - { 122 - int ret; 123 - __le32 val; 124 - 125 - ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE); 126 - if (!ret) 127 - *insnp = le32_to_cpu(val); 128 - 129 - return ret; 130 - } 131 - 132 - static int __kprobes __aarch64_insn_write(void *addr, __le32 insn) 133 - { 134 - void *waddr = addr; 135 - unsigned long flags = 0; 136 - int ret; 137 - 138 - raw_spin_lock_irqsave(&patch_lock, flags); 139 - waddr = patch_map(addr, FIX_TEXT_POKE0); 140 - 141 - ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE); 142 - 143 - patch_unmap(FIX_TEXT_POKE0); 144 - raw_spin_unlock_irqrestore(&patch_lock, flags); 145 - 146 - return ret; 147 - } 148 - 149 - int __kprobes aarch64_insn_write(void *addr, u32 insn) 150 - { 151 - return __aarch64_insn_write(addr, cpu_to_le32(insn)); 152 - } 153 - 154 86 bool __kprobes aarch64_insn_uses_literal(u32 insn) 155 87 { 156 88 /* ldr/ldrsw (literal), prfm */ ··· 103 185 aarch64_insn_is_blr(insn) || 104 186 aarch64_insn_is_blr_auth(insn) || 105 187 aarch64_insn_is_bcond(insn); 106 - } 107 - 108 - int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) 109 - { 110 - u32 *tp = addr; 111 - int ret; 112 - 113 - /* A64 instructions must be word aligned */ 114 - if ((uintptr_t)tp & 0x3) 115 - return -EINVAL; 116 - 117 - ret = aarch64_insn_write(tp, insn); 118 - if (ret == 0) 119 - __flush_icache_range((uintptr_t)tp, 120 - (uintptr_t)tp + AARCH64_INSN_SIZE); 121 - 122 - return ret; 123 - } 124 - 125 - struct aarch64_insn_patch { 126 - void **text_addrs; 127 - u32 *new_insns; 128 - int insn_cnt; 129 - atomic_t cpu_count; 130 - }; 131 - 132 - static int __kprobes aarch64_insn_patch_text_cb(void *arg) 133 - { 134 - int i, ret = 0; 135 - struct aarch64_insn_patch *pp = arg; 136 - 137 - /* The first CPU becomes master */ 138 - if (atomic_inc_return(&pp->cpu_count) == 1) { 139 - for (i = 0; ret == 0 && i < pp->insn_cnt; i++) 140 - ret = 
aarch64_insn_patch_text_nosync(pp->text_addrs[i], 141 - pp->new_insns[i]); 142 - /* Notify other processors with an additional increment. */ 143 - atomic_inc(&pp->cpu_count); 144 - } else { 145 - while (atomic_read(&pp->cpu_count) <= num_online_cpus()) 146 - cpu_relax(); 147 - isb(); 148 - } 149 - 150 - return ret; 151 - } 152 - 153 - int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) 154 - { 155 - struct aarch64_insn_patch patch = { 156 - .text_addrs = addrs, 157 - .new_insns = insns, 158 - .insn_cnt = cnt, 159 - .cpu_count = ATOMIC_INIT(0), 160 - }; 161 - 162 - if (cnt <= 0) 163 - return -EINVAL; 164 - 165 - return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch, 166 - cpu_online_mask); 167 188 } 168 189 169 190 static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
+148
arch/arm64/kernel/patching.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + #include <linux/kernel.h> 3 + #include <linux/mm.h> 4 + #include <linux/smp.h> 5 + #include <linux/spinlock.h> 6 + #include <linux/stop_machine.h> 7 + #include <linux/uaccess.h> 8 + 9 + #include <asm/cacheflush.h> 10 + #include <asm/fixmap.h> 11 + #include <asm/kprobes.h> 12 + #include <asm/sections.h> 13 + 14 + static DEFINE_RAW_SPINLOCK(patch_lock); 15 + 16 + static bool is_exit_text(unsigned long addr) 17 + { 18 + /* discarded with init text/data */ 19 + return system_state < SYSTEM_RUNNING && 20 + addr >= (unsigned long)__exittext_begin && 21 + addr < (unsigned long)__exittext_end; 22 + } 23 + 24 + static bool is_image_text(unsigned long addr) 25 + { 26 + return core_kernel_text(addr) || is_exit_text(addr); 27 + } 28 + 29 + static void __kprobes *patch_map(void *addr, int fixmap) 30 + { 31 + unsigned long uintaddr = (uintptr_t) addr; 32 + bool image = is_image_text(uintaddr); 33 + struct page *page; 34 + 35 + if (image) 36 + page = phys_to_page(__pa_symbol(addr)); 37 + else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) 38 + page = vmalloc_to_page(addr); 39 + else 40 + return addr; 41 + 42 + BUG_ON(!page); 43 + return (void *)set_fixmap_offset(fixmap, page_to_phys(page) + 44 + (uintaddr & ~PAGE_MASK)); 45 + } 46 + 47 + static void __kprobes patch_unmap(int fixmap) 48 + { 49 + clear_fixmap(fixmap); 50 + } 51 + /* 52 + * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always 53 + * little-endian. 
54 + */ 55 + int __kprobes aarch64_insn_read(void *addr, u32 *insnp) 56 + { 57 + int ret; 58 + __le32 val; 59 + 60 + ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE); 61 + if (!ret) 62 + *insnp = le32_to_cpu(val); 63 + 64 + return ret; 65 + } 66 + 67 + static int __kprobes __aarch64_insn_write(void *addr, __le32 insn) 68 + { 69 + void *waddr = addr; 70 + unsigned long flags = 0; 71 + int ret; 72 + 73 + raw_spin_lock_irqsave(&patch_lock, flags); 74 + waddr = patch_map(addr, FIX_TEXT_POKE0); 75 + 76 + ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE); 77 + 78 + patch_unmap(FIX_TEXT_POKE0); 79 + raw_spin_unlock_irqrestore(&patch_lock, flags); 80 + 81 + return ret; 82 + } 83 + 84 + int __kprobes aarch64_insn_write(void *addr, u32 insn) 85 + { 86 + return __aarch64_insn_write(addr, cpu_to_le32(insn)); 87 + } 88 + 89 + int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) 90 + { 91 + u32 *tp = addr; 92 + int ret; 93 + 94 + /* A64 instructions must be word aligned */ 95 + if ((uintptr_t)tp & 0x3) 96 + return -EINVAL; 97 + 98 + ret = aarch64_insn_write(tp, insn); 99 + if (ret == 0) 100 + __flush_icache_range((uintptr_t)tp, 101 + (uintptr_t)tp + AARCH64_INSN_SIZE); 102 + 103 + return ret; 104 + } 105 + 106 + struct aarch64_insn_patch { 107 + void **text_addrs; 108 + u32 *new_insns; 109 + int insn_cnt; 110 + atomic_t cpu_count; 111 + }; 112 + 113 + static int __kprobes aarch64_insn_patch_text_cb(void *arg) 114 + { 115 + int i, ret = 0; 116 + struct aarch64_insn_patch *pp = arg; 117 + 118 + /* The first CPU becomes master */ 119 + if (atomic_inc_return(&pp->cpu_count) == 1) { 120 + for (i = 0; ret == 0 && i < pp->insn_cnt; i++) 121 + ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i], 122 + pp->new_insns[i]); 123 + /* Notify other processors with an additional increment. 
*/ 124 + atomic_inc(&pp->cpu_count); 125 + } else { 126 + while (atomic_read(&pp->cpu_count) <= num_online_cpus()) 127 + cpu_relax(); 128 + isb(); 129 + } 130 + 131 + return ret; 132 + } 133 + 134 + int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) 135 + { 136 + struct aarch64_insn_patch patch = { 137 + .text_addrs = addrs, 138 + .new_insns = insns, 139 + .insn_cnt = cnt, 140 + .cpu_count = ATOMIC_INIT(0), 141 + }; 142 + 143 + if (cnt <= 0) 144 + return -EINVAL; 145 + 146 + return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch, 147 + cpu_online_mask); 148 + }