Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86, cpu: Clean up and unify the NOP selection infrastructure

Clean up and unify the NOP selection infrastructure:

- Make the atomic 5-byte NOP a part of the selection system.
- Pick NOPs once during early boot and then be done with it.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Baron <jbaron@redhat.com>
Link: http://lkml.kernel.org/r/1303166160-10315-3-git-send-email-hpa@linux.intel.com

+191 -162
+0 -8
arch/x86/include/asm/alternative.h
··· 191 191 extern void *text_poke_smp(void *addr, const void *opcode, size_t len); 192 192 extern void text_poke_smp_batch(struct text_poke_param *params, int n); 193 193 194 - #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) 195 - #define IDEAL_NOP_SIZE_5 5 196 - extern unsigned char ideal_nop5[IDEAL_NOP_SIZE_5]; 197 - extern void arch_init_ideal_nop5(void); 198 - #else 199 - static inline void arch_init_ideal_nop5(void) {} 200 - #endif 201 - 202 194 #endif /* _ASM_X86_ALTERNATIVE_H */
+84 -62
arch/x86/include/asm/nops.h
··· 1 1 #ifndef _ASM_X86_NOPS_H 2 2 #define _ASM_X86_NOPS_H 3 3 4 - /* Define nops for use with alternative() */ 4 + /* 5 + * Define nops for use with alternative() and for tracing. 6 + * 7 + * *_NOP5_ATOMIC must be a single instruction. 8 + */ 9 + 10 + #define NOP_DS_PREFIX 0x3e 5 11 6 12 /* generic versions from gas 7 13 1: nop ··· 19 13 6: leal 0x00000000(%esi),%esi 20 14 7: leal 0x00000000(,%esi,1),%esi 21 15 */ 22 - #define GENERIC_NOP1 ".byte 0x90\n" 23 - #define GENERIC_NOP2 ".byte 0x89,0xf6\n" 24 - #define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" 25 - #define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" 26 - #define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 27 - #define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" 28 - #define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" 29 - #define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7 16 + #define GENERIC_NOP1 0x90 17 + #define GENERIC_NOP2 0x89,0xf6 18 + #define GENERIC_NOP3 0x8d,0x76,0x00 19 + #define GENERIC_NOP4 0x8d,0x74,0x26,0x00 20 + #define GENERIC_NOP5 GENERIC_NOP1,GENERIC_NOP4 21 + #define GENERIC_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00 22 + #define GENERIC_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00 23 + #define GENERIC_NOP8 GENERIC_NOP1,GENERIC_NOP7 24 + #define GENERIC_NOP5_ATOMIC NOP_DS_PREFIX,GENERIC_NOP4 30 25 31 26 /* Opteron 64bit nops 32 27 1: nop ··· 36 29 4: osp osp osp nop 37 30 */ 38 31 #define K8_NOP1 GENERIC_NOP1 39 - #define K8_NOP2 ".byte 0x66,0x90\n" 40 - #define K8_NOP3 ".byte 0x66,0x66,0x90\n" 41 - #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" 42 - #define K8_NOP5 K8_NOP3 K8_NOP2 43 - #define K8_NOP6 K8_NOP3 K8_NOP3 44 - #define K8_NOP7 K8_NOP4 K8_NOP3 45 - #define K8_NOP8 K8_NOP4 K8_NOP4 32 + #define K8_NOP2 0x66,K8_NOP1 33 + #define K8_NOP3 0x66,K8_NOP2 34 + #define K8_NOP4 0x66,K8_NOP3 35 + #define K8_NOP5 K8_NOP3,K8_NOP2 36 + #define K8_NOP6 K8_NOP3,K8_NOP3 37 + #define K8_NOP7 K8_NOP4,K8_NOP3 38 + #define K8_NOP8 K8_NOP4,K8_NOP4 39 + #define K8_NOP5_ATOMIC 0x66,K8_NOP4 46 40 47 41 
/* K7 nops 48 42 uses eax dependencies (arbitrary choice) ··· 55 47 7: leal 0x00000000(,%eax,1),%eax 56 48 */ 57 49 #define K7_NOP1 GENERIC_NOP1 58 - #define K7_NOP2 ".byte 0x8b,0xc0\n" 59 - #define K7_NOP3 ".byte 0x8d,0x04,0x20\n" 60 - #define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" 61 - #define K7_NOP5 K7_NOP4 ASM_NOP1 62 - #define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" 63 - #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" 64 - #define K7_NOP8 K7_NOP7 ASM_NOP1 50 + #define K7_NOP2 0x8b,0xc0 51 + #define K7_NOP3 0x8d,0x04,0x20 52 + #define K7_NOP4 0x8d,0x44,0x20,0x00 53 + #define K7_NOP5 K7_NOP4,K7_NOP1 54 + #define K7_NOP6 0x8d,0x80,0,0,0,0 55 + #define K7_NOP7 0x8D,0x04,0x05,0,0,0,0 56 + #define K7_NOP8 K7_NOP7,K7_NOP1 57 + #define K7_NOP5_ATOMIC NOP_DS_PREFIX,K7_NOP4 65 58 66 59 /* P6 nops 67 60 uses eax dependencies (Intel-recommended choice) ··· 78 69 There is kernel code that depends on this. 79 70 */ 80 71 #define P6_NOP1 GENERIC_NOP1 81 - #define P6_NOP2 ".byte 0x66,0x90\n" 82 - #define P6_NOP3 ".byte 0x0f,0x1f,0x00\n" 83 - #define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n" 84 - #define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n" 85 - #define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n" 86 - #define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n" 87 - #define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n" 72 + #define P6_NOP2 0x66,0x90 73 + #define P6_NOP3 0x0f,0x1f,0x00 74 + #define P6_NOP4 0x0f,0x1f,0x40,0 75 + #define P6_NOP5 0x0f,0x1f,0x44,0x00,0 76 + #define P6_NOP6 0x66,0x0f,0x1f,0x44,0x00,0 77 + #define P6_NOP7 0x0f,0x1f,0x80,0,0,0,0 78 + #define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0 79 + #define P6_NOP5_ATOMIC P6_NOP5 80 + 81 + #define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n" 88 82 89 83 #if defined(CONFIG_MK7) 90 - #define ASM_NOP1 K7_NOP1 91 - #define ASM_NOP2 K7_NOP2 92 - #define ASM_NOP3 K7_NOP3 93 - #define ASM_NOP4 K7_NOP4 94 - #define ASM_NOP5 K7_NOP5 95 - #define ASM_NOP6 K7_NOP6 96 - #define ASM_NOP7 K7_NOP7 97 - #define ASM_NOP8 K7_NOP8 84 + #define ASM_NOP1 
_ASM_MK_NOP(K7_NOP1) 85 + #define ASM_NOP2 _ASM_MK_NOP(K7_NOP2) 86 + #define ASM_NOP3 _ASM_MK_NOP(K7_NOP3) 87 + #define ASM_NOP4 _ASM_MK_NOP(K7_NOP4) 88 + #define ASM_NOP5 _ASM_MK_NOP(K7_NOP5) 89 + #define ASM_NOP6 _ASM_MK_NOP(K7_NOP6) 90 + #define ASM_NOP7 _ASM_MK_NOP(K7_NOP7) 91 + #define ASM_NOP8 _ASM_MK_NOP(K7_NOP8) 92 + #define ASM_NOP5_ATOMIC _ASM_MK_NOP(K7_NOP5_ATOMIC) 98 93 #elif defined(CONFIG_X86_P6_NOP) 99 - #define ASM_NOP1 P6_NOP1 100 - #define ASM_NOP2 P6_NOP2 101 - #define ASM_NOP3 P6_NOP3 102 - #define ASM_NOP4 P6_NOP4 103 - #define ASM_NOP5 P6_NOP5 104 - #define ASM_NOP6 P6_NOP6 105 - #define ASM_NOP7 P6_NOP7 106 - #define ASM_NOP8 P6_NOP8 94 + #define ASM_NOP1 _ASM_MK_NOP(P6_NOP1) 95 + #define ASM_NOP2 _ASM_MK_NOP(P6_NOP2) 96 + #define ASM_NOP3 _ASM_MK_NOP(P6_NOP3) 97 + #define ASM_NOP4 _ASM_MK_NOP(P6_NOP4) 98 + #define ASM_NOP5 _ASM_MK_NOP(P6_NOP5) 99 + #define ASM_NOP6 _ASM_MK_NOP(P6_NOP6) 100 + #define ASM_NOP7 _ASM_MK_NOP(P6_NOP7) 101 + #define ASM_NOP8 _ASM_MK_NOP(P6_NOP8) 102 + #define ASM_NOP5_ATOMIC _ASM_MK_NOP(P6_NOP5_ATOMIC) 107 103 #elif defined(CONFIG_X86_64) 108 - #define ASM_NOP1 K8_NOP1 109 - #define ASM_NOP2 K8_NOP2 110 - #define ASM_NOP3 K8_NOP3 111 - #define ASM_NOP4 K8_NOP4 112 - #define ASM_NOP5 K8_NOP5 113 - #define ASM_NOP6 K8_NOP6 114 - #define ASM_NOP7 K8_NOP7 115 - #define ASM_NOP8 K8_NOP8 104 + #define ASM_NOP1 _ASM_MK_NOP(K8_NOP1) 105 + #define ASM_NOP2 _ASM_MK_NOP(K8_NOP2) 106 + #define ASM_NOP3 _ASM_MK_NOP(K8_NOP3) 107 + #define ASM_NOP4 _ASM_MK_NOP(K8_NOP4) 108 + #define ASM_NOP5 _ASM_MK_NOP(K8_NOP5) 109 + #define ASM_NOP6 _ASM_MK_NOP(K8_NOP6) 110 + #define ASM_NOP7 _ASM_MK_NOP(K8_NOP7) 111 + #define ASM_NOP8 _ASM_MK_NOP(K8_NOP8) 112 + #define ASM_NOP5_ATOMIC _ASM_MK_NOP(K8_NOP5_ATOMIC) 116 113 #else 117 - #define ASM_NOP1 GENERIC_NOP1 118 - #define ASM_NOP2 GENERIC_NOP2 119 - #define ASM_NOP3 GENERIC_NOP3 120 - #define ASM_NOP4 GENERIC_NOP4 121 - #define ASM_NOP5 GENERIC_NOP5 122 - #define ASM_NOP6 GENERIC_NOP6 123 - 
#define ASM_NOP7 GENERIC_NOP7 124 - #define ASM_NOP8 GENERIC_NOP8 114 + #define ASM_NOP1 _ASM_MK_NOP(GENERIC_NOP1) 115 + #define ASM_NOP2 _ASM_MK_NOP(GENERIC_NOP2) 116 + #define ASM_NOP3 _ASM_MK_NOP(GENERIC_NOP3) 117 + #define ASM_NOP4 _ASM_MK_NOP(GENERIC_NOP4) 118 + #define ASM_NOP5 _ASM_MK_NOP(GENERIC_NOP5) 119 + #define ASM_NOP6 _ASM_MK_NOP(GENERIC_NOP6) 120 + #define ASM_NOP7 _ASM_MK_NOP(GENERIC_NOP7) 121 + #define ASM_NOP8 _ASM_MK_NOP(GENERIC_NOP8) 122 + #define ASM_NOP5_ATOMIC _ASM_MK_NOP(GENERIC_NOP5_ATOMIC) 125 123 #endif 126 124 127 125 #define ASM_NOP_MAX 8 126 + #define NOP_ATOMIC5 (ASM_NOP_MAX+1) /* Entry for the 5-byte atomic NOP */ 127 + 128 + #ifndef __ASSEMBLY__ 129 + extern const unsigned char * const *ideal_nops; 130 + extern void arch_init_ideal_nops(void); 131 + #endif 128 132 129 133 #endif /* _ASM_X86_NOPS_H */
+101 -83
arch/x86/kernel/alternative.c
··· 67 67 #define DPRINTK(fmt, args...) if (debug_alternative) \ 68 68 printk(KERN_DEBUG fmt, args) 69 69 70 + /* 71 + * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes 72 + * that correspond to that nop. Getting from one nop to the next, we 73 + * add to the array the offset that is equal to the sum of all sizes of 74 + * nops preceding the one we are after. 75 + * 76 + * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the 77 + * nice symmetry of sizes of the previous nops. 78 + */ 70 79 #if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64) 71 - /* Use inline assembly to define this because the nops are defined 72 - as inline assembly strings in the include files and we cannot 73 - get them easily into strings. */ 74 - asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: " 75 - GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 76 - GENERIC_NOP7 GENERIC_NOP8 77 - "\t.previous"); 78 - extern const unsigned char intelnops[]; 79 - static const unsigned char *const __initconst_or_module 80 - intel_nops[ASM_NOP_MAX+1] = { 80 + static const unsigned char intelnops[] = 81 + { 82 + GENERIC_NOP1, 83 + GENERIC_NOP2, 84 + GENERIC_NOP3, 85 + GENERIC_NOP4, 86 + GENERIC_NOP5, 87 + GENERIC_NOP6, 88 + GENERIC_NOP7, 89 + GENERIC_NOP8, 90 + GENERIC_NOP5_ATOMIC 91 + }; 92 + static const unsigned char * const intel_nops[ASM_NOP_MAX+2] = 93 + { 81 94 NULL, 82 95 intelnops, 83 96 intelnops + 1, ··· 100 87 intelnops + 1 + 2 + 3 + 4 + 5, 101 88 intelnops + 1 + 2 + 3 + 4 + 5 + 6, 102 89 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7, 90 + intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, 103 91 }; 104 92 #endif 105 93 106 94 #ifdef K8_NOP1 107 - asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: " 108 - K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 109 - K8_NOP7 K8_NOP8 110 - "\t.previous"); 111 - extern const unsigned char k8nops[]; 112 - static const unsigned char *const __initconst_or_module 113 - k8_nops[ASM_NOP_MAX+1] = { 95 + static 
const unsigned char k8nops[] = 96 + { 97 + K8_NOP1, 98 + K8_NOP2, 99 + K8_NOP3, 100 + K8_NOP4, 101 + K8_NOP5, 102 + K8_NOP6, 103 + K8_NOP7, 104 + K8_NOP8, 105 + K8_NOP5_ATOMIC 106 + }; 107 + static const unsigned char * const k8_nops[ASM_NOP_MAX+2] = 108 + { 114 109 NULL, 115 110 k8nops, 116 111 k8nops + 1, ··· 128 107 k8nops + 1 + 2 + 3 + 4 + 5, 129 108 k8nops + 1 + 2 + 3 + 4 + 5 + 6, 130 109 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, 110 + k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, 131 111 }; 132 112 #endif 133 113 134 114 #if defined(K7_NOP1) && !defined(CONFIG_X86_64) 135 - asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: " 136 - K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 137 - K7_NOP7 K7_NOP8 138 - "\t.previous"); 139 - extern const unsigned char k7nops[]; 140 - static const unsigned char *const __initconst_or_module 141 - k7_nops[ASM_NOP_MAX+1] = { 115 + static const unsigned char k7nops[] = 116 + { 117 + K7_NOP1, 118 + K7_NOP2, 119 + K7_NOP3, 120 + K7_NOP4, 121 + K7_NOP5, 122 + K7_NOP6, 123 + K7_NOP7, 124 + K7_NOP8, 125 + K7_NOP5_ATOMIC 126 + }; 127 + static const unsigned char * const k7_nops[ASM_NOP_MAX+2] = 128 + { 142 129 NULL, 143 130 k7nops, 144 131 k7nops + 1, ··· 156 127 k7nops + 1 + 2 + 3 + 4 + 5, 157 128 k7nops + 1 + 2 + 3 + 4 + 5 + 6, 158 129 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, 130 + k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, 159 131 }; 160 132 #endif 161 133 162 134 #ifdef P6_NOP1 163 - asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: " 164 - P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6 165 - P6_NOP7 P6_NOP8 166 - "\t.previous"); 167 - extern const unsigned char p6nops[]; 168 - static const unsigned char *const __initconst_or_module 169 - p6_nops[ASM_NOP_MAX+1] = { 135 + static const unsigned char __initconst_or_module p6nops[] = 136 + { 137 + P6_NOP1, 138 + P6_NOP2, 139 + P6_NOP3, 140 + P6_NOP4, 141 + P6_NOP5, 142 + P6_NOP6, 143 + P6_NOP7, 144 + P6_NOP8, 145 + P6_NOP5_ATOMIC 146 + }; 147 + static const unsigned char * const 
p6_nops[ASM_NOP_MAX+2] = 148 + { 170 149 NULL, 171 150 p6nops, 172 151 p6nops + 1, ··· 184 147 p6nops + 1 + 2 + 3 + 4 + 5, 185 148 p6nops + 1 + 2 + 3 + 4 + 5 + 6, 186 149 p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7, 150 + p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8, 187 151 }; 188 152 #endif 189 153 154 + /* Initialize these to a safe default */ 190 155 #ifdef CONFIG_X86_64 156 + const unsigned char * const *ideal_nops = p6_nops; 157 + #else 158 + const unsigned char * const *ideal_nops = intel_nops; 159 + #endif 191 160 192 - extern char __vsyscall_0; 193 - static const unsigned char *const *__init_or_module find_nop_table(void) 161 + void __init arch_init_ideal_nops(void) 194 162 { 195 - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && 196 - boot_cpu_has(X86_FEATURE_NOPL)) 197 - return p6_nops; 198 - else 199 - return k8_nops; 163 + switch (boot_cpu_data.x86_vendor) { 164 + case X86_VENDOR_INTEL: 165 + if (boot_cpu_has(X86_FEATURE_NOPL)) { 166 + ideal_nops = p6_nops; 167 + } else { 168 + #ifdef CONFIG_X86_64 169 + ideal_nops = k8_nops; 170 + #else 171 + ideal_nops = intel_nops; 172 + #endif 173 + } 174 + 175 + default: 176 + #ifdef CONFIG_X86_64 177 + ideal_nops = k8_nops; 178 + #else 179 + if (boot_cpu_has(X86_FEATURE_K8)) 180 + ideal_nops = k8_nops; 181 + else if (boot_cpu_has(X86_FEATURE_K7)) 182 + ideal_nops = k7_nops; 183 + else 184 + ideal_nops = intel_nops; 185 + #endif 186 + } 200 187 } 201 - 202 - #else /* CONFIG_X86_64 */ 203 - 204 - static const unsigned char *const *__init_or_module find_nop_table(void) 205 - { 206 - if (boot_cpu_has(X86_FEATURE_K8)) 207 - return k8_nops; 208 - else if (boot_cpu_has(X86_FEATURE_K7)) 209 - return k7_nops; 210 - else if (boot_cpu_has(X86_FEATURE_NOPL)) 211 - return p6_nops; 212 - else 213 - return intel_nops; 214 - } 215 - 216 - #endif /* CONFIG_X86_64 */ 217 188 218 189 /* Use this to add nops to a buffer, then text_poke the whole buffer. 
*/ 219 190 static void __init_or_module add_nops(void *insns, unsigned int len) 220 191 { 221 - const unsigned char *const *noptable = find_nop_table(); 222 - 223 192 while (len > 0) { 224 193 unsigned int noplen = len; 225 194 if (noplen > ASM_NOP_MAX) 226 195 noplen = ASM_NOP_MAX; 227 - memcpy(insns, noptable[noplen], noplen); 196 + memcpy(insns, ideal_nops[noplen], noplen); 228 197 insns += noplen; 229 198 len -= noplen; 230 199 } ··· 238 195 239 196 extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; 240 197 extern s32 __smp_locks[], __smp_locks_end[]; 198 + extern char __vsyscall_0; 241 199 void *text_poke_early(void *addr, const void *opcode, size_t len); 242 200 243 201 /* Replace instructions with better alternatives for this CPU type. ··· 722 678 wrote_text = 0; 723 679 __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); 724 680 } 725 - 726 - #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) 727 - 728 - #ifdef CONFIG_X86_64 729 - unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 }; 730 - #else 731 - unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 }; 732 - #endif 733 - 734 - void __init arch_init_ideal_nop5(void) 735 - { 736 - /* 737 - * There is no good nop for all x86 archs. This selection 738 - * algorithm should be unified with the one in find_nop_table(), 739 - * but this should be good enough for now. 740 - * 741 - * For cases other than the ones below, use the safe (as in 742 - * always functional) defaults above. 743 - */ 744 - #ifdef CONFIG_X86_64 745 - /* Don't use these on 32 bits due to broken virtualizers */ 746 - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) 747 - memcpy(ideal_nop5, p6_nops[5], 5); 748 - #endif 749 - } 750 - #endif
+2 -2
arch/x86/kernel/ftrace.c
··· 260 260 return mod_code_status; 261 261 } 262 262 263 - static unsigned char *ftrace_nop_replace(void) 263 + static const unsigned char *ftrace_nop_replace(void) 264 264 { 265 - return ideal_nop5; 265 + return ideal_nops[NOP_ATOMIC5]; 266 266 } 267 267 268 268 static int
+3 -2
arch/x86/kernel/jump_label.c
··· 34 34 code.offset = entry->target - 35 35 (entry->code + JUMP_LABEL_NOP_SIZE); 36 36 } else 37 - memcpy(&code, ideal_nop5, JUMP_LABEL_NOP_SIZE); 37 + memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE); 38 38 get_online_cpus(); 39 39 mutex_lock(&text_mutex); 40 40 text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE); ··· 44 44 45 45 void arch_jump_label_text_poke_early(jump_label_t addr) 46 46 { 47 - text_poke_early((void *)addr, ideal_nop5, JUMP_LABEL_NOP_SIZE); 47 + text_poke_early((void *)addr, ideal_nops[NOP_ATOMIC5], 48 + JUMP_LABEL_NOP_SIZE); 48 49 } 49 50 50 51 #endif
+1 -5
arch/x86/kernel/setup.c
··· 691 691 692 692 void __init setup_arch(char **cmdline_p) 693 693 { 694 - unsigned long flags; 695 - 696 694 #ifdef CONFIG_X86_32 697 695 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data)); 698 696 visws_early_detect(); ··· 1034 1036 1035 1037 mcheck_init(); 1036 1038 1037 - local_irq_save(flags); 1038 - arch_init_ideal_nop5(); 1039 - local_irq_restore(flags); 1039 + arch_init_ideal_nops(); 1040 1040 } 1041 1041 1042 1042 #ifdef CONFIG_X86_32