
MIPS: Get rid of the use of .macro in C code.

It fails with LTO and probably has always been fragile.
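
The failure mode: the old ASMMACRO() helper defined an assembler-level
.macro from a toplevel __asm__() statement and then invoked it by bare
name from inline asm elsewhere. That only works if the assembler happens
to see the defining toplevel asm before every use; under LTO, toplevel
asm and the functions using it can be reordered or split across
assembler invocations, leaving the name undefined. The fix keeps each
instruction sequence in a C preprocessor macro and pastes it into every
asm statement with __stringify(), so each statement is self-contained.
A minimal sketch of the two patterns (the my_nop name is hypothetical,
not from the patch):

	#include <linux/stringify.h>

	/* Old, fragile: the .macro lives in one toplevel asm statement and
	 * every user silently assumes the assembler saw that statement first. */
	__asm__(".macro my_nop; sll $0, $0, 1; .endm");

	static inline void old_my_nop(void)
	{
		__asm__ __volatile__("my_nop");	/* undefined if reordered under LTO */
	}

	/* New: the body stays in the preprocessor; every expansion is
	 * self-contained, so reordering or splitting translation units is safe. */
	#define ___my_nop	sll $0, $0, 1

	static inline void new_my_nop(void)
	{
		__asm__ __volatile__(__stringify(___my_nop));
	}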

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

+411 -278
+243 -128
arch/mips/include/asm/hazards.h
···
 #ifndef _ASM_HAZARDS_H
 #define _ASM_HAZARDS_H
 
-#ifdef __ASSEMBLY__
-#define ASMMACRO(name, code...) .macro name; code; .endm
-#else
+#include <linux/stringify.h>
 
-#include <asm/cpu-features.h>
+#define ___ssnop \
+	sll	$0, $0, 1
 
-#define ASMMACRO(name, code...) \
-__asm__(".macro " #name "; " #code "; .endm"); \
- \
-static inline void name(void) \
-{ \
-	__asm__ __volatile__ (#name); \
-}
-
-/*
- * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
- */
-extern void mips_ihb(void);
-
-#endif
-
-ASMMACRO(_ssnop,
-	 sll	$0, $0, 1
-	)
-
-ASMMACRO(_ehb,
-	 sll	$0, $0, 3
-	)
+#define ___ehb \
+	sll	$0, $0, 3
 
 /*
  * TLB hazards
···
  * MIPSR2 defines ehb for hazard avoidance
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-	 _ehb
-	)
-ASMMACRO(tlbw_use_hazard,
-	 _ehb
-	)
-ASMMACRO(tlb_probe_hazard,
-	 _ehb
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ehb
-	)
-ASMMACRO(irq_disable_hazard,
-	 _ehb
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ehb
-	)
+#define __mtc0_tlbw_hazard \
+	___ehb
+
+#define __tlbw_use_hazard \
+	___ehb
+
+#define __tlb_probe_hazard \
+	___ehb
+
+#define __irq_enable_hazard \
+	___ehb
+
+#define __irq_disable_hazard \
+	___ehb
+
+#define __back_to_back_c0_hazard \
+	___ehb
+
 /*
  * gcc has a tradition of misscompiling the previous construct using the
  * address of a label as argument to inline assembler. Gas otoh has the
···
  * These are slightly complicated by the fact that we guarantee R1 kernels to
  * run fine on R2 processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	 _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(tlbw_use_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(tlb_probe_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(irq_disable_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ssnop; _ssnop; _ssnop; _ehb
-	)
+
+#define __mtc0_tlbw_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __tlbw_use_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __tlb_probe_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __irq_enable_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __irq_disable_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
+#define __back_to_back_c0_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop; \
+	___ehb
+
 /*
  * gcc has a tradition of misscompiling the previous construct using the
  * address of a label as argument to inline assembler. Gas otoh has the
···
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
  */
 
-ASMMACRO(mtc0_tlbw_hazard,
-	)
-ASMMACRO(tlbw_use_hazard,
-	)
-ASMMACRO(tlb_probe_hazard,
-	)
-ASMMACRO(irq_enable_hazard,
-	)
-ASMMACRO(irq_disable_hazard,
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	)
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #elif defined(CONFIG_CPU_SB1)
···
 /*
  * Mostly like R4000 for historic reasons
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	)
-ASMMACRO(tlbw_use_hazard,
-	)
-ASMMACRO(tlb_probe_hazard,
-	)
-ASMMACRO(irq_enable_hazard,
-	)
-ASMMACRO(irq_disable_hazard,
-	 _ssnop; _ssnop; _ssnop
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	)
+#define __mtc0_tlbw_hazard
+
+#define __tlbw_use_hazard
+
+#define __tlb_probe_hazard
+
+#define __irq_enable_hazard
+
+#define __irq_disable_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop
+
+#define __back_to_back_c0_hazard
+
 #define instruction_hazard() do { } while (0)
 
 #else
···
  * hazard so this is nice trick to have an optimal code for a range of
  * processors.
  */
-ASMMACRO(mtc0_tlbw_hazard,
-	 nop; nop
-	)
-ASMMACRO(tlbw_use_hazard,
-	 nop; nop; nop
-	)
-ASMMACRO(tlb_probe_hazard,
-	 nop; nop; nop
-	)
-ASMMACRO(irq_enable_hazard,
-	 _ssnop; _ssnop; _ssnop;
-	)
-ASMMACRO(irq_disable_hazard,
-	 nop; nop; nop
-	)
-ASMMACRO(back_to_back_c0_hazard,
-	 _ssnop; _ssnop; _ssnop;
-	)
+#define __mtc0_tlbw_hazard \
+	nop; \
+	nop
+
+#define __tlbw_use_hazard \
+	nop; \
+	nop; \
+	nop
+
+#define __tlb_probe_hazard \
+	nop; \
+	nop; \
+	nop
+
+#define __irq_enable_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop
+
+#define __irq_disable_hazard \
+	nop; \
+	nop; \
+	nop
+
+#define __back_to_back_c0_hazard \
+	___ssnop; \
+	___ssnop; \
+	___ssnop
+
 #define instruction_hazard() do { } while (0)
 
 #endif
···
 /* FPU hazards */
 
 #if defined(CONFIG_CPU_SB1)
-ASMMACRO(enable_fpu_hazard,
-	 .set	push;
-	 .set	mips64;
-	 .set	noreorder;
-	 _ssnop;
-	 bnezl	$0, .+4;
-	 _ssnop;
-	 .set	pop
-)
-ASMMACRO(disable_fpu_hazard,
-)
+
+#define __enable_fpu_hazard \
+	.set	push; \
+	.set	mips64; \
+	.set	noreorder; \
+	___ssnop; \
+	bnezl	$0, .+4; \
+	___ssnop; \
+	.set	pop
+
+#define __disable_fpu_hazard
 
 #elif defined(CONFIG_CPU_MIPSR2)
-ASMMACRO(enable_fpu_hazard,
-	 _ehb
-)
-ASMMACRO(disable_fpu_hazard,
-	 _ehb
-)
+
+#define __enable_fpu_hazard \
+	___ehb
+
+#define __disable_fpu_hazard \
+	___ehb
+
 #else
-ASMMACRO(enable_fpu_hazard,
-	 nop; nop; nop; nop
-)
-ASMMACRO(disable_fpu_hazard,
-	 _ehb
-)
+
+#define __enable_fpu_hazard \
+	nop; \
+	nop; \
+	nop; \
+	nop
+
+#define __disable_fpu_hazard \
+	___ehb
+
 #endif
+
+#ifdef __ASSEMBLY__
+
+#define _ssnop ___ssnop
+#define _ehb ___ehb
+#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
+#define tlbw_use_hazard __tlbw_use_hazard
+#define tlb_probe_hazard __tlb_probe_hazard
+#define irq_enable_hazard __irq_enable_hazard
+#define irq_disable_hazard __irq_disable_hazard
+#define back_to_back_c0_hazard __back_to_back_c0_hazard
+#define enable_fpu_hazard __enable_fpu_hazard
+#define disable_fpu_hazard __disable_fpu_hazard
+
+#else
+
+#define _ssnop() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(___ssnop) \
+	); \
+} while (0)
+
+#define _ehb() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(___ehb) \
+	); \
+} while (0)
+
+
+#define mtc0_tlbw_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__mtc0_tlbw_hazard) \
+	); \
+} while (0)
+
+
+#define tlbw_use_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__tlbw_use_hazard) \
+	); \
+} while (0)
+
+
+#define tlb_probe_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__tlb_probe_hazard) \
+	); \
+} while (0)
+
+
+#define irq_enable_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__irq_enable_hazard) \
+	); \
+} while (0)
+
+
+#define irq_disable_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__irq_disable_hazard) \
+	); \
+} while (0)
+
+
+#define back_to_back_c0_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__back_to_back_c0_hazard) \
+	); \
+} while (0)
+
+
+#define enable_fpu_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__enable_fpu_hazard) \
+	); \
+} while (0)
+
+
+#define disable_fpu_hazard() \
+do { \
+	__asm__ __volatile__( \
+	__stringify(__disable_fpu_hazard) \
+	); \
+} while (0)
+
+/*
+ * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
+ */
+extern void mips_ihb(void);
+
+#endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_HAZARDS_H */
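
Note how the rewrite keeps both consumers working: assembly sources get
plain #define aliases (mtc0_tlbw_hazard and friends) that still read as
bare mnemonics, while C code gets function-like do { } while (0)
wrappers whose bodies are pasted in with __stringify(). As an
illustration (my expansion, not part of the patch), on a
CONFIG_CPU_MIPSR2 kernel a C caller of back_to_back_c0_hazard()
preprocesses down to roughly:

	/* __back_to_back_c0_hazard -> ___ehb -> sll $0, $0, 3, stringified: */
	static inline void back_to_back_c0_sketch(void)
	{
		/* hypothetical pair of CP0 accesses needing the hazard barrier */
		do {
			__asm__ __volatile__("sll $0, $0, 3");	/* ehb encoding */
		} while (0);
	}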
+83 -84
arch/mips/include/asm/irqflags.h
···
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/stringify.h>
 #include <asm/hazards.h>
 
 #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
 
-__asm__(
-	"	.macro	arch_local_irq_disable\n"
-	"	.set	push	\n"
-	"	.set	noat	\n"
-	"	di	\n"
-	"	irq_disable_hazard	\n"
-	"	.set	pop	\n"
-	"	.endm	\n");
-
 static inline void arch_local_irq_disable(void)
 {
 	__asm__ __volatile__(
-	"arch_local_irq_disable"
-	: /* no outputs */
-	: /* no inputs */
-	: "memory");
-}
-
-
-__asm__(
-	"	.macro	arch_local_irq_save result	\n"
 	"	.set	push	\n"
-	"	.set	reorder	\n"
 	"	.set	noat	\n"
-	"	di	\\result	\n"
-	"	andi	\\result, 1	\n"
-	"	irq_disable_hazard	\n"
+	"	di	\n"
+	"	" __stringify(__irq_disable_hazard) "	\n"
 	"	.set	pop	\n"
-	"	.endm	\n");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
+}
 
 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
-	asm volatile("arch_local_irq_save\t%0"
-		     : "=r" (flags)
-		     : /* no inputs */
-		     : "memory");
+
+	asm __volatile__(
+	"	.set	push	\n"
+	"	.set	reorder	\n"
+	"	.set	noat	\n"
+	"	di	%[flags]	\n"
+	"	andi	%[flags], 1	\n"
+	"	" __stringify(__irq_disable_hazard) "	\n"
+	"	.set	pop	\n"
+	: [flags] "=r" (flags)
+	: /* no inputs */
+	: "memory");
+
 	return flags;
 }
 
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
 
-__asm__(
-	"	.macro	arch_local_irq_restore flags	\n"
+	__asm__ __volatile__(
 	"	.set	push	\n"
 	"	.set	noreorder	\n"
 	"	.set	noat	\n"
···
 	 * Slow, but doesn't suffer from a relatively unlikely race
 	 * condition we're having since days 1.
 	 */
-	"	beqz	\\flags, 1f	\n"
+	"	beqz	%[flags], 1f	\n"
 	"	di	\n"
 	"	ei	\n"
 	"1:	\n"
···
 	 * Fast, dangerous. Life is fun, life is good.
 	 */
 	"	mfc0	$1, $12	\n"
-	"	ins	$1, \\flags, 0, 1	\n"
+	"	ins	$1, %[flags], 0, 1	\n"
 	"	mtc0	$1, $12	\n"
 #endif
-	"	irq_disable_hazard	\n"
+	"	" __stringify(__irq_disable_hazard) "	\n"
 	"	.set	pop	\n"
-	"	.endm	\n");
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
-	unsigned long __tmp1;
-
-	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
 }
 
 static inline void __arch_local_irq_restore(unsigned long flags)
 {
-	unsigned long __tmp1;
-
 	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	"	.set	push	\n"
+	"	.set	noreorder	\n"
+	"	.set	noat	\n"
+#if defined(CONFIG_IRQ_CPU)
+	/*
+	 * Slow, but doesn't suffer from a relatively unlikely race
+	 * condition we're having since days 1.
+	 */
+	"	beqz	%[flags], 1f	\n"
+	"	di	\n"
+	"	ei	\n"
+	"1:	\n"
+#else
+	/*
+	 * Fast, dangerous. Life is fun, life is good.
+	 */
+	"	mfc0	$1, $12	\n"
+	"	ins	$1, %[flags], 0, 1	\n"
+	"	mtc0	$1, $12	\n"
+#endif
+	"	" __stringify(__irq_disable_hazard) "	\n"
+	"	.set	pop	\n"
+	: [flags] "=r" (flags)
+	: "0" (flags)
+	: "memory");
 }
 #else
 /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
···
 #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
 
 
-__asm__(
-	"	.macro	arch_local_irq_enable	\n"
+extern void smtc_ipi_replay(void);
+
+static inline void arch_local_irq_enable(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of call overhead on each local_irq_enable()
+	 */
+	smtc_ipi_replay();
+#endif
+	__asm__ __volatile__(
 	"	.set	push	\n"
 	"	.set	reorder	\n"
 	"	.set	noat	\n"
···
 	"	xori	$1,0x1e	\n"
 	"	mtc0	$1,$12	\n"
 #endif
-	"	irq_enable_hazard	\n"
+	"	" __stringify(__irq_enable_hazard) "	\n"
 	"	.set	pop	\n"
-	"	.endm");
-
-extern void smtc_ipi_replay(void);
-
-static inline void arch_local_irq_enable(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC kernel needs to do a software replay of queued
-	 * IPIs, at the cost of call overhead on each local_irq_enable()
-	 */
-	smtc_ipi_replay();
-#endif
-	__asm__ __volatile__(
-		"arch_local_irq_enable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 }
-
-
-__asm__(
-	"	.macro	arch_local_save_flags flags	\n"
-	"	.set	push	\n"
-	"	.set	reorder	\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\flags, $2, 1	\n"
-#else
-	"	mfc0	\\flags, $12	\n"
-#endif
-	"	.set	pop	\n"
-	"	.endm	\n");
 
 static inline unsigned long arch_local_save_flags(void)
 {
 	unsigned long flags;
-	asm volatile("arch_local_save_flags %0" : "=r" (flags));
+
+	asm __volatile__(
+	"	.set	push	\n"
+	"	.set	reorder	\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	%[flags], $2, 1	\n"
+#else
+	"	mfc0	%[flags], $12	\n"
+#endif
+	"	.set	pop	\n"
+	: [flags] "=r" (flags));
+
 	return flags;
 }
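
Beyond dropping .macro, the conversion replaces the macro's positional
arguments (\result, \flags) with named inline-asm operands, so the
template and the constraint list stay in sync by name rather than by
position. A standalone sketch of the named-operand syntax used
throughout (hypothetical function, real GCC syntax):

	static inline unsigned long read_c0_status_sketch(void)
	{
		unsigned long flags;

		/* %[flags] in the template binds to the operand declared as
		 * [flags] "=r" (flags) in the output list. */
		asm volatile(
		"	mfc0	%[flags], $12	\n"
		: [flags] "=r" (flags));

		return flags;
	}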
+85 -66
arch/mips/lib/mips-atomic.c
···
 #include <linux/compiler.h>
 #include <linux/preempt.h>
 #include <linux/export.h>
+#include <linux/stringify.h>
 
 #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
 
···
  *
  * Workaround: mask EXL bit of the result or place a nop before mfc0.
  */
-__asm__(
-	"	.macro	arch_local_irq_disable\n"
+notrace void arch_local_irq_disable(void)
+{
+	preempt_disable();
+
+	__asm__ __volatile__(
 	"	.set	push	\n"
 	"	.set	noat	\n"
 #ifdef CONFIG_MIPS_MT_SMTC
···
 	"	.set	noreorder	\n"
 	"	mtc0	$1,$12	\n"
 #endif
-	"	irq_disable_hazard	\n"
+	"	" __stringify(__irq_disable_hazard) "	\n"
 	"	.set	pop	\n"
-	"	.endm	\n");
+	: /* no outputs */
+	: /* no inputs */
+	: "memory");
 
-notrace void arch_local_irq_disable(void)
-{
-	preempt_disable();
-	__asm__ __volatile__(
-		"arch_local_irq_disable"
-		: /* no outputs */
-		: /* no inputs */
-		: "memory");
 	preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_disable);
 
 
-__asm__(
-	"	.macro	arch_local_irq_save result	\n"
+notrace unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+
+	preempt_disable();
+
+	__asm__ __volatile__(
 	"	.set	push	\n"
 	"	.set	reorder	\n"
 	"	.set	noat	\n"
 #ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	\\result, $2, 1	\n"
-	"	ori	$1, \\result, 0x400	\n"
+	"	mfc0	%[flags], $2, 1	\n"
+	"	ori	$1, %[flags], 0x400	\n"
 	"	.set	noreorder	\n"
 	"	mtc0	$1, $2, 1	\n"
-	"	andi	\\result, \\result, 0x400	\n"
+	"	andi	%[flags], %[flags], 0x400	\n"
 #elif defined(CONFIG_CPU_MIPSR2)
 	/* see irqflags.h for inline function */
 #else
-	"	mfc0	\\result, $12	\n"
-	"	ori	$1, \\result, 0x1f	\n"
+	"	mfc0	%[flags], $12	\n"
+	"	ori	$1, %[flags], 0x1f	\n"
 	"	xori	$1, 0x1f	\n"
 	"	.set	noreorder	\n"
 	"	mtc0	$1, $12	\n"
 #endif
-	"	irq_disable_hazard	\n"
+	"	" __stringify(__irq_disable_hazard) "	\n"
 	"	.set	pop	\n"
-	"	.endm	\n");
+	: [flags] "=r" (flags)
+	: /* no inputs */
+	: "memory");
 
-notrace unsigned long arch_local_irq_save(void)
-{
-	unsigned long flags;
-	preempt_disable();
-	asm volatile("arch_local_irq_save\t%0"
-		     : "=r" (flags)
-		     : /* no inputs */
-		     : "memory");
 	preempt_enable();
+
 	return flags;
 }
 EXPORT_SYMBOL(arch_local_irq_save);
-
-
-__asm__(
-	"	.macro	arch_local_irq_restore flags	\n"
-	"	.set	push	\n"
-	"	.set	noreorder	\n"
-	"	.set	noat	\n"
-#ifdef CONFIG_MIPS_MT_SMTC
-	"	mfc0	$1, $2, 1	\n"
-	"	andi	\\flags, 0x400	\n"
-	"	ori	$1, 0x400	\n"
-	"	xori	$1, 0x400	\n"
-	"	or	\\flags, $1	\n"
-	"	mtc0	\\flags, $2, 1	\n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
-	/* see irqflags.h for inline function */
-#elif defined(CONFIG_CPU_MIPSR2)
-	/* see irqflags.h for inline function */
-#else
-	"	mfc0	$1, $12	\n"
-	"	andi	\\flags, 1	\n"
-	"	ori	$1, 0x1f	\n"
-	"	xori	$1, 0x1f	\n"
-	"	or	\\flags, $1	\n"
-	"	mtc0	\\flags, $12	\n"
-#endif
-	"	irq_disable_hazard	\n"
-	"	.set	pop	\n"
-	"	.endm	\n");
 
 notrace void arch_local_irq_restore(unsigned long flags)
 {
···
 	smtc_ipi_replay();
 #endif
 	preempt_disable();
+
 	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	"	.set	push	\n"
+	"	.set	noreorder	\n"
+	"	.set	noat	\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1	\n"
+	"	andi	%[flags], 0x400	\n"
+	"	ori	$1, 0x400	\n"
+	"	xori	$1, 0x400	\n"
+	"	or	%[flags], $1	\n"
+	"	mtc0	%[flags], $2, 1	\n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1, $12	\n"
+	"	andi	%[flags], 1	\n"
+	"	ori	$1, 0x1f	\n"
+	"	xori	$1, 0x1f	\n"
+	"	or	%[flags], $1	\n"
+	"	mtc0	%[flags], $12	\n"
+#endif
+	"	" __stringify(__irq_disable_hazard) "	\n"
+	"	.set	pop	\n"
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
+
 	preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_restore);
···
 	unsigned long __tmp1;
 
 	preempt_disable();
+
 	__asm__ __volatile__(
-		"arch_local_irq_restore\t%0"
-		: "=r" (__tmp1)
-		: "0" (flags)
-		: "memory");
+	"	.set	push	\n"
+	"	.set	noreorder	\n"
+	"	.set	noat	\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1	\n"
+	"	andi	%[flags], 0x400	\n"
+	"	ori	$1, 0x400	\n"
+	"	xori	$1, 0x400	\n"
+	"	or	%[flags], $1	\n"
+	"	mtc0	%[flags], $2, 1	\n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1, $12	\n"
+	"	andi	%[flags], 1	\n"
+	"	ori	$1, 0x1f	\n"
+	"	xori	$1, 0x1f	\n"
+	"	or	%[flags], $1	\n"
+	"	mtc0	%[flags], $12	\n"
+#endif
+	"	" __stringify(__irq_disable_hazard) "	\n"
+	"	.set	pop	\n"
+	: [flags] "=r" (__tmp1)
+	: "0" (flags)
+	: "memory");
+
 	preempt_enable();
 }
 EXPORT_SYMBOL(__arch_local_irq_restore);