Fix IRQ flag handling naming

Fix the IRQ flag handling naming. In linux/irqflags.h under one configuration,
it maps:

local_irq_enable() -> raw_local_irq_enable()
local_irq_disable() -> raw_local_irq_disable()
local_irq_save() -> raw_local_irq_save()
...

and under the other configuration, it maps:

raw_local_irq_enable() -> local_irq_enable()
raw_local_irq_disable() -> local_irq_disable()
raw_local_irq_save() -> local_irq_save()
...

This is quite confusing. There should be one set of names expected of the
arch, and this should be wrapped to give another set of names that are expected
by users of this facility.

Change this to have the arch provide:

flags = arch_local_save_flags()
flags = arch_local_irq_save()
arch_local_irq_restore(flags)
arch_local_irq_disable()
arch_local_irq_enable()
arch_irqs_disabled_flags(flags)
arch_irqs_disabled()
arch_safe_halt()

Then linux/irqflags.h wraps these to provide:

raw_local_save_flags(flags)
raw_local_irq_save(flags)
raw_local_irq_restore(flags)
raw_local_irq_disable()
raw_local_irq_enable()
raw_irqs_disabled_flags(flags)
raw_irqs_disabled()
raw_safe_halt()

with type checking on the flags 'arguments', and then wraps those to provide:

local_save_flags(flags)
local_irq_save(flags)
local_irq_restore(flags)
local_irq_disable()
local_irq_enable()
irqs_disabled_flags(flags)
irqs_disabled()
safe_halt()

with tracing included if enabled.

The arch functions can now all be inline functions rather than some of them
having to be macros.

Signed-off-by: David Howells <dhowells@redhat.com> [X86, FRV, MN10300]
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com> [Tile]
Signed-off-by: Michal Simek <monstr@monstr.eu> [Microblaze]
Tested-by: Catalin Marinas <catalin.marinas@arm.com> [ARM]
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Haavard Skinnemoen <haavard.skinnemoen@atmel.com> [AVR]
Acked-by: Tony Luck <tony.luck@intel.com> [IA-64]
Acked-by: Hirokazu Takata <takata@linux-m32r.org> [M32R]
Acked-by: Greg Ungerer <gerg@uclinux.org> [M68K/M68KNOMMU]
Acked-by: Ralf Baechle <ralf@linux-mips.org> [MIPS]
Acked-by: Kyle McMartin <kyle@mcmartin.ca> [PA-RISC]
Acked-by: Paul Mackerras <paulus@samba.org> [PowerPC]
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com> [S390]
Acked-by: Chen Liqin <liqin.chen@sunplusct.com> [Score]
Acked-by: Matt Fleming <matt@console-pimps.org> [SH]
Acked-by: David S. Miller <davem@davemloft.net> [Sparc]
Acked-by: Chris Zankel <chris@zankel.net> [Xtensa]
Reviewed-by: Richard Henderson <rth@twiddle.net> [Alpha]
Reviewed-by: Yoshinori Sato <ysato@users.sourceforge.jp> [H8300]
Cc: starvik@axis.com [CRIS]
Cc: jesper.nilsson@axis.com [CRIS]
Cc: linux-cris-kernel@axis.com

+1492 -1168
+67
arch/alpha/include/asm/irqflags.h
··· 1 + #ifndef __ALPHA_IRQFLAGS_H 2 + #define __ALPHA_IRQFLAGS_H 3 + 4 + #include <asm/system.h> 5 + 6 + #define IPL_MIN 0 7 + #define IPL_SW0 1 8 + #define IPL_SW1 2 9 + #define IPL_DEV0 3 10 + #define IPL_DEV1 4 11 + #define IPL_TIMER 5 12 + #define IPL_PERF 6 13 + #define IPL_POWERFAIL 6 14 + #define IPL_MCHECK 7 15 + #define IPL_MAX 7 16 + 17 + #ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK 18 + #undef IPL_MIN 19 + #define IPL_MIN __min_ipl 20 + extern int __min_ipl; 21 + #endif 22 + 23 + #define getipl() (rdps() & 7) 24 + #define setipl(ipl) ((void) swpipl(ipl)) 25 + 26 + static inline unsigned long arch_local_save_flags(void) 27 + { 28 + return rdps(); 29 + } 30 + 31 + static inline void arch_local_irq_disable(void) 32 + { 33 + setipl(IPL_MAX); 34 + barrier(); 35 + } 36 + 37 + static inline unsigned long arch_local_irq_save(void) 38 + { 39 + unsigned long flags = swpipl(IPL_MAX); 40 + barrier(); 41 + return flags; 42 + } 43 + 44 + static inline void arch_local_irq_enable(void) 45 + { 46 + barrier(); 47 + setipl(IPL_MIN); 48 + } 49 + 50 + static inline void arch_local_irq_restore(unsigned long flags) 51 + { 52 + barrier(); 53 + setipl(flags); 54 + barrier(); 55 + } 56 + 57 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 58 + { 59 + return flags == IPL_MAX; 60 + } 61 + 62 + static inline bool arch_irqs_disabled(void) 63 + { 64 + return arch_irqs_disabled_flags(getipl()); 65 + } 66 + 67 + #endif /* __ALPHA_IRQFLAGS_H */
-28
arch/alpha/include/asm/system.h
··· 259 259 __CALL_PAL_W1(wrusp, unsigned long); 260 260 __CALL_PAL_W1(wrvptptr, unsigned long); 261 261 262 - #define IPL_MIN 0 263 - #define IPL_SW0 1 264 - #define IPL_SW1 2 265 - #define IPL_DEV0 3 266 - #define IPL_DEV1 4 267 - #define IPL_TIMER 5 268 - #define IPL_PERF 6 269 - #define IPL_POWERFAIL 6 270 - #define IPL_MCHECK 7 271 - #define IPL_MAX 7 272 - 273 - #ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK 274 - #undef IPL_MIN 275 - #define IPL_MIN __min_ipl 276 - extern int __min_ipl; 277 - #endif 278 - 279 - #define getipl() (rdps() & 7) 280 - #define setipl(ipl) ((void) swpipl(ipl)) 281 - 282 - #define local_irq_disable() do { setipl(IPL_MAX); barrier(); } while(0) 283 - #define local_irq_enable() do { barrier(); setipl(IPL_MIN); } while(0) 284 - #define local_save_flags(flags) ((flags) = rdps()) 285 - #define local_irq_save(flags) do { (flags) = swpipl(IPL_MAX); barrier(); } while(0) 286 - #define local_irq_restore(flags) do { barrier(); setipl(flags); barrier(); } while(0) 287 - 288 - #define irqs_disabled() (getipl() == IPL_MAX) 289 - 290 262 /* 291 263 * TB routines.. 292 264 */
+84 -61
arch/arm/include/asm/irqflags.h
··· 10 10 */ 11 11 #if __LINUX_ARM_ARCH__ >= 6 12 12 13 - #define raw_local_irq_save(x) \ 14 - ({ \ 15 - __asm__ __volatile__( \ 16 - "mrs %0, cpsr @ local_irq_save\n" \ 17 - "cpsid i" \ 18 - : "=r" (x) : : "memory", "cc"); \ 19 - }) 13 + static inline unsigned long arch_local_irq_save(void) 14 + { 15 + unsigned long flags; 20 16 21 - #define raw_local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc") 22 - #define raw_local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc") 17 + asm volatile( 18 + " mrs %0, cpsr @ arch_local_irq_save\n" 19 + " cpsid i" 20 + : "=r" (flags) : : "memory", "cc"); 21 + return flags; 22 + } 23 + 24 + static inline void arch_local_irq_enable(void) 25 + { 26 + asm volatile( 27 + " cpsie i @ arch_local_irq_enable" 28 + : 29 + : 30 + : "memory", "cc"); 31 + } 32 + 33 + static inline void arch_local_irq_disable(void) 34 + { 35 + asm volatile( 36 + " cpsid i @ arch_local_irq_disable" 37 + : 38 + : 39 + : "memory", "cc"); 40 + } 41 + 23 42 #define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc") 24 43 #define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc") 25 - 26 44 #else 27 45 28 46 /* 29 47 * Save the current interrupt enable state & disable IRQs 30 48 */ 31 - #define raw_local_irq_save(x) \ 32 - ({ \ 33 - unsigned long temp; \ 34 - (void) (&temp == &x); \ 35 - __asm__ __volatile__( \ 36 - "mrs %0, cpsr @ local_irq_save\n" \ 37 - " orr %1, %0, #128\n" \ 38 - " msr cpsr_c, %1" \ 39 - : "=r" (x), "=r" (temp) \ 40 - : \ 41 - : "memory", "cc"); \ 42 - }) 43 - 49 + static inline unsigned long arch_local_irq_save(void) 50 + { 51 + unsigned long flags, temp; 52 + 53 + asm volatile( 54 + " mrs %0, cpsr @ arch_local_irq_save\n" 55 + " orr %1, %0, #128\n" 56 + " msr cpsr_c, %1" 57 + : "=r" (flags), "=r" (temp) 58 + : 59 + : "memory", "cc"); 60 + return flags; 61 + } 62 + 44 63 /* 45 64 * Enable IRQs 46 65 */ 47 - #define raw_local_irq_enable() \ 48 - ({ \ 49 - unsigned long temp; \ 50 - 
__asm__ __volatile__( \ 51 - "mrs %0, cpsr @ local_irq_enable\n" \ 52 - " bic %0, %0, #128\n" \ 53 - " msr cpsr_c, %0" \ 54 - : "=r" (temp) \ 55 - : \ 56 - : "memory", "cc"); \ 57 - }) 66 + static inline void arch_local_irq_enable(void) 67 + { 68 + unsigned long temp; 69 + asm volatile( 70 + " mrs %0, cpsr @ arch_local_irq_enable\n" 71 + " bic %0, %0, #128\n" 72 + " msr cpsr_c, %0" 73 + : "=r" (temp) 74 + : 75 + : "memory", "cc"); 76 + } 58 77 59 78 /* 60 79 * Disable IRQs 61 80 */ 62 - #define raw_local_irq_disable() \ 63 - ({ \ 64 - unsigned long temp; \ 65 - __asm__ __volatile__( \ 66 - "mrs %0, cpsr @ local_irq_disable\n" \ 67 - " orr %0, %0, #128\n" \ 68 - " msr cpsr_c, %0" \ 69 - : "=r" (temp) \ 70 - : \ 71 - : "memory", "cc"); \ 72 - }) 81 + static inline void arch_local_irq_disable(void) 82 + { 83 + unsigned long temp; 84 + asm volatile( 85 + " mrs %0, cpsr @ arch_local_irq_disable\n" 86 + " orr %0, %0, #128\n" 87 + " msr cpsr_c, %0" 88 + : "=r" (temp) 89 + : 90 + : "memory", "cc"); 91 + } 73 92 74 93 /* 75 94 * Enable FIQs ··· 125 106 /* 126 107 * Save the current interrupt enable state. 
127 108 */ 128 - #define raw_local_save_flags(x) \ 129 - ({ \ 130 - __asm__ __volatile__( \ 131 - "mrs %0, cpsr @ local_save_flags" \ 132 - : "=r" (x) : : "memory", "cc"); \ 133 - }) 109 + static inline unsigned long arch_local_save_flags(void) 110 + { 111 + unsigned long flags; 112 + asm volatile( 113 + " mrs %0, cpsr @ local_save_flags" 114 + : "=r" (flags) : : "memory", "cc"); 115 + return flags; 116 + } 134 117 135 118 /* 136 119 * restore saved IRQ & FIQ state 137 120 */ 138 - #define raw_local_irq_restore(x) \ 139 - __asm__ __volatile__( \ 140 - "msr cpsr_c, %0 @ local_irq_restore\n" \ 141 - : \ 142 - : "r" (x) \ 143 - : "memory", "cc") 121 + static inline void arch_local_irq_restore(unsigned long flags) 122 + { 123 + asm volatile( 124 + " msr cpsr_c, %0 @ local_irq_restore" 125 + : 126 + : "r" (flags) 127 + : "memory", "cc"); 128 + } 144 129 145 - #define raw_irqs_disabled_flags(flags) \ 146 - ({ \ 147 - (int)((flags) & PSR_I_BIT); \ 148 - }) 130 + static inline int arch_irqs_disabled_flags(unsigned long flags) 131 + { 132 + return flags & PSR_I_BIT; 133 + } 149 134 150 135 #endif 151 136 #endif
+11 -18
arch/avr32/include/asm/irqflags.h
··· 8 8 #ifndef __ASM_AVR32_IRQFLAGS_H 9 9 #define __ASM_AVR32_IRQFLAGS_H 10 10 11 + #include <linux/types.h> 11 12 #include <asm/sysreg.h> 12 13 13 - static inline unsigned long __raw_local_save_flags(void) 14 + static inline unsigned long arch_local_save_flags(void) 14 15 { 15 16 return sysreg_read(SR); 16 17 } 17 - 18 - #define raw_local_save_flags(x) \ 19 - do { (x) = __raw_local_save_flags(); } while (0) 20 18 21 19 /* 22 20 * This will restore ALL status register flags, not only the interrupt ··· 23 25 * The empty asm statement informs the compiler of this fact while 24 26 * also serving as a barrier. 25 27 */ 26 - static inline void raw_local_irq_restore(unsigned long flags) 28 + static inline void arch_local_irq_restore(unsigned long flags) 27 29 { 28 30 sysreg_write(SR, flags); 29 31 asm volatile("" : : : "memory", "cc"); 30 32 } 31 33 32 - static inline void raw_local_irq_disable(void) 34 + static inline void arch_local_irq_disable(void) 33 35 { 34 36 asm volatile("ssrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory"); 35 37 } 36 38 37 - static inline void raw_local_irq_enable(void) 39 + static inline void arch_local_irq_enable(void) 38 40 { 39 41 asm volatile("csrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory"); 40 42 } 41 43 42 - static inline int raw_irqs_disabled_flags(unsigned long flags) 44 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 43 45 { 44 46 return (flags & SYSREG_BIT(GM)) != 0; 45 47 } 46 48 47 - static inline int raw_irqs_disabled(void) 49 + static inline bool arch_irqs_disabled(void) 48 50 { 49 - unsigned long flags = __raw_local_save_flags(); 50 - 51 - return raw_irqs_disabled_flags(flags); 51 + return arch_irqs_disabled_flags(arch_local_save_flags()); 52 52 } 53 53 54 - static inline unsigned long __raw_local_irq_save(void) 54 + static inline unsigned long arch_local_irq_save(void) 55 55 { 56 - unsigned long flags = __raw_local_save_flags(); 56 + unsigned long flags = arch_local_save_flags(); 57 57 58 - raw_local_irq_disable(); 
58 + arch_local_irq_disable(); 59 59 60 60 return flags; 61 61 } 62 - 63 - #define raw_local_irq_save(flags) \ 64 - do { (flags) = __raw_local_irq_save(); } while (0) 65 62 66 63 #endif /* __ASM_AVR32_IRQFLAGS_H */
-12
arch/blackfin/include/asm/irqflags.h
··· 218 218 219 219 220 220 #endif /* !CONFIG_IPIPE */ 221 - 222 - /* 223 - * Raw interface to linux/irqflags.h. 224 - */ 225 - #define raw_local_save_flags(flags) do { (flags) = arch_local_save_flags(); } while (0) 226 - #define raw_local_irq_save(flags) do { (flags) = arch_local_irq_save(); } while (0) 227 - #define raw_local_irq_restore(flags) arch_local_irq_restore(flags) 228 - #define raw_local_irq_enable() arch_local_irq_enable() 229 - #define raw_local_irq_disable() arch_local_irq_disable() 230 - #define raw_irqs_disabled_flags(flags) arch_irqs_disabled_flags(flags) 231 - #define raw_irqs_disabled() arch_irqs_disabled() 232 - 233 221 #endif
+1
arch/blackfin/kernel/trace.c
··· 15 15 #include <linux/kallsyms.h> 16 16 #include <linux/err.h> 17 17 #include <linux/fs.h> 18 + #include <linux/irq.h> 18 19 #include <asm/dma.h> 19 20 #include <asm/trace.h> 20 21 #include <asm/fixed_code.h>
+45
arch/cris/include/arch-v10/arch/irqflags.h
··· 1 + #ifndef __ASM_CRIS_ARCH_IRQFLAGS_H 2 + #define __ASM_CRIS_ARCH_IRQFLAGS_H 3 + 4 + #include <linux/types.h> 5 + 6 + static inline unsigned long arch_local_save_flags(void) 7 + { 8 + unsigned long flags; 9 + asm volatile("move $ccr,%0" : "=rm" (flags) : : "memory"); 10 + return flags; 11 + } 12 + 13 + static inline void arch_local_irq_disable(void) 14 + { 15 + asm volatile("di" : : : "memory"); 16 + } 17 + 18 + static inline void arch_local_irq_enable(void) 19 + { 20 + asm volatile("ei" : : : "memory"); 21 + } 22 + 23 + static inline unsigned long arch_local_irq_save(void) 24 + { 25 + unsigned long flags = arch_local_save_flags(); 26 + arch_local_irq_disable(); 27 + return flags; 28 + } 29 + 30 + static inline void arch_local_irq_restore(unsigned long flags) 31 + { 32 + asm volatile("move %0,$ccr" : : "rm" (flags) : "memory"); 33 + } 34 + 35 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 36 + { 37 + return !(flags & (1 << 5)); 38 + } 39 + 40 + static inline bool arch_irqs_disabled(void) 41 + { 42 + return arch_irqs_disabled_flags(arch_local_save_flags()); 43 + } 44 + 45 + #endif /* __ASM_CRIS_ARCH_IRQFLAGS_H */
-16
arch/cris/include/arch-v10/arch/system.h
··· 44 44 struct __xchg_dummy { unsigned long a[100]; }; 45 45 #define __xg(x) ((struct __xchg_dummy *)(x)) 46 46 47 - /* interrupt control.. */ 48 - #define local_save_flags(x) __asm__ __volatile__ ("move $ccr,%0" : "=rm" (x) : : "memory"); 49 - #define local_irq_restore(x) __asm__ __volatile__ ("move %0,$ccr" : : "rm" (x) : "memory"); 50 - #define local_irq_disable() __asm__ __volatile__ ( "di" : : :"memory"); 51 - #define local_irq_enable() __asm__ __volatile__ ( "ei" : : :"memory"); 52 - 53 - #define irqs_disabled() \ 54 - ({ \ 55 - unsigned long flags; \ 56 - local_save_flags(flags); \ 57 - !(flags & (1<<5)); \ 58 - }) 59 - 60 - /* For spinlocks etc */ 61 - #define local_irq_save(x) __asm__ __volatile__ ("move $ccr,%0\n\tdi" : "=rm" (x) : : "memory"); 62 - 63 47 #endif
+46
arch/cris/include/arch-v32/arch/irqflags.h
··· 1 + #ifndef __ASM_CRIS_ARCH_IRQFLAGS_H 2 + #define __ASM_CRIS_ARCH_IRQFLAGS_H 3 + 4 + #include <linux/types.h> 5 + #include <arch/ptrace.h> 6 + 7 + static inline unsigned long arch_local_save_flags(void) 8 + { 9 + unsigned long flags; 10 + asm volatile("move $ccs,%0" : "=rm" (flags) : : "memory"); 11 + return flags; 12 + } 13 + 14 + static inline void arch_local_irq_disable(void) 15 + { 16 + asm volatile("di" : : : "memory"); 17 + } 18 + 19 + static inline void arch_local_irq_enable(void) 20 + { 21 + asm volatile("ei" : : : "memory"); 22 + } 23 + 24 + static inline unsigned long arch_local_irq_save(void) 25 + { 26 + unsigned long flags = arch_local_save_flags(); 27 + arch_local_irq_disable(); 28 + return flags; 29 + } 30 + 31 + static inline void arch_local_irq_restore(unsigned long flags) 32 + { 33 + asm volatile("move %0,$ccs" : : "rm" (flags) : "memory"); 34 + } 35 + 36 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 37 + { 38 + return !(flags & (1 << I_CCS_BITNR)); 39 + } 40 + 41 + static inline bool arch_irqs_disabled(void) 42 + { 43 + return arch_irqs_disabled_flags(arch_local_save_flags()); 44 + } 45 + 46 + #endif /* __ASM_CRIS_ARCH_IRQFLAGS_H */
-22
arch/cris/include/arch-v32/arch/system.h
··· 44 44 struct __xchg_dummy { unsigned long a[100]; }; 45 45 #define __xg(x) ((struct __xchg_dummy *)(x)) 46 46 47 - /* Used for interrupt control. */ 48 - #define local_save_flags(x) \ 49 - __asm__ __volatile__ ("move $ccs, %0" : "=rm" (x) : : "memory"); 50 - 51 - #define local_irq_restore(x) \ 52 - __asm__ __volatile__ ("move %0, $ccs" : : "rm" (x) : "memory"); 53 - 54 - #define local_irq_disable() __asm__ __volatile__ ("di" : : : "memory"); 55 - #define local_irq_enable() __asm__ __volatile__ ("ei" : : : "memory"); 56 - 57 - #define irqs_disabled() \ 58 - ({ \ 59 - unsigned long flags; \ 60 - \ 61 - local_save_flags(flags);\ 62 - !(flags & (1 << I_CCS_BITNR)); \ 63 - }) 64 - 65 - /* Used for spinlocks, etc. */ 66 - #define local_irq_save(x) \ 67 - __asm__ __volatile__ ("move $ccs, %0\n\tdi" : "=rm" (x) : : "memory"); 68 - 69 47 #endif /* _ASM_CRIS_ARCH_SYSTEM_H */
+1
arch/cris/include/asm/irqflags.h
··· 1 + #include <arch/irqflags.h>
+1
arch/cris/include/asm/system.h
··· 1 1 #ifndef __ASM_CRIS_SYSTEM_H 2 2 #define __ASM_CRIS_SYSTEM_H 3 3 4 + #include <linux/irqflags.h> 4 5 #include <arch/system.h> 5 6 6 7 /* the switch_to macro calls resume, an asm function in entry.S which does the actual
+158
arch/frv/include/asm/irqflags.h
··· 1 + /* FR-V interrupt handling 2 + * 3 + * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. 4 + * Written by David Howells (dhowells@redhat.com) 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public Licence 8 + * as published by the Free Software Foundation; either version 9 + * 2 of the Licence, or (at your option) any later version. 10 + */ 11 + 12 + #ifndef _ASM_IRQFLAGS_H 13 + #define _ASM_IRQFLAGS_H 14 + 15 + /* 16 + * interrupt flag manipulation 17 + * - use virtual interrupt management since touching the PSR is slow 18 + * - ICC2.Z: T if interrupts virtually disabled 19 + * - ICC2.C: F if interrupts really disabled 20 + * - if Z==1 upon interrupt: 21 + * - C is set to 0 22 + * - interrupts are really disabled 23 + * - entry.S returns immediately 24 + * - uses TIHI (TRAP if Z==0 && C==0) #2 to really reenable interrupts 25 + * - if taken, the trap: 26 + * - sets ICC2.C 27 + * - enables interrupts 28 + */ 29 + static inline void arch_local_irq_disable(void) 30 + { 31 + /* set Z flag, but don't change the C flag */ 32 + asm volatile(" andcc gr0,gr0,gr0,icc2 \n" 33 + : 34 + : 35 + : "memory", "icc2" 36 + ); 37 + } 38 + 39 + static inline void arch_local_irq_enable(void) 40 + { 41 + /* clear Z flag and then test the C flag */ 42 + asm volatile(" oricc gr0,#1,gr0,icc2 \n" 43 + " tihi icc2,gr0,#2 \n" 44 + : 45 + : 46 + : "memory", "icc2" 47 + ); 48 + } 49 + 50 + static inline unsigned long arch_local_save_flags(void) 51 + { 52 + unsigned long flags; 53 + 54 + asm volatile("movsg ccr,%0" 55 + : "=r"(flags) 56 + : 57 + : "memory"); 58 + 59 + /* shift ICC2.Z to bit 0 */ 60 + flags >>= 26; 61 + 62 + /* make flags 1 if interrupts disabled, 0 otherwise */ 63 + return flags & 1UL; 64 + 65 + } 66 + 67 + static inline unsigned long arch_local_irq_save(void) 68 + { 69 + unsigned long flags = arch_local_save_flags(); 70 + arch_local_irq_disable(); 71 + return flags; 72 + } 73 + 74 + static 
inline void arch_local_irq_restore(unsigned long flags) 75 + { 76 + /* load the Z flag by turning 1 if disabled into 0 if disabled 77 + * and thus setting the Z flag but not the C flag */ 78 + asm volatile(" xoricc %0,#1,gr0,icc2 \n" 79 + /* then trap if Z=0 and C=0 */ 80 + " tihi icc2,gr0,#2 \n" 81 + : 82 + : "r"(flags) 83 + : "memory", "icc2" 84 + ); 85 + 86 + } 87 + 88 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 89 + { 90 + return flags; 91 + } 92 + 93 + static inline bool arch_irqs_disabled(void) 94 + { 95 + return arch_irqs_disabled_flags(arch_local_save_flags()); 96 + } 97 + 98 + /* 99 + * real interrupt flag manipulation 100 + */ 101 + #define __arch_local_irq_disable() \ 102 + do { \ 103 + unsigned long psr; \ 104 + asm volatile(" movsg psr,%0 \n" \ 105 + " andi %0,%2,%0 \n" \ 106 + " ori %0,%1,%0 \n" \ 107 + " movgs %0,psr \n" \ 108 + : "=r"(psr) \ 109 + : "i" (PSR_PIL_14), "i" (~PSR_PIL) \ 110 + : "memory"); \ 111 + } while (0) 112 + 113 + #define __arch_local_irq_enable() \ 114 + do { \ 115 + unsigned long psr; \ 116 + asm volatile(" movsg psr,%0 \n" \ 117 + " andi %0,%1,%0 \n" \ 118 + " movgs %0,psr \n" \ 119 + : "=r"(psr) \ 120 + : "i" (~PSR_PIL) \ 121 + : "memory"); \ 122 + } while (0) 123 + 124 + #define __arch_local_save_flags(flags) \ 125 + do { \ 126 + typecheck(unsigned long, flags); \ 127 + asm("movsg psr,%0" \ 128 + : "=r"(flags) \ 129 + : \ 130 + : "memory"); \ 131 + } while (0) 132 + 133 + #define __arch_local_irq_save(flags) \ 134 + do { \ 135 + unsigned long npsr; \ 136 + typecheck(unsigned long, flags); \ 137 + asm volatile(" movsg psr,%0 \n" \ 138 + " andi %0,%3,%1 \n" \ 139 + " ori %1,%2,%1 \n" \ 140 + " movgs %1,psr \n" \ 141 + : "=r"(flags), "=r"(npsr) \ 142 + : "i" (PSR_PIL_14), "i" (~PSR_PIL) \ 143 + : "memory"); \ 144 + } while (0) 145 + 146 + #define __arch_local_irq_restore(flags) \ 147 + do { \ 148 + typecheck(unsigned long, flags); \ 149 + asm volatile(" movgs %0,psr \n" \ 150 + : \ 151 + : "r" (flags) \ 
152 + : "memory"); \ 153 + } while (0) 154 + 155 + #define __arch_irqs_disabled() \ 156 + ((__get_PSR() & PSR_PIL) >= PSR_PIL_14) 157 + 158 + #endif /* _ASM_IRQFLAGS_H */
-136
arch/frv/include/asm/system.h
··· 37 37 } while(0) 38 38 39 39 /* 40 - * interrupt flag manipulation 41 - * - use virtual interrupt management since touching the PSR is slow 42 - * - ICC2.Z: T if interrupts virtually disabled 43 - * - ICC2.C: F if interrupts really disabled 44 - * - if Z==1 upon interrupt: 45 - * - C is set to 0 46 - * - interrupts are really disabled 47 - * - entry.S returns immediately 48 - * - uses TIHI (TRAP if Z==0 && C==0) #2 to really reenable interrupts 49 - * - if taken, the trap: 50 - * - sets ICC2.C 51 - * - enables interrupts 52 - */ 53 - #define local_irq_disable() \ 54 - do { \ 55 - /* set Z flag, but don't change the C flag */ \ 56 - asm volatile(" andcc gr0,gr0,gr0,icc2 \n" \ 57 - : \ 58 - : \ 59 - : "memory", "icc2" \ 60 - ); \ 61 - } while(0) 62 - 63 - #define local_irq_enable() \ 64 - do { \ 65 - /* clear Z flag and then test the C flag */ \ 66 - asm volatile(" oricc gr0,#1,gr0,icc2 \n" \ 67 - " tihi icc2,gr0,#2 \n" \ 68 - : \ 69 - : \ 70 - : "memory", "icc2" \ 71 - ); \ 72 - } while(0) 73 - 74 - #define local_save_flags(flags) \ 75 - do { \ 76 - typecheck(unsigned long, flags); \ 77 - asm volatile("movsg ccr,%0" \ 78 - : "=r"(flags) \ 79 - : \ 80 - : "memory"); \ 81 - \ 82 - /* shift ICC2.Z to bit 0 */ \ 83 - flags >>= 26; \ 84 - \ 85 - /* make flags 1 if interrupts disabled, 0 otherwise */ \ 86 - flags &= 1UL; \ 87 - } while(0) 88 - 89 - #define irqs_disabled() \ 90 - ({unsigned long flags; local_save_flags(flags); !!flags; }) 91 - 92 - #define local_irq_save(flags) \ 93 - do { \ 94 - typecheck(unsigned long, flags); \ 95 - local_save_flags(flags); \ 96 - local_irq_disable(); \ 97 - } while(0) 98 - 99 - #define local_irq_restore(flags) \ 100 - do { \ 101 - typecheck(unsigned long, flags); \ 102 - \ 103 - /* load the Z flag by turning 1 if disabled into 0 if disabled \ 104 - * and thus setting the Z flag but not the C flag */ \ 105 - asm volatile(" xoricc %0,#1,gr0,icc2 \n" \ 106 - /* then test Z=0 and C=0 */ \ 107 - " tihi icc2,gr0,#2 \n" \ 108 - : \ 109 - 
: "r"(flags) \ 110 - : "memory", "icc2" \ 111 - ); \ 112 - \ 113 - } while(0) 114 - 115 - /* 116 - * real interrupt flag manipulation 117 - */ 118 - #define __local_irq_disable() \ 119 - do { \ 120 - unsigned long psr; \ 121 - asm volatile(" movsg psr,%0 \n" \ 122 - " andi %0,%2,%0 \n" \ 123 - " ori %0,%1,%0 \n" \ 124 - " movgs %0,psr \n" \ 125 - : "=r"(psr) \ 126 - : "i" (PSR_PIL_14), "i" (~PSR_PIL) \ 127 - : "memory"); \ 128 - } while(0) 129 - 130 - #define __local_irq_enable() \ 131 - do { \ 132 - unsigned long psr; \ 133 - asm volatile(" movsg psr,%0 \n" \ 134 - " andi %0,%1,%0 \n" \ 135 - " movgs %0,psr \n" \ 136 - : "=r"(psr) \ 137 - : "i" (~PSR_PIL) \ 138 - : "memory"); \ 139 - } while(0) 140 - 141 - #define __local_save_flags(flags) \ 142 - do { \ 143 - typecheck(unsigned long, flags); \ 144 - asm("movsg psr,%0" \ 145 - : "=r"(flags) \ 146 - : \ 147 - : "memory"); \ 148 - } while(0) 149 - 150 - #define __local_irq_save(flags) \ 151 - do { \ 152 - unsigned long npsr; \ 153 - typecheck(unsigned long, flags); \ 154 - asm volatile(" movsg psr,%0 \n" \ 155 - " andi %0,%3,%1 \n" \ 156 - " ori %1,%2,%1 \n" \ 157 - " movgs %1,psr \n" \ 158 - : "=r"(flags), "=r"(npsr) \ 159 - : "i" (PSR_PIL_14), "i" (~PSR_PIL) \ 160 - : "memory"); \ 161 - } while(0) 162 - 163 - #define __local_irq_restore(flags) \ 164 - do { \ 165 - typecheck(unsigned long, flags); \ 166 - asm volatile(" movgs %0,psr \n" \ 167 - : \ 168 - : "r" (flags) \ 169 - : "memory"); \ 170 - } while(0) 171 - 172 - #define __irqs_disabled() \ 173 - ((__get_PSR() & PSR_PIL) >= PSR_PIL_14) 174 - 175 - /* 176 40 * Force strict CPU ordering. 177 41 */ 178 42 #define nop() asm volatile ("nop"::)
+43
arch/h8300/include/asm/irqflags.h
··· 1 + #ifndef _H8300_IRQFLAGS_H 2 + #define _H8300_IRQFLAGS_H 3 + 4 + static inline unsigned long arch_local_save_flags(void) 5 + { 6 + unsigned long flags; 7 + asm volatile ("stc ccr,%w0" : "=r" (flags)); 8 + return flags; 9 + } 10 + 11 + static inline void arch_local_irq_disable(void) 12 + { 13 + asm volatile ("orc #0x80,ccr" : : : "memory"); 14 + } 15 + 16 + static inline void arch_local_irq_enable(void) 17 + { 18 + asm volatile ("andc #0x7f,ccr" : : : "memory"); 19 + } 20 + 21 + static inline unsigned long arch_local_irq_save(void) 22 + { 23 + unsigned long flags = arch_local_save_flags(); 24 + arch_local_irq_disable(); 25 + return flags; 26 + } 27 + 28 + static inline void arch_local_irq_restore(unsigned long flags) 29 + { 30 + asm volatile ("ldc %w0,ccr" : : "r" (flags) : "memory"); 31 + } 32 + 33 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 34 + { 35 + return (flags & 0x80) == 0x80; 36 + } 37 + 38 + static inline bool arch_irqs_disabled(void) 39 + { 40 + return arch_irqs_disabled_flags(arch_local_save_flags()); 41 + } 42 + 43 + #endif /* _H8300_IRQFLAGS_H */
+1 -23
arch/h8300/include/asm/system.h
··· 2 2 #define _H8300_SYSTEM_H 3 3 4 4 #include <linux/linkage.h> 5 + #include <linux/irqflags.h> 5 6 6 7 struct pt_regs; 7 8 ··· 52 51 (last) = _last; \ 53 52 } 54 53 55 - #define __sti() asm volatile ("andc #0x7f,ccr") 56 - #define __cli() asm volatile ("orc #0x80,ccr") 57 - 58 - #define __save_flags(x) \ 59 - asm volatile ("stc ccr,%w0":"=r" (x)) 60 - 61 - #define __restore_flags(x) \ 62 - asm volatile ("ldc %w0,ccr": :"r" (x)) 63 - 64 - #define irqs_disabled() \ 65 - ({ \ 66 - unsigned char flags; \ 67 - __save_flags(flags); \ 68 - ((flags & 0x80) == 0x80); \ 69 - }) 70 - 71 54 #define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc") 72 - 73 - /* For spinlocks etc */ 74 - #define local_irq_disable() __cli() 75 - #define local_irq_enable() __sti() 76 - #define local_irq_save(x) ({ __save_flags(x); local_irq_disable(); }) 77 - #define local_irq_restore(x) __restore_flags(x) 78 - #define local_save_flags(x) __save_flags(x) 79 55 80 56 /* 81 57 * Force strict CPU ordering.
+94
arch/ia64/include/asm/irqflags.h
··· 1 + /* 2 + * IRQ flags defines. 3 + * 4 + * Copyright (C) 1998-2003 Hewlett-Packard Co 5 + * David Mosberger-Tang <davidm@hpl.hp.com> 6 + * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> 7 + * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> 8 + */ 9 + 10 + #ifndef _ASM_IA64_IRQFLAGS_H 11 + #define _ASM_IA64_IRQFLAGS_H 12 + 13 + #ifdef CONFIG_IA64_DEBUG_IRQ 14 + extern unsigned long last_cli_ip; 15 + static inline void arch_maybe_save_ip(unsigned long flags) 16 + { 17 + if (flags & IA64_PSR_I) 18 + last_cli_ip = ia64_getreg(_IA64_REG_IP); 19 + } 20 + #else 21 + #define arch_maybe_save_ip(flags) do {} while (0) 22 + #endif 23 + 24 + /* 25 + * - clearing psr.i is implicitly serialized (visible by next insn) 26 + * - setting psr.i requires data serialization 27 + * - we need a stop-bit before reading PSR because we sometimes 28 + * write a floating-point register right before reading the PSR 29 + * and that writes to PSR.mfl 30 + */ 31 + 32 + static inline unsigned long arch_local_save_flags(void) 33 + { 34 + ia64_stop(); 35 + #ifdef CONFIG_PARAVIRT 36 + return ia64_get_psr_i(); 37 + #else 38 + return ia64_getreg(_IA64_REG_PSR); 39 + #endif 40 + } 41 + 42 + static inline unsigned long arch_local_irq_save(void) 43 + { 44 + unsigned long flags = arch_local_save_flags(); 45 + 46 + ia64_stop(); 47 + ia64_rsm(IA64_PSR_I); 48 + arch_maybe_save_ip(flags); 49 + return flags; 50 + } 51 + 52 + static inline void arch_local_irq_disable(void) 53 + { 54 + #ifdef CONFIG_IA64_DEBUG_IRQ 55 + arch_local_irq_save(); 56 + #else 57 + ia64_stop(); 58 + ia64_rsm(IA64_PSR_I); 59 + #endif 60 + } 61 + 62 + static inline void arch_local_irq_enable(void) 63 + { 64 + ia64_stop(); 65 + ia64_ssm(IA64_PSR_I); 66 + ia64_srlz_d(); 67 + } 68 + 69 + static inline void arch_local_irq_restore(unsigned long flags) 70 + { 71 + #ifdef CONFIG_IA64_DEBUG_IRQ 72 + unsigned long old_psr = arch_local_save_flags(); 73 + #endif 74 + ia64_intrin_local_irq_restore(flags & IA64_PSR_I); 75 + 
arch_maybe_save_ip(old_psr & ~flags); 76 + } 77 + 78 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 79 + { 80 + return (flags & IA64_PSR_I) == 0; 81 + } 82 + 83 + static inline bool arch_irqs_disabled(void) 84 + { 85 + return arch_irqs_disabled_flags(arch_local_save_flags()); 86 + } 87 + 88 + static inline void arch_safe_halt(void) 89 + { 90 + ia64_pal_halt_light(); /* PAL_HALT_LIGHT */ 91 + } 92 + 93 + 94 + #endif /* _ASM_IA64_IRQFLAGS_H */
-76
arch/ia64/include/asm/system.h
··· 107 107 */ 108 108 #define set_mb(var, value) do { (var) = (value); mb(); } while (0) 109 109 110 - #define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */ 111 - 112 110 /* 113 111 * The group barrier in front of the rsm & ssm are necessary to ensure 114 112 * that none of the previous instructions in the same group are 115 113 * affected by the rsm/ssm. 116 114 */ 117 - /* For spinlocks etc */ 118 - 119 - /* 120 - * - clearing psr.i is implicitly serialized (visible by next insn) 121 - * - setting psr.i requires data serialization 122 - * - we need a stop-bit before reading PSR because we sometimes 123 - * write a floating-point register right before reading the PSR 124 - * and that writes to PSR.mfl 125 - */ 126 - #ifdef CONFIG_PARAVIRT 127 - #define __local_save_flags() ia64_get_psr_i() 128 - #else 129 - #define __local_save_flags() ia64_getreg(_IA64_REG_PSR) 130 - #endif 131 - 132 - #define __local_irq_save(x) \ 133 - do { \ 134 - ia64_stop(); \ 135 - (x) = __local_save_flags(); \ 136 - ia64_stop(); \ 137 - ia64_rsm(IA64_PSR_I); \ 138 - } while (0) 139 - 140 - #define __local_irq_disable() \ 141 - do { \ 142 - ia64_stop(); \ 143 - ia64_rsm(IA64_PSR_I); \ 144 - } while (0) 145 - 146 - #define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I) 147 - 148 - #ifdef CONFIG_IA64_DEBUG_IRQ 149 - 150 - extern unsigned long last_cli_ip; 151 - 152 - # define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP) 153 - 154 - # define local_irq_save(x) \ 155 - do { \ 156 - unsigned long __psr; \ 157 - \ 158 - __local_irq_save(__psr); \ 159 - if (__psr & IA64_PSR_I) \ 160 - __save_ip(); \ 161 - (x) = __psr; \ 162 - } while (0) 163 - 164 - # define local_irq_disable() do { unsigned long __x; local_irq_save(__x); } while (0) 165 - 166 - # define local_irq_restore(x) \ 167 - do { \ 168 - unsigned long __old_psr, __psr = (x); \ 169 - \ 170 - local_save_flags(__old_psr); \ 171 - __local_irq_restore(__psr); \ 172 - if ((__old_psr & IA64_PSR_I) && 
!(__psr & IA64_PSR_I)) \ 173 - __save_ip(); \ 174 - } while (0) 175 - 176 - #else /* !CONFIG_IA64_DEBUG_IRQ */ 177 - # define local_irq_save(x) __local_irq_save(x) 178 - # define local_irq_disable() __local_irq_disable() 179 - # define local_irq_restore(x) __local_irq_restore(x) 180 - #endif /* !CONFIG_IA64_DEBUG_IRQ */ 181 - 182 - #define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); }) 183 - #define local_save_flags(flags) ({ ia64_stop(); (flags) = __local_save_flags(); }) 184 - 185 - #define irqs_disabled() \ 186 - ({ \ 187 - unsigned long __ia64_id_flags; \ 188 - local_save_flags(__ia64_id_flags); \ 189 - (__ia64_id_flags & IA64_PSR_I) == 0; \ 190 - }) 191 115 192 116 #ifdef __KERNEL__ 193 117
+104
arch/m32r/include/asm/irqflags.h
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto 7 + * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org> 8 + */ 9 + 10 + #ifndef _ASM_M32R_IRQFLAGS_H 11 + #define _ASM_M32R_IRQFLAGS_H 12 + 13 + #include <linux/types.h> 14 + 15 + static inline unsigned long arch_local_save_flags(void) 16 + { 17 + unsigned long flags; 18 + asm volatile("mvfc %0,psw" : "=r"(flags)); 19 + return flags; 20 + } 21 + 22 + static inline void arch_local_irq_disable(void) 23 + { 24 + #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) 25 + asm volatile ( 26 + "clrpsw #0x40 -> nop" 27 + : : : "memory"); 28 + #else 29 + unsigned long tmpreg0, tmpreg1; 30 + asm volatile ( 31 + "ld24 %0, #0 ; Use 32-bit insn. \n\t" 32 + "mvfc %1, psw ; No interrupt can be accepted here. 
\n\t" 33 + "mvtc %0, psw \n\t" 34 + "and3 %0, %1, #0xffbf \n\t" 35 + "mvtc %0, psw \n\t" 36 + : "=&r" (tmpreg0), "=&r" (tmpreg1) 37 + : 38 + : "cbit", "memory"); 39 + #endif 40 + } 41 + 42 + static inline void arch_local_irq_enable(void) 43 + { 44 + #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) 45 + asm volatile ( 46 + "setpsw #0x40 -> nop" 47 + : : : "memory"); 48 + #else 49 + unsigned long tmpreg; 50 + asm volatile ( 51 + "mvfc %0, psw; \n\t" 52 + "or3 %0, %0, #0x0040; \n\t" 53 + "mvtc %0, psw; \n\t" 54 + : "=&r" (tmpreg) 55 + : 56 + : "cbit", "memory"); 57 + #endif 58 + } 59 + 60 + static inline unsigned long arch_local_irq_save(void) 61 + { 62 + unsigned long flags; 63 + 64 + #if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104)) 65 + asm volatile ( 66 + "mvfc %0, psw; \n\t" 67 + "clrpsw #0x40 -> nop; \n\t" 68 + : "=r" (flags) 69 + : 70 + : "memory"); 71 + #else 72 + unsigned long tmpreg; 73 + asm volatile ( 74 + "ld24 %1, #0 \n\t" 75 + "mvfc %0, psw \n\t" 76 + "mvtc %1, psw \n\t" 77 + "and3 %1, %0, #0xffbf \n\t" 78 + "mvtc %1, psw \n\t" 79 + : "=r" (flags), "=&r" (tmpreg) 80 + : 81 + : "cbit", "memory"); 82 + #endif 83 + return flags; 84 + } 85 + 86 + static inline void arch_local_irq_restore(unsigned long flags) 87 + { 88 + asm volatile("mvtc %0,psw" 89 + : 90 + : "r" (flags) 91 + : "cbit", "memory"); 92 + } 93 + 94 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 95 + { 96 + return !(flags & 0x40); 97 + } 98 + 99 + static inline bool arch_irqs_disabled(void) 100 + { 101 + return arch_irqs_disabled_flags(arch_local_save_flags()); 102 + } 103 + 104 + #endif /* _ASM_M32R_IRQFLAGS_H */
+1 -65
arch/m32r/include/asm/system.h
··· 11 11 */ 12 12 13 13 #include <linux/compiler.h> 14 + #include <linux/irqflags.h> 14 15 #include <asm/assembler.h> 15 16 16 17 #ifdef __KERNEL__ ··· 54 53 : "memory", "lr" \ 55 54 ); \ 56 55 } while(0) 57 - 58 - /* Interrupt Control */ 59 - #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) 60 - #define local_irq_enable() \ 61 - __asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory") 62 - #define local_irq_disable() \ 63 - __asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory") 64 - #else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ 65 - static inline void local_irq_enable(void) 66 - { 67 - unsigned long tmpreg; 68 - __asm__ __volatile__( 69 - "mvfc %0, psw; \n\t" 70 - "or3 %0, %0, #0x0040; \n\t" 71 - "mvtc %0, psw; \n\t" 72 - : "=&r" (tmpreg) : : "cbit", "memory"); 73 - } 74 - 75 - static inline void local_irq_disable(void) 76 - { 77 - unsigned long tmpreg0, tmpreg1; 78 - __asm__ __volatile__( 79 - "ld24 %0, #0 ; Use 32-bit insn. \n\t" 80 - "mvfc %1, psw ; No interrupt can be accepted here. 
\n\t" 81 - "mvtc %0, psw \n\t" 82 - "and3 %0, %1, #0xffbf \n\t" 83 - "mvtc %0, psw \n\t" 84 - : "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory"); 85 - } 86 - #endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ 87 - 88 - #define local_save_flags(x) \ 89 - __asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */) 90 - 91 - #define local_irq_restore(x) \ 92 - __asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \ 93 - : "r" (x) : "cbit", "memory") 94 - 95 - #if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104)) 96 - #define local_irq_save(x) \ 97 - __asm__ __volatile__( \ 98 - "mvfc %0, psw; \n\t" \ 99 - "clrpsw #0x40 -> nop; \n\t" \ 100 - : "=r" (x) : /* no input */ : "memory") 101 - #else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ 102 - #define local_irq_save(x) \ 103 - ({ \ 104 - unsigned long tmpreg; \ 105 - __asm__ __volatile__( \ 106 - "ld24 %1, #0 \n\t" \ 107 - "mvfc %0, psw \n\t" \ 108 - "mvtc %1, psw \n\t" \ 109 - "and3 %1, %0, #0xffbf \n\t" \ 110 - "mvtc %1, psw \n\t" \ 111 - : "=r" (x), "=&r" (tmpreg) \ 112 - : : "cbit", "memory"); \ 113 - }) 114 - #endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ 115 - 116 - #define irqs_disabled() \ 117 - ({ \ 118 - unsigned long flags; \ 119 - local_save_flags(flags); \ 120 - !(flags & 0x40); \ 121 - }) 122 56 123 57 #define nop() __asm__ __volatile__ ("nop" : : ) 124 58
+1 -1
arch/m68k/include/asm/entry_no.h
··· 28 28 * M68K COLDFIRE 29 29 */ 30 30 31 - #define ALLOWINT 0xf8ff 31 + #define ALLOWINT (~0x700) 32 32 33 33 #ifdef __ASSEMBLY__ 34 34
+76
arch/m68k/include/asm/irqflags.h
··· 1 + #ifndef _M68K_IRQFLAGS_H 2 + #define _M68K_IRQFLAGS_H 3 + 4 + #include <linux/types.h> 5 + #include <linux/hardirq.h> 6 + #include <linux/preempt.h> 7 + #include <asm/thread_info.h> 8 + #include <asm/entry.h> 9 + 10 + static inline unsigned long arch_local_save_flags(void) 11 + { 12 + unsigned long flags; 13 + asm volatile ("movew %%sr,%0" : "=d" (flags) : : "memory"); 14 + return flags; 15 + } 16 + 17 + static inline void arch_local_irq_disable(void) 18 + { 19 + #ifdef CONFIG_COLDFIRE 20 + asm volatile ( 21 + "move %/sr,%%d0 \n\t" 22 + "ori.l #0x0700,%%d0 \n\t" 23 + "move %%d0,%/sr \n" 24 + : /* no outputs */ 25 + : 26 + : "cc", "%d0", "memory"); 27 + #else 28 + asm volatile ("oriw #0x0700,%%sr" : : : "memory"); 29 + #endif 30 + } 31 + 32 + static inline void arch_local_irq_enable(void) 33 + { 34 + #if defined(CONFIG_COLDFIRE) 35 + asm volatile ( 36 + "move %/sr,%%d0 \n\t" 37 + "andi.l #0xf8ff,%%d0 \n\t" 38 + "move %%d0,%/sr \n" 39 + : /* no outputs */ 40 + : 41 + : "cc", "%d0", "memory"); 42 + #else 43 + # if defined(CONFIG_MMU) 44 + if (MACH_IS_Q40 || !hardirq_count()) 45 + # endif 46 + asm volatile ( 47 + "andiw %0,%%sr" 48 + : 49 + : "i" (ALLOWINT) 50 + : "memory"); 51 + #endif 52 + } 53 + 54 + static inline unsigned long arch_local_irq_save(void) 55 + { 56 + unsigned long flags = arch_local_save_flags(); 57 + arch_local_irq_disable(); 58 + return flags; 59 + } 60 + 61 + static inline void arch_local_irq_restore(unsigned long flags) 62 + { 63 + asm volatile ("movew %0,%%sr" : : "d" (flags) : "memory"); 64 + } 65 + 66 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 67 + { 68 + return (flags & ~ALLOWINT) != 0; 69 + } 70 + 71 + static inline bool arch_irqs_disabled(void) 72 + { 73 + return arch_irqs_disabled_flags(arch_local_save_flags()); 74 + } 75 + 76 + #endif /* _M68K_IRQFLAGS_H */
+1 -24
arch/m68k/include/asm/system_mm.h
··· 3 3 4 4 #include <linux/linkage.h> 5 5 #include <linux/kernel.h> 6 + #include <linux/irqflags.h> 6 7 #include <asm/segment.h> 7 8 #include <asm/entry.h> 8 9 ··· 62 61 #define smp_rmb() barrier() 63 62 #define smp_wmb() barrier() 64 63 #define smp_read_barrier_depends() ((void)0) 65 - 66 - /* interrupt control.. */ 67 - #if 0 68 - #define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory") 69 - #else 70 - #include <linux/hardirq.h> 71 - #define local_irq_enable() ({ \ 72 - if (MACH_IS_Q40 || !hardirq_count()) \ 73 - asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \ 74 - }) 75 - #endif 76 - #define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory") 77 - #define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory") 78 - #define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory") 79 - 80 - static inline int irqs_disabled(void) 81 - { 82 - unsigned long flags; 83 - local_save_flags(flags); 84 - return flags & ~ALLOWINT; 85 - } 86 - 87 - /* For spinlocks etc */ 88 - #define local_irq_save(x) ({ local_save_flags(x); local_irq_disable(); }) 89 64 90 65 #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 91 66
+1 -56
arch/m68k/include/asm/system_no.h
··· 2 2 #define _M68KNOMMU_SYSTEM_H 3 3 4 4 #include <linux/linkage.h> 5 + #include <linux/irqflags.h> 5 6 #include <asm/segment.h> 6 7 #include <asm/entry.h> 7 8 ··· 46 45 : "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1"); \ 47 46 (last) = _last; \ 48 47 } 49 - 50 - #ifdef CONFIG_COLDFIRE 51 - #define local_irq_enable() __asm__ __volatile__ ( \ 52 - "move %/sr,%%d0\n\t" \ 53 - "andi.l #0xf8ff,%%d0\n\t" \ 54 - "move %%d0,%/sr\n" \ 55 - : /* no outputs */ \ 56 - : \ 57 - : "cc", "%d0", "memory") 58 - #define local_irq_disable() __asm__ __volatile__ ( \ 59 - "move %/sr,%%d0\n\t" \ 60 - "ori.l #0x0700,%%d0\n\t" \ 61 - "move %%d0,%/sr\n" \ 62 - : /* no outputs */ \ 63 - : \ 64 - : "cc", "%d0", "memory") 65 - /* For spinlocks etc */ 66 - #define local_irq_save(x) __asm__ __volatile__ ( \ 67 - "movew %%sr,%0\n\t" \ 68 - "movew #0x0700,%%d0\n\t" \ 69 - "or.l %0,%%d0\n\t" \ 70 - "movew %%d0,%/sr" \ 71 - : "=d" (x) \ 72 - : \ 73 - : "cc", "%d0", "memory") 74 - #else 75 - 76 - /* portable version */ /* FIXME - see entry.h*/ 77 - #define ALLOWINT 0xf8ff 78 - 79 - #define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory") 80 - #define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory") 81 - #endif 82 - 83 - #define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory") 84 - #define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory") 85 - 86 - /* For spinlocks etc */ 87 - #ifndef local_irq_save 88 - #define local_irq_save(x) do { local_save_flags(x); local_irq_disable(); } while (0) 89 - #endif 90 - 91 - #define irqs_disabled() \ 92 - ({ \ 93 - unsigned long flags; \ 94 - local_save_flags(flags); \ 95 - ((flags & 0x0700) == 0x0700); \ 96 - }) 97 48 98 49 #define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc") 99 50 ··· 158 205 159 206 #define arch_align_stack(x) (x) 160 207 161 - 162 - static inline int irqs_disabled_flags(unsigned long flags) 163 - { 164 - if (flags & 0x0700) 
165 - return 0; 166 - else 167 - return 1; 168 - } 169 208 170 209 #endif /* _M68KNOMMU_SYSTEM_H */
-2
arch/m68knommu/kernel/asm-offsets.c
··· 74 74 75 75 DEFINE(PT_PTRACED, PT_PTRACED); 76 76 77 - DEFINE(THREAD_SIZE, THREAD_SIZE); 78 - 79 77 /* Offsets in thread_info structure */ 80 78 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 81 79 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
+1
arch/m68knommu/platform/coldfire/head.S
··· 15 15 #include <asm/coldfire.h> 16 16 #include <asm/mcfcache.h> 17 17 #include <asm/mcfsim.h> 18 + #include <asm/thread_info.h> 18 19 19 20 /*****************************************************************************/ 20 21
+101 -90
arch/microblaze/include/asm/irqflags.h
··· 9 9 #ifndef _ASM_MICROBLAZE_IRQFLAGS_H 10 10 #define _ASM_MICROBLAZE_IRQFLAGS_H 11 11 12 - #include <linux/irqflags.h> 12 + #include <linux/types.h> 13 13 #include <asm/registers.h> 14 14 15 - # if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 15 + #ifdef CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 16 16 17 - # define raw_local_irq_save(flags) \ 18 - do { \ 19 - asm volatile (" msrclr %0, %1; \ 20 - nop;" \ 21 - : "=r"(flags) \ 22 - : "i"(MSR_IE) \ 23 - : "memory"); \ 24 - } while (0) 25 - 26 - # define raw_local_irq_disable() \ 27 - do { \ 28 - asm volatile (" msrclr r0, %0; \ 29 - nop;" \ 30 - : \ 31 - : "i"(MSR_IE) \ 32 - : "memory"); \ 33 - } while (0) 34 - 35 - # define raw_local_irq_enable() \ 36 - do { \ 37 - asm volatile (" msrset r0, %0; \ 38 - nop;" \ 39 - : \ 40 - : "i"(MSR_IE) \ 41 - : "memory"); \ 42 - } while (0) 43 - 44 - # else /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR == 0 */ 45 - 46 - # define raw_local_irq_save(flags) \ 47 - do { \ 48 - register unsigned tmp; \ 49 - asm volatile (" mfs %0, rmsr; \ 50 - nop; \ 51 - andi %1, %0, %2; \ 52 - mts rmsr, %1; \ 53 - nop;" \ 54 - : "=r"(flags), "=r" (tmp) \ 55 - : "i"(~MSR_IE) \ 56 - : "memory"); \ 57 - } while (0) 58 - 59 - # define raw_local_irq_disable() \ 60 - do { \ 61 - register unsigned tmp; \ 62 - asm volatile (" mfs %0, rmsr; \ 63 - nop; \ 64 - andi %0, %0, %1; \ 65 - mts rmsr, %0; \ 66 - nop;" \ 67 - : "=r"(tmp) \ 68 - : "i"(~MSR_IE) \ 69 - : "memory"); \ 70 - } while (0) 71 - 72 - # define raw_local_irq_enable() \ 73 - do { \ 74 - register unsigned tmp; \ 75 - asm volatile (" mfs %0, rmsr; \ 76 - nop; \ 77 - ori %0, %0, %1; \ 78 - mts rmsr, %0; \ 79 - nop;" \ 80 - : "=r"(tmp) \ 81 - : "i"(MSR_IE) \ 82 - : "memory"); \ 83 - } while (0) 84 - 85 - # endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */ 86 - 87 - #define raw_local_irq_restore(flags) \ 88 - do { \ 89 - asm volatile (" mts rmsr, %0; \ 90 - nop;" \ 91 - : \ 92 - : "r"(flags) \ 93 - : "memory"); \ 94 - } while (0) 95 - 96 - static inline 
unsigned long get_msr(void) 17 + static inline unsigned long arch_local_irq_save(void) 97 18 { 98 19 unsigned long flags; 99 - asm volatile (" mfs %0, rmsr; \ 100 - nop;" \ 101 - : "=r"(flags) \ 102 - : \ 103 - : "memory"); \ 20 + asm volatile(" msrclr %0, %1 \n" 21 + " nop \n" 22 + : "=r"(flags) 23 + : "i"(MSR_IE) 24 + : "memory"); 104 25 return flags; 105 26 } 106 27 107 - #define raw_local_save_flags(flags) ((flags) = get_msr()) 108 - #define raw_irqs_disabled() ((get_msr() & MSR_IE) == 0) 109 - #define raw_irqs_disabled_flags(flags) ((flags & MSR_IE) == 0) 28 + static inline void arch_local_irq_disable(void) 29 + { 30 + /* this uses r0 without declaring it - is that correct? */ 31 + asm volatile(" msrclr r0, %0 \n" 32 + " nop \n" 33 + : 34 + : "i"(MSR_IE) 35 + : "memory"); 36 + } 37 + 38 + static inline void arch_local_irq_enable(void) 39 + { 40 + /* this uses r0 without declaring it - is that correct? */ 41 + asm volatile(" msrset r0, %0 \n" 42 + " nop \n" 43 + : 44 + : "i"(MSR_IE) 45 + : "memory"); 46 + } 47 + 48 + #else /* !CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */ 49 + 50 + static inline unsigned long arch_local_irq_save(void) 51 + { 52 + unsigned long flags, tmp; 53 + asm volatile (" mfs %0, rmsr \n" 54 + " nop \n" 55 + " andi %1, %0, %2 \n" 56 + " mts rmsr, %1 \n" 57 + " nop \n" 58 + : "=r"(flags), "=r"(tmp) 59 + : "i"(~MSR_IE) 60 + : "memory"); 61 + return flags; 62 + } 63 + 64 + static inline void arch_local_irq_disable(void) 65 + { 66 + unsigned long tmp; 67 + asm volatile(" mfs %0, rmsr \n" 68 + " nop \n" 69 + " andi %0, %0, %1 \n" 70 + " mts rmsr, %0 \n" 71 + " nop \n" 72 + : "=r"(tmp) 73 + : "i"(~MSR_IE) 74 + : "memory"); 75 + } 76 + 77 + static inline void arch_local_irq_enable(void) 78 + { 79 + unsigned long tmp; 80 + asm volatile(" mfs %0, rmsr \n" 81 + " nop \n" 82 + " ori %0, %0, %1 \n" 83 + " mts rmsr, %0 \n" 84 + " nop \n" 85 + : "=r"(tmp) 86 + : "i"(MSR_IE) 87 + : "memory"); 88 + } 89 + 90 + #endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 
*/ 91 + 92 + static inline unsigned long arch_local_save_flags(void) 93 + { 94 + unsigned long flags; 95 + asm volatile(" mfs %0, rmsr \n" 96 + " nop \n" 97 + : "=r"(flags) 98 + : 99 + : "memory"); 100 + return flags; 101 + } 102 + 103 + static inline void arch_local_irq_restore(unsigned long flags) 104 + { 105 + asm volatile(" mts rmsr, %0 \n" 106 + " nop \n" 107 + : 108 + : "r"(flags) 109 + : "memory"); 110 + } 111 + 112 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 113 + { 114 + return (flags & MSR_IE) == 0; 115 + } 116 + 117 + static inline bool arch_irqs_disabled(void) 118 + { 119 + return arch_irqs_disabled_flags(arch_local_save_flags()); 120 + } 110 121 111 122 #endif /* _ASM_MICROBLAZE_IRQFLAGS_H */
+29 -24
arch/mips/include/asm/irqflags.h
··· 17 17 #include <asm/hazards.h> 18 18 19 19 __asm__( 20 - " .macro raw_local_irq_enable \n" 20 + " .macro arch_local_irq_enable \n" 21 21 " .set push \n" 22 22 " .set reorder \n" 23 23 " .set noat \n" ··· 40 40 41 41 extern void smtc_ipi_replay(void); 42 42 43 - static inline void raw_local_irq_enable(void) 43 + static inline void arch_local_irq_enable(void) 44 44 { 45 45 #ifdef CONFIG_MIPS_MT_SMTC 46 46 /* ··· 50 50 smtc_ipi_replay(); 51 51 #endif 52 52 __asm__ __volatile__( 53 - "raw_local_irq_enable" 53 + "arch_local_irq_enable" 54 54 : /* no outputs */ 55 55 : /* no inputs */ 56 56 : "memory"); ··· 76 76 * Workaround: mask EXL bit of the result or place a nop before mfc0. 77 77 */ 78 78 __asm__( 79 - " .macro raw_local_irq_disable\n" 79 + " .macro arch_local_irq_disable\n" 80 80 " .set push \n" 81 81 " .set noat \n" 82 82 #ifdef CONFIG_MIPS_MT_SMTC ··· 97 97 " .set pop \n" 98 98 " .endm \n"); 99 99 100 - static inline void raw_local_irq_disable(void) 100 + static inline void arch_local_irq_disable(void) 101 101 { 102 102 __asm__ __volatile__( 103 - "raw_local_irq_disable" 103 + "arch_local_irq_disable" 104 104 : /* no outputs */ 105 105 : /* no inputs */ 106 106 : "memory"); 107 107 } 108 108 109 109 __asm__( 110 - " .macro raw_local_save_flags flags \n" 110 + " .macro arch_local_save_flags flags \n" 111 111 " .set push \n" 112 112 " .set reorder \n" 113 113 #ifdef CONFIG_MIPS_MT_SMTC ··· 118 118 " .set pop \n" 119 119 " .endm \n"); 120 120 121 - #define raw_local_save_flags(x) \ 122 - __asm__ __volatile__( \ 123 - "raw_local_save_flags %0" \ 124 - : "=r" (x)) 121 + static inline unsigned long arch_local_save_flags(void) 122 + { 123 + unsigned long flags; 124 + asm volatile("arch_local_save_flags %0" : "=r" (flags)); 125 + return flags; 126 + } 125 127 126 128 __asm__( 127 - " .macro raw_local_irq_save result \n" 129 + " .macro arch_local_irq_save result \n" 128 130 " .set push \n" 129 131 " .set reorder \n" 130 132 " .set noat \n" ··· 150 148 " .set pop \n" 
151 149 " .endm \n"); 152 150 153 - #define raw_local_irq_save(x) \ 154 - __asm__ __volatile__( \ 155 - "raw_local_irq_save\t%0" \ 156 - : "=r" (x) \ 157 - : /* no inputs */ \ 158 - : "memory") 151 + static inline unsigned long arch_local_irq_save(void) 152 + { 153 + unsigned long flags; 154 + asm volatile("arch_local_irq_save\t%0" 155 + : "=r" (flags) 156 + : /* no inputs */ 157 + : "memory"); 158 + return flags; 159 + } 159 160 160 161 __asm__( 161 - " .macro raw_local_irq_restore flags \n" 162 + " .macro arch_local_irq_restore flags \n" 162 163 " .set push \n" 163 164 " .set noreorder \n" 164 165 " .set noat \n" ··· 201 196 " .endm \n"); 202 197 203 198 204 - static inline void raw_local_irq_restore(unsigned long flags) 199 + static inline void arch_local_irq_restore(unsigned long flags) 205 200 { 206 201 unsigned long __tmp1; 207 202 ··· 216 211 #endif 217 212 218 213 __asm__ __volatile__( 219 - "raw_local_irq_restore\t%0" 214 + "arch_local_irq_restore\t%0" 220 215 : "=r" (__tmp1) 221 216 : "0" (flags) 222 217 : "memory"); 223 218 } 224 219 225 - static inline void __raw_local_irq_restore(unsigned long flags) 220 + static inline void __arch_local_irq_restore(unsigned long flags) 226 221 { 227 222 unsigned long __tmp1; 228 223 229 224 __asm__ __volatile__( 230 - "raw_local_irq_restore\t%0" 225 + "arch_local_irq_restore\t%0" 231 226 : "=r" (__tmp1) 232 227 : "0" (flags) 233 228 : "memory"); 234 229 } 235 230 236 - static inline int raw_irqs_disabled_flags(unsigned long flags) 231 + static inline int arch_irqs_disabled_flags(unsigned long flags) 237 232 { 238 233 #ifdef CONFIG_MIPS_MT_SMTC 239 234 /*
+2 -2
arch/mips/kernel/smtc.c
··· 1038 1038 * but it's more efficient, given that we're already 1039 1039 * running down the IPI queue. 1040 1040 */ 1041 - __raw_local_irq_restore(flags); 1041 + __arch_local_irq_restore(flags); 1042 1042 } 1043 1043 } 1044 1044 ··· 1190 1190 /* 1191 1191 ** But use a raw restore here to avoid recursion. 1192 1192 */ 1193 - __raw_local_irq_restore(flags); 1193 + __arch_local_irq_restore(flags); 1194 1194 1195 1195 if (pipi) { 1196 1196 self_ipi(pipi);
+123
arch/mn10300/include/asm/irqflags.h
··· 1 + /* MN10300 IRQ flag handling 2 + * 3 + * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. 4 + * Written by David Howells (dhowells@redhat.com) 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public Licence 8 + * as published by the Free Software Foundation; either version 9 + * 2 of the Licence, or (at your option) any later version. 10 + */ 11 + 12 + #ifndef _ASM_IRQFLAGS_H 13 + #define _ASM_IRQFLAGS_H 14 + 15 + #include <asm/cpu-regs.h> 16 + 17 + /* 18 + * interrupt control 19 + * - "disabled": run in IM1/2 20 + * - level 0 - GDB stub 21 + * - level 1 - virtual serial DMA (if present) 22 + * - level 5 - normal interrupt priority 23 + * - level 6 - timer interrupt 24 + * - "enabled": run in IM7 25 + */ 26 + #ifdef CONFIG_MN10300_TTYSM 27 + #define MN10300_CLI_LEVEL EPSW_IM_2 28 + #else 29 + #define MN10300_CLI_LEVEL EPSW_IM_1 30 + #endif 31 + 32 + #ifndef __ASSEMBLY__ 33 + 34 + static inline unsigned long arch_local_save_flags(void) 35 + { 36 + unsigned long flags; 37 + 38 + asm volatile("mov epsw,%0" : "=d"(flags)); 39 + return flags; 40 + } 41 + 42 + static inline void arch_local_irq_disable(void) 43 + { 44 + asm volatile( 45 + " and %0,epsw \n" 46 + " or %1,epsw \n" 47 + " nop \n" 48 + " nop \n" 49 + " nop \n" 50 + : 51 + : "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL) 52 + : "memory"); 53 + } 54 + 55 + static inline unsigned long arch_local_irq_save(void) 56 + { 57 + unsigned long flags; 58 + 59 + flags = arch_local_save_flags(); 60 + arch_local_irq_disable(); 61 + return flags; 62 + } 63 + 64 + /* 65 + * we make sure arch_irq_enable() doesn't cause priority inversion 66 + */ 67 + extern unsigned long __mn10300_irq_enabled_epsw; 68 + 69 + static inline void arch_local_irq_enable(void) 70 + { 71 + unsigned long tmp; 72 + 73 + asm volatile( 74 + " mov epsw,%0 \n" 75 + " and %1,%0 \n" 76 + " or %2,%0 \n" 77 + " mov %0,epsw \n" 78 + : "=&d"(tmp) 79 + : "i"(~EPSW_IM), 
"r"(__mn10300_irq_enabled_epsw) 80 + : "memory"); 81 + } 82 + 83 + static inline void arch_local_irq_restore(unsigned long flags) 84 + { 85 + asm volatile( 86 + " mov %0,epsw \n" 87 + " nop \n" 88 + " nop \n" 89 + " nop \n" 90 + : 91 + : "d"(flags) 92 + : "memory", "cc"); 93 + } 94 + 95 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 96 + { 97 + return (flags & EPSW_IM) <= MN10300_CLI_LEVEL; 98 + } 99 + 100 + static inline bool arch_irqs_disabled(void) 101 + { 102 + return arch_irqs_disabled_flags(arch_local_save_flags()); 103 + } 104 + 105 + /* 106 + * Hook to save power by halting the CPU 107 + * - called from the idle loop 108 + * - must reenable interrupts (which takes three instruction cycles to complete) 109 + */ 110 + static inline void arch_safe_halt(void) 111 + { 112 + asm volatile( 113 + " or %0,epsw \n" 114 + " nop \n" 115 + " nop \n" 116 + " bset %2,(%1) \n" 117 + : 118 + : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP) 119 + : "cc"); 120 + } 121 + 122 + #endif /* __ASSEMBLY__ */ 123 + #endif /* _ASM_IRQFLAGS_H */
+1 -108
arch/mn10300/include/asm/system.h
··· 17 17 #ifndef __ASSEMBLY__ 18 18 19 19 #include <linux/kernel.h> 20 + #include <linux/irqflags.h> 20 21 21 22 struct task_struct; 22 23 struct thread_struct; ··· 79 78 80 79 #define read_barrier_depends() do {} while (0) 81 80 #define smp_read_barrier_depends() do {} while (0) 82 - 83 - /*****************************************************************************/ 84 - /* 85 - * interrupt control 86 - * - "disabled": run in IM1/2 87 - * - level 0 - GDB stub 88 - * - level 1 - virtual serial DMA (if present) 89 - * - level 5 - normal interrupt priority 90 - * - level 6 - timer interrupt 91 - * - "enabled": run in IM7 92 - */ 93 - #ifdef CONFIG_MN10300_TTYSM 94 - #define MN10300_CLI_LEVEL EPSW_IM_2 95 - #else 96 - #define MN10300_CLI_LEVEL EPSW_IM_1 97 - #endif 98 - 99 - #define local_save_flags(x) \ 100 - do { \ 101 - typecheck(unsigned long, x); \ 102 - asm volatile( \ 103 - " mov epsw,%0 \n" \ 104 - : "=d"(x) \ 105 - ); \ 106 - } while (0) 107 - 108 - #define local_irq_disable() \ 109 - do { \ 110 - asm volatile( \ 111 - " and %0,epsw \n" \ 112 - " or %1,epsw \n" \ 113 - " nop \n" \ 114 - " nop \n" \ 115 - " nop \n" \ 116 - : \ 117 - : "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL) \ 118 - ); \ 119 - } while (0) 120 - 121 - #define local_irq_save(x) \ 122 - do { \ 123 - local_save_flags(x); \ 124 - local_irq_disable(); \ 125 - } while (0) 126 - 127 - /* 128 - * we make sure local_irq_enable() doesn't cause priority inversion 129 - */ 130 - #ifndef __ASSEMBLY__ 131 - 132 - extern unsigned long __mn10300_irq_enabled_epsw; 133 - 134 - #endif 135 - 136 - #define local_irq_enable() \ 137 - do { \ 138 - unsigned long tmp; \ 139 - \ 140 - asm volatile( \ 141 - " mov epsw,%0 \n" \ 142 - " and %1,%0 \n" \ 143 - " or %2,%0 \n" \ 144 - " mov %0,epsw \n" \ 145 - : "=&d"(tmp) \ 146 - : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw) \ 147 - : "cc" \ 148 - ); \ 149 - } while (0) 150 - 151 - #define local_irq_restore(x) \ 152 - do { \ 153 - typecheck(unsigned long, x); \ 
154 - asm volatile( \ 155 - " mov %0,epsw \n" \ 156 - " nop \n" \ 157 - " nop \n" \ 158 - " nop \n" \ 159 - : \ 160 - : "d"(x) \ 161 - : "memory", "cc" \ 162 - ); \ 163 - } while (0) 164 - 165 - #define irqs_disabled() \ 166 - ({ \ 167 - unsigned long flags; \ 168 - local_save_flags(flags); \ 169 - (flags & EPSW_IM) <= MN10300_CLI_LEVEL; \ 170 - }) 171 - 172 - /* hook to save power by halting the CPU 173 - * - called from the idle loop 174 - * - must reenable interrupts (which takes three instruction cycles to complete) 175 - */ 176 - #define safe_halt() \ 177 - do { \ 178 - asm volatile(" or %0,epsw \n" \ 179 - " nop \n" \ 180 - " nop \n" \ 181 - " bset %2,(%1) \n" \ 182 - : \ 183 - : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)\ 184 - : "cc" \ 185 - ); \ 186 - } while (0) 187 - 188 - #define STI or EPSW_IE|EPSW_IM,epsw 189 - #define CLI and ~EPSW_IM,epsw; or EPSW_IE|MN10300_CLI_LEVEL,epsw; nop; nop; nop 190 81 191 82 /*****************************************************************************/ 192 83 /*
+1
arch/mn10300/kernel/entry.S
··· 16 16 #include <linux/linkage.h> 17 17 #include <asm/smp.h> 18 18 #include <asm/system.h> 19 + #include <asm/irqflags.h> 19 20 #include <asm/thread_info.h> 20 21 #include <asm/intctl-regs.h> 21 22 #include <asm/busctl-regs.h>
+46
arch/parisc/include/asm/irqflags.h
··· 1 + #ifndef __PARISC_IRQFLAGS_H 2 + #define __PARISC_IRQFLAGS_H 3 + 4 + #include <linux/types.h> 5 + #include <asm/psw.h> 6 + 7 + static inline unsigned long arch_local_save_flags(void) 8 + { 9 + unsigned long flags; 10 + asm volatile("ssm 0, %0" : "=r" (flags) : : "memory"); 11 + return flags; 12 + } 13 + 14 + static inline void arch_local_irq_disable(void) 15 + { 16 + asm volatile("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory"); 17 + } 18 + 19 + static inline void arch_local_irq_enable(void) 20 + { 21 + asm volatile("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory"); 22 + } 23 + 24 + static inline unsigned long arch_local_irq_save(void) 25 + { 26 + unsigned long flags; 27 + asm volatile("rsm %1,%0" : "=r" (flags) : "i" (PSW_I) : "memory"); 28 + return flags; 29 + } 30 + 31 + static inline void arch_local_irq_restore(unsigned long flags) 32 + { 33 + asm volatile("mtsm %0" : : "r" (flags) : "memory"); 34 + } 35 + 36 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 37 + { 38 + return (flags & PSW_I) == 0; 39 + } 40 + 41 + static inline bool arch_irqs_disabled(void) 42 + { 43 + return arch_irqs_disabled_flags(arch_local_save_flags()); 44 + } 45 + 46 + #endif /* __PARISC_IRQFLAGS_H */
+1 -18
arch/parisc/include/asm/system.h
··· 1 1 #ifndef __PARISC_SYSTEM_H 2 2 #define __PARISC_SYSTEM_H 3 3 4 - #include <asm/psw.h> 4 + #include <linux/irqflags.h> 5 5 6 6 /* The program status word as bitfields. */ 7 7 struct pa_psw { ··· 47 47 #define switch_to(prev, next, last) do { \ 48 48 (last) = _switch_to(prev, next); \ 49 49 } while(0) 50 - 51 - /* interrupt control */ 52 - #define local_save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory") 53 - #define local_irq_disable() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" ) 54 - #define local_irq_enable() __asm__ __volatile__("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory" ) 55 - 56 - #define local_irq_save(x) \ 57 - __asm__ __volatile__("rsm %1,%0" : "=r" (x) :"i" (PSW_I) : "memory" ) 58 - #define local_irq_restore(x) \ 59 - __asm__ __volatile__("mtsm %0" : : "r" (x) : "memory" ) 60 - 61 - #define irqs_disabled() \ 62 - ({ \ 63 - unsigned long flags; \ 64 - local_save_flags(flags); \ 65 - (flags & PSW_I) == 0; \ 66 - }) 67 50 68 51 #define mfctl(reg) ({ \ 69 52 unsigned long cr; \
+72 -55
arch/powerpc/include/asm/hw_irq.h
··· 16 16 #ifdef CONFIG_PPC64 17 17 #include <asm/paca.h> 18 18 19 - static inline unsigned long local_get_flags(void) 19 + static inline unsigned long arch_local_save_flags(void) 20 20 { 21 21 unsigned long flags; 22 22 23 - __asm__ __volatile__("lbz %0,%1(13)" 24 - : "=r" (flags) 25 - : "i" (offsetof(struct paca_struct, soft_enabled))); 23 + asm volatile( 24 + "lbz %0,%1(13)" 25 + : "=r" (flags) 26 + : "i" (offsetof(struct paca_struct, soft_enabled))); 26 27 27 28 return flags; 28 29 } 29 30 30 - static inline unsigned long raw_local_irq_disable(void) 31 + static inline unsigned long arch_local_irq_disable(void) 31 32 { 32 33 unsigned long flags, zero; 33 34 34 - __asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)" 35 - : "=r" (flags), "=&r" (zero) 36 - : "i" (offsetof(struct paca_struct, soft_enabled)) 37 - : "memory"); 35 + asm volatile( 36 + "li %1,0; lbz %0,%2(13); stb %1,%2(13)" 37 + : "=r" (flags), "=&r" (zero) 38 + : "i" (offsetof(struct paca_struct, soft_enabled)) 39 + : "memory"); 38 40 39 41 return flags; 40 42 } 41 43 42 - extern void raw_local_irq_restore(unsigned long); 44 + extern void arch_local_irq_restore(unsigned long); 43 45 extern void iseries_handle_interrupts(void); 44 46 45 - #define raw_local_irq_enable() raw_local_irq_restore(1) 46 - #define raw_local_save_flags(flags) ((flags) = local_get_flags()) 47 - #define raw_local_irq_save(flags) ((flags) = raw_local_irq_disable()) 47 + static inline void arch_local_irq_enable(void) 48 + { 49 + arch_local_irq_restore(1); 50 + } 48 51 49 - #define raw_irqs_disabled() (local_get_flags() == 0) 50 - #define raw_irqs_disabled_flags(flags) ((flags) == 0) 52 + static inline unsigned long arch_local_irq_save(void) 53 + { 54 + return arch_local_irq_disable(); 55 + } 56 + 57 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 58 + { 59 + return flags == 0; 60 + } 61 + 62 + static inline bool arch_irqs_disabled(void) 63 + { 64 + return arch_irqs_disabled_flags(arch_local_save_flags()); 
65 + } 51 66 52 67 #ifdef CONFIG_PPC_BOOK3E 53 - #define __hard_irq_enable() __asm__ __volatile__("wrteei 1": : :"memory"); 54 - #define __hard_irq_disable() __asm__ __volatile__("wrteei 0": : :"memory"); 68 + #define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory"); 69 + #define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory"); 55 70 #else 56 71 #define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1) 57 72 #define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1) ··· 79 64 get_paca()->hard_enabled = 0; \ 80 65 } while(0) 81 66 82 - #else 67 + #else /* CONFIG_PPC64 */ 83 68 84 - #if defined(CONFIG_BOOKE) 85 69 #define SET_MSR_EE(x) mtmsr(x) 86 - #define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") 87 - #else 88 - #define SET_MSR_EE(x) mtmsr(x) 89 - #define raw_local_irq_restore(flags) mtmsr(flags) 90 - #endif 91 70 92 - static inline void raw_local_irq_disable(void) 71 + static inline unsigned long arch_local_save_flags(void) 93 72 { 94 - #ifdef CONFIG_BOOKE 95 - __asm__ __volatile__("wrteei 0": : :"memory"); 96 - #else 97 - unsigned long msr; 73 + return mfmsr(); 74 + } 98 75 99 - msr = mfmsr(); 100 - SET_MSR_EE(msr & ~MSR_EE); 76 + static inline void arch_local_irq_restore(unsigned long flags) 77 + { 78 + #if defined(CONFIG_BOOKE) 79 + asm volatile("wrtee %0" : : "r" (flags) : "memory"); 80 + #else 81 + mtmsr(flags); 101 82 #endif 102 83 } 103 84 104 - static inline void raw_local_irq_enable(void) 85 + static inline unsigned long arch_local_irq_save(void) 86 + { 87 + unsigned long flags = arch_local_save_flags(); 88 + #ifdef CONFIG_BOOKE 89 + asm volatile("wrteei 0" : : : "memory"); 90 + #else 91 + SET_MSR_EE(flags & ~MSR_EE); 92 + #endif 93 + return flags; 94 + } 95 + 96 + static inline void arch_local_irq_disable(void) 105 97 { 106 98 #ifdef CONFIG_BOOKE 107 - __asm__ __volatile__("wrteei 1": : :"memory"); 99 + asm volatile("wrteei 0" : : : "memory"); 108 100 #else 109 - unsigned long msr; 
101 + arch_local_irq_save(); 102 + #endif 103 + } 110 104 111 - msr = mfmsr(); 105 + static inline void arch_local_irq_enable(void) 106 + { 107 + #ifdef CONFIG_BOOKE 108 + asm volatile("wrteei 1" : : : "memory"); 109 + #else 110 + unsigned long msr = mfmsr(); 112 111 SET_MSR_EE(msr | MSR_EE); 113 112 #endif 114 113 } 115 114 116 - static inline void raw_local_irq_save_ptr(unsigned long *flags) 117 - { 118 - unsigned long msr; 119 - msr = mfmsr(); 120 - *flags = msr; 121 - #ifdef CONFIG_BOOKE 122 - __asm__ __volatile__("wrteei 0": : :"memory"); 123 - #else 124 - SET_MSR_EE(msr & ~MSR_EE); 125 - #endif 126 - } 127 - 128 - #define raw_local_save_flags(flags) ((flags) = mfmsr()) 129 - #define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags) 130 - #define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0) 131 - #define raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0) 132 - 133 - #define hard_irq_disable() raw_local_irq_disable() 134 - 135 - static inline int irqs_disabled_flags(unsigned long flags) 115 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 136 116 { 137 117 return (flags & MSR_EE) == 0; 138 118 } 119 + 120 + static inline bool arch_irqs_disabled(void) 121 + { 122 + return arch_irqs_disabled_flags(arch_local_save_flags()); 123 + } 124 + 125 + #define hard_irq_disable() arch_local_irq_disable() 139 126 140 127 #endif /* CONFIG_PPC64 */ 141 128
+1 -1
arch/powerpc/include/asm/irqflags.h
··· 6 6 7 7 #ifndef __ASSEMBLY__ 8 8 /* 9 - * Get definitions for raw_local_save_flags(x), etc. 9 + * Get definitions for arch_local_save_flags(x), etc. 10 10 */ 11 11 #include <asm/hw_irq.h> 12 12
+2 -2
arch/powerpc/kernel/exceptions-64s.S
··· 818 818 819 819 /* 820 820 * hash_page couldn't handle it, set soft interrupt enable back 821 - * to what it was before the trap. Note that .raw_local_irq_restore 821 + * to what it was before the trap. Note that .arch_local_irq_restore 822 822 * handles any interrupts pending at this point. 823 823 */ 824 824 ld r3,SOFTE(r1) 825 825 TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) 826 - bl .raw_local_irq_restore 826 + bl .arch_local_irq_restore 827 827 b 11f 828 828 829 829 /* We have a data breakpoint exception - handle it */
+2 -2
arch/powerpc/kernel/irq.c
··· 116 116 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); 117 117 } 118 118 119 - notrace void raw_local_irq_restore(unsigned long en) 119 + notrace void arch_local_irq_restore(unsigned long en) 120 120 { 121 121 /* 122 122 * get_paca()->soft_enabled = en; ··· 192 192 193 193 __hard_irq_enable(); 194 194 } 195 - EXPORT_SYMBOL(raw_local_irq_restore); 195 + EXPORT_SYMBOL(arch_local_irq_restore); 196 196 #endif /* CONFIG_PPC64 */ 197 197 198 198 static int show_other_interrupts(struct seq_file *p, int prec)
+30 -25
arch/s390/include/asm/irqflags.h
··· 8 8 9 9 #include <linux/types.h> 10 10 11 - /* store then or system mask. */ 12 - #define __raw_local_irq_stosm(__or) \ 11 + /* store then OR system mask. */ 12 + #define __arch_local_irq_stosm(__or) \ 13 13 ({ \ 14 14 unsigned long __mask; \ 15 15 asm volatile( \ ··· 18 18 __mask; \ 19 19 }) 20 20 21 - /* store then and system mask. */ 22 - #define __raw_local_irq_stnsm(__and) \ 21 + /* store then AND system mask. */ 22 + #define __arch_local_irq_stnsm(__and) \ 23 23 ({ \ 24 24 unsigned long __mask; \ 25 25 asm volatile( \ ··· 29 29 }) 30 30 31 31 /* set system mask. */ 32 - #define __raw_local_irq_ssm(__mask) \ 33 - ({ \ 34 - asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \ 35 - }) 36 - 37 - /* interrupt control.. */ 38 - static inline unsigned long raw_local_irq_enable(void) 32 + static inline void __arch_local_irq_ssm(unsigned long flags) 39 33 { 40 - return __raw_local_irq_stosm(0x03); 34 + asm volatile("ssm %0" : : "Q" (flags) : "memory"); 41 35 } 42 36 43 - static inline unsigned long raw_local_irq_disable(void) 37 + static inline unsigned long arch_local_save_flags(void) 44 38 { 45 - return __raw_local_irq_stnsm(0xfc); 39 + return __arch_local_irq_stosm(0x00); 46 40 } 47 41 48 - #define raw_local_save_flags(x) \ 49 - do { \ 50 - typecheck(unsigned long, x); \ 51 - (x) = __raw_local_irq_stosm(0x00); \ 52 - } while (0) 53 - 54 - static inline void raw_local_irq_restore(unsigned long flags) 42 + static inline unsigned long arch_local_irq_save(void) 55 43 { 56 - __raw_local_irq_ssm(flags); 44 + return __arch_local_irq_stnsm(0xfc); 57 45 } 58 46 59 - static inline int raw_irqs_disabled_flags(unsigned long flags) 47 + static inline void arch_local_irq_disable(void) 48 + { 49 + arch_local_irq_save(); 50 + } 51 + 52 + static inline void arch_local_irq_enable(void) 53 + { 54 + __arch_local_irq_stosm(0x03); 55 + } 56 + 57 + static inline void arch_local_irq_restore(unsigned long flags) 58 + { 59 + __arch_local_irq_ssm(flags); 60 + } 61 + 62 + static inline 
bool arch_irqs_disabled_flags(unsigned long flags) 60 63 { 61 64 return !(flags & (3UL << (BITS_PER_LONG - 8))); 62 65 } 63 66 64 - /* For spinlocks etc */ 65 - #define raw_local_irq_save(x) ((x) = raw_local_irq_disable()) 67 + static inline bool arch_irqs_disabled(void) 68 + { 69 + return arch_irqs_disabled_flags(arch_local_save_flags()); 70 + } 66 71 67 72 #endif /* __ASM_IRQFLAGS_H */
+1 -1
arch/s390/include/asm/system.h
··· 399 399 static inline void 400 400 __set_psw_mask(unsigned long mask) 401 401 { 402 - __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8))); 402 + __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8))); 403 403 } 404 404 405 405 #define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
+2 -2
arch/s390/kernel/mem_detect.c
··· 54 54 * right thing and we don't get scheduled away with low address 55 55 * protection disabled. 56 56 */ 57 - flags = __raw_local_irq_stnsm(0xf8); 57 + flags = __arch_local_irq_stnsm(0xf8); 58 58 __ctl_store(cr0, 0, 0); 59 59 __ctl_clear_bit(0, 28); 60 60 find_memory_chunks(chunk); 61 61 __ctl_load(cr0, 0, 0); 62 - __raw_local_irq_ssm(flags); 62 + arch_local_irq_restore(flags); 63 63 } 64 64 EXPORT_SYMBOL(detect_memory_layout);
+1 -2
arch/s390/mm/init.c
··· 50 50 */ 51 51 void __init paging_init(void) 52 52 { 53 - static const int ssm_mask = 0x04000000L; 54 53 unsigned long max_zone_pfns[MAX_NR_ZONES]; 55 54 unsigned long pgd_type; 56 55 ··· 71 72 __ctl_load(S390_lowcore.kernel_asce, 1, 1); 72 73 __ctl_load(S390_lowcore.kernel_asce, 7, 7); 73 74 __ctl_load(S390_lowcore.kernel_asce, 13, 13); 74 - __raw_local_irq_ssm(ssm_mask); 75 + arch_local_irq_restore(4UL << (BITS_PER_LONG - 8)); 75 76 76 77 atomic_set(&init_mm.context.attach_count, 1); 77 78
+2 -2
arch/s390/mm/maccess.c
··· 71 71 72 72 if (!count) 73 73 return 0; 74 - flags = __raw_local_irq_stnsm(0xf8UL); 74 + flags = __arch_local_irq_stnsm(0xf8UL); 75 75 asm volatile ( 76 76 "0: mvcle %1,%2,0x0\n" 77 77 "1: jo 0b\n" ··· 82 82 "+d" (_len2), "=m" (*((long *) dest)) 83 83 : "m" (*((long *) src)) 84 84 : "cc", "memory"); 85 - __raw_local_irq_ssm(flags); 85 + arch_local_irq_restore(flags); 86 86 return rc; 87 87 }
+99 -88
arch/score/include/asm/irqflags.h
··· 3 3 4 4 #ifndef __ASSEMBLY__ 5 5 6 - #define raw_local_irq_save(x) \ 7 - { \ 8 - __asm__ __volatile__( \ 9 - "mfcr r8, cr0;" \ 10 - "li r9, 0xfffffffe;" \ 11 - "nop;" \ 12 - "mv %0, r8;" \ 13 - "and r8, r8, r9;" \ 14 - "mtcr r8, cr0;" \ 15 - "nop;" \ 16 - "nop;" \ 17 - "nop;" \ 18 - "nop;" \ 19 - "nop;" \ 20 - : "=r" (x) \ 21 - : \ 22 - : "r8", "r9" \ 23 - ); \ 6 + #include <linux/types.h> 7 + 8 + static inline unsigned long arch_local_save_flags(void) 9 + { 10 + unsigned long flags; 11 + 12 + asm volatile( 13 + " mfcr r8, cr0 \n" 14 + " nop \n" 15 + " nop \n" 16 + " mv %0, r8 \n" 17 + " nop \n" 18 + " nop \n" 19 + " nop \n" 20 + " nop \n" 21 + " nop \n" 22 + " ldi r9, 0x1 \n" 23 + " and %0, %0, r9 \n" 24 + : "=r" (flags) 25 + : 26 + : "r8", "r9"); 27 + return flags; 24 28 } 25 29 26 - #define raw_local_irq_restore(x) \ 27 - { \ 28 - __asm__ __volatile__( \ 29 - "mfcr r8, cr0;" \ 30 - "ldi r9, 0x1;" \ 31 - "and %0, %0, r9;" \ 32 - "or r8, r8, %0;" \ 33 - "mtcr r8, cr0;" \ 34 - "nop;" \ 35 - "nop;" \ 36 - "nop;" \ 37 - "nop;" \ 38 - "nop;" \ 39 - : \ 40 - : "r"(x) \ 41 - : "r8", "r9" \ 42 - ); \ 30 + static inline unsigned long arch_local_irq_save(void) 31 + { 32 + unsigned long flags; 33 + 34 + asm volatile( 35 + " mfcr r8, cr0 \n" 36 + " li r9, 0xfffffffe \n" 37 + " nop \n" 38 + " mv %0, r8 \n" 39 + " and r8, r8, r9 \n" 40 + " mtcr r8, cr0 \n" 41 + " nop \n" 42 + " nop \n" 43 + " nop \n" 44 + " nop \n" 45 + " nop \n" 46 + : "=r" (flags) 47 + : 48 + : "r8", "r9", "memory"); 49 + 50 + return flags; 43 51 } 44 52 45 - #define raw_local_irq_enable(void) \ 46 - { \ 47 - __asm__ __volatile__( \ 48 - "mfcr\tr8,cr0;" \ 49 - "nop;" \ 50 - "nop;" \ 51 - "ori\tr8,0x1;" \ 52 - "mtcr\tr8,cr0;" \ 53 - "nop;" \ 54 - "nop;" \ 55 - "nop;" \ 56 - "nop;" \ 57 - "nop;" \ 58 - : \ 59 - : \ 60 - : "r8"); \ 53 + static inline void arch_local_irq_restore(unsigned long flags) 54 + { 55 + asm volatile( 56 + " mfcr r8, cr0 \n" 57 + " ldi r9, 0x1 \n" 58 + " and %0, %0, r9 \n" 59 + " or 
r8, r8, %0 \n" 60 + " mtcr r8, cr0 \n" 61 + " nop \n" 62 + " nop \n" 63 + " nop \n" 64 + " nop \n" 65 + " nop \n" 66 + : 67 + : "r"(flags) 68 + : "r8", "r9", "memory"); 61 69 } 62 70 63 - #define raw_local_irq_disable(void) \ 64 - { \ 65 - __asm__ __volatile__( \ 66 - "mfcr\tr8,cr0;" \ 67 - "nop;" \ 68 - "nop;" \ 69 - "srli\tr8,r8,1;" \ 70 - "slli\tr8,r8,1;" \ 71 - "mtcr\tr8,cr0;" \ 72 - "nop;" \ 73 - "nop;" \ 74 - "nop;" \ 75 - "nop;" \ 76 - "nop;" \ 77 - : \ 78 - : \ 79 - : "r8"); \ 71 + static inline void arch_local_irq_enable(void) 72 + { 73 + asm volatile( 74 + " mfcr r8,cr0 \n" 75 + " nop \n" 76 + " nop \n" 77 + " ori r8,0x1 \n" 78 + " mtcr r8,cr0 \n" 79 + " nop \n" 80 + " nop \n" 81 + " nop \n" 82 + " nop \n" 83 + " nop \n" 84 + : 85 + : 86 + : "r8", "memory"); 80 87 } 81 88 82 - #define raw_local_save_flags(x) \ 83 - { \ 84 - __asm__ __volatile__( \ 85 - "mfcr r8, cr0;" \ 86 - "nop;" \ 87 - "nop;" \ 88 - "mv %0, r8;" \ 89 - "nop;" \ 90 - "nop;" \ 91 - "nop;" \ 92 - "nop;" \ 93 - "nop;" \ 94 - "ldi r9, 0x1;" \ 95 - "and %0, %0, r9;" \ 96 - : "=r" (x) \ 97 - : \ 98 - : "r8", "r9" \ 99 - ); \ 89 + static inline void arch_local_irq_disable(void) 90 + { 91 + asm volatile( 92 + " mfcr r8,cr0 \n" 93 + " nop \n" 94 + " nop \n" 95 + " srli r8,r8,1 \n" 96 + " slli r8,r8,1 \n" 97 + " mtcr r8,cr0 \n" 98 + " nop \n" 99 + " nop \n" 100 + " nop \n" 101 + " nop \n" 102 + " nop \n" 103 + : 104 + : 105 + : "r8", "memory"); 100 106 } 101 107 102 - static inline int raw_irqs_disabled_flags(unsigned long flags) 108 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 103 109 { 104 110 return !(flags & 1); 105 111 } 106 112 107 - #endif 113 + static inline bool arch_irqs_disabled(void) 114 + { 115 + return arch_irqs_disabled_flags(arch_local_save_flags()); 116 + } 117 + 118 + #endif /* __ASSEMBLY__ */ 108 119 109 120 #endif /* _ASM_SCORE_IRQFLAGS_H */
+2 -2
arch/sh/include/asm/irqflags.h
··· 1 1 #ifndef __ASM_SH_IRQFLAGS_H 2 2 #define __ASM_SH_IRQFLAGS_H 3 3 4 - #define RAW_IRQ_DISABLED 0xf0 5 - #define RAW_IRQ_ENABLED 0x00 4 + #define ARCH_IRQ_DISABLED 0xf0 5 + #define ARCH_IRQ_ENABLED 0x00 6 6 7 7 #include <asm-generic/irqflags.h> 8 8
+6 -6
arch/sh/kernel/irq_32.c
··· 10 10 #include <linux/irqflags.h> 11 11 #include <linux/module.h> 12 12 13 - void notrace raw_local_irq_restore(unsigned long flags) 13 + void notrace arch_local_irq_restore(unsigned long flags) 14 14 { 15 15 unsigned long __dummy0, __dummy1; 16 16 17 - if (flags == RAW_IRQ_DISABLED) { 17 + if (flags == ARCH_IRQ_DISABLED) { 18 18 __asm__ __volatile__ ( 19 19 "stc sr, %0\n\t" 20 20 "or #0xf0, %0\n\t" ··· 33 33 #endif 34 34 "ldc %0, sr\n\t" 35 35 : "=&r" (__dummy0), "=r" (__dummy1) 36 - : "1" (~RAW_IRQ_DISABLED) 36 + : "1" (~ARCH_IRQ_DISABLED) 37 37 : "memory" 38 38 ); 39 39 } 40 40 } 41 - EXPORT_SYMBOL(raw_local_irq_restore); 41 + EXPORT_SYMBOL(arch_local_irq_restore); 42 42 43 - unsigned long notrace __raw_local_save_flags(void) 43 + unsigned long notrace arch_local_save_flags(void) 44 44 { 45 45 unsigned long flags; 46 46 ··· 54 54 55 55 return flags; 56 56 } 57 - EXPORT_SYMBOL(__raw_local_save_flags); 57 + EXPORT_SYMBOL(arch_local_save_flags);
+22 -15
arch/sparc/include/asm/irqflags_32.h
··· 5 5 * 6 6 * This file gets included from lowlevel asm headers too, to provide 7 7 * wrapped versions of the local_irq_*() APIs, based on the 8 - * raw_local_irq_*() functions from the lowlevel headers. 8 + * arch_local_irq_*() functions from the lowlevel headers. 9 9 */ 10 10 #ifndef _ASM_IRQFLAGS_H 11 11 #define _ASM_IRQFLAGS_H 12 12 13 13 #ifndef __ASSEMBLY__ 14 14 15 - extern void raw_local_irq_restore(unsigned long); 16 - extern unsigned long __raw_local_irq_save(void); 17 - extern void raw_local_irq_enable(void); 15 + #include <linux/types.h> 18 16 19 - static inline unsigned long getipl(void) 17 + extern void arch_local_irq_restore(unsigned long); 18 + extern unsigned long arch_local_irq_save(void); 19 + extern void arch_local_irq_enable(void); 20 + 21 + static inline unsigned long arch_local_save_flags(void) 20 22 { 21 - unsigned long retval; 23 + unsigned long flags; 22 24 23 - __asm__ __volatile__("rd %%psr, %0" : "=r" (retval)); 24 - return retval; 25 + asm volatile("rd %%psr, %0" : "=r" (flags)); 26 + return flags; 25 27 } 26 28 27 - #define raw_local_save_flags(flags) ((flags) = getipl()) 28 - #define raw_local_irq_save(flags) ((flags) = __raw_local_irq_save()) 29 - #define raw_local_irq_disable() ((void) __raw_local_irq_save()) 30 - #define raw_irqs_disabled() ((getipl() & PSR_PIL) != 0) 31 - 32 - static inline int raw_irqs_disabled_flags(unsigned long flags) 29 + static inline void arch_local_irq_disable(void) 33 30 { 34 - return ((flags & PSR_PIL) != 0); 31 + arch_local_irq_save(); 32 + } 33 + 34 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 35 + { 36 + return (flags & PSR_PIL) != 0; 37 + } 38 + 39 + static inline bool arch_irqs_disabled(void) 40 + { 41 + return arch_irqs_disabled_flags(arch_local_save_flags()); 35 42 } 36 43 37 44 #endif /* (__ASSEMBLY__) */
+9 -20
arch/sparc/include/asm/irqflags_64.h
··· 5 5 * 6 6 * This file gets included from lowlevel asm headers too, to provide 7 7 * wrapped versions of the local_irq_*() APIs, based on the 8 - * raw_local_irq_*() functions from the lowlevel headers. 8 + * arch_local_irq_*() functions from the lowlevel headers. 9 9 */ 10 10 #ifndef _ASM_IRQFLAGS_H 11 11 #define _ASM_IRQFLAGS_H ··· 14 14 15 15 #ifndef __ASSEMBLY__ 16 16 17 - static inline unsigned long __raw_local_save_flags(void) 17 + static inline unsigned long arch_local_save_flags(void) 18 18 { 19 19 unsigned long flags; 20 20 ··· 26 26 return flags; 27 27 } 28 28 29 - #define raw_local_save_flags(flags) \ 30 - do { (flags) = __raw_local_save_flags(); } while (0) 31 - 32 - static inline void raw_local_irq_restore(unsigned long flags) 29 + static inline void arch_local_irq_restore(unsigned long flags) 33 30 { 34 31 __asm__ __volatile__( 35 32 "wrpr %0, %%pil" ··· 36 39 ); 37 40 } 38 41 39 - static inline void raw_local_irq_disable(void) 42 + static inline void arch_local_irq_disable(void) 40 43 { 41 44 __asm__ __volatile__( 42 45 "wrpr %0, %%pil" ··· 46 49 ); 47 50 } 48 51 49 - static inline void raw_local_irq_enable(void) 52 + static inline void arch_local_irq_enable(void) 50 53 { 51 54 __asm__ __volatile__( 52 55 "wrpr 0, %%pil" ··· 56 59 ); 57 60 } 58 61 59 - static inline int raw_irqs_disabled_flags(unsigned long flags) 62 + static inline int arch_irqs_disabled_flags(unsigned long flags) 60 63 { 61 64 return (flags > 0); 62 65 } 63 66 64 - static inline int raw_irqs_disabled(void) 67 + static inline int arch_irqs_disabled(void) 65 68 { 66 - unsigned long flags = __raw_local_save_flags(); 67 - 68 - return raw_irqs_disabled_flags(flags); 69 + return arch_irqs_disabled_flags(arch_local_save_flags()); 69 70 } 70 71 71 - /* 72 - * For spinlocks, etc: 73 - */ 74 - static inline unsigned long __raw_local_irq_save(void) 72 + static inline unsigned long arch_local_irq_save(void) 75 73 { 76 74 unsigned long flags, tmp; 77 75 ··· 91 99 92 100 return flags; 93 101 
} 94 - 95 - #define raw_local_irq_save(flags) \ 96 - do { (flags) = __raw_local_irq_save(); } while (0) 97 102 98 103 #endif /* (__ASSEMBLY__) */ 99 104
+6 -7
arch/sparc/kernel/irq_32.c
··· 57 57 #define SMP_NOP2 58 58 #define SMP_NOP3 59 59 #endif /* SMP */ 60 - unsigned long __raw_local_irq_save(void) 60 + unsigned long arch_local_irq_save(void) 61 61 { 62 62 unsigned long retval; 63 63 unsigned long tmp; ··· 74 74 75 75 return retval; 76 76 } 77 + EXPORT_SYMBOL(arch_local_irq_save); 77 78 78 - void raw_local_irq_enable(void) 79 + void arch_local_irq_enable(void) 79 80 { 80 81 unsigned long tmp; 81 82 ··· 90 89 : "i" (PSR_PIL) 91 90 : "memory"); 92 91 } 92 + EXPORT_SYMBOL(arch_local_irq_enable); 93 93 94 - void raw_local_irq_restore(unsigned long old_psr) 94 + void arch_local_irq_restore(unsigned long old_psr) 95 95 { 96 96 unsigned long tmp; 97 97 ··· 107 105 : "i" (PSR_PIL), "r" (old_psr) 108 106 : "memory"); 109 107 } 110 - 111 - EXPORT_SYMBOL(__raw_local_irq_save); 112 - EXPORT_SYMBOL(raw_local_irq_enable); 113 - EXPORT_SYMBOL(raw_local_irq_restore); 108 + EXPORT_SYMBOL(arch_local_irq_restore); 114 109 115 110 /* 116 111 * Dave Redman (djhr@tadpole.co.uk)
+1 -1
arch/sparc/prom/p1275.c
··· 39 39 unsigned long flags; 40 40 41 41 raw_local_save_flags(flags); 42 - raw_local_irq_restore(PIL_NMI); 42 + raw_local_irq_restore((unsigned long)PIL_NMI); 43 43 raw_spin_lock(&prom_entry_lock); 44 44 45 45 prom_world(1);
+19 -17
arch/tile/include/asm/irqflags.h
··· 103 103 #define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR) 104 104 105 105 /* Disable interrupts. */ 106 - #define raw_local_irq_disable() \ 106 + #define arch_local_irq_disable() \ 107 107 interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS) 108 108 109 109 /* Disable all interrupts, including NMIs. */ 110 - #define raw_local_irq_disable_all() \ 110 + #define arch_local_irq_disable_all() \ 111 111 interrupt_mask_set_mask(-1UL) 112 112 113 113 /* Re-enable all maskable interrupts. */ 114 - #define raw_local_irq_enable() \ 114 + #define arch_local_irq_enable() \ 115 115 interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask)) 116 116 117 117 /* Disable or enable interrupts based on flag argument. */ 118 - #define raw_local_irq_restore(disabled) do { \ 118 + #define arch_local_irq_restore(disabled) do { \ 119 119 if (disabled) \ 120 - raw_local_irq_disable(); \ 120 + arch_local_irq_disable(); \ 121 121 else \ 122 - raw_local_irq_enable(); \ 122 + arch_local_irq_enable(); \ 123 123 } while (0) 124 124 125 125 /* Return true if "flags" argument means interrupts are disabled. */ 126 - #define raw_irqs_disabled_flags(flags) ((flags) != 0) 126 + #define arch_irqs_disabled_flags(flags) ((flags) != 0) 127 127 128 128 /* Return true if interrupts are currently disabled. */ 129 - #define raw_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR) 129 + #define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR) 130 130 131 131 /* Save whether interrupts are currently disabled. */ 132 - #define raw_local_save_flags(flags) ((flags) = raw_irqs_disabled()) 132 + #define arch_local_save_flags() arch_irqs_disabled() 133 133 134 134 /* Save whether interrupts are currently disabled, then disable them. 
*/ 135 - #define raw_local_irq_save(flags) \ 136 - do { raw_local_save_flags(flags); raw_local_irq_disable(); } while (0) 135 + #define arch_local_irq_save() ({ \ 136 + unsigned long __flags = arch_local_save_flags(); \ 137 + arch_local_irq_disable(); \ 138 + __flags; }) 137 139 138 140 /* Prevent the given interrupt from being enabled next time we enable irqs. */ 139 - #define raw_local_irq_mask(interrupt) \ 141 + #define arch_local_irq_mask(interrupt) \ 140 142 (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt)) 141 143 142 144 /* Prevent the given interrupt from being enabled immediately. */ 143 - #define raw_local_irq_mask_now(interrupt) do { \ 144 - raw_local_irq_mask(interrupt); \ 145 + #define arch_local_irq_mask_now(interrupt) do { \ 146 + arch_local_irq_mask(interrupt); \ 145 147 interrupt_mask_set(interrupt); \ 146 148 } while (0) 147 149 148 150 /* Allow the given interrupt to be enabled next time we enable irqs. */ 149 - #define raw_local_irq_unmask(interrupt) \ 151 + #define arch_local_irq_unmask(interrupt) \ 150 152 (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt)) 151 153 152 154 /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ 153 - #define raw_local_irq_unmask_now(interrupt) do { \ 154 - raw_local_irq_unmask(interrupt); \ 155 + #define arch_local_irq_unmask_now(interrupt) do { \ 156 + arch_local_irq_unmask(interrupt); \ 155 157 if (!irqs_disabled()) \ 156 158 interrupt_mask_reset(interrupt); \ 157 159 } while (0)
+12 -20
arch/x86/include/asm/irqflags.h
··· 61 61 #else 62 62 #ifndef __ASSEMBLY__ 63 63 64 - static inline unsigned long __raw_local_save_flags(void) 64 + static inline unsigned long arch_local_save_flags(void) 65 65 { 66 66 return native_save_fl(); 67 67 } 68 68 69 - static inline void raw_local_irq_restore(unsigned long flags) 69 + static inline void arch_local_irq_restore(unsigned long flags) 70 70 { 71 71 native_restore_fl(flags); 72 72 } 73 73 74 - static inline void raw_local_irq_disable(void) 74 + static inline void arch_local_irq_disable(void) 75 75 { 76 76 native_irq_disable(); 77 77 } 78 78 79 - static inline void raw_local_irq_enable(void) 79 + static inline void arch_local_irq_enable(void) 80 80 { 81 81 native_irq_enable(); 82 82 } ··· 85 85 * Used in the idle loop; sti takes one instruction cycle 86 86 * to complete: 87 87 */ 88 - static inline void raw_safe_halt(void) 88 + static inline void arch_safe_halt(void) 89 89 { 90 90 native_safe_halt(); 91 91 } ··· 102 102 /* 103 103 * For spinlocks, etc: 104 104 */ 105 - static inline unsigned long __raw_local_irq_save(void) 105 + static inline unsigned long arch_local_irq_save(void) 106 106 { 107 - unsigned long flags = __raw_local_save_flags(); 108 - 109 - raw_local_irq_disable(); 110 - 107 + unsigned long flags = arch_local_save_flags(); 108 + arch_local_irq_disable(); 111 109 return flags; 112 110 } 113 111 #else ··· 151 153 #endif /* CONFIG_PARAVIRT */ 152 154 153 155 #ifndef __ASSEMBLY__ 154 - #define raw_local_save_flags(flags) \ 155 - do { (flags) = __raw_local_save_flags(); } while (0) 156 - 157 - #define raw_local_irq_save(flags) \ 158 - do { (flags) = __raw_local_irq_save(); } while (0) 159 - 160 - static inline int raw_irqs_disabled_flags(unsigned long flags) 156 + static inline int arch_irqs_disabled_flags(unsigned long flags) 161 157 { 162 158 return !(flags & X86_EFLAGS_IF); 163 159 } 164 160 165 - static inline int raw_irqs_disabled(void) 161 + static inline int arch_irqs_disabled(void) 166 162 { 167 - unsigned long flags = 
__raw_local_save_flags(); 163 + unsigned long flags = arch_local_save_flags(); 168 164 169 - return raw_irqs_disabled_flags(flags); 165 + return arch_irqs_disabled_flags(flags); 170 166 } 171 167 172 168 #else
+8 -8
arch/x86/include/asm/paravirt.h
··· 105 105 } 106 106 #endif 107 107 108 - static inline void raw_safe_halt(void) 108 + static inline void arch_safe_halt(void) 109 109 { 110 110 PVOP_VCALL0(pv_irq_ops.safe_halt); 111 111 } ··· 829 829 #define __PV_IS_CALLEE_SAVE(func) \ 830 830 ((struct paravirt_callee_save) { func }) 831 831 832 - static inline unsigned long __raw_local_save_flags(void) 832 + static inline unsigned long arch_local_save_flags(void) 833 833 { 834 834 return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); 835 835 } 836 836 837 - static inline void raw_local_irq_restore(unsigned long f) 837 + static inline void arch_local_irq_restore(unsigned long f) 838 838 { 839 839 PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); 840 840 } 841 841 842 - static inline void raw_local_irq_disable(void) 842 + static inline void arch_local_irq_disable(void) 843 843 { 844 844 PVOP_VCALLEE0(pv_irq_ops.irq_disable); 845 845 } 846 846 847 - static inline void raw_local_irq_enable(void) 847 + static inline void arch_local_irq_enable(void) 848 848 { 849 849 PVOP_VCALLEE0(pv_irq_ops.irq_enable); 850 850 } 851 851 852 - static inline unsigned long __raw_local_irq_save(void) 852 + static inline unsigned long arch_local_irq_save(void) 853 853 { 854 854 unsigned long f; 855 855 856 - f = __raw_local_save_flags(); 857 - raw_local_irq_disable(); 856 + f = arch_local_save_flags(); 857 + arch_local_irq_disable(); 858 858 return f; 859 859 } 860 860
+1 -1
arch/x86/xen/spinlock.c
··· 224 224 goto out; 225 225 } 226 226 227 - flags = __raw_local_save_flags(); 227 + flags = arch_local_save_flags(); 228 228 if (irq_enable) { 229 229 ADD_STATS(taken_slow_irqenable, 1); 230 230 raw_local_irq_enable();
+58
arch/xtensa/include/asm/irqflags.h
··· 1 + /* 2 + * Xtensa IRQ flags handling functions 3 + * 4 + * This file is subject to the terms and conditions of the GNU General Public 5 + * License. See the file "COPYING" in the main directory of this archive 6 + * for more details. 7 + * 8 + * Copyright (C) 2001 - 2005 Tensilica Inc. 9 + */ 10 + 11 + #ifndef _XTENSA_IRQFLAGS_H 12 + #define _XTENSA_IRQFLAGS_H 13 + 14 + #include <linux/types.h> 15 + 16 + static inline unsigned long arch_local_save_flags(void) 17 + { 18 + unsigned long flags; 19 + asm volatile("rsr %0,"__stringify(PS) : "=a" (flags)); 20 + return flags; 21 + } 22 + 23 + static inline unsigned long arch_local_irq_save(void) 24 + { 25 + unsigned long flags; 26 + asm volatile("rsil %0, "__stringify(LOCKLEVEL) 27 + : "=a" (flags) :: "memory"); 28 + return flags; 29 + } 30 + 31 + static inline void arch_local_irq_disable(void) 32 + { 33 + arch_local_irq_save(); 34 + } 35 + 36 + static inline void arch_local_irq_enable(void) 37 + { 38 + unsigned long flags; 39 + asm volatile("rsil %0, 0" : "=a" (flags) :: "memory"); 40 + } 41 + 42 + static inline void arch_local_irq_restore(unsigned long flags) 43 + { 44 + asm volatile("wsr %0, "__stringify(PS)" ; rsync" 45 + :: "a" (flags) : "memory"); 46 + } 47 + 48 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 49 + { 50 + return (flags & 0xf) != 0; 51 + } 52 + 53 + static inline bool arch_irqs_disabled(void) 54 + { 55 + return arch_irqs_disabled_flags(arch_local_save_flags()); 56 + } 57 + 58 + #endif /* _XTENSA_IRQFLAGS_H */
+1 -32
arch/xtensa/include/asm/system.h
··· 12 12 #define _XTENSA_SYSTEM_H 13 13 14 14 #include <linux/stringify.h> 15 + #include <linux/irqflags.h> 15 16 16 17 #include <asm/processor.h> 17 - 18 - /* interrupt control */ 19 - 20 - #define local_save_flags(x) \ 21 - __asm__ __volatile__ ("rsr %0,"__stringify(PS) : "=a" (x)); 22 - #define local_irq_restore(x) do { \ 23 - __asm__ __volatile__ ("wsr %0, "__stringify(PS)" ; rsync" \ 24 - :: "a" (x) : "memory"); } while(0); 25 - #define local_irq_save(x) do { \ 26 - __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) \ 27 - : "=a" (x) :: "memory");} while(0); 28 - 29 - static inline void local_irq_disable(void) 30 - { 31 - unsigned long flags; 32 - __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) 33 - : "=a" (flags) :: "memory"); 34 - } 35 - static inline void local_irq_enable(void) 36 - { 37 - unsigned long flags; 38 - __asm__ __volatile__ ("rsil %0, 0" : "=a" (flags) :: "memory"); 39 - 40 - } 41 - 42 - static inline int irqs_disabled(void) 43 - { 44 - unsigned long flags; 45 - local_save_flags(flags); 46 - return flags & 0xf; 47 - } 48 - 49 18 50 19 #define smp_read_barrier_depends() do { } while(0) 51 20 #define read_barrier_depends() do { } while(0)
+1 -1
drivers/s390/char/sclp.c
··· 468 468 cr0_sync &= 0xffff00a0; 469 469 cr0_sync |= 0x00000200; 470 470 __ctl_load(cr0_sync, 0, 0); 471 - __raw_local_irq_stosm(0x01); 471 + __arch_local_irq_stosm(0x01); 472 472 /* Loop until driver state indicates finished request */ 473 473 while (sclp_running_state != sclp_running_state_idle) { 474 474 /* Check for expired request timer */
+3 -2
include/asm-generic/atomic.h
··· 43 43 */ 44 44 #define atomic_set(v, i) (((v)->counter) = (i)) 45 45 46 + #include <linux/irqflags.h> 46 47 #include <asm/system.h> 47 48 48 49 /** ··· 58 57 unsigned long flags; 59 58 int temp; 60 59 61 - raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ 60 + raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ 62 61 temp = v->counter; 63 62 temp += i; 64 63 v->counter = temp; ··· 79 78 unsigned long flags; 80 79 int temp; 81 80 82 - raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ 81 + raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ 83 82 temp = v->counter; 84 83 temp -= i; 85 84 v->counter = temp;
+1
include/asm-generic/cmpxchg-local.h
··· 2 2 #define __ASM_GENERIC_CMPXCHG_LOCAL_H 3 3 4 4 #include <linux/types.h> 5 + #include <linux/irqflags.h> 5 6 6 7 extern unsigned long wrong_size_cmpxchg(volatile void *ptr); 7 8
-1
include/asm-generic/hardirq.h
··· 3 3 4 4 #include <linux/cache.h> 5 5 #include <linux/threads.h> 6 - #include <linux/irq.h> 7 6 8 7 typedef struct { 9 8 unsigned int __softirq_pending;
+23 -29
include/asm-generic/irqflags.h
··· 5 5 * All architectures should implement at least the first two functions, 6 6 * usually inline assembly will be the best way. 7 7 */ 8 - #ifndef RAW_IRQ_DISABLED 9 - #define RAW_IRQ_DISABLED 0 10 - #define RAW_IRQ_ENABLED 1 8 + #ifndef ARCH_IRQ_DISABLED 9 + #define ARCH_IRQ_DISABLED 0 10 + #define ARCH_IRQ_ENABLED 1 11 11 #endif 12 12 13 13 /* read interrupt enabled status */ 14 - #ifndef __raw_local_save_flags 15 - unsigned long __raw_local_save_flags(void); 14 + #ifndef arch_local_save_flags 15 + unsigned long arch_local_save_flags(void); 16 16 #endif 17 17 18 18 /* set interrupt enabled status */ 19 - #ifndef raw_local_irq_restore 20 - void raw_local_irq_restore(unsigned long flags); 19 + #ifndef arch_local_irq_restore 20 + void arch_local_irq_restore(unsigned long flags); 21 21 #endif 22 22 23 23 /* get status and disable interrupts */ 24 - #ifndef __raw_local_irq_save 25 - static inline unsigned long __raw_local_irq_save(void) 24 + #ifndef arch_local_irq_save 25 + static inline unsigned long arch_local_irq_save(void) 26 26 { 27 27 unsigned long flags; 28 - flags = __raw_local_save_flags(); 29 - raw_local_irq_restore(RAW_IRQ_DISABLED); 28 + flags = arch_local_save_flags(); 29 + arch_local_irq_restore(ARCH_IRQ_DISABLED); 30 30 return flags; 31 31 } 32 32 #endif 33 33 34 34 /* test flags */ 35 - #ifndef raw_irqs_disabled_flags 36 - static inline int raw_irqs_disabled_flags(unsigned long flags) 35 + #ifndef arch_irqs_disabled_flags 36 + static inline int arch_irqs_disabled_flags(unsigned long flags) 37 37 { 38 - return flags == RAW_IRQ_DISABLED; 38 + return flags == ARCH_IRQ_DISABLED; 39 39 } 40 40 #endif 41 41 42 42 /* unconditionally enable interrupts */ 43 - #ifndef raw_local_irq_enable 44 - static inline void raw_local_irq_enable(void) 43 + #ifndef arch_local_irq_enable 44 + static inline void arch_local_irq_enable(void) 45 45 { 46 - raw_local_irq_restore(RAW_IRQ_ENABLED); 46 + arch_local_irq_restore(ARCH_IRQ_ENABLED); 47 47 } 48 48 #endif 49 49 50 50 /* 
unconditionally disable interrupts */ 51 - #ifndef raw_local_irq_disable 52 - static inline void raw_local_irq_disable(void) 51 + #ifndef arch_local_irq_disable 52 + static inline void arch_local_irq_disable(void) 53 53 { 54 - raw_local_irq_restore(RAW_IRQ_DISABLED); 54 + arch_local_irq_restore(ARCH_IRQ_DISABLED); 55 55 } 56 56 #endif 57 57 58 58 /* test hardware interrupt enable bit */ 59 - #ifndef raw_irqs_disabled 60 - static inline int raw_irqs_disabled(void) 59 + #ifndef arch_irqs_disabled 60 + static inline int arch_irqs_disabled(void) 61 61 { 62 - return raw_irqs_disabled_flags(__raw_local_save_flags()); 62 + return arch_irqs_disabled_flags(arch_local_save_flags()); 63 63 } 64 64 #endif 65 - 66 - #define raw_local_save_flags(flags) \ 67 - do { (flags) = __raw_local_save_flags(); } while (0) 68 - 69 - #define raw_local_irq_save(flags) \ 70 - do { (flags) = __raw_local_irq_save(); } while (0) 71 65 72 66 #endif /* __ASM_GENERIC_IRQFLAGS_H */
+65 -44
include/linux/irqflags.h
··· 12 12 #define _LINUX_TRACE_IRQFLAGS_H 13 13 14 14 #include <linux/typecheck.h> 15 + #include <asm/irqflags.h> 15 16 16 17 #ifdef CONFIG_TRACE_IRQFLAGS 17 18 extern void trace_softirqs_on(unsigned long ip); ··· 53 52 # define start_critical_timings() do { } while (0) 54 53 #endif 55 54 55 + /* 56 + * Wrap the arch provided IRQ routines to provide appropriate checks. 57 + */ 58 + #define raw_local_irq_disable() arch_local_irq_disable() 59 + #define raw_local_irq_enable() arch_local_irq_enable() 60 + #define raw_local_irq_save(flags) \ 61 + do { \ 62 + typecheck(unsigned long, flags); \ 63 + flags = arch_local_irq_save(); \ 64 + } while (0) 65 + #define raw_local_irq_restore(flags) \ 66 + do { \ 67 + typecheck(unsigned long, flags); \ 68 + arch_local_irq_restore(flags); \ 69 + } while (0) 70 + #define raw_local_save_flags(flags) \ 71 + do { \ 72 + typecheck(unsigned long, flags); \ 73 + flags = arch_local_save_flags(); \ 74 + } while (0) 75 + #define raw_irqs_disabled_flags(flags) \ 76 + ({ \ 77 + typecheck(unsigned long, flags); \ 78 + arch_irqs_disabled_flags(flags); \ 79 + }) 80 + #define raw_irqs_disabled() (arch_irqs_disabled()) 81 + #define raw_safe_halt() arch_safe_halt() 82 + 83 + /* 84 + * The local_irq_*() APIs are equal to the raw_local_irq*() 85 + * if !TRACE_IRQFLAGS. 
86 + */ 56 87 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT 57 - 58 - #include <asm/irqflags.h> 59 - 60 88 #define local_irq_enable() \ 61 89 do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) 62 90 #define local_irq_disable() \ 63 91 do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) 64 92 #define local_irq_save(flags) \ 65 93 do { \ 66 - typecheck(unsigned long, flags); \ 67 94 raw_local_irq_save(flags); \ 68 95 trace_hardirqs_off(); \ 69 96 } while (0) ··· 99 70 100 71 #define local_irq_restore(flags) \ 101 72 do { \ 102 - typecheck(unsigned long, flags); \ 103 73 if (raw_irqs_disabled_flags(flags)) { \ 104 74 raw_local_irq_restore(flags); \ 105 75 trace_hardirqs_off(); \ ··· 107 79 raw_local_irq_restore(flags); \ 108 80 } \ 109 81 } while (0) 110 - #else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ 111 - /* 112 - * The local_irq_*() APIs are equal to the raw_local_irq*() 113 - * if !TRACE_IRQFLAGS. 114 - */ 115 - # define raw_local_irq_disable() local_irq_disable() 116 - # define raw_local_irq_enable() local_irq_enable() 117 - # define raw_local_irq_save(flags) \ 118 - do { \ 119 - typecheck(unsigned long, flags); \ 120 - local_irq_save(flags); \ 121 - } while (0) 122 - # define raw_local_irq_restore(flags) \ 123 - do { \ 124 - typecheck(unsigned long, flags); \ 125 - local_irq_restore(flags); \ 126 - } while (0) 127 - #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ 128 - 129 - #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT 130 - #define safe_halt() \ 131 - do { \ 132 - trace_hardirqs_on(); \ 133 - raw_safe_halt(); \ 134 - } while (0) 135 - 136 82 #define local_save_flags(flags) \ 137 83 do { \ 138 - typecheck(unsigned long, flags); \ 139 84 raw_local_save_flags(flags); \ 140 85 } while (0) 141 86 142 - #define irqs_disabled() \ 143 - ({ \ 144 - unsigned long _flags; \ 145 - \ 146 - raw_local_save_flags(_flags); \ 147 - raw_irqs_disabled_flags(_flags); \ 148 - }) 87 + #define irqs_disabled_flags(flags) \ 88 + ({ \ 89 + raw_irqs_disabled_flags(flags); \ 90 + }) 149 91 
150 - #define irqs_disabled_flags(flags) \ 151 - ({ \ 152 - typecheck(unsigned long, flags); \ 153 - raw_irqs_disabled_flags(flags); \ 154 - }) 92 + #define irqs_disabled() \ 93 + ({ \ 94 + unsigned long _flags; \ 95 + raw_local_save_flags(_flags); \ 96 + raw_irqs_disabled_flags(_flags); \ 97 + }) 98 + 99 + #define safe_halt() \ 100 + do { \ 101 + trace_hardirqs_on(); \ 102 + raw_safe_halt(); \ 103 + } while (0) 104 + 105 + 106 + #else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ 107 + 108 + #define local_irq_enable() do { raw_local_irq_enable(); } while (0) 109 + #define local_irq_disable() do { raw_local_irq_disable(); } while (0) 110 + #define local_irq_save(flags) \ 111 + do { \ 112 + raw_local_irq_save(flags); \ 113 + } while (0) 114 + #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) 115 + #define local_save_flags(flags) do { raw_local_save_flags(flags); } while (0) 116 + #define irqs_disabled() (raw_irqs_disabled()) 117 + #define irqs_disabled_flags(flags) (raw_irqs_disabled_flags(flags)) 118 + #define safe_halt() do { raw_safe_halt(); } while (0) 119 + 155 120 #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ 156 121 157 122 #endif
+1
include/linux/spinlock.h
··· 50 50 #include <linux/preempt.h> 51 51 #include <linux/linkage.h> 52 52 #include <linux/compiler.h> 53 + #include <linux/irqflags.h> 53 54 #include <linux/thread_info.h> 54 55 #include <linux/kernel.h> 55 56 #include <linux/stringify.h>