Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-irqflags

* git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-irqflags:
Fix IRQ flag handling naming
MIPS: Add missing #inclusions of <linux/irq.h>
smc91x: Add missing #inclusion of <linux/irq.h>
Drop a couple of unnecessary asm/system.h inclusions
SH: Add missing consts to sys_execve() declaration
Blackfin: Rename IRQ flags handling functions
Blackfin: Add missing dep to asm/irqflags.h
Blackfin: Rename DES PC2() symbol to avoid collision
Blackfin: Split the BF532 BFIN_*_FIO_FLAG() functions to their own header
Blackfin: Split PLL code from mach-specific cdef headers

+2356 -1868
+67
arch/alpha/include/asm/irqflags.h
··· 1 + #ifndef __ALPHA_IRQFLAGS_H 2 + #define __ALPHA_IRQFLAGS_H 3 + 4 + #include <asm/system.h> 5 + 6 + #define IPL_MIN 0 7 + #define IPL_SW0 1 8 + #define IPL_SW1 2 9 + #define IPL_DEV0 3 10 + #define IPL_DEV1 4 11 + #define IPL_TIMER 5 12 + #define IPL_PERF 6 13 + #define IPL_POWERFAIL 6 14 + #define IPL_MCHECK 7 15 + #define IPL_MAX 7 16 + 17 + #ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK 18 + #undef IPL_MIN 19 + #define IPL_MIN __min_ipl 20 + extern int __min_ipl; 21 + #endif 22 + 23 + #define getipl() (rdps() & 7) 24 + #define setipl(ipl) ((void) swpipl(ipl)) 25 + 26 + static inline unsigned long arch_local_save_flags(void) 27 + { 28 + return rdps(); 29 + } 30 + 31 + static inline void arch_local_irq_disable(void) 32 + { 33 + setipl(IPL_MAX); 34 + barrier(); 35 + } 36 + 37 + static inline unsigned long arch_local_irq_save(void) 38 + { 39 + unsigned long flags = swpipl(IPL_MAX); 40 + barrier(); 41 + return flags; 42 + } 43 + 44 + static inline void arch_local_irq_enable(void) 45 + { 46 + barrier(); 47 + setipl(IPL_MIN); 48 + } 49 + 50 + static inline void arch_local_irq_restore(unsigned long flags) 51 + { 52 + barrier(); 53 + setipl(flags); 54 + barrier(); 55 + } 56 + 57 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 58 + { 59 + return flags == IPL_MAX; 60 + } 61 + 62 + static inline bool arch_irqs_disabled(void) 63 + { 64 + return arch_irqs_disabled_flags(getipl()); 65 + } 66 + 67 + #endif /* __ALPHA_IRQFLAGS_H */
-28
arch/alpha/include/asm/system.h
··· 259 259 __CALL_PAL_W1(wrusp, unsigned long); 260 260 __CALL_PAL_W1(wrvptptr, unsigned long); 261 261 262 - #define IPL_MIN 0 263 - #define IPL_SW0 1 264 - #define IPL_SW1 2 265 - #define IPL_DEV0 3 266 - #define IPL_DEV1 4 267 - #define IPL_TIMER 5 268 - #define IPL_PERF 6 269 - #define IPL_POWERFAIL 6 270 - #define IPL_MCHECK 7 271 - #define IPL_MAX 7 272 - 273 - #ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK 274 - #undef IPL_MIN 275 - #define IPL_MIN __min_ipl 276 - extern int __min_ipl; 277 - #endif 278 - 279 - #define getipl() (rdps() & 7) 280 - #define setipl(ipl) ((void) swpipl(ipl)) 281 - 282 - #define local_irq_disable() do { setipl(IPL_MAX); barrier(); } while(0) 283 - #define local_irq_enable() do { barrier(); setipl(IPL_MIN); } while(0) 284 - #define local_save_flags(flags) ((flags) = rdps()) 285 - #define local_irq_save(flags) do { (flags) = swpipl(IPL_MAX); barrier(); } while(0) 286 - #define local_irq_restore(flags) do { barrier(); setipl(flags); barrier(); } while(0) 287 - 288 - #define irqs_disabled() (getipl() == IPL_MAX) 289 - 290 262 /* 291 263 * TB routines.. 292 264 */
+84 -61
arch/arm/include/asm/irqflags.h
··· 10 10 */ 11 11 #if __LINUX_ARM_ARCH__ >= 6 12 12 13 - #define raw_local_irq_save(x) \ 14 - ({ \ 15 - __asm__ __volatile__( \ 16 - "mrs %0, cpsr @ local_irq_save\n" \ 17 - "cpsid i" \ 18 - : "=r" (x) : : "memory", "cc"); \ 19 - }) 13 + static inline unsigned long arch_local_irq_save(void) 14 + { 15 + unsigned long flags; 20 16 21 - #define raw_local_irq_enable() __asm__("cpsie i @ __sti" : : : "memory", "cc") 22 - #define raw_local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc") 17 + asm volatile( 18 + " mrs %0, cpsr @ arch_local_irq_save\n" 19 + " cpsid i" 20 + : "=r" (flags) : : "memory", "cc"); 21 + return flags; 22 + } 23 + 24 + static inline void arch_local_irq_enable(void) 25 + { 26 + asm volatile( 27 + " cpsie i @ arch_local_irq_enable" 28 + : 29 + : 30 + : "memory", "cc"); 31 + } 32 + 33 + static inline void arch_local_irq_disable(void) 34 + { 35 + asm volatile( 36 + " cpsid i @ arch_local_irq_disable" 37 + : 38 + : 39 + : "memory", "cc"); 40 + } 41 + 23 42 #define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc") 24 43 #define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc") 25 - 26 44 #else 27 45 28 46 /* 29 47 * Save the current interrupt enable state & disable IRQs 30 48 */ 31 - #define raw_local_irq_save(x) \ 32 - ({ \ 33 - unsigned long temp; \ 34 - (void) (&temp == &x); \ 35 - __asm__ __volatile__( \ 36 - "mrs %0, cpsr @ local_irq_save\n" \ 37 - " orr %1, %0, #128\n" \ 38 - " msr cpsr_c, %1" \ 39 - : "=r" (x), "=r" (temp) \ 40 - : \ 41 - : "memory", "cc"); \ 42 - }) 43 - 49 + static inline unsigned long arch_local_irq_save(void) 50 + { 51 + unsigned long flags, temp; 52 + 53 + asm volatile( 54 + " mrs %0, cpsr @ arch_local_irq_save\n" 55 + " orr %1, %0, #128\n" 56 + " msr cpsr_c, %1" 57 + : "=r" (flags), "=r" (temp) 58 + : 59 + : "memory", "cc"); 60 + return flags; 61 + } 62 + 44 63 /* 45 64 * Enable IRQs 46 65 */ 47 - #define raw_local_irq_enable() \ 48 - ({ \ 49 - unsigned long temp; \ 50 - 
__asm__ __volatile__( \ 51 - "mrs %0, cpsr @ local_irq_enable\n" \ 52 - " bic %0, %0, #128\n" \ 53 - " msr cpsr_c, %0" \ 54 - : "=r" (temp) \ 55 - : \ 56 - : "memory", "cc"); \ 57 - }) 66 + static inline void arch_local_irq_enable(void) 67 + { 68 + unsigned long temp; 69 + asm volatile( 70 + " mrs %0, cpsr @ arch_local_irq_enable\n" 71 + " bic %0, %0, #128\n" 72 + " msr cpsr_c, %0" 73 + : "=r" (temp) 74 + : 75 + : "memory", "cc"); 76 + } 58 77 59 78 /* 60 79 * Disable IRQs 61 80 */ 62 - #define raw_local_irq_disable() \ 63 - ({ \ 64 - unsigned long temp; \ 65 - __asm__ __volatile__( \ 66 - "mrs %0, cpsr @ local_irq_disable\n" \ 67 - " orr %0, %0, #128\n" \ 68 - " msr cpsr_c, %0" \ 69 - : "=r" (temp) \ 70 - : \ 71 - : "memory", "cc"); \ 72 - }) 81 + static inline void arch_local_irq_disable(void) 82 + { 83 + unsigned long temp; 84 + asm volatile( 85 + " mrs %0, cpsr @ arch_local_irq_disable\n" 86 + " orr %0, %0, #128\n" 87 + " msr cpsr_c, %0" 88 + : "=r" (temp) 89 + : 90 + : "memory", "cc"); 91 + } 73 92 74 93 /* 75 94 * Enable FIQs ··· 125 106 /* 126 107 * Save the current interrupt enable state. 
127 108 */ 128 - #define raw_local_save_flags(x) \ 129 - ({ \ 130 - __asm__ __volatile__( \ 131 - "mrs %0, cpsr @ local_save_flags" \ 132 - : "=r" (x) : : "memory", "cc"); \ 133 - }) 109 + static inline unsigned long arch_local_save_flags(void) 110 + { 111 + unsigned long flags; 112 + asm volatile( 113 + " mrs %0, cpsr @ local_save_flags" 114 + : "=r" (flags) : : "memory", "cc"); 115 + return flags; 116 + } 134 117 135 118 /* 136 119 * restore saved IRQ & FIQ state 137 120 */ 138 - #define raw_local_irq_restore(x) \ 139 - __asm__ __volatile__( \ 140 - "msr cpsr_c, %0 @ local_irq_restore\n" \ 141 - : \ 142 - : "r" (x) \ 143 - : "memory", "cc") 121 + static inline void arch_local_irq_restore(unsigned long flags) 122 + { 123 + asm volatile( 124 + " msr cpsr_c, %0 @ local_irq_restore" 125 + : 126 + : "r" (flags) 127 + : "memory", "cc"); 128 + } 144 129 145 - #define raw_irqs_disabled_flags(flags) \ 146 - ({ \ 147 - (int)((flags) & PSR_I_BIT); \ 148 - }) 130 + static inline int arch_irqs_disabled_flags(unsigned long flags) 131 + { 132 + return flags & PSR_I_BIT; 133 + } 149 134 150 135 #endif 151 136 #endif
+11 -18
arch/avr32/include/asm/irqflags.h
··· 8 8 #ifndef __ASM_AVR32_IRQFLAGS_H 9 9 #define __ASM_AVR32_IRQFLAGS_H 10 10 11 + #include <linux/types.h> 11 12 #include <asm/sysreg.h> 12 13 13 - static inline unsigned long __raw_local_save_flags(void) 14 + static inline unsigned long arch_local_save_flags(void) 14 15 { 15 16 return sysreg_read(SR); 16 17 } 17 - 18 - #define raw_local_save_flags(x) \ 19 - do { (x) = __raw_local_save_flags(); } while (0) 20 18 21 19 /* 22 20 * This will restore ALL status register flags, not only the interrupt ··· 23 25 * The empty asm statement informs the compiler of this fact while 24 26 * also serving as a barrier. 25 27 */ 26 - static inline void raw_local_irq_restore(unsigned long flags) 28 + static inline void arch_local_irq_restore(unsigned long flags) 27 29 { 28 30 sysreg_write(SR, flags); 29 31 asm volatile("" : : : "memory", "cc"); 30 32 } 31 33 32 - static inline void raw_local_irq_disable(void) 34 + static inline void arch_local_irq_disable(void) 33 35 { 34 36 asm volatile("ssrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory"); 35 37 } 36 38 37 - static inline void raw_local_irq_enable(void) 39 + static inline void arch_local_irq_enable(void) 38 40 { 39 41 asm volatile("csrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory"); 40 42 } 41 43 42 - static inline int raw_irqs_disabled_flags(unsigned long flags) 44 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 43 45 { 44 46 return (flags & SYSREG_BIT(GM)) != 0; 45 47 } 46 48 47 - static inline int raw_irqs_disabled(void) 49 + static inline bool arch_irqs_disabled(void) 48 50 { 49 - unsigned long flags = __raw_local_save_flags(); 50 - 51 - return raw_irqs_disabled_flags(flags); 51 + return arch_irqs_disabled_flags(arch_local_save_flags()); 52 52 } 53 53 54 - static inline unsigned long __raw_local_irq_save(void) 54 + static inline unsigned long arch_local_irq_save(void) 55 55 { 56 - unsigned long flags = __raw_local_save_flags(); 56 + unsigned long flags = arch_local_save_flags(); 57 57 58 - raw_local_irq_disable(); 
58 + arch_local_irq_disable(); 59 59 60 60 return flags; 61 61 } 62 - 63 - #define raw_local_irq_save(flags) \ 64 - do { (flags) = __raw_local_irq_save(); } while (0) 65 62 66 63 #endif /* __ASM_AVR32_IRQFLAGS_H */
+4 -4
arch/blackfin/include/asm/ipipe.h
··· 49 49 #define prepare_arch_switch(next) \ 50 50 do { \ 51 51 ipipe_schedule_notify(current, next); \ 52 - local_irq_disable_hw(); \ 52 + hard_local_irq_disable(); \ 53 53 } while (0) 54 54 55 55 #define task_hijacked(p) \ ··· 57 57 int __x__ = __ipipe_root_domain_p; \ 58 58 __clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \ 59 59 if (__x__) \ 60 - local_irq_enable_hw(); \ 60 + hard_local_irq_enable(); \ 61 61 !__x__; \ 62 62 }) 63 63 ··· 167 167 #define __ipipe_run_isr(ipd, irq) \ 168 168 do { \ 169 169 if (!__ipipe_pipeline_head_p(ipd)) \ 170 - local_irq_enable_hw(); \ 170 + hard_local_irq_enable(); \ 171 171 if (ipd == ipipe_root_domain) { \ 172 172 if (unlikely(ipipe_virtual_irq_p(irq))) { \ 173 173 irq_enter(); \ ··· 183 183 __ipipe_run_irqtail(); \ 184 184 __set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ 185 185 } \ 186 - local_irq_disable_hw(); \ 186 + hard_local_irq_disable(); \ 187 187 } while (0) 188 188 189 189 #define __ipipe_syscall_watched_p(p, sc) \
+150 -145
arch/blackfin/include/asm/irqflags.h
··· 8 8 #ifndef __ASM_BFIN_IRQFLAGS_H__ 9 9 #define __ASM_BFIN_IRQFLAGS_H__ 10 10 11 + #include <mach/blackfin.h> 12 + 11 13 #ifdef CONFIG_SMP 12 14 # include <asm/pda.h> 13 15 # include <asm/processor.h> ··· 33 31 return flags; 34 32 } 35 33 36 - #ifdef CONFIG_IPIPE 37 - 38 - #include <linux/compiler.h> 39 - #include <linux/ipipe_base.h> 40 - #include <linux/ipipe_trace.h> 41 - 42 34 #ifdef CONFIG_DEBUG_HWERR 43 35 # define bfin_no_irqs 0x3f 44 36 #else 45 37 # define bfin_no_irqs 0x1f 46 38 #endif 47 39 48 - #define raw_local_irq_disable() \ 49 - do { \ 50 - ipipe_check_context(ipipe_root_domain); \ 51 - __ipipe_stall_root(); \ 52 - barrier(); \ 53 - } while (0) 40 + /*****************************************************************************/ 41 + /* 42 + * Hard, untraced CPU interrupt flag manipulation and access. 43 + */ 44 + static inline void __hard_local_irq_disable(void) 45 + { 46 + bfin_cli(); 47 + } 54 48 55 - #define raw_local_irq_enable() \ 56 - do { \ 57 - barrier(); \ 58 - ipipe_check_context(ipipe_root_domain); \ 59 - __ipipe_unstall_root(); \ 60 - } while (0) 49 + static inline void __hard_local_irq_enable(void) 50 + { 51 + bfin_sti(bfin_irq_flags); 52 + } 61 53 62 - #define raw_local_save_flags_ptr(x) \ 63 - do { \ 64 - *(x) = __ipipe_test_root() ? 
bfin_no_irqs : bfin_irq_flags; \ 65 - } while (0) 54 + static inline unsigned long hard_local_save_flags(void) 55 + { 56 + return bfin_read_IMASK(); 57 + } 66 58 67 - #define raw_local_save_flags(x) raw_local_save_flags_ptr(&(x)) 59 + static inline unsigned long __hard_local_irq_save(void) 60 + { 61 + unsigned long flags; 62 + flags = bfin_cli(); 63 + #ifdef CONFIG_DEBUG_HWERR 64 + bfin_sti(0x3f); 65 + #endif 66 + return flags; 67 + } 68 68 69 - #define raw_irqs_disabled_flags(x) ((x) == bfin_no_irqs) 69 + static inline int hard_irqs_disabled_flags(unsigned long flags) 70 + { 71 + return (flags & ~0x3f) == 0; 72 + } 70 73 71 - #define raw_local_irq_save_ptr(x) \ 72 - do { \ 73 - *(x) = __ipipe_test_and_stall_root() ? bfin_no_irqs : bfin_irq_flags; \ 74 - barrier(); \ 75 - } while (0) 74 + static inline int hard_irqs_disabled(void) 75 + { 76 + unsigned long flags = hard_local_save_flags(); 77 + return hard_irqs_disabled_flags(flags); 78 + } 76 79 77 - #define raw_local_irq_save(x) \ 78 - do { \ 79 - ipipe_check_context(ipipe_root_domain); \ 80 - raw_local_irq_save_ptr(&(x)); \ 81 - } while (0) 80 + static inline void __hard_local_irq_restore(unsigned long flags) 81 + { 82 + if (!hard_irqs_disabled_flags(flags)) 83 + __hard_local_irq_enable(); 84 + } 82 85 83 - static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real) 86 + /*****************************************************************************/ 87 + /* 88 + * Interrupt pipe handling. 89 + */ 90 + #ifdef CONFIG_IPIPE 91 + 92 + #include <linux/compiler.h> 93 + #include <linux/ipipe_base.h> 94 + #include <linux/ipipe_trace.h> 95 + 96 + /* 97 + * Interrupt pipe interface to linux/irqflags.h. 
98 + */ 99 + static inline void arch_local_irq_disable(void) 100 + { 101 + ipipe_check_context(ipipe_root_domain); 102 + __ipipe_stall_root(); 103 + barrier(); 104 + } 105 + 106 + static inline void arch_local_irq_enable(void) 107 + { 108 + barrier(); 109 + ipipe_check_context(ipipe_root_domain); 110 + __ipipe_unstall_root(); 111 + } 112 + 113 + static inline unsigned long arch_local_save_flags(void) 114 + { 115 + return __ipipe_test_root() ? bfin_no_irqs : bfin_irq_flags; 116 + } 117 + 118 + static inline int arch_irqs_disabled_flags(unsigned long flags) 119 + { 120 + return flags == bfin_no_irqs; 121 + } 122 + 123 + static inline void arch_local_irq_save_ptr(unsigned long *_flags) 124 + { 125 + *_flags = __ipipe_test_and_stall_root() ? bfin_no_irqs : bfin_irq_flags; 126 + barrier(); 127 + } 128 + 129 + static inline unsigned long arch_local_irq_save(void) 130 + { 131 + ipipe_check_context(ipipe_root_domain); 132 + return __hard_local_irq_save(); 133 + } 134 + 135 + static inline unsigned long arch_mangle_irq_bits(int virt, unsigned long real) 84 136 { 85 137 /* 86 138 * Merge virtual and real interrupt mask bits into a single
irqs_disabled_flags_hw(flags); \ 175 - }) 176 - 177 - static inline void local_irq_save_ptr_hw(unsigned long *flags) 178 - { 179 - *flags = bfin_cli(); 180 - #ifdef CONFIG_DEBUG_HWERR 181 - bfin_sti(0x3f); 182 - #endif 183 - } 184 - 185 - #define local_irq_save_hw_notrace(flags) \ 186 - do { \ 187 - local_irq_save_ptr_hw(&(flags)); \ 188 - } while (0) 189 - 190 - static inline void local_irq_restore_hw_notrace(unsigned long flags) 191 - { 192 - if (!irqs_disabled_flags_hw(flags)) 193 - local_irq_enable_hw_notrace(); 194 - } 195 - 97 + /* 98 + * Interface to various arch routines that may be traced. 99 + */ 196 100 #ifdef CONFIG_IPIPE_TRACE_IRQSOFF 197 - # define local_irq_disable_hw() \ 198 - do { \ 199 - if (!irqs_disabled_hw()) { \ 200 - local_irq_disable_hw_notrace(); \ 201 - ipipe_trace_begin(0x80000000); \ 202 - } \ 203 - } while (0) 204 - # define local_irq_enable_hw() \ 205 - do { \ 206 - if (irqs_disabled_hw()) { \ 207 - ipipe_trace_end(0x80000000); \ 208 - local_irq_enable_hw_notrace(); \ 209 - } \ 210 - } while (0) 211 - # define local_irq_save_hw(flags) \ 212 - do { \ 213 - local_save_flags_hw(flags); \ 214 - if (!irqs_disabled_flags_hw(flags)) { \ 215 - local_irq_disable_hw_notrace(); \ 216 - ipipe_trace_begin(0x80000001); \ 217 - } \ 218 - } while (0) 219 - # define local_irq_restore_hw(flags) \ 220 - do { \ 221 - if (!irqs_disabled_flags_hw(flags)) { \ 222 - ipipe_trace_end(0x80000001); \ 223 - local_irq_enable_hw_notrace(); \ 224 - } \ 225 - } while (0) 101 + static inline void hard_local_irq_disable(void) 102 + { 103 + if (!hard_irqs_disabled()) { 104 + __hard_local_irq_disable(); 105 + ipipe_trace_begin(0x80000000); 106 + } 107 + } 108 + 109 + static inline void hard_local_irq_enable(void) 110 + { 111 + if (hard_irqs_disabled()) { 112 + ipipe_trace_end(0x80000000); 113 + __hard_local_irq_enable(); 114 + } 115 + } 116 + 117 + static inline unsigned long hard_local_irq_save(void) 118 + { 119 + unsigned long flags = hard_local_save_flags(); 120 + if 
(!hard_irqs_disabled_flags(flags)) { 121 + __hard_local_irq_disable(); 122 + ipipe_trace_begin(0x80000001); 123 + } 124 + return flags; 125 + } 126 + 127 + static inline void hard_local_irq_restore(unsigned long flags) 128 + { 129 + if (!hard_irqs_disabled_flags(flags)) { 130 + ipipe_trace_end(0x80000001); 131 + __hard_local_irq_enable(); 132 + } 133 + } 134 + 226 135 #else /* !CONFIG_IPIPE_TRACE_IRQSOFF */ 227 - # define local_irq_disable_hw() local_irq_disable_hw_notrace() 228 - # define local_irq_enable_hw() local_irq_enable_hw_notrace() 229 - # define local_irq_save_hw(flags) local_irq_save_hw_notrace(flags) 230 - # define local_irq_restore_hw(flags) local_irq_restore_hw_notrace(flags) 136 + # define hard_local_irq_disable() __hard_local_irq_disable() 137 + # define hard_local_irq_enable() __hard_local_irq_enable() 138 + # define hard_local_irq_save() __hard_local_irq_save() 139 + # define hard_local_irq_restore(flags) __hard_local_irq_restore(flags) 231 140 #endif /* !CONFIG_IPIPE_TRACE_IRQSOFF */ 232 141 233 142 #else /* CONFIG_IPIPE */ 234 143 235 - static inline void raw_local_irq_disable(void) 236 - { 237 - bfin_cli(); 238 - } 239 - static inline void raw_local_irq_enable(void) 240 - { 241 - bfin_sti(bfin_irq_flags); 242 - } 144 + /* 145 + * Direct interface to linux/irqflags.h. 146 + */ 147 + #define arch_local_save_flags() hard_local_save_flags() 148 + #define arch_local_irq_save(flags) __hard_local_irq_save() 149 + #define arch_local_irq_restore(flags) __hard_local_irq_restore(flags) 150 + #define arch_local_irq_enable() __hard_local_irq_enable() 151 + #define arch_local_irq_disable() __hard_local_irq_disable() 152 + #define arch_irqs_disabled_flags(flags) hard_irqs_disabled_flags(flags) 153 + #define arch_irqs_disabled() hard_irqs_disabled() 243 154 244 - #define raw_local_save_flags(flags) do { (flags) = bfin_read_IMASK(); } while (0) 155 + /* 156 + * Interface to various arch routines that may be traced. 
157 + */ 158 + #define hard_local_irq_save() __hard_local_irq_save() 159 + #define hard_local_irq_restore(flags) __hard_local_irq_restore(flags) 160 + #define hard_local_irq_enable() __hard_local_irq_enable() 161 + #define hard_local_irq_disable() __hard_local_irq_disable() 245 162 246 - #define raw_irqs_disabled_flags(flags) (((flags) & ~0x3f) == 0) 247 - 248 - static inline unsigned long __raw_local_irq_save(void) 249 - { 250 - unsigned long flags = bfin_cli(); 251 - #ifdef CONFIG_DEBUG_HWERR 252 - bfin_sti(0x3f); 253 - #endif 254 - return flags; 255 - } 256 - #define raw_local_irq_save(flags) do { (flags) = __raw_local_irq_save(); } while (0) 257 - 258 - #define local_irq_save_hw(flags) raw_local_irq_save(flags) 259 - #define local_irq_restore_hw(flags) raw_local_irq_restore(flags) 260 - #define local_irq_enable_hw() raw_local_irq_enable() 261 - #define local_irq_disable_hw() raw_local_irq_disable() 262 - #define irqs_disabled_hw() irqs_disabled() 263 163 264 164 #endif /* !CONFIG_IPIPE */ 265 - 266 - static inline void raw_local_irq_restore(unsigned long flags) 267 - { 268 - if (!raw_irqs_disabled_flags(flags)) 269 - raw_local_irq_enable(); 270 - } 271 - 272 165 #endif
+4 -4
arch/blackfin/include/asm/mmu_context.h
··· 97 97 } 98 98 99 99 #ifdef CONFIG_IPIPE 100 - #define lock_mm_switch(flags) local_irq_save_hw_cond(flags) 101 - #define unlock_mm_switch(flags) local_irq_restore_hw_cond(flags) 100 + #define lock_mm_switch(flags) flags = hard_local_irq_save_cond() 101 + #define unlock_mm_switch(flags) hard_local_irq_restore_cond(flags) 102 102 #else 103 103 #define lock_mm_switch(flags) do { (void)(flags); } while (0) 104 104 #define unlock_mm_switch(flags) do { (void)(flags); } while (0) ··· 205 205 } 206 206 207 207 #define ipipe_mm_switch_protect(flags) \ 208 - local_irq_save_hw_cond(flags) 208 + flags = hard_local_irq_save_cond() 209 209 210 210 #define ipipe_mm_switch_unprotect(flags) \ 211 - local_irq_restore_hw_cond(flags) 211 + hard_local_irq_restore_cond(flags) 212 212 213 213 #endif
+2 -2
arch/blackfin/include/asm/system.h
··· 117 117 unsigned long tmp = 0; 118 118 unsigned long flags; 119 119 120 - local_irq_save_hw(flags); 120 + flags = hard_local_irq_save(); 121 121 122 122 switch (size) { 123 123 case 1: ··· 139 139 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory"); 140 140 break; 141 141 } 142 - local_irq_restore_hw(flags); 142 + hard_local_irq_restore(flags); 143 143 return tmp; 144 144 } 145 145
+51 -51
arch/blackfin/kernel/bfin_gpio.c
··· 349 349 void set_gpio_ ## name(unsigned gpio, unsigned short arg) \ 350 350 { \ 351 351 unsigned long flags; \ 352 - local_irq_save_hw(flags); \ 352 + flags = hard_local_irq_save(); \ 353 353 if (arg) \ 354 354 gpio_array[gpio_bank(gpio)]->name |= gpio_bit(gpio); \ 355 355 else \ 356 356 gpio_array[gpio_bank(gpio)]->name &= ~gpio_bit(gpio); \ 357 357 AWA_DUMMY_READ(name); \ 358 - local_irq_restore_hw(flags); \ 358 + hard_local_irq_restore(flags); \ 359 359 } \ 360 360 EXPORT_SYMBOL(set_gpio_ ## name); 361 361 ··· 371 371 { \ 372 372 unsigned long flags; \ 373 373 if (ANOMALY_05000311 || ANOMALY_05000323) \ 374 - local_irq_save_hw(flags); \ 374 + flags = hard_local_irq_save(); \ 375 375 if (arg) \ 376 376 gpio_array[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \ 377 377 else \ 378 378 gpio_array[gpio_bank(gpio)]->name ## _clear = gpio_bit(gpio); \ 379 379 if (ANOMALY_05000311 || ANOMALY_05000323) { \ 380 380 AWA_DUMMY_READ(name); \ 381 - local_irq_restore_hw(flags); \ 381 + hard_local_irq_restore(flags); \ 382 382 } \ 383 383 } \ 384 384 EXPORT_SYMBOL(set_gpio_ ## name); ··· 391 391 { 392 392 unsigned long flags; 393 393 if (ANOMALY_05000311 || ANOMALY_05000323) 394 - local_irq_save_hw(flags); 394 + flags = hard_local_irq_save(); 395 395 gpio_array[gpio_bank(gpio)]->toggle = gpio_bit(gpio); 396 396 if (ANOMALY_05000311 || ANOMALY_05000323) { 397 397 AWA_DUMMY_READ(toggle); 398 - local_irq_restore_hw(flags); 398 + hard_local_irq_restore(flags); 399 399 } 400 400 } 401 401 EXPORT_SYMBOL(set_gpio_toggle); ··· 408 408 { \ 409 409 unsigned long flags; \ 410 410 if (ANOMALY_05000311 || ANOMALY_05000323) \ 411 - local_irq_save_hw(flags); \ 411 + flags = hard_local_irq_save(); \ 412 412 gpio_array[gpio_bank(gpio)]->name = arg; \ 413 413 if (ANOMALY_05000311 || ANOMALY_05000323) { \ 414 414 AWA_DUMMY_READ(name); \ 415 - local_irq_restore_hw(flags); \ 415 + hard_local_irq_restore(flags); \ 416 416 } \ 417 417 } \ 418 418 EXPORT_SYMBOL(set_gpiop_ ## name); ··· 433 433 
unsigned long flags; \ 434 434 unsigned short ret; \ 435 435 if (ANOMALY_05000311 || ANOMALY_05000323) \ 436 - local_irq_save_hw(flags); \ 436 + flags = hard_local_irq_save(); \ 437 437 ret = 0x01 & (gpio_array[gpio_bank(gpio)]->name >> gpio_sub_n(gpio)); \ 438 438 if (ANOMALY_05000311 || ANOMALY_05000323) { \ 439 439 AWA_DUMMY_READ(name); \ 440 - local_irq_restore_hw(flags); \ 440 + hard_local_irq_restore(flags); \ 441 441 } \ 442 442 return ret; \ 443 443 } \ ··· 460 460 unsigned long flags; \ 461 461 unsigned short ret; \ 462 462 if (ANOMALY_05000311 || ANOMALY_05000323) \ 463 - local_irq_save_hw(flags); \ 463 + flags = hard_local_irq_save(); \ 464 464 ret = (gpio_array[gpio_bank(gpio)]->name); \ 465 465 if (ANOMALY_05000311 || ANOMALY_05000323) { \ 466 466 AWA_DUMMY_READ(name); \ 467 - local_irq_restore_hw(flags); \ 467 + hard_local_irq_restore(flags); \ 468 468 } \ 469 469 return ret; \ 470 470 } \ ··· 525 525 if (check_gpio(gpio) < 0) 526 526 return -EINVAL; 527 527 528 - local_irq_save_hw(flags); 528 + flags = hard_local_irq_save(); 529 529 if (ctrl) 530 530 reserve(wakeup, gpio); 531 531 else 532 532 unreserve(wakeup, gpio); 533 533 534 534 set_gpio_maskb(gpio, ctrl); 535 - local_irq_restore_hw(flags); 535 + hard_local_irq_restore(flags); 536 536 537 537 return 0; 538 538 } ··· 690 690 691 691 BUG_ON(ident >= MAX_RESOURCES); 692 692 693 - local_irq_save_hw(flags); 693 + flags = hard_local_irq_save(); 694 694 695 695 /* If a pin can be muxed as either GPIO or peripheral, make 696 696 * sure it is not already a GPIO pin when we request it. 
··· 701 701 printk(KERN_ERR 702 702 "%s: Peripheral %d is already reserved as GPIO by %s !\n", 703 703 __func__, ident, get_label(ident)); 704 - local_irq_restore_hw(flags); 704 + hard_local_irq_restore(flags); 705 705 return -EBUSY; 706 706 } 707 707 ··· 730 730 printk(KERN_ERR 731 731 "%s: Peripheral %d function %d is already reserved by %s !\n", 732 732 __func__, ident, P_FUNCT2MUX(per), get_label(ident)); 733 - local_irq_restore_hw(flags); 733 + hard_local_irq_restore(flags); 734 734 return -EBUSY; 735 735 } 736 736 } ··· 741 741 portmux_setup(per); 742 742 port_setup(ident, PERIPHERAL_USAGE); 743 743 744 - local_irq_restore_hw(flags); 744 + hard_local_irq_restore(flags); 745 745 set_label(ident, label); 746 746 747 747 return 0; ··· 780 780 if (!(per & P_DEFINED)) 781 781 return; 782 782 783 - local_irq_save_hw(flags); 783 + flags = hard_local_irq_save(); 784 784 785 785 if (unlikely(!is_reserved(peri, ident, 0))) { 786 - local_irq_restore_hw(flags); 786 + hard_local_irq_restore(flags); 787 787 return; 788 788 } 789 789 ··· 794 794 795 795 set_label(ident, "free"); 796 796 797 - local_irq_restore_hw(flags); 797 + hard_local_irq_restore(flags); 798 798 } 799 799 EXPORT_SYMBOL(peripheral_free); 800 800 ··· 828 828 if (check_gpio(gpio) < 0) 829 829 return -EINVAL; 830 830 831 - local_irq_save_hw(flags); 831 + flags = hard_local_irq_save(); 832 832 833 833 /* 834 834 * Allow that the identical GPIO can ··· 837 837 */ 838 838 839 839 if (cmp_label(gpio, label) == 0) { 840 - local_irq_restore_hw(flags); 840 + hard_local_irq_restore(flags); 841 841 return 0; 842 842 } 843 843 ··· 846 846 dump_stack(); 847 847 printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n", 848 848 gpio, get_label(gpio)); 849 - local_irq_restore_hw(flags); 849 + hard_local_irq_restore(flags); 850 850 return -EBUSY; 851 851 } 852 852 if (unlikely(is_reserved(peri, gpio, 1))) { ··· 855 855 printk(KERN_ERR 856 856 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n", 857 
857 gpio, get_label(gpio)); 858 - local_irq_restore_hw(flags); 858 + hard_local_irq_restore(flags); 859 859 return -EBUSY; 860 860 } 861 861 if (unlikely(is_reserved(gpio_irq, gpio, 1))) { ··· 871 871 reserve(gpio, gpio); 872 872 set_label(gpio, label); 873 873 874 - local_irq_restore_hw(flags); 874 + hard_local_irq_restore(flags); 875 875 876 876 port_setup(gpio, GPIO_USAGE); 877 877 ··· 888 888 889 889 might_sleep(); 890 890 891 - local_irq_save_hw(flags); 891 + flags = hard_local_irq_save(); 892 892 893 893 if (unlikely(!is_reserved(gpio, gpio, 0))) { 894 894 if (system_state == SYSTEM_BOOTING) 895 895 dump_stack(); 896 896 gpio_error(gpio); 897 - local_irq_restore_hw(flags); 897 + hard_local_irq_restore(flags); 898 898 return; 899 899 } 900 900 ··· 902 902 903 903 set_label(gpio, "free"); 904 904 905 - local_irq_restore_hw(flags); 905 + hard_local_irq_restore(flags); 906 906 } 907 907 EXPORT_SYMBOL(bfin_gpio_free); 908 908 ··· 913 913 { 914 914 unsigned long flags; 915 915 916 - local_irq_save_hw(flags); 916 + flags = hard_local_irq_save(); 917 917 918 918 /* 919 919 * Allow that the identical GPIO can ··· 922 922 */ 923 923 924 924 if (cmp_label(gpio, label) == 0) { 925 - local_irq_restore_hw(flags); 925 + hard_local_irq_restore(flags); 926 926 return 0; 927 927 } 928 928 929 929 if (unlikely(is_reserved(special_gpio, gpio, 1))) { 930 - local_irq_restore_hw(flags); 930 + hard_local_irq_restore(flags); 931 931 printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n", 932 932 gpio, get_label(gpio)); 933 933 934 934 return -EBUSY; 935 935 } 936 936 if (unlikely(is_reserved(peri, gpio, 1))) { 937 - local_irq_restore_hw(flags); 937 + hard_local_irq_restore(flags); 938 938 printk(KERN_ERR 939 939 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n", 940 940 gpio, get_label(gpio)); ··· 946 946 reserve(peri, gpio); 947 947 948 948 set_label(gpio, label); 949 - local_irq_restore_hw(flags); 949 + hard_local_irq_restore(flags); 950 950 
port_setup(gpio, GPIO_USAGE); 951 951 952 952 return 0; ··· 959 959 960 960 might_sleep(); 961 961 962 - local_irq_save_hw(flags); 962 + flags = hard_local_irq_save(); 963 963 964 964 if (unlikely(!is_reserved(special_gpio, gpio, 0))) { 965 965 gpio_error(gpio); 966 - local_irq_restore_hw(flags); 966 + hard_local_irq_restore(flags); 967 967 return; 968 968 } 969 969 970 970 unreserve(special_gpio, gpio); 971 971 unreserve(peri, gpio); 972 972 set_label(gpio, "free"); 973 - local_irq_restore_hw(flags); 973 + hard_local_irq_restore(flags); 974 974 } 975 975 EXPORT_SYMBOL(bfin_special_gpio_free); 976 976 #endif ··· 983 983 if (check_gpio(gpio) < 0) 984 984 return -EINVAL; 985 985 986 - local_irq_save_hw(flags); 986 + flags = hard_local_irq_save(); 987 987 988 988 if (unlikely(is_reserved(peri, gpio, 1))) { 989 989 if (system_state == SYSTEM_BOOTING) ··· 991 991 printk(KERN_ERR 992 992 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n", 993 993 gpio, get_label(gpio)); 994 - local_irq_restore_hw(flags); 994 + hard_local_irq_restore(flags); 995 995 return -EBUSY; 996 996 } 997 997 if (unlikely(is_reserved(gpio, gpio, 1))) ··· 1002 1002 reserve(gpio_irq, gpio); 1003 1003 set_label(gpio, label); 1004 1004 1005 - local_irq_restore_hw(flags); 1005 + hard_local_irq_restore(flags); 1006 1006 1007 1007 port_setup(gpio, GPIO_USAGE); 1008 1008 ··· 1016 1016 if (check_gpio(gpio) < 0) 1017 1017 return; 1018 1018 1019 - local_irq_save_hw(flags); 1019 + flags = hard_local_irq_save(); 1020 1020 1021 1021 if (unlikely(!is_reserved(gpio_irq, gpio, 0))) { 1022 1022 if (system_state == SYSTEM_BOOTING) 1023 1023 dump_stack(); 1024 1024 gpio_error(gpio); 1025 - local_irq_restore_hw(flags); 1025 + hard_local_irq_restore(flags); 1026 1026 return; 1027 1027 } 1028 1028 ··· 1030 1030 1031 1031 set_label(gpio, "free"); 1032 1032 1033 - local_irq_restore_hw(flags); 1033 + hard_local_irq_restore(flags); 1034 1034 } 1035 1035 1036 1036 static inline void 
__bfin_gpio_direction_input(unsigned gpio) ··· 1052 1052 return -EINVAL; 1053 1053 } 1054 1054 1055 - local_irq_save_hw(flags); 1055 + flags = hard_local_irq_save(); 1056 1056 __bfin_gpio_direction_input(gpio); 1057 1057 AWA_DUMMY_READ(inen); 1058 - local_irq_restore_hw(flags); 1058 + hard_local_irq_restore(flags); 1059 1059 1060 1060 return 0; 1061 1061 } ··· 1070 1070 port_setup(gpio, GPIO_USAGE); 1071 1071 1072 1072 #ifdef CONFIG_BF54x 1073 - local_irq_save_hw(flags); 1073 + flags = hard_local_irq_save(); 1074 1074 __bfin_gpio_direction_input(gpio); 1075 - local_irq_restore_hw(flags); 1075 + hard_local_irq_restore(flags); 1076 1076 #endif 1077 1077 } 1078 1078 ··· 1094 1094 return -EINVAL; 1095 1095 } 1096 1096 1097 - local_irq_save_hw(flags); 1097 + flags = hard_local_irq_save(); 1098 1098 1099 1099 gpio_array[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio); 1100 1100 gpio_set_value(gpio, value); ··· 1105 1105 #endif 1106 1106 1107 1107 AWA_DUMMY_READ(dir); 1108 - local_irq_restore_hw(flags); 1108 + hard_local_irq_restore(flags); 1109 1109 1110 1110 return 0; 1111 1111 } ··· 1120 1120 1121 1121 if (unlikely(get_gpio_edge(gpio))) { 1122 1122 int ret; 1123 - local_irq_save_hw(flags); 1123 + flags = hard_local_irq_save(); 1124 1124 set_gpio_edge(gpio, 0); 1125 1125 ret = get_gpio_data(gpio); 1126 1126 set_gpio_edge(gpio, 1); 1127 - local_irq_restore_hw(flags); 1127 + hard_local_irq_restore(flags); 1128 1128 return ret; 1129 1129 } else 1130 1130 return get_gpio_data(gpio);
+4 -4
arch/blackfin/kernel/cplb-mpu/cplbmgr.c
··· 318 318 319 319 nr_cplb_flush[cpu]++; 320 320 321 - local_irq_save_hw(flags); 321 + flags = hard_local_irq_save(); 322 322 _disable_icplb(); 323 323 for (i = first_switched_icplb; i < MAX_CPLBS; i++) { 324 324 icplb_tbl[cpu][i].data = 0; ··· 332 332 bfin_write32(DCPLB_DATA0 + i * 4, 0); 333 333 } 334 334 _enable_dcplb(); 335 - local_irq_restore_hw(flags); 335 + hard_local_irq_restore(flags); 336 336 337 337 } 338 338 ··· 348 348 return; 349 349 } 350 350 351 - local_irq_save_hw(flags); 351 + flags = hard_local_irq_save(); 352 352 current_rwx_mask[cpu] = masks; 353 353 354 354 if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) { ··· 373 373 addr += PAGE_SIZE; 374 374 } 375 375 _enable_dcplb(); 376 - local_irq_restore_hw(flags); 376 + hard_local_irq_restore(flags); 377 377 }
+19 -19
arch/blackfin/kernel/ipipe.c
··· 219 219 220 220 ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs); 221 221 222 - local_irq_save_hw(flags); 222 + flags = hard_local_irq_save(); 223 223 224 224 if (!__ipipe_root_domain_p) { 225 - local_irq_restore_hw(flags); 225 + hard_local_irq_restore(flags); 226 226 return 1; 227 227 } 228 228 ··· 230 230 if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0) 231 231 __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT); 232 232 233 - local_irq_restore_hw(flags); 233 + hard_local_irq_restore(flags); 234 234 235 235 return -ret; 236 236 } ··· 239 239 { 240 240 unsigned long flags; 241 241 242 - local_irq_save_hw(flags); 242 + flags = hard_local_irq_save(); 243 243 244 244 return flags; 245 245 } 246 246 247 247 void ipipe_critical_exit(unsigned long flags) 248 248 { 249 - local_irq_restore_hw(flags); 249 + hard_local_irq_restore(flags); 250 250 } 251 251 252 252 static void __ipipe_no_irqtail(void) ··· 279 279 return -EINVAL; 280 280 #endif 281 281 282 - local_irq_save_hw(flags); 282 + flags = hard_local_irq_save(); 283 283 __ipipe_handle_irq(irq, NULL); 284 - local_irq_restore_hw(flags); 284 + hard_local_irq_restore(flags); 285 285 286 286 return 1; 287 287 } ··· 293 293 294 294 BUG_ON(irqs_disabled()); 295 295 296 - local_irq_save_hw(flags); 296 + flags = hard_local_irq_save(); 297 297 298 298 if (irq_tail_hook) 299 299 irq_tail_hook(); ··· 303 303 if (ipipe_root_cpudom_var(irqpend_himask) != 0) 304 304 __ipipe_sync_pipeline(IPIPE_IRQMASK_ANY); 305 305 306 - local_irq_restore_hw(flags); 306 + hard_local_irq_restore(flags); 307 307 } 308 308 309 309 void ___ipipe_sync_pipeline(unsigned long syncmask) ··· 344 344 { 345 345 unsigned long *p, flags; 346 346 347 - local_irq_save_hw(flags); 347 + flags = hard_local_irq_save(); 348 348 p = &__ipipe_root_status; 349 349 __set_bit(IPIPE_STALL_FLAG, p); 350 - local_irq_restore_hw(flags); 350 + hard_local_irq_restore(flags); 351 351 } 352 352 EXPORT_SYMBOL(__ipipe_stall_root); 353 353 ··· 356 356 unsigned long *p, flags; 357 
357 int x; 358 358 359 - local_irq_save_hw(flags); 359 + flags = hard_local_irq_save(); 360 360 p = &__ipipe_root_status; 361 361 x = __test_and_set_bit(IPIPE_STALL_FLAG, p); 362 - local_irq_restore_hw(flags); 362 + hard_local_irq_restore(flags); 363 363 364 364 return x; 365 365 } ··· 371 371 unsigned long flags; 372 372 int x; 373 373 374 - local_irq_save_hw_smp(flags); 374 + flags = hard_local_irq_save_smp(); 375 375 p = &__ipipe_root_status; 376 376 x = test_bit(IPIPE_STALL_FLAG, p); 377 - local_irq_restore_hw_smp(flags); 377 + hard_local_irq_restore_smp(flags); 378 378 379 379 return x; 380 380 } ··· 384 384 { 385 385 unsigned long *p, flags; 386 386 387 - local_irq_save_hw(flags); 387 + flags = hard_local_irq_save(); 388 388 p = &__ipipe_root_status; 389 389 __set_bit(IPIPE_SYNCDEFER_FLAG, p); 390 - local_irq_restore_hw(flags); 390 + hard_local_irq_restore(flags); 391 391 } 392 392 EXPORT_SYMBOL(__ipipe_lock_root); 393 393 ··· 395 395 { 396 396 unsigned long *p, flags; 397 397 398 - local_irq_save_hw(flags); 398 + flags = hard_local_irq_save(); 399 399 p = &__ipipe_root_status; 400 400 __clear_bit(IPIPE_SYNCDEFER_FLAG, p); 401 - local_irq_restore_hw(flags); 401 + hard_local_irq_restore(flags); 402 402 } 403 403 EXPORT_SYMBOL(__ipipe_unlock_root);
+2 -2
arch/blackfin/kernel/process.c
··· 65 65 #ifdef CONFIG_IPIPE 66 66 ipipe_suspend_domain(); 67 67 #endif 68 - local_irq_disable_hw(); 68 + hard_local_irq_disable(); 69 69 if (!need_resched()) 70 70 idle_with_irq_disabled(); 71 71 72 - local_irq_enable_hw(); 72 + hard_local_irq_enable(); 73 73 } 74 74 75 75 /*
+1
arch/blackfin/kernel/trace.c
··· 15 15 #include <linux/kallsyms.h> 16 16 #include <linux/err.h> 17 17 #include <linux/fs.h> 18 + #include <linux/irq.h> 18 19 #include <asm/dma.h> 19 20 #include <asm/trace.h> 20 21 #include <asm/fixed_code.h>
-50
arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
··· 1058 1058 /* These need to be last due to the cdef/linux inter-dependencies */ 1059 1059 #include <asm/irq.h> 1060 1060 1061 - /* Writing to PLL_CTL initiates a PLL relock sequence. */ 1062 - static __inline__ void bfin_write_PLL_CTL(unsigned int val) 1063 - { 1064 - unsigned long flags, iwr0, iwr1; 1065 - 1066 - if (val == bfin_read_PLL_CTL()) 1067 - return; 1068 - 1069 - local_irq_save_hw(flags); 1070 - /* Enable the PLL Wakeup bit in SIC IWR */ 1071 - iwr0 = bfin_read32(SIC_IWR0); 1072 - iwr1 = bfin_read32(SIC_IWR1); 1073 - /* Only allow PPL Wakeup) */ 1074 - bfin_write32(SIC_IWR0, IWR_ENABLE(0)); 1075 - bfin_write32(SIC_IWR1, 0); 1076 - 1077 - bfin_write16(PLL_CTL, val); 1078 - SSYNC(); 1079 - asm("IDLE;"); 1080 - 1081 - bfin_write32(SIC_IWR0, iwr0); 1082 - bfin_write32(SIC_IWR1, iwr1); 1083 - local_irq_restore_hw(flags); 1084 - } 1085 - 1086 - /* Writing to VR_CTL initiates a PLL relock sequence. */ 1087 - static __inline__ void bfin_write_VR_CTL(unsigned int val) 1088 - { 1089 - unsigned long flags, iwr0, iwr1; 1090 - 1091 - if (val == bfin_read_VR_CTL()) 1092 - return; 1093 - 1094 - local_irq_save_hw(flags); 1095 - /* Enable the PLL Wakeup bit in SIC IWR */ 1096 - iwr0 = bfin_read32(SIC_IWR0); 1097 - iwr1 = bfin_read32(SIC_IWR1); 1098 - /* Only allow PPL Wakeup) */ 1099 - bfin_write32(SIC_IWR0, IWR_ENABLE(0)); 1100 - bfin_write32(SIC_IWR1, 0); 1101 - 1102 - bfin_write16(VR_CTL, val); 1103 - SSYNC(); 1104 - asm("IDLE;"); 1105 - 1106 - bfin_write32(SIC_IWR0, iwr0); 1107 - bfin_write32(SIC_IWR1, iwr1); 1108 - local_irq_restore_hw(flags); 1109 - } 1110 - 1111 1061 #endif /* _CDEF_BF52X_H */
+63
arch/blackfin/mach-bf518/include/mach/pll.h
··· 1 + /* 2 + * Copyright 2008 Analog Devices Inc. 3 + * 4 + * Licensed under the GPL-2 or later 5 + */ 6 + 7 + #ifndef _MACH_PLL_H 8 + #define _MACH_PLL_H 9 + 10 + #include <asm/blackfin.h> 11 + #include <asm/irqflags.h> 12 + 13 + /* Writing to PLL_CTL initiates a PLL relock sequence. */ 14 + static __inline__ void bfin_write_PLL_CTL(unsigned int val) 15 + { 16 + unsigned long flags, iwr0, iwr1; 17 + 18 + if (val == bfin_read_PLL_CTL()) 19 + return; 20 + 21 + flags = hard_local_irq_save(); 22 + /* Enable the PLL Wakeup bit in SIC IWR */ 23 + iwr0 = bfin_read32(SIC_IWR0); 24 + iwr1 = bfin_read32(SIC_IWR1); 25 + /* Only allow PPL Wakeup) */ 26 + bfin_write32(SIC_IWR0, IWR_ENABLE(0)); 27 + bfin_write32(SIC_IWR1, 0); 28 + 29 + bfin_write16(PLL_CTL, val); 30 + SSYNC(); 31 + asm("IDLE;"); 32 + 33 + bfin_write32(SIC_IWR0, iwr0); 34 + bfin_write32(SIC_IWR1, iwr1); 35 + hard_local_irq_restore(flags); 36 + } 37 + 38 + /* Writing to VR_CTL initiates a PLL relock sequence. */ 39 + static __inline__ void bfin_write_VR_CTL(unsigned int val) 40 + { 41 + unsigned long flags, iwr0, iwr1; 42 + 43 + if (val == bfin_read_VR_CTL()) 44 + return; 45 + 46 + flags = hard_local_irq_save(); 47 + /* Enable the PLL Wakeup bit in SIC IWR */ 48 + iwr0 = bfin_read32(SIC_IWR0); 49 + iwr1 = bfin_read32(SIC_IWR1); 50 + /* Only allow PPL Wakeup) */ 51 + bfin_write32(SIC_IWR0, IWR_ENABLE(0)); 52 + bfin_write32(SIC_IWR1, 0); 53 + 54 + bfin_write16(VR_CTL, val); 55 + SSYNC(); 56 + asm("IDLE;"); 57 + 58 + bfin_write32(SIC_IWR0, iwr0); 59 + bfin_write32(SIC_IWR1, iwr1); 60 + hard_local_irq_restore(flags); 61 + } 62 + 63 + #endif /* _MACH_PLL_H */
-50
arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
··· 1110 1110 /* These need to be last due to the cdef/linux inter-dependencies */ 1111 1111 #include <asm/irq.h> 1112 1112 1113 - /* Writing to PLL_CTL initiates a PLL relock sequence. */ 1114 - static __inline__ void bfin_write_PLL_CTL(unsigned int val) 1115 - { 1116 - unsigned long flags, iwr0, iwr1; 1117 - 1118 - if (val == bfin_read_PLL_CTL()) 1119 - return; 1120 - 1121 - local_irq_save_hw(flags); 1122 - /* Enable the PLL Wakeup bit in SIC IWR */ 1123 - iwr0 = bfin_read32(SIC_IWR0); 1124 - iwr1 = bfin_read32(SIC_IWR1); 1125 - /* Only allow PPL Wakeup) */ 1126 - bfin_write32(SIC_IWR0, IWR_ENABLE(0)); 1127 - bfin_write32(SIC_IWR1, 0); 1128 - 1129 - bfin_write16(PLL_CTL, val); 1130 - SSYNC(); 1131 - asm("IDLE;"); 1132 - 1133 - bfin_write32(SIC_IWR0, iwr0); 1134 - bfin_write32(SIC_IWR1, iwr1); 1135 - local_irq_restore_hw(flags); 1136 - } 1137 - 1138 - /* Writing to VR_CTL initiates a PLL relock sequence. */ 1139 - static __inline__ void bfin_write_VR_CTL(unsigned int val) 1140 - { 1141 - unsigned long flags, iwr0, iwr1; 1142 - 1143 - if (val == bfin_read_VR_CTL()) 1144 - return; 1145 - 1146 - local_irq_save_hw(flags); 1147 - /* Enable the PLL Wakeup bit in SIC IWR */ 1148 - iwr0 = bfin_read32(SIC_IWR0); 1149 - iwr1 = bfin_read32(SIC_IWR1); 1150 - /* Only allow PPL Wakeup) */ 1151 - bfin_write32(SIC_IWR0, IWR_ENABLE(0)); 1152 - bfin_write32(SIC_IWR1, 0); 1153 - 1154 - bfin_write16(VR_CTL, val); 1155 - SSYNC(); 1156 - asm("IDLE;"); 1157 - 1158 - bfin_write32(SIC_IWR0, iwr0); 1159 - bfin_write32(SIC_IWR1, iwr1); 1160 - local_irq_restore_hw(flags); 1161 - } 1162 - 1163 1113 #endif /* _CDEF_BF52X_H */
+63
arch/blackfin/mach-bf527/include/mach/pll.h
··· 1 + /* 2 + * Copyright 2007-2008 Analog Devices Inc. 3 + * 4 + * Licensed under the GPL-2 or later 5 + */ 6 + 7 + #ifndef _MACH_PLL_H 8 + #define _MACH_PLL_H 9 + 10 + #include <asm/blackfin.h> 11 + #include <asm/irqflags.h> 12 + 13 + /* Writing to PLL_CTL initiates a PLL relock sequence. */ 14 + static __inline__ void bfin_write_PLL_CTL(unsigned int val) 15 + { 16 + unsigned long flags, iwr0, iwr1; 17 + 18 + if (val == bfin_read_PLL_CTL()) 19 + return; 20 + 21 + flags = hard_local_irq_save(); 22 + /* Enable the PLL Wakeup bit in SIC IWR */ 23 + iwr0 = bfin_read32(SIC_IWR0); 24 + iwr1 = bfin_read32(SIC_IWR1); 25 + /* Only allow PPL Wakeup) */ 26 + bfin_write32(SIC_IWR0, IWR_ENABLE(0)); 27 + bfin_write32(SIC_IWR1, 0); 28 + 29 + bfin_write16(PLL_CTL, val); 30 + SSYNC(); 31 + asm("IDLE;"); 32 + 33 + bfin_write32(SIC_IWR0, iwr0); 34 + bfin_write32(SIC_IWR1, iwr1); 35 + hard_local_irq_restore(flags); 36 + } 37 + 38 + /* Writing to VR_CTL initiates a PLL relock sequence. */ 39 + static __inline__ void bfin_write_VR_CTL(unsigned int val) 40 + { 41 + unsigned long flags, iwr0, iwr1; 42 + 43 + if (val == bfin_read_VR_CTL()) 44 + return; 45 + 46 + flags = hard_local_irq_save(); 47 + /* Enable the PLL Wakeup bit in SIC IWR */ 48 + iwr0 = bfin_read32(SIC_IWR0); 49 + iwr1 = bfin_read32(SIC_IWR1); 50 + /* Only allow PPL Wakeup) */ 51 + bfin_write32(SIC_IWR0, IWR_ENABLE(0)); 52 + bfin_write32(SIC_IWR1, 0); 53 + 54 + bfin_write16(VR_CTL, val); 55 + SSYNC(); 56 + asm("IDLE;"); 57 + 58 + bfin_write32(SIC_IWR0, iwr0); 59 + bfin_write32(SIC_IWR1, iwr1); 60 + hard_local_irq_restore(flags); 61 + } 62 + 63 + #endif /* _MACH_PLL_H */
+1
arch/blackfin/mach-bf533/boards/blackstamp.c
··· 25 25 #include <asm/bfin5xx_spi.h> 26 26 #include <asm/portmux.h> 27 27 #include <asm/dpmc.h> 28 + #include <mach/fio_flag.h> 28 29 29 30 /* 30 31 * Name the Board for the /proc/cpuinfo
+1
arch/blackfin/mach-bf533/boards/ip0x.c
··· 22 22 #include <asm/dma.h> 23 23 #include <asm/bfin5xx_spi.h> 24 24 #include <asm/portmux.h> 25 + #include <mach/fio_flag.h> 25 26 26 27 /* 27 28 * Name the Board for the /proc/cpuinfo
+1
arch/blackfin/mach-bf533/boards/stamp.c
··· 24 24 #include <asm/reboot.h> 25 25 #include <asm/portmux.h> 26 26 #include <asm/dpmc.h> 27 + #include <mach/fio_flag.h> 27 28 28 29 /* 29 30 * Name the Board for the /proc/cpuinfo
-91
arch/blackfin/mach-bf533/include/mach/cdefBF532.h
··· 7 7 #ifndef _CDEF_BF532_H 8 8 #define _CDEF_BF532_H 9 9 10 - #include <asm/blackfin.h> 11 - 12 - /*include all Core registers and bit definitions*/ 13 - #include "defBF532.h" 14 - 15 10 /*include core specific register pointer definitions*/ 16 11 #include <asm/cdef_LPBlackfin.h> 17 12 ··· 649 654 650 655 /* These need to be last due to the cdef/linux inter-dependencies */ 651 656 #include <asm/irq.h> 652 - 653 - #if ANOMALY_05000311 654 - #define BFIN_WRITE_FIO_FLAG(name) \ 655 - static inline void bfin_write_FIO_FLAG_##name(unsigned short val) \ 656 - { \ 657 - unsigned long flags; \ 658 - local_irq_save_hw(flags); \ 659 - bfin_write16(FIO_FLAG_##name, val); \ 660 - bfin_read_CHIPID(); \ 661 - local_irq_restore_hw(flags); \ 662 - } 663 - BFIN_WRITE_FIO_FLAG(D) 664 - BFIN_WRITE_FIO_FLAG(C) 665 - BFIN_WRITE_FIO_FLAG(S) 666 - BFIN_WRITE_FIO_FLAG(T) 667 - 668 - #define BFIN_READ_FIO_FLAG(name) \ 669 - static inline u16 bfin_read_FIO_FLAG_##name(void) \ 670 - { \ 671 - unsigned long flags; \ 672 - u16 ret; \ 673 - local_irq_save_hw(flags); \ 674 - ret = bfin_read16(FIO_FLAG_##name); \ 675 - bfin_read_CHIPID(); \ 676 - local_irq_restore_hw(flags); \ 677 - return ret; \ 678 - } 679 - BFIN_READ_FIO_FLAG(D) 680 - BFIN_READ_FIO_FLAG(C) 681 - BFIN_READ_FIO_FLAG(S) 682 - BFIN_READ_FIO_FLAG(T) 683 - 684 - #else 685 - #define bfin_write_FIO_FLAG_D(val) bfin_write16(FIO_FLAG_D, val) 686 - #define bfin_write_FIO_FLAG_C(val) bfin_write16(FIO_FLAG_C, val) 687 - #define bfin_write_FIO_FLAG_S(val) bfin_write16(FIO_FLAG_S, val) 688 - #define bfin_write_FIO_FLAG_T(val) bfin_write16(FIO_FLAG_T, val) 689 - #define bfin_read_FIO_FLAG_T() bfin_read16(FIO_FLAG_T) 690 - #define bfin_read_FIO_FLAG_C() bfin_read16(FIO_FLAG_C) 691 - #define bfin_read_FIO_FLAG_S() bfin_read16(FIO_FLAG_S) 692 - #define bfin_read_FIO_FLAG_D() bfin_read16(FIO_FLAG_D) 693 - #endif 694 - 695 - /* Writing to PLL_CTL initiates a PLL relock sequence. 
*/ 696 - static __inline__ void bfin_write_PLL_CTL(unsigned int val) 697 - { 698 - unsigned long flags, iwr; 699 - 700 - if (val == bfin_read_PLL_CTL()) 701 - return; 702 - 703 - local_irq_save_hw(flags); 704 - /* Enable the PLL Wakeup bit in SIC IWR */ 705 - iwr = bfin_read32(SIC_IWR); 706 - /* Only allow PPL Wakeup) */ 707 - bfin_write32(SIC_IWR, IWR_ENABLE(0)); 708 - 709 - bfin_write16(PLL_CTL, val); 710 - SSYNC(); 711 - asm("IDLE;"); 712 - 713 - bfin_write32(SIC_IWR, iwr); 714 - local_irq_restore_hw(flags); 715 - } 716 - 717 - /* Writing to VR_CTL initiates a PLL relock sequence. */ 718 - static __inline__ void bfin_write_VR_CTL(unsigned int val) 719 - { 720 - unsigned long flags, iwr; 721 - 722 - if (val == bfin_read_VR_CTL()) 723 - return; 724 - 725 - local_irq_save_hw(flags); 726 - /* Enable the PLL Wakeup bit in SIC IWR */ 727 - iwr = bfin_read32(SIC_IWR); 728 - /* Only allow PPL Wakeup) */ 729 - bfin_write32(SIC_IWR, IWR_ENABLE(0)); 730 - 731 - bfin_write16(VR_CTL, val); 732 - SSYNC(); 733 - asm("IDLE;"); 734 - 735 - bfin_write32(SIC_IWR, iwr); 736 - local_irq_restore_hw(flags); 737 - } 738 657 739 658 #endif /* _CDEF_BF532_H */
+55
arch/blackfin/mach-bf533/include/mach/fio_flag.h
··· 1 + /* 2 + * Copyright 2005-2008 Analog Devices Inc. 3 + * 4 + * Licensed under the GPL-2 or later 5 + */ 6 + 7 + #ifndef _MACH_FIO_FLAG_H 8 + #define _MACH_FIO_FLAG_H 9 + 10 + #include <asm/blackfin.h> 11 + #include <asm/irqflags.h> 12 + 13 + #if ANOMALY_05000311 14 + #define BFIN_WRITE_FIO_FLAG(name) \ 15 + static inline void bfin_write_FIO_FLAG_##name(unsigned short val) \ 16 + { \ 17 + unsigned long flags; \ 18 + flags = hard_local_irq_save(); \ 19 + bfin_write16(FIO_FLAG_##name, val); \ 20 + bfin_read_CHIPID(); \ 21 + hard_local_irq_restore(flags); \ 22 + } 23 + BFIN_WRITE_FIO_FLAG(D) 24 + BFIN_WRITE_FIO_FLAG(C) 25 + BFIN_WRITE_FIO_FLAG(S) 26 + BFIN_WRITE_FIO_FLAG(T) 27 + 28 + #define BFIN_READ_FIO_FLAG(name) \ 29 + static inline u16 bfin_read_FIO_FLAG_##name(void) \ 30 + { \ 31 + unsigned long flags; \ 32 + u16 ret; \ 33 + flags = hard_local_irq_save(); \ 34 + ret = bfin_read16(FIO_FLAG_##name); \ 35 + bfin_read_CHIPID(); \ 36 + hard_local_irq_restore(flags); \ 37 + return ret; \ 38 + } 39 + BFIN_READ_FIO_FLAG(D) 40 + BFIN_READ_FIO_FLAG(C) 41 + BFIN_READ_FIO_FLAG(S) 42 + BFIN_READ_FIO_FLAG(T) 43 + 44 + #else 45 + #define bfin_write_FIO_FLAG_D(val) bfin_write16(FIO_FLAG_D, val) 46 + #define bfin_write_FIO_FLAG_C(val) bfin_write16(FIO_FLAG_C, val) 47 + #define bfin_write_FIO_FLAG_S(val) bfin_write16(FIO_FLAG_S, val) 48 + #define bfin_write_FIO_FLAG_T(val) bfin_write16(FIO_FLAG_T, val) 49 + #define bfin_read_FIO_FLAG_T() bfin_read16(FIO_FLAG_T) 50 + #define bfin_read_FIO_FLAG_C() bfin_read16(FIO_FLAG_C) 51 + #define bfin_read_FIO_FLAG_S() bfin_read16(FIO_FLAG_S) 52 + #define bfin_read_FIO_FLAG_D() bfin_read16(FIO_FLAG_D) 53 + #endif 54 + 55 + #endif /* _MACH_FIO_FLAG_H */
+57
arch/blackfin/mach-bf533/include/mach/pll.h
··· 1 + /* 2 + * Copyright 2005-2008 Analog Devices Inc. 3 + * 4 + * Licensed under the GPL-2 or later 5 + */ 6 + 7 + #ifndef _MACH_PLL_H 8 + #define _MACH_PLL_H 9 + 10 + #include <asm/blackfin.h> 11 + #include <asm/irqflags.h> 12 + 13 + /* Writing to PLL_CTL initiates a PLL relock sequence. */ 14 + static __inline__ void bfin_write_PLL_CTL(unsigned int val) 15 + { 16 + unsigned long flags, iwr; 17 + 18 + if (val == bfin_read_PLL_CTL()) 19 + return; 20 + 21 + flags = hard_local_irq_save(); 22 + /* Enable the PLL Wakeup bit in SIC IWR */ 23 + iwr = bfin_read32(SIC_IWR); 24 + /* Only allow PPL Wakeup) */ 25 + bfin_write32(SIC_IWR, IWR_ENABLE(0)); 26 + 27 + bfin_write16(PLL_CTL, val); 28 + SSYNC(); 29 + asm("IDLE;"); 30 + 31 + bfin_write32(SIC_IWR, iwr); 32 + hard_local_irq_restore(flags); 33 + } 34 + 35 + /* Writing to VR_CTL initiates a PLL relock sequence. */ 36 + static __inline__ void bfin_write_VR_CTL(unsigned int val) 37 + { 38 + unsigned long flags, iwr; 39 + 40 + if (val == bfin_read_VR_CTL()) 41 + return; 42 + 43 + flags = hard_local_irq_save(); 44 + /* Enable the PLL Wakeup bit in SIC IWR */ 45 + iwr = bfin_read32(SIC_IWR); 46 + /* Only allow PPL Wakeup) */ 47 + bfin_write32(SIC_IWR, IWR_ENABLE(0)); 48 + 49 + bfin_write16(VR_CTL, val); 50 + SSYNC(); 51 + asm("IDLE;"); 52 + 53 + bfin_write32(SIC_IWR, iwr); 54 + hard_local_irq_restore(flags); 55 + } 56 + 57 + #endif /* _MACH_PLL_H */
-44
arch/blackfin/mach-bf537/include/mach/cdefBF534.h
··· 1750 1750 /* These need to be last due to the cdef/linux inter-dependencies */ 1751 1751 #include <asm/irq.h> 1752 1752 1753 - /* Writing to PLL_CTL initiates a PLL relock sequence. */ 1754 - static __inline__ void bfin_write_PLL_CTL(unsigned int val) 1755 - { 1756 - unsigned long flags, iwr; 1757 - 1758 - if (val == bfin_read_PLL_CTL()) 1759 - return; 1760 - 1761 - local_irq_save_hw(flags); 1762 - /* Enable the PLL Wakeup bit in SIC IWR */ 1763 - iwr = bfin_read32(SIC_IWR); 1764 - /* Only allow PPL Wakeup) */ 1765 - bfin_write32(SIC_IWR, IWR_ENABLE(0)); 1766 - 1767 - bfin_write16(PLL_CTL, val); 1768 - SSYNC(); 1769 - asm("IDLE;"); 1770 - 1771 - bfin_write32(SIC_IWR, iwr); 1772 - local_irq_restore_hw(flags); 1773 - } 1774 - 1775 - /* Writing to VR_CTL initiates a PLL relock sequence. */ 1776 - static __inline__ void bfin_write_VR_CTL(unsigned int val) 1777 - { 1778 - unsigned long flags, iwr; 1779 - 1780 - if (val == bfin_read_VR_CTL()) 1781 - return; 1782 - 1783 - local_irq_save_hw(flags); 1784 - /* Enable the PLL Wakeup bit in SIC IWR */ 1785 - iwr = bfin_read32(SIC_IWR); 1786 - /* Only allow PPL Wakeup) */ 1787 - bfin_write32(SIC_IWR, IWR_ENABLE(0)); 1788 - 1789 - bfin_write16(VR_CTL, val); 1790 - SSYNC(); 1791 - asm("IDLE;"); 1792 - 1793 - bfin_write32(SIC_IWR, iwr); 1794 - local_irq_restore_hw(flags); 1795 - } 1796 - 1797 1753 #endif /* _CDEF_BF534_H */
+57
arch/blackfin/mach-bf537/include/mach/pll.h
··· 1 + /* 2 + * Copyright 2005-2008 Analog Devices Inc. 3 + * 4 + * Licensed under the GPL-2 or later 5 + */ 6 + 7 + #ifndef _MACH_PLL_H 8 + #define _MACH_PLL_H 9 + 10 + #include <asm/blackfin.h> 11 + #include <asm/irqflags.h> 12 + 13 + /* Writing to PLL_CTL initiates a PLL relock sequence. */ 14 + static __inline__ void bfin_write_PLL_CTL(unsigned int val) 15 + { 16 + unsigned long flags, iwr; 17 + 18 + if (val == bfin_read_PLL_CTL()) 19 + return; 20 + 21 + flags = hard_local_irq_save(); 22 + /* Enable the PLL Wakeup bit in SIC IWR */ 23 + iwr = bfin_read32(SIC_IWR); 24 + /* Only allow PPL Wakeup) */ 25 + bfin_write32(SIC_IWR, IWR_ENABLE(0)); 26 + 27 + bfin_write16(PLL_CTL, val); 28 + SSYNC(); 29 + asm("IDLE;"); 30 + 31 + bfin_write32(SIC_IWR, iwr); 32 + hard_local_irq_restore(flags); 33 + } 34 + 35 + /* Writing to VR_CTL initiates a PLL relock sequence. */ 36 + static __inline__ void bfin_write_VR_CTL(unsigned int val) 37 + { 38 + unsigned long flags, iwr; 39 + 40 + if (val == bfin_read_VR_CTL()) 41 + return; 42 + 43 + flags = hard_local_irq_save(); 44 + /* Enable the PLL Wakeup bit in SIC IWR */ 45 + iwr = bfin_read32(SIC_IWR); 46 + /* Only allow PPL Wakeup) */ 47 + bfin_write32(SIC_IWR, IWR_ENABLE(0)); 48 + 49 + bfin_write16(VR_CTL, val); 50 + SSYNC(); 51 + asm("IDLE;"); 52 + 53 + bfin_write32(SIC_IWR, iwr); 54 + hard_local_irq_restore(flags); 55 + } 56 + 57 + #endif /* _MACH_PLL_H */
-50
arch/blackfin/mach-bf538/include/mach/cdefBF538.h
··· 2027 2027 /* These need to be last due to the cdef/linux inter-dependencies */ 2028 2028 #include <asm/irq.h> 2029 2029 2030 - /* Writing to PLL_CTL initiates a PLL relock sequence. */ 2031 - static __inline__ void bfin_write_PLL_CTL(unsigned int val) 2032 - { 2033 - unsigned long flags, iwr0, iwr1; 2034 - 2035 - if (val == bfin_read_PLL_CTL()) 2036 - return; 2037 - 2038 - local_irq_save_hw(flags); 2039 - /* Enable the PLL Wakeup bit in SIC IWR */ 2040 - iwr0 = bfin_read32(SIC_IWR0); 2041 - iwr1 = bfin_read32(SIC_IWR1); 2042 - /* Only allow PPL Wakeup) */ 2043 - bfin_write32(SIC_IWR0, IWR_ENABLE(0)); 2044 - bfin_write32(SIC_IWR1, 0); 2045 - 2046 - bfin_write16(PLL_CTL, val); 2047 - SSYNC(); 2048 - asm("IDLE;"); 2049 - 2050 - bfin_write32(SIC_IWR0, iwr0); 2051 - bfin_write32(SIC_IWR1, iwr1); 2052 - local_irq_restore_hw(flags); 2053 - } 2054 - 2055 - /* Writing to VR_CTL initiates a PLL relock sequence. */ 2056 - static __inline__ void bfin_write_VR_CTL(unsigned int val) 2057 - { 2058 - unsigned long flags, iwr0, iwr1; 2059 - 2060 - if (val == bfin_read_VR_CTL()) 2061 - return; 2062 - 2063 - local_irq_save_hw(flags); 2064 - /* Enable the PLL Wakeup bit in SIC IWR */ 2065 - iwr0 = bfin_read32(SIC_IWR0); 2066 - iwr1 = bfin_read32(SIC_IWR1); 2067 - /* Only allow PPL Wakeup) */ 2068 - bfin_write32(SIC_IWR0, IWR_ENABLE(0)); 2069 - bfin_write32(SIC_IWR1, 0); 2070 - 2071 - bfin_write16(VR_CTL, val); 2072 - SSYNC(); 2073 - asm("IDLE;"); 2074 - 2075 - bfin_write32(SIC_IWR0, iwr0); 2076 - bfin_write32(SIC_IWR1, iwr1); 2077 - local_irq_restore_hw(flags); 2078 - } 2079 - 2080 2030 #endif
+63
arch/blackfin/mach-bf538/include/mach/pll.h
/*
 * Copyright 2008-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef _MACH_PLL_H
#define _MACH_PLL_H

#include <asm/blackfin.h>
#include <asm/irqflags.h>

/*
 * Writing to PLL_CTL initiates a PLL relock sequence.
 *
 * Runs with hard (hardware) interrupts disabled; both SIC wakeup
 * registers are masked down to just the PLL wakeup so the IDLE below
 * stalls the core until the PLL has relocked.
 */
static __inline__ void bfin_write_PLL_CTL(unsigned int val)
{
	unsigned long flags, iwr0, iwr1;

	/* nothing to do if the register already holds this value */
	if (val == bfin_read_PLL_CTL())
		return;

	flags = hard_local_irq_save();
	/* save both wakeup masks and allow only the PLL wakeup */
	iwr0 = bfin_read32(SIC_IWR0);
	iwr1 = bfin_read32(SIC_IWR1);
	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
	bfin_write32(SIC_IWR1, 0);

	bfin_write16(PLL_CTL, val);
	SSYNC();
	/* wait in IDLE for the PLL wakeup (relock complete) */
	asm("IDLE;");

	/* restore the wakeup masks and hardware interrupt state */
	bfin_write32(SIC_IWR0, iwr0);
	bfin_write32(SIC_IWR1, iwr1);
	hard_local_irq_restore(flags);
}

/*
 * Writing to VR_CTL (voltage regulator control) also initiates a PLL
 * relock sequence; same IDLE-until-PLL-wakeup sequence as above.
 */
static __inline__ void bfin_write_VR_CTL(unsigned int val)
{
	unsigned long flags, iwr0, iwr1;

	/* nothing to do if the register already holds this value */
	if (val == bfin_read_VR_CTL())
		return;

	flags = hard_local_irq_save();
	/* save both wakeup masks and allow only the PLL wakeup */
	iwr0 = bfin_read32(SIC_IWR0);
	iwr1 = bfin_read32(SIC_IWR1);
	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
	bfin_write32(SIC_IWR1, 0);

	bfin_write16(VR_CTL, val);
	SSYNC();
	/* wait in IDLE for the PLL wakeup (relock complete) */
	asm("IDLE;");

	/* restore the wakeup masks and hardware interrupt state */
	bfin_write32(SIC_IWR0, iwr0);
	bfin_write32(SIC_IWR1, iwr1);
	hard_local_irq_restore(flags);
}

#endif /* _MACH_PLL_H */
-56
arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
··· 2648 2648 /* These need to be last due to the cdef/linux inter-dependencies */ 2649 2649 #include <asm/irq.h> 2650 2650 2651 - /* Writing to PLL_CTL initiates a PLL relock sequence. */ 2652 - static __inline__ void bfin_write_PLL_CTL(unsigned int val) 2653 - { 2654 - unsigned long flags, iwr0, iwr1, iwr2; 2655 - 2656 - if (val == bfin_read_PLL_CTL()) 2657 - return; 2658 - 2659 - local_irq_save_hw(flags); 2660 - /* Enable the PLL Wakeup bit in SIC IWR */ 2661 - iwr0 = bfin_read32(SIC_IWR0); 2662 - iwr1 = bfin_read32(SIC_IWR1); 2663 - iwr2 = bfin_read32(SIC_IWR2); 2664 - /* Only allow PPL Wakeup) */ 2665 - bfin_write32(SIC_IWR0, IWR_ENABLE(0)); 2666 - bfin_write32(SIC_IWR1, 0); 2667 - bfin_write32(SIC_IWR2, 0); 2668 - 2669 - bfin_write16(PLL_CTL, val); 2670 - SSYNC(); 2671 - asm("IDLE;"); 2672 - 2673 - bfin_write32(SIC_IWR0, iwr0); 2674 - bfin_write32(SIC_IWR1, iwr1); 2675 - bfin_write32(SIC_IWR2, iwr2); 2676 - local_irq_restore_hw(flags); 2677 - } 2678 - 2679 - /* Writing to VR_CTL initiates a PLL relock sequence. */ 2680 - static __inline__ void bfin_write_VR_CTL(unsigned int val) 2681 - { 2682 - unsigned long flags, iwr0, iwr1, iwr2; 2683 - 2684 - if (val == bfin_read_VR_CTL()) 2685 - return; 2686 - 2687 - local_irq_save_hw(flags); 2688 - /* Enable the PLL Wakeup bit in SIC IWR */ 2689 - iwr0 = bfin_read32(SIC_IWR0); 2690 - iwr1 = bfin_read32(SIC_IWR1); 2691 - iwr2 = bfin_read32(SIC_IWR2); 2692 - /* Only allow PPL Wakeup) */ 2693 - bfin_write32(SIC_IWR0, IWR_ENABLE(0)); 2694 - bfin_write32(SIC_IWR1, 0); 2695 - bfin_write32(SIC_IWR2, 0); 2696 - 2697 - bfin_write16(VR_CTL, val); 2698 - SSYNC(); 2699 - asm("IDLE;"); 2700 - 2701 - bfin_write32(SIC_IWR0, iwr0); 2702 - bfin_write32(SIC_IWR1, iwr1); 2703 - bfin_write32(SIC_IWR2, iwr2); 2704 - local_irq_restore_hw(flags); 2705 - } 2706 - 2707 2651 #endif /* _CDEF_BF54X_H */ 2708 2652
+69
arch/blackfin/mach-bf548/include/mach/pll.h
/*
 * Copyright 2007-2008 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef _MACH_PLL_H
#define _MACH_PLL_H

#include <asm/blackfin.h>
#include <asm/irqflags.h>

/*
 * Writing to PLL_CTL initiates a PLL relock sequence.
 *
 * Runs with hard (hardware) interrupts disabled; all three SIC wakeup
 * registers are masked down to just the PLL wakeup so the IDLE below
 * stalls the core until the PLL has relocked.
 */
static __inline__ void bfin_write_PLL_CTL(unsigned int val)
{
	unsigned long flags, iwr0, iwr1, iwr2;

	/* nothing to do if the register already holds this value */
	if (val == bfin_read_PLL_CTL())
		return;

	flags = hard_local_irq_save();
	/* save the wakeup masks and allow only the PLL wakeup */
	iwr0 = bfin_read32(SIC_IWR0);
	iwr1 = bfin_read32(SIC_IWR1);
	iwr2 = bfin_read32(SIC_IWR2);
	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
	bfin_write32(SIC_IWR1, 0);
	bfin_write32(SIC_IWR2, 0);

	bfin_write16(PLL_CTL, val);
	SSYNC();
	/* wait in IDLE for the PLL wakeup (relock complete) */
	asm("IDLE;");

	/* restore the wakeup masks and hardware interrupt state */
	bfin_write32(SIC_IWR0, iwr0);
	bfin_write32(SIC_IWR1, iwr1);
	bfin_write32(SIC_IWR2, iwr2);
	hard_local_irq_restore(flags);
}

/*
 * Writing to VR_CTL (voltage regulator control) also initiates a PLL
 * relock sequence; same IDLE-until-PLL-wakeup sequence as above.
 */
static __inline__ void bfin_write_VR_CTL(unsigned int val)
{
	unsigned long flags, iwr0, iwr1, iwr2;

	/* nothing to do if the register already holds this value */
	if (val == bfin_read_VR_CTL())
		return;

	flags = hard_local_irq_save();
	/* save the wakeup masks and allow only the PLL wakeup */
	iwr0 = bfin_read32(SIC_IWR0);
	iwr1 = bfin_read32(SIC_IWR1);
	iwr2 = bfin_read32(SIC_IWR2);
	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
	bfin_write32(SIC_IWR1, 0);
	bfin_write32(SIC_IWR2, 0);

	bfin_write16(VR_CTL, val);
	SSYNC();
	/* wait in IDLE for the PLL wakeup (relock complete) */
	asm("IDLE;");

	/* restore the wakeup masks and hardware interrupt state */
	bfin_write32(SIC_IWR0, iwr0);
	bfin_write32(SIC_IWR1, iwr1);
	bfin_write32(SIC_IWR2, iwr2);
	hard_local_irq_restore(flags);
}

#endif /* _MACH_PLL_H */
-50
arch/blackfin/mach-bf561/include/mach/cdefBF561.h
··· 1534 1534 /* These need to be last due to the cdef/linux inter-dependencies */ 1535 1535 #include <asm/irq.h> 1536 1536 1537 - /* Writing to PLL_CTL initiates a PLL relock sequence. */ 1538 - static __inline__ void bfin_write_PLL_CTL(unsigned int val) 1539 - { 1540 - unsigned long flags, iwr0, iwr1; 1541 - 1542 - if (val == bfin_read_PLL_CTL()) 1543 - return; 1544 - 1545 - local_irq_save_hw(flags); 1546 - /* Enable the PLL Wakeup bit in SIC IWR */ 1547 - iwr0 = bfin_read32(SICA_IWR0); 1548 - iwr1 = bfin_read32(SICA_IWR1); 1549 - /* Only allow PPL Wakeup) */ 1550 - bfin_write32(SICA_IWR0, IWR_ENABLE(0)); 1551 - bfin_write32(SICA_IWR1, 0); 1552 - 1553 - bfin_write16(PLL_CTL, val); 1554 - SSYNC(); 1555 - asm("IDLE;"); 1556 - 1557 - bfin_write32(SICA_IWR0, iwr0); 1558 - bfin_write32(SICA_IWR1, iwr1); 1559 - local_irq_restore_hw(flags); 1560 - } 1561 - 1562 - /* Writing to VR_CTL initiates a PLL relock sequence. */ 1563 - static __inline__ void bfin_write_VR_CTL(unsigned int val) 1564 - { 1565 - unsigned long flags, iwr0, iwr1; 1566 - 1567 - if (val == bfin_read_VR_CTL()) 1568 - return; 1569 - 1570 - local_irq_save_hw(flags); 1571 - /* Enable the PLL Wakeup bit in SIC IWR */ 1572 - iwr0 = bfin_read32(SICA_IWR0); 1573 - iwr1 = bfin_read32(SICA_IWR1); 1574 - /* Only allow PPL Wakeup) */ 1575 - bfin_write32(SICA_IWR0, IWR_ENABLE(0)); 1576 - bfin_write32(SICA_IWR1, 0); 1577 - 1578 - bfin_write16(VR_CTL, val); 1579 - SSYNC(); 1580 - asm("IDLE;"); 1581 - 1582 - bfin_write32(SICA_IWR0, iwr0); 1583 - bfin_write32(SICA_IWR1, iwr1); 1584 - local_irq_restore_hw(flags); 1585 - } 1586 - 1587 1537 #endif /* _CDEF_BF561_H */
+63
arch/blackfin/mach-bf561/include/mach/pll.h
/*
 * Copyright 2005-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef _MACH_PLL_H
#define _MACH_PLL_H

#include <asm/blackfin.h>
#include <asm/irqflags.h>

/*
 * Writing to PLL_CTL initiates a PLL relock sequence.
 *
 * Runs with hard (hardware) interrupts disabled; both SICA wakeup
 * registers are masked down to just the PLL wakeup so the IDLE below
 * stalls the core until the PLL has relocked.
 */
static __inline__ void bfin_write_PLL_CTL(unsigned int val)
{
	unsigned long flags, iwr0, iwr1;

	/* nothing to do if the register already holds this value */
	if (val == bfin_read_PLL_CTL())
		return;

	flags = hard_local_irq_save();
	/* save both wakeup masks and allow only the PLL wakeup */
	iwr0 = bfin_read32(SICA_IWR0);
	iwr1 = bfin_read32(SICA_IWR1);
	bfin_write32(SICA_IWR0, IWR_ENABLE(0));
	bfin_write32(SICA_IWR1, 0);

	bfin_write16(PLL_CTL, val);
	SSYNC();
	/* wait in IDLE for the PLL wakeup (relock complete) */
	asm("IDLE;");

	/* restore the wakeup masks and hardware interrupt state */
	bfin_write32(SICA_IWR0, iwr0);
	bfin_write32(SICA_IWR1, iwr1);
	hard_local_irq_restore(flags);
}

/*
 * Writing to VR_CTL (voltage regulator control) also initiates a PLL
 * relock sequence; same IDLE-until-PLL-wakeup sequence as above.
 */
static __inline__ void bfin_write_VR_CTL(unsigned int val)
{
	unsigned long flags, iwr0, iwr1;

	/* nothing to do if the register already holds this value */
	if (val == bfin_read_VR_CTL())
		return;

	flags = hard_local_irq_save();
	/* save both wakeup masks and allow only the PLL wakeup */
	iwr0 = bfin_read32(SICA_IWR0);
	iwr1 = bfin_read32(SICA_IWR1);
	bfin_write32(SICA_IWR0, IWR_ENABLE(0));
	bfin_write32(SICA_IWR1, 0);

	bfin_write16(VR_CTL, val);
	SSYNC();
	/* wait in IDLE for the PLL wakeup (relock complete) */
	asm("IDLE;");

	/* restore the wakeup masks and hardware interrupt state */
	bfin_write32(SICA_IWR0, iwr0);
	bfin_write32(SICA_IWR1, iwr1);
	hard_local_irq_restore(flags);
}

#endif /* _MACH_PLL_H */
+2 -2
arch/blackfin/mach-common/cpufreq.c
··· 134 134 135 135 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 136 136 if (cpu == CPUFREQ_CPU) { 137 - local_irq_save_hw(flags); 137 + flags = hard_local_irq_save(); 138 138 plldiv = (bfin_read_PLL_DIV() & SSEL) | 139 139 dpm_state_table[index].csel; 140 140 bfin_write_PLL_DIV(plldiv); ··· 155 155 loops_per_jiffy = cpufreq_scale(lpj_ref, 156 156 lpj_ref_freq, freqs.new); 157 157 } 158 - local_irq_restore_hw(flags); 158 + hard_local_irq_restore(flags); 159 159 } 160 160 /* TODO: just test case for cycles clock source, remove later */ 161 161 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+12 -12
arch/blackfin/mach-common/ints-priority.c
··· 132 132 static void bfin_core_mask_irq(unsigned int irq) 133 133 { 134 134 bfin_irq_flags &= ~(1 << irq); 135 - if (!irqs_disabled_hw()) 136 - local_irq_enable_hw(); 135 + if (!hard_irqs_disabled()) 136 + hard_local_irq_enable(); 137 137 } 138 138 139 139 static void bfin_core_unmask_irq(unsigned int irq) ··· 148 148 * local_irq_enable just does "STI bfin_irq_flags", so it's exactly 149 149 * what we need. 150 150 */ 151 - if (!irqs_disabled_hw()) 152 - local_irq_enable_hw(); 151 + if (!hard_irqs_disabled()) 152 + hard_local_irq_enable(); 153 153 return; 154 154 } 155 155 ··· 158 158 unsigned long flags; 159 159 160 160 #ifdef CONFIG_BF53x 161 - local_irq_save_hw(flags); 161 + flags = hard_local_irq_save(); 162 162 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() & 163 163 ~(1 << SIC_SYSIRQ(irq))); 164 164 #else 165 165 unsigned mask_bank, mask_bit; 166 - local_irq_save_hw(flags); 166 + flags = hard_local_irq_save(); 167 167 mask_bank = SIC_SYSIRQ(irq) / 32; 168 168 mask_bit = SIC_SYSIRQ(irq) % 32; 169 169 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) & ··· 173 173 ~(1 << mask_bit)); 174 174 #endif 175 175 #endif 176 - local_irq_restore_hw(flags); 176 + hard_local_irq_restore(flags); 177 177 } 178 178 179 179 #ifdef CONFIG_SMP ··· 186 186 unsigned long flags; 187 187 188 188 #ifdef CONFIG_BF53x 189 - local_irq_save_hw(flags); 189 + flags = hard_local_irq_save(); 190 190 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | 191 191 (1 << SIC_SYSIRQ(irq))); 192 192 #else 193 193 unsigned mask_bank, mask_bit; 194 - local_irq_save_hw(flags); 194 + flags = hard_local_irq_save(); 195 195 mask_bank = SIC_SYSIRQ(irq) / 32; 196 196 mask_bit = SIC_SYSIRQ(irq) % 32; 197 197 #ifdef CONFIG_SMP ··· 207 207 (1 << mask_bit)); 208 208 #endif 209 209 #endif 210 - local_irq_restore_hw(flags); 210 + hard_local_irq_restore(flags); 211 211 } 212 212 213 213 #ifdef CONFIG_SMP ··· 264 264 break; 265 265 } 266 266 267 - local_irq_save_hw(flags); 267 + flags = hard_local_irq_save(); 
268 268 269 269 if (state) { 270 270 bfin_sic_iwr[bank] |= (1 << bit); ··· 275 275 vr_wakeup &= ~wakeup; 276 276 } 277 277 278 - local_irq_restore_hw(flags); 278 + hard_local_irq_restore(flags); 279 279 280 280 return 0; 281 281 }
+5 -5
arch/blackfin/mach-common/pm.c
··· 25 25 { 26 26 unsigned long flags; 27 27 28 - local_irq_save_hw(flags); 28 + flags = hard_local_irq_save(); 29 29 bfin_pm_standby_setup(); 30 30 31 31 #ifdef CONFIG_PM_BFIN_SLEEP_DEEPER ··· 56 56 bfin_write_SIC_IWR(IWR_DISABLE_ALL); 57 57 #endif 58 58 59 - local_irq_restore_hw(flags); 59 + hard_local_irq_restore(flags); 60 60 } 61 61 62 62 int bf53x_suspend_l1_mem(unsigned char *memptr) ··· 149 149 wakeup |= GPWE; 150 150 #endif 151 151 152 - local_irq_save_hw(flags); 152 + flags = hard_local_irq_save(); 153 153 154 154 ret = blackfin_dma_suspend(); 155 155 156 156 if (ret) { 157 - local_irq_restore_hw(flags); 157 + hard_local_irq_restore(flags); 158 158 kfree(memptr); 159 159 return ret; 160 160 } ··· 178 178 bfin_gpio_pm_hibernate_restore(); 179 179 blackfin_dma_resume(); 180 180 181 - local_irq_restore_hw(flags); 181 + hard_local_irq_restore(flags); 182 182 kfree(memptr); 183 183 184 184 return 0;
+45
arch/cris/include/arch-v10/arch/irqflags.h
#ifndef __ASM_CRIS_ARCH_IRQFLAGS_H
#define __ASM_CRIS_ARCH_IRQFLAGS_H

#include <linux/types.h>

/* Return the raw condition code register (CCR); bit 5 is the interrupt
 * enable flag (see arch_irqs_disabled_flags() below). */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;
	asm volatile("move $ccr,%0" : "=rm" (flags) : : "memory");
	return flags;
}

/* Disable interrupts with the "di" instruction. */
static inline void arch_local_irq_disable(void)
{
	asm volatile("di" : : : "memory");
}

/* Enable interrupts with the "ei" instruction. */
static inline void arch_local_irq_enable(void)
{
	asm volatile("ei" : : : "memory");
}

/* Save the current CCR, then disable interrupts. */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}

/* Write a previously saved CCR back, restoring the interrupt state. */
static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile("move %0,$ccr" : : "rm" (flags) : "memory");
}

/* IRQs are disabled when the interrupt enable bit (bit 5) is clear. */
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & (1 << 5));
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#endif /* __ASM_CRIS_ARCH_IRQFLAGS_H */
-16
arch/cris/include/arch-v10/arch/system.h
··· 44 44 struct __xchg_dummy { unsigned long a[100]; }; 45 45 #define __xg(x) ((struct __xchg_dummy *)(x)) 46 46 47 - /* interrupt control.. */ 48 - #define local_save_flags(x) __asm__ __volatile__ ("move $ccr,%0" : "=rm" (x) : : "memory"); 49 - #define local_irq_restore(x) __asm__ __volatile__ ("move %0,$ccr" : : "rm" (x) : "memory"); 50 - #define local_irq_disable() __asm__ __volatile__ ( "di" : : :"memory"); 51 - #define local_irq_enable() __asm__ __volatile__ ( "ei" : : :"memory"); 52 - 53 - #define irqs_disabled() \ 54 - ({ \ 55 - unsigned long flags; \ 56 - local_save_flags(flags); \ 57 - !(flags & (1<<5)); \ 58 - }) 59 - 60 - /* For spinlocks etc */ 61 - #define local_irq_save(x) __asm__ __volatile__ ("move $ccr,%0\n\tdi" : "=rm" (x) : : "memory"); 62 - 63 47 #endif
+46
arch/cris/include/arch-v32/arch/irqflags.h
#ifndef __ASM_CRIS_ARCH_IRQFLAGS_H
#define __ASM_CRIS_ARCH_IRQFLAGS_H

#include <linux/types.h>
#include <arch/ptrace.h>	/* presumably for I_CCS_BITNR -- verify */

/* Return the raw condition code stack register (CCS); the I bit
 * (I_CCS_BITNR) is the interrupt enable flag. */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;
	asm volatile("move $ccs,%0" : "=rm" (flags) : : "memory");
	return flags;
}

/* Disable interrupts with the "di" instruction. */
static inline void arch_local_irq_disable(void)
{
	asm volatile("di" : : : "memory");
}

/* Enable interrupts with the "ei" instruction. */
static inline void arch_local_irq_enable(void)
{
	asm volatile("ei" : : : "memory");
}

/* Save the current CCS, then disable interrupts. */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}

/* Write a previously saved CCS back, restoring the interrupt state. */
static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile("move %0,$ccs" : : "rm" (flags) : "memory");
}

/* IRQs are disabled when the I bit is clear. */
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & (1 << I_CCS_BITNR));
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#endif /* __ASM_CRIS_ARCH_IRQFLAGS_H */
-22
arch/cris/include/arch-v32/arch/system.h
··· 44 44 struct __xchg_dummy { unsigned long a[100]; }; 45 45 #define __xg(x) ((struct __xchg_dummy *)(x)) 46 46 47 - /* Used for interrupt control. */ 48 - #define local_save_flags(x) \ 49 - __asm__ __volatile__ ("move $ccs, %0" : "=rm" (x) : : "memory"); 50 - 51 - #define local_irq_restore(x) \ 52 - __asm__ __volatile__ ("move %0, $ccs" : : "rm" (x) : "memory"); 53 - 54 - #define local_irq_disable() __asm__ __volatile__ ("di" : : : "memory"); 55 - #define local_irq_enable() __asm__ __volatile__ ("ei" : : : "memory"); 56 - 57 - #define irqs_disabled() \ 58 - ({ \ 59 - unsigned long flags; \ 60 - \ 61 - local_save_flags(flags);\ 62 - !(flags & (1 << I_CCS_BITNR)); \ 63 - }) 64 - 65 - /* Used for spinlocks, etc. */ 66 - #define local_irq_save(x) \ 67 - __asm__ __volatile__ ("move $ccs, %0\n\tdi" : "=rm" (x) : : "memory"); 68 - 69 47 #endif /* _ASM_CRIS_ARCH_SYSTEM_H */
+1
arch/cris/include/asm/irqflags.h
··· 1 + #include <arch/irqflags.h>
+1
arch/cris/include/asm/system.h
··· 1 1 #ifndef __ASM_CRIS_SYSTEM_H 2 2 #define __ASM_CRIS_SYSTEM_H 3 3 4 + #include <linux/irqflags.h> 4 5 #include <arch/system.h> 5 6 6 7 /* the switch_to macro calls resume, an asm function in entry.S which does the actual
+158
arch/frv/include/asm/irqflags.h
/* FR-V interrupt handling
 *
 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

/*
 * interrupt flag manipulation
 * - use virtual interrupt management since touching the PSR is slow
 *   - ICC2.Z: T if interrupts virtually disabled
 *   - ICC2.C: F if interrupts really disabled
 * - if Z==1 upon interrupt:
 *   - C is set to 0
 *   - interrupts are really disabled
 *   - entry.S returns immediately
 * - uses TIHI (TRAP if Z==0 && C==0) #2 to really reenable interrupts
 *   - if taken, the trap:
 *     - sets ICC2.C
 *     - enables interrupts
 */
static inline void arch_local_irq_disable(void)
{
	/* set Z flag, but don't change the C flag */
	asm volatile("	andcc	gr0,gr0,gr0,icc2	\n"
		     :
		     :
		     : "memory", "icc2"
		     );
}

static inline void arch_local_irq_enable(void)
{
	/* clear Z flag and then test the C flag */
	asm volatile("	oricc	gr0,#1,gr0,icc2		\n"
		     "	tihi	icc2,gr0,#2		\n"
		     :
		     :
		     : "memory", "icc2"
		     );
}

/* Return 1 if interrupts are (virtually) disabled, 0 otherwise -- note
 * this is NOT a raw PSR/CCR image like on most architectures. */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile("movsg ccr,%0"
		     : "=r"(flags)
		     :
		     : "memory");

	/* shift ICC2.Z to bit 0 */
	flags >>= 26;

	/* make flags 1 if interrupts disabled, 0 otherwise */
	return flags & 1UL;

}

/* Save the virtual interrupt state, then disable interrupts. */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	/* load the Z flag by turning 1 if disabled into 0 if disabled
	 * and thus setting the Z flag but not the C flag */
	asm volatile("	xoricc	%0,#1,gr0,icc2		\n"
		     /* then trap if Z=0 and C=0 */
		     "	tihi	icc2,gr0,#2		\n"
		     :
		     : "r"(flags)
		     : "memory", "icc2"
		     );

}

/* flags is already a boolean here (see arch_local_save_flags()). */
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

/*
 * real interrupt flag manipulation
 *
 * These bypass the virtual scheme above and mask interrupts in the PSR
 * priority-level field directly.
 */
#define __arch_local_irq_disable()			\
do {							\
	unsigned long psr;				\
	asm volatile("	movsg	psr,%0		\n"	\
		     "	andi	%0,%2,%0	\n"	\
		     "	ori	%0,%1,%0	\n"	\
		     "	movgs	%0,psr		\n"	\
		     : "=r"(psr)			\
		     : "i" (PSR_PIL_14), "i" (~PSR_PIL)	\
		     : "memory");			\
} while (0)

#define __arch_local_irq_enable()			\
do {							\
	unsigned long psr;				\
	asm volatile("	movsg	psr,%0		\n"	\
		     "	andi	%0,%1,%0	\n"	\
		     "	movgs	%0,psr		\n"	\
		     : "=r"(psr)			\
		     : "i" (~PSR_PIL)			\
		     : "memory");			\
} while (0)

#define __arch_local_save_flags(flags)		\
do {						\
	typecheck(unsigned long, flags);	\
	asm("movsg psr,%0"			\
	    : "=r"(flags)			\
	    :					\
	    : "memory");			\
} while (0)

#define	__arch_local_irq_save(flags)			\
do {							\
	unsigned long npsr;				\
	typecheck(unsigned long, flags);		\
	asm volatile("	movsg	psr,%0		\n"	\
		     "	andi	%0,%3,%1	\n"	\
		     "	ori	%1,%2,%1	\n"	\
		     "	movgs	%1,psr		\n"	\
		     : "=r"(flags), "=r"(npsr)		\
		     : "i" (PSR_PIL_14), "i" (~PSR_PIL)	\
		     : "memory");			\
} while (0)

#define	__arch_local_irq_restore(flags)			\
do {							\
	typecheck(unsigned long, flags);		\
	asm volatile("	movgs	%0,psr		\n"	\
		     :					\
		     : "r" (flags)			\
		     : "memory");			\
} while (0)

#define __arch_irqs_disabled()			\
	((__get_PSR() & PSR_PIL) >= PSR_PIL_14)

#endif /* _ASM_IRQFLAGS_H */
-136
arch/frv/include/asm/system.h
··· 37 37 } while(0) 38 38 39 39 /* 40 - * interrupt flag manipulation 41 - * - use virtual interrupt management since touching the PSR is slow 42 - * - ICC2.Z: T if interrupts virtually disabled 43 - * - ICC2.C: F if interrupts really disabled 44 - * - if Z==1 upon interrupt: 45 - * - C is set to 0 46 - * - interrupts are really disabled 47 - * - entry.S returns immediately 48 - * - uses TIHI (TRAP if Z==0 && C==0) #2 to really reenable interrupts 49 - * - if taken, the trap: 50 - * - sets ICC2.C 51 - * - enables interrupts 52 - */ 53 - #define local_irq_disable() \ 54 - do { \ 55 - /* set Z flag, but don't change the C flag */ \ 56 - asm volatile(" andcc gr0,gr0,gr0,icc2 \n" \ 57 - : \ 58 - : \ 59 - : "memory", "icc2" \ 60 - ); \ 61 - } while(0) 62 - 63 - #define local_irq_enable() \ 64 - do { \ 65 - /* clear Z flag and then test the C flag */ \ 66 - asm volatile(" oricc gr0,#1,gr0,icc2 \n" \ 67 - " tihi icc2,gr0,#2 \n" \ 68 - : \ 69 - : \ 70 - : "memory", "icc2" \ 71 - ); \ 72 - } while(0) 73 - 74 - #define local_save_flags(flags) \ 75 - do { \ 76 - typecheck(unsigned long, flags); \ 77 - asm volatile("movsg ccr,%0" \ 78 - : "=r"(flags) \ 79 - : \ 80 - : "memory"); \ 81 - \ 82 - /* shift ICC2.Z to bit 0 */ \ 83 - flags >>= 26; \ 84 - \ 85 - /* make flags 1 if interrupts disabled, 0 otherwise */ \ 86 - flags &= 1UL; \ 87 - } while(0) 88 - 89 - #define irqs_disabled() \ 90 - ({unsigned long flags; local_save_flags(flags); !!flags; }) 91 - 92 - #define local_irq_save(flags) \ 93 - do { \ 94 - typecheck(unsigned long, flags); \ 95 - local_save_flags(flags); \ 96 - local_irq_disable(); \ 97 - } while(0) 98 - 99 - #define local_irq_restore(flags) \ 100 - do { \ 101 - typecheck(unsigned long, flags); \ 102 - \ 103 - /* load the Z flag by turning 1 if disabled into 0 if disabled \ 104 - * and thus setting the Z flag but not the C flag */ \ 105 - asm volatile(" xoricc %0,#1,gr0,icc2 \n" \ 106 - /* then test Z=0 and C=0 */ \ 107 - " tihi icc2,gr0,#2 \n" \ 108 - : \ 109 - 
: "r"(flags) \ 110 - : "memory", "icc2" \ 111 - ); \ 112 - \ 113 - } while(0) 114 - 115 - /* 116 - * real interrupt flag manipulation 117 - */ 118 - #define __local_irq_disable() \ 119 - do { \ 120 - unsigned long psr; \ 121 - asm volatile(" movsg psr,%0 \n" \ 122 - " andi %0,%2,%0 \n" \ 123 - " ori %0,%1,%0 \n" \ 124 - " movgs %0,psr \n" \ 125 - : "=r"(psr) \ 126 - : "i" (PSR_PIL_14), "i" (~PSR_PIL) \ 127 - : "memory"); \ 128 - } while(0) 129 - 130 - #define __local_irq_enable() \ 131 - do { \ 132 - unsigned long psr; \ 133 - asm volatile(" movsg psr,%0 \n" \ 134 - " andi %0,%1,%0 \n" \ 135 - " movgs %0,psr \n" \ 136 - : "=r"(psr) \ 137 - : "i" (~PSR_PIL) \ 138 - : "memory"); \ 139 - } while(0) 140 - 141 - #define __local_save_flags(flags) \ 142 - do { \ 143 - typecheck(unsigned long, flags); \ 144 - asm("movsg psr,%0" \ 145 - : "=r"(flags) \ 146 - : \ 147 - : "memory"); \ 148 - } while(0) 149 - 150 - #define __local_irq_save(flags) \ 151 - do { \ 152 - unsigned long npsr; \ 153 - typecheck(unsigned long, flags); \ 154 - asm volatile(" movsg psr,%0 \n" \ 155 - " andi %0,%3,%1 \n" \ 156 - " ori %1,%2,%1 \n" \ 157 - " movgs %1,psr \n" \ 158 - : "=r"(flags), "=r"(npsr) \ 159 - : "i" (PSR_PIL_14), "i" (~PSR_PIL) \ 160 - : "memory"); \ 161 - } while(0) 162 - 163 - #define __local_irq_restore(flags) \ 164 - do { \ 165 - typecheck(unsigned long, flags); \ 166 - asm volatile(" movgs %0,psr \n" \ 167 - : \ 168 - : "r" (flags) \ 169 - : "memory"); \ 170 - } while(0) 171 - 172 - #define __irqs_disabled() \ 173 - ((__get_PSR() & PSR_PIL) >= PSR_PIL_14) 174 - 175 - /* 176 40 * Force strict CPU ordering. 177 41 */ 178 42 #define nop() asm volatile ("nop"::)
+43
arch/h8300/include/asm/irqflags.h
#ifndef _H8300_IRQFLAGS_H
#define _H8300_IRQFLAGS_H

/* Return the CCR; bit 7 (0x80) is the interrupt mask (I) bit. */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;
	asm volatile ("stc ccr,%w0" : "=r" (flags));
	return flags;
}

/* Set the I bit to mask interrupts. */
static inline void arch_local_irq_disable(void)
{
	asm volatile ("orc  #0x80,ccr" : : : "memory");
}

/* Clear the I bit to unmask interrupts. */
static inline void arch_local_irq_enable(void)
{
	asm volatile ("andc #0x7f,ccr" : : : "memory");
}

/* Save the current CCR, then disable interrupts. */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
	arch_local_irq_disable();
	return flags;
}

/* Write a previously saved CCR back, restoring the interrupt state. */
static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile ("ldc %w0,ccr" : : "r" (flags) : "memory");
}

/* IRQs are disabled when the I mask bit (0x80) is set. */
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & 0x80) == 0x80;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#endif /* _H8300_IRQFLAGS_H */
+1 -23
arch/h8300/include/asm/system.h
··· 2 2 #define _H8300_SYSTEM_H 3 3 4 4 #include <linux/linkage.h> 5 + #include <linux/irqflags.h> 5 6 6 7 struct pt_regs; 7 8 ··· 52 51 (last) = _last; \ 53 52 } 54 53 55 - #define __sti() asm volatile ("andc #0x7f,ccr") 56 - #define __cli() asm volatile ("orc #0x80,ccr") 57 - 58 - #define __save_flags(x) \ 59 - asm volatile ("stc ccr,%w0":"=r" (x)) 60 - 61 - #define __restore_flags(x) \ 62 - asm volatile ("ldc %w0,ccr": :"r" (x)) 63 - 64 - #define irqs_disabled() \ 65 - ({ \ 66 - unsigned char flags; \ 67 - __save_flags(flags); \ 68 - ((flags & 0x80) == 0x80); \ 69 - }) 70 - 71 54 #define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc") 72 - 73 - /* For spinlocks etc */ 74 - #define local_irq_disable() __cli() 75 - #define local_irq_enable() __sti() 76 - #define local_irq_save(x) ({ __save_flags(x); local_irq_disable(); }) 77 - #define local_irq_restore(x) __restore_flags(x) 78 - #define local_save_flags(x) __save_flags(x) 79 55 80 56 /* 81 57 * Force strict CPU ordering.
+94
arch/ia64/include/asm/irqflags.h
/*
 * IRQ flags defines.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */

#ifndef _ASM_IA64_IRQFLAGS_H
#define _ASM_IA64_IRQFLAGS_H

#ifdef CONFIG_IA64_DEBUG_IRQ
extern unsigned long last_cli_ip;
/* Record the caller's IP iff @flags has psr.i set, i.e. iff this is
 * the call that actually disabled interrupts. */
static inline void arch_maybe_save_ip(unsigned long flags)
{
	if (flags & IA64_PSR_I)
		last_cli_ip = ia64_getreg(_IA64_REG_IP);
}
#else
#define arch_maybe_save_ip(flags) do {} while (0)
#endif

/*
 * - clearing psr.i is implicitly serialized (visible by next insn)
 * - setting psr.i requires data serialization
 * - we need a stop-bit before reading PSR because we sometimes
 *   write a floating-point register right before reading the PSR
 *   and that writes to PSR.mfl
 */

static inline unsigned long arch_local_save_flags(void)
{
	ia64_stop();
#ifdef CONFIG_PARAVIRT
	/* paravirt hook returns just the psr.i state -- presumably as an
	 * IA64_PSR_I-compatible mask; confirm against the pv backend */
	return ia64_get_psr_i();
#else
	return ia64_getreg(_IA64_REG_PSR);
#endif
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	ia64_stop();
	ia64_rsm(IA64_PSR_I);	/* clear psr.i */
	arch_maybe_save_ip(flags);
	return flags;
}

static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_IA64_DEBUG_IRQ
	/* go through the save path so last_cli_ip gets recorded */
	arch_local_irq_save();
#else
	ia64_stop();
	ia64_rsm(IA64_PSR_I);
#endif
}

static inline void arch_local_irq_enable(void)
{
	ia64_stop();
	ia64_ssm(IA64_PSR_I);	/* set psr.i */
	ia64_srlz_d();		/* setting psr.i needs data serialization */
}

static inline void arch_local_irq_restore(unsigned long flags)
{
#ifdef CONFIG_IA64_DEBUG_IRQ
	unsigned long old_psr = arch_local_save_flags();
#endif
	ia64_intrin_local_irq_restore(flags & IA64_PSR_I);
	/* record IP if this restore transitions enabled -> disabled */
	arch_maybe_save_ip(old_psr & ~flags);
}

/* IRQs are disabled when psr.i is clear. */
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & IA64_PSR_I) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

static inline void arch_safe_halt(void)
{
	ia64_pal_halt_light();	/* PAL_HALT_LIGHT */
}


#endif /* _ASM_IA64_IRQFLAGS_H */
-76
arch/ia64/include/asm/system.h
··· 107 107 */ 108 108 #define set_mb(var, value) do { (var) = (value); mb(); } while (0) 109 109 110 - #define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */ 111 - 112 110 /* 113 111 * The group barrier in front of the rsm & ssm are necessary to ensure 114 112 * that none of the previous instructions in the same group are 115 113 * affected by the rsm/ssm. 116 114 */ 117 - /* For spinlocks etc */ 118 - 119 - /* 120 - * - clearing psr.i is implicitly serialized (visible by next insn) 121 - * - setting psr.i requires data serialization 122 - * - we need a stop-bit before reading PSR because we sometimes 123 - * write a floating-point register right before reading the PSR 124 - * and that writes to PSR.mfl 125 - */ 126 - #ifdef CONFIG_PARAVIRT 127 - #define __local_save_flags() ia64_get_psr_i() 128 - #else 129 - #define __local_save_flags() ia64_getreg(_IA64_REG_PSR) 130 - #endif 131 - 132 - #define __local_irq_save(x) \ 133 - do { \ 134 - ia64_stop(); \ 135 - (x) = __local_save_flags(); \ 136 - ia64_stop(); \ 137 - ia64_rsm(IA64_PSR_I); \ 138 - } while (0) 139 - 140 - #define __local_irq_disable() \ 141 - do { \ 142 - ia64_stop(); \ 143 - ia64_rsm(IA64_PSR_I); \ 144 - } while (0) 145 - 146 - #define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I) 147 - 148 - #ifdef CONFIG_IA64_DEBUG_IRQ 149 - 150 - extern unsigned long last_cli_ip; 151 - 152 - # define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP) 153 - 154 - # define local_irq_save(x) \ 155 - do { \ 156 - unsigned long __psr; \ 157 - \ 158 - __local_irq_save(__psr); \ 159 - if (__psr & IA64_PSR_I) \ 160 - __save_ip(); \ 161 - (x) = __psr; \ 162 - } while (0) 163 - 164 - # define local_irq_disable() do { unsigned long __x; local_irq_save(__x); } while (0) 165 - 166 - # define local_irq_restore(x) \ 167 - do { \ 168 - unsigned long __old_psr, __psr = (x); \ 169 - \ 170 - local_save_flags(__old_psr); \ 171 - __local_irq_restore(__psr); \ 172 - if ((__old_psr & IA64_PSR_I) && 
!(__psr & IA64_PSR_I)) \ 173 - __save_ip(); \ 174 - } while (0) 175 - 176 - #else /* !CONFIG_IA64_DEBUG_IRQ */ 177 - # define local_irq_save(x) __local_irq_save(x) 178 - # define local_irq_disable() __local_irq_disable() 179 - # define local_irq_restore(x) __local_irq_restore(x) 180 - #endif /* !CONFIG_IA64_DEBUG_IRQ */ 181 - 182 - #define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); }) 183 - #define local_save_flags(flags) ({ ia64_stop(); (flags) = __local_save_flags(); }) 184 - 185 - #define irqs_disabled() \ 186 - ({ \ 187 - unsigned long __ia64_id_flags; \ 188 - local_save_flags(__ia64_id_flags); \ 189 - (__ia64_id_flags & IA64_PSR_I) == 0; \ 190 - }) 191 115 192 116 #ifdef __KERNEL__ 193 117
+104
arch/m32r/include/asm/irqflags.h
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto 7 + * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org> 8 + */ 9 + 10 + #ifndef _ASM_M32R_IRQFLAGS_H 11 + #define _ASM_M32R_IRQFLAGS_H 12 + 13 + #include <linux/types.h> 14 + 15 + static inline unsigned long arch_local_save_flags(void) 16 + { 17 + unsigned long flags; 18 + asm volatile("mvfc %0,psw" : "=r"(flags)); 19 + return flags; 20 + } 21 + 22 + static inline void arch_local_irq_disable(void) 23 + { 24 + #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) 25 + asm volatile ( 26 + "clrpsw #0x40 -> nop" 27 + : : : "memory"); 28 + #else 29 + unsigned long tmpreg0, tmpreg1; 30 + asm volatile ( 31 + "ld24 %0, #0 ; Use 32-bit insn. \n\t" 32 + "mvfc %1, psw ; No interrupt can be accepted here. 
\n\t" 33 + "mvtc %0, psw \n\t" 34 + "and3 %0, %1, #0xffbf \n\t" 35 + "mvtc %0, psw \n\t" 36 + : "=&r" (tmpreg0), "=&r" (tmpreg1) 37 + : 38 + : "cbit", "memory"); 39 + #endif 40 + } 41 + 42 + static inline void arch_local_irq_enable(void) 43 + { 44 + #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) 45 + asm volatile ( 46 + "setpsw #0x40 -> nop" 47 + : : : "memory"); 48 + #else 49 + unsigned long tmpreg; 50 + asm volatile ( 51 + "mvfc %0, psw; \n\t" 52 + "or3 %0, %0, #0x0040; \n\t" 53 + "mvtc %0, psw; \n\t" 54 + : "=&r" (tmpreg) 55 + : 56 + : "cbit", "memory"); 57 + #endif 58 + } 59 + 60 + static inline unsigned long arch_local_irq_save(void) 61 + { 62 + unsigned long flags; 63 + 64 + #if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104)) 65 + asm volatile ( 66 + "mvfc %0, psw; \n\t" 67 + "clrpsw #0x40 -> nop; \n\t" 68 + : "=r" (flags) 69 + : 70 + : "memory"); 71 + #else 72 + unsigned long tmpreg; 73 + asm volatile ( 74 + "ld24 %1, #0 \n\t" 75 + "mvfc %0, psw \n\t" 76 + "mvtc %1, psw \n\t" 77 + "and3 %1, %0, #0xffbf \n\t" 78 + "mvtc %1, psw \n\t" 79 + : "=r" (flags), "=&r" (tmpreg) 80 + : 81 + : "cbit", "memory"); 82 + #endif 83 + return flags; 84 + } 85 + 86 + static inline void arch_local_irq_restore(unsigned long flags) 87 + { 88 + asm volatile("mvtc %0,psw" 89 + : 90 + : "r" (flags) 91 + : "cbit", "memory"); 92 + } 93 + 94 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 95 + { 96 + return !(flags & 0x40); 97 + } 98 + 99 + static inline bool arch_irqs_disabled(void) 100 + { 101 + return arch_irqs_disabled_flags(arch_local_save_flags()); 102 + } 103 + 104 + #endif /* _ASM_M32R_IRQFLAGS_H */
+1 -65
arch/m32r/include/asm/system.h
··· 11 11 */ 12 12 13 13 #include <linux/compiler.h> 14 + #include <linux/irqflags.h> 14 15 #include <asm/assembler.h> 15 16 16 17 #ifdef __KERNEL__ ··· 54 53 : "memory", "lr" \ 55 54 ); \ 56 55 } while(0) 57 - 58 - /* Interrupt Control */ 59 - #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) 60 - #define local_irq_enable() \ 61 - __asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory") 62 - #define local_irq_disable() \ 63 - __asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory") 64 - #else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ 65 - static inline void local_irq_enable(void) 66 - { 67 - unsigned long tmpreg; 68 - __asm__ __volatile__( 69 - "mvfc %0, psw; \n\t" 70 - "or3 %0, %0, #0x0040; \n\t" 71 - "mvtc %0, psw; \n\t" 72 - : "=&r" (tmpreg) : : "cbit", "memory"); 73 - } 74 - 75 - static inline void local_irq_disable(void) 76 - { 77 - unsigned long tmpreg0, tmpreg1; 78 - __asm__ __volatile__( 79 - "ld24 %0, #0 ; Use 32-bit insn. \n\t" 80 - "mvfc %1, psw ; No interrupt can be accepted here. 
\n\t" 81 - "mvtc %0, psw \n\t" 82 - "and3 %0, %1, #0xffbf \n\t" 83 - "mvtc %0, psw \n\t" 84 - : "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory"); 85 - } 86 - #endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ 87 - 88 - #define local_save_flags(x) \ 89 - __asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */) 90 - 91 - #define local_irq_restore(x) \ 92 - __asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \ 93 - : "r" (x) : "cbit", "memory") 94 - 95 - #if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104)) 96 - #define local_irq_save(x) \ 97 - __asm__ __volatile__( \ 98 - "mvfc %0, psw; \n\t" \ 99 - "clrpsw #0x40 -> nop; \n\t" \ 100 - : "=r" (x) : /* no input */ : "memory") 101 - #else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ 102 - #define local_irq_save(x) \ 103 - ({ \ 104 - unsigned long tmpreg; \ 105 - __asm__ __volatile__( \ 106 - "ld24 %1, #0 \n\t" \ 107 - "mvfc %0, psw \n\t" \ 108 - "mvtc %1, psw \n\t" \ 109 - "and3 %1, %0, #0xffbf \n\t" \ 110 - "mvtc %1, psw \n\t" \ 111 - : "=r" (x), "=&r" (tmpreg) \ 112 - : : "cbit", "memory"); \ 113 - }) 114 - #endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ 115 - 116 - #define irqs_disabled() \ 117 - ({ \ 118 - unsigned long flags; \ 119 - local_save_flags(flags); \ 120 - !(flags & 0x40); \ 121 - }) 122 56 123 57 #define nop() __asm__ __volatile__ ("nop" : : ) 124 58
+1 -1
arch/m68k/include/asm/entry_no.h
··· 28 28 * M68K COLDFIRE 29 29 */ 30 30 31 - #define ALLOWINT 0xf8ff 31 + #define ALLOWINT (~0x700) 32 32 33 33 #ifdef __ASSEMBLY__ 34 34
+76
arch/m68k/include/asm/irqflags.h
··· 1 + #ifndef _M68K_IRQFLAGS_H 2 + #define _M68K_IRQFLAGS_H 3 + 4 + #include <linux/types.h> 5 + #include <linux/hardirq.h> 6 + #include <linux/preempt.h> 7 + #include <asm/thread_info.h> 8 + #include <asm/entry.h> 9 + 10 + static inline unsigned long arch_local_save_flags(void) 11 + { 12 + unsigned long flags; 13 + asm volatile ("movew %%sr,%0" : "=d" (flags) : : "memory"); 14 + return flags; 15 + } 16 + 17 + static inline void arch_local_irq_disable(void) 18 + { 19 + #ifdef CONFIG_COLDFIRE 20 + asm volatile ( 21 + "move %/sr,%%d0 \n\t" 22 + "ori.l #0x0700,%%d0 \n\t" 23 + "move %%d0,%/sr \n" 24 + : /* no outputs */ 25 + : 26 + : "cc", "%d0", "memory"); 27 + #else 28 + asm volatile ("oriw #0x0700,%%sr" : : : "memory"); 29 + #endif 30 + } 31 + 32 + static inline void arch_local_irq_enable(void) 33 + { 34 + #if defined(CONFIG_COLDFIRE) 35 + asm volatile ( 36 + "move %/sr,%%d0 \n\t" 37 + "andi.l #0xf8ff,%%d0 \n\t" 38 + "move %%d0,%/sr \n" 39 + : /* no outputs */ 40 + : 41 + : "cc", "%d0", "memory"); 42 + #else 43 + # if defined(CONFIG_MMU) 44 + if (MACH_IS_Q40 || !hardirq_count()) 45 + # endif 46 + asm volatile ( 47 + "andiw %0,%%sr" 48 + : 49 + : "i" (ALLOWINT) 50 + : "memory"); 51 + #endif 52 + } 53 + 54 + static inline unsigned long arch_local_irq_save(void) 55 + { 56 + unsigned long flags = arch_local_save_flags(); 57 + arch_local_irq_disable(); 58 + return flags; 59 + } 60 + 61 + static inline void arch_local_irq_restore(unsigned long flags) 62 + { 63 + asm volatile ("movew %0,%%sr" : : "d" (flags) : "memory"); 64 + } 65 + 66 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 67 + { 68 + return (flags & ~ALLOWINT) != 0; 69 + } 70 + 71 + static inline bool arch_irqs_disabled(void) 72 + { 73 + return arch_irqs_disabled_flags(arch_local_save_flags()); 74 + } 75 + 76 + #endif /* _M68K_IRQFLAGS_H */
+1 -24
arch/m68k/include/asm/system_mm.h
··· 3 3 4 4 #include <linux/linkage.h> 5 5 #include <linux/kernel.h> 6 + #include <linux/irqflags.h> 6 7 #include <asm/segment.h> 7 8 #include <asm/entry.h> 8 9 ··· 62 61 #define smp_rmb() barrier() 63 62 #define smp_wmb() barrier() 64 63 #define smp_read_barrier_depends() ((void)0) 65 - 66 - /* interrupt control.. */ 67 - #if 0 68 - #define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory") 69 - #else 70 - #include <linux/hardirq.h> 71 - #define local_irq_enable() ({ \ 72 - if (MACH_IS_Q40 || !hardirq_count()) \ 73 - asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \ 74 - }) 75 - #endif 76 - #define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory") 77 - #define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory") 78 - #define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory") 79 - 80 - static inline int irqs_disabled(void) 81 - { 82 - unsigned long flags; 83 - local_save_flags(flags); 84 - return flags & ~ALLOWINT; 85 - } 86 - 87 - /* For spinlocks etc */ 88 - #define local_irq_save(x) ({ local_save_flags(x); local_irq_disable(); }) 89 64 90 65 #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) 91 66
+1 -56
arch/m68k/include/asm/system_no.h
··· 2 2 #define _M68KNOMMU_SYSTEM_H 3 3 4 4 #include <linux/linkage.h> 5 + #include <linux/irqflags.h> 5 6 #include <asm/segment.h> 6 7 #include <asm/entry.h> 7 8 ··· 46 45 : "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1"); \ 47 46 (last) = _last; \ 48 47 } 49 - 50 - #ifdef CONFIG_COLDFIRE 51 - #define local_irq_enable() __asm__ __volatile__ ( \ 52 - "move %/sr,%%d0\n\t" \ 53 - "andi.l #0xf8ff,%%d0\n\t" \ 54 - "move %%d0,%/sr\n" \ 55 - : /* no outputs */ \ 56 - : \ 57 - : "cc", "%d0", "memory") 58 - #define local_irq_disable() __asm__ __volatile__ ( \ 59 - "move %/sr,%%d0\n\t" \ 60 - "ori.l #0x0700,%%d0\n\t" \ 61 - "move %%d0,%/sr\n" \ 62 - : /* no outputs */ \ 63 - : \ 64 - : "cc", "%d0", "memory") 65 - /* For spinlocks etc */ 66 - #define local_irq_save(x) __asm__ __volatile__ ( \ 67 - "movew %%sr,%0\n\t" \ 68 - "movew #0x0700,%%d0\n\t" \ 69 - "or.l %0,%%d0\n\t" \ 70 - "movew %%d0,%/sr" \ 71 - : "=d" (x) \ 72 - : \ 73 - : "cc", "%d0", "memory") 74 - #else 75 - 76 - /* portable version */ /* FIXME - see entry.h*/ 77 - #define ALLOWINT 0xf8ff 78 - 79 - #define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory") 80 - #define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory") 81 - #endif 82 - 83 - #define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory") 84 - #define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory") 85 - 86 - /* For spinlocks etc */ 87 - #ifndef local_irq_save 88 - #define local_irq_save(x) do { local_save_flags(x); local_irq_disable(); } while (0) 89 - #endif 90 - 91 - #define irqs_disabled() \ 92 - ({ \ 93 - unsigned long flags; \ 94 - local_save_flags(flags); \ 95 - ((flags & 0x0700) == 0x0700); \ 96 - }) 97 48 98 49 #define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc") 99 50 ··· 158 205 159 206 #define arch_align_stack(x) (x) 160 207 161 - 162 - static inline int irqs_disabled_flags(unsigned long flags) 163 - { 164 - if (flags & 0x0700) 
165 - return 0; 166 - else 167 - return 1; 168 - } 169 208 170 209 #endif /* _M68KNOMMU_SYSTEM_H */
-2
arch/m68knommu/kernel/asm-offsets.c
··· 74 74 75 75 DEFINE(PT_PTRACED, PT_PTRACED); 76 76 77 - DEFINE(THREAD_SIZE, THREAD_SIZE); 78 - 79 77 /* Offsets in thread_info structure */ 80 78 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 81 79 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
+1
arch/m68knommu/platform/coldfire/head.S
··· 15 15 #include <asm/coldfire.h> 16 16 #include <asm/mcfcache.h> 17 17 #include <asm/mcfsim.h> 18 + #include <asm/thread_info.h> 18 19 19 20 /*****************************************************************************/ 20 21
+101 -90
arch/microblaze/include/asm/irqflags.h
··· 9 9 #ifndef _ASM_MICROBLAZE_IRQFLAGS_H 10 10 #define _ASM_MICROBLAZE_IRQFLAGS_H 11 11 12 - #include <linux/irqflags.h> 12 + #include <linux/types.h> 13 13 #include <asm/registers.h> 14 14 15 - # if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 15 + #ifdef CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 16 16 17 - # define raw_local_irq_save(flags) \ 18 - do { \ 19 - asm volatile (" msrclr %0, %1; \ 20 - nop;" \ 21 - : "=r"(flags) \ 22 - : "i"(MSR_IE) \ 23 - : "memory"); \ 24 - } while (0) 25 - 26 - # define raw_local_irq_disable() \ 27 - do { \ 28 - asm volatile (" msrclr r0, %0; \ 29 - nop;" \ 30 - : \ 31 - : "i"(MSR_IE) \ 32 - : "memory"); \ 33 - } while (0) 34 - 35 - # define raw_local_irq_enable() \ 36 - do { \ 37 - asm volatile (" msrset r0, %0; \ 38 - nop;" \ 39 - : \ 40 - : "i"(MSR_IE) \ 41 - : "memory"); \ 42 - } while (0) 43 - 44 - # else /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR == 0 */ 45 - 46 - # define raw_local_irq_save(flags) \ 47 - do { \ 48 - register unsigned tmp; \ 49 - asm volatile (" mfs %0, rmsr; \ 50 - nop; \ 51 - andi %1, %0, %2; \ 52 - mts rmsr, %1; \ 53 - nop;" \ 54 - : "=r"(flags), "=r" (tmp) \ 55 - : "i"(~MSR_IE) \ 56 - : "memory"); \ 57 - } while (0) 58 - 59 - # define raw_local_irq_disable() \ 60 - do { \ 61 - register unsigned tmp; \ 62 - asm volatile (" mfs %0, rmsr; \ 63 - nop; \ 64 - andi %0, %0, %1; \ 65 - mts rmsr, %0; \ 66 - nop;" \ 67 - : "=r"(tmp) \ 68 - : "i"(~MSR_IE) \ 69 - : "memory"); \ 70 - } while (0) 71 - 72 - # define raw_local_irq_enable() \ 73 - do { \ 74 - register unsigned tmp; \ 75 - asm volatile (" mfs %0, rmsr; \ 76 - nop; \ 77 - ori %0, %0, %1; \ 78 - mts rmsr, %0; \ 79 - nop;" \ 80 - : "=r"(tmp) \ 81 - : "i"(MSR_IE) \ 82 - : "memory"); \ 83 - } while (0) 84 - 85 - # endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */ 86 - 87 - #define raw_local_irq_restore(flags) \ 88 - do { \ 89 - asm volatile (" mts rmsr, %0; \ 90 - nop;" \ 91 - : \ 92 - : "r"(flags) \ 93 - : "memory"); \ 94 - } while (0) 95 - 96 - static inline 
unsigned long get_msr(void) 17 + static inline unsigned long arch_local_irq_save(void) 97 18 { 98 19 unsigned long flags; 99 - asm volatile (" mfs %0, rmsr; \ 100 - nop;" \ 101 - : "=r"(flags) \ 102 - : \ 103 - : "memory"); \ 20 + asm volatile(" msrclr %0, %1 \n" 21 + " nop \n" 22 + : "=r"(flags) 23 + : "i"(MSR_IE) 24 + : "memory"); 104 25 return flags; 105 26 } 106 27 107 - #define raw_local_save_flags(flags) ((flags) = get_msr()) 108 - #define raw_irqs_disabled() ((get_msr() & MSR_IE) == 0) 109 - #define raw_irqs_disabled_flags(flags) ((flags & MSR_IE) == 0) 28 + static inline void arch_local_irq_disable(void) 29 + { 30 + /* this uses r0 without declaring it - is that correct? */ 31 + asm volatile(" msrclr r0, %0 \n" 32 + " nop \n" 33 + : 34 + : "i"(MSR_IE) 35 + : "memory"); 36 + } 37 + 38 + static inline void arch_local_irq_enable(void) 39 + { 40 + /* this uses r0 without declaring it - is that correct? */ 41 + asm volatile(" msrset r0, %0 \n" 42 + " nop \n" 43 + : 44 + : "i"(MSR_IE) 45 + : "memory"); 46 + } 47 + 48 + #else /* !CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR */ 49 + 50 + static inline unsigned long arch_local_irq_save(void) 51 + { 52 + unsigned long flags, tmp; 53 + asm volatile (" mfs %0, rmsr \n" 54 + " nop \n" 55 + " andi %1, %0, %2 \n" 56 + " mts rmsr, %1 \n" 57 + " nop \n" 58 + : "=r"(flags), "=r"(tmp) 59 + : "i"(~MSR_IE) 60 + : "memory"); 61 + return flags; 62 + } 63 + 64 + static inline void arch_local_irq_disable(void) 65 + { 66 + unsigned long tmp; 67 + asm volatile(" mfs %0, rmsr \n" 68 + " nop \n" 69 + " andi %0, %0, %1 \n" 70 + " mts rmsr, %0 \n" 71 + " nop \n" 72 + : "=r"(tmp) 73 + : "i"(~MSR_IE) 74 + : "memory"); 75 + } 76 + 77 + static inline void arch_local_irq_enable(void) 78 + { 79 + unsigned long tmp; 80 + asm volatile(" mfs %0, rmsr \n" 81 + " nop \n" 82 + " ori %0, %0, %1 \n" 83 + " mts rmsr, %0 \n" 84 + " nop \n" 85 + : "=r"(tmp) 86 + : "i"(MSR_IE) 87 + : "memory"); 88 + } 89 + 90 + #endif /* CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR 
*/ 91 + 92 + static inline unsigned long arch_local_save_flags(void) 93 + { 94 + unsigned long flags; 95 + asm volatile(" mfs %0, rmsr \n" 96 + " nop \n" 97 + : "=r"(flags) 98 + : 99 + : "memory"); 100 + return flags; 101 + } 102 + 103 + static inline void arch_local_irq_restore(unsigned long flags) 104 + { 105 + asm volatile(" mts rmsr, %0 \n" 106 + " nop \n" 107 + : 108 + : "r"(flags) 109 + : "memory"); 110 + } 111 + 112 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 113 + { 114 + return (flags & MSR_IE) == 0; 115 + } 116 + 117 + static inline bool arch_irqs_disabled(void) 118 + { 119 + return arch_irqs_disabled_flags(arch_local_save_flags()); 120 + } 110 121 111 122 #endif /* _ASM_MICROBLAZE_IRQFLAGS_H */
+1
arch/mips/alchemy/devboards/bcsr.c
··· 10 10 #include <linux/interrupt.h> 11 11 #include <linux/module.h> 12 12 #include <linux/spinlock.h> 13 + #include <linux/irq.h> 13 14 #include <asm/addrspace.h> 14 15 #include <asm/io.h> 15 16 #include <asm/mach-db1x00/bcsr.h>
+1
arch/mips/ar7/irq.c
··· 19 19 20 20 #include <linux/interrupt.h> 21 21 #include <linux/io.h> 22 + #include <linux/irq.h> 22 23 23 24 #include <asm/irq_cpu.h> 24 25 #include <asm/mipsregs.h>
+1
arch/mips/bcm63xx/irq.c
··· 11 11 #include <linux/init.h> 12 12 #include <linux/interrupt.h> 13 13 #include <linux/module.h> 14 + #include <linux/irq.h> 14 15 #include <asm/irq_cpu.h> 15 16 #include <asm/mipsregs.h> 16 17 #include <bcm63xx_cpu.h>
+1
arch/mips/cavium-octeon/serial.c
··· 13 13 #include <linux/serial_8250.h> 14 14 #include <linux/serial_reg.h> 15 15 #include <linux/tty.h> 16 + #include <linux/irq.h> 16 17 17 18 #include <asm/time.h> 18 19
+1
arch/mips/dec/setup.c
··· 18 18 #include <linux/spinlock.h> 19 19 #include <linux/types.h> 20 20 #include <linux/pm.h> 21 + #include <linux/irq.h> 21 22 22 23 #include <asm/bootinfo.h> 23 24 #include <asm/cpu.h>
+29 -24
arch/mips/include/asm/irqflags.h
··· 17 17 #include <asm/hazards.h> 18 18 19 19 __asm__( 20 - " .macro raw_local_irq_enable \n" 20 + " .macro arch_local_irq_enable \n" 21 21 " .set push \n" 22 22 " .set reorder \n" 23 23 " .set noat \n" ··· 40 40 41 41 extern void smtc_ipi_replay(void); 42 42 43 - static inline void raw_local_irq_enable(void) 43 + static inline void arch_local_irq_enable(void) 44 44 { 45 45 #ifdef CONFIG_MIPS_MT_SMTC 46 46 /* ··· 50 50 smtc_ipi_replay(); 51 51 #endif 52 52 __asm__ __volatile__( 53 - "raw_local_irq_enable" 53 + "arch_local_irq_enable" 54 54 : /* no outputs */ 55 55 : /* no inputs */ 56 56 : "memory"); ··· 76 76 * Workaround: mask EXL bit of the result or place a nop before mfc0. 77 77 */ 78 78 __asm__( 79 - " .macro raw_local_irq_disable\n" 79 + " .macro arch_local_irq_disable\n" 80 80 " .set push \n" 81 81 " .set noat \n" 82 82 #ifdef CONFIG_MIPS_MT_SMTC ··· 97 97 " .set pop \n" 98 98 " .endm \n"); 99 99 100 - static inline void raw_local_irq_disable(void) 100 + static inline void arch_local_irq_disable(void) 101 101 { 102 102 __asm__ __volatile__( 103 - "raw_local_irq_disable" 103 + "arch_local_irq_disable" 104 104 : /* no outputs */ 105 105 : /* no inputs */ 106 106 : "memory"); 107 107 } 108 108 109 109 __asm__( 110 - " .macro raw_local_save_flags flags \n" 110 + " .macro arch_local_save_flags flags \n" 111 111 " .set push \n" 112 112 " .set reorder \n" 113 113 #ifdef CONFIG_MIPS_MT_SMTC ··· 118 118 " .set pop \n" 119 119 " .endm \n"); 120 120 121 - #define raw_local_save_flags(x) \ 122 - __asm__ __volatile__( \ 123 - "raw_local_save_flags %0" \ 124 - : "=r" (x)) 121 + static inline unsigned long arch_local_save_flags(void) 122 + { 123 + unsigned long flags; 124 + asm volatile("arch_local_save_flags %0" : "=r" (flags)); 125 + return flags; 126 + } 125 127 126 128 __asm__( 127 - " .macro raw_local_irq_save result \n" 129 + " .macro arch_local_irq_save result \n" 128 130 " .set push \n" 129 131 " .set reorder \n" 130 132 " .set noat \n" ··· 150 148 " .set pop \n" 
151 149 " .endm \n"); 152 150 153 - #define raw_local_irq_save(x) \ 154 - __asm__ __volatile__( \ 155 - "raw_local_irq_save\t%0" \ 156 - : "=r" (x) \ 157 - : /* no inputs */ \ 158 - : "memory") 151 + static inline unsigned long arch_local_irq_save(void) 152 + { 153 + unsigned long flags; 154 + asm volatile("arch_local_irq_save\t%0" 155 + : "=r" (flags) 156 + : /* no inputs */ 157 + : "memory"); 158 + return flags; 159 + } 159 160 160 161 __asm__( 161 - " .macro raw_local_irq_restore flags \n" 162 + " .macro arch_local_irq_restore flags \n" 162 163 " .set push \n" 163 164 " .set noreorder \n" 164 165 " .set noat \n" ··· 201 196 " .endm \n"); 202 197 203 198 204 - static inline void raw_local_irq_restore(unsigned long flags) 199 + static inline void arch_local_irq_restore(unsigned long flags) 205 200 { 206 201 unsigned long __tmp1; 207 202 ··· 216 211 #endif 217 212 218 213 __asm__ __volatile__( 219 - "raw_local_irq_restore\t%0" 214 + "arch_local_irq_restore\t%0" 220 215 : "=r" (__tmp1) 221 216 : "0" (flags) 222 217 : "memory"); 223 218 } 224 219 225 - static inline void __raw_local_irq_restore(unsigned long flags) 220 + static inline void __arch_local_irq_restore(unsigned long flags) 226 221 { 227 222 unsigned long __tmp1; 228 223 229 224 __asm__ __volatile__( 230 - "raw_local_irq_restore\t%0" 225 + "arch_local_irq_restore\t%0" 231 226 : "=r" (__tmp1) 232 227 : "0" (flags) 233 228 : "memory"); 234 229 } 235 230 236 - static inline int raw_irqs_disabled_flags(unsigned long flags) 231 + static inline int arch_irqs_disabled_flags(unsigned long flags) 237 232 { 238 233 #ifdef CONFIG_MIPS_MT_SMTC 239 234 /*
+1
arch/mips/include/asm/mach-loongson/loongson.h
··· 13 13 14 14 #include <linux/io.h> 15 15 #include <linux/init.h> 16 + #include <linux/irq.h> 16 17 17 18 /* loongson internal northbridge initialization */ 18 19 extern void bonito_irq_init(void);
+1
arch/mips/jazz/irq.c
··· 12 12 #include <linux/kernel.h> 13 13 #include <linux/smp.h> 14 14 #include <linux/spinlock.h> 15 + #include <linux/irq.h> 15 16 16 17 #include <asm/irq_cpu.h> 17 18 #include <asm/i8253.h>
+1
arch/mips/kernel/cevt-bcm1480.c
··· 19 19 #include <linux/interrupt.h> 20 20 #include <linux/percpu.h> 21 21 #include <linux/smp.h> 22 + #include <linux/irq.h> 22 23 23 24 #include <asm/addrspace.h> 24 25 #include <asm/io.h>
+1
arch/mips/kernel/cevt-ds1287.c
··· 21 21 #include <linux/init.h> 22 22 #include <linux/interrupt.h> 23 23 #include <linux/mc146818rtc.h> 24 + #include <linux/irq.h> 24 25 25 26 #include <asm/time.h> 26 27
+1
arch/mips/kernel/cevt-gt641xx.c
··· 21 21 #include <linux/init.h> 22 22 #include <linux/interrupt.h> 23 23 #include <linux/spinlock.h> 24 + #include <linux/irq.h> 24 25 25 26 #include <asm/gt64120.h> 26 27 #include <asm/time.h>
+1
arch/mips/kernel/cevt-r4k.c
··· 10 10 #include <linux/interrupt.h> 11 11 #include <linux/percpu.h> 12 12 #include <linux/smp.h> 13 + #include <linux/irq.h> 13 14 14 15 #include <asm/smtc_ipi.h> 15 16 #include <asm/time.h>
+1
arch/mips/kernel/cevt-sb1250.c
··· 17 17 */ 18 18 #include <linux/clockchips.h> 19 19 #include <linux/interrupt.h> 20 + #include <linux/irq.h> 20 21 #include <linux/percpu.h> 21 22 #include <linux/smp.h> 22 23
+1
arch/mips/kernel/cevt-smtc.c
··· 11 11 #include <linux/interrupt.h> 12 12 #include <linux/percpu.h> 13 13 #include <linux/smp.h> 14 + #include <linux/irq.h> 14 15 15 16 #include <asm/smtc_ipi.h> 16 17 #include <asm/time.h>
+1
arch/mips/kernel/cevt-txx9.c
··· 13 13 */ 14 14 #include <linux/init.h> 15 15 #include <linux/interrupt.h> 16 + #include <linux/irq.h> 16 17 #include <asm/time.h> 17 18 #include <asm/txx9tmr.h> 18 19
+1
arch/mips/kernel/i8253.c
··· 9 9 #include <linux/module.h> 10 10 #include <linux/smp.h> 11 11 #include <linux/spinlock.h> 12 + #include <linux/irq.h> 12 13 13 14 #include <asm/delay.h> 14 15 #include <asm/i8253.h>
+1
arch/mips/kernel/i8259.c
··· 15 15 #include <linux/kernel.h> 16 16 #include <linux/spinlock.h> 17 17 #include <linux/sysdev.h> 18 + #include <linux/irq.h> 18 19 19 20 #include <asm/i8259.h> 20 21 #include <asm/io.h>
+1 -1
arch/mips/kernel/irq-gic.c
··· 3 3 #include <linux/bitmap.h> 4 4 #include <linux/init.h> 5 5 #include <linux/smp.h> 6 + #include <linux/irq.h> 6 7 7 8 #include <asm/io.h> 8 9 #include <asm/gic.h> 9 10 #include <asm/gcmpregs.h> 10 - #include <asm/irq.h> 11 11 #include <linux/hardirq.h> 12 12 #include <asm-generic/bitops/find.h> 13 13
+1
arch/mips/kernel/irq-rm7000.c
··· 11 11 */ 12 12 #include <linux/init.h> 13 13 #include <linux/interrupt.h> 14 + #include <linux/irq.h> 14 15 #include <linux/kernel.h> 15 16 16 17 #include <asm/irq_cpu.h>
+1
arch/mips/kernel/irq-rm9000.c
··· 11 11 */ 12 12 #include <linux/init.h> 13 13 #include <linux/interrupt.h> 14 + #include <linux/irq.h> 14 15 #include <linux/kernel.h> 15 16 #include <linux/module.h> 16 17
+1
arch/mips/kernel/irq_cpu.c
··· 30 30 #include <linux/init.h> 31 31 #include <linux/interrupt.h> 32 32 #include <linux/kernel.h> 33 + #include <linux/irq.h> 33 34 34 35 #include <asm/irq_cpu.h> 35 36 #include <asm/mipsregs.h>
+1
arch/mips/kernel/irq_txx9.c
··· 16 16 #include <linux/init.h> 17 17 #include <linux/interrupt.h> 18 18 #include <linux/types.h> 19 + #include <linux/irq.h> 19 20 #include <asm/txx9irq.h> 20 21 21 22 struct txx9_irc_reg {
+2 -2
arch/mips/kernel/smtc.c
··· 1038 1038 * but it's more efficient, given that we're already 1039 1039 * running down the IPI queue. 1040 1040 */ 1041 - __raw_local_irq_restore(flags); 1041 + __arch_local_irq_restore(flags); 1042 1042 } 1043 1043 } 1044 1044 ··· 1190 1190 /* 1191 1191 ** But use a raw restore here to avoid recursion. 1192 1192 */ 1193 - __raw_local_irq_restore(flags); 1193 + __arch_local_irq_restore(flags); 1194 1194 1195 1195 if (pipi) { 1196 1196 self_ipi(pipi);
+1 -1
arch/mips/kernel/traps.c
··· 28 28 #include <linux/kprobes.h> 29 29 #include <linux/notifier.h> 30 30 #include <linux/kdb.h> 31 + #include <linux/irq.h> 31 32 32 33 #include <asm/bootinfo.h> 33 34 #include <asm/branch.h> ··· 52 51 #include <asm/mmu_context.h> 53 52 #include <asm/types.h> 54 53 #include <asm/stacktrace.h> 55 - #include <asm/irq.h> 56 54 #include <asm/uasm.h> 57 55 58 56 extern void check_wait(void);
+1
arch/mips/mti-malta/malta-platform.c
··· 25 25 #include <linux/serial_8250.h> 26 26 #include <linux/mc146818rtc.h> 27 27 #include <linux/module.h> 28 + #include <linux/irq.h> 28 29 #include <linux/mtd/partitions.h> 29 30 #include <linux/mtd/physmap.h> 30 31 #include <linux/platform_device.h>
+1
arch/mips/pci/ops-tx3927.c
··· 38 38 #include <linux/kernel.h> 39 39 #include <linux/init.h> 40 40 #include <linux/interrupt.h> 41 + #include <linux/irq.h> 41 42 42 43 #include <asm/addrspace.h> 43 44 #include <asm/txx9irq.h>
+1
arch/mips/pci/ops-tx4927.c
··· 17 17 */ 18 18 #include <linux/kernel.h> 19 19 #include <linux/interrupt.h> 20 + #include <linux/irq.h> 20 21 #include <asm/txx9/pci.h> 21 22 #include <asm/txx9/tx4927pcic.h> 22 23
+1
arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c
··· 14 14 #include <linux/interrupt.h> 15 15 #include <linux/kernel.h> 16 16 #include <linux/bitops.h> 17 + #include <linux/irq.h> 17 18 18 19 #include <asm/system.h> 19 20
+1
arch/mips/powertv/asic/irq_asic.c
··· 13 13 #include <linux/init.h> 14 14 #include <linux/interrupt.h> 15 15 #include <linux/kernel.h> 16 + #include <linux/irq.h> 16 17 17 18 #include <asm/irq_cpu.h> 18 19 #include <asm/mipsregs.h>
+1
arch/mips/rb532/serial.c
··· 29 29 #include <linux/tty.h> 30 30 #include <linux/serial_core.h> 31 31 #include <linux/serial_8250.h> 32 + #include <linux/irq.h> 32 33 33 34 #include <asm/serial.h> 34 35 #include <asm/mach-rc32434/rb.h>
+1
arch/mips/sni/a20r.c
··· 10 10 11 11 #include <linux/init.h> 12 12 #include <linux/interrupt.h> 13 + #include <linux/irq.h> 13 14 #include <linux/platform_device.h> 14 15 #include <linux/serial_8250.h> 15 16
+1
arch/mips/sni/pcimt.c
··· 11 11 12 12 #include <linux/init.h> 13 13 #include <linux/interrupt.h> 14 + #include <linux/irq.h> 14 15 #include <linux/pci.h> 15 16 #include <linux/serial_8250.h> 16 17
+1
arch/mips/sni/pcit.c
··· 10 10 11 11 #include <linux/init.h> 12 12 #include <linux/interrupt.h> 13 + #include <linux/irq.h> 13 14 #include <linux/pci.h> 14 15 #include <linux/serial_8250.h> 15 16
+1
arch/mips/sni/rm200.c
··· 13 13 #include <linux/delay.h> 14 14 #include <linux/init.h> 15 15 #include <linux/interrupt.h> 16 + #include <linux/irq.h> 16 17 #include <linux/platform_device.h> 17 18 #include <linux/serial_8250.h> 18 19 #include <linux/io.h>
+1
arch/mips/sni/time.c
··· 1 1 #include <linux/types.h> 2 2 #include <linux/interrupt.h> 3 + #include <linux/irq.h> 3 4 #include <linux/smp.h> 4 5 #include <linux/time.h> 5 6 #include <linux/clockchips.h>
+1
arch/mips/txx9/generic/irq_tx4927.c
··· 25 25 */ 26 26 #include <linux/init.h> 27 27 #include <linux/interrupt.h> 28 + #include <linux/irq.h> 28 29 #include <asm/irq_cpu.h> 29 30 #include <asm/txx9/tx4927.h> 30 31
+1
arch/mips/txx9/generic/irq_tx4938.c
··· 13 13 */ 14 14 #include <linux/init.h> 15 15 #include <linux/interrupt.h> 16 + #include <linux/irq.h> 16 17 #include <asm/irq_cpu.h> 17 18 #include <asm/txx9/tx4938.h> 18 19
+1
arch/mips/txx9/generic/irq_tx4939.c
··· 19 19 */ 20 20 #include <linux/init.h> 21 21 #include <linux/interrupt.h> 22 + #include <linux/irq.h> 22 23 #include <linux/types.h> 23 24 #include <asm/irq_cpu.h> 24 25 #include <asm/txx9irq.h>
+1
arch/mips/txx9/generic/setup.c
··· 24 24 #include <linux/leds.h> 25 25 #include <linux/sysdev.h> 26 26 #include <linux/slab.h> 27 + #include <linux/irq.h> 27 28 #include <asm/bootinfo.h> 28 29 #include <asm/time.h> 29 30 #include <asm/reboot.h>
+1
arch/mips/txx9/jmr3927/irq.c
··· 32 32 #include <linux/init.h> 33 33 #include <linux/types.h> 34 34 #include <linux/interrupt.h> 35 + #include <linux/irq.h> 35 36 36 37 #include <asm/io.h> 37 38 #include <asm/mipsregs.h>
+1
arch/mips/txx9/rbtx4927/irq.c
··· 111 111 #include <linux/init.h> 112 112 #include <linux/types.h> 113 113 #include <linux/interrupt.h> 114 + #include <linux/irq.h> 114 115 #include <asm/io.h> 115 116 #include <asm/mipsregs.h> 116 117 #include <asm/txx9/generic.h>
+1
arch/mips/txx9/rbtx4938/irq.c
··· 64 64 */ 65 65 #include <linux/init.h> 66 66 #include <linux/interrupt.h> 67 + #include <linux/irq.h> 67 68 #include <asm/mipsregs.h> 68 69 #include <asm/txx9/generic.h> 69 70 #include <asm/txx9/rbtx4938.h>
+1
arch/mips/txx9/rbtx4939/irq.c
··· 11 11 */ 12 12 #include <linux/init.h> 13 13 #include <linux/interrupt.h> 14 + #include <linux/irq.h> 14 15 #include <asm/mipsregs.h> 15 16 #include <asm/txx9/rbtx4939.h> 16 17
+1
arch/mips/vr41xx/common/irq.c
··· 19 19 */ 20 20 #include <linux/interrupt.h> 21 21 #include <linux/module.h> 22 + #include <linux/irq.h> 22 23 23 24 #include <asm/irq_cpu.h> 24 25 #include <asm/system.h>
+1
arch/mips/vr41xx/common/siu.c
··· 22 22 #include <linux/ioport.h> 23 23 #include <linux/platform_device.h> 24 24 #include <linux/serial_core.h> 25 + #include <linux/irq.h> 25 26 26 27 #include <asm/cpu.h> 27 28 #include <asm/vr41xx/siu.h>
+123
arch/mn10300/include/asm/irqflags.h
··· 1 + /* MN10300 IRQ flag handling 2 + * 3 + * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved. 4 + * Written by David Howells (dhowells@redhat.com) 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public Licence 8 + * as published by the Free Software Foundation; either version 9 + * 2 of the Licence, or (at your option) any later version. 10 + */ 11 + 12 + #ifndef _ASM_IRQFLAGS_H 13 + #define _ASM_IRQFLAGS_H 14 + 15 + #include <asm/cpu-regs.h> 16 + 17 + /* 18 + * interrupt control 19 + * - "disabled": run in IM1/2 20 + * - level 0 - GDB stub 21 + * - level 1 - virtual serial DMA (if present) 22 + * - level 5 - normal interrupt priority 23 + * - level 6 - timer interrupt 24 + * - "enabled": run in IM7 25 + */ 26 + #ifdef CONFIG_MN10300_TTYSM 27 + #define MN10300_CLI_LEVEL EPSW_IM_2 28 + #else 29 + #define MN10300_CLI_LEVEL EPSW_IM_1 30 + #endif 31 + 32 + #ifndef __ASSEMBLY__ 33 + 34 + static inline unsigned long arch_local_save_flags(void) 35 + { 36 + unsigned long flags; 37 + 38 + asm volatile("mov epsw,%0" : "=d"(flags)); 39 + return flags; 40 + } 41 + 42 + static inline void arch_local_irq_disable(void) 43 + { 44 + asm volatile( 45 + " and %0,epsw \n" 46 + " or %1,epsw \n" 47 + " nop \n" 48 + " nop \n" 49 + " nop \n" 50 + : 51 + : "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL) 52 + : "memory"); 53 + } 54 + 55 + static inline unsigned long arch_local_irq_save(void) 56 + { 57 + unsigned long flags; 58 + 59 + flags = arch_local_save_flags(); 60 + arch_local_irq_disable(); 61 + return flags; 62 + } 63 + 64 + /* 65 + * we make sure arch_irq_enable() doesn't cause priority inversion 66 + */ 67 + extern unsigned long __mn10300_irq_enabled_epsw; 68 + 69 + static inline void arch_local_irq_enable(void) 70 + { 71 + unsigned long tmp; 72 + 73 + asm volatile( 74 + " mov epsw,%0 \n" 75 + " and %1,%0 \n" 76 + " or %2,%0 \n" 77 + " mov %0,epsw \n" 78 + : "=&d"(tmp) 79 + : "i"(~EPSW_IM), 
"r"(__mn10300_irq_enabled_epsw) 80 + : "memory"); 81 + } 82 + 83 + static inline void arch_local_irq_restore(unsigned long flags) 84 + { 85 + asm volatile( 86 + " mov %0,epsw \n" 87 + " nop \n" 88 + " nop \n" 89 + " nop \n" 90 + : 91 + : "d"(flags) 92 + : "memory", "cc"); 93 + } 94 + 95 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 96 + { 97 + return (flags & EPSW_IM) <= MN10300_CLI_LEVEL; 98 + } 99 + 100 + static inline bool arch_irqs_disabled(void) 101 + { 102 + return arch_irqs_disabled_flags(arch_local_save_flags()); 103 + } 104 + 105 + /* 106 + * Hook to save power by halting the CPU 107 + * - called from the idle loop 108 + * - must reenable interrupts (which takes three instruction cycles to complete) 109 + */ 110 + static inline void arch_safe_halt(void) 111 + { 112 + asm volatile( 113 + " or %0,epsw \n" 114 + " nop \n" 115 + " nop \n" 116 + " bset %2,(%1) \n" 117 + : 118 + : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP) 119 + : "cc"); 120 + } 121 + 122 + #endif /* __ASSEMBLY__ */ 123 + #endif /* _ASM_IRQFLAGS_H */
+1 -108
arch/mn10300/include/asm/system.h
··· 17 17 #ifndef __ASSEMBLY__ 18 18 19 19 #include <linux/kernel.h> 20 + #include <linux/irqflags.h> 20 21 21 22 struct task_struct; 22 23 struct thread_struct; ··· 79 78 80 79 #define read_barrier_depends() do {} while (0) 81 80 #define smp_read_barrier_depends() do {} while (0) 82 - 83 - /*****************************************************************************/ 84 - /* 85 - * interrupt control 86 - * - "disabled": run in IM1/2 87 - * - level 0 - GDB stub 88 - * - level 1 - virtual serial DMA (if present) 89 - * - level 5 - normal interrupt priority 90 - * - level 6 - timer interrupt 91 - * - "enabled": run in IM7 92 - */ 93 - #ifdef CONFIG_MN10300_TTYSM 94 - #define MN10300_CLI_LEVEL EPSW_IM_2 95 - #else 96 - #define MN10300_CLI_LEVEL EPSW_IM_1 97 - #endif 98 - 99 - #define local_save_flags(x) \ 100 - do { \ 101 - typecheck(unsigned long, x); \ 102 - asm volatile( \ 103 - " mov epsw,%0 \n" \ 104 - : "=d"(x) \ 105 - ); \ 106 - } while (0) 107 - 108 - #define local_irq_disable() \ 109 - do { \ 110 - asm volatile( \ 111 - " and %0,epsw \n" \ 112 - " or %1,epsw \n" \ 113 - " nop \n" \ 114 - " nop \n" \ 115 - " nop \n" \ 116 - : \ 117 - : "i"(~EPSW_IM), "i"(EPSW_IE | MN10300_CLI_LEVEL) \ 118 - ); \ 119 - } while (0) 120 - 121 - #define local_irq_save(x) \ 122 - do { \ 123 - local_save_flags(x); \ 124 - local_irq_disable(); \ 125 - } while (0) 126 - 127 - /* 128 - * we make sure local_irq_enable() doesn't cause priority inversion 129 - */ 130 - #ifndef __ASSEMBLY__ 131 - 132 - extern unsigned long __mn10300_irq_enabled_epsw; 133 - 134 - #endif 135 - 136 - #define local_irq_enable() \ 137 - do { \ 138 - unsigned long tmp; \ 139 - \ 140 - asm volatile( \ 141 - " mov epsw,%0 \n" \ 142 - " and %1,%0 \n" \ 143 - " or %2,%0 \n" \ 144 - " mov %0,epsw \n" \ 145 - : "=&d"(tmp) \ 146 - : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw) \ 147 - : "cc" \ 148 - ); \ 149 - } while (0) 150 - 151 - #define local_irq_restore(x) \ 152 - do { \ 153 - typecheck(unsigned long, x); \ 
154 - asm volatile( \ 155 - " mov %0,epsw \n" \ 156 - " nop \n" \ 157 - " nop \n" \ 158 - " nop \n" \ 159 - : \ 160 - : "d"(x) \ 161 - : "memory", "cc" \ 162 - ); \ 163 - } while (0) 164 - 165 - #define irqs_disabled() \ 166 - ({ \ 167 - unsigned long flags; \ 168 - local_save_flags(flags); \ 169 - (flags & EPSW_IM) <= MN10300_CLI_LEVEL; \ 170 - }) 171 - 172 - /* hook to save power by halting the CPU 173 - * - called from the idle loop 174 - * - must reenable interrupts (which takes three instruction cycles to complete) 175 - */ 176 - #define safe_halt() \ 177 - do { \ 178 - asm volatile(" or %0,epsw \n" \ 179 - " nop \n" \ 180 - " nop \n" \ 181 - " bset %2,(%1) \n" \ 182 - : \ 183 - : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)\ 184 - : "cc" \ 185 - ); \ 186 - } while (0) 187 - 188 - #define STI or EPSW_IE|EPSW_IM,epsw 189 - #define CLI and ~EPSW_IM,epsw; or EPSW_IE|MN10300_CLI_LEVEL,epsw; nop; nop; nop 190 81 191 82 /*****************************************************************************/ 192 83 /*
+1
arch/mn10300/kernel/entry.S
··· 16 16 #include <linux/linkage.h> 17 17 #include <asm/smp.h> 18 18 #include <asm/system.h> 19 + #include <asm/irqflags.h> 19 20 #include <asm/thread_info.h> 20 21 #include <asm/intctl-regs.h> 21 22 #include <asm/busctl-regs.h>
+46
arch/parisc/include/asm/irqflags.h
··· 1 + #ifndef __PARISC_IRQFLAGS_H 2 + #define __PARISC_IRQFLAGS_H 3 + 4 + #include <linux/types.h> 5 + #include <asm/psw.h> 6 + 7 + static inline unsigned long arch_local_save_flags(void) 8 + { 9 + unsigned long flags; 10 + asm volatile("ssm 0, %0" : "=r" (flags) : : "memory"); 11 + return flags; 12 + } 13 + 14 + static inline void arch_local_irq_disable(void) 15 + { 16 + asm volatile("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory"); 17 + } 18 + 19 + static inline void arch_local_irq_enable(void) 20 + { 21 + asm volatile("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory"); 22 + } 23 + 24 + static inline unsigned long arch_local_irq_save(void) 25 + { 26 + unsigned long flags; 27 + asm volatile("rsm %1,%0" : "=r" (flags) : "i" (PSW_I) : "memory"); 28 + return flags; 29 + } 30 + 31 + static inline void arch_local_irq_restore(unsigned long flags) 32 + { 33 + asm volatile("mtsm %0" : : "r" (flags) : "memory"); 34 + } 35 + 36 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 37 + { 38 + return (flags & PSW_I) == 0; 39 + } 40 + 41 + static inline bool arch_irqs_disabled(void) 42 + { 43 + return arch_irqs_disabled_flags(arch_local_save_flags()); 44 + } 45 + 46 + #endif /* __PARISC_IRQFLAGS_H */
+1 -18
arch/parisc/include/asm/system.h
··· 1 1 #ifndef __PARISC_SYSTEM_H 2 2 #define __PARISC_SYSTEM_H 3 3 4 - #include <asm/psw.h> 4 + #include <linux/irqflags.h> 5 5 6 6 /* The program status word as bitfields. */ 7 7 struct pa_psw { ··· 47 47 #define switch_to(prev, next, last) do { \ 48 48 (last) = _switch_to(prev, next); \ 49 49 } while(0) 50 - 51 - /* interrupt control */ 52 - #define local_save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory") 53 - #define local_irq_disable() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" ) 54 - #define local_irq_enable() __asm__ __volatile__("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory" ) 55 - 56 - #define local_irq_save(x) \ 57 - __asm__ __volatile__("rsm %1,%0" : "=r" (x) :"i" (PSW_I) : "memory" ) 58 - #define local_irq_restore(x) \ 59 - __asm__ __volatile__("mtsm %0" : : "r" (x) : "memory" ) 60 - 61 - #define irqs_disabled() \ 62 - ({ \ 63 - unsigned long flags; \ 64 - local_save_flags(flags); \ 65 - (flags & PSW_I) == 0; \ 66 - }) 67 50 68 51 #define mfctl(reg) ({ \ 69 52 unsigned long cr; \
+72 -55
arch/powerpc/include/asm/hw_irq.h
··· 16 16 #ifdef CONFIG_PPC64 17 17 #include <asm/paca.h> 18 18 19 - static inline unsigned long local_get_flags(void) 19 + static inline unsigned long arch_local_save_flags(void) 20 20 { 21 21 unsigned long flags; 22 22 23 - __asm__ __volatile__("lbz %0,%1(13)" 24 - : "=r" (flags) 25 - : "i" (offsetof(struct paca_struct, soft_enabled))); 23 + asm volatile( 24 + "lbz %0,%1(13)" 25 + : "=r" (flags) 26 + : "i" (offsetof(struct paca_struct, soft_enabled))); 26 27 27 28 return flags; 28 29 } 29 30 30 - static inline unsigned long raw_local_irq_disable(void) 31 + static inline unsigned long arch_local_irq_disable(void) 31 32 { 32 33 unsigned long flags, zero; 33 34 34 - __asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)" 35 - : "=r" (flags), "=&r" (zero) 36 - : "i" (offsetof(struct paca_struct, soft_enabled)) 37 - : "memory"); 35 + asm volatile( 36 + "li %1,0; lbz %0,%2(13); stb %1,%2(13)" 37 + : "=r" (flags), "=&r" (zero) 38 + : "i" (offsetof(struct paca_struct, soft_enabled)) 39 + : "memory"); 38 40 39 41 return flags; 40 42 } 41 43 42 - extern void raw_local_irq_restore(unsigned long); 44 + extern void arch_local_irq_restore(unsigned long); 43 45 extern void iseries_handle_interrupts(void); 44 46 45 - #define raw_local_irq_enable() raw_local_irq_restore(1) 46 - #define raw_local_save_flags(flags) ((flags) = local_get_flags()) 47 - #define raw_local_irq_save(flags) ((flags) = raw_local_irq_disable()) 47 + static inline void arch_local_irq_enable(void) 48 + { 49 + arch_local_irq_restore(1); 50 + } 48 51 49 - #define raw_irqs_disabled() (local_get_flags() == 0) 50 - #define raw_irqs_disabled_flags(flags) ((flags) == 0) 52 + static inline unsigned long arch_local_irq_save(void) 53 + { 54 + return arch_local_irq_disable(); 55 + } 56 + 57 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 58 + { 59 + return flags == 0; 60 + } 61 + 62 + static inline bool arch_irqs_disabled(void) 63 + { 64 + return arch_irqs_disabled_flags(arch_local_save_flags()); 
65 + } 51 66 52 67 #ifdef CONFIG_PPC_BOOK3E 53 - #define __hard_irq_enable() __asm__ __volatile__("wrteei 1": : :"memory"); 54 - #define __hard_irq_disable() __asm__ __volatile__("wrteei 0": : :"memory"); 68 + #define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory"); 69 + #define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory"); 55 70 #else 56 71 #define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1) 57 72 #define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1) ··· 79 64 get_paca()->hard_enabled = 0; \ 80 65 } while(0) 81 66 82 - #else 67 + #else /* CONFIG_PPC64 */ 83 68 84 - #if defined(CONFIG_BOOKE) 85 69 #define SET_MSR_EE(x) mtmsr(x) 86 - #define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") 87 - #else 88 - #define SET_MSR_EE(x) mtmsr(x) 89 - #define raw_local_irq_restore(flags) mtmsr(flags) 90 - #endif 91 70 92 - static inline void raw_local_irq_disable(void) 71 + static inline unsigned long arch_local_save_flags(void) 93 72 { 94 - #ifdef CONFIG_BOOKE 95 - __asm__ __volatile__("wrteei 0": : :"memory"); 96 - #else 97 - unsigned long msr; 73 + return mfmsr(); 74 + } 98 75 99 - msr = mfmsr(); 100 - SET_MSR_EE(msr & ~MSR_EE); 76 + static inline void arch_local_irq_restore(unsigned long flags) 77 + { 78 + #if defined(CONFIG_BOOKE) 79 + asm volatile("wrtee %0" : : "r" (flags) : "memory"); 80 + #else 81 + mtmsr(flags); 101 82 #endif 102 83 } 103 84 104 - static inline void raw_local_irq_enable(void) 85 + static inline unsigned long arch_local_irq_save(void) 86 + { 87 + unsigned long flags = arch_local_save_flags(); 88 + #ifdef CONFIG_BOOKE 89 + asm volatile("wrteei 0" : : : "memory"); 90 + #else 91 + SET_MSR_EE(flags & ~MSR_EE); 92 + #endif 93 + return flags; 94 + } 95 + 96 + static inline void arch_local_irq_disable(void) 105 97 { 106 98 #ifdef CONFIG_BOOKE 107 - __asm__ __volatile__("wrteei 1": : :"memory"); 99 + asm volatile("wrteei 0" : : : "memory"); 108 100 #else 109 - unsigned long msr; 
101 + arch_local_irq_save(); 102 + #endif 103 + } 110 104 111 - msr = mfmsr(); 105 + static inline void arch_local_irq_enable(void) 106 + { 107 + #ifdef CONFIG_BOOKE 108 + asm volatile("wrteei 1" : : : "memory"); 109 + #else 110 + unsigned long msr = mfmsr(); 112 111 SET_MSR_EE(msr | MSR_EE); 113 112 #endif 114 113 } 115 114 116 - static inline void raw_local_irq_save_ptr(unsigned long *flags) 117 - { 118 - unsigned long msr; 119 - msr = mfmsr(); 120 - *flags = msr; 121 - #ifdef CONFIG_BOOKE 122 - __asm__ __volatile__("wrteei 0": : :"memory"); 123 - #else 124 - SET_MSR_EE(msr & ~MSR_EE); 125 - #endif 126 - } 127 - 128 - #define raw_local_save_flags(flags) ((flags) = mfmsr()) 129 - #define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags) 130 - #define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0) 131 - #define raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0) 132 - 133 - #define hard_irq_disable() raw_local_irq_disable() 134 - 135 - static inline int irqs_disabled_flags(unsigned long flags) 115 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 136 116 { 137 117 return (flags & MSR_EE) == 0; 138 118 } 119 + 120 + static inline bool arch_irqs_disabled(void) 121 + { 122 + return arch_irqs_disabled_flags(arch_local_save_flags()); 123 + } 124 + 125 + #define hard_irq_disable() arch_local_irq_disable() 139 126 140 127 #endif /* CONFIG_PPC64 */ 141 128
+1 -1
arch/powerpc/include/asm/irqflags.h
··· 6 6 7 7 #ifndef __ASSEMBLY__ 8 8 /* 9 - * Get definitions for raw_local_save_flags(x), etc. 9 + * Get definitions for arch_local_save_flags(x), etc. 10 10 */ 11 11 #include <asm/hw_irq.h> 12 12
+2 -2
arch/powerpc/kernel/exceptions-64s.S
··· 818 818 819 819 /* 820 820 * hash_page couldn't handle it, set soft interrupt enable back 821 - * to what it was before the trap. Note that .raw_local_irq_restore 821 + * to what it was before the trap. Note that .arch_local_irq_restore 822 822 * handles any interrupts pending at this point. 823 823 */ 824 824 ld r3,SOFTE(r1) 825 825 TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) 826 - bl .raw_local_irq_restore 826 + bl .arch_local_irq_restore 827 827 b 11f 828 828 829 829 /* We have a data breakpoint exception - handle it */
+2 -2
arch/powerpc/kernel/irq.c
··· 116 116 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); 117 117 } 118 118 119 - notrace void raw_local_irq_restore(unsigned long en) 119 + notrace void arch_local_irq_restore(unsigned long en) 120 120 { 121 121 /* 122 122 * get_paca()->soft_enabled = en; ··· 192 192 193 193 __hard_irq_enable(); 194 194 } 195 - EXPORT_SYMBOL(raw_local_irq_restore); 195 + EXPORT_SYMBOL(arch_local_irq_restore); 196 196 #endif /* CONFIG_PPC64 */ 197 197 198 198 static int show_other_interrupts(struct seq_file *p, int prec)
+30 -25
arch/s390/include/asm/irqflags.h
··· 8 8 9 9 #include <linux/types.h> 10 10 11 - /* store then or system mask. */ 12 - #define __raw_local_irq_stosm(__or) \ 11 + /* store then OR system mask. */ 12 + #define __arch_local_irq_stosm(__or) \ 13 13 ({ \ 14 14 unsigned long __mask; \ 15 15 asm volatile( \ ··· 18 18 __mask; \ 19 19 }) 20 20 21 - /* store then and system mask. */ 22 - #define __raw_local_irq_stnsm(__and) \ 21 + /* store then AND system mask. */ 22 + #define __arch_local_irq_stnsm(__and) \ 23 23 ({ \ 24 24 unsigned long __mask; \ 25 25 asm volatile( \ ··· 29 29 }) 30 30 31 31 /* set system mask. */ 32 - #define __raw_local_irq_ssm(__mask) \ 33 - ({ \ 34 - asm volatile("ssm %0" : : "Q" (__mask) : "memory"); \ 35 - }) 36 - 37 - /* interrupt control.. */ 38 - static inline unsigned long raw_local_irq_enable(void) 32 + static inline void __arch_local_irq_ssm(unsigned long flags) 39 33 { 40 - return __raw_local_irq_stosm(0x03); 34 + asm volatile("ssm %0" : : "Q" (flags) : "memory"); 41 35 } 42 36 43 - static inline unsigned long raw_local_irq_disable(void) 37 + static inline unsigned long arch_local_save_flags(void) 44 38 { 45 - return __raw_local_irq_stnsm(0xfc); 39 + return __arch_local_irq_stosm(0x00); 46 40 } 47 41 48 - #define raw_local_save_flags(x) \ 49 - do { \ 50 - typecheck(unsigned long, x); \ 51 - (x) = __raw_local_irq_stosm(0x00); \ 52 - } while (0) 53 - 54 - static inline void raw_local_irq_restore(unsigned long flags) 42 + static inline unsigned long arch_local_irq_save(void) 55 43 { 56 - __raw_local_irq_ssm(flags); 44 + return __arch_local_irq_stnsm(0xfc); 57 45 } 58 46 59 - static inline int raw_irqs_disabled_flags(unsigned long flags) 47 + static inline void arch_local_irq_disable(void) 48 + { 49 + arch_local_irq_save(); 50 + } 51 + 52 + static inline void arch_local_irq_enable(void) 53 + { 54 + __arch_local_irq_stosm(0x03); 55 + } 56 + 57 + static inline void arch_local_irq_restore(unsigned long flags) 58 + { 59 + __arch_local_irq_ssm(flags); 60 + } 61 + 62 + static inline 
bool arch_irqs_disabled_flags(unsigned long flags) 60 63 { 61 64 return !(flags & (3UL << (BITS_PER_LONG - 8))); 62 65 } 63 66 64 - /* For spinlocks etc */ 65 - #define raw_local_irq_save(x) ((x) = raw_local_irq_disable()) 67 + static inline bool arch_irqs_disabled(void) 68 + { 69 + return arch_irqs_disabled_flags(arch_local_save_flags()); 70 + } 66 71 67 72 #endif /* __ASM_IRQFLAGS_H */
+1 -1
arch/s390/include/asm/system.h
··· 398 398 static inline void 399 399 __set_psw_mask(unsigned long mask) 400 400 { 401 - __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8))); 401 + __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8))); 402 402 } 403 403 404 404 #define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
+2 -2
arch/s390/kernel/mem_detect.c
··· 54 54 * right thing and we don't get scheduled away with low address 55 55 * protection disabled. 56 56 */ 57 - flags = __raw_local_irq_stnsm(0xf8); 57 + flags = __arch_local_irq_stnsm(0xf8); 58 58 __ctl_store(cr0, 0, 0); 59 59 __ctl_clear_bit(0, 28); 60 60 find_memory_chunks(chunk); 61 61 __ctl_load(cr0, 0, 0); 62 - __raw_local_irq_ssm(flags); 62 + arch_local_irq_restore(flags); 63 63 } 64 64 EXPORT_SYMBOL(detect_memory_layout);
+1 -2
arch/s390/mm/init.c
··· 50 50 */ 51 51 void __init paging_init(void) 52 52 { 53 - static const int ssm_mask = 0x04000000L; 54 53 unsigned long max_zone_pfns[MAX_NR_ZONES]; 55 54 unsigned long pgd_type; 56 55 ··· 71 72 __ctl_load(S390_lowcore.kernel_asce, 1, 1); 72 73 __ctl_load(S390_lowcore.kernel_asce, 7, 7); 73 74 __ctl_load(S390_lowcore.kernel_asce, 13, 13); 74 - __raw_local_irq_ssm(ssm_mask); 75 + arch_local_irq_restore(4UL << (BITS_PER_LONG - 8)); 75 76 76 77 atomic_set(&init_mm.context.attach_count, 1); 77 78
+2 -2
arch/s390/mm/maccess.c
··· 71 71 72 72 if (!count) 73 73 return 0; 74 - flags = __raw_local_irq_stnsm(0xf8UL); 74 + flags = __arch_local_irq_stnsm(0xf8UL); 75 75 asm volatile ( 76 76 "0: mvcle %1,%2,0x0\n" 77 77 "1: jo 0b\n" ··· 82 82 "+d" (_len2), "=m" (*((long *) dest)) 83 83 : "m" (*((long *) src)) 84 84 : "cc", "memory"); 85 - __raw_local_irq_ssm(flags); 85 + arch_local_irq_restore(flags); 86 86 return rc; 87 87 }
+99 -88
arch/score/include/asm/irqflags.h
··· 3 3 4 4 #ifndef __ASSEMBLY__ 5 5 6 - #define raw_local_irq_save(x) \ 7 - { \ 8 - __asm__ __volatile__( \ 9 - "mfcr r8, cr0;" \ 10 - "li r9, 0xfffffffe;" \ 11 - "nop;" \ 12 - "mv %0, r8;" \ 13 - "and r8, r8, r9;" \ 14 - "mtcr r8, cr0;" \ 15 - "nop;" \ 16 - "nop;" \ 17 - "nop;" \ 18 - "nop;" \ 19 - "nop;" \ 20 - : "=r" (x) \ 21 - : \ 22 - : "r8", "r9" \ 23 - ); \ 6 + #include <linux/types.h> 7 + 8 + static inline unsigned long arch_local_save_flags(void) 9 + { 10 + unsigned long flags; 11 + 12 + asm volatile( 13 + " mfcr r8, cr0 \n" 14 + " nop \n" 15 + " nop \n" 16 + " mv %0, r8 \n" 17 + " nop \n" 18 + " nop \n" 19 + " nop \n" 20 + " nop \n" 21 + " nop \n" 22 + " ldi r9, 0x1 \n" 23 + " and %0, %0, r9 \n" 24 + : "=r" (flags) 25 + : 26 + : "r8", "r9"); 27 + return flags; 24 28 } 25 29 26 - #define raw_local_irq_restore(x) \ 27 - { \ 28 - __asm__ __volatile__( \ 29 - "mfcr r8, cr0;" \ 30 - "ldi r9, 0x1;" \ 31 - "and %0, %0, r9;" \ 32 - "or r8, r8, %0;" \ 33 - "mtcr r8, cr0;" \ 34 - "nop;" \ 35 - "nop;" \ 36 - "nop;" \ 37 - "nop;" \ 38 - "nop;" \ 39 - : \ 40 - : "r"(x) \ 41 - : "r8", "r9" \ 42 - ); \ 30 + static inline unsigned long arch_local_irq_save(void) 31 + { 32 + unsigned long flags 33 + 34 + asm volatile( 35 + " mfcr r8, cr0 \n" 36 + " li r9, 0xfffffffe \n" 37 + " nop \n" 38 + " mv %0, r8 \n" 39 + " and r8, r8, r9 \n" 40 + " mtcr r8, cr0 \n" 41 + " nop \n" 42 + " nop \n" 43 + " nop \n" 44 + " nop \n" 45 + " nop \n" 46 + : "=r" (flags) 47 + : 48 + : "r8", "r9", "memory"); 49 + 50 + return flags; 43 51 } 44 52 45 - #define raw_local_irq_enable(void) \ 46 - { \ 47 - __asm__ __volatile__( \ 48 - "mfcr\tr8,cr0;" \ 49 - "nop;" \ 50 - "nop;" \ 51 - "ori\tr8,0x1;" \ 52 - "mtcr\tr8,cr0;" \ 53 - "nop;" \ 54 - "nop;" \ 55 - "nop;" \ 56 - "nop;" \ 57 - "nop;" \ 58 - : \ 59 - : \ 60 - : "r8"); \ 53 + static inline void arch_local_irq_restore(unsigned long flags) 54 + { 55 + asm volatile( 56 + " mfcr r8, cr0 \n" 57 + " ldi r9, 0x1 \n" 58 + " and %0, %0, r9 \n" 59 + " or 
r8, r8, %0 \n" 60 + " mtcr r8, cr0 \n" 61 + " nop \n" 62 + " nop \n" 63 + " nop \n" 64 + " nop \n" 65 + " nop \n" 66 + : 67 + : "r"(flags) 68 + : "r8", "r9", "memory"); 61 69 } 62 70 63 - #define raw_local_irq_disable(void) \ 64 - { \ 65 - __asm__ __volatile__( \ 66 - "mfcr\tr8,cr0;" \ 67 - "nop;" \ 68 - "nop;" \ 69 - "srli\tr8,r8,1;" \ 70 - "slli\tr8,r8,1;" \ 71 - "mtcr\tr8,cr0;" \ 72 - "nop;" \ 73 - "nop;" \ 74 - "nop;" \ 75 - "nop;" \ 76 - "nop;" \ 77 - : \ 78 - : \ 79 - : "r8"); \ 71 + static inline void arch_local_irq_enable(void) 72 + { 73 + asm volatile( 74 + " mfcr r8,cr0 \n" 75 + " nop \n" 76 + " nop \n" 77 + " ori r8,0x1 \n" 78 + " mtcr r8,cr0 \n" 79 + " nop \n" 80 + " nop \n" 81 + " nop \n" 82 + " nop \n" 83 + " nop \n" 84 + : 85 + : 86 + : "r8", "memory"); 80 87 } 81 88 82 - #define raw_local_save_flags(x) \ 83 - { \ 84 - __asm__ __volatile__( \ 85 - "mfcr r8, cr0;" \ 86 - "nop;" \ 87 - "nop;" \ 88 - "mv %0, r8;" \ 89 - "nop;" \ 90 - "nop;" \ 91 - "nop;" \ 92 - "nop;" \ 93 - "nop;" \ 94 - "ldi r9, 0x1;" \ 95 - "and %0, %0, r9;" \ 96 - : "=r" (x) \ 97 - : \ 98 - : "r8", "r9" \ 99 - ); \ 89 + static inline void arch_local_irq_disable(void) 90 + { 91 + asm volatile( 92 + " mfcr r8,cr0 \n" 93 + " nop \n" 94 + " nop \n" 95 + " srli r8,r8,1 \n" 96 + " slli r8,r8,1 \n" 97 + " mtcr r8,cr0 \n" 98 + " nop \n" 99 + " nop \n" 100 + " nop \n" 101 + " nop \n" 102 + " nop \n" 103 + : 104 + : 105 + : "r8", "memory"); 100 106 } 101 107 102 - static inline int raw_irqs_disabled_flags(unsigned long flags) 108 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 103 109 { 104 110 return !(flags & 1); 105 111 } 106 112 107 - #endif 113 + static inline bool arch_irqs_disabled(void) 114 + { 115 + return arch_irqs_disabled_flags(arch_local_save_flags()); 116 + } 117 + 118 + #endif /* __ASSEMBLY__ */ 108 119 109 120 #endif /* _ASM_SCORE_IRQFLAGS_H */
+2 -2
arch/sh/include/asm/irqflags.h
··· 1 1 #ifndef __ASM_SH_IRQFLAGS_H 2 2 #define __ASM_SH_IRQFLAGS_H 3 3 4 - #define RAW_IRQ_DISABLED 0xf0 5 - #define RAW_IRQ_ENABLED 0x00 4 + #define ARCH_IRQ_DISABLED 0xf0 5 + #define ARCH_IRQ_ENABLED 0x00 6 6 7 7 #include <asm-generic/irqflags.h> 8 8
+4 -3
arch/sh/include/asm/syscalls_32.h
··· 19 19 asmlinkage int sys_vfork(unsigned long r4, unsigned long r5, 20 20 unsigned long r6, unsigned long r7, 21 21 struct pt_regs __regs); 22 - asmlinkage int sys_execve(const char __user *ufilename, char __user * __user *uargv, 23 - char __user * __user *uenvp, unsigned long r7, 24 - struct pt_regs __regs); 22 + asmlinkage int sys_execve(const char __user *ufilename, 23 + const char __user *const __user *uargv, 24 + const char __user *const __user *uenvp, 25 + unsigned long r7, struct pt_regs __regs); 25 26 asmlinkage int sys_sigsuspend(old_sigset_t mask, unsigned long r5, 26 27 unsigned long r6, unsigned long r7, 27 28 struct pt_regs __regs);
+6 -6
arch/sh/kernel/irq_32.c
··· 10 10 #include <linux/irqflags.h> 11 11 #include <linux/module.h> 12 12 13 - void notrace raw_local_irq_restore(unsigned long flags) 13 + void notrace arch_local_irq_restore(unsigned long flags) 14 14 { 15 15 unsigned long __dummy0, __dummy1; 16 16 17 - if (flags == RAW_IRQ_DISABLED) { 17 + if (flags == ARCH_IRQ_DISABLED) { 18 18 __asm__ __volatile__ ( 19 19 "stc sr, %0\n\t" 20 20 "or #0xf0, %0\n\t" ··· 33 33 #endif 34 34 "ldc %0, sr\n\t" 35 35 : "=&r" (__dummy0), "=r" (__dummy1) 36 - : "1" (~RAW_IRQ_DISABLED) 36 + : "1" (~ARCH_IRQ_DISABLED) 37 37 : "memory" 38 38 ); 39 39 } 40 40 } 41 - EXPORT_SYMBOL(raw_local_irq_restore); 41 + EXPORT_SYMBOL(arch_local_irq_restore); 42 42 43 - unsigned long notrace __raw_local_save_flags(void) 43 + unsigned long notrace arch_local_save_flags(void) 44 44 { 45 45 unsigned long flags; 46 46 ··· 54 54 55 55 return flags; 56 56 } 57 - EXPORT_SYMBOL(__raw_local_save_flags); 57 + EXPORT_SYMBOL(arch_local_save_flags);
+22 -15
arch/sparc/include/asm/irqflags_32.h
··· 5 5 * 6 6 * This file gets included from lowlevel asm headers too, to provide 7 7 * wrapped versions of the local_irq_*() APIs, based on the 8 - * raw_local_irq_*() functions from the lowlevel headers. 8 + * arch_local_irq_*() functions from the lowlevel headers. 9 9 */ 10 10 #ifndef _ASM_IRQFLAGS_H 11 11 #define _ASM_IRQFLAGS_H 12 12 13 13 #ifndef __ASSEMBLY__ 14 14 15 - extern void raw_local_irq_restore(unsigned long); 16 - extern unsigned long __raw_local_irq_save(void); 17 - extern void raw_local_irq_enable(void); 15 + #include <linux/types.h> 18 16 19 - static inline unsigned long getipl(void) 17 + extern void arch_local_irq_restore(unsigned long); 18 + extern unsigned long arch_local_irq_save(void); 19 + extern void arch_local_irq_enable(void); 20 + 21 + static inline unsigned long arch_local_save_flags(void) 20 22 { 21 - unsigned long retval; 23 + unsigned long flags; 22 24 23 - __asm__ __volatile__("rd %%psr, %0" : "=r" (retval)); 24 - return retval; 25 + asm volatile("rd %%psr, %0" : "=r" (flags)); 26 + return flags; 25 27 } 26 28 27 - #define raw_local_save_flags(flags) ((flags) = getipl()) 28 - #define raw_local_irq_save(flags) ((flags) = __raw_local_irq_save()) 29 - #define raw_local_irq_disable() ((void) __raw_local_irq_save()) 30 - #define raw_irqs_disabled() ((getipl() & PSR_PIL) != 0) 31 - 32 - static inline int raw_irqs_disabled_flags(unsigned long flags) 29 + static inline void arch_local_irq_disable(void) 33 30 { 34 - return ((flags & PSR_PIL) != 0); 31 + arch_local_irq_save(); 32 + } 33 + 34 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 35 + { 36 + return (flags & PSR_PIL) != 0; 37 + } 38 + 39 + static inline bool arch_irqs_disabled(void) 40 + { 41 + return arch_irqs_disabled_flags(arch_local_save_flags()); 35 42 } 36 43 37 44 #endif /* (__ASSEMBLY__) */
+9 -20
arch/sparc/include/asm/irqflags_64.h
··· 5 5 * 6 6 * This file gets included from lowlevel asm headers too, to provide 7 7 * wrapped versions of the local_irq_*() APIs, based on the 8 - * raw_local_irq_*() functions from the lowlevel headers. 8 + * arch_local_irq_*() functions from the lowlevel headers. 9 9 */ 10 10 #ifndef _ASM_IRQFLAGS_H 11 11 #define _ASM_IRQFLAGS_H ··· 14 14 15 15 #ifndef __ASSEMBLY__ 16 16 17 - static inline unsigned long __raw_local_save_flags(void) 17 + static inline unsigned long arch_local_save_flags(void) 18 18 { 19 19 unsigned long flags; 20 20 ··· 26 26 return flags; 27 27 } 28 28 29 - #define raw_local_save_flags(flags) \ 30 - do { (flags) = __raw_local_save_flags(); } while (0) 31 - 32 - static inline void raw_local_irq_restore(unsigned long flags) 29 + static inline void arch_local_irq_restore(unsigned long flags) 33 30 { 34 31 __asm__ __volatile__( 35 32 "wrpr %0, %%pil" ··· 36 39 ); 37 40 } 38 41 39 - static inline void raw_local_irq_disable(void) 42 + static inline void arch_local_irq_disable(void) 40 43 { 41 44 __asm__ __volatile__( 42 45 "wrpr %0, %%pil" ··· 46 49 ); 47 50 } 48 51 49 - static inline void raw_local_irq_enable(void) 52 + static inline void arch_local_irq_enable(void) 50 53 { 51 54 __asm__ __volatile__( 52 55 "wrpr 0, %%pil" ··· 56 59 ); 57 60 } 58 61 59 - static inline int raw_irqs_disabled_flags(unsigned long flags) 62 + static inline int arch_irqs_disabled_flags(unsigned long flags) 60 63 { 61 64 return (flags > 0); 62 65 } 63 66 64 - static inline int raw_irqs_disabled(void) 67 + static inline int arch_irqs_disabled(void) 65 68 { 66 - unsigned long flags = __raw_local_save_flags(); 67 - 68 - return raw_irqs_disabled_flags(flags); 69 + return arch_irqs_disabled_flags(arch_local_save_flags()); 69 70 } 70 71 71 - /* 72 - * For spinlocks, etc: 73 - */ 74 - static inline unsigned long __raw_local_irq_save(void) 72 + static inline unsigned long arch_local_irq_save(void) 75 73 { 76 74 unsigned long flags, tmp; 77 75 ··· 91 99 92 100 return flags; 93 101 
} 94 - 95 - #define raw_local_irq_save(flags) \ 96 - do { (flags) = __raw_local_irq_save(); } while (0) 97 102 98 103 #endif /* (__ASSEMBLY__) */ 99 104
+6 -7
arch/sparc/kernel/irq_32.c
··· 57 57 #define SMP_NOP2 58 58 #define SMP_NOP3 59 59 #endif /* SMP */ 60 - unsigned long __raw_local_irq_save(void) 60 + unsigned long arch_local_irq_save(void) 61 61 { 62 62 unsigned long retval; 63 63 unsigned long tmp; ··· 74 74 75 75 return retval; 76 76 } 77 + EXPORT_SYMBOL(arch_local_irq_save); 77 78 78 - void raw_local_irq_enable(void) 79 + void arch_local_irq_enable(void) 79 80 { 80 81 unsigned long tmp; 81 82 ··· 90 89 : "i" (PSR_PIL) 91 90 : "memory"); 92 91 } 92 + EXPORT_SYMBOL(arch_local_irq_enable); 93 93 94 - void raw_local_irq_restore(unsigned long old_psr) 94 + void arch_local_irq_restore(unsigned long old_psr) 95 95 { 96 96 unsigned long tmp; 97 97 ··· 107 105 : "i" (PSR_PIL), "r" (old_psr) 108 106 : "memory"); 109 107 } 110 - 111 - EXPORT_SYMBOL(__raw_local_irq_save); 112 - EXPORT_SYMBOL(raw_local_irq_enable); 113 - EXPORT_SYMBOL(raw_local_irq_restore); 108 + EXPORT_SYMBOL(arch_local_irq_restore); 114 109 115 110 /* 116 111 * Dave Redman (djhr@tadpole.co.uk)
+1 -1
arch/sparc/prom/p1275.c
··· 39 39 unsigned long flags; 40 40 41 41 raw_local_save_flags(flags); 42 - raw_local_irq_restore(PIL_NMI); 42 + raw_local_irq_restore((unsigned long)PIL_NMI); 43 43 raw_spin_lock(&prom_entry_lock); 44 44 45 45 prom_world(1);
+19 -17
arch/tile/include/asm/irqflags.h
··· 103 103 #define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR) 104 104 105 105 /* Disable interrupts. */ 106 - #define raw_local_irq_disable() \ 106 + #define arch_local_irq_disable() \ 107 107 interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS) 108 108 109 109 /* Disable all interrupts, including NMIs. */ 110 - #define raw_local_irq_disable_all() \ 110 + #define arch_local_irq_disable_all() \ 111 111 interrupt_mask_set_mask(-1UL) 112 112 113 113 /* Re-enable all maskable interrupts. */ 114 - #define raw_local_irq_enable() \ 114 + #define arch_local_irq_enable() \ 115 115 interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask)) 116 116 117 117 /* Disable or enable interrupts based on flag argument. */ 118 - #define raw_local_irq_restore(disabled) do { \ 118 + #define arch_local_irq_restore(disabled) do { \ 119 119 if (disabled) \ 120 - raw_local_irq_disable(); \ 120 + arch_local_irq_disable(); \ 121 121 else \ 122 - raw_local_irq_enable(); \ 122 + arch_local_irq_enable(); \ 123 123 } while (0) 124 124 125 125 /* Return true if "flags" argument means interrupts are disabled. */ 126 - #define raw_irqs_disabled_flags(flags) ((flags) != 0) 126 + #define arch_irqs_disabled_flags(flags) ((flags) != 0) 127 127 128 128 /* Return true if interrupts are currently disabled. */ 129 - #define raw_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR) 129 + #define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR) 130 130 131 131 /* Save whether interrupts are currently disabled. */ 132 - #define raw_local_save_flags(flags) ((flags) = raw_irqs_disabled()) 132 + #define arch_local_save_flags() arch_irqs_disabled() 133 133 134 134 /* Save whether interrupts are currently disabled, then disable them. 
*/ 135 - #define raw_local_irq_save(flags) \ 136 - do { raw_local_save_flags(flags); raw_local_irq_disable(); } while (0) 135 + #define arch_local_irq_save() ({ \ 136 + unsigned long __flags = arch_local_save_flags(); \ 137 + arch_local_irq_disable(); \ 138 + __flags; }) 137 139 138 140 /* Prevent the given interrupt from being enabled next time we enable irqs. */ 139 - #define raw_local_irq_mask(interrupt) \ 141 + #define arch_local_irq_mask(interrupt) \ 140 142 (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt)) 141 143 142 144 /* Prevent the given interrupt from being enabled immediately. */ 143 - #define raw_local_irq_mask_now(interrupt) do { \ 144 - raw_local_irq_mask(interrupt); \ 145 + #define arch_local_irq_mask_now(interrupt) do { \ 146 + arch_local_irq_mask(interrupt); \ 145 147 interrupt_mask_set(interrupt); \ 146 148 } while (0) 147 149 148 150 /* Allow the given interrupt to be enabled next time we enable irqs. */ 149 - #define raw_local_irq_unmask(interrupt) \ 151 + #define arch_local_irq_unmask(interrupt) \ 150 152 (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt)) 151 153 152 154 /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ 153 - #define raw_local_irq_unmask_now(interrupt) do { \ 154 - raw_local_irq_unmask(interrupt); \ 155 + #define arch_local_irq_unmask_now(interrupt) do { \ 156 + arch_local_irq_unmask(interrupt); \ 155 157 if (!irqs_disabled()) \ 156 158 interrupt_mask_reset(interrupt); \ 157 159 } while (0)
+12 -20
arch/x86/include/asm/irqflags.h
··· 61 61 #else 62 62 #ifndef __ASSEMBLY__ 63 63 64 - static inline unsigned long __raw_local_save_flags(void) 64 + static inline unsigned long arch_local_save_flags(void) 65 65 { 66 66 return native_save_fl(); 67 67 } 68 68 69 - static inline void raw_local_irq_restore(unsigned long flags) 69 + static inline void arch_local_irq_restore(unsigned long flags) 70 70 { 71 71 native_restore_fl(flags); 72 72 } 73 73 74 - static inline void raw_local_irq_disable(void) 74 + static inline void arch_local_irq_disable(void) 75 75 { 76 76 native_irq_disable(); 77 77 } 78 78 79 - static inline void raw_local_irq_enable(void) 79 + static inline void arch_local_irq_enable(void) 80 80 { 81 81 native_irq_enable(); 82 82 } ··· 85 85 * Used in the idle loop; sti takes one instruction cycle 86 86 * to complete: 87 87 */ 88 - static inline void raw_safe_halt(void) 88 + static inline void arch_safe_halt(void) 89 89 { 90 90 native_safe_halt(); 91 91 } ··· 102 102 /* 103 103 * For spinlocks, etc: 104 104 */ 105 - static inline unsigned long __raw_local_irq_save(void) 105 + static inline unsigned long arch_local_irq_save(void) 106 106 { 107 - unsigned long flags = __raw_local_save_flags(); 108 - 109 - raw_local_irq_disable(); 110 - 107 + unsigned long flags = arch_local_save_flags(); 108 + arch_local_irq_disable(); 111 109 return flags; 112 110 } 113 111 #else ··· 151 153 #endif /* CONFIG_PARAVIRT */ 152 154 153 155 #ifndef __ASSEMBLY__ 154 - #define raw_local_save_flags(flags) \ 155 - do { (flags) = __raw_local_save_flags(); } while (0) 156 - 157 - #define raw_local_irq_save(flags) \ 158 - do { (flags) = __raw_local_irq_save(); } while (0) 159 - 160 - static inline int raw_irqs_disabled_flags(unsigned long flags) 156 + static inline int arch_irqs_disabled_flags(unsigned long flags) 161 157 { 162 158 return !(flags & X86_EFLAGS_IF); 163 159 } 164 160 165 - static inline int raw_irqs_disabled(void) 161 + static inline int arch_irqs_disabled(void) 166 162 { 167 - unsigned long flags = 
__raw_local_save_flags(); 163 + unsigned long flags = arch_local_save_flags(); 168 164 169 - return raw_irqs_disabled_flags(flags); 165 + return arch_irqs_disabled_flags(flags); 170 166 } 171 167 172 168 #else
+8 -8
arch/x86/include/asm/paravirt.h
··· 105 105 } 106 106 #endif 107 107 108 - static inline void raw_safe_halt(void) 108 + static inline void arch_safe_halt(void) 109 109 { 110 110 PVOP_VCALL0(pv_irq_ops.safe_halt); 111 111 } ··· 824 824 #define __PV_IS_CALLEE_SAVE(func) \ 825 825 ((struct paravirt_callee_save) { func }) 826 826 827 - static inline unsigned long __raw_local_save_flags(void) 827 + static inline unsigned long arch_local_save_flags(void) 828 828 { 829 829 return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); 830 830 } 831 831 832 - static inline void raw_local_irq_restore(unsigned long f) 832 + static inline void arch_local_irq_restore(unsigned long f) 833 833 { 834 834 PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); 835 835 } 836 836 837 - static inline void raw_local_irq_disable(void) 837 + static inline void arch_local_irq_disable(void) 838 838 { 839 839 PVOP_VCALLEE0(pv_irq_ops.irq_disable); 840 840 } 841 841 842 - static inline void raw_local_irq_enable(void) 842 + static inline void arch_local_irq_enable(void) 843 843 { 844 844 PVOP_VCALLEE0(pv_irq_ops.irq_enable); 845 845 } 846 846 847 - static inline unsigned long __raw_local_irq_save(void) 847 + static inline unsigned long arch_local_irq_save(void) 848 848 { 849 849 unsigned long f; 850 850 851 - f = __raw_local_save_flags(); 852 - raw_local_irq_disable(); 851 + f = arch_local_save_flags(); 852 + arch_local_irq_disable(); 853 853 return f; 854 854 } 855 855
+1 -1
arch/x86/xen/spinlock.c
··· 224 224 goto out; 225 225 } 226 226 227 - flags = __raw_local_save_flags(); 227 + flags = arch_local_save_flags(); 228 228 if (irq_enable) { 229 229 ADD_STATS(taken_slow_irqenable, 1); 230 230 raw_local_irq_enable();
+58
arch/xtensa/include/asm/irqflags.h
··· 1 + /* 2 + * Xtensa IRQ flags handling functions 3 + * 4 + * This file is subject to the terms and conditions of the GNU General Public 5 + * License. See the file "COPYING" in the main directory of this archive 6 + * for more details. 7 + * 8 + * Copyright (C) 2001 - 2005 Tensilica Inc. 9 + */ 10 + 11 + #ifndef _XTENSA_IRQFLAGS_H 12 + #define _XTENSA_IRQFLAGS_H 13 + 14 + #include <linux/types.h> 15 + 16 + static inline unsigned long arch_local_save_flags(void) 17 + { 18 + unsigned long flags; 19 + asm volatile("rsr %0,"__stringify(PS) : "=a" (flags)); 20 + return flags; 21 + } 22 + 23 + static inline unsigned long arch_local_irq_save(void) 24 + { 25 + unsigned long flags; 26 + asm volatile("rsil %0, "__stringify(LOCKLEVEL) 27 + : "=a" (flags) :: "memory"); 28 + return flags; 29 + } 30 + 31 + static inline void arch_local_irq_disable(void) 32 + { 33 + arch_local_irq_save(); 34 + } 35 + 36 + static inline void arch_local_irq_enable(void) 37 + { 38 + unsigned long flags; 39 + asm volatile("rsil %0, 0" : "=a" (flags) :: "memory"); 40 + } 41 + 42 + static inline void arch_local_irq_restore(unsigned long flags) 43 + { 44 + asm volatile("wsr %0, "__stringify(PS)" ; rsync" 45 + :: "a" (flags) : "memory"); 46 + } 47 + 48 + static inline bool arch_irqs_disabled_flags(unsigned long flags) 49 + { 50 + return (flags & 0xf) != 0; 51 + } 52 + 53 + static inline bool arch_irqs_disabled(void) 54 + { 55 + return arch_irqs_disabled_flags(arch_local_save_flags()); 56 + } 57 + 58 + #endif /* _XTENSA_IRQFLAGS_H */
+1 -32
arch/xtensa/include/asm/system.h
··· 12 12 #define _XTENSA_SYSTEM_H 13 13 14 14 #include <linux/stringify.h> 15 + #include <linux/irqflags.h> 15 16 16 17 #include <asm/processor.h> 17 - 18 - /* interrupt control */ 19 - 20 - #define local_save_flags(x) \ 21 - __asm__ __volatile__ ("rsr %0,"__stringify(PS) : "=a" (x)); 22 - #define local_irq_restore(x) do { \ 23 - __asm__ __volatile__ ("wsr %0, "__stringify(PS)" ; rsync" \ 24 - :: "a" (x) : "memory"); } while(0); 25 - #define local_irq_save(x) do { \ 26 - __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) \ 27 - : "=a" (x) :: "memory");} while(0); 28 - 29 - static inline void local_irq_disable(void) 30 - { 31 - unsigned long flags; 32 - __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) 33 - : "=a" (flags) :: "memory"); 34 - } 35 - static inline void local_irq_enable(void) 36 - { 37 - unsigned long flags; 38 - __asm__ __volatile__ ("rsil %0, 0" : "=a" (flags) :: "memory"); 39 - 40 - } 41 - 42 - static inline int irqs_disabled(void) 43 - { 44 - unsigned long flags; 45 - local_save_flags(flags); 46 - return flags & 0xf; 47 - } 48 - 49 18 50 19 #define smp_read_barrier_depends() do { } while(0) 51 20 #define read_barrier_depends() do { } while(0)
+65 -65
crypto/des_generic.c
··· 614 614 #define T3(x) pt[2 * (x) + 2] 615 615 #define T4(x) pt[2 * (x) + 3] 616 616 617 - #define PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a)) 617 + #define DES_PC2(a, b, c, d) (T4(d) | T3(c) | T2(b) | T1(a)) 618 618 619 619 /* 620 620 * Encryption key expansion ··· 639 639 b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; 640 640 a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; 641 641 642 - pe[15 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; 643 - pe[14 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; 644 - pe[13 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; 645 - pe[12 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; 646 - pe[11 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; 647 - pe[10 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; 648 - pe[ 9 * 2 + 0] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; 649 - pe[ 8 * 2 + 0] = PC2(d, a, b, c); c = rs[c]; 650 - pe[ 7 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; 651 - pe[ 6 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; 652 - pe[ 5 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; 653 - pe[ 4 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; 654 - pe[ 3 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; 655 - pe[ 2 * 2 + 0] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; 656 - pe[ 1 * 2 + 0] = PC2(c, d, a, b); b = rs[b]; 657 - pe[ 0 * 2 + 0] = PC2(b, c, d, a); 642 + pe[15 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; 643 + pe[14 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; 644 + pe[13 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; 645 + pe[12 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; 646 + pe[11 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; 647 + pe[10 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; 648 + pe[ 9 * 2 + 0] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; 649 + pe[ 8 * 2 + 0] = DES_PC2(d, a, b, c); c = rs[c]; 650 + pe[ 7 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; 651 + pe[ 6 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; 652 + 
pe[ 5 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; 653 + pe[ 4 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; 654 + pe[ 3 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; 655 + pe[ 2 * 2 + 0] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; 656 + pe[ 1 * 2 + 0] = DES_PC2(c, d, a, b); b = rs[b]; 657 + pe[ 0 * 2 + 0] = DES_PC2(b, c, d, a); 658 658 659 659 /* Check if first half is weak */ 660 660 w = (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); ··· 670 670 /* Check if second half is weak */ 671 671 w |= (a ^ c) | (b ^ d) | (rs[a] ^ c) | (b ^ rs[d]); 672 672 673 - pe[15 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; 674 - pe[14 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; 675 - pe[13 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; 676 - pe[12 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; 677 - pe[11 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; 678 - pe[10 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; 679 - pe[ 9 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; 680 - pe[ 8 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; 681 - pe[ 7 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; 682 - pe[ 6 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; 683 - pe[ 5 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; 684 - pe[ 4 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; 685 - pe[ 3 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; 686 - pe[ 2 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; 687 - pe[ 1 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; 688 - pe[ 0 * 2 + 1] = PC2(b, c, d, a); 673 + pe[15 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; 674 + pe[14 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; 675 + pe[13 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; 676 + pe[12 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; 677 + pe[11 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; 678 + pe[10 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; 679 + pe[ 9 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; 680 + pe[ 8 * 2 + 1] = 
DES_PC2(d, a, b, c); c = rs[c]; 681 + pe[ 7 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; 682 + pe[ 6 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; 683 + pe[ 5 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; 684 + pe[ 4 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; 685 + pe[ 3 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; 686 + pe[ 2 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; 687 + pe[ 1 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; 688 + pe[ 0 * 2 + 1] = DES_PC2(b, c, d, a); 689 689 690 690 /* Fixup: 2413 5768 -> 1357 2468 */ 691 691 for (d = 0; d < 16; ++d) { ··· 722 722 b = k[6]; b &= 0x0e; b <<= 4; b |= k[2] & 0x1e; b = pc1[b]; 723 723 a = k[7]; a &= 0x0e; a <<= 4; a |= k[3] & 0x1e; a = pc1[a]; 724 724 725 - pe[ 0 * 2] = PC2(a, b, c, d); d = rs[d]; 726 - pe[ 1 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; 727 - pe[ 2 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; 728 - pe[ 3 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; 729 - pe[ 4 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; 730 - pe[ 5 * 2] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; 731 - pe[ 6 * 2] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; 732 - pe[ 7 * 2] = PC2(d, a, b, c); c = rs[c]; 733 - pe[ 8 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; 734 - pe[ 9 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; 735 - pe[10 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; 736 - pe[11 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; 737 - pe[12 * 2] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; 738 - pe[13 * 2] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; 739 - pe[14 * 2] = PC2(c, d, a, b); b = rs[b]; 740 - pe[15 * 2] = PC2(b, c, d, a); 725 + pe[ 0 * 2] = DES_PC2(a, b, c, d); d = rs[d]; 726 + pe[ 1 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; 727 + pe[ 2 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; 728 + pe[ 3 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; 729 + pe[ 4 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; 730 + pe[ 5 * 2] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; 731 
+ pe[ 6 * 2] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; 732 + pe[ 7 * 2] = DES_PC2(d, a, b, c); c = rs[c]; 733 + pe[ 8 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; 734 + pe[ 9 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; 735 + pe[10 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; 736 + pe[11 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; 737 + pe[12 * 2] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; 738 + pe[13 * 2] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; 739 + pe[14 * 2] = DES_PC2(c, d, a, b); b = rs[b]; 740 + pe[15 * 2] = DES_PC2(b, c, d, a); 741 741 742 742 /* Skip to next table set */ 743 743 pt += 512; ··· 747 747 b = k[2]; b &= 0xe0; b >>= 4; b |= k[6] & 0xf0; b = pc1[b + 1]; 748 748 a = k[3]; a &= 0xe0; a >>= 4; a |= k[7] & 0xf0; a = pc1[a + 1]; 749 749 750 - pe[ 0 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; 751 - pe[ 1 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; 752 - pe[ 2 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; 753 - pe[ 3 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; 754 - pe[ 4 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; 755 - pe[ 5 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; b = rs[b]; 756 - pe[ 6 * 2 + 1] = PC2(b, c, d, a); a = rs[a]; d = rs[d]; 757 - pe[ 7 * 2 + 1] = PC2(d, a, b, c); c = rs[c]; 758 - pe[ 8 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; 759 - pe[ 9 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; 760 - pe[10 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; 761 - pe[11 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; 762 - pe[12 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; a = rs[a]; 763 - pe[13 * 2 + 1] = PC2(a, b, c, d); d = rs[d]; c = rs[c]; 764 - pe[14 * 2 + 1] = PC2(c, d, a, b); b = rs[b]; 765 - pe[15 * 2 + 1] = PC2(b, c, d, a); 750 + pe[ 0 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; 751 + pe[ 1 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; 752 + pe[ 2 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; 753 + pe[ 3 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; 754 + pe[ 4 * 2 + 
1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; 755 + pe[ 5 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; b = rs[b]; 756 + pe[ 6 * 2 + 1] = DES_PC2(b, c, d, a); a = rs[a]; d = rs[d]; 757 + pe[ 7 * 2 + 1] = DES_PC2(d, a, b, c); c = rs[c]; 758 + pe[ 8 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; 759 + pe[ 9 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; 760 + pe[10 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; 761 + pe[11 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; 762 + pe[12 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; a = rs[a]; 763 + pe[13 * 2 + 1] = DES_PC2(a, b, c, d); d = rs[d]; c = rs[c]; 764 + pe[14 * 2 + 1] = DES_PC2(c, d, a, b); b = rs[b]; 765 + pe[15 * 2 + 1] = DES_PC2(b, c, d, a); 766 766 767 767 /* Fixup: 2413 5768 -> 1357 2468 */ 768 768 for (d = 0; d < 16; ++d) {
+1
drivers/net/bfin_mac.c
··· 38 38 #include <asm/blackfin.h> 39 39 #include <asm/cacheflush.h> 40 40 #include <asm/portmux.h> 41 + #include <mach/pll.h> 41 42 42 43 #include "bfin_mac.h" 43 44
+1
drivers/net/smc91x.c
··· 72 72 #include <linux/sched.h> 73 73 #include <linux/delay.h> 74 74 #include <linux/interrupt.h> 75 + #include <linux/irq.h> 75 76 #include <linux/errno.h> 76 77 #include <linux/ioport.h> 77 78 #include <linux/crc32.h>
+1 -1
drivers/s390/char/sclp.c
··· 468 468 cr0_sync &= 0xffff00a0; 469 469 cr0_sync |= 0x00000200; 470 470 __ctl_load(cr0_sync, 0, 0); 471 - __raw_local_irq_stosm(0x01); 471 + __arch_local_irq_stosm(0x01); 472 472 /* Loop until driver state indicates finished request */ 473 473 while (sclp_running_state != sclp_running_state_idle) { 474 474 /* Check for expired request timer */
+1
drivers/vlynq/vlynq.c
··· 31 31 #include <linux/delay.h> 32 32 #include <linux/io.h> 33 33 #include <linux/slab.h> 34 + #include <linux/irq.h> 34 35 35 36 #include <linux/vlynq.h> 36 37
+1
drivers/watchdog/octeon-wdt-main.c
··· 64 64 #include <linux/cpu.h> 65 65 #include <linux/smp.h> 66 66 #include <linux/fs.h> 67 + #include <linux/irq.h> 67 68 68 69 #include <asm/mipsregs.h> 69 70 #include <asm/uasm.h>
+3 -2
include/asm-generic/atomic.h
··· 43 43 */ 44 44 #define atomic_set(v, i) (((v)->counter) = (i)) 45 45 46 + #include <linux/irqflags.h> 46 47 #include <asm/system.h> 47 48 48 49 /** ··· 58 57 unsigned long flags; 59 58 int temp; 60 59 61 - raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ 60 + raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ 62 61 temp = v->counter; 63 62 temp += i; 64 63 v->counter = temp; ··· 79 78 unsigned long flags; 80 79 int temp; 81 80 82 - raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */ 81 + raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */ 83 82 temp = v->counter; 84 83 temp -= i; 85 84 v->counter = temp;
+1
include/asm-generic/cmpxchg-local.h
··· 2 2 #define __ASM_GENERIC_CMPXCHG_LOCAL_H 3 3 4 4 #include <linux/types.h> 5 + #include <linux/irqflags.h> 5 6 6 7 extern unsigned long wrong_size_cmpxchg(volatile void *ptr); 7 8
+23 -29
include/asm-generic/irqflags.h
··· 5 5 * All architectures should implement at least the first two functions, 6 6 * usually inline assembly will be the best way. 7 7 */ 8 - #ifndef RAW_IRQ_DISABLED 9 - #define RAW_IRQ_DISABLED 0 10 - #define RAW_IRQ_ENABLED 1 8 + #ifndef ARCH_IRQ_DISABLED 9 + #define ARCH_IRQ_DISABLED 0 10 + #define ARCH_IRQ_ENABLED 1 11 11 #endif 12 12 13 13 /* read interrupt enabled status */ 14 - #ifndef __raw_local_save_flags 15 - unsigned long __raw_local_save_flags(void); 14 + #ifndef arch_local_save_flags 15 + unsigned long arch_local_save_flags(void); 16 16 #endif 17 17 18 18 /* set interrupt enabled status */ 19 - #ifndef raw_local_irq_restore 20 - void raw_local_irq_restore(unsigned long flags); 19 + #ifndef arch_local_irq_restore 20 + void arch_local_irq_restore(unsigned long flags); 21 21 #endif 22 22 23 23 /* get status and disable interrupts */ 24 - #ifndef __raw_local_irq_save 25 - static inline unsigned long __raw_local_irq_save(void) 24 + #ifndef arch_local_irq_save 25 + static inline unsigned long arch_local_irq_save(void) 26 26 { 27 27 unsigned long flags; 28 - flags = __raw_local_save_flags(); 29 - raw_local_irq_restore(RAW_IRQ_DISABLED); 28 + flags = arch_local_save_flags(); 29 + arch_local_irq_restore(ARCH_IRQ_DISABLED); 30 30 return flags; 31 31 } 32 32 #endif 33 33 34 34 /* test flags */ 35 - #ifndef raw_irqs_disabled_flags 36 - static inline int raw_irqs_disabled_flags(unsigned long flags) 35 + #ifndef arch_irqs_disabled_flags 36 + static inline int arch_irqs_disabled_flags(unsigned long flags) 37 37 { 38 - return flags == RAW_IRQ_DISABLED; 38 + return flags == ARCH_IRQ_DISABLED; 39 39 } 40 40 #endif 41 41 42 42 /* unconditionally enable interrupts */ 43 - #ifndef raw_local_irq_enable 44 - static inline void raw_local_irq_enable(void) 43 + #ifndef arch_local_irq_enable 44 + static inline void arch_local_irq_enable(void) 45 45 { 46 - raw_local_irq_restore(RAW_IRQ_ENABLED); 46 + arch_local_irq_restore(ARCH_IRQ_ENABLED); 47 47 } 48 48 #endif 49 49 50 50 /* 
unconditionally disable interrupts */ 51 - #ifndef raw_local_irq_disable 52 - static inline void raw_local_irq_disable(void) 51 + #ifndef arch_local_irq_disable 52 + static inline void arch_local_irq_disable(void) 53 53 { 54 - raw_local_irq_restore(RAW_IRQ_DISABLED); 54 + arch_local_irq_restore(ARCH_IRQ_DISABLED); 55 55 } 56 56 #endif 57 57 58 58 /* test hardware interrupt enable bit */ 59 - #ifndef raw_irqs_disabled 60 - static inline int raw_irqs_disabled(void) 59 + #ifndef arch_irqs_disabled 60 + static inline int arch_irqs_disabled(void) 61 61 { 62 - return raw_irqs_disabled_flags(__raw_local_save_flags()); 62 + return arch_irqs_disabled_flags(arch_local_save_flags()); 63 63 } 64 64 #endif 65 - 66 - #define raw_local_save_flags(flags) \ 67 - do { (flags) = __raw_local_save_flags(); } while (0) 68 - 69 - #define raw_local_irq_save(flags) \ 70 - do { (flags) = __raw_local_irq_save(); } while (0) 71 65 72 66 #endif /* __ASM_GENERIC_IRQFLAGS_H */
-1
include/linux/hardirq.h
··· 8 8 #include <linux/lockdep.h> 9 9 #include <linux/ftrace_irq.h> 10 10 #include <asm/hardirq.h> 11 - #include <asm/system.h> 12 11 13 12 /* 14 13 * We put the hardirq and softirq counter into the preemption
+65 -44
include/linux/irqflags.h
··· 12 12 #define _LINUX_TRACE_IRQFLAGS_H 13 13 14 14 #include <linux/typecheck.h> 15 + #include <asm/irqflags.h> 15 16 16 17 #ifdef CONFIG_TRACE_IRQFLAGS 17 18 extern void trace_softirqs_on(unsigned long ip); ··· 53 52 # define start_critical_timings() do { } while (0) 54 53 #endif 55 54 55 + /* 56 + * Wrap the arch provided IRQ routines to provide appropriate checks. 57 + */ 58 + #define raw_local_irq_disable() arch_local_irq_disable() 59 + #define raw_local_irq_enable() arch_local_irq_enable() 60 + #define raw_local_irq_save(flags) \ 61 + do { \ 62 + typecheck(unsigned long, flags); \ 63 + flags = arch_local_irq_save(); \ 64 + } while (0) 65 + #define raw_local_irq_restore(flags) \ 66 + do { \ 67 + typecheck(unsigned long, flags); \ 68 + arch_local_irq_restore(flags); \ 69 + } while (0) 70 + #define raw_local_save_flags(flags) \ 71 + do { \ 72 + typecheck(unsigned long, flags); \ 73 + flags = arch_local_save_flags(); \ 74 + } while (0) 75 + #define raw_irqs_disabled_flags(flags) \ 76 + ({ \ 77 + typecheck(unsigned long, flags); \ 78 + arch_irqs_disabled_flags(flags); \ 79 + }) 80 + #define raw_irqs_disabled() (arch_irqs_disabled()) 81 + #define raw_safe_halt() arch_safe_halt() 82 + 83 + /* 84 + * The local_irq_*() APIs are equal to the raw_local_irq*() 85 + * if !TRACE_IRQFLAGS. 
86 + */ 56 87 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT 57 - 58 - #include <asm/irqflags.h> 59 - 60 88 #define local_irq_enable() \ 61 89 do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) 62 90 #define local_irq_disable() \ 63 91 do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) 64 92 #define local_irq_save(flags) \ 65 93 do { \ 66 - typecheck(unsigned long, flags); \ 67 94 raw_local_irq_save(flags); \ 68 95 trace_hardirqs_off(); \ 69 96 } while (0) ··· 99 70 100 71 #define local_irq_restore(flags) \ 101 72 do { \ 102 - typecheck(unsigned long, flags); \ 103 73 if (raw_irqs_disabled_flags(flags)) { \ 104 74 raw_local_irq_restore(flags); \ 105 75 trace_hardirqs_off(); \ ··· 107 79 raw_local_irq_restore(flags); \ 108 80 } \ 109 81 } while (0) 110 - #else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ 111 - /* 112 - * The local_irq_*() APIs are equal to the raw_local_irq*() 113 - * if !TRACE_IRQFLAGS. 114 - */ 115 - # define raw_local_irq_disable() local_irq_disable() 116 - # define raw_local_irq_enable() local_irq_enable() 117 - # define raw_local_irq_save(flags) \ 118 - do { \ 119 - typecheck(unsigned long, flags); \ 120 - local_irq_save(flags); \ 121 - } while (0) 122 - # define raw_local_irq_restore(flags) \ 123 - do { \ 124 - typecheck(unsigned long, flags); \ 125 - local_irq_restore(flags); \ 126 - } while (0) 127 - #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ 128 - 129 - #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT 130 - #define safe_halt() \ 131 - do { \ 132 - trace_hardirqs_on(); \ 133 - raw_safe_halt(); \ 134 - } while (0) 135 - 136 82 #define local_save_flags(flags) \ 137 83 do { \ 138 - typecheck(unsigned long, flags); \ 139 84 raw_local_save_flags(flags); \ 140 85 } while (0) 141 86 142 - #define irqs_disabled() \ 143 - ({ \ 144 - unsigned long _flags; \ 145 - \ 146 - raw_local_save_flags(_flags); \ 147 - raw_irqs_disabled_flags(_flags); \ 148 - }) 87 + #define irqs_disabled_flags(flags) \ 88 + ({ \ 89 + raw_irqs_disabled_flags(flags); \ 90 + }) 149 91 
150 - #define irqs_disabled_flags(flags) \ 151 - ({ \ 152 - typecheck(unsigned long, flags); \ 153 - raw_irqs_disabled_flags(flags); \ 154 - }) 92 + #define irqs_disabled() \ 93 + ({ \ 94 + unsigned long _flags; \ 95 + raw_local_save_flags(_flags); \ 96 + raw_irqs_disabled_flags(_flags); \ 97 + }) 98 + 99 + #define safe_halt() \ 100 + do { \ 101 + trace_hardirqs_on(); \ 102 + raw_safe_halt(); \ 103 + } while (0) 104 + 105 + 106 + #else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ 107 + 108 + #define local_irq_enable() do { raw_local_irq_enable(); } while (0) 109 + #define local_irq_disable() do { raw_local_irq_disable(); } while (0) 110 + #define local_irq_save(flags) \ 111 + do { \ 112 + raw_local_irq_save(flags); \ 113 + } while (0) 114 + #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) 115 + #define local_save_flags(flags) do { raw_local_save_flags(flags); } while (0) 116 + #define irqs_disabled() (raw_irqs_disabled()) 117 + #define irqs_disabled_flags(flags) (raw_irqs_disabled_flags(flags)) 118 + #define safe_halt() do { raw_safe_halt(); } while (0) 119 + 155 120 #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ 156 121 157 122 #endif
-1
include/linux/list.h
··· 5 5 #include <linux/stddef.h> 6 6 #include <linux/poison.h> 7 7 #include <linux/prefetch.h> 8 - #include <asm/system.h> 9 8 10 9 /* 11 10 * Simple doubly linked list implementation.
+1
include/linux/spinlock.h
··· 50 50 #include <linux/preempt.h> 51 51 #include <linux/linkage.h> 52 52 #include <linux/compiler.h> 53 + #include <linux/irqflags.h> 53 54 #include <linux/thread_info.h> 54 55 #include <linux/kernel.h> 55 56 #include <linux/stringify.h>