Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Blackfin: Rename IRQ flags handling functions

Rename h/w IRQ flags handling functions to be in line with what is expected for
the irq renaming patch. This renames local_*_hw() to hard_local_*() using the
following perl command:

perl -pi -e 's/local_irq_(restore|enable|disable)_hw/hard_local_irq_\1/ or s/local_irq_save_hw([_a-z]*)[(]flags[)]/flags = hard_local_irq_save\1()/' `find arch/blackfin/ -name "*.[ch]"`

and then fixing up asm/irqflags.h manually.

Additionally, arch/hard_local_save_flags() and arch/hard_local_irq_save() both
return the flags rather than passing them through the argument list.

Signed-off-by: David Howells <dhowells@redhat.com>

+295 -285
+4 -4
arch/blackfin/include/asm/ipipe.h
··· 49 49 #define prepare_arch_switch(next) \ 50 50 do { \ 51 51 ipipe_schedule_notify(current, next); \ 52 - local_irq_disable_hw(); \ 52 + hard_local_irq_disable(); \ 53 53 } while (0) 54 54 55 55 #define task_hijacked(p) \ ··· 57 57 int __x__ = __ipipe_root_domain_p; \ 58 58 __clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \ 59 59 if (__x__) \ 60 - local_irq_enable_hw(); \ 60 + hard_local_irq_enable(); \ 61 61 !__x__; \ 62 62 }) 63 63 ··· 167 167 #define __ipipe_run_isr(ipd, irq) \ 168 168 do { \ 169 169 if (!__ipipe_pipeline_head_p(ipd)) \ 170 - local_irq_enable_hw(); \ 170 + hard_local_irq_enable(); \ 171 171 if (ipd == ipipe_root_domain) { \ 172 172 if (unlikely(ipipe_virtual_irq_p(irq))) { \ 173 173 irq_enter(); \ ··· 183 183 __ipipe_run_irqtail(); \ 184 184 __set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \ 185 185 } \ 186 - local_irq_disable_hw(); \ 186 + hard_local_irq_disable(); \ 187 187 } while (0) 188 188 189 189 #define __ipipe_syscall_watched_p(p, sc) \
+158 -148
arch/blackfin/include/asm/irqflags.h
··· 33 33 return flags; 34 34 } 35 35 36 - #ifdef CONFIG_IPIPE 37 - 38 - #include <linux/compiler.h> 39 - #include <linux/ipipe_base.h> 40 - #include <linux/ipipe_trace.h> 41 - 42 36 #ifdef CONFIG_DEBUG_HWERR 43 37 # define bfin_no_irqs 0x3f 44 38 #else 45 39 # define bfin_no_irqs 0x1f 46 40 #endif 47 41 48 - #define raw_local_irq_disable() \ 49 - do { \ 50 - ipipe_check_context(ipipe_root_domain); \ 51 - __ipipe_stall_root(); \ 52 - barrier(); \ 53 - } while (0) 42 + /*****************************************************************************/ 43 + /* 44 + * Hard, untraced CPU interrupt flag manipulation and access. 45 + */ 46 + static inline void __hard_local_irq_disable(void) 47 + { 48 + bfin_cli(); 49 + } 54 50 55 - #define raw_local_irq_enable() \ 56 - do { \ 57 - barrier(); \ 58 - ipipe_check_context(ipipe_root_domain); \ 59 - __ipipe_unstall_root(); \ 60 - } while (0) 51 + static inline void __hard_local_irq_enable(void) 52 + { 53 + bfin_sti(bfin_irq_flags); 54 + } 61 55 62 - #define raw_local_save_flags_ptr(x) \ 63 - do { \ 64 - *(x) = __ipipe_test_root() ? bfin_no_irqs : bfin_irq_flags; \ 65 - } while (0) 56 + static inline unsigned long hard_local_save_flags(void) 57 + { 58 + return bfin_read_IMASK(); 59 + } 66 60 67 - #define raw_local_save_flags(x) raw_local_save_flags_ptr(&(x)) 61 + static inline unsigned long __hard_local_irq_save(void) 62 + { 63 + unsigned long flags; 64 + flags = bfin_cli(); 65 + #ifdef CONFIG_DEBUG_HWERR 66 + bfin_sti(0x3f); 67 + #endif 68 + return flags; 69 + } 68 70 69 - #define raw_irqs_disabled_flags(x) ((x) == bfin_no_irqs) 71 + static inline int hard_irqs_disabled_flags(unsigned long flags) 72 + { 73 + return (flags & ~0x3f) == 0; 74 + } 70 75 71 - #define raw_local_irq_save_ptr(x) \ 72 - do { \ 73 - *(x) = __ipipe_test_and_stall_root() ? 
bfin_no_irqs : bfin_irq_flags; \ 74 - barrier(); \ 75 - } while (0) 76 + static inline int hard_irqs_disabled(void) 77 + { 78 + unsigned long flags = hard_local_save_flags(); 79 + return hard_irqs_disabled_flags(flags); 80 + } 76 81 77 - #define raw_local_irq_save(x) \ 78 - do { \ 79 - ipipe_check_context(ipipe_root_domain); \ 80 - raw_local_irq_save_ptr(&(x)); \ 81 - } while (0) 82 + static inline void __hard_local_irq_restore(unsigned long flags) 83 + { 84 + if (!hard_irqs_disabled_flags(flags)) 85 + __hard_local_irq_enable(); 86 + } 82 87 83 - static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real) 88 + /*****************************************************************************/ 89 + /* 90 + * Interrupt pipe handling. 91 + */ 92 + #ifdef CONFIG_IPIPE 93 + 94 + #include <linux/compiler.h> 95 + #include <linux/ipipe_base.h> 96 + #include <linux/ipipe_trace.h> 97 + 98 + /* 99 + * Interrupt pipe interface to linux/irqflags.h. 100 + */ 101 + static inline void arch_local_irq_disable(void) 102 + { 103 + ipipe_check_context(ipipe_root_domain); 104 + __ipipe_stall_root(); 105 + barrier(); 106 + } 107 + 108 + static inline void arch_local_irq_enable(void) 109 + { 110 + barrier(); 111 + ipipe_check_context(ipipe_root_domain); 112 + __ipipe_unstall_root(); 113 + } 114 + 115 + static inline unsigned long arch_local_save_flags(void) 116 + { 117 + return __ipipe_test_root() ? bfin_no_irqs : bfin_irq_flags; 118 + } 119 + 120 + static inline int arch_irqs_disabled_flags(unsigned long flags) 121 + { 122 + return flags == bfin_no_irqs; 123 + } 124 + 125 + static inline void arch_local_irq_save_ptr(unsigned long *_flags) 126 + { 127 + *_flags = __ipipe_test_and_stall_root() ? 
bfin_no_irqs : bfin_irq_flags; 128 + barrier(); 129 + } 130 + 131 + static inline unsigned long arch_local_irq_save(void) 132 + { 133 + ipipe_check_context(ipipe_root_domain); 134 + return __hard_local_irq_save(); 135 + } 136 + 137 + static inline unsigned long arch_mangle_irq_bits(int virt, unsigned long real) 84 138 { 85 139 /* 86 140 * Merge virtual and real interrupt mask bits into a single ··· 143 89 return (real & ~(1 << 31)) | ((virt != 0) << 31); 144 90 } 145 91 146 - static inline int raw_demangle_irq_bits(unsigned long *x) 92 + static inline int arch_demangle_irq_bits(unsigned long *x) 147 93 { 148 94 int virt = (*x & (1 << 31)) != 0; 149 95 *x &= ~(1L << 31); 150 96 return virt; 151 97 } 152 98 153 - static inline void local_irq_disable_hw_notrace(void) 154 - { 155 - bfin_cli(); 156 - } 157 - 158 - static inline void local_irq_enable_hw_notrace(void) 159 - { 160 - bfin_sti(bfin_irq_flags); 161 - } 162 - 163 - #define local_save_flags_hw(flags) \ 164 - do { \ 165 - (flags) = bfin_read_IMASK(); \ 166 - } while (0) 167 - 168 - #define irqs_disabled_flags_hw(flags) (((flags) & ~0x3f) == 0) 169 - 170 - #define irqs_disabled_hw() \ 171 - ({ \ 172 - unsigned long flags; \ 173 - local_save_flags_hw(flags); \ 174 - irqs_disabled_flags_hw(flags); \ 175 - }) 176 - 177 - static inline void local_irq_save_ptr_hw(unsigned long *flags) 178 - { 179 - *flags = bfin_cli(); 180 - #ifdef CONFIG_DEBUG_HWERR 181 - bfin_sti(0x3f); 182 - #endif 183 - } 184 - 185 - #define local_irq_save_hw_notrace(flags) \ 186 - do { \ 187 - local_irq_save_ptr_hw(&(flags)); \ 188 - } while (0) 189 - 190 - static inline void local_irq_restore_hw_notrace(unsigned long flags) 191 - { 192 - if (!irqs_disabled_flags_hw(flags)) 193 - local_irq_enable_hw_notrace(); 194 - } 195 - 99 + /* 100 + * Interface to various arch routines that may be traced. 
101 + */ 196 102 #ifdef CONFIG_IPIPE_TRACE_IRQSOFF 197 - # define local_irq_disable_hw() \ 198 - do { \ 199 - if (!irqs_disabled_hw()) { \ 200 - local_irq_disable_hw_notrace(); \ 201 - ipipe_trace_begin(0x80000000); \ 202 - } \ 203 - } while (0) 204 - # define local_irq_enable_hw() \ 205 - do { \ 206 - if (irqs_disabled_hw()) { \ 207 - ipipe_trace_end(0x80000000); \ 208 - local_irq_enable_hw_notrace(); \ 209 - } \ 210 - } while (0) 211 - # define local_irq_save_hw(flags) \ 212 - do { \ 213 - local_save_flags_hw(flags); \ 214 - if (!irqs_disabled_flags_hw(flags)) { \ 215 - local_irq_disable_hw_notrace(); \ 216 - ipipe_trace_begin(0x80000001); \ 217 - } \ 218 - } while (0) 219 - # define local_irq_restore_hw(flags) \ 220 - do { \ 221 - if (!irqs_disabled_flags_hw(flags)) { \ 222 - ipipe_trace_end(0x80000001); \ 223 - local_irq_enable_hw_notrace(); \ 224 - } \ 225 - } while (0) 103 + static inline void hard_local_irq_disable(void) 104 + { 105 + if (!hard_irqs_disabled()) { 106 + __hard_local_irq_disable(); 107 + ipipe_trace_begin(0x80000000); 108 + } 109 + } 110 + 111 + static inline void hard_local_irq_enable(void) 112 + { 113 + if (hard_irqs_disabled()) { 114 + ipipe_trace_end(0x80000000); 115 + __hard_local_irq_enable(); 116 + } 117 + } 118 + 119 + static inline unsigned long hard_local_irq_save(void) 120 + { 121 + unsigned long flags = hard_local_save_flags(); 122 + if (!hard_irqs_disabled_flags(flags)) { 123 + __hard_local_irq_disable(); 124 + ipipe_trace_begin(0x80000001); 125 + } 126 + return flags; 127 + } 128 + 129 + static inline void hard_local_irq_restore(unsigned long flags) 130 + { 131 + if (!hard_irqs_disabled_flags(flags)) { 132 + ipipe_trace_end(0x80000001); 133 + __hard_local_irq_enable(); 134 + } 135 + } 136 + 226 137 #else /* !CONFIG_IPIPE_TRACE_IRQSOFF */ 227 - # define local_irq_disable_hw() local_irq_disable_hw_notrace() 228 - # define local_irq_enable_hw() local_irq_enable_hw_notrace() 229 - # define local_irq_save_hw(flags) 
local_irq_save_hw_notrace(flags) 230 - # define local_irq_restore_hw(flags) local_irq_restore_hw_notrace(flags) 138 + # define hard_local_irq_disable() __hard_local_irq_disable() 139 + # define hard_local_irq_enable() __hard_local_irq_enable() 140 + # define hard_local_irq_save() __hard_local_irq_save() 141 + # define hard_local_irq_restore(flags) __hard_local_irq_restore(flags) 231 142 #endif /* !CONFIG_IPIPE_TRACE_IRQSOFF */ 232 143 233 144 #else /* CONFIG_IPIPE */ 234 145 235 - static inline void raw_local_irq_disable(void) 236 - { 237 - bfin_cli(); 238 - } 239 - static inline void raw_local_irq_enable(void) 240 - { 241 - bfin_sti(bfin_irq_flags); 242 - } 146 + /* 147 + * Direct interface to linux/irqflags.h. 148 + */ 149 + #define arch_local_save_flags() hard_local_save_flags() 150 + #define arch_local_irq_save(flags) __hard_local_irq_save() 151 + #define arch_local_irq_restore(flags) __hard_local_irq_restore(flags) 152 + #define arch_local_irq_enable() __hard_local_irq_enable() 153 + #define arch_local_irq_disable() __hard_local_irq_disable() 154 + #define arch_irqs_disabled_flags(flags) hard_irqs_disabled_flags(flags) 155 + #define arch_irqs_disabled() hard_irqs_disabled() 243 156 244 - static inline unsigned long arch_local_save_flags(void) 245 - { 246 - return bfin_read_IMASK(); 247 - } 157 + /* 158 + * Interface to various arch routines that may be traced. 
159 + */ 160 + #define hard_local_irq_save() __hard_local_irq_save() 161 + #define hard_local_irq_restore(flags) __hard_local_irq_restore(flags) 162 + #define hard_local_irq_enable() __hard_local_irq_enable() 163 + #define hard_local_irq_disable() __hard_local_irq_disable() 248 164 249 - #define raw_local_save_flags(flags) do { (flags) = arch_local_save_flags(); } while (0) 250 - 251 - #define raw_irqs_disabled_flags(flags) (((flags) & ~0x3f) == 0) 252 - 253 - static inline unsigned long __raw_local_irq_save(void) 254 - { 255 - unsigned long flags = bfin_cli(); 256 - #ifdef CONFIG_DEBUG_HWERR 257 - bfin_sti(0x3f); 258 - #endif 259 - return flags; 260 - } 261 - #define raw_local_irq_save(flags) do { (flags) = __raw_local_irq_save(); } while (0) 262 - 263 - #define local_irq_save_hw(flags) raw_local_irq_save(flags) 264 - #define local_irq_restore_hw(flags) raw_local_irq_restore(flags) 265 - #define local_irq_enable_hw() raw_local_irq_enable() 266 - #define local_irq_disable_hw() raw_local_irq_disable() 267 - #define irqs_disabled_hw() irqs_disabled() 268 165 269 166 #endif /* !CONFIG_IPIPE */ 270 167 271 - static inline void raw_local_irq_restore(unsigned long flags) 272 - { 273 - if (!raw_irqs_disabled_flags(flags)) 274 - raw_local_irq_enable(); 275 - } 168 + /* 169 + * Raw interface to linux/irqflags.h. 170 + */ 171 + #define raw_local_save_flags(flags) do { (flags) = arch_local_save_flags(); } while (0) 172 + #define raw_local_irq_save(flags) do { (flags) = arch_local_irq_save(); } while (0) 173 + #define raw_local_irq_restore(flags) arch_local_irq_restore(flags) 174 + #define raw_local_irq_enable() arch_local_irq_enable() 175 + #define raw_local_irq_disable() arch_local_irq_disable() 176 + #define raw_irqs_disabled_flags(flags) arch_irqs_disabled_flags(flags) 177 + #define raw_irqs_disabled() arch_irqs_disabled() 276 178 277 179 #endif
+4 -4
arch/blackfin/include/asm/mmu_context.h
··· 97 97 } 98 98 99 99 #ifdef CONFIG_IPIPE 100 - #define lock_mm_switch(flags) local_irq_save_hw_cond(flags) 101 - #define unlock_mm_switch(flags) local_irq_restore_hw_cond(flags) 100 + #define lock_mm_switch(flags) flags = hard_local_irq_save_cond() 101 + #define unlock_mm_switch(flags) hard_local_irq_restore_cond(flags) 102 102 #else 103 103 #define lock_mm_switch(flags) do { (void)(flags); } while (0) 104 104 #define unlock_mm_switch(flags) do { (void)(flags); } while (0) ··· 205 205 } 206 206 207 207 #define ipipe_mm_switch_protect(flags) \ 208 - local_irq_save_hw_cond(flags) 208 + flags = hard_local_irq_save_cond() 209 209 210 210 #define ipipe_mm_switch_unprotect(flags) \ 211 - local_irq_restore_hw_cond(flags) 211 + hard_local_irq_restore_cond(flags) 212 212 213 213 #endif
+2 -2
arch/blackfin/include/asm/system.h
··· 117 117 unsigned long tmp = 0; 118 118 unsigned long flags; 119 119 120 - local_irq_save_hw(flags); 120 + flags = hard_local_irq_save(); 121 121 122 122 switch (size) { 123 123 case 1: ··· 139 139 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory"); 140 140 break; 141 141 } 142 - local_irq_restore_hw(flags); 142 + hard_local_irq_restore(flags); 143 143 return tmp; 144 144 } 145 145
+51 -51
arch/blackfin/kernel/bfin_gpio.c
··· 349 349 void set_gpio_ ## name(unsigned gpio, unsigned short arg) \ 350 350 { \ 351 351 unsigned long flags; \ 352 - local_irq_save_hw(flags); \ 352 + flags = hard_local_irq_save(); \ 353 353 if (arg) \ 354 354 gpio_array[gpio_bank(gpio)]->name |= gpio_bit(gpio); \ 355 355 else \ 356 356 gpio_array[gpio_bank(gpio)]->name &= ~gpio_bit(gpio); \ 357 357 AWA_DUMMY_READ(name); \ 358 - local_irq_restore_hw(flags); \ 358 + hard_local_irq_restore(flags); \ 359 359 } \ 360 360 EXPORT_SYMBOL(set_gpio_ ## name); 361 361 ··· 371 371 { \ 372 372 unsigned long flags; \ 373 373 if (ANOMALY_05000311 || ANOMALY_05000323) \ 374 - local_irq_save_hw(flags); \ 374 + flags = hard_local_irq_save(); \ 375 375 if (arg) \ 376 376 gpio_array[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \ 377 377 else \ 378 378 gpio_array[gpio_bank(gpio)]->name ## _clear = gpio_bit(gpio); \ 379 379 if (ANOMALY_05000311 || ANOMALY_05000323) { \ 380 380 AWA_DUMMY_READ(name); \ 381 - local_irq_restore_hw(flags); \ 381 + hard_local_irq_restore(flags); \ 382 382 } \ 383 383 } \ 384 384 EXPORT_SYMBOL(set_gpio_ ## name); ··· 391 391 { 392 392 unsigned long flags; 393 393 if (ANOMALY_05000311 || ANOMALY_05000323) 394 - local_irq_save_hw(flags); 394 + flags = hard_local_irq_save(); 395 395 gpio_array[gpio_bank(gpio)]->toggle = gpio_bit(gpio); 396 396 if (ANOMALY_05000311 || ANOMALY_05000323) { 397 397 AWA_DUMMY_READ(toggle); 398 - local_irq_restore_hw(flags); 398 + hard_local_irq_restore(flags); 399 399 } 400 400 } 401 401 EXPORT_SYMBOL(set_gpio_toggle); ··· 408 408 { \ 409 409 unsigned long flags; \ 410 410 if (ANOMALY_05000311 || ANOMALY_05000323) \ 411 - local_irq_save_hw(flags); \ 411 + flags = hard_local_irq_save(); \ 412 412 gpio_array[gpio_bank(gpio)]->name = arg; \ 413 413 if (ANOMALY_05000311 || ANOMALY_05000323) { \ 414 414 AWA_DUMMY_READ(name); \ 415 - local_irq_restore_hw(flags); \ 415 + hard_local_irq_restore(flags); \ 416 416 } \ 417 417 } \ 418 418 EXPORT_SYMBOL(set_gpiop_ ## name); ··· 433 433 
unsigned long flags; \ 434 434 unsigned short ret; \ 435 435 if (ANOMALY_05000311 || ANOMALY_05000323) \ 436 - local_irq_save_hw(flags); \ 436 + flags = hard_local_irq_save(); \ 437 437 ret = 0x01 & (gpio_array[gpio_bank(gpio)]->name >> gpio_sub_n(gpio)); \ 438 438 if (ANOMALY_05000311 || ANOMALY_05000323) { \ 439 439 AWA_DUMMY_READ(name); \ 440 - local_irq_restore_hw(flags); \ 440 + hard_local_irq_restore(flags); \ 441 441 } \ 442 442 return ret; \ 443 443 } \ ··· 460 460 unsigned long flags; \ 461 461 unsigned short ret; \ 462 462 if (ANOMALY_05000311 || ANOMALY_05000323) \ 463 - local_irq_save_hw(flags); \ 463 + flags = hard_local_irq_save(); \ 464 464 ret = (gpio_array[gpio_bank(gpio)]->name); \ 465 465 if (ANOMALY_05000311 || ANOMALY_05000323) { \ 466 466 AWA_DUMMY_READ(name); \ 467 - local_irq_restore_hw(flags); \ 467 + hard_local_irq_restore(flags); \ 468 468 } \ 469 469 return ret; \ 470 470 } \ ··· 525 525 if (check_gpio(gpio) < 0) 526 526 return -EINVAL; 527 527 528 - local_irq_save_hw(flags); 528 + flags = hard_local_irq_save(); 529 529 if (ctrl) 530 530 reserve(wakeup, gpio); 531 531 else 532 532 unreserve(wakeup, gpio); 533 533 534 534 set_gpio_maskb(gpio, ctrl); 535 - local_irq_restore_hw(flags); 535 + hard_local_irq_restore(flags); 536 536 537 537 return 0; 538 538 } ··· 690 690 691 691 BUG_ON(ident >= MAX_RESOURCES); 692 692 693 - local_irq_save_hw(flags); 693 + flags = hard_local_irq_save(); 694 694 695 695 /* If a pin can be muxed as either GPIO or peripheral, make 696 696 * sure it is not already a GPIO pin when we request it. 
··· 701 701 printk(KERN_ERR 702 702 "%s: Peripheral %d is already reserved as GPIO by %s !\n", 703 703 __func__, ident, get_label(ident)); 704 - local_irq_restore_hw(flags); 704 + hard_local_irq_restore(flags); 705 705 return -EBUSY; 706 706 } 707 707 ··· 730 730 printk(KERN_ERR 731 731 "%s: Peripheral %d function %d is already reserved by %s !\n", 732 732 __func__, ident, P_FUNCT2MUX(per), get_label(ident)); 733 - local_irq_restore_hw(flags); 733 + hard_local_irq_restore(flags); 734 734 return -EBUSY; 735 735 } 736 736 } ··· 741 741 portmux_setup(per); 742 742 port_setup(ident, PERIPHERAL_USAGE); 743 743 744 - local_irq_restore_hw(flags); 744 + hard_local_irq_restore(flags); 745 745 set_label(ident, label); 746 746 747 747 return 0; ··· 780 780 if (!(per & P_DEFINED)) 781 781 return; 782 782 783 - local_irq_save_hw(flags); 783 + flags = hard_local_irq_save(); 784 784 785 785 if (unlikely(!is_reserved(peri, ident, 0))) { 786 - local_irq_restore_hw(flags); 786 + hard_local_irq_restore(flags); 787 787 return; 788 788 } 789 789 ··· 794 794 795 795 set_label(ident, "free"); 796 796 797 - local_irq_restore_hw(flags); 797 + hard_local_irq_restore(flags); 798 798 } 799 799 EXPORT_SYMBOL(peripheral_free); 800 800 ··· 828 828 if (check_gpio(gpio) < 0) 829 829 return -EINVAL; 830 830 831 - local_irq_save_hw(flags); 831 + flags = hard_local_irq_save(); 832 832 833 833 /* 834 834 * Allow that the identical GPIO can ··· 837 837 */ 838 838 839 839 if (cmp_label(gpio, label) == 0) { 840 - local_irq_restore_hw(flags); 840 + hard_local_irq_restore(flags); 841 841 return 0; 842 842 } 843 843 ··· 846 846 dump_stack(); 847 847 printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n", 848 848 gpio, get_label(gpio)); 849 - local_irq_restore_hw(flags); 849 + hard_local_irq_restore(flags); 850 850 return -EBUSY; 851 851 } 852 852 if (unlikely(is_reserved(peri, gpio, 1))) { ··· 855 855 printk(KERN_ERR 856 856 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n", 857 
857 gpio, get_label(gpio)); 858 - local_irq_restore_hw(flags); 858 + hard_local_irq_restore(flags); 859 859 return -EBUSY; 860 860 } 861 861 if (unlikely(is_reserved(gpio_irq, gpio, 1))) { ··· 871 871 reserve(gpio, gpio); 872 872 set_label(gpio, label); 873 873 874 - local_irq_restore_hw(flags); 874 + hard_local_irq_restore(flags); 875 875 876 876 port_setup(gpio, GPIO_USAGE); 877 877 ··· 888 888 889 889 might_sleep(); 890 890 891 - local_irq_save_hw(flags); 891 + flags = hard_local_irq_save(); 892 892 893 893 if (unlikely(!is_reserved(gpio, gpio, 0))) { 894 894 if (system_state == SYSTEM_BOOTING) 895 895 dump_stack(); 896 896 gpio_error(gpio); 897 - local_irq_restore_hw(flags); 897 + hard_local_irq_restore(flags); 898 898 return; 899 899 } 900 900 ··· 902 902 903 903 set_label(gpio, "free"); 904 904 905 - local_irq_restore_hw(flags); 905 + hard_local_irq_restore(flags); 906 906 } 907 907 EXPORT_SYMBOL(bfin_gpio_free); 908 908 ··· 913 913 { 914 914 unsigned long flags; 915 915 916 - local_irq_save_hw(flags); 916 + flags = hard_local_irq_save(); 917 917 918 918 /* 919 919 * Allow that the identical GPIO can ··· 922 922 */ 923 923 924 924 if (cmp_label(gpio, label) == 0) { 925 - local_irq_restore_hw(flags); 925 + hard_local_irq_restore(flags); 926 926 return 0; 927 927 } 928 928 929 929 if (unlikely(is_reserved(special_gpio, gpio, 1))) { 930 - local_irq_restore_hw(flags); 930 + hard_local_irq_restore(flags); 931 931 printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n", 932 932 gpio, get_label(gpio)); 933 933 934 934 return -EBUSY; 935 935 } 936 936 if (unlikely(is_reserved(peri, gpio, 1))) { 937 - local_irq_restore_hw(flags); 937 + hard_local_irq_restore(flags); 938 938 printk(KERN_ERR 939 939 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n", 940 940 gpio, get_label(gpio)); ··· 946 946 reserve(peri, gpio); 947 947 948 948 set_label(gpio, label); 949 - local_irq_restore_hw(flags); 949 + hard_local_irq_restore(flags); 950 950 
port_setup(gpio, GPIO_USAGE); 951 951 952 952 return 0; ··· 959 959 960 960 might_sleep(); 961 961 962 - local_irq_save_hw(flags); 962 + flags = hard_local_irq_save(); 963 963 964 964 if (unlikely(!is_reserved(special_gpio, gpio, 0))) { 965 965 gpio_error(gpio); 966 - local_irq_restore_hw(flags); 966 + hard_local_irq_restore(flags); 967 967 return; 968 968 } 969 969 970 970 unreserve(special_gpio, gpio); 971 971 unreserve(peri, gpio); 972 972 set_label(gpio, "free"); 973 - local_irq_restore_hw(flags); 973 + hard_local_irq_restore(flags); 974 974 } 975 975 EXPORT_SYMBOL(bfin_special_gpio_free); 976 976 #endif ··· 983 983 if (check_gpio(gpio) < 0) 984 984 return -EINVAL; 985 985 986 - local_irq_save_hw(flags); 986 + flags = hard_local_irq_save(); 987 987 988 988 if (unlikely(is_reserved(peri, gpio, 1))) { 989 989 if (system_state == SYSTEM_BOOTING) ··· 991 991 printk(KERN_ERR 992 992 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n", 993 993 gpio, get_label(gpio)); 994 - local_irq_restore_hw(flags); 994 + hard_local_irq_restore(flags); 995 995 return -EBUSY; 996 996 } 997 997 if (unlikely(is_reserved(gpio, gpio, 1))) ··· 1002 1002 reserve(gpio_irq, gpio); 1003 1003 set_label(gpio, label); 1004 1004 1005 - local_irq_restore_hw(flags); 1005 + hard_local_irq_restore(flags); 1006 1006 1007 1007 port_setup(gpio, GPIO_USAGE); 1008 1008 ··· 1016 1016 if (check_gpio(gpio) < 0) 1017 1017 return; 1018 1018 1019 - local_irq_save_hw(flags); 1019 + flags = hard_local_irq_save(); 1020 1020 1021 1021 if (unlikely(!is_reserved(gpio_irq, gpio, 0))) { 1022 1022 if (system_state == SYSTEM_BOOTING) 1023 1023 dump_stack(); 1024 1024 gpio_error(gpio); 1025 - local_irq_restore_hw(flags); 1025 + hard_local_irq_restore(flags); 1026 1026 return; 1027 1027 } 1028 1028 ··· 1030 1030 1031 1031 set_label(gpio, "free"); 1032 1032 1033 - local_irq_restore_hw(flags); 1033 + hard_local_irq_restore(flags); 1034 1034 } 1035 1035 1036 1036 static inline void 
__bfin_gpio_direction_input(unsigned gpio) ··· 1052 1052 return -EINVAL; 1053 1053 } 1054 1054 1055 - local_irq_save_hw(flags); 1055 + flags = hard_local_irq_save(); 1056 1056 __bfin_gpio_direction_input(gpio); 1057 1057 AWA_DUMMY_READ(inen); 1058 - local_irq_restore_hw(flags); 1058 + hard_local_irq_restore(flags); 1059 1059 1060 1060 return 0; 1061 1061 } ··· 1070 1070 port_setup(gpio, GPIO_USAGE); 1071 1071 1072 1072 #ifdef CONFIG_BF54x 1073 - local_irq_save_hw(flags); 1073 + flags = hard_local_irq_save(); 1074 1074 __bfin_gpio_direction_input(gpio); 1075 - local_irq_restore_hw(flags); 1075 + hard_local_irq_restore(flags); 1076 1076 #endif 1077 1077 } 1078 1078 ··· 1094 1094 return -EINVAL; 1095 1095 } 1096 1096 1097 - local_irq_save_hw(flags); 1097 + flags = hard_local_irq_save(); 1098 1098 1099 1099 gpio_array[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio); 1100 1100 gpio_set_value(gpio, value); ··· 1105 1105 #endif 1106 1106 1107 1107 AWA_DUMMY_READ(dir); 1108 - local_irq_restore_hw(flags); 1108 + hard_local_irq_restore(flags); 1109 1109 1110 1110 return 0; 1111 1111 } ··· 1120 1120 1121 1121 if (unlikely(get_gpio_edge(gpio))) { 1122 1122 int ret; 1123 - local_irq_save_hw(flags); 1123 + flags = hard_local_irq_save(); 1124 1124 set_gpio_edge(gpio, 0); 1125 1125 ret = get_gpio_data(gpio); 1126 1126 set_gpio_edge(gpio, 1); 1127 - local_irq_restore_hw(flags); 1127 + hard_local_irq_restore(flags); 1128 1128 return ret; 1129 1129 } else 1130 1130 return get_gpio_data(gpio);
+4 -4
arch/blackfin/kernel/cplb-mpu/cplbmgr.c
··· 318 318 319 319 nr_cplb_flush[cpu]++; 320 320 321 - local_irq_save_hw(flags); 321 + flags = hard_local_irq_save(); 322 322 _disable_icplb(); 323 323 for (i = first_switched_icplb; i < MAX_CPLBS; i++) { 324 324 icplb_tbl[cpu][i].data = 0; ··· 332 332 bfin_write32(DCPLB_DATA0 + i * 4, 0); 333 333 } 334 334 _enable_dcplb(); 335 - local_irq_restore_hw(flags); 335 + hard_local_irq_restore(flags); 336 336 337 337 } 338 338 ··· 348 348 return; 349 349 } 350 350 351 - local_irq_save_hw(flags); 351 + flags = hard_local_irq_save(); 352 352 current_rwx_mask[cpu] = masks; 353 353 354 354 if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) { ··· 373 373 addr += PAGE_SIZE; 374 374 } 375 375 _enable_dcplb(); 376 - local_irq_restore_hw(flags); 376 + hard_local_irq_restore(flags); 377 377 }
+19 -19
arch/blackfin/kernel/ipipe.c
··· 219 219 220 220 ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs); 221 221 222 - local_irq_save_hw(flags); 222 + flags = hard_local_irq_save(); 223 223 224 224 if (!__ipipe_root_domain_p) { 225 - local_irq_restore_hw(flags); 225 + hard_local_irq_restore(flags); 226 226 return 1; 227 227 } 228 228 ··· 230 230 if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0) 231 231 __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT); 232 232 233 - local_irq_restore_hw(flags); 233 + hard_local_irq_restore(flags); 234 234 235 235 return -ret; 236 236 } ··· 239 239 { 240 240 unsigned long flags; 241 241 242 - local_irq_save_hw(flags); 242 + flags = hard_local_irq_save(); 243 243 244 244 return flags; 245 245 } 246 246 247 247 void ipipe_critical_exit(unsigned long flags) 248 248 { 249 - local_irq_restore_hw(flags); 249 + hard_local_irq_restore(flags); 250 250 } 251 251 252 252 static void __ipipe_no_irqtail(void) ··· 279 279 return -EINVAL; 280 280 #endif 281 281 282 - local_irq_save_hw(flags); 282 + flags = hard_local_irq_save(); 283 283 __ipipe_handle_irq(irq, NULL); 284 - local_irq_restore_hw(flags); 284 + hard_local_irq_restore(flags); 285 285 286 286 return 1; 287 287 } ··· 293 293 294 294 BUG_ON(irqs_disabled()); 295 295 296 - local_irq_save_hw(flags); 296 + flags = hard_local_irq_save(); 297 297 298 298 if (irq_tail_hook) 299 299 irq_tail_hook(); ··· 303 303 if (ipipe_root_cpudom_var(irqpend_himask) != 0) 304 304 __ipipe_sync_pipeline(IPIPE_IRQMASK_ANY); 305 305 306 - local_irq_restore_hw(flags); 306 + hard_local_irq_restore(flags); 307 307 } 308 308 309 309 void ___ipipe_sync_pipeline(unsigned long syncmask) ··· 344 344 { 345 345 unsigned long *p, flags; 346 346 347 - local_irq_save_hw(flags); 347 + flags = hard_local_irq_save(); 348 348 p = &__ipipe_root_status; 349 349 __set_bit(IPIPE_STALL_FLAG, p); 350 - local_irq_restore_hw(flags); 350 + hard_local_irq_restore(flags); 351 351 } 352 352 EXPORT_SYMBOL(__ipipe_stall_root); 353 353 ··· 356 356 unsigned long *p, flags; 357 
357 int x; 358 358 359 - local_irq_save_hw(flags); 359 + flags = hard_local_irq_save(); 360 360 p = &__ipipe_root_status; 361 361 x = __test_and_set_bit(IPIPE_STALL_FLAG, p); 362 - local_irq_restore_hw(flags); 362 + hard_local_irq_restore(flags); 363 363 364 364 return x; 365 365 } ··· 371 371 unsigned long flags; 372 372 int x; 373 373 374 - local_irq_save_hw_smp(flags); 374 + flags = hard_local_irq_save_smp(); 375 375 p = &__ipipe_root_status; 376 376 x = test_bit(IPIPE_STALL_FLAG, p); 377 - local_irq_restore_hw_smp(flags); 377 + hard_local_irq_restore_smp(flags); 378 378 379 379 return x; 380 380 } ··· 384 384 { 385 385 unsigned long *p, flags; 386 386 387 - local_irq_save_hw(flags); 387 + flags = hard_local_irq_save(); 388 388 p = &__ipipe_root_status; 389 389 __set_bit(IPIPE_SYNCDEFER_FLAG, p); 390 - local_irq_restore_hw(flags); 390 + hard_local_irq_restore(flags); 391 391 } 392 392 EXPORT_SYMBOL(__ipipe_lock_root); 393 393 ··· 395 395 { 396 396 unsigned long *p, flags; 397 397 398 - local_irq_save_hw(flags); 398 + flags = hard_local_irq_save(); 399 399 p = &__ipipe_root_status; 400 400 __clear_bit(IPIPE_SYNCDEFER_FLAG, p); 401 - local_irq_restore_hw(flags); 401 + hard_local_irq_restore(flags); 402 402 } 403 403 EXPORT_SYMBOL(__ipipe_unlock_root);
+2 -2
arch/blackfin/kernel/process.c
··· 65 65 #ifdef CONFIG_IPIPE 66 66 ipipe_suspend_domain(); 67 67 #endif 68 - local_irq_disable_hw(); 68 + hard_local_irq_disable(); 69 69 if (!need_resched()) 70 70 idle_with_irq_disabled(); 71 71 72 - local_irq_enable_hw(); 72 + hard_local_irq_enable(); 73 73 } 74 74 75 75 /*
+4 -4
arch/blackfin/mach-bf518/include/mach/pll.h
··· 18 18 if (val == bfin_read_PLL_CTL()) 19 19 return; 20 20 21 - local_irq_save_hw(flags); 21 + flags = hard_local_irq_save(); 22 22 /* Enable the PLL Wakeup bit in SIC IWR */ 23 23 iwr0 = bfin_read32(SIC_IWR0); 24 24 iwr1 = bfin_read32(SIC_IWR1); ··· 32 32 33 33 bfin_write32(SIC_IWR0, iwr0); 34 34 bfin_write32(SIC_IWR1, iwr1); 35 - local_irq_restore_hw(flags); 35 + hard_local_irq_restore(flags); 36 36 } 37 37 38 38 /* Writing to VR_CTL initiates a PLL relock sequence. */ ··· 43 43 if (val == bfin_read_VR_CTL()) 44 44 return; 45 45 46 - local_irq_save_hw(flags); 46 + flags = hard_local_irq_save(); 47 47 /* Enable the PLL Wakeup bit in SIC IWR */ 48 48 iwr0 = bfin_read32(SIC_IWR0); 49 49 iwr1 = bfin_read32(SIC_IWR1); ··· 57 57 58 58 bfin_write32(SIC_IWR0, iwr0); 59 59 bfin_write32(SIC_IWR1, iwr1); 60 - local_irq_restore_hw(flags); 60 + hard_local_irq_restore(flags); 61 61 } 62 62 63 63 #endif /* _MACH_PLL_H */
+4 -4
arch/blackfin/mach-bf527/include/mach/pll.h
··· 18 18 if (val == bfin_read_PLL_CTL()) 19 19 return; 20 20 21 - local_irq_save_hw(flags); 21 + flags = hard_local_irq_save(); 22 22 /* Enable the PLL Wakeup bit in SIC IWR */ 23 23 iwr0 = bfin_read32(SIC_IWR0); 24 24 iwr1 = bfin_read32(SIC_IWR1); ··· 32 32 33 33 bfin_write32(SIC_IWR0, iwr0); 34 34 bfin_write32(SIC_IWR1, iwr1); 35 - local_irq_restore_hw(flags); 35 + hard_local_irq_restore(flags); 36 36 } 37 37 38 38 /* Writing to VR_CTL initiates a PLL relock sequence. */ ··· 43 43 if (val == bfin_read_VR_CTL()) 44 44 return; 45 45 46 - local_irq_save_hw(flags); 46 + flags = hard_local_irq_save(); 47 47 /* Enable the PLL Wakeup bit in SIC IWR */ 48 48 iwr0 = bfin_read32(SIC_IWR0); 49 49 iwr1 = bfin_read32(SIC_IWR1); ··· 57 57 58 58 bfin_write32(SIC_IWR0, iwr0); 59 59 bfin_write32(SIC_IWR1, iwr1); 60 - local_irq_restore_hw(flags); 60 + hard_local_irq_restore(flags); 61 61 } 62 62 63 63 #endif /* _MACH_PLL_H */
+4 -4
arch/blackfin/mach-bf533/include/mach/fio_flag.h
··· 15 15 static inline void bfin_write_FIO_FLAG_##name(unsigned short val) \ 16 16 { \ 17 17 unsigned long flags; \ 18 - local_irq_save_hw(flags); \ 18 + flags = hard_local_irq_save(); \ 19 19 bfin_write16(FIO_FLAG_##name, val); \ 20 20 bfin_read_CHIPID(); \ 21 - local_irq_restore_hw(flags); \ 21 + hard_local_irq_restore(flags); \ 22 22 } 23 23 BFIN_WRITE_FIO_FLAG(D) 24 24 BFIN_WRITE_FIO_FLAG(C) ··· 30 30 { \ 31 31 unsigned long flags; \ 32 32 u16 ret; \ 33 - local_irq_save_hw(flags); \ 33 + flags = hard_local_irq_save(); \ 34 34 ret = bfin_read16(FIO_FLAG_##name); \ 35 35 bfin_read_CHIPID(); \ 36 - local_irq_restore_hw(flags); \ 36 + hard_local_irq_restore(flags); \ 37 37 return ret; \ 38 38 } 39 39 BFIN_READ_FIO_FLAG(D)
+4 -4
arch/blackfin/mach-bf533/include/mach/pll.h
··· 18 18 if (val == bfin_read_PLL_CTL()) 19 19 return; 20 20 21 - local_irq_save_hw(flags); 21 + flags = hard_local_irq_save(); 22 22 /* Enable the PLL Wakeup bit in SIC IWR */ 23 23 iwr = bfin_read32(SIC_IWR); 24 24 /* Only allow PPL Wakeup) */ ··· 29 29 asm("IDLE;"); 30 30 31 31 bfin_write32(SIC_IWR, iwr); 32 - local_irq_restore_hw(flags); 32 + hard_local_irq_restore(flags); 33 33 } 34 34 35 35 /* Writing to VR_CTL initiates a PLL relock sequence. */ ··· 40 40 if (val == bfin_read_VR_CTL()) 41 41 return; 42 42 43 - local_irq_save_hw(flags); 43 + flags = hard_local_irq_save(); 44 44 /* Enable the PLL Wakeup bit in SIC IWR */ 45 45 iwr = bfin_read32(SIC_IWR); 46 46 /* Only allow PPL Wakeup) */ ··· 51 51 asm("IDLE;"); 52 52 53 53 bfin_write32(SIC_IWR, iwr); 54 - local_irq_restore_hw(flags); 54 + hard_local_irq_restore(flags); 55 55 } 56 56 57 57 #endif /* _MACH_PLL_H */
+4 -4
arch/blackfin/mach-bf537/include/mach/pll.h
··· 18 18 if (val == bfin_read_PLL_CTL()) 19 19 return; 20 20 21 - local_irq_save_hw(flags); 21 + flags = hard_local_irq_save(); 22 22 /* Enable the PLL Wakeup bit in SIC IWR */ 23 23 iwr = bfin_read32(SIC_IWR); 24 24 /* Only allow PPL Wakeup) */ ··· 29 29 asm("IDLE;"); 30 30 31 31 bfin_write32(SIC_IWR, iwr); 32 - local_irq_restore_hw(flags); 32 + hard_local_irq_restore(flags); 33 33 } 34 34 35 35 /* Writing to VR_CTL initiates a PLL relock sequence. */ ··· 40 40 if (val == bfin_read_VR_CTL()) 41 41 return; 42 42 43 - local_irq_save_hw(flags); 43 + flags = hard_local_irq_save(); 44 44 /* Enable the PLL Wakeup bit in SIC IWR */ 45 45 iwr = bfin_read32(SIC_IWR); 46 46 /* Only allow PPL Wakeup) */ ··· 51 51 asm("IDLE;"); 52 52 53 53 bfin_write32(SIC_IWR, iwr); 54 - local_irq_restore_hw(flags); 54 + hard_local_irq_restore(flags); 55 55 } 56 56 57 57 #endif /* _MACH_PLL_H */
+4 -4
arch/blackfin/mach-bf538/include/mach/pll.h
··· 18 18 if (val == bfin_read_PLL_CTL()) 19 19 return; 20 20 21 - local_irq_save_hw(flags); 21 + flags = hard_local_irq_save(); 22 22 /* Enable the PLL Wakeup bit in SIC IWR */ 23 23 iwr0 = bfin_read32(SIC_IWR0); 24 24 iwr1 = bfin_read32(SIC_IWR1); ··· 32 32 33 33 bfin_write32(SIC_IWR0, iwr0); 34 34 bfin_write32(SIC_IWR1, iwr1); 35 - local_irq_restore_hw(flags); 35 + hard_local_irq_restore(flags); 36 36 } 37 37 38 38 /* Writing to VR_CTL initiates a PLL relock sequence. */ ··· 43 43 if (val == bfin_read_VR_CTL()) 44 44 return; 45 45 46 - local_irq_save_hw(flags); 46 + flags = hard_local_irq_save(); 47 47 /* Enable the PLL Wakeup bit in SIC IWR */ 48 48 iwr0 = bfin_read32(SIC_IWR0); 49 49 iwr1 = bfin_read32(SIC_IWR1); ··· 57 57 58 58 bfin_write32(SIC_IWR0, iwr0); 59 59 bfin_write32(SIC_IWR1, iwr1); 60 - local_irq_restore_hw(flags); 60 + hard_local_irq_restore(flags); 61 61 } 62 62 63 63 #endif /* _MACH_PLL_H */
+4 -4
arch/blackfin/mach-bf548/include/mach/pll.h
··· 18 18 if (val == bfin_read_PLL_CTL()) 19 19 return; 20 20 21 - local_irq_save_hw(flags); 21 + flags = hard_local_irq_save(); 22 22 /* Enable the PLL Wakeup bit in SIC IWR */ 23 23 iwr0 = bfin_read32(SIC_IWR0); 24 24 iwr1 = bfin_read32(SIC_IWR1); ··· 35 35 bfin_write32(SIC_IWR0, iwr0); 36 36 bfin_write32(SIC_IWR1, iwr1); 37 37 bfin_write32(SIC_IWR2, iwr2); 38 - local_irq_restore_hw(flags); 38 + hard_local_irq_restore(flags); 39 39 } 40 40 41 41 /* Writing to VR_CTL initiates a PLL relock sequence. */ ··· 46 46 if (val == bfin_read_VR_CTL()) 47 47 return; 48 48 49 - local_irq_save_hw(flags); 49 + flags = hard_local_irq_save(); 50 50 /* Enable the PLL Wakeup bit in SIC IWR */ 51 51 iwr0 = bfin_read32(SIC_IWR0); 52 52 iwr1 = bfin_read32(SIC_IWR1); ··· 63 63 bfin_write32(SIC_IWR0, iwr0); 64 64 bfin_write32(SIC_IWR1, iwr1); 65 65 bfin_write32(SIC_IWR2, iwr2); 66 - local_irq_restore_hw(flags); 66 + hard_local_irq_restore(flags); 67 67 } 68 68 69 69 #endif /* _MACH_PLL_H */
+4 -4
arch/blackfin/mach-bf561/include/mach/pll.h
··· 18 18 if (val == bfin_read_PLL_CTL()) 19 19 return; 20 20 21 - local_irq_save_hw(flags); 21 + flags = hard_local_irq_save(); 22 22 /* Enable the PLL Wakeup bit in SIC IWR */ 23 23 iwr0 = bfin_read32(SICA_IWR0); 24 24 iwr1 = bfin_read32(SICA_IWR1); ··· 32 32 33 33 bfin_write32(SICA_IWR0, iwr0); 34 34 bfin_write32(SICA_IWR1, iwr1); 35 - local_irq_restore_hw(flags); 35 + hard_local_irq_restore(flags); 36 36 } 37 37 38 38 /* Writing to VR_CTL initiates a PLL relock sequence. */ ··· 43 43 if (val == bfin_read_VR_CTL()) 44 44 return; 45 45 46 - local_irq_save_hw(flags); 46 + flags = hard_local_irq_save(); 47 47 /* Enable the PLL Wakeup bit in SIC IWR */ 48 48 iwr0 = bfin_read32(SICA_IWR0); 49 49 iwr1 = bfin_read32(SICA_IWR1); ··· 57 57 58 58 bfin_write32(SICA_IWR0, iwr0); 59 59 bfin_write32(SICA_IWR1, iwr1); 60 - local_irq_restore_hw(flags); 60 + hard_local_irq_restore(flags); 61 61 } 62 62 63 63 #endif /* _MACH_PLL_H */
+2 -2
arch/blackfin/mach-common/cpufreq.c
··· 134 134 135 135 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 136 136 if (cpu == CPUFREQ_CPU) { 137 - local_irq_save_hw(flags); 137 + flags = hard_local_irq_save(); 138 138 plldiv = (bfin_read_PLL_DIV() & SSEL) | 139 139 dpm_state_table[index].csel; 140 140 bfin_write_PLL_DIV(plldiv); ··· 155 155 loops_per_jiffy = cpufreq_scale(lpj_ref, 156 156 lpj_ref_freq, freqs.new); 157 157 } 158 - local_irq_restore_hw(flags); 158 + hard_local_irq_restore(flags); 159 159 } 160 160 /* TODO: just test case for cycles clock source, remove later */ 161 161 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+12 -12
arch/blackfin/mach-common/ints-priority.c
··· 132 132 static void bfin_core_mask_irq(unsigned int irq) 133 133 { 134 134 bfin_irq_flags &= ~(1 << irq); 135 - if (!irqs_disabled_hw()) 136 - local_irq_enable_hw(); 135 + if (!hard_irqs_disabled()) 136 + hard_local_irq_enable(); 137 137 } 138 138 139 139 static void bfin_core_unmask_irq(unsigned int irq) ··· 148 148 * local_irq_enable just does "STI bfin_irq_flags", so it's exactly 149 149 * what we need. 150 150 */ 151 - if (!irqs_disabled_hw()) 152 - local_irq_enable_hw(); 151 + if (!hard_irqs_disabled()) 152 + hard_local_irq_enable(); 153 153 return; 154 154 } 155 155 ··· 158 158 unsigned long flags; 159 159 160 160 #ifdef CONFIG_BF53x 161 - local_irq_save_hw(flags); 161 + flags = hard_local_irq_save(); 162 162 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() & 163 163 ~(1 << SIC_SYSIRQ(irq))); 164 164 #else 165 165 unsigned mask_bank, mask_bit; 166 - local_irq_save_hw(flags); 166 + flags = hard_local_irq_save(); 167 167 mask_bank = SIC_SYSIRQ(irq) / 32; 168 168 mask_bit = SIC_SYSIRQ(irq) % 32; 169 169 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) & ··· 173 173 ~(1 << mask_bit)); 174 174 #endif 175 175 #endif 176 - local_irq_restore_hw(flags); 176 + hard_local_irq_restore(flags); 177 177 } 178 178 179 179 #ifdef CONFIG_SMP ··· 186 186 unsigned long flags; 187 187 188 188 #ifdef CONFIG_BF53x 189 - local_irq_save_hw(flags); 189 + flags = hard_local_irq_save(); 190 190 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | 191 191 (1 << SIC_SYSIRQ(irq))); 192 192 #else 193 193 unsigned mask_bank, mask_bit; 194 - local_irq_save_hw(flags); 194 + flags = hard_local_irq_save(); 195 195 mask_bank = SIC_SYSIRQ(irq) / 32; 196 196 mask_bit = SIC_SYSIRQ(irq) % 32; 197 197 #ifdef CONFIG_SMP ··· 207 207 (1 << mask_bit)); 208 208 #endif 209 209 #endif 210 - local_irq_restore_hw(flags); 210 + hard_local_irq_restore(flags); 211 211 } 212 212 213 213 #ifdef CONFIG_SMP ··· 264 264 break; 265 265 } 266 266 267 - local_irq_save_hw(flags); 267 + flags = hard_local_irq_save(); 
268 268 269 269 if (state) { 270 270 bfin_sic_iwr[bank] |= (1 << bit); ··· 275 275 vr_wakeup &= ~wakeup; 276 276 } 277 277 278 - local_irq_restore_hw(flags); 278 + hard_local_irq_restore(flags); 279 279 280 280 return 0; 281 281 }
+5 -5
arch/blackfin/mach-common/pm.c
··· 25 25 { 26 26 unsigned long flags; 27 27 28 - local_irq_save_hw(flags); 28 + flags = hard_local_irq_save(); 29 29 bfin_pm_standby_setup(); 30 30 31 31 #ifdef CONFIG_PM_BFIN_SLEEP_DEEPER ··· 56 56 bfin_write_SIC_IWR(IWR_DISABLE_ALL); 57 57 #endif 58 58 59 - local_irq_restore_hw(flags); 59 + hard_local_irq_restore(flags); 60 60 } 61 61 62 62 int bf53x_suspend_l1_mem(unsigned char *memptr) ··· 149 149 wakeup |= GPWE; 150 150 #endif 151 151 152 - local_irq_save_hw(flags); 152 + flags = hard_local_irq_save(); 153 153 154 154 ret = blackfin_dma_suspend(); 155 155 156 156 if (ret) { 157 - local_irq_restore_hw(flags); 157 + hard_local_irq_restore(flags); 158 158 kfree(memptr); 159 159 return ret; 160 160 } ··· 178 178 bfin_gpio_pm_hibernate_restore(); 179 179 blackfin_dma_resume(); 180 180 181 - local_irq_restore_hw(flags); 181 + hard_local_irq_restore(flags); 182 182 kfree(memptr); 183 183 184 184 return 0;