Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[POWERPC] Lazy interrupt disabling for 64-bit machines

This implements a lazy strategy for disabling interrupts. This means
that local_irq_disable() et al. just clear the 'interrupts are
enabled' flag in the paca. If an interrupt comes along, the interrupt
entry code notices that interrupts are supposed to be disabled, and
clears the EE bit in SRR1, clears the 'interrupts are hard-enabled'
flag in the paca, and returns. This means that interrupts only
actually get hard-disabled in the processor when an interrupt
arrives while they are soft-disabled.

When interrupts are enabled by local_irq_enable() et al., the code
sets the interrupts-enabled flag in the paca, and then checks whether
interrupts got hard-disabled. If so, it also sets the EE bit in the
MSR to hard-enable the interrupts.

This has the potential to improve performance, and also makes it
easier to make a kernel that can boot on iSeries and on other 64-bit
machines, since this lazy-disable strategy is very similar to the
soft-disable strategy that iSeries already uses.

This version renames paca->proc_enabled to paca->soft_enabled, and
changes a couple of soft-disables in the kexec code to hard-disables,
which should fix the crash that Michael Ellerman saw. This doesn't
yet use a reserved CR field for the soft_enabled and hard_enabled
flags. This applies on top of Stephen Rothwell's patches to make it
possible to build a combined iSeries/other kernel.

Signed-off-by: Paul Mackerras <paulus@samba.org>

+160 -111
+2 -1
arch/powerpc/kernel/asm-offsets.c
··· 118 118 DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr)); 119 119 DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1)); 120 120 DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc)); 121 - DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled)); 121 + DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); 122 + DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled)); 122 123 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); 123 124 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); 124 125 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+2 -2
arch/powerpc/kernel/crash.c
··· 111 111 if (!cpu_online(cpu)) 112 112 return; 113 113 114 - local_irq_disable(); 114 + hard_irq_disable(); 115 115 if (!cpu_isset(cpu, cpus_in_crash)) 116 116 crash_save_this_cpu(regs, cpu); 117 117 cpu_set(cpu, cpus_in_crash); ··· 289 289 * an SMP system. 290 290 * The kernel is broken so disable interrupts. 291 291 */ 292 - local_irq_disable(); 292 + hard_irq_disable(); 293 293 294 294 for_each_irq(irq) { 295 295 struct irq_desc *desc = irq_desc + irq;
+18 -21
arch/powerpc/kernel/entry_64.S
··· 87 87 addi r9,r1,STACK_FRAME_OVERHEAD 88 88 ld r11,exception_marker@toc(r2) 89 89 std r11,-16(r9) /* "regshere" marker */ 90 + li r10,1 91 + stb r10,PACASOFTIRQEN(r13) 92 + stb r10,PACAHARDIRQEN(r13) 93 + std r10,SOFTE(r1) 90 94 #ifdef CONFIG_PPC_ISERIES 91 95 BEGIN_FW_FTR_SECTION 92 96 /* Hack for handling interrupts when soft-enabling on iSeries */ ··· 98 94 andi. r10,r12,MSR_PR /* from kernel */ 99 95 crand 4*cr0+eq,4*cr1+eq,4*cr0+eq 100 96 beq hardware_interrupt_entry 101 - lbz r10,PACAPROCENABLED(r13) 102 - std r10,SOFTE(r1) 103 97 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 104 98 #endif 105 99 mfmsr r11 ··· 462 460 #endif 463 461 464 462 restore: 463 + ld r5,SOFTE(r1) 465 464 #ifdef CONFIG_PPC_ISERIES 466 465 BEGIN_FW_FTR_SECTION 467 - ld r5,SOFTE(r1) 468 466 cmpdi 0,r5,0 469 467 beq 4f 470 468 /* Check for pending interrupts (iSeries) */ ··· 474 472 beq+ 4f /* skip do_IRQ if no interrupts */ 475 473 476 474 li r3,0 477 - stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */ 475 + stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */ 478 476 ori r10,r10,MSR_EE 479 477 mtmsrd r10 /* hard-enable again */ 480 478 addi r3,r1,STACK_FRAME_OVERHEAD 481 479 bl .do_IRQ 482 480 b .ret_from_except_lite /* loop back and handle more */ 483 - 484 - 4: stb r5,PACAPROCENABLED(r13) 481 + 4: 485 482 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 486 483 #endif 484 + stb r5,PACASOFTIRQEN(r13) 487 485 488 486 ld r3,_MSR(r1) 489 487 andi. r0,r3,MSR_RI ··· 540 538 /* Check that preempt_count() == 0 and interrupts are enabled */ 541 539 lwz r8,TI_PREEMPT(r9) 542 540 cmpwi cr1,r8,0 543 - #ifdef CONFIG_PPC_ISERIES 544 - BEGIN_FW_FTR_SECTION 545 541 ld r0,SOFTE(r1) 546 542 cmpdi r0,0 547 - END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 548 - #endif 549 - BEGIN_FW_FTR_SECTION 550 - andi. 
r0,r3,MSR_EE 551 - END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) 552 543 crandc eq,cr1*4+eq,eq 553 544 bne restore 554 545 /* here we are preempting the current task */ 555 546 1: 556 - #ifdef CONFIG_PPC_ISERIES 557 - BEGIN_FW_FTR_SECTION 558 547 li r0,1 559 - stb r0,PACAPROCENABLED(r13) 560 - END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 561 - #endif 548 + stb r0,PACASOFTIRQEN(r13) 549 + stb r0,PACAHARDIRQEN(r13) 562 550 ori r10,r10,MSR_EE 563 551 mtmsrd r10,1 /* reenable interrupts */ 564 552 bl .preempt_schedule ··· 631 639 /* There is no way it is acceptable to get here with interrupts enabled, 632 640 * check it with the asm equivalent of WARN_ON 633 641 */ 634 - mfmsr r6 635 - andi. r0,r6,MSR_EE 642 + lbz r0,PACASOFTIRQEN(r13) 636 643 1: tdnei r0,0 637 644 .section __bug_table,"a" 638 645 .llong 1b,__LINE__ + 0x1000000, 1f, 2f ··· 640 649 1: .asciz __FILE__ 641 650 2: .asciz "enter_rtas" 642 651 .previous 643 - 652 + 653 + /* Hard-disable interrupts */ 654 + mfmsr r6 655 + rldicl r7,r6,48,1 656 + rotldi r7,r7,16 657 + mtmsrd r7,1 658 + 644 659 /* Unfortunately, the stack pointer and the MSR are also clobbered, 645 660 * so they are saved in the PACA which allows us to restore 646 661 * our original state after RTAS returns.
+77 -33
arch/powerpc/kernel/head_64.S
··· 35 35 #include <asm/thread_info.h> 36 36 #include <asm/firmware.h> 37 37 38 - #ifdef CONFIG_PPC_ISERIES 39 38 #define DO_SOFT_DISABLE 40 - #endif 41 39 42 40 /* 43 41 * We layout physical memory as follows: ··· 306 308 std r9,_LINK(r1); \ 307 309 mfctr r10; /* save CTR in stackframe */ \ 308 310 std r10,_CTR(r1); \ 311 + lbz r10,PACASOFTIRQEN(r13); \ 309 312 mfspr r11,SPRN_XER; /* save XER in stackframe */ \ 313 + std r10,SOFTE(r1); \ 310 314 std r11,_XER(r1); \ 311 315 li r9,(n)+1; \ 312 316 std r9,_TRAP(r1); /* set trap number */ \ ··· 343 343 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) 344 344 345 345 346 + #define MASKABLE_EXCEPTION_PSERIES(n, label) \ 347 + . = n; \ 348 + .globl label##_pSeries; \ 349 + label##_pSeries: \ 350 + HMT_MEDIUM; \ 351 + mtspr SPRN_SPRG1,r13; /* save r13 */ \ 352 + mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ 353 + std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \ 354 + std r10,PACA_EXGEN+EX_R10(r13); \ 355 + lbz r10,PACASOFTIRQEN(r13); \ 356 + mfcr r9; \ 357 + cmpwi r10,0; \ 358 + beq masked_interrupt; \ 359 + mfspr r10,SPRN_SPRG1; \ 360 + std r10,PACA_EXGEN+EX_R13(r13); \ 361 + std r11,PACA_EXGEN+EX_R11(r13); \ 362 + std r12,PACA_EXGEN+EX_R12(r13); \ 363 + clrrdi r12,r13,32; /* get high part of &label */ \ 364 + mfmsr r10; \ 365 + mfspr r11,SPRN_SRR0; /* save SRR0 */ \ 366 + LOAD_HANDLER(r12,label##_common) \ 367 + ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ 368 + mtspr SPRN_SRR0,r12; \ 369 + mfspr r12,SPRN_SRR1; /* and SRR1 */ \ 370 + mtspr SPRN_SRR1,r10; \ 371 + rfid; \ 372 + b . 
/* prevent speculative execution */ 373 + 346 374 #define STD_EXCEPTION_ISERIES(n, label, area) \ 347 375 .globl label##_iSeries; \ 348 376 label##_iSeries: \ ··· 386 358 HMT_MEDIUM; \ 387 359 mtspr SPRN_SPRG1,r13; /* save r13 */ \ 388 360 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ 389 - lbz r10,PACAPROCENABLED(r13); \ 361 + lbz r10,PACASOFTIRQEN(r13); \ 390 362 cmpwi 0,r10,0; \ 391 363 beq- label##_iSeries_masked; \ 392 364 EXCEPTION_PROLOG_ISERIES_2; \ 393 365 b label##_common; \ 394 366 395 - #ifdef DO_SOFT_DISABLE 367 + #ifdef CONFIG_PPC_ISERIES 396 368 #define DISABLE_INTS \ 397 - BEGIN_FW_FTR_SECTION; \ 398 - lbz r10,PACAPROCENABLED(r13); \ 399 369 li r11,0; \ 400 - std r10,SOFTE(r1); \ 370 + stb r11,PACASOFTIRQEN(r13); \ 371 + BEGIN_FW_FTR_SECTION; \ 372 + stb r11,PACAHARDIRQEN(r13); \ 373 + END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \ 374 + BEGIN_FW_FTR_SECTION; \ 401 375 mfmsr r10; \ 402 - stb r11,PACAPROCENABLED(r13); \ 403 376 ori r10,r10,MSR_EE; \ 404 377 mtmsrd r10,1; \ 405 378 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 406 379 407 - #define ENABLE_INTS \ 408 - BEGIN_FW_FTR_SECTION; \ 409 - lbz r10,PACAPROCENABLED(r13); \ 410 - mfmsr r11; \ 411 - std r10,SOFTE(r1); \ 412 - ori r11,r11,MSR_EE; \ 413 - END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES); \ 414 - BEGIN_FW_FTR_SECTION; \ 415 - ld r12,_MSR(r1); \ 416 - mfmsr r11; \ 417 - rlwimi r11,r12,0,MSR_EE; \ 418 - END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \ 419 - mtmsrd r11,1 380 + #else 381 + #define DISABLE_INTS \ 382 + li r11,0; \ 383 + stb r11,PACASOFTIRQEN(r13); \ 384 + stb r11,PACAHARDIRQEN(r13) 420 385 421 - #else /* hard enable/disable interrupts */ 422 - #define DISABLE_INTS 386 + #endif /* CONFIG_PPC_ISERIES */ 423 387 424 388 #define ENABLE_INTS \ 425 389 ld r12,_MSR(r1); \ 426 390 mfmsr r11; \ 427 391 rlwimi r11,r12,0,MSR_EE; \ 428 392 mtmsrd r11,1 429 - 430 - #endif 431 393 432 394 #define STD_EXCEPTION_COMMON(trap, label, hdlr) \ 433 395 .align 7; \ ··· 559 541 mfspr r12,SPRN_SRR1 /* 
and SRR1 */ 560 542 b .slb_miss_realmode /* Rel. branch works in real mode */ 561 543 562 - STD_EXCEPTION_PSERIES(0x500, hardware_interrupt) 544 + MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) 563 545 STD_EXCEPTION_PSERIES(0x600, alignment) 564 546 STD_EXCEPTION_PSERIES(0x700, program_check) 565 547 STD_EXCEPTION_PSERIES(0x800, fp_unavailable) 566 - STD_EXCEPTION_PSERIES(0x900, decrementer) 548 + MASKABLE_EXCEPTION_PSERIES(0x900, decrementer) 567 549 STD_EXCEPTION_PSERIES(0xa00, trap_0a) 568 550 STD_EXCEPTION_PSERIES(0xb00, trap_0b) 569 551 ··· 615 597 /*** pSeries interrupt support ***/ 616 598 617 599 /* moved from 0xf00 */ 618 - STD_EXCEPTION_PSERIES(., performance_monitor) 600 + MASKABLE_EXCEPTION_PSERIES(., performance_monitor) 601 + 602 + /* 603 + * An interrupt came in while soft-disabled; clear EE in SRR1, 604 + * clear paca->hard_enabled and return. 605 + */ 606 + masked_interrupt: 607 + stb r10,PACAHARDIRQEN(r13) 608 + mtcrf 0x80,r9 609 + ld r9,PACA_EXGEN+EX_R9(r13) 610 + mfspr r10,SPRN_SRR1 611 + rldicl r10,r10,48,1 /* clear MSR_EE */ 612 + rotldi r10,r10,16 613 + mtspr SPRN_SRR1,r10 614 + ld r10,PACA_EXGEN+EX_R10(r13) 615 + mfspr r13,SPRN_SPRG1 616 + rfid 617 + b . 
619 618 620 619 .align 7 621 620 _GLOBAL(do_stab_bolted_pSeries) ··· 987 952 REST_8GPRS(2, r1) 988 953 989 954 mfmsr r10 990 - clrrdi r10,r10,2 /* clear RI (LE is 0 already) */ 955 + rldicl r10,r10,48,1 /* clear EE */ 956 + rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */ 991 957 mtmsrd r10,1 992 958 993 959 mtspr SPRN_SRR1,r12 ··· 1913 1877 /* enable MMU and jump to start_secondary */ 1914 1878 LOAD_REG_ADDR(r3, .start_secondary_prolog) 1915 1879 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) 1916 - #ifdef DO_SOFT_DISABLE 1880 + #ifdef CONFIG_PPC_ISERIES 1917 1881 BEGIN_FW_FTR_SECTION 1918 1882 ori r4,r4,MSR_EE 1919 1883 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 1920 1884 #endif 1885 + BEGIN_FW_FTR_SECTION 1886 + stb r7,PACASOFTIRQEN(r13) 1887 + stb r7,PACAHARDIRQEN(r13) 1888 + END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) 1889 + 1921 1890 mtspr SPRN_SRR0,r3 1922 1891 mtspr SPRN_SRR1,r4 1923 1892 rfid ··· 2060 2019 2061 2020 /* Load up the kernel context */ 2062 2021 5: 2063 - #ifdef DO_SOFT_DISABLE 2064 - BEGIN_FW_FTR_SECTION 2065 2022 li r5,0 2066 - stb r5,PACAPROCENABLED(r13) /* Soft Disabled */ 2023 + stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */ 2024 + #ifdef CONFIG_PPC_ISERIES 2025 + BEGIN_FW_FTR_SECTION 2067 2026 mfmsr r5 2068 2027 ori r5,r5,MSR_EE /* Hard Enabled */ 2069 2028 mtmsrd r5 2070 2029 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) 2071 2030 #endif 2031 + BEGIN_FW_FTR_SECTION 2032 + stb r5,PACAHARDIRQEN(r13) 2033 + END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) 2072 2034 2073 2035 bl .start_kernel 2074 2036
+7 -1
arch/powerpc/kernel/idle_power4.S
··· 30 30 beqlr 31 31 32 32 /* Go to NAP now */ 33 + mfmsr r7 34 + rldicl r0,r7,48,1 35 + rotldi r0,r0,16 36 + mtmsrd r0,1 /* hard-disable interrupts */ 37 + li r0,1 38 + stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ 39 + stb r0,PACAHARDIRQEN(r13) 33 40 BEGIN_FTR_SECTION 34 41 DSSALL 35 42 sync ··· 45 38 ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */ 46 39 ori r8,r8,_TLF_NAPPING /* so when we take an exception */ 47 40 std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */ 48 - mfmsr r7 49 41 ori r7,r7,MSR_EE 50 42 oris r7,r7,MSR_POW@h 51 43 1: sync
+23 -1
arch/powerpc/kernel/irq.c
··· 64 64 #include <asm/ptrace.h> 65 65 #include <asm/machdep.h> 66 66 #include <asm/udbg.h> 67 - #ifdef CONFIG_PPC_ISERIES 67 + #ifdef CONFIG_PPC64 68 68 #include <asm/paca.h> 69 + #include <asm/firmware.h> 69 70 #endif 70 71 71 72 int __irq_offset_value; ··· 96 95 EXPORT_SYMBOL(irq_desc); 97 96 98 97 int distribute_irqs = 1; 98 + 99 + void local_irq_restore(unsigned long en) 100 + { 101 + get_paca()->soft_enabled = en; 102 + if (!en) 103 + return; 104 + 105 + if (firmware_has_feature(FW_FEATURE_ISERIES)) { 106 + if (get_paca()->lppaca_ptr->int_dword.any_int) 107 + iseries_handle_interrupts(); 108 + return; 109 + } 110 + 111 + if (get_paca()->hard_enabled) 112 + return; 113 + /* need to hard-enable interrupts here */ 114 + get_paca()->hard_enabled = en; 115 + if ((int)mfspr(SPRN_DEC) < 0) 116 + mtspr(SPRN_DEC, 1); 117 + hard_irq_enable(); 118 + } 99 119 #endif /* CONFIG_PPC64 */ 100 120 101 121 int show_interrupts(struct seq_file *p, void *v)
+4
arch/powerpc/kernel/ppc_ksyms.c
··· 49 49 #include <asm/commproc.h> 50 50 #endif 51 51 52 + #ifdef CONFIG_PPC64 53 + EXPORT_SYMBOL(local_irq_restore); 54 + #endif 55 + 52 56 #ifdef CONFIG_PPC32 53 57 extern void transfer_to_handler(void); 54 58 extern void do_IRQ(struct pt_regs *regs);
+2 -2
arch/powerpc/kernel/setup_64.c
··· 223 223 { 224 224 struct paca_struct *lpaca = get_paca(); 225 225 226 - /* Mark enabled in PACA */ 227 - lpaca->proc_enabled = 0; 226 + /* Mark interrupts enabled in PACA */ 227 + lpaca->soft_enabled = 0; 228 228 229 229 /* Initialize hash table for that CPU */ 230 230 htab_initialize_secondary();
-6
arch/powerpc/platforms/iseries/ksyms.c
··· 19 19 EXPORT_SYMBOL(HvCall5); 20 20 EXPORT_SYMBOL(HvCall6); 21 21 EXPORT_SYMBOL(HvCall7); 22 - 23 - #ifdef CONFIG_SMP 24 - EXPORT_SYMBOL(local_get_flags); 25 - EXPORT_SYMBOL(local_irq_disable); 26 - EXPORT_SYMBOL(local_irq_restore); 27 - #endif
+2 -33
arch/powerpc/platforms/iseries/misc.S
··· 19 19 20 20 .text 21 21 22 - /* unsigned long local_save_flags(void) */ 23 - _GLOBAL(local_get_flags) 24 - lbz r3,PACAPROCENABLED(r13) 25 - blr 26 - 27 - /* unsigned long local_irq_disable(void) */ 28 - _GLOBAL(local_irq_disable) 29 - lbz r3,PACAPROCENABLED(r13) 30 - li r4,0 31 - stb r4,PACAPROCENABLED(r13) 32 - blr /* Done */ 33 - 34 - /* void local_irq_restore(unsigned long flags) */ 35 - _GLOBAL(local_irq_restore) 36 - lbz r5,PACAPROCENABLED(r13) 37 - /* Check if things are setup the way we want _already_. */ 38 - cmpw 0,r3,r5 39 - beqlr 40 - /* are we enabling interrupts? */ 41 - cmpdi 0,r3,0 42 - stb r3,PACAPROCENABLED(r13) 43 - beqlr 44 - /* Check pending interrupts */ 45 - /* A decrementer, IPI or PMC interrupt may have occurred 46 - * while we were in the hypervisor (which enables) */ 47 - ld r4,PACALPPACAPTR(r13) 48 - ld r4,LPPACAANYINT(r4) 49 - cmpdi r4,0 50 - beqlr 51 - 52 - /* 53 - * Handle pending interrupts in interrupt context 54 - */ 22 + /* Handle pending interrupts in interrupt context */ 23 + _GLOBAL(iseries_handle_interrupts) 55 24 li r0,0x5555 56 25 sc 57 26 blr
+21 -10
include/asm-powerpc/hw_irq.h
··· 7 7 #ifdef __KERNEL__ 8 8 9 9 #include <linux/errno.h> 10 + #include <linux/compiler.h> 10 11 #include <asm/ptrace.h> 11 12 #include <asm/processor.h> 12 13 13 14 extern void timer_interrupt(struct pt_regs *); 14 15 15 - #ifdef CONFIG_PPC_ISERIES 16 + #ifdef CONFIG_PPC64 17 + #include <asm/paca.h> 16 18 17 - extern unsigned long local_get_flags(void); 18 - extern unsigned long local_irq_disable(void); 19 + static inline unsigned long local_get_flags(void) 20 + { 21 + return get_paca()->soft_enabled; 22 + } 23 + 24 + static inline unsigned long local_irq_disable(void) 25 + { 26 + unsigned long flag = get_paca()->soft_enabled; 27 + get_paca()->soft_enabled = 0; 28 + barrier(); 29 + return flag; 30 + } 31 + 19 32 extern void local_irq_restore(unsigned long); 33 + extern void iseries_handle_interrupts(void); 20 34 21 35 #define local_irq_enable() local_irq_restore(1) 22 36 #define local_save_flags(flags) ((flags) = local_get_flags()) ··· 38 24 39 25 #define irqs_disabled() (local_get_flags() == 0) 40 26 27 + #define hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1) 28 + #define hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1) 29 + 41 30 #else 42 31 43 32 #if defined(CONFIG_BOOKE) 44 33 #define SET_MSR_EE(x) mtmsr(x) 45 34 #define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") 46 - #elif defined(__powerpc64__) 47 - #define SET_MSR_EE(x) __mtmsrd(x, 1) 48 - #define local_irq_restore(flags) do { \ 49 - __asm__ __volatile__("": : :"memory"); \ 50 - __mtmsrd((flags), 1); \ 51 - } while(0) 52 35 #else 53 36 #define SET_MSR_EE(x) mtmsr(x) 54 37 #define local_irq_restore(flags) mtmsr(flags) ··· 92 81 #define local_irq_save(flags) local_irq_save_ptr(&flags) 93 82 #define irqs_disabled() ((mfmsr() & MSR_EE) == 0) 94 83 95 - #endif /* CONFIG_PPC_ISERIES */ 84 + #endif /* CONFIG_PPC64 */ 96 85 97 86 #define mask_irq(irq) \ 98 87 ({ \
+2 -1
include/asm-powerpc/paca.h
··· 93 93 u64 stab_rr; /* stab/slb round-robin counter */ 94 94 u64 saved_r1; /* r1 save for RTAS calls */ 95 95 u64 saved_msr; /* MSR saved here by enter_rtas */ 96 - u8 proc_enabled; /* irq soft-enable flag */ 96 + u8 soft_enabled; /* irq soft-enable flag */ 97 + u8 hard_enabled; /* set if irqs are enabled in MSR */ 97 98 u8 io_sync; /* writel() needs spin_unlock sync */ 98 99 99 100 /* Stuff for accurate time accounting */