Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: convert interrupt handlers to use wrappers

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210130130852.2952424-29-npiggin@gmail.com

authored by

Nicholas Piggin and committed by
Michael Ellerman
3a96570f fd3f1e0f

+170 -102
-29
arch/powerpc/include/asm/asm-prototypes.h
··· 56 56 int enter_vmx_ops(void); 57 57 void *exit_vmx_ops(void *dest); 58 58 59 - /* Traps */ 60 - long machine_check_early(struct pt_regs *regs); 61 - long hmi_exception_realmode(struct pt_regs *regs); 62 - void SMIException(struct pt_regs *regs); 63 - void handle_hmi_exception(struct pt_regs *regs); 64 - void instruction_breakpoint_exception(struct pt_regs *regs); 65 - void RunModeException(struct pt_regs *regs); 66 - void single_step_exception(struct pt_regs *regs); 67 - void program_check_exception(struct pt_regs *regs); 68 - void alignment_exception(struct pt_regs *regs); 69 - void StackOverflow(struct pt_regs *regs); 70 - void stack_overflow_exception(struct pt_regs *regs); 71 - void kernel_fp_unavailable_exception(struct pt_regs *regs); 72 - void altivec_unavailable_exception(struct pt_regs *regs); 73 - void vsx_unavailable_exception(struct pt_regs *regs); 74 - void fp_unavailable_tm(struct pt_regs *regs); 75 - void altivec_unavailable_tm(struct pt_regs *regs); 76 - void vsx_unavailable_tm(struct pt_regs *regs); 77 - void facility_unavailable_exception(struct pt_regs *regs); 78 - void TAUException(struct pt_regs *regs); 79 - void altivec_assist_exception(struct pt_regs *regs); 80 - void unrecoverable_exception(struct pt_regs *regs); 81 - void kernel_bad_stack(struct pt_regs *regs); 82 - void system_reset_exception(struct pt_regs *regs); 83 - void machine_check_exception(struct pt_regs *regs); 84 - void emulation_assist_interrupt(struct pt_regs *regs); 85 - long do_slb_fault(struct pt_regs *regs); 86 - void do_bad_slb_fault(struct pt_regs *regs); 87 - 88 59 /* signals, syscalls and interrupts */ 89 60 long sys_swapcontext(struct ucontext __user *old_ctx, 90 61 struct ucontext __user *new_ctx,
-1
arch/powerpc/include/asm/book3s/64/mmu-hash.h
··· 456 456 457 457 long hpte_insert_repeating(unsigned long hash, unsigned long vpn, unsigned long pa, 458 458 unsigned long rflags, unsigned long vflags, int psize, int ssize); 459 - long do_hash_fault(struct pt_regs *regs); 460 459 extern int __hash_page_4K(unsigned long ea, unsigned long access, 461 460 unsigned long vsid, pte_t *ptep, unsigned long trap, 462 461 unsigned long flags, int ssize, int subpage_prot);
-3
arch/powerpc/include/asm/debug.h
··· 50 50 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 51 51 extern void do_send_trap(struct pt_regs *regs, unsigned long address, 52 52 unsigned long error_code, int brkpt); 53 - #else 54 - 55 - void do_break(struct pt_regs *regs); 56 53 #endif 57 54 58 55 #endif /* _ASM_POWERPC_DEBUG_H */
-9
arch/powerpc/include/asm/hw_irq.h
··· 50 50 51 51 #ifndef __ASSEMBLY__ 52 52 53 - extern void replay_system_reset(void); 54 - extern void replay_soft_interrupts(void); 55 - 56 - extern void timer_interrupt(struct pt_regs *); 57 - extern void performance_monitor_exception(struct pt_regs *regs); 58 - extern void WatchdogException(struct pt_regs *regs); 59 - extern void unknown_exception(struct pt_regs *regs); 60 - void unknown_async_exception(struct pt_regs *regs); 61 - 62 53 #ifdef CONFIG_PPC64 63 54 #include <asm/paca.h> 64 55
+66
arch/powerpc/include/asm/interrupt.h
··· 232 232 \ 233 233 static __always_inline long ____##func(struct pt_regs *regs) 234 234 235 + 236 + /* Interrupt handlers */ 237 + /* kernel/traps.c */ 238 + DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception); 239 + #ifdef CONFIG_PPC_BOOK3S_64 240 + DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception); 241 + #else 242 + DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception); 243 + #endif 244 + DECLARE_INTERRUPT_HANDLER(SMIException); 245 + DECLARE_INTERRUPT_HANDLER(handle_hmi_exception); 246 + DECLARE_INTERRUPT_HANDLER(unknown_exception); 247 + DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception); 248 + DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception); 249 + DECLARE_INTERRUPT_HANDLER(RunModeException); 250 + DECLARE_INTERRUPT_HANDLER(single_step_exception); 251 + DECLARE_INTERRUPT_HANDLER(program_check_exception); 252 + DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt); 253 + DECLARE_INTERRUPT_HANDLER(alignment_exception); 254 + DECLARE_INTERRUPT_HANDLER(StackOverflow); 255 + DECLARE_INTERRUPT_HANDLER(stack_overflow_exception); 256 + DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception); 257 + DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception); 258 + DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception); 259 + DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception); 260 + DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm); 261 + DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm); 262 + DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm); 263 + DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi); 264 + DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async); 265 + DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception); 266 + DECLARE_INTERRUPT_HANDLER(DebugException); 267 + DECLARE_INTERRUPT_HANDLER(altivec_assist_exception); 268 + DECLARE_INTERRUPT_HANDLER(CacheLockingException); 269 + DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException); 270 + DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException); 271 
+ DECLARE_INTERRUPT_HANDLER(unrecoverable_exception); 272 + DECLARE_INTERRUPT_HANDLER(WatchdogException); 273 + DECLARE_INTERRUPT_HANDLER(kernel_bad_stack); 274 + 275 + /* slb.c */ 276 + DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault); 277 + DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault); 278 + 279 + /* hash_utils.c */ 280 + DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault); 281 + 282 + /* fault.c */ 283 + DECLARE_INTERRUPT_HANDLER_RET(do_page_fault); 284 + DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv); 285 + 286 + /* process.c */ 287 + DECLARE_INTERRUPT_HANDLER(do_break); 288 + 289 + /* time.c */ 290 + DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt); 291 + 292 + /* mce.c */ 293 + DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early); 294 + DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode); 295 + 296 + DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException); 297 + 298 + void replay_system_reset(void); 299 + void replay_soft_interrupts(void); 300 + 235 301 #endif /* _ASM_POWERPC_INTERRUPT_H */
+1 -1
arch/powerpc/include/asm/nmi.h
··· 4 4 5 5 #ifdef CONFIG_PPC_WATCHDOG 6 6 extern void arch_touch_nmi_watchdog(void); 7 - void soft_nmi_interrupt(struct pt_regs *regs); 7 + long soft_nmi_interrupt(struct pt_regs *regs); 8 8 #else 9 9 static inline void arch_touch_nmi_watchdog(void) {} 10 10 #endif
+3 -3
arch/powerpc/kernel/dbell.c
··· 12 12 #include <linux/hardirq.h> 13 13 14 14 #include <asm/dbell.h> 15 + #include <asm/interrupt.h> 15 16 #include <asm/irq_regs.h> 16 17 #include <asm/kvm_ppc.h> 17 18 #include <asm/trace.h> 18 19 19 20 #ifdef CONFIG_SMP 20 21 21 - void doorbell_exception(struct pt_regs *regs) 22 + DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception) 22 23 { 23 24 struct pt_regs *old_regs = set_irq_regs(regs); 24 25 ··· 40 39 set_irq_regs(old_regs); 41 40 } 42 41 #else /* CONFIG_SMP */ 43 - void doorbell_exception(struct pt_regs *regs) 42 + DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception) 44 43 { 45 44 printk(KERN_WARNING "Received doorbell on non-smp system\n"); 46 45 } 47 46 #endif /* CONFIG_SMP */ 48 -
+2 -1
arch/powerpc/kernel/irq.c
··· 54 54 #include <linux/pgtable.h> 55 55 56 56 #include <linux/uaccess.h> 57 + #include <asm/interrupt.h> 57 58 #include <asm/io.h> 58 59 #include <asm/irq.h> 59 60 #include <asm/cache.h> ··· 666 665 irq_exit(); 667 666 } 668 667 669 - void do_IRQ(struct pt_regs *regs) 668 + DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ) 670 669 { 671 670 struct pt_regs *old_regs = set_irq_regs(regs); 672 671 void *cursp, *irqsp, *sirqsp;
+3 -2
arch/powerpc/kernel/mce.c
··· 19 19 #include <linux/ftrace.h> 20 20 #include <linux/memblock.h> 21 21 22 + #include <asm/interrupt.h> 22 23 #include <asm/machdep.h> 23 24 #include <asm/mce.h> 24 25 #include <asm/nmi.h> ··· 584 583 * 585 584 * regs->nip and regs->msr contain srr0 and srr1. 586 585 */ 587 - long notrace machine_check_early(struct pt_regs *regs) 586 + DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early) 588 587 { 589 588 long handled = 0; 590 589 u8 ftrace_enabled = this_cpu_get_ftrace_enabled(); ··· 718 717 /* 719 718 * Return values: 720 719 */ 721 - long hmi_exception_realmode(struct pt_regs *regs) 720 + DEFINE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode) 722 721 { 723 722 int ret; 724 723
+2 -1
arch/powerpc/kernel/process.c
··· 41 41 #include <linux/pkeys.h> 42 42 #include <linux/seq_buf.h> 43 43 44 + #include <asm/interrupt.h> 44 45 #include <asm/io.h> 45 46 #include <asm/processor.h> 46 47 #include <asm/mmu.h> ··· 660 659 } 661 660 } 662 661 663 - void do_break(struct pt_regs *regs) 662 + DEFINE_INTERRUPT_HANDLER(do_break) 664 663 { 665 664 current->thread.trap_nr = TRAP_HWBKPT; 666 665 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, regs->dsisr,
+1
arch/powerpc/kernel/syscall_64.c
··· 5 5 #include <asm/kup.h> 6 6 #include <asm/cputime.h> 7 7 #include <asm/hw_irq.h> 8 + #include <asm/interrupt.h> 8 9 #include <asm/kprobes.h> 9 10 #include <asm/paca.h> 10 11 #include <asm/ptrace.h>
+2 -1
arch/powerpc/kernel/tau_6xx.c
··· 22 22 #include <linux/delay.h> 23 23 #include <linux/workqueue.h> 24 24 25 + #include <asm/interrupt.h> 25 26 #include <asm/io.h> 26 27 #include <asm/reg.h> 27 28 #include <asm/nvram.h> ··· 101 100 * with interrupts disabled 102 101 */ 103 102 104 - void TAUException(struct pt_regs * regs) 103 + DEFINE_INTERRUPT_HANDLER_ASYNC(TAUException) 105 104 { 106 105 int cpu = smp_processor_id(); 107 106
+2 -1
arch/powerpc/kernel/time.c
··· 57 57 #include <linux/processor.h> 58 58 #include <asm/trace.h> 59 59 60 + #include <asm/interrupt.h> 60 61 #include <asm/io.h> 61 62 #include <asm/nvram.h> 62 63 #include <asm/cache.h> ··· 572 571 * timer_interrupt - gets called when the decrementer overflows, 573 572 * with interrupts disabled. 574 573 */ 575 - void timer_interrupt(struct pt_regs *regs) 574 + DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt) 576 575 { 577 576 struct clock_event_device *evt = this_cpu_ptr(&decrementers); 578 577 u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
+54 -34
arch/powerpc/kernel/traps.c
··· 41 41 #include <asm/emulated_ops.h> 42 42 #include <linux/uaccess.h> 43 43 #include <asm/debugfs.h> 44 + #include <asm/interrupt.h> 44 45 #include <asm/io.h> 45 46 #include <asm/machdep.h> 46 47 #include <asm/rtas.h> ··· 431 430 regs->msr &= ~MSR_RI; 432 431 #endif 433 432 } 434 - 435 - void system_reset_exception(struct pt_regs *regs) 433 + DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception) 436 434 { 437 435 unsigned long hsrr0, hsrr1; 438 436 bool saved_hsrrs = false; ··· 519 519 this_cpu_set_ftrace_enabled(ftrace_enabled); 520 520 521 521 /* What should we do here? We could issue a shutdown or hard reset. */ 522 + 523 + return 0; 522 524 } 523 525 NOKPROBE_SYMBOL(system_reset_exception); 524 526 ··· 807 805 } 808 806 NOKPROBE_SYMBOL(die_mce); 809 807 810 - void machine_check_exception(struct pt_regs *regs) 808 + #ifdef CONFIG_PPC_BOOK3S_64 809 + DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception) 810 + #else 811 + DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception) 812 + #endif 811 813 { 812 814 int recover = 0; 813 815 ··· 861 855 die_mce("Unrecoverable Machine check", regs, SIGBUS); 862 856 863 857 if (nmi) nmi_exit(); 858 + 859 + #ifdef CONFIG_PPC_BOOK3S_64 860 + return; 861 + #else 862 + return 0; 863 + #endif 864 864 } 865 865 NOKPROBE_SYMBOL(machine_check_exception); 866 866 867 - void SMIException(struct pt_regs *regs) 867 + DEFINE_INTERRUPT_HANDLER(SMIException) /* async? 
*/ 868 868 { 869 869 die("System Management Interrupt", regs, SIGABRT); 870 870 } ··· 1056 1044 } 1057 1045 #endif /* CONFIG_VSX */ 1058 1046 1059 - void handle_hmi_exception(struct pt_regs *regs) 1047 + DEFINE_INTERRUPT_HANDLER_ASYNC(handle_hmi_exception) 1060 1048 { 1061 1049 struct pt_regs *old_regs; 1062 1050 ··· 1085 1073 set_irq_regs(old_regs); 1086 1074 } 1087 1075 1088 - void unknown_exception(struct pt_regs *regs) 1076 + DEFINE_INTERRUPT_HANDLER(unknown_exception) 1089 1077 { 1090 1078 enum ctx_state prev_state = exception_enter(); 1091 1079 ··· 1097 1085 exception_exit(prev_state); 1098 1086 } 1099 1087 1100 - void unknown_async_exception(struct pt_regs *regs) 1088 + DEFINE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception) 1101 1089 { 1102 1090 enum ctx_state prev_state = exception_enter(); 1103 1091 ··· 1109 1097 exception_exit(prev_state); 1110 1098 } 1111 1099 1112 - void instruction_breakpoint_exception(struct pt_regs *regs) 1100 + DEFINE_INTERRUPT_HANDLER(instruction_breakpoint_exception) 1113 1101 { 1114 1102 enum ctx_state prev_state = exception_enter(); 1115 1103 ··· 1124 1112 exception_exit(prev_state); 1125 1113 } 1126 1114 1127 - void RunModeException(struct pt_regs *regs) 1115 + DEFINE_INTERRUPT_HANDLER(RunModeException) 1128 1116 { 1129 1117 _exception(SIGTRAP, regs, TRAP_UNK, 0); 1130 1118 } 1131 1119 1132 - void single_step_exception(struct pt_regs *regs) 1120 + DEFINE_INTERRUPT_HANDLER(single_step_exception) 1133 1121 { 1134 1122 enum ctx_state prev_state = exception_enter(); 1135 1123 ··· 1591 1579 1592 1580 } 1593 1581 1594 - void program_check_exception(struct pt_regs *regs) 1582 + DEFINE_INTERRUPT_HANDLER(program_check_exception) 1595 1583 { 1596 1584 enum ctx_state prev_state = exception_enter(); 1597 1585 ··· 1605 1593 * This occurs when running in hypervisor mode on POWER6 or later 1606 1594 * and an illegal instruction is encountered. 
1607 1595 */ 1608 - void emulation_assist_interrupt(struct pt_regs *regs) 1596 + DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt) 1609 1597 { 1610 1598 enum ctx_state prev_state = exception_enter(); 1611 1599 ··· 1616 1604 } 1617 1605 NOKPROBE_SYMBOL(emulation_assist_interrupt); 1618 1606 1619 - void alignment_exception(struct pt_regs *regs) 1607 + DEFINE_INTERRUPT_HANDLER(alignment_exception) 1620 1608 { 1621 1609 enum ctx_state prev_state = exception_enter(); 1622 1610 int sig, code, fixed = 0; ··· 1666 1654 exception_exit(prev_state); 1667 1655 } 1668 1656 1669 - void StackOverflow(struct pt_regs *regs) 1657 + DEFINE_INTERRUPT_HANDLER(StackOverflow) 1670 1658 { 1671 1659 pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n", 1672 1660 current->comm, task_pid_nr(current), regs->gpr[1]); ··· 1675 1663 panic("kernel stack overflow"); 1676 1664 } 1677 1665 1678 - void stack_overflow_exception(struct pt_regs *regs) 1666 + DEFINE_INTERRUPT_HANDLER(stack_overflow_exception) 1679 1667 { 1680 1668 enum ctx_state prev_state = exception_enter(); 1681 1669 ··· 1684 1672 exception_exit(prev_state); 1685 1673 } 1686 1674 1687 - void kernel_fp_unavailable_exception(struct pt_regs *regs) 1675 + DEFINE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception) 1688 1676 { 1689 1677 enum ctx_state prev_state = exception_enter(); 1690 1678 ··· 1695 1683 exception_exit(prev_state); 1696 1684 } 1697 1685 1698 - void altivec_unavailable_exception(struct pt_regs *regs) 1686 + DEFINE_INTERRUPT_HANDLER(altivec_unavailable_exception) 1699 1687 { 1700 1688 enum ctx_state prev_state = exception_enter(); 1701 1689 ··· 1714 1702 exception_exit(prev_state); 1715 1703 } 1716 1704 1717 - void vsx_unavailable_exception(struct pt_regs *regs) 1705 + DEFINE_INTERRUPT_HANDLER(vsx_unavailable_exception) 1718 1706 { 1719 1707 if (user_mode(regs)) { 1720 1708 /* A user program has executed an vsx instruction, ··· 1745 1733 die("Unrecoverable TM Unavailable Exception", regs, SIGABRT); 1746 1734 } 1747 
1735 1748 - void facility_unavailable_exception(struct pt_regs *regs) 1736 + DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception) 1749 1737 { 1750 1738 static char *facility_strings[] = { 1751 1739 [FSCR_FP_LG] = "FPU", ··· 1865 1853 1866 1854 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1867 1855 1868 - void fp_unavailable_tm(struct pt_regs *regs) 1856 + DEFINE_INTERRUPT_HANDLER(fp_unavailable_tm) 1869 1857 { 1870 1858 /* Note: This does not handle any kind of FP laziness. */ 1871 1859 ··· 1898 1886 tm_recheckpoint(&current->thread); 1899 1887 } 1900 1888 1901 - void altivec_unavailable_tm(struct pt_regs *regs) 1889 + DEFINE_INTERRUPT_HANDLER(altivec_unavailable_tm) 1902 1890 { 1903 1891 /* See the comments in fp_unavailable_tm(). This function operates 1904 1892 * the same way. ··· 1913 1901 current->thread.used_vr = 1; 1914 1902 } 1915 1903 1916 - void vsx_unavailable_tm(struct pt_regs *regs) 1904 + DEFINE_INTERRUPT_HANDLER(vsx_unavailable_tm) 1917 1905 { 1918 1906 /* See the comments in fp_unavailable_tm(). This works similarly, 1919 1907 * though we're loading both FP and VEC registers in here. 
··· 1938 1926 } 1939 1927 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1940 1928 1941 - static void performance_monitor_exception_nmi(struct pt_regs *regs) 1929 + #ifdef CONFIG_PPC64 1930 + DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi); 1931 + DEFINE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi) 1942 1932 { 1943 1933 nmi_enter(); 1944 1934 ··· 1949 1935 perf_irq(regs); 1950 1936 1951 1937 nmi_exit(); 1952 - } 1953 1938 1954 - static void performance_monitor_exception_async(struct pt_regs *regs) 1939 + return 0; 1940 + } 1941 + #endif 1942 + 1943 + DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async); 1944 + DEFINE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async) 1955 1945 { 1956 1946 irq_enter(); 1957 1947 ··· 1966 1948 irq_exit(); 1967 1949 } 1968 1950 1969 - void performance_monitor_exception(struct pt_regs *regs) 1951 + DEFINE_INTERRUPT_HANDLER_RAW(performance_monitor_exception) 1970 1952 { 1971 1953 /* 1972 1954 * On 64-bit, if perf interrupts hit in a local_irq_disable ··· 1978 1960 performance_monitor_exception_nmi(regs); 1979 1961 else 1980 1962 performance_monitor_exception_async(regs); 1963 + 1964 + return 0; 1981 1965 } 1982 1966 1983 1967 #ifdef CONFIG_PPC_ADV_DEBUG_REGS ··· 2042 2022 mtspr(SPRN_DBCR0, current->thread.debug.dbcr0); 2043 2023 } 2044 2024 2045 - void DebugException(struct pt_regs *regs) 2025 + DEFINE_INTERRUPT_HANDLER(DebugException) 2046 2026 { 2047 2027 unsigned long debug_status = regs->dsisr; 2048 2028 ··· 2115 2095 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 2116 2096 2117 2097 #ifdef CONFIG_ALTIVEC 2118 - void altivec_assist_exception(struct pt_regs *regs) 2098 + DEFINE_INTERRUPT_HANDLER(altivec_assist_exception) 2119 2099 { 2120 2100 int err; 2121 2101 ··· 2149 2129 #endif /* CONFIG_ALTIVEC */ 2150 2130 2151 2131 #ifdef CONFIG_FSL_BOOKE 2152 - void CacheLockingException(struct pt_regs *regs) 2132 + DEFINE_INTERRUPT_HANDLER(CacheLockingException) 2153 2133 { 2154 2134 unsigned long 
error_code = regs->dsisr; 2155 2135 ··· 2164 2144 #endif /* CONFIG_FSL_BOOKE */ 2165 2145 2166 2146 #ifdef CONFIG_SPE 2167 - void SPEFloatingPointException(struct pt_regs *regs) 2147 + DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException) 2168 2148 { 2169 2149 extern int do_spe_mathemu(struct pt_regs *regs); 2170 2150 unsigned long spefscr; ··· 2216 2196 return; 2217 2197 } 2218 2198 2219 - void SPEFloatingPointRoundException(struct pt_regs *regs) 2199 + DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException) 2220 2200 { 2221 2201 extern int speround_handler(struct pt_regs *regs); 2222 2202 int err; ··· 2258 2238 * in the MSR is 0. This indicates that SRR0/1 are live, and that 2259 2239 * we therefore lost state by taking this exception. 2260 2240 */ 2261 - void unrecoverable_exception(struct pt_regs *regs) 2241 + DEFINE_INTERRUPT_HANDLER(unrecoverable_exception) 2262 2242 { 2263 2243 pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n", 2264 2244 regs->trap, regs->nip, regs->msr); ··· 2278 2258 return; 2279 2259 } 2280 2260 2281 - void WatchdogException(struct pt_regs *regs) 2261 + DEFINE_INTERRUPT_HANDLER(WatchdogException) /* XXX NMI? async? */ 2282 2262 { 2283 2263 printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n"); 2284 2264 WatchdogHandler(regs); ··· 2289 2269 * We enter here if we discover during exception entry that we are 2290 2270 * running in supervisor mode with a userspace value in the stack pointer. 2291 2271 */ 2292 - void kernel_bad_stack(struct pt_regs *regs) 2272 + DEFINE_INTERRUPT_HANDLER(kernel_bad_stack) 2293 2273 { 2294 2274 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n", 2295 2275 regs->gpr[1], regs->nip);
+5 -2
arch/powerpc/kernel/watchdog.c
··· 26 26 #include <linux/delay.h> 27 27 #include <linux/smp.h> 28 28 29 + #include <asm/interrupt.h> 29 30 #include <asm/paca.h> 30 31 #include <asm/nmi.h> 31 32 ··· 249 248 watchdog_smp_panic(cpu, tb); 250 249 } 251 250 252 - void soft_nmi_interrupt(struct pt_regs *regs) 251 + DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt) 253 252 { 254 253 unsigned long flags; 255 254 int cpu = raw_smp_processor_id(); 256 255 u64 tb; 257 256 258 257 if (!cpumask_test_cpu(cpu, &wd_cpus_enabled)) 259 - return; 258 + return 0; 260 259 261 260 nmi_enter(); 262 261 ··· 293 292 294 293 out: 295 294 nmi_exit(); 295 + 296 + return 0; 296 297 } 297 298 298 299 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
+1
arch/powerpc/kvm/book3s_hv.c
··· 53 53 #include <asm/cputable.h> 54 54 #include <asm/cacheflush.h> 55 55 #include <linux/uaccess.h> 56 + #include <asm/interrupt.h> 56 57 #include <asm/io.h> 57 58 #include <asm/kvm_ppc.h> 58 59 #include <asm/kvm_book3s.h>
+1
arch/powerpc/kvm/book3s_hv_builtin.c
··· 17 17 18 18 #include <asm/asm-prototypes.h> 19 19 #include <asm/cputable.h> 20 + #include <asm/interrupt.h> 20 21 #include <asm/kvm_ppc.h> 21 22 #include <asm/kvm_book3s.h> 22 23 #include <asm/archrandom.h>
+1
arch/powerpc/kvm/booke.c
··· 20 20 21 21 #include <asm/cputable.h> 22 22 #include <linux/uaccess.h> 23 + #include <asm/interrupt.h> 23 24 #include <asm/kvm_ppc.h> 24 25 #include <asm/cacheflush.h> 25 26 #include <asm/dbell.h>
+9 -3
arch/powerpc/mm/book3s64/hash_utils.c
··· 38 38 #include <linux/pgtable.h> 39 39 40 40 #include <asm/debugfs.h> 41 + #include <asm/interrupt.h> 41 42 #include <asm/processor.h> 42 43 #include <asm/mmu.h> 43 44 #include <asm/mmu_context.h> ··· 1513 1512 } 1514 1513 EXPORT_SYMBOL_GPL(hash_page); 1515 1514 1516 - static long __do_hash_fault(struct pt_regs *regs) 1515 + DECLARE_INTERRUPT_HANDLER_RET(__do_hash_fault); 1516 + DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault) 1517 1517 { 1518 1518 unsigned long ea = regs->dar; 1519 1519 unsigned long dsisr = regs->dsisr; ··· 1567 1565 return err; 1568 1566 } 1569 1567 1570 - long do_hash_fault(struct pt_regs *regs) 1568 + /* 1569 + * The _RAW interrupt entry checks for the in_nmi() case before 1570 + * running the full handler. 1571 + */ 1572 + DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault) 1571 1573 { 1572 1574 unsigned long dsisr = regs->dsisr; 1573 1575 long err; ··· 1593 1587 * the access, or panic if there isn't a handler. 1594 1588 */ 1595 1589 if (unlikely(in_nmi())) { 1596 - bad_page_fault(regs, SIGSEGV); 1590 + do_bad_page_fault_segv(regs); 1597 1591 return 0; 1598 1592 } 1599 1593
+4 -3
arch/powerpc/mm/book3s64/slb.c
··· 10 10 */ 11 11 12 12 #include <asm/asm-prototypes.h> 13 + #include <asm/interrupt.h> 13 14 #include <asm/mmu.h> 14 15 #include <asm/mmu_context.h> 15 16 #include <asm/paca.h> ··· 814 813 return slb_insert_entry(ea, context, flags, ssize, false); 815 814 } 816 815 817 - long do_slb_fault(struct pt_regs *regs) 816 + DEFINE_INTERRUPT_HANDLER_RAW(do_slb_fault) 818 817 { 819 818 unsigned long ea = regs->dar; 820 819 unsigned long id = get_region_id(ea); ··· 834 833 */ 835 834 836 835 /* 837 - * The interrupt state is not reconciled, for performance, so that 836 + * This is a raw interrupt handler, for performance, so that 838 837 * fast_interrupt_return can be used. The handler must not touch local 839 838 * irq state, or schedule. We could test for usermode and upgrade to a 840 839 * normal process context (synchronous) interrupt for those, which ··· 869 868 } 870 869 } 871 870 872 - void do_bad_slb_fault(struct pt_regs *regs) 871 + DEFINE_INTERRUPT_HANDLER(do_bad_slb_fault) 873 872 { 874 873 int err = regs->result; 875 874
+3 -2
arch/powerpc/mm/fault.c
··· 34 34 #include <linux/uaccess.h> 35 35 36 36 #include <asm/firmware.h> 37 + #include <asm/interrupt.h> 37 38 #include <asm/page.h> 38 39 #include <asm/mmu.h> 39 40 #include <asm/mmu_context.h> ··· 541 540 } 542 541 NOKPROBE_SYMBOL(__do_page_fault); 543 542 544 - long do_page_fault(struct pt_regs *regs) 543 + DEFINE_INTERRUPT_HANDLER_RET(do_page_fault) 545 544 { 546 545 const struct exception_table_entry *entry; 547 546 enum ctx_state prev_state; ··· 625 624 } 626 625 627 626 #ifdef CONFIG_PPC_BOOK3S_64 628 - void do_bad_page_fault_segv(struct pt_regs *regs) 627 + DEFINE_INTERRUPT_HANDLER(do_bad_page_fault_segv) 629 628 { 630 629 bad_page_fault(regs, SIGSEGV); 631 630 }
+3 -3
arch/powerpc/platforms/cell/ras.c
··· 49 49 50 50 } 51 51 52 - void cbe_system_error_exception(struct pt_regs *regs) 52 + DEFINE_INTERRUPT_HANDLER(cbe_system_error_exception) 53 53 { 54 54 int cpu = smp_processor_id(); 55 55 ··· 58 58 dump_stack(); 59 59 } 60 60 61 - void cbe_maintenance_exception(struct pt_regs *regs) 61 + DEFINE_INTERRUPT_HANDLER(cbe_maintenance_exception) 62 62 { 63 63 int cpu = smp_processor_id(); 64 64 ··· 70 70 dump_stack(); 71 71 } 72 72 73 - void cbe_thermal_exception(struct pt_regs *regs) 73 + DEFINE_INTERRUPT_HANDLER(cbe_thermal_exception) 74 74 { 75 75 int cpu = smp_processor_id(); 76 76
+6 -3
arch/powerpc/platforms/cell/ras.h
··· 2 2 #ifndef RAS_H 3 3 #define RAS_H 4 4 5 - extern void cbe_system_error_exception(struct pt_regs *regs); 6 - extern void cbe_maintenance_exception(struct pt_regs *regs); 7 - extern void cbe_thermal_exception(struct pt_regs *regs); 5 + #include <asm/interrupt.h> 6 + 7 + DECLARE_INTERRUPT_HANDLER(cbe_system_error_exception); 8 + DECLARE_INTERRUPT_HANDLER(cbe_maintenance_exception); 9 + DECLARE_INTERRUPT_HANDLER(cbe_thermal_exception); 10 + 8 11 extern void cbe_ras_init(void); 9 12 10 13 #endif /* RAS_H */
+1
arch/powerpc/platforms/powernv/idle.c
··· 14 14 15 15 #include <asm/asm-prototypes.h> 16 16 #include <asm/firmware.h> 17 + #include <asm/interrupt.h> 17 18 #include <asm/machdep.h> 18 19 #include <asm/opal.h> 19 20 #include <asm/cputhreads.h>