[SPARC64]: Revamp Spitfire error trap handling.

Current uncorrectable error handling was poor enough
that the processor could just loop taking the same
trap over and over again. Fix things up so that we
at least get a log message and perhaps even some register
state.

In the process, much consolidation became possible,
particularly with the correctable error handler.

Prefix assembler and C function names with "spitfire"
to indicate that these are for Ultra-I/II/IIi/IIe only.

More work is needed to make these routines as robust and
featureful as the Ultra-III error handlers.

Signed-off-by: David S. Miller <davem@davemloft.net>

+453 -280
+198 -138
arch/sparc64/kernel/entry.S
··· 21 #include <asm/visasm.h> 22 #include <asm/estate.h> 23 #include <asm/auxio.h> 24 25 #define curptr g6 26 ··· 691 retl 692 nop 693 694 - .globl __do_data_access_exception 695 - .globl __do_data_access_exception_tl1 696 - __do_data_access_exception_tl1: 697 - rdpr %pstate, %g4 698 - wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate 699 - mov TLB_SFSR, %g3 700 - mov DMMU_SFAR, %g5 701 - ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR 702 - ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR 703 - stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit 704 membar #Sync 705 rdpr %tt, %g3 706 - cmp %g3, 0x80 ! first win spill/fill trap 707 - blu,pn %xcc, 1f 708 - cmp %g3, 0xff ! last win spill/fill trap 709 - bgu,pn %xcc, 1f 710 nop 711 - ba,pt %xcc, winfix_dax 712 - rdpr %tpc, %g3 713 - 1: sethi %hi(109f), %g7 714 ba,pt %xcc, etraptl1 715 - 109: or %g7, %lo(109b), %g7 716 - mov %l4, %o1 717 - mov %l5, %o2 718 - call data_access_exception_tl1 719 - add %sp, PTREGS_OFF, %o0 720 - ba,pt %xcc, rtrap 721 - clr %l6 722 723 - __do_data_access_exception: 724 - rdpr %pstate, %g4 725 - wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate 726 - mov TLB_SFSR, %g3 727 - mov DMMU_SFAR, %g5 728 - ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR 729 - ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR 730 - stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit 731 - membar #Sync 732 - sethi %hi(109f), %g7 733 - ba,pt %xcc, etrap 734 - 109: or %g7, %lo(109b), %g7 735 - mov %l4, %o1 736 - mov %l5, %o2 737 - call data_access_exception 738 - add %sp, PTREGS_OFF, %o0 739 - ba,pt %xcc, rtrap 740 - clr %l6 741 742 - .globl __do_instruction_access_exception 743 - .globl __do_instruction_access_exception_tl1 744 - __do_instruction_access_exception_tl1: 745 - rdpr %pstate, %g4 746 - wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate 747 - mov TLB_SFSR, %g3 748 - ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR 749 - rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC 750 - stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit 751 - membar #Sync 752 - sethi %hi(109f), %g7 753 - ba,pt %xcc, etraptl1 754 - 109: or %g7, %lo(109b), %g7 755 - mov %l4, %o1 756 - mov %l5, %o2 757 - call instruction_access_exception_tl1 758 - add %sp, PTREGS_OFF, %o0 759 - ba,pt %xcc, rtrap 760 - clr %l6 761 762 - __do_instruction_access_exception: 763 - rdpr %pstate, %g4 764 - wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate 765 - mov TLB_SFSR, %g3 766 - ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR 767 - rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC 768 - stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit 769 - membar #Sync 770 - sethi %hi(109f), %g7 771 - ba,pt %xcc, etrap 772 - 109: or %g7, %lo(109b), %g7 773 - mov %l4, %o1 774 mov %l5, %o2 775 - call instruction_access_exception 776 add %sp, PTREGS_OFF, %o0 777 ba,pt %xcc, rtrap 778 clr %l6 ··· 808 * as it is the only situation where we can safely record 809 * and log. For trap level >1 we just clear the CE bit 810 * in the AFSR and return. 811 - */ 812 - 813 - /* Our trap handling infrastructure allows us to preserve 814 - * two 64-bit values during etrap for arguments to 815 - * subsequent C code. Therefore we encode the information 816 - * as follows: 817 * 818 - * value 1) Full 64-bits of AFAR 819 - * value 2) Low 33-bits of AFSR, then bits 33-->42 820 - * are UDBL error status and bits 43-->52 821 - * are UDBH error status 822 */ 823 - .align 64 824 - .globl cee_trap 825 - cee_trap: 826 - ldxa [%g0] ASI_AFSR, %g1 ! Read AFSR 827 - ldxa [%g0] ASI_AFAR, %g2 ! Read AFAR 828 - sllx %g1, 31, %g1 ! Clear reserved bits 829 - srlx %g1, 31, %g1 ! 
in AFSR 830 831 - /* NOTE: UltraSparc-I/II have high and low UDB error 832 - * registers, corresponding to the two UDB units 833 - * present on those chips. UltraSparc-IIi only 834 - * has a single UDB, called "SDB" in the manual. 835 - * For IIi the upper UDB register always reads 836 - * as zero so for our purposes things will just 837 - * work with the checks below. 838 */ 839 - ldxa [%g0] ASI_UDBL_ERROR_R, %g3 ! Read UDB-Low error status 840 - andcc %g3, (1 << 8), %g4 ! Check CE bit 841 - sllx %g3, (64 - 10), %g3 ! Clear reserved bits 842 - srlx %g3, (64 - 10), %g3 ! in UDB-Low error status 843 844 - sllx %g3, (33 + 0), %g3 ! Shift up to encoding area 845 - or %g1, %g3, %g1 ! Or it in 846 - be,pn %xcc, 1f ! Branch if CE bit was clear 847 nop 848 - stxa %g4, [%g0] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBL 849 - membar #Sync ! Synchronize ASI stores 850 - 1: mov 0x18, %g5 ! Addr of UDB-High error status 851 - ldxa [%g5] ASI_UDBH_ERROR_R, %g3 ! Read it 852 853 - andcc %g3, (1 << 8), %g4 ! Check CE bit 854 - sllx %g3, (64 - 10), %g3 ! Clear reserved bits 855 - srlx %g3, (64 - 10), %g3 ! in UDB-High error status 856 - sllx %g3, (33 + 10), %g3 ! Shift up to encoding area 857 - or %g1, %g3, %g1 ! Or it in 858 - be,pn %xcc, 1f ! Branch if CE bit was clear 859 - nop 860 - nop 861 862 - stxa %g4, [%g5] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBH 863 - membar #Sync ! Synchronize ASI stores 864 - 1: mov 1, %g5 ! AFSR CE bit is 865 - sllx %g5, 20, %g5 ! bit 20 866 - stxa %g5, [%g0] ASI_AFSR ! Clear CE sticky bit in AFSR 867 - membar #Sync ! Synchronize ASI stores 868 - sllx %g2, (64 - 41), %g2 ! Clear reserved bits 869 - srlx %g2, (64 - 41), %g2 ! in latched AFAR 870 871 - andn %g2, 0x0f, %g2 ! Finish resv bit clearing 872 - mov %g1, %g4 ! Move AFSR+UDB* into save reg 873 - mov %g2, %g5 ! Move AFAR into save reg 874 - rdpr %pil, %g2 875 - wrpr %g0, 15, %pil 876 - ba,pt %xcc, etrap_irq 877 - rd %pc, %g7 878 - mov %l4, %o0 879 - 880 - mov %l5, %o1 881 - call cee_log 882 - add %sp, PTREGS_OFF, %o2 883 - ba,a,pt %xcc, rtrap_irq 884 885 /* Capture I/D/E-cache state into per-cpu error scoreboard. 886 *
··· 21 #include <asm/visasm.h> 22 #include <asm/estate.h> 23 #include <asm/auxio.h> 24 + #include <asm/sfafsr.h> 25 26 #define curptr g6 27 ··· 690 retl 691 nop 692 693 + /* We need to carefully read the error status, ACK 694 + * the errors, prevent recursive traps, and pass the 695 + * information on to C code for logging. 696 + * 697 + * We pass the AFAR in as-is, and we encode the status 698 + * information as described in asm-sparc64/sfafsr.h 699 + */ 700 + .globl __spitfire_access_error 701 + __spitfire_access_error: 702 + /* Disable ESTATE error reporting so that we do not 703 + * take recursive traps and RED state the processor. 704 + */ 705 + stxa %g0, [%g0] ASI_ESTATE_ERROR_EN 706 membar #Sync 707 + 708 + mov UDBE_UE, %g1 709 + ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR 710 + 711 + /* __spitfire_cee_trap branches here with AFSR in %g4 and 712 + * UDBE_CE in %g1. It only clears ESTATE_ERR_CE in the 713 + * ESTATE Error Enable register. 714 + */ 715 + __spitfire_cee_trap_continue: 716 + ldxa [%g0] ASI_AFAR, %g5 ! Get AFAR 717 + 718 rdpr %tt, %g3 719 + and %g3, 0x1ff, %g3 ! Paranoia 720 + sllx %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3 721 + or %g4, %g3, %g4 722 + rdpr %tl, %g3 723 + cmp %g3, 1 724 + mov 1, %g3 725 + bleu %xcc, 1f 726 + sllx %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3 727 + 728 + or %g4, %g3, %g4 729 + 730 + /* Read in the UDB error register state, clearing the 731 + * sticky error bits as needed. We only clear them if 732 + * the UE bit is set. Likewise, __spitfire_cee_trap 733 + * below will only do so if the CE bit is set. 734 + * 735 + * NOTE: UltraSparc-I/II have high and low UDB error 736 + * registers, corresponding to the two UDB units 737 + * present on those chips. UltraSparc-IIi only 738 + * has a single UDB, called "SDB" in the manual. 739 + * For IIi the upper UDB register always reads 740 + * as zero so for our purposes things will just 741 + * work with the checks below. 742 + */ 743 + 1: ldxa [%g0] ASI_UDBH_ERROR_R, %g3 744 + and %g3, 0x3ff, %g7 ! Paranoia 745 + sllx %g7, SFSTAT_UDBH_SHIFT, %g7 746 + or %g4, %g7, %g4 747 + andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE 748 + be,pn %xcc, 1f 749 nop 750 + stxa %g3, [%g0] ASI_UDB_ERROR_W 751 + membar #Sync 752 + 753 + 1: mov 0x18, %g3 754 + ldxa [%g3] ASI_UDBL_ERROR_R, %g3 755 + and %g3, 0x3ff, %g7 ! Paranoia 756 + sllx %g7, SFSTAT_UDBL_SHIFT, %g7 757 + or %g4, %g7, %g4 758 + andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE 759 + be,pn %xcc, 1f 760 + nop 761 + mov 0x18, %g7 762 + stxa %g3, [%g7] ASI_UDB_ERROR_W 763 + membar #Sync 764 + 765 + 1: /* Ok, now that we've latched the error state, 766 + * clear the sticky bits in the AFSR. 767 + */ 768 + stxa %g4, [%g0] ASI_AFSR 769 + membar #Sync 770 + 771 + rdpr %tl, %g2 772 + cmp %g2, 1 773 + rdpr %pil, %g2 774 + bleu,pt %xcc, 1f 775 + wrpr %g0, 15, %pil 776 + 777 ba,pt %xcc, etraptl1 778 + rd %pc, %g7 779 780 + ba,pt %xcc, 2f 781 + nop 782 783 + 1: ba,pt %xcc, etrap_irq 784 + rd %pc, %g7 785 786 + 2: mov %l4, %o1 787 mov %l5, %o2 788 + call spitfire_access_error 789 add %sp, PTREGS_OFF, %o0 790 ba,pt %xcc, rtrap 791 clr %l6 ··· 793 * as it is the only situation where we can safely record 794 * and log. For trap level >1 we just clear the CE bit 795 * in the AFSR and return. 796 * 797 + * This is just like __spitfire_access_error above, but it 798 + * specifically handles correctable errors. If an 799 + * uncorrectable error is indicated in the AFSR we 800 + * will branch directly above to __spitfire_access_error 801 + * to handle it instead. 
Uncorrectable therefore takes 802 + * priority over correctable, and the error logging 803 + * C code will notice this case by inspecting the 804 + * trap type. 805 */ 806 + .globl __spitfire_cee_trap 807 + __spitfire_cee_trap: 808 + ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR 809 + mov 1, %g3 810 + sllx %g3, SFAFSR_UE_SHIFT, %g3 811 + andcc %g4, %g3, %g0 ! Check for UE 812 + bne,pn %xcc, __spitfire_access_error 813 + nop 814 815 + /* Ok, in this case we only have a correctable error. 816 + * Indicate we only wish to capture that state in register 817 + * %g1, and we only disable CE error reporting unlike UE 818 + * handling which disables all errors. 819 */ 820 + ldxa [%g0] ASI_ESTATE_ERROR_EN, %g3 821 + andn %g3, ESTATE_ERR_CE, %g3 822 + stxa %g3, [%g0] ASI_ESTATE_ERROR_EN 823 + membar #Sync 824 825 + /* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */ 826 + ba,pt %xcc, __spitfire_cee_trap_continue 827 + mov UDBE_CE, %g1 828 + 829 + .globl __spitfire_data_access_exception 830 + .globl __spitfire_data_access_exception_tl1 831 + __spitfire_data_access_exception_tl1: 832 + rdpr %pstate, %g4 833 + wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate 834 + mov TLB_SFSR, %g3 835 + mov DMMU_SFAR, %g5 836 + ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR 837 + ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR 838 + stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit 839 + membar #Sync 840 + rdpr %tt, %g3 841 + cmp %g3, 0x80 ! first win spill/fill trap 842 + blu,pn %xcc, 1f 843 + cmp %g3, 0xff ! last win spill/fill trap 844 + bgu,pn %xcc, 1f 845 nop 846 + ba,pt %xcc, winfix_dax 847 + rdpr %tpc, %g3 848 + 1: sethi %hi(109f), %g7 849 + ba,pt %xcc, etraptl1 850 + 109: or %g7, %lo(109b), %g7 851 + mov %l4, %o1 852 + mov %l5, %o2 853 + call spitfire_data_access_exception_tl1 854 + add %sp, PTREGS_OFF, %o0 855 + ba,pt %xcc, rtrap 856 + clr %l6 857 858 + __spitfire_data_access_exception: 859 + rdpr %pstate, %g4 860 + wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate 861 + mov TLB_SFSR, %g3 862 + mov DMMU_SFAR, %g5 863 + ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR 864 + ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR 865 + stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit 866 + membar #Sync 867 + sethi %hi(109f), %g7 868 + ba,pt %xcc, etrap 869 + 109: or %g7, %lo(109b), %g7 870 + mov %l4, %o1 871 + mov %l5, %o2 872 + call spitfire_data_access_exception 873 + add %sp, PTREGS_OFF, %o0 874 + ba,pt %xcc, rtrap 875 + clr %l6 876 877 + .globl __spitfire_insn_access_exception 878 + .globl __spitfire_insn_access_exception_tl1 879 + __spitfire_insn_access_exception_tl1: 880 + rdpr %pstate, %g4 881 + wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate 882 + mov TLB_SFSR, %g3 883 + ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR 884 + rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC 885 + stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit 886 + membar #Sync 887 + sethi %hi(109f), %g7 888 + ba,pt %xcc, etraptl1 889 + 109: or %g7, %lo(109b), %g7 890 + mov %l4, %o1 891 + mov %l5, %o2 892 + call spitfire_insn_access_exception_tl1 893 + add %sp, PTREGS_OFF, %o0 894 + ba,pt %xcc, rtrap 895 + clr %l6 896 897 + __spitfire_insn_access_exception: 898 + rdpr %pstate, %g4 899 + wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate 900 + mov TLB_SFSR, %g3 901 + ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR 902 + rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC 903 + stxa %g0, [%g3] ASI_IMMU ! 
Clear FaultValid bit 904 + membar #Sync 905 + sethi %hi(109f), %g7 906 + ba,pt %xcc, etrap 907 + 109: or %g7, %lo(109b), %g7 908 + mov %l4, %o1 909 + mov %l5, %o2 910 + call spitfire_insn_access_exception 911 + add %sp, PTREGS_OFF, %o0 912 + ba,pt %xcc, rtrap 913 + clr %l6 914 915 /* Capture I/D/E-cache state into per-cpu error scoreboard. 916 *
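
For reference, a minimal C sketch (not part of the patch) of the packing that __spitfire_access_error performs in %g4 before calling into C. The SFSTAT_* macros and the 0x1ff/0x3ff paranoia masks come from the new asm-sparc64/sfafsr.h; the sfstat_encode helper name is hypothetical:

	#include <asm/sfafsr.h>

	/* Hypothetical helper mirroring what the assembler builds in %g4:
	 * AFSR in the low 33 bits, then trap type, the TL>1 flag, and the
	 * UDB-High/UDB-Low error register states.
	 */
	static unsigned long sfstat_encode(unsigned long afsr, unsigned long tt,
					   int tl_gt_one, unsigned long udbh,
					   unsigned long udbl)
	{
		unsigned long status = afsr & SFSTAT_AFSR_MASK;

		status |= (tt & 0x1ff) << SFSTAT_TRAP_TYPE_SHIFT;
		if (tl_gt_one)
			status |= SFSTAT_TL_GT_ONE;
		status |= (udbh & 0x3ff) << SFSTAT_UDBH_SHIFT;
		status |= (udbl & 0x3ff) << SFSTAT_UDBL_SHIFT;

		return status;
	}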
+145 -119
arch/sparc64/kernel/traps.c
··· 33 #include <asm/dcu.h> 34 #include <asm/estate.h> 35 #include <asm/chafsr.h> 36 #include <asm/psrcompat.h> 37 #include <asm/processor.h> 38 #include <asm/timer.h> ··· 144 } 145 #endif 146 147 - void instruction_access_exception(struct pt_regs *regs, 148 - unsigned long sfsr, unsigned long sfar) 149 { 150 siginfo_t info; 151 ··· 153 return; 154 155 if (regs->tstate & TSTATE_PRIV) { 156 - printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n", 157 - sfsr, sfar); 158 die_if_kernel("Iax", regs); 159 } 160 if (test_thread_flag(TIF_32BIT)) { ··· 169 force_sig_info(SIGSEGV, &info, current); 170 } 171 172 - void instruction_access_exception_tl1(struct pt_regs *regs, 173 - unsigned long sfsr, unsigned long sfar) 174 { 175 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, 176 0, 0x8, SIGTRAP) == NOTIFY_STOP) 177 return; 178 179 dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); 180 - instruction_access_exception(regs, sfsr, sfar); 181 } 182 183 - void data_access_exception(struct pt_regs *regs, 184 - unsigned long sfsr, unsigned long sfar) 185 { 186 siginfo_t info; 187 ··· 205 return; 206 } 207 /* Shit... */ 208 - printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n", 209 - sfsr, sfar); 210 die_if_kernel("Dax", regs); 211 } 212 ··· 218 force_sig_info(SIGSEGV, &info, current); 219 } 220 221 - void data_access_exception_tl1(struct pt_regs *regs, 222 - unsigned long sfsr, unsigned long sfar) 223 { 224 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs, 225 0, 0x30, SIGTRAP) == NOTIFY_STOP) 226 return; 227 228 dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); 229 - data_access_exception(regs, sfsr, sfar); 230 } 231 232 #ifdef CONFIG_PCI ··· 261 : "memory"); 262 } 263 264 - void do_iae(struct pt_regs *regs) 265 { 266 - siginfo_t info; 267 - 268 - spitfire_clean_and_reenable_l1_caches(); 269 - 270 - if (notify_die(DIE_TRAP, "instruction access exception", regs, 271 - 0, 0x8, SIGTRAP) == NOTIFY_STOP) 272 - return; 273 - 274 - info.si_signo = SIGBUS; 275 - info.si_errno = 0; 276 - info.si_code = BUS_OBJERR; 277 - info.si_addr = (void *)0; 278 - info.si_trapno = 0; 279 - force_sig_info(SIGBUS, &info, current); 280 - } 281 - 282 - void do_dae(struct pt_regs *regs) 283 - { 284 - siginfo_t info; 285 - 286 - #ifdef CONFIG_PCI 287 - if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) { 288 - spitfire_clean_and_reenable_l1_caches(); 289 - 290 - pci_poke_faulted = 1; 291 - 292 - /* Why the fuck did they have to change this? */ 293 - if (tlb_type == cheetah || tlb_type == cheetah_plus) 294 - regs->tpc += 4; 295 - 296 - regs->tnpc = regs->tpc + 4; 297 - return; 298 - } 299 - #endif 300 - spitfire_clean_and_reenable_l1_caches(); 301 - 302 - if (notify_die(DIE_TRAP, "data access exception", regs, 303 - 0, 0x30, SIGTRAP) == NOTIFY_STOP) 304 - return; 305 - 306 - info.si_signo = SIGBUS; 307 - info.si_errno = 0; 308 - info.si_code = BUS_OBJERR; 309 - info.si_addr = (void *)0; 310 - info.si_trapno = 0; 311 - force_sig_info(SIGBUS, &info, current); 312 } 313 314 static char ecc_syndrome_table[] = { ··· 305 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a 306 }; 307 308 - /* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status 309 - * in the following format. The AFAR is left as is, with 310 - * reserved bits cleared, and is a raw 40-bit physical 311 - * address. 
312 - */ 313 - #define CE_STATUS_UDBH_UE (1UL << (43 + 9)) 314 - #define CE_STATUS_UDBH_CE (1UL << (43 + 8)) 315 - #define CE_STATUS_UDBH_ESYNDR (0xffUL << 43) 316 - #define CE_STATUS_UDBH_SHIFT 43 317 - #define CE_STATUS_UDBL_UE (1UL << (33 + 9)) 318 - #define CE_STATUS_UDBL_CE (1UL << (33 + 8)) 319 - #define CE_STATUS_UDBL_ESYNDR (0xffUL << 33) 320 - #define CE_STATUS_UDBL_SHIFT 33 321 - #define CE_STATUS_AFSR_MASK (0x1ffffffffUL) 322 - #define CE_STATUS_AFSR_ME (1UL << 32) 323 - #define CE_STATUS_AFSR_PRIV (1UL << 31) 324 - #define CE_STATUS_AFSR_ISAP (1UL << 30) 325 - #define CE_STATUS_AFSR_ETP (1UL << 29) 326 - #define CE_STATUS_AFSR_IVUE (1UL << 28) 327 - #define CE_STATUS_AFSR_TO (1UL << 27) 328 - #define CE_STATUS_AFSR_BERR (1UL << 26) 329 - #define CE_STATUS_AFSR_LDP (1UL << 25) 330 - #define CE_STATUS_AFSR_CP (1UL << 24) 331 - #define CE_STATUS_AFSR_WP (1UL << 23) 332 - #define CE_STATUS_AFSR_EDP (1UL << 22) 333 - #define CE_STATUS_AFSR_UE (1UL << 21) 334 - #define CE_STATUS_AFSR_CE (1UL << 20) 335 - #define CE_STATUS_AFSR_ETS (0xfUL << 16) 336 - #define CE_STATUS_AFSR_ETS_SHIFT 16 337 - #define CE_STATUS_AFSR_PSYND (0xffffUL << 0) 338 - #define CE_STATUS_AFSR_PSYND_SHIFT 0 339 - 340 - /* Layout of Ecache TAG Parity Syndrome of AFSR */ 341 - #define AFSR_ETSYNDROME_7_0 0x1UL /* E$-tag bus bits <7:0> */ 342 - #define AFSR_ETSYNDROME_15_8 0x2UL /* E$-tag bus bits <15:8> */ 343 - #define AFSR_ETSYNDROME_21_16 0x4UL /* E$-tag bus bits <21:16> */ 344 - #define AFSR_ETSYNDROME_24_22 0x8UL /* E$-tag bus bits <24:22> */ 345 - 346 static char *syndrome_unknown = "<Unknown>"; 347 348 - asmlinkage void cee_log(unsigned long ce_status, 349 - unsigned long afar, 350 - struct pt_regs *regs) 351 { 352 - char memmod_str[64]; 353 - char *p; 354 - unsigned short scode, udb_reg; 355 356 - printk(KERN_WARNING "CPU[%d]: Correctable ECC Error " 357 - "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n", 358 - smp_processor_id(), 359 - (ce_status & CE_STATUS_AFSR_MASK), 360 - afar, 361 - ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL), 362 - ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL)); 363 - 364 - udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL); 365 - if (udb_reg & (1 << 8)) { 366 - scode = ecc_syndrome_table[udb_reg & 0xff]; 367 if (prom_getunumber(scode, afar, 368 memmod_str, sizeof(memmod_str)) == -1) 369 p = syndrome_unknown; ··· 324 smp_processor_id(), scode, p); 325 } 326 327 - udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL); 328 - if (udb_reg & (1 << 8)) { 329 - scode = ecc_syndrome_table[udb_reg & 0xff]; 330 if (prom_getunumber(scode, afar, 331 memmod_str, sizeof(memmod_str)) == -1) 332 p = syndrome_unknown; ··· 334 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] " 335 "Memory Module \"%s\"\n", 336 smp_processor_id(), scode, p); 337 } 338 } 339
··· 33 #include <asm/dcu.h> 34 #include <asm/estate.h> 35 #include <asm/chafsr.h> 36 + #include <asm/sfafsr.h> 37 #include <asm/psrcompat.h> 38 #include <asm/processor.h> 39 #include <asm/timer.h> ··· 143 } 144 #endif 145 146 + void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) 147 { 148 siginfo_t info; 149 ··· 153 return; 154 155 if (regs->tstate & TSTATE_PRIV) { 156 + printk("spitfire_insn_access_exception: SFSR[%016lx] " 157 + "SFAR[%016lx], going.\n", sfsr, sfar); 158 die_if_kernel("Iax", regs); 159 } 160 if (test_thread_flag(TIF_32BIT)) { ··· 169 force_sig_info(SIGSEGV, &info, current); 170 } 171 172 + void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) 173 { 174 if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, 175 0, 0x8, SIGTRAP) == NOTIFY_STOP) 176 return; 177 178 dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); 179 + spitfire_insn_access_exception(regs, sfsr, sfar); 180 } 181 182 + void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) 183 { 184 siginfo_t info; 185 ··· 207 return; 208 } 209 /* Shit... */ 210 + printk("spitfire_data_access_exception: SFSR[%016lx] " 211 + "SFAR[%016lx], going.\n", sfsr, sfar); 212 die_if_kernel("Dax", regs); 213 } 214 ··· 220 force_sig_info(SIGSEGV, &info, current); 221 } 222 223 + void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) 224 { 225 if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs, 226 0, 0x30, SIGTRAP) == NOTIFY_STOP) 227 return; 228 229 dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); 230 + spitfire_data_access_exception(regs, sfsr, sfar); 231 } 232 233 #ifdef CONFIG_PCI ··· 264 : "memory"); 265 } 266 267 + static void spitfire_enable_estate_errors(void) 268 { 269 + __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" 270 + "membar #Sync" 271 + : /* no outputs */ 272 + : "r" (ESTATE_ERR_ALL), 273 + "i" (ASI_ESTATE_ERROR_EN)); 274 } 275 276 static char ecc_syndrome_table[] = { ··· 349 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a 350 }; 351 352 static char *syndrome_unknown = "<Unknown>"; 353 354 + static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit) 355 { 356 + unsigned short scode; 357 + char memmod_str[64], *p; 358 359 + if (udbl & bit) { 360 + scode = ecc_syndrome_table[udbl & 0xff]; 361 if (prom_getunumber(scode, afar, 362 memmod_str, sizeof(memmod_str)) == -1) 363 p = syndrome_unknown; ··· 418 smp_processor_id(), scode, p); 419 } 420 421 + if (udbh & bit) { 422 + scode = ecc_syndrome_table[udbh & 0xff]; 423 if (prom_getunumber(scode, afar, 424 memmod_str, sizeof(memmod_str)) == -1) 425 p = syndrome_unknown; ··· 429 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] " 430 "Memory Module \"%s\"\n", 431 smp_processor_id(), scode, p); 432 + } 433 + 434 + } 435 + 436 + static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs) 437 + { 438 + 439 + printk(KERN_WARNING "CPU[%d]: Correctable ECC Error " 440 + "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n", 441 + smp_processor_id(), afsr, afar, udbl, udbh, tl1); 442 + 443 + spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE); 444 + 445 + /* We always log it, even if someone is listening for this 446 + * trap. 
447 + */ 448 + notify_die(DIE_TRAP, "Correctable ECC Error", regs, 449 + 0, TRAP_TYPE_CEE, SIGTRAP); 450 + 451 + /* The Correctable ECC Error trap does not disable I/D caches. So 452 + * we only have to restore the ESTATE Error Enable register. 453 + */ 454 + spitfire_enable_estate_errors(); 455 + } 456 + 457 + static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs) 458 + { 459 + siginfo_t info; 460 + 461 + printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] " 462 + "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n", 463 + smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1); 464 + 465 + /* XXX add more human friendly logging of the error status 466 + * XXX as is implemented for cheetah 467 + */ 468 + 469 + spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE); 470 + 471 + /* We always log it, even if someone is listening for this 472 + * trap. 473 + */ 474 + notify_die(DIE_TRAP, "Uncorrectable Error", regs, 475 + 0, tt, SIGTRAP); 476 + 477 + if (regs->tstate & TSTATE_PRIV) { 478 + if (tl1) 479 + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); 480 + die_if_kernel("UE", regs); 481 + } 482 + 483 + /* XXX need more intelligent processing here, such as is implemented 484 + * XXX for cheetah errors, in fact if the E-cache still holds the 485 + * XXX line with bad parity this will loop 486 + */ 487 + 488 + spitfire_clean_and_reenable_l1_caches(); 489 + spitfire_enable_estate_errors(); 490 + 491 + if (test_thread_flag(TIF_32BIT)) { 492 + regs->tpc &= 0xffffffff; 493 + regs->tnpc &= 0xffffffff; 494 + } 495 + info.si_signo = SIGBUS; 496 + info.si_errno = 0; 497 + info.si_code = BUS_OBJERR; 498 + info.si_addr = (void *)0; 499 + info.si_trapno = 0; 500 + force_sig_info(SIGBUS, &info, current); 501 + } 502 + 503 + void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar) 504 + { 505 + unsigned long afsr, tt, udbh, udbl; 506 + int tl1; 507 + 508 + afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT; 509 + tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT; 510 + tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0; 511 + udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT; 512 + udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT; 513 + 514 + #ifdef CONFIG_PCI 515 + if (tt == TRAP_TYPE_DAE && 516 + pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) { 517 + spitfire_clean_and_reenable_l1_caches(); 518 + spitfire_enable_estate_errors(); 519 + 520 + pci_poke_faulted = 1; 521 + regs->tnpc = regs->tpc + 4; 522 + return; 523 + } 524 + #endif 525 + 526 + if (afsr & SFAFSR_UE) 527 + spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs); 528 + 529 + if (tt == TRAP_TYPE_CEE) { 530 + /* Handle the case where we took a CEE trap, but ACK'd 531 + * only the UE state in the UDB error registers. 532 + */ 533 + if (afsr & SFAFSR_UE) { 534 + if (udbh & UDBE_CE) { 535 + __asm__ __volatile__( 536 + "stxa %0, [%1] %2\n\t" 537 + "membar #Sync" 538 + : /* no outputs */ 539 + : "r" (udbh & UDBE_CE), 540 + "r" (0x0), "i" (ASI_UDB_ERROR_W)); 541 + } 542 + if (udbl & UDBE_CE) { 543 + __asm__ __volatile__( 544 + "stxa %0, [%1] %2\n\t" 545 + "membar #Sync" 546 + : /* no outputs */ 547 + : "r" (udbl & UDBE_CE), 548 + "r" (0x18), "i" (ASI_UDB_ERROR_W)); 549 + } 550 + } 551 + 552 + spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs); 553 } 554 } 555
+16 -11
arch/sparc64/kernel/ttable.S
··· 18 tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3) 19 tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) 20 tl0_iax: membar #Sync 21 - TRAP_NOSAVE_7INSNS(__do_instruction_access_exception) 22 tl0_resv009: BTRAP(0x9) 23 - tl0_iae: TRAP(do_iae) 24 tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) 25 tl0_ill: membar #Sync 26 TRAP_7INSNS(do_illegal_instruction) ··· 37 tl0_div0: TRAP(do_div0) 38 tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) 39 tl0_resv02f: BTRAP(0x2f) 40 - tl0_dax: TRAP_NOSAVE(__do_data_access_exception) 41 tl0_resv031: BTRAP(0x31) 42 - tl0_dae: TRAP(do_dae) 43 tl0_resv033: BTRAP(0x33) 44 tl0_mna: TRAP_NOSAVE(do_mna) 45 tl0_lddfmna: TRAP_NOSAVE(do_lddfmna) ··· 75 tl0_ivec: TRAP_IVEC 76 tl0_paw: TRAP(do_paw) 77 tl0_vaw: TRAP(do_vaw) 78 - tl0_cee: TRAP_NOSAVE(cee_trap) 79 tl0_iamiss: 80 #include "itlb_base.S" 81 tl0_damiss: ··· 178 sparc64_ttable_tl1: 179 tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) 180 tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7) 181 - tl1_iax: TRAP_NOSAVE(__do_instruction_access_exception_tl1) 182 tl1_resv009: BTRAPTL1(0x9) 183 - tl1_iae: TRAPTL1(do_iae_tl1) 184 tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) 185 tl1_ill: TRAPTL1(do_ill_tl1) 186 tl1_privop: BTRAPTL1(0x11) ··· 197 tl1_div0: TRAPTL1(do_div0_tl1) 198 tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) 199 tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) 200 - tl1_dax: TRAP_NOSAVE(__do_data_access_exception_tl1) 201 tl1_resv031: BTRAPTL1(0x31) 202 - tl1_dae: TRAPTL1(do_dae_tl1) 203 tl1_resv033: BTRAPTL1(0x33) 204 tl1_mna: TRAP_NOSAVE(do_mna) 205 tl1_lddfmna: TRAPTL1(do_lddfmna_tl1) ··· 224 tl1_vaw: TRAPTL1(do_vaw_tl1) 225 226 /* The grotty trick to save %g1 into current->thread.cee_stuff 227 - * is because when we take this trap we could be interrupting trap 228 - * code already using the trap alternate global registers. 229 * 230 * We cross our fingers and pray that this store/load does 231 * not cause yet another CEE trap.
··· 18 tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3) 19 tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) 20 tl0_iax: membar #Sync 21 + TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception) 22 tl0_resv009: BTRAP(0x9) 23 + tl0_iae: membar #Sync 24 + TRAP_NOSAVE_7INSNS(__spitfire_access_error) 25 tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) 26 tl0_ill: membar #Sync 27 TRAP_7INSNS(do_illegal_instruction) ··· 36 tl0_div0: TRAP(do_div0) 37 tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) 38 tl0_resv02f: BTRAP(0x2f) 39 + tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception) 40 tl0_resv031: BTRAP(0x31) 41 + tl0_dae: membar #Sync 42 + TRAP_NOSAVE_7INSNS(__spitfire_access_error) 43 tl0_resv033: BTRAP(0x33) 44 tl0_mna: TRAP_NOSAVE(do_mna) 45 tl0_lddfmna: TRAP_NOSAVE(do_lddfmna) ··· 73 tl0_ivec: TRAP_IVEC 74 tl0_paw: TRAP(do_paw) 75 tl0_vaw: TRAP(do_vaw) 76 + tl0_cee: membar #Sync 77 + TRAP_NOSAVE_7INSNS(__spitfire_cee_trap) 78 tl0_iamiss: 79 #include "itlb_base.S" 80 tl0_damiss: ··· 175 sparc64_ttable_tl1: 176 tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) 177 tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7) 178 + tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1) 179 tl1_resv009: BTRAPTL1(0x9) 180 + tl1_iae: membar #Sync 181 + TRAP_NOSAVE_7INSNS(__spitfire_access_error) 182 tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) 183 tl1_ill: TRAPTL1(do_ill_tl1) 184 tl1_privop: BTRAPTL1(0x11) ··· 193 tl1_div0: TRAPTL1(do_div0_tl1) 194 tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) 195 tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) 196 + tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1) 197 tl1_resv031: BTRAPTL1(0x31) 198 + tl1_dae: membar #Sync 199 + TRAP_NOSAVE_7INSNS(__spitfire_access_error) 200 tl1_resv033: BTRAPTL1(0x33) 201 tl1_mna: TRAP_NOSAVE(do_mna) 202 tl1_lddfmna: TRAPTL1(do_lddfmna_tl1) ··· 219 tl1_vaw: TRAPTL1(do_vaw_tl1) 220 221 /* The grotty trick to save %g1 into current->thread.cee_stuff 222 + * is because when we take this trap we could be interrupting 223 + * trap code already using the trap alternate global registers. 224 * 225 * We cross our fingers and pray that this store/load does 226 * not cause yet another CEE trap.
+9 -9
arch/sparc64/kernel/unaligned.c
··· 349 350 extern void do_fpother(struct pt_regs *regs); 351 extern void do_privact(struct pt_regs *regs); 352 - extern void data_access_exception(struct pt_regs *regs, 353 - unsigned long sfsr, 354 - unsigned long sfar); 355 356 int handle_ldf_stq(u32 insn, struct pt_regs *regs) 357 { ··· 394 break; 395 } 396 default: 397 - data_access_exception(regs, 0, addr); 398 return 1; 399 } 400 if (put_user (first >> 32, (u32 __user *)addr) || 401 __put_user ((u32)first, (u32 __user *)(addr + 4)) || 402 __put_user (second >> 32, (u32 __user *)(addr + 8)) || 403 __put_user ((u32)second, (u32 __user *)(addr + 12))) { 404 - data_access_exception(regs, 0, addr); 405 return 1; 406 } 407 } else { ··· 414 do_privact(regs); 415 return 1; 416 } else if (asi > ASI_SNFL) { 417 - data_access_exception(regs, 0, addr); 418 return 1; 419 } 420 switch (insn & 0x180000) { ··· 431 err |= __get_user (data[i], (u32 __user *)(addr + 4*i)); 432 } 433 if (err && !(asi & 0x2 /* NF */)) { 434 - data_access_exception(regs, 0, addr); 435 return 1; 436 } 437 if (asi & 0x8) /* Little */ { ··· 534 *(u64 *)(f->regs + freg) = value; 535 current_thread_info()->fpsaved[0] |= flag; 536 } else { 537 - daex: data_access_exception(regs, sfsr, sfar); 538 return; 539 } 540 advance(regs); ··· 578 __put_user ((u32)value, (u32 __user *)(sfar + 4))) 579 goto daex; 580 } else { 581 - daex: data_access_exception(regs, sfsr, sfar); 582 return; 583 } 584 advance(regs);
··· 349 350 extern void do_fpother(struct pt_regs *regs); 351 extern void do_privact(struct pt_regs *regs); 352 + extern void spitfire_data_access_exception(struct pt_regs *regs, 353 + unsigned long sfsr, 354 + unsigned long sfar); 355 356 int handle_ldf_stq(u32 insn, struct pt_regs *regs) 357 { ··· 394 break; 395 } 396 default: 397 + spitfire_data_access_exception(regs, 0, addr); 398 return 1; 399 } 400 if (put_user (first >> 32, (u32 __user *)addr) || 401 __put_user ((u32)first, (u32 __user *)(addr + 4)) || 402 __put_user (second >> 32, (u32 __user *)(addr + 8)) || 403 __put_user ((u32)second, (u32 __user *)(addr + 12))) { 404 + spitfire_data_access_exception(regs, 0, addr); 405 return 1; 406 } 407 } else { ··· 414 do_privact(regs); 415 return 1; 416 } else if (asi > ASI_SNFL) { 417 + spitfire_data_access_exception(regs, 0, addr); 418 return 1; 419 } 420 switch (insn & 0x180000) { ··· 431 err |= __get_user (data[i], (u32 __user *)(addr + 4*i)); 432 } 433 if (err && !(asi & 0x2 /* NF */)) { 434 + spitfire_data_access_exception(regs, 0, addr); 435 return 1; 436 } 437 if (asi & 0x8) /* Little */ { ··· 534 *(u64 *)(f->regs + freg) = value; 535 current_thread_info()->fpsaved[0] |= flag; 536 } else { 537 + daex: spitfire_data_access_exception(regs, sfsr, sfar); 538 return; 539 } 540 advance(regs); ··· 578 __put_user ((u32)value, (u32 __user *)(sfar + 4))) 579 goto daex; 580 } else { 581 + daex: spitfire_data_access_exception(regs, sfsr, sfar); 582 return; 583 } 584 advance(regs);
+3 -3
arch/sparc64/kernel/winfixup.S
··· 318 nop 319 rdpr %pstate, %l1 ! Prepare to change globals. 320 mov %g4, %o1 ! Setup args for 321 - mov %g5, %o2 ! final call to data_access_exception. 322 andn %l1, PSTATE_MM, %l1 ! We want to be in RMO 323 324 mov %g6, %o7 ! Stash away current. ··· 330 mov TSB_REG, %g1 331 ldxa [%g1] ASI_IMMU, %g5 332 #endif 333 - call data_access_exception 334 add %sp, PTREGS_OFF, %o0 335 336 b,pt %xcc, rtrap ··· 391 109: or %g7, %lo(109b), %g7 392 mov %l4, %o1 393 mov %l5, %o2 394 - call data_access_exception 395 add %sp, PTREGS_OFF, %o0 396 ba,pt %xcc, rtrap 397 clr %l6
··· 318 nop 319 rdpr %pstate, %l1 ! Prepare to change globals. 320 mov %g4, %o1 ! Setup args for 321 + mov %g5, %o2 ! final call to spitfire_data_access_exception. 322 andn %l1, PSTATE_MM, %l1 ! We want to be in RMO 323 324 mov %g6, %o7 ! Stash away current. ··· 330 mov TSB_REG, %g1 331 ldxa [%g1] ASI_IMMU, %g5 332 #endif 333 + call spitfire_data_access_exception 334 add %sp, PTREGS_OFF, %o0 335 336 b,pt %xcc, rtrap ··· 391 109: or %g7, %lo(109b), %g7 392 mov %l4, %o1 393 mov %l5, %o2 394 + call spitfire_data_access_exception 395 add %sp, PTREGS_OFF, %o0 396 ba,pt %xcc, rtrap 397 clr %l6
+82
include/asm-sparc64/sfafsr.h
···
··· 1 + #ifndef _SPARC64_SFAFSR_H 2 + #define _SPARC64_SFAFSR_H 3 + 4 + #include <asm/const.h> 5 + 6 + /* Spitfire Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */ 7 + 8 + #define SFAFSR_ME (_AC(1,UL) << SFAFSR_ME_SHIFT) 9 + #define SFAFSR_ME_SHIFT 32 10 + #define SFAFSR_PRIV (_AC(1,UL) << SFAFSR_PRIV_SHIFT) 11 + #define SFAFSR_PRIV_SHIFT 31 12 + #define SFAFSR_ISAP (_AC(1,UL) << SFAFSR_ISAP_SHIFT) 13 + #define SFAFSR_ISAP_SHIFT 30 14 + #define SFAFSR_ETP (_AC(1,UL) << SFAFSR_ETP_SHIFT) 15 + #define SFAFSR_ETP_SHIFT 29 16 + #define SFAFSR_IVUE (_AC(1,UL) << SFAFSR_IVUE_SHIFT) 17 + #define SFAFSR_IVUE_SHIFT 28 18 + #define SFAFSR_TO (_AC(1,UL) << SFAFSR_TO_SHIFT) 19 + #define SFAFSR_TO_SHIFT 27 20 + #define SFAFSR_BERR (_AC(1,UL) << SFAFSR_BERR_SHIFT) 21 + #define SFAFSR_BERR_SHIFT 26 22 + #define SFAFSR_LDP (_AC(1,UL) << SFAFSR_LDP_SHIFT) 23 + #define SFAFSR_LDP_SHIFT 25 24 + #define SFAFSR_CP (_AC(1,UL) << SFAFSR_CP_SHIFT) 25 + #define SFAFSR_CP_SHIFT 24 26 + #define SFAFSR_WP (_AC(1,UL) << SFAFSR_WP_SHIFT) 27 + #define SFAFSR_WP_SHIFT 23 28 + #define SFAFSR_EDP (_AC(1,UL) << SFAFSR_EDP_SHIFT) 29 + #define SFAFSR_EDP_SHIFT 22 30 + #define SFAFSR_UE (_AC(1,UL) << SFAFSR_UE_SHIFT) 31 + #define SFAFSR_UE_SHIFT 21 32 + #define SFAFSR_CE (_AC(1,UL) << SFAFSR_CE_SHIFT) 33 + #define SFAFSR_CE_SHIFT 20 34 + #define SFAFSR_ETS (_AC(0xf,UL) << SFAFSR_ETS_SHIFT) 35 + #define SFAFSR_ETS_SHIFT 16 36 + #define SFAFSR_PSYND (_AC(0xffff,UL) << SFAFSR_PSYND_SHIFT) 37 + #define SFAFSR_PSYND_SHIFT 0 38 + 39 + /* UDB Error Register, ASI=0x7f VA<63:0>=0x0(High),0x18(Low) for read 40 + * ASI=0x77 VA<63:0>=0x0(High),0x18(Low) for write 41 + */ 42 + 43 + #define UDBE_UE (_AC(1,UL) << 9) 44 + #define UDBE_CE (_AC(1,UL) << 8) 45 + #define UDBE_E_SYNDR (_AC(0xff,UL) << 0) 46 + 47 + /* The trap handlers for asynchronous errors encode the AFSR and 48 + * other pieces of information into a 64-bit argument for C code 49 + * laid out as follows: 50 + * 51 + * ----------------------------------------------- 52 + * | UDB_H | UDB_L | TL>1 | TT | AFSR | 53 + * ----------------------------------------------- 54 + * 63 54 53 44 42 41 33 32 0 55 + * 56 + * The AFAR is passed in unchanged. 57 + */ 58 + #define SFSTAT_UDBH_MASK (_AC(0x3ff,UL) << SFSTAT_UDBH_SHIFT) 59 + #define SFSTAT_UDBH_SHIFT 54 60 + #define SFSTAT_UDBL_MASK (_AC(0x3ff,UL) << SFSTAT_UDBL_SHIFT) 61 + #define SFSTAT_UDBL_SHIFT 44 62 + #define SFSTAT_TL_GT_ONE (_AC(1,UL) << SFSTAT_TL_GT_ONE_SHIFT) 63 + #define SFSTAT_TL_GT_ONE_SHIFT 42 64 + #define SFSTAT_TRAP_TYPE (_AC(0x1FF,UL) << SFSTAT_TRAP_TYPE_SHIFT) 65 + #define SFSTAT_TRAP_TYPE_SHIFT 33 66 + #define SFSTAT_AFSR_MASK (_AC(0x1ffffffff,UL) << SFSTAT_AFSR_SHIFT) 67 + #define SFSTAT_AFSR_SHIFT 0 68 + 69 + /* ESTATE Error Enable Register, ASI=0x4b VA<63:0>=0x0 */ 70 + #define ESTATE_ERR_CE 0x1 /* Correctable errors */ 71 + #define ESTATE_ERR_NCE 0x2 /* TO, BERR, LDP, ETP, EDP, WP, UE, IVUE */ 72 + #define ESTATE_ERR_ISAP 0x4 /* System address parity error */ 73 + #define ESTATE_ERR_ALL (ESTATE_ERR_CE | \ 74 + ESTATE_ERR_NCE | \ 75 + ESTATE_ERR_ISAP) 76 + 77 + /* The various trap types that report using the above state. */ 78 + #define TRAP_TYPE_IAE 0x09 /* Instruction Access Error */ 79 + #define TRAP_TYPE_DAE 0x32 /* Data Access Error */ 80 + #define TRAP_TYPE_CEE 0x63 /* Correctable ECC Error */ 81 + 82 + #endif /* _SPARC64_SFAFSR_H */
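
As a usage sketch under the layout above, this is how a consumer might test for an uncorrectable error and extract the UDB-Low ECC syndrome from the encoded status word. The helper names are hypothetical; the decode itself matches spitfire_access_error in traps.c, and the syndrome bits index ecc_syndrome_table[] there:

	#include <asm/sfafsr.h>

	/* Hypothetical decode helpers for the SFSTAT encoding above. */
	static inline int sfstat_is_ue(unsigned long status)
	{
		unsigned long afsr = (status & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;

		return (afsr & SFAFSR_UE) != 0;
	}

	static inline unsigned long sfstat_udbl_syndrome(unsigned long status)
	{
		unsigned long udbl = (status & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;

		/* ECC syndrome bits <7:0>, an index into ecc_syndrome_table[] */
		return udbl & UDBE_E_SYNDR;
	}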