Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] s390: improved machine check handling

Improved machine check handling. The kernel is now able to receive machine checks
while in kernel mode (during system call, interrupt and program check handling).
Register validation is now also performed.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Heiko Carstens; committed by Linus Torvalds.
77fa2245 f901e5d1

+576 -135
+93 -9
arch/s390/kernel/entry.S
··· 7 7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 8 8 * Hartmut Penner (hp@de.ibm.com), 9 9 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 10 + * Heiko Carstens <heiko.carstens@de.ibm.com> 10 11 */ 11 12 12 13 #include <linux/sys.h> ··· 50 49 SP_TRAP = STACK_FRAME_OVERHEAD + __PT_TRAP 51 50 SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE 52 51 53 - _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ 52 + _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING | \ 54 53 _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) 55 - _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED) 54 + _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) 56 55 57 56 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 58 57 STACK_SIZE = 1 << STACK_SHIFT ··· 122 121 bz BASED(stack_overflow) 123 122 3: 124 123 #endif 125 - 2: s %r15,BASED(.Lc_spsize) # make room for registers & psw 124 + 2: 125 + .endm 126 + 127 + .macro CREATE_STACK_FRAME psworg,savearea 128 + s %r15,BASED(.Lc_spsize) # make room for registers & psw 126 129 mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack 127 130 la %r12,\psworg 128 131 st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 ··· 166 161 be __switch_to_noper-__switch_to_base(%r1) # we got away w/o bashing TLB's 167 162 lctl %c9,%c11,__THREAD_per(%r3) # Nope we didn't 168 163 __switch_to_noper: 164 + l %r4,__THREAD_info(%r2) # get thread_info of prev 165 + tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending? 
166 + bz __switch_to_no_mcck-__switch_to_base(%r1) 167 + ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev 168 + l %r4,__THREAD_info(%r3) # get thread_info of next 169 + oi __TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next 170 + __switch_to_no_mcck: 169 171 stm %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task 170 172 st %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp 171 173 l %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp ··· 197 185 sysc_saveall: 198 186 SAVE_ALL_BASE __LC_SAVE_AREA 199 187 SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1 188 + CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 200 189 lh %r7,0x8a # get svc number from lowcore 201 190 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 202 191 sysc_vtime: ··· 247 234 # One of the work bits is on. Find out which one. 248 235 # 249 236 sysc_work: 237 + tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 238 + bo BASED(sysc_mcck_pending) 250 239 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 251 240 bo BASED(sysc_reschedule) 252 241 tm __TI_flags+3(%r9),_TIF_SIGPENDING ··· 266 251 l %r1,BASED(.Lschedule) 267 252 la %r14,BASED(sysc_work_loop) 268 253 br %r1 # call scheduler 254 + 255 + # 256 + # _TIF_MCCK_PENDING is set, call handler 257 + # 258 + sysc_mcck_pending: 259 + l %r1,BASED(.Ls390_handle_mcck) 260 + la %r14,BASED(sysc_work_loop) 261 + br %r1 # TIF bit will be cleared by handler 269 262 270 263 # 271 264 # _TIF_SIGPENDING is set, call do_signal ··· 453 430 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception 454 431 bnz BASED(pgm_per) # got per exception -> special case 455 432 SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1 433 + CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 456 434 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 457 435 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
458 436 bz BASED(pgm_no_vtime) ··· 492 468 # 493 469 pgm_per_std: 494 470 SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1 471 + CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 495 472 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 496 473 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 497 474 bz BASED(pgm_no_vtime2) ··· 518 493 # 519 494 pgm_svcper: 520 495 SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1 496 + CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 521 497 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 522 498 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 523 499 bz BASED(pgm_no_vtime3) ··· 547 521 stck __LC_INT_CLOCK 548 522 SAVE_ALL_BASE __LC_SAVE_AREA+16 549 523 SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+16,0 524 + CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 550 525 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 551 526 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 552 527 bz BASED(io_no_vtime) ··· 605 578 lr %r15,%r1 606 579 # 607 580 # One of the work bits is on. Find out which one. 608 - # Checked are: _TIF_SIGPENDING and _TIF_NEED_RESCHED 581 + # Checked are: _TIF_SIGPENDING, _TIF_NEED_RESCHED and _TIF_MCCK_PENDING 609 582 # 610 583 io_work_loop: 584 + tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 585 + bo BASED(io_mcck_pending) 611 586 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 612 587 bo BASED(io_reschedule) 613 588 tm __TI_flags+3(%r9),_TIF_SIGPENDING 614 589 bo BASED(io_sigpending) 615 590 b BASED(io_leave) 591 + 592 + # 593 + # _TIF_MCCK_PENDING is set, call handler 594 + # 595 + io_mcck_pending: 596 + l %r1,BASED(.Ls390_handle_mcck) 597 + l %r14,BASED(io_work_loop) 598 + br %r1 # TIF bit will be cleared by handler 616 599 617 600 # 618 601 # _TIF_NEED_RESCHED is set, call schedule ··· 658 621 stck __LC_INT_CLOCK 659 622 SAVE_ALL_BASE __LC_SAVE_AREA+16 660 623 SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16,0 624 + CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 661 625 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 662 626 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
663 627 bz BASED(ext_no_vtime) ··· 680 642 681 643 .globl mcck_int_handler 682 644 mcck_int_handler: 683 - STORE_TIMER __LC_ASYNC_ENTER_TIMER 645 + spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer 646 + lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs 684 647 SAVE_ALL_BASE __LC_SAVE_AREA+32 685 - SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32,0 648 + la %r12,__LC_MCK_OLD_PSW 649 + tm __LC_MCCK_CODE,0x80 # system damage? 650 + bo BASED(mcck_int_main) # yes -> rest of mcck code invalid 651 + tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 652 + bo BASED(0f) 653 + spt __LC_LAST_UPDATE_TIMER # revalidate cpu timer 686 654 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 687 - tm SP_PSW+1(%r15),0x01 # interrupting from user ? 655 + mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 656 + mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 657 + mvc __LC_LAST_UPDATE_TIMER(8),__LC_EXIT_TIMER 658 + 0: tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? 659 + bno BASED(mcck_no_vtime) # no -> skip cleanup critical 660 + tm __LC_MCK_OLD_PSW+1,0x01 # interrupting from user ? 688 661 bz BASED(mcck_no_vtime) 689 662 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 690 663 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 691 664 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 692 665 mcck_no_vtime: 693 666 #endif 667 + 0: 668 + tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 669 + bno BASED(mcck_int_main) # no -> skip cleanup critical 670 + tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit 671 + bnz BASED(mcck_int_main) # from user -> load async stack 672 + clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end) 673 + bhe BASED(mcck_int_main) 674 + clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start) 675 + bl BASED(mcck_int_main) 676 + l %r14,BASED(.Lcleanup_critical) 677 + basr %r14,%r14 678 + mcck_int_main: 679 + l %r14,__LC_PANIC_STACK # are we already on the panic stack? 
680 + slr %r14,%r15 681 + sra %r14,PAGE_SHIFT 682 + be BASED(0f) 683 + l %r15,__LC_PANIC_STACK # load panic stack 684 + 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32 685 + l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 686 + la %r2,SP_PTREGS(%r15) # load pt_regs 694 687 l %r1,BASED(.Ls390_mcck) 695 - basr %r14,%r1 # call machine check handler 688 + basr %r14,%r1 # call machine check handler 689 + tm SP_PSW+1(%r15),0x01 # returning to user ? 690 + bno BASED(mcck_return) 691 + l %r1,__LC_KERNEL_STACK # switch to kernel stack 692 + s %r1,BASED(.Lc_spsize) 693 + mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 694 + xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain 695 + lr %r15,%r1 696 + stosm __SF_EMPTY(%r15),0x04 # turn dat on 697 + tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 698 + bno BASED(mcck_return) 699 + l %r1,BASED(.Ls390_handle_mcck) 700 + basr %r14,%r1 # call machine check handler 696 701 mcck_return: 697 702 RESTORE_ALL 0 698 703 ··· 823 742 clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop) 824 743 bl BASED(0f) 825 744 clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4) 826 - bl BASED(cleanup_sysc_leave) 745 + bl BASED(cleanup_sysc_return) 827 746 0: 828 747 br %r14 829 748 ··· 841 760 mvc __LC_SAVE_AREA(16),__LC_SAVE_AREA+16 842 761 0: st %r13,__LC_SAVE_AREA+20 843 762 SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1 763 + CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 844 764 st %r15,__LC_SAVE_AREA+28 845 765 lh %r7,0x8a 846 766 #ifdef CONFIG_VIRT_CPU_ACCOUNTING ··· 916 834 * Symbol constants 917 835 */ 918 836 .Ls390_mcck: .long s390_do_machine_check 837 + .Ls390_handle_mcck: 838 + .long s390_handle_mcck 919 839 .Ldo_IRQ: .long do_IRQ 920 840 .Ldo_extint: .long do_extint 921 841 .Ldo_signal: .long do_signal
+88 -9
arch/s390/kernel/entry64.S
··· 7 7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 8 8 * Hartmut Penner (hp@de.ibm.com), 9 9 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 10 + * Heiko Carstens <heiko.carstens@de.ibm.com> 10 11 */ 11 12 12 13 #include <linux/sys.h> ··· 53 52 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 54 53 STACK_SIZE = 1 << STACK_SHIFT 55 54 56 - _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ 55 + _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING | \ 57 56 _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) 58 - _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED) 57 + _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) 59 58 60 59 #define BASED(name) name-system_call(%r13) 61 60 ··· 115 114 jz stack_overflow 116 115 3: 117 116 #endif 118 - 2: aghi %r15,-SP_SIZE # make room for registers & psw 117 + 2: 118 + .endm 119 + 120 + .macro CREATE_STACK_FRAME psworg,savearea 121 + aghi %r15,-SP_SIZE # make room for registers & psw 119 122 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack 120 123 la %r12,\psworg 121 124 stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 ··· 157 152 je __switch_to_noper # we got away without bashing TLB's 158 153 lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't 159 154 __switch_to_noper: 155 + lg %r4,__THREAD_info(%r2) # get thread_info of prev 156 + tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending? 
157 + jz __switch_to_no_mcck 158 + ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev 159 + lg %r4,__THREAD_info(%r3) # get thread_info of next 160 + oi __TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next 161 + __switch_to_no_mcck: 160 162 stmg %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task 161 163 stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp 162 164 lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp ··· 188 176 sysc_saveall: 189 177 SAVE_ALL_BASE __LC_SAVE_AREA 190 178 SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1 179 + CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 191 180 llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore 192 181 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 193 182 sysc_vtime: ··· 245 232 # One of the work bits is on. Find out which one. 246 233 # 247 234 sysc_work: 235 + tm __TI_flags+7(%r9),_TIF_MCCK_PENDING 236 + jo sysc_mcck_pending 248 237 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED 249 238 jo sysc_reschedule 250 239 tm __TI_flags+7(%r9),_TIF_SIGPENDING ··· 263 248 sysc_reschedule: 264 249 larl %r14,sysc_work_loop 265 250 jg schedule # return point is sysc_return 251 + 252 + # 253 + # _TIF_MCCK_PENDING is set, call handler 254 + # 255 + sysc_mcck_pending: 256 + larl %r14,sysc_work_loop 257 + jg s390_handle_mcck # TIF bit will be cleared by handler 266 258 267 259 # 268 260 # _TIF_SIGPENDING is set, call do_signal ··· 496 474 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception 497 475 jnz pgm_per # got per exception -> special case 498 476 SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1 477 + CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 499 478 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 500 479 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
501 480 jz pgm_no_vtime ··· 535 512 # 536 513 pgm_per_std: 537 514 SAVE_ALL __LC_PGM_OLD_PSW,__LC_SAVE_AREA,1 515 + CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 538 516 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 539 517 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 540 518 jz pgm_no_vtime2 ··· 561 537 # 562 538 pgm_svcper: 563 539 SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1 540 + CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 564 541 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 565 542 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 566 543 jz pgm_no_vtime3 ··· 589 564 stck __LC_INT_CLOCK 590 565 SAVE_ALL_BASE __LC_SAVE_AREA+32 591 566 SAVE_ALL __LC_IO_OLD_PSW,__LC_SAVE_AREA+32,0 567 + CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 592 568 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 593 569 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 594 570 jz io_no_vtime ··· 647 621 lgr %r15,%r1 648 622 # 649 623 # One of the work bits is on. Find out which one. 650 - # Checked are: _TIF_SIGPENDING and _TIF_NEED_RESCHED 624 + # Checked are: _TIF_SIGPENDING, _TIF_NEED_RESCHED and _TIF_MCCK_PENDING 651 625 # 652 626 io_work_loop: 627 + tm __TI_flags+7(%r9),_TIF_MCCK_PENDING 628 + jo io_mcck_pending 653 629 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED 654 630 jo io_reschedule 655 631 tm __TI_flags+7(%r9),_TIF_SIGPENDING 656 632 jo io_sigpending 657 633 j io_leave 634 + 635 + # 636 + # _TIF_MCCK_PENDING is set, call handler 637 + # 638 + io_mcck_pending: 639 + larl %r14,io_work_loop 640 + jg s390_handle_mcck # TIF bit will be cleared by handler 658 641 659 642 # 660 643 # _TIF_NEED_RESCHED is set, call schedule ··· 696 661 stck __LC_INT_CLOCK 697 662 SAVE_ALL_BASE __LC_SAVE_AREA+32 698 663 SAVE_ALL __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32,0 664 + CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 699 665 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 700 666 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
701 667 jz ext_no_vtime ··· 716 680 */ 717 681 .globl mcck_int_handler 718 682 mcck_int_handler: 719 - STORE_TIMER __LC_ASYNC_ENTER_TIMER 683 + la %r1,4095 # revalidate r1 684 + spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer 685 + lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs 720 686 SAVE_ALL_BASE __LC_SAVE_AREA+64 721 - SAVE_ALL __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64,0 687 + la %r12,__LC_MCK_OLD_PSW 688 + tm __LC_MCCK_CODE,0x80 # system damage? 689 + jo mcck_int_main # yes -> rest of mcck code invalid 690 + tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 691 + jo 0f 692 + spt __LC_LAST_UPDATE_TIMER 722 693 #ifdef CONFIG_VIRT_CPU_ACCOUNTING 723 - tm SP_PSW+1(%r15),0x01 # interrupting from user ? 694 + mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 695 + mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 696 + mvc __LC_LAST_UPDATE_TIMER(8),__LC_EXIT_TIMER 697 + 0: tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? 698 + jno mcck_no_vtime # no -> no timer update 699 + tm __LC_MCK_OLD_PSW+1,0x01 # interrupting from user ? 724 700 jz mcck_no_vtime 725 701 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 726 702 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 727 703 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 728 704 mcck_no_vtime: 729 705 #endif 730 - brasl %r14,s390_do_machine_check 706 + 0: 707 + tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 708 + jno mcck_int_main # no -> skip cleanup critical 709 + tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit 710 + jnz mcck_int_main # from user -> load kernel stack 711 + clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end) 712 + jhe mcck_int_main 713 + clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start) 714 + jl mcck_int_main 715 + brasl %r14,cleanup_critical 716 + mcck_int_main: 717 + lg %r14,__LC_PANIC_STACK # are we already on the panic stack? 
718 + slgr %r14,%r15 719 + srag %r14,%r14,PAGE_SHIFT 720 + jz 0f 721 + lg %r15,__LC_PANIC_STACK # load panic stack 722 + 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64 723 + lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 724 + la %r2,SP_PTREGS(%r15) # load pt_regs 725 + brasl %r14,s390_do_machine_check 726 + tm SP_PSW+1(%r15),0x01 # returning to user ? 727 + jno mcck_return 728 + lg %r1,__LC_KERNEL_STACK # switch to kernel stack 729 + aghi %r1,-SP_SIZE 730 + mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 731 + xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain 732 + lgr %r15,%r1 733 + stosm __SF_EMPTY(%r15),0x04 # turn dat on 734 + tm __TI_flags+7(%r9),_TIF_MCCK_PENDING 735 + jno mcck_return 736 + brasl %r14,s390_handle_mcck 731 737 mcck_return: 732 738 RESTORE_ALL 0 733 739 ··· 853 775 clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop) 854 776 jl 0f 855 777 clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8) 856 - jl cleanup_sysc_leave 778 + jl cleanup_sysc_return 857 779 0: 858 780 br %r14 859 781 ··· 871 793 mvc __LC_SAVE_AREA(32),__LC_SAVE_AREA+32 872 794 0: stg %r13,__LC_SAVE_AREA+40 873 795 SAVE_ALL __LC_SVC_OLD_PSW,__LC_SAVE_AREA,1 796 + CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 874 797 stg %r15,__LC_SAVE_AREA+56 875 798 llgh %r7,__LC_SVC_INT_CODE 876 799 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
+12 -34
arch/s390/kernel/process.c
··· 91 91 (void *)(long) smp_processor_id()); 92 92 } 93 93 94 + extern void s390_handle_mcck(void); 94 95 /* 95 96 * The idle loop on a S390... 96 97 */ 97 98 void default_idle(void) 98 99 { 99 - psw_t wait_psw; 100 - unsigned long reg; 101 100 int cpu, rc; 102 101 103 102 local_irq_disable(); ··· 124 125 cpu_die(); 125 126 #endif 126 127 127 - /* 128 - * Wait for external, I/O or machine check interrupt and 129 - * switch off machine check bit after the wait has ended. 130 - */ 131 - wait_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK | PSW_MASK_WAIT | 132 - PSW_MASK_IO | PSW_MASK_EXT; 133 - #ifndef CONFIG_ARCH_S390X 134 - asm volatile ( 135 - " basr %0,0\n" 136 - "0: la %0,1f-0b(%0)\n" 137 - " st %0,4(%1)\n" 138 - " oi 4(%1),0x80\n" 139 - " lpsw 0(%1)\n" 140 - "1: la %0,2f-1b(%0)\n" 141 - " st %0,4(%1)\n" 142 - " oi 4(%1),0x80\n" 143 - " ni 1(%1),0xf9\n" 144 - " lpsw 0(%1)\n" 145 - "2:" 146 - : "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" ); 147 - #else /* CONFIG_ARCH_S390X */ 148 - asm volatile ( 149 - " larl %0,0f\n" 150 - " stg %0,8(%1)\n" 151 - " lpswe 0(%1)\n" 152 - "0: larl %0,1f\n" 153 - " stg %0,8(%1)\n" 154 - " ni 1(%1),0xf9\n" 155 - " lpswe 0(%1)\n" 156 - "1:" 157 - : "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" ); 158 - #endif /* CONFIG_ARCH_S390X */ 128 + local_mcck_disable(); 129 + if (test_thread_flag(TIF_MCCK_PENDING)) { 130 + local_mcck_enable(); 131 + local_irq_enable(); 132 + s390_handle_mcck(); 133 + return; 134 + } 135 + 136 + /* Wait for external, I/O or machine check interrupt. */ 137 + __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_WAIT | 138 + PSW_MASK_IO | PSW_MASK_EXT); 159 139 } 160 140 161 141 void cpu_idle(void)
+10 -3
arch/s390/kernel/setup.c
··· 414 414 lc->program_new_psw.mask = PSW_KERNEL_BITS; 415 415 lc->program_new_psw.addr = 416 416 PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; 417 - lc->mcck_new_psw.mask = PSW_KERNEL_BITS; 417 + lc->mcck_new_psw.mask = 418 + PSW_KERNEL_BITS & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; 418 419 lc->mcck_new_psw.addr = 419 420 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; 420 421 lc->io_new_psw.mask = PSW_KERNEL_BITS; ··· 425 424 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; 426 425 lc->async_stack = (unsigned long) 427 426 __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; 428 - #ifdef CONFIG_CHECK_STACK 429 427 lc->panic_stack = (unsigned long) 430 428 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; 431 - #endif 432 429 lc->current_task = (unsigned long) init_thread_union.thread_info.task; 433 430 lc->thread_info = (unsigned long) &init_thread_union; 431 + #ifndef CONFIG_ARCH_S390X 432 + if (MACHINE_HAS_IEEE) { 433 + lc->extended_save_area_addr = (__u32) 434 + __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); 435 + /* enable extended save area */ 436 + ctl_set_bit(14, 29); 437 + } 438 + #endif 434 439 #ifdef CONFIG_ARCH_S390X 435 440 if (MACHINE_HAS_DIAG44) 436 441 lc->diag44_opcode = 0x83000044;
+12 -1
arch/s390/kernel/smp.c
··· 773 773 774 774 *(lowcore_ptr[i]) = S390_lowcore; 775 775 lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE); 776 - #ifdef CONFIG_CHECK_STACK 777 776 stack = __get_free_pages(GFP_KERNEL,0); 778 777 if (stack == 0ULL) 779 778 panic("smp_boot_cpus failed to allocate memory\n"); 780 779 lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE); 780 + #ifndef __s390x__ 781 + if (MACHINE_HAS_IEEE) { 782 + lowcore_ptr[i]->extended_save_area_addr = 783 + (__u32) __get_free_pages(GFP_KERNEL,0); 784 + if (lowcore_ptr[i]->extended_save_area_addr == 0) 785 + panic("smp_boot_cpus failed to " 786 + "allocate memory\n"); 787 + } 781 788 #endif 782 789 } 790 + #ifndef __s390x__ 791 + if (MACHINE_HAS_IEEE) 792 + ctl_set_bit(14, 29); /* enable extended save area */ 793 + #endif 783 794 set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]); 784 795 785 796 for_each_cpu(cpu)
+287 -34
drivers/s390/s390mach.c
··· 31 31 extern struct workqueue_struct *slow_path_wq; 32 32 extern struct work_struct slow_path_work; 33 33 34 - static void 34 + static NORET_TYPE void 35 35 s390_handle_damage(char *msg) 36 36 { 37 - printk(KERN_EMERG "%s\n", msg); 38 37 #ifdef CONFIG_SMP 39 38 smp_send_stop(); 40 39 #endif 41 40 disabled_wait((unsigned long) __builtin_return_address(0)); 41 + for(;;); 42 42 } 43 43 44 44 /* ··· 122 122 return 0; 123 123 } 124 124 125 + struct mcck_struct { 126 + int kill_task; 127 + int channel_report; 128 + int warning; 129 + unsigned long long mcck_code; 130 + }; 131 + 132 + static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck); 133 + 125 134 /* 126 - * machine check handler. 135 + * Main machine check handler function. Will be called with interrupts enabled 136 + * or disabled and machine checks enabled or disabled. 127 137 */ 128 138 void 129 - s390_do_machine_check(void) 139 + s390_handle_mcck(void) 130 140 { 131 - struct mci *mci; 141 + unsigned long flags; 142 + struct mcck_struct mcck; 132 143 133 - mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 144 + /* 145 + * Disable machine checks and get the current state of accumulated 146 + * machine checks. Afterwards delete the old state and enable machine 147 + * checks again. 
148 + */ 149 + local_irq_save(flags); 150 + local_mcck_disable(); 151 + mcck = __get_cpu_var(cpu_mcck); 152 + memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct)); 153 + clear_thread_flag(TIF_MCCK_PENDING); 154 + local_mcck_enable(); 155 + local_irq_restore(flags); 134 156 135 - if (mci->sd) /* system damage */ 136 - s390_handle_damage("received system damage machine check\n"); 137 - 138 - if (mci->pd) /* instruction processing damage */ 139 - s390_handle_damage("received instruction processing " 140 - "damage machine check\n"); 141 - 142 - if (mci->se) /* storage error uncorrected */ 143 - s390_handle_damage("received storage error uncorrected " 144 - "machine check\n"); 145 - 146 - if (mci->sc) /* storage error corrected */ 147 - printk(KERN_WARNING 148 - "received storage error corrected machine check\n"); 149 - 150 - if (mci->ke) /* storage key-error uncorrected */ 151 - s390_handle_damage("received storage key-error uncorrected " 152 - "machine check\n"); 153 - 154 - if (mci->ds && mci->fa) /* storage degradation */ 155 - s390_handle_damage("received storage degradation machine " 156 - "check\n"); 157 - 158 - if (mci->cp) /* channel report word pending */ 157 + if (mcck.channel_report) 159 158 up(&m_sem); 160 159 161 160 #ifdef CONFIG_MACHCHK_WARNING ··· 167 168 * On VM we only get one interrupt per virtally presented machinecheck. 168 169 * Though one suffices, we may get one interrupt per (virtual) processor. 169 170 */ 170 - if (mci->w) { /* WARNING pending ? */ 171 + if (mcck.warning) { /* WARNING pending ? 
*/ 171 172 static int mchchk_wng_posted = 0; 172 173 /* 173 174 * Use single machine clear, as we cannot handle smp right now ··· 177 178 kill_proc(1, SIGPWR, 1); 178 179 } 179 180 #endif 181 + 182 + if (mcck.kill_task) { 183 + local_irq_enable(); 184 + printk(KERN_EMERG "mcck: Terminating task because of machine " 185 + "malfunction (code 0x%016llx).\n", mcck.mcck_code); 186 + printk(KERN_EMERG "mcck: task: %s, pid: %d.\n", 187 + current->comm, current->pid); 188 + do_exit(SIGSEGV); 189 + } 190 + } 191 + 192 + /* 193 + * returns 0 if all registers could be validated 194 + * returns 1 otherwise 195 + */ 196 + static int 197 + s390_revalidate_registers(struct mci *mci) 198 + { 199 + int kill_task; 200 + u64 tmpclock; 201 + u64 zero; 202 + void *fpt_save_area, *fpt_creg_save_area; 203 + 204 + kill_task = 0; 205 + zero = 0; 206 + /* General purpose registers */ 207 + if (!mci->gr) 208 + /* 209 + * General purpose registers couldn't be restored and have 210 + * unknown contents. Process needs to be terminated. 211 + */ 212 + kill_task = 1; 213 + 214 + /* Revalidate floating point registers */ 215 + if (!mci->fp) 216 + /* 217 + * Floating point registers can't be restored and 218 + * therefore the process needs to be terminated. 219 + */ 220 + kill_task = 1; 221 + 222 + #ifndef __s390x__ 223 + asm volatile("ld 0,0(%0)\n" 224 + "ld 2,8(%0)\n" 225 + "ld 4,16(%0)\n" 226 + "ld 6,24(%0)" 227 + : : "a" (&S390_lowcore.floating_pt_save_area)); 228 + #endif 229 + 230 + if (MACHINE_HAS_IEEE) { 231 + #ifdef __s390x__ 232 + fpt_save_area = &S390_lowcore.floating_pt_save_area; 233 + fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; 234 + #else 235 + fpt_save_area = (void *) S390_lowcore.extended_save_area_addr; 236 + fpt_creg_save_area = fpt_save_area+128; 237 + #endif 238 + /* Floating point control register */ 239 + if (!mci->fc) { 240 + /* 241 + * Floating point control register can't be restored. 242 + * Task will be terminated. 
243 + */ 244 + asm volatile ("lfpc 0(%0)" : : "a" (&zero)); 245 + kill_task = 1; 246 + 247 + } 248 + else 249 + asm volatile ( 250 + "lfpc 0(%0)" 251 + : : "a" (fpt_creg_save_area)); 252 + 253 + asm volatile("ld 0,0(%0)\n" 254 + "ld 1,8(%0)\n" 255 + "ld 2,16(%0)\n" 256 + "ld 3,24(%0)\n" 257 + "ld 4,32(%0)\n" 258 + "ld 5,40(%0)\n" 259 + "ld 6,48(%0)\n" 260 + "ld 7,56(%0)\n" 261 + "ld 8,64(%0)\n" 262 + "ld 9,72(%0)\n" 263 + "ld 10,80(%0)\n" 264 + "ld 11,88(%0)\n" 265 + "ld 12,96(%0)\n" 266 + "ld 13,104(%0)\n" 267 + "ld 14,112(%0)\n" 268 + "ld 15,120(%0)\n" 269 + : : "a" (fpt_save_area)); 270 + } 271 + 272 + /* Revalidate access registers */ 273 + asm volatile("lam 0,15,0(%0)" 274 + : : "a" (&S390_lowcore.access_regs_save_area)); 275 + if (!mci->ar) 276 + /* 277 + * Access registers have unknown contents. 278 + * Terminating task. 279 + */ 280 + kill_task = 1; 281 + 282 + /* Revalidate control registers */ 283 + if (!mci->cr) 284 + /* 285 + * Control registers have unknown contents. 286 + * Can't recover and therefore stopping machine. 287 + */ 288 + s390_handle_damage("invalid control registers."); 289 + else 290 + #ifdef __s390x__ 291 + asm volatile("lctlg 0,15,0(%0)" 292 + : : "a" (&S390_lowcore.cregs_save_area)); 293 + #else 294 + asm volatile("lctl 0,15,0(%0)" 295 + : : "a" (&S390_lowcore.cregs_save_area)); 296 + #endif 297 + 298 + /* 299 + * We don't even try to revalidate the TOD register, since we simply 300 + * can't write something sensible into that register. 301 + */ 302 + 303 + #ifdef __s390x__ 304 + /* 305 + * See if we can revalidate the TOD programmable register with its 306 + * old contents (should be zero) otherwise set it to zero. 
307 + */ 308 + if (!mci->pr) 309 + asm volatile("sr 0,0\n" 310 + "sckpf" 311 + : : : "0", "cc"); 312 + else 313 + asm volatile( 314 + "l 0,0(%0)\n" 315 + "sckpf" 316 + : : "a" (&S390_lowcore.tod_progreg_save_area) : "0", "cc"); 317 + #endif 318 + 319 + /* Revalidate clock comparator register */ 320 + asm volatile ("stck 0(%1)\n" 321 + "sckc 0(%1)" 322 + : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory"); 323 + 324 + /* Check if old PSW is valid */ 325 + if (!mci->wp) 326 + /* 327 + * Can't tell if we come from user or kernel mode 328 + * -> stopping machine. 329 + */ 330 + s390_handle_damage("old psw invalid."); 331 + 332 + if (!mci->ms || !mci->pm || !mci->ia) 333 + kill_task = 1; 334 + 335 + return kill_task; 336 + } 337 + 338 + /* 339 + * machine check handler. 340 + */ 341 + void 342 + s390_do_machine_check(struct pt_regs *regs) 343 + { 344 + struct mci *mci; 345 + struct mcck_struct *mcck; 346 + int umode; 347 + 348 + mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 349 + mcck = &__get_cpu_var(cpu_mcck); 350 + umode = user_mode(regs); 351 + 352 + if (mci->sd) 353 + /* System damage -> stopping machine */ 354 + s390_handle_damage("received system damage machine check."); 355 + 356 + if (mci->pd) { 357 + if (mci->b) { 358 + /* Processing backup -> verify if we can survive this */ 359 + u64 z_mcic, o_mcic, t_mcic; 360 + #ifdef __s390x__ 361 + z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); 362 + o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | 363 + 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 364 + 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | 365 + 1ULL<<16); 366 + #else 367 + z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 | 368 + 1ULL<<29); 369 + o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | 370 + 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 371 + 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16); 372 + #endif 373 + t_mcic = *(u64 *)mci; 374 + 375 + if (((t_mcic & z_mcic) != 0) || 376 + ((t_mcic & o_mcic) != o_mcic)) { 377 + 
s390_handle_damage("processing backup machine " 378 + "check with damage."); 379 + } 380 + if (!umode) 381 + s390_handle_damage("processing backup machine " 382 + "check in kernel mode."); 383 + mcck->kill_task = 1; 384 + mcck->mcck_code = *(unsigned long long *) mci; 385 + } 386 + else { 387 + /* Processing damage -> stopping machine */ 388 + s390_handle_damage("received instruction processing " 389 + "damage machine check."); 390 + } 391 + } 392 + if (s390_revalidate_registers(mci)) { 393 + if (umode) { 394 + /* 395 + * Couldn't restore all register contents while in 396 + * user mode -> mark task for termination. 397 + */ 398 + mcck->kill_task = 1; 399 + mcck->mcck_code = *(unsigned long long *) mci; 400 + set_thread_flag(TIF_MCCK_PENDING); 401 + } 402 + else 403 + /* 404 + * Couldn't restore all register contents while in 405 + * kernel mode -> stopping machine. 406 + */ 407 + s390_handle_damage("unable to revalidate registers."); 408 + } 409 + 410 + if (mci->se) 411 + /* Storage error uncorrected */ 412 + s390_handle_damage("received storage error uncorrected " 413 + "machine check."); 414 + 415 + if (mci->ke) 416 + /* Storage key-error uncorrected */ 417 + s390_handle_damage("received storage key-error uncorrected " 418 + "machine check."); 419 + 420 + if (mci->ds && mci->fa) 421 + /* Storage degradation */ 422 + s390_handle_damage("received storage degradation machine " 423 + "check."); 424 + 425 + if (mci->cp) { 426 + /* Channel report word pending */ 427 + mcck->channel_report = 1; 428 + set_thread_flag(TIF_MCCK_PENDING); 429 + } 430 + 431 + if (mci->w) { 432 + /* Warning pending */ 433 + mcck->warning = 1; 434 + set_thread_flag(TIF_MCCK_PENDING); 435 + } 180 436 } 181 437 182 438 /* ··· 443 189 machine_check_init(void) 444 190 { 445 191 init_MUTEX_LOCKED(&m_sem); 446 - ctl_clear_bit(14, 25); /* disable damage MCH */ 447 - ctl_set_bit(14, 26); /* enable degradation MCH */ 448 - ctl_set_bit(14, 27); /* enable system recovery MCH */ 192 + ctl_clear_bit(14, 
25); /* disable external damage MCH */ 193 + ctl_set_bit(14, 27); /* enable system recovery MCH */ 449 194 #ifdef CONFIG_MACHCHK_WARNING 450 195 ctl_set_bit(14, 24); /* enable warning MCH */ 451 196 #endif
+30 -5
drivers/s390/s390mach.h
··· 16 16 __u32 sd : 1; /* 00 system damage */ 17 17 __u32 pd : 1; /* 01 instruction-processing damage */ 18 18 __u32 sr : 1; /* 02 system recovery */ 19 - __u32 to_be_defined_1 : 4; /* 03-06 */ 19 + __u32 to_be_defined_1 : 1; /* 03 */ 20 + __u32 cd : 1; /* 04 timing-facility damage */ 21 + __u32 ed : 1; /* 05 external damage */ 22 + __u32 to_be_defined_2 : 1; /* 06 */ 20 23 __u32 dg : 1; /* 07 degradation */ 21 24 __u32 w : 1; /* 08 warning pending */ 22 25 __u32 cp : 1; /* 09 channel-report pending */ 23 - __u32 to_be_defined_2 : 6; /* 10-15 */ 26 + __u32 sp : 1; /* 10 service-processor damage */ 27 + __u32 ck : 1; /* 11 channel-subsystem damage */ 28 + __u32 to_be_defined_3 : 2; /* 12-13 */ 29 + __u32 b : 1; /* 14 backed up */ 30 + __u32 to_be_defined_4 : 1; /* 15 */ 24 31 __u32 se : 1; /* 16 storage error uncorrected */ 25 32 __u32 sc : 1; /* 17 storage error corrected */ 26 33 __u32 ke : 1; /* 18 storage-key error uncorrected */ 27 34 __u32 ds : 1; /* 19 storage degradation */ 28 - __u32 to_be_defined_3 : 4; /* 20-23 */ 35 + __u32 wp : 1; /* 20 psw mwp validity */ 36 + __u32 ms : 1; /* 21 psw mask and key validity */ 37 + __u32 pm : 1; /* 22 psw program mask and cc validity */ 38 + __u32 ia : 1; /* 23 psw instruction address validity */ 29 39 __u32 fa : 1; /* 24 failing storage address validity */ 30 - __u32 to_be_defined_4 : 7; /* 25-31 */ 40 + __u32 to_be_defined_5 : 1; /* 25 */ 41 + __u32 ec : 1; /* 26 external damage code validity */ 42 + __u32 fp : 1; /* 27 floating point register validity */ 43 + __u32 gr : 1; /* 28 general register validity */ 44 + __u32 cr : 1; /* 29 control register validity */ 45 + __u32 to_be_defined_6 : 1; /* 30 */ 46 + __u32 st : 1; /* 31 storage logical validity */ 31 47 __u32 ie : 1; /* 32 indirect storage error */ 32 - __u32 to_be_defined_5 : 31; /* 33-63 */ 48 + __u32 ar : 1; /* 33 access register validity */ 49 + __u32 da : 1; /* 34 delayed access exception */ 50 + __u32 to_be_defined_7 : 7; /* 35-41 */ 51 + __u32 pr : 1; /* 
42 tod programmable register validity */ 52 + __u32 fc : 1; /* 43 fp control register validity */ 53 + __u32 ap : 1; /* 44 ancillary report */ 54 + __u32 to_be_defined_8 : 1; /* 45 */ 55 + __u32 ct : 1; /* 46 cpu timer validity */ 56 + __u32 cc : 1; /* 47 clock comparator validity */ 57 + __u32 to_be_defined_9 : 16; /* 48-63 */ 33 58 }; 34 59 35 60 /*
+6 -1
include/asm-s390/lowcore.h
··· 109 109 110 110 #ifndef __s390x__ 111 111 #define __LC_PFAULT_INTPARM 0x080 112 + #define __LC_CPU_TIMER_SAVE_AREA 0x0D8 112 113 #define __LC_AREGS_SAVE_AREA 0x120 114 + #define __LC_GPREGS_SAVE_AREA 0x180 113 115 #define __LC_CREGS_SAVE_AREA 0x1C0 114 116 #else /* __s390x__ */ 115 117 #define __LC_PFAULT_INTPARM 0x11B8 118 + #define __LC_GPREGS_SAVE_AREA 0x1280 119 + #define __LC_CPU_TIMER_SAVE_AREA 0x1328 116 120 #define __LC_AREGS_SAVE_AREA 0x1340 117 121 #define __LC_CREGS_SAVE_AREA 0x1380 118 122 #endif /* __s390x__ */ ··· 171 167 __u16 subchannel_nr; /* 0x0ba */ 172 168 __u32 io_int_parm; /* 0x0bc */ 173 169 __u32 io_int_word; /* 0x0c0 */ 174 - __u8 pad3[0xD8-0xC4]; /* 0x0c4 */ 170 + __u8 pad3[0xD4-0xC4]; /* 0x0c4 */ 171 + __u32 extended_save_area_addr; /* 0x0d4 */ 175 172 __u32 cpu_timer_save_area[2]; /* 0x0d8 */ 176 173 __u32 clock_comp_save_area[2]; /* 0x0e0 */ 177 174 __u32 mcck_interruption_code[2]; /* 0x0e8 */
+20 -32
include/asm-s390/processor.h
··· 207 207 #endif /* __s390x__ */ 208 208 209 209 /* 210 + * Set PSW to specified value. 211 + */ 212 + static inline void __load_psw(psw_t psw) 213 + { 214 + #ifndef __s390x__ 215 + asm volatile ("lpsw 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); 216 + #else 217 + asm volatile ("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); 218 + #endif 219 + } 220 + 221 + /* 210 222 * Set PSW mask to specified value, while leaving the 211 223 * PSW addr pointing to the next instruction. 212 224 */ ··· 226 214 static inline void __load_psw_mask (unsigned long mask) 227 215 { 228 216 unsigned long addr; 229 - 230 217 psw_t psw; 218 + 231 219 psw.mask = mask; 232 220 233 221 #ifndef __s390x__ ··· 253 241 */ 254 242 static inline void enabled_wait(void) 255 243 { 256 - unsigned long reg; 257 - psw_t wait_psw; 258 - 259 - wait_psw.mask = PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT | 260 - PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY; 261 - #ifndef __s390x__ 262 - asm volatile ( 263 - " basr %0,0\n" 264 - "0: la %0,1f-0b(%0)\n" 265 - " st %0,4(%1)\n" 266 - " oi 4(%1),0x80\n" 267 - " lpsw 0(%1)\n" 268 - "1:" 269 - : "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw) 270 - : "memory", "cc" ); 271 - #else /* __s390x__ */ 272 - asm volatile ( 273 - " larl %0,0f\n" 274 - " stg %0,8(%1)\n" 275 - " lpswe 0(%1)\n" 276 - "0:" 277 - : "=&a" (reg) : "a" (&wait_psw), "m" (wait_psw) 278 - : "memory", "cc" ); 279 - #endif /* __s390x__ */ 244 + __load_psw_mask(PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT | 245 + PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY); 280 246 } 281 247 282 248 /* ··· 263 273 264 274 static inline void disabled_wait(unsigned long code) 265 275 { 266 - char psw_buffer[2*sizeof(psw_t)]; 267 276 unsigned long ctl_buf; 268 - psw_t *dw_psw = (psw_t *)(((unsigned long) &psw_buffer+sizeof(psw_t)-1) 269 - & -sizeof(psw_t)); 277 + psw_t dw_psw; 270 278 271 - dw_psw->mask = PSW_BASE_BITS | PSW_MASK_WAIT; 272 - dw_psw->addr = code; 279 + dw_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT; 
280 + dw_psw.addr = code; 273 281 /* 274 282 * Store status and then load disabled wait psw, 275 283 * the processor is dead afterwards ··· 289 301 " oi 0x1c0,0x10\n" /* fake protection bit */ 290 302 " lpsw 0(%1)" 291 303 : "=m" (ctl_buf) 292 - : "a" (dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" ); 304 + : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc" ); 293 305 #else /* __s390x__ */ 294 306 asm volatile (" stctg 0,0,0(%2)\n" 295 307 " ni 4(%2),0xef\n" /* switch off protection */ ··· 321 333 " oi 0x384(1),0x10\n" /* fake protection bit */ 322 334 " lpswe 0(%1)" 323 335 : "=m" (ctl_buf) 324 - : "a" (dw_psw), "a" (&ctl_buf), 336 + : "a" (&dw_psw), "a" (&ctl_buf), 325 337 "m" (dw_psw) : "cc", "0", "1"); 326 338 #endif /* __s390x__ */ 327 339 }
+1 -1
include/asm-s390/ptrace.h
··· 276 276 #endif /* __s390x__ */ 277 277 278 278 #define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | \ 279 - PSW_DEFAULT_KEY) 279 + PSW_MASK_MCHECK | PSW_DEFAULT_KEY) 280 280 #define PSW_USER_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \ 281 281 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \ 282 282 PSW_MASK_PSTATE | PSW_DEFAULT_KEY)
+15 -6
include/asm-s390/system.h
··· 16 16 #include <asm/types.h> 17 17 #include <asm/ptrace.h> 18 18 #include <asm/setup.h> 19 + #include <asm/processor.h> 19 20 20 21 #ifdef __KERNEL__ 21 22 ··· 332 331 333 332 #ifdef __s390x__ 334 333 335 - #define __load_psw(psw) \ 336 - __asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw), "m" (psw) : "cc" ); 337 - 338 334 #define __ctl_load(array, low, high) ({ \ 339 335 typedef struct { char _[sizeof(array)]; } addrtype; \ 340 336 __asm__ __volatile__ ( \ ··· 387 389 }) 388 390 389 391 #else /* __s390x__ */ 390 - 391 - #define __load_psw(psw) \ 392 - __asm__ __volatile__("lpsw 0(%0)" : : "a" (&psw) : "cc" ); 393 392 394 393 #define __ctl_load(array, low, high) ({ \ 395 394 typedef struct { char _[sizeof(array)]; } addrtype; \ ··· 445 450 446 451 /* For spinlocks etc */ 447 452 #define local_irq_save(x) ((x) = local_irq_disable()) 453 + 454 + /* 455 + * Use to set psw mask except for the first byte which 456 + * won't be changed by this function. 457 + */ 458 + static inline void 459 + __set_psw_mask(unsigned long mask) 460 + { 461 + local_save_flags(mask); 462 + __load_psw_mask(mask); 463 + } 464 + 465 + #define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS) 466 + #define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK) 448 467 449 468 #ifdef CONFIG_SMP 450 469
+2
include/asm-s390/thread_info.h
··· 96 96 #define TIF_RESTART_SVC 4 /* restart svc with new svc number */ 97 97 #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ 98 98 #define TIF_SINGLE_STEP 6 /* deliver sigtrap on return to user */ 99 + #define TIF_MCCK_PENDING 7 /* machine check handling is pending */ 99 100 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ 100 101 #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling 101 102 TIF_NEED_RESCHED */ ··· 110 109 #define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC) 111 110 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 112 111 #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 112 + #define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING) 113 113 #define _TIF_USEDFPU (1<<TIF_USEDFPU) 114 114 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 115 115 #define _TIF_31BIT (1<<TIF_31BIT)