Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

s390: remove critical section cleanup from entry.S

The current code is rather complex and has caused a lot of subtle and
hard-to-debug bugs in the past. Simplify it by entering the system_call
handler with interrupts disabled, saving the machine state, and
re-enabling interrupts afterwards.
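A rough C-level outline of that ordering is shown below; it is only an
illustration of the entry.S hunk further down, not real kernel code. The
lower-case helpers are hypothetical stand-ins for the assembly sequences,
while ENABLE_INTS/DISABLE_INTS are the macros this patch adds to entry.S.

/*
 * Illustrative outline only.  The SVC new PSW mask no longer contains
 * PSW_MASK_IO | PSW_MASK_EXT (see the setup.c hunk), so the handler is
 * entered with external and I/O interrupts disabled.
 */
void system_call_outline(void)
{
	save_machine_state();		/* stmg, UPDATE_VTIME, pt_regs setup */
	ENABLE_INTS();			/* stosm: interrupts back on */
	dispatch_syscall();		/* .Lsysc_do_svc and the work checks */
	DISABLE_INTS();			/* stnsm: off again before restore */
	restore_registers_and_return();	/* lmg %r0,%r15 + __LC_RETURN_LPSWE */
}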

This requires significant changes to the machine check handling code as
well. When the machine check interrupt arrives while the CPU is in
kernel mode, the new code signals the pending machine check with a SIGP
external call. When userspace was interrupted, the handler switches to
the kernel stack and executes s390_handle_mcck() directly.
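A condensed C sketch of how the reworked handler reports that decision is
shown below, based on the nmi.c hunk further down; the error decoding is
elided, and schedule_mcck_handler() is introduced elsewhere in the patch
(its definition is not part of the hunks shown here).

/* Condensed sketch of the new s390_do_machine_check() tail. */
int notrace s390_do_machine_check(struct pt_regs *regs)
{
	int mcck_pending = 0;

	nmi_enter();
	/* ... decode the machine check, set mcck_pending as needed ... */
	if (user_mode(regs) && mcck_pending) {
		nmi_exit();
		return 1;	/* entry.S switches to the kernel stack and
				 * calls s390_handle_mcck() directly */
	}
	if (mcck_pending)
		schedule_mcck_handler();	/* kernel was interrupted */
	nmi_exit();
	return 0;
}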

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>

Authored by Sven Schnelle and committed by Vasily Gorbik
0b0ed657 11886c19

+143 -390
+1 -1
arch/s390/include/asm/nmi.h
···
 void nmi_free_per_cpu(struct lowcore *lc);
 
 void s390_handle_mcck(void);
-void s390_do_machine_check(struct pt_regs *regs);
+int s390_do_machine_check(struct pt_regs *regs);
 
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_NMI_H */
+8 -10
arch/s390/include/asm/processor.h
···
 
 #include <linux/bits.h>
 
-#define CIF_MCCK_PENDING	0	/* machine check handling is pending */
-#define CIF_ASCE_PRIMARY	1	/* primary asce needs fixup / uaccess */
-#define CIF_ASCE_SECONDARY	2	/* secondary asce needs fixup / uaccess */
-#define CIF_NOHZ_DELAY		3	/* delay HZ disable for a tick */
-#define CIF_FPU			4	/* restore FPU registers */
-#define CIF_IGNORE_IRQ		5	/* ignore interrupt (for udelay) */
-#define CIF_ENABLED_WAIT	6	/* in enabled wait state */
-#define CIF_MCCK_GUEST		7	/* machine check happening in guest */
-#define CIF_DEDICATED_CPU	8	/* this CPU is dedicated */
+#define CIF_ASCE_PRIMARY	0	/* primary asce needs fixup / uaccess */
+#define CIF_ASCE_SECONDARY	1	/* secondary asce needs fixup / uaccess */
+#define CIF_NOHZ_DELAY		2	/* delay HZ disable for a tick */
+#define CIF_FPU			3	/* restore FPU registers */
+#define CIF_IGNORE_IRQ		4	/* ignore interrupt (for udelay) */
+#define CIF_ENABLED_WAIT	5	/* in enabled wait state */
+#define CIF_MCCK_GUEST		6	/* machine check happening in guest */
+#define CIF_DEDICATED_CPU	7	/* this CPU is dedicated */
 
-#define _CIF_MCCK_PENDING	BIT(CIF_MCCK_PENDING)
 #define _CIF_ASCE_PRIMARY	BIT(CIF_ASCE_PRIMARY)
 #define _CIF_ASCE_SECONDARY	BIT(CIF_ASCE_SECONDARY)
 #define _CIF_NOHZ_DELAY		BIT(CIF_NOHZ_DELAY)
+110 -354
arch/s390/kernel/entry.S
··· 55 55 _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING) 56 56 _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 57 57 _TIF_SYSCALL_TRACEPOINT) 58 - _CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \ 59 - _CIF_ASCE_SECONDARY | _CIF_FPU) 58 + _CIF_WORK = (_CIF_ASCE_PRIMARY | _CIF_ASCE_SECONDARY | _CIF_FPU) 60 59 _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) 61 60 62 61 _LPP_OFFSET = __LC_LPP 63 - 64 - #define BASED(name) name-cleanup_critical(%r13) 65 62 66 63 .macro TRACE_IRQS_ON 67 64 #ifdef CONFIG_TRACE_IRQFLAGS ··· 113 116 .macro SWITCH_ASYNC savearea,timer 114 117 tmhh %r8,0x0001 # interrupting from user ? 115 118 jnz 2f 119 + #if IS_ENABLED(CONFIG_KVM) 116 120 lgr %r14,%r9 117 - cghi %r14,__LC_RETURN_LPSWE 118 - je 0f 119 - slg %r14,BASED(.Lcritical_start) 120 - clg %r14,BASED(.Lcritical_length) 121 - jhe 1f 122 - 0: 121 + larl %r13,.Lsie_gmap 122 + slgr %r14,%r13 123 + lghi %r13,.Lsie_done - .Lsie_gmap 124 + clgr %r14,%r13 125 + jhe 0f 123 126 lghi %r11,\savearea # inside critical section, do cleanup 124 - brasl %r14,cleanup_critical 125 - tmhh %r8,0x0001 # retest problem state after cleanup 126 - jnz 2f 127 + brasl %r14,.Lcleanup_sie 128 + #endif 129 + 0: larl %r13,.Lpsw_idle_exit 130 + cgr %r13,%r9 131 + jne 1f 132 + 133 + mvc __CLOCK_IDLE_EXIT(8,%r2), __LC_INT_CLOCK 134 + mvc __TIMER_IDLE_EXIT(8,%r2), __LC_ASYNC_ENTER_TIMER 135 + # account system time going idle 136 + ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT 137 + 138 + lg %r13,__LC_STEAL_TIMER 139 + alg %r13,__CLOCK_IDLE_ENTER(%r2) 140 + slg %r13,__LC_LAST_UPDATE_CLOCK 141 + stg %r13,__LC_STEAL_TIMER 142 + 143 + mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) 144 + 145 + lg %r13,__LC_SYSTEM_TIMER 146 + alg %r13,__LC_LAST_UPDATE_TIMER 147 + slg %r13,__TIMER_IDLE_ENTER(%r2) 148 + stg %r13,__LC_SYSTEM_TIMER 149 + mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) 150 + 151 + nihh %r8,0xfcfd # clear wait state and irq bits 127 152 1: lg %r14,__LC_ASYNC_STACK # are we already on the target stack? 
128 153 slgr %r14,%r15 129 154 srag %r14,%r14,STACK_SHIFT ··· 171 152 mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer 172 153 .endm 173 154 174 - .macro REENABLE_IRQS 155 + .macro RESTORE_SM_CLEAR_PER 175 156 stg %r8,__LC_RETURN_PSW 176 157 ni __LC_RETURN_PSW,0xbf 177 158 ssm __LC_RETURN_PSW 159 + .endm 160 + 161 + .macro ENABLE_INTS 162 + stosm __SF_EMPTY(%r15),3 163 + .endm 164 + 165 + .macro ENABLE_INTS_TRACE 166 + TRACE_IRQS_ON 167 + ENABLE_INTS 168 + .endm 169 + 170 + .macro DISABLE_INTS 171 + stnsm __SF_EMPTY(%r15),0xfc 172 + .endm 173 + 174 + .macro DISABLE_INTS_TRACE 175 + DISABLE_INTS 176 + TRACE_IRQS_OFF 178 177 .endm 179 178 180 179 .macro STCK savearea ··· 291 254 BR_EX %r14 292 255 ENDPROC(__switch_to) 293 256 294 - .L__critical_start: 295 - 296 257 #if IS_ENABLED(CONFIG_KVM) 297 258 /* 298 259 * sie64a calling convention: ··· 323 288 BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 324 289 .Lsie_entry: 325 290 sie 0(%r14) 326 - .Lsie_exit: 327 291 BPOFF 328 292 BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 329 293 .Lsie_skip: ··· 375 341 376 342 ENTRY(system_call) 377 343 stpt __LC_SYNC_ENTER_TIMER 378 - .Lsysc_stmg: 379 344 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 380 345 BPOFF 381 346 lg %r12,__LC_CURRENT ··· 383 350 .Lsysc_per: 384 351 lg %r15,__LC_KERNEL_STACK 385 352 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs 386 - .Lsysc_vtime: 387 353 UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER 388 354 BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 389 355 stmg %r0,%r7,__PT_R0(%r11) ··· 390 358 mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW 391 359 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC 392 360 stg %r14,__PT_FLAGS(%r11) 361 + ENABLE_INTS 393 362 .Lsysc_do_svc: 394 363 # clear user controlled register to prevent speculative use 395 364 xgr %r0,%r0 ··· 426 393 jnz .Lsysc_work 427 394 TSTMSK __TI_flags(%r12),_TIF_WORK 428 395 jnz .Lsysc_work # check for work 429 - TSTMSK __LC_CPU_FLAGS,_CIF_WORK 396 + TSTMSK __LC_CPU_FLAGS,(_CIF_WORK-_CIF_FPU) 430 397 jnz .Lsysc_work 431 398 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 432 399 .Lsysc_restore: 400 + DISABLE_INTS 401 + TSTMSK __LC_CPU_FLAGS, _CIF_FPU 402 + jz .Lsysc_skip_fpu 403 + brasl %r14,load_fpu_regs 404 + .Lsysc_skip_fpu: 433 405 lg %r14,__LC_VDSO_PER_CPU 434 - lmg %r0,%r10,__PT_R0(%r11) 435 406 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 436 - .Lsysc_exit_timer: 437 407 stpt __LC_EXIT_TIMER 438 408 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 439 - lmg %r11,%r15,__PT_R11(%r11) 440 - b __LC_RETURN_LPSWE(%r0) 441 - .Lsysc_done: 409 + lmg %r0,%r15,__PT_R0(%r11) 410 + b __LC_RETURN_LPSWE 442 411 443 412 # 444 413 # One of the work bits is on. Find out which one. 
445 414 # 446 415 .Lsysc_work: 447 - TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING 448 - jo .Lsysc_mcck_pending 449 416 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 450 417 jo .Lsysc_reschedule 451 418 TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART ··· 469 436 jo .Lsysc_sigpending 470 437 TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME 471 438 jo .Lsysc_notify_resume 472 - TSTMSK __LC_CPU_FLAGS,_CIF_FPU 473 - jo .Lsysc_vxrs 474 439 TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY) 475 440 jnz .Lsysc_asce 476 - j .Lsysc_return # beware of critical section cleanup 441 + j .Lsysc_return 477 442 478 443 # 479 444 # _TIF_NEED_RESCHED is set, call schedule ··· 479 448 .Lsysc_reschedule: 480 449 larl %r14,.Lsysc_return 481 450 jg schedule 482 - 483 - # 484 - # _CIF_MCCK_PENDING is set, call handler 485 - # 486 - .Lsysc_mcck_pending: 487 - larl %r14,.Lsysc_return 488 - jg s390_handle_mcck # TIF bit will be cleared by handler 489 451 490 452 # 491 453 # _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce ··· 499 475 larl %r14,.Lsysc_return 500 476 jg set_fs_fixup 501 477 502 - # 503 - # CIF_FPU is set, restore floating-point controls and floating-point registers. 504 - # 505 - .Lsysc_vxrs: 506 - larl %r14,.Lsysc_return 507 - jg load_fpu_regs 508 478 509 479 # 510 480 # _TIF_SIGPENDING is set, call do_signal ··· 582 564 jnh .Lsysc_tracenogo 583 565 sllg %r8,%r2,3 584 566 lg %r9,0(%r8,%r10) 585 - .Lsysc_tracego: 586 567 lmg %r3,%r7,__PT_R3(%r11) 587 568 stg %r7,STACK_FRAME_OVERHEAD(%r15) 588 569 lg %r2,__PT_ORIG_GPR2(%r11) ··· 602 585 la %r11,STACK_FRAME_OVERHEAD(%r15) 603 586 lg %r12,__LC_CURRENT 604 587 brasl %r14,schedule_tail 605 - TRACE_IRQS_ON 606 - ssm __LC_SVC_NEW_PSW # reenable interrupts 607 588 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 
608 589 jne .Lsysc_tracenogo 609 590 # it's a kernel thread ··· 635 620 lghi %r10,1 636 621 0: lg %r12,__LC_CURRENT 637 622 lghi %r11,0 638 - larl %r13,cleanup_critical 639 623 lmg %r8,%r9,__LC_PGM_OLD_PSW 640 624 tmhh %r8,0x0001 # test problem state bit 641 625 jnz 3f # -> fault in user space 642 626 #if IS_ENABLED(CONFIG_KVM) 643 627 # cleanup critical section for program checks in sie64a 644 628 lgr %r14,%r9 645 - slg %r14,BASED(.Lsie_critical_start) 646 - clg %r14,BASED(.Lsie_critical_length) 629 + larl %r13,.Lsie_gmap 630 + slgr %r14,%r13 631 + lghi %r13,.Lsie_done - .Lsie_gmap 632 + clgr %r14,%r13 647 633 jhe 1f 648 634 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer 649 635 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE ··· 696 680 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS 697 681 mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE 698 682 mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID 699 - 6: REENABLE_IRQS 683 + 6: RESTORE_SM_CLEAR_PER 700 684 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 701 685 larl %r1,pgm_check_table 702 686 llgh %r10,__PT_INT_CODE+2(%r11) ··· 718 702 # PER event in supervisor state, must be kprobes 719 703 # 720 704 .Lpgm_kprobe: 721 - REENABLE_IRQS 705 + RESTORE_SM_CLEAR_PER 722 706 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 723 707 lgr %r2,%r11 # pass pointer to pt_regs 724 708 brasl %r14,do_per_trap ··· 729 713 # 730 714 .Lpgm_svcper: 731 715 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW 732 - lghi %r13,__TASK_thread 733 716 larl %r14,.Lsysc_per 734 717 stg %r14,__LC_RETURN_PSW+8 735 718 lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP 736 - lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs 719 + lpswe __LC_RETURN_PSW # branch to .Lsysc_per 737 720 ENDPROC(pgm_check_handler) 738 721 739 722 /* ··· 744 729 BPOFF 745 730 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC 746 731 lg %r12,__LC_CURRENT 747 - larl %r13,cleanup_critical 748 732 lmg %r8,%r9,__LC_IO_OLD_PSW 749 733 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER 750 734 stmg %r0,%r7,__PT_R0(%r11) ··· 763 749 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 764 750 TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ 765 751 jo .Lio_restore 752 + #if IS_ENABLED(CONFIG_TRACE_IRQFLAGS) 753 + tmhh %r8,0x300 754 + jz 1f 766 755 TRACE_IRQS_OFF 756 + 1: 757 + #endif 767 758 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 768 759 .Lio_loop: 769 760 lgr %r2,%r11 # pass pointer to pt_regs ··· 786 767 j .Lio_loop 787 768 .Lio_return: 788 769 LOCKDEP_SYS_EXIT 789 - TRACE_IRQS_ON 790 - .Lio_tif: 791 770 TSTMSK __TI_flags(%r12),_TIF_WORK 792 771 jnz .Lio_work # there is work to do (signals etc.) 793 772 TSTMSK __LC_CPU_FLAGS,_CIF_WORK 794 773 jnz .Lio_work 795 774 .Lio_restore: 775 + #if IS_ENABLED(CONFIG_TRACE_IRQFLAGS) 776 + tm __PT_PSW(%r11),3 777 + jno 0f 778 + TRACE_IRQS_ON 779 + 0: 780 + #endif 796 781 lg %r14,__LC_VDSO_PER_CPU 797 - lmg %r0,%r10,__PT_R0(%r11) 798 782 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 799 783 tm __PT_PSW+1(%r11),0x01 # returning to user ? 
800 784 jno .Lio_exit_kernel 801 785 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 802 - .Lio_exit_timer: 803 786 stpt __LC_EXIT_TIMER 804 787 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 805 788 .Lio_exit_kernel: 806 - lmg %r11,%r15,__PT_R11(%r11) 807 - b __LC_RETURN_LPSWE(%r0) 789 + lmg %r0,%r15,__PT_R0(%r11) 790 + b __LC_RETURN_LPSWE 808 791 .Lio_done: 809 792 810 793 # ··· 834 813 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 835 814 la %r11,STACK_FRAME_OVERHEAD(%r1) 836 815 lgr %r15,%r1 837 - # TRACE_IRQS_ON already done at .Lio_return, call 838 - # TRACE_IRQS_OFF to keep things symmetrical 839 - TRACE_IRQS_OFF 840 816 brasl %r14,preempt_schedule_irq 841 817 j .Lio_return 842 818 #else ··· 853 835 # 854 836 # One of the work bits is on. Find out which one. 855 837 # 856 - .Lio_work_tif: 857 - TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING 858 - jo .Lio_mcck_pending 859 838 TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 860 839 jo .Lio_reschedule 861 840 #ifdef CONFIG_LIVEPATCH ··· 869 854 jo .Lio_vxrs 870 855 TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY) 871 856 jnz .Lio_asce 872 - j .Lio_return # beware of critical section cleanup 873 - 874 - # 875 - # _CIF_MCCK_PENDING is set, call handler 876 - # 877 - .Lio_mcck_pending: 878 - # TRACE_IRQS_ON already done at .Lio_return 879 - brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler 880 - TRACE_IRQS_OFF 881 857 j .Lio_return 882 858 883 859 # ··· 901 895 # _TIF_GUARDED_STORAGE is set, call guarded_storage_load 902 896 # 903 897 .Lio_guarded_storage: 904 - # TRACE_IRQS_ON already done at .Lio_return 905 - ssm __LC_SVC_NEW_PSW # reenable interrupts 898 + ENABLE_INTS_TRACE 906 899 lgr %r2,%r11 # pass pointer to pt_regs 907 900 brasl %r14,gs_load_bc_cb 908 - ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 909 - TRACE_IRQS_OFF 901 + DISABLE_INTS_TRACE 910 902 j .Lio_return 911 903 912 904 # 913 905 # _TIF_NEED_RESCHED is set, call schedule 914 906 # 915 907 .Lio_reschedule: 916 - # TRACE_IRQS_ON already done at .Lio_return 917 - ssm __LC_SVC_NEW_PSW # reenable interrupts 908 + ENABLE_INTS_TRACE 918 909 brasl %r14,schedule # call scheduler 919 - ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 920 - TRACE_IRQS_OFF 910 + DISABLE_INTS_TRACE 921 911 j .Lio_return 922 912 923 913 # ··· 930 928 # _TIF_SIGPENDING or is set, call do_signal 931 929 # 932 930 .Lio_sigpending: 933 - # TRACE_IRQS_ON already done at .Lio_return 934 - ssm __LC_SVC_NEW_PSW # reenable interrupts 931 + ENABLE_INTS_TRACE 935 932 lgr %r2,%r11 # pass pointer to pt_regs 936 933 brasl %r14,do_signal 937 - ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 938 - TRACE_IRQS_OFF 934 + DISABLE_INTS_TRACE 939 935 j .Lio_return 940 936 941 937 # 942 938 # _TIF_NOTIFY_RESUME or is set, call do_notify_resume 943 939 # 944 940 .Lio_notify_resume: 945 - # TRACE_IRQS_ON already done at .Lio_return 946 - ssm __LC_SVC_NEW_PSW # reenable interrupts 941 + ENABLE_INTS_TRACE 947 942 lgr %r2,%r11 # pass pointer to pt_regs 948 943 brasl %r14,do_notify_resume 949 - ssm __LC_PGM_NEW_PSW # disable I/O and ext. 
interrupts 950 - TRACE_IRQS_OFF 944 + DISABLE_INTS_TRACE 951 945 j .Lio_return 952 946 ENDPROC(io_int_handler) 953 947 ··· 956 958 BPOFF 957 959 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC 958 960 lg %r12,__LC_CURRENT 959 - larl %r13,cleanup_critical 960 961 lmg %r8,%r9,__LC_EXT_OLD_PSW 961 962 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER 962 963 stmg %r0,%r7,__PT_R0(%r11) ··· 978 981 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 979 982 TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ 980 983 jo .Lio_restore 984 + #if IS_ENABLED(CONFIG_TRACE_IRQFLAGS) 985 + tmhh %r8,0x300 986 + jz 1f 981 987 TRACE_IRQS_OFF 988 + 1: 989 + #endif 982 990 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 983 991 lgr %r2,%r11 # pass pointer to pt_regs 984 992 lghi %r3,EXT_INTERRUPT ··· 992 990 ENDPROC(ext_int_handler) 993 991 994 992 /* 995 - * Load idle PSW. The second "half" of this function is in .Lcleanup_idle. 993 + * Load idle PSW. 996 994 */ 997 995 ENTRY(psw_idle) 998 996 stg %r3,__SF_EMPTY(%r15) 999 - larl %r1,.Lpsw_idle_lpsw+4 997 + larl %r1,.Lpsw_idle_exit 1000 998 stg %r1,__SF_EMPTY+8(%r15) 1001 999 larl %r1,smp_cpu_mtid 1002 1000 llgf %r1,0(%r1) ··· 1008 1006 BPON 1009 1007 STCK __CLOCK_IDLE_ENTER(%r2) 1010 1008 stpt __TIMER_IDLE_ENTER(%r2) 1011 - .Lpsw_idle_lpsw: 1012 1009 lpswe __SF_EMPTY(%r15) 1010 + .Lpsw_idle_exit: 1013 1011 BR_EX %r14 1014 - .Lpsw_idle_end: 1015 1012 ENDPROC(psw_idle) 1016 1013 1017 1014 /* ··· 1021 1020 * of the register contents at return from io or a system call. 1022 1021 */ 1023 1022 ENTRY(save_fpu_regs) 1023 + stnsm __SF_EMPTY(%r15),0xfc 1024 1024 lg %r2,__LC_CURRENT 1025 1025 aghi %r2,__TASK_thread 1026 1026 TSTMSK __LC_CPU_FLAGS,_CIF_FPU ··· 1053 1051 .Lsave_fpu_regs_done: 1054 1052 oi __LC_CPU_FLAGS+7,_CIF_FPU 1055 1053 .Lsave_fpu_regs_exit: 1054 + ssm __SF_EMPTY(%r15) 1056 1055 BR_EX %r14 1057 1056 .Lsave_fpu_regs_end: 1058 1057 ENDPROC(save_fpu_regs) ··· 1105 1102 .Lload_fpu_regs_end: 1106 1103 ENDPROC(load_fpu_regs) 1107 1104 1108 - .L__critical_end: 1109 - 1110 1105 /* 1111 1106 * Machine check handler routines 1112 1107 */ ··· 1117 1116 lam %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs 1118 1117 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs 1119 1118 lg %r12,__LC_CURRENT 1120 - larl %r13,cleanup_critical 1121 1119 lmg %r8,%r9,__LC_MCK_OLD_PSW 1122 1120 TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE 1123 1121 jo .Lmcck_panic # yes -> rest of mcck code invalid ··· 1202 1202 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 1203 1203 lgr %r2,%r11 # pass pointer to pt_regs 1204 1204 brasl %r14,s390_do_machine_check 1205 - tm __PT_PSW+1(%r11),0x01 # returning to user ? 
1206 - jno .Lmcck_return 1205 + cghi %r2,0 1206 + je .Lmcck_return 1207 1207 lg %r1,__LC_KERNEL_STACK # switch to kernel stack 1208 1208 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 1209 1209 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 1210 1210 la %r11,STACK_FRAME_OVERHEAD(%r1) 1211 1211 lgr %r15,%r1 1212 - TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING 1213 - jno .Lmcck_return 1214 1212 TRACE_IRQS_OFF 1215 1213 brasl %r14,s390_handle_mcck 1216 1214 TRACE_IRQS_ON ··· 1278 1280 ENDPROC(stack_overflow) 1279 1281 #endif 1280 1282 1281 - ENTRY(cleanup_critical) 1282 - cghi %r9,__LC_RETURN_LPSWE 1283 - je .Lcleanup_lpswe 1284 1283 #if IS_ENABLED(CONFIG_KVM) 1285 - clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap 1286 - jl 0f 1287 - clg %r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done 1288 - jl .Lcleanup_sie 1289 - #endif 1290 - clg %r9,BASED(.Lcleanup_table) # system_call 1291 - jl 0f 1292 - clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc 1293 - jl .Lcleanup_system_call 1294 - clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif 1295 - jl 0f 1296 - clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore 1297 - jl .Lcleanup_sysc_tif 1298 - clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done 1299 - jl .Lcleanup_sysc_restore 1300 - clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif 1301 - jl 0f 1302 - clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore 1303 - jl .Lcleanup_io_tif 1304 - clg %r9,BASED(.Lcleanup_table+56) # .Lio_done 1305 - jl .Lcleanup_io_restore 1306 - clg %r9,BASED(.Lcleanup_table+64) # psw_idle 1307 - jl 0f 1308 - clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end 1309 - jl .Lcleanup_idle 1310 - clg %r9,BASED(.Lcleanup_table+80) # save_fpu_regs 1311 - jl 0f 1312 - clg %r9,BASED(.Lcleanup_table+88) # .Lsave_fpu_regs_end 1313 - jl .Lcleanup_save_fpu_regs 1314 - clg %r9,BASED(.Lcleanup_table+96) # load_fpu_regs 1315 - jl 0f 1316 - clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end 1317 - jl .Lcleanup_load_fpu_regs 1318 - 0: BR_EX %r14,%r11 1319 - ENDPROC(cleanup_critical) 1320 - 1321 - .align 8 1322 - .Lcleanup_table: 1323 - .quad system_call 1324 - .quad .Lsysc_do_svc 1325 - .quad .Lsysc_tif 1326 - .quad .Lsysc_restore 1327 - .quad .Lsysc_done 1328 - .quad .Lio_tif 1329 - .quad .Lio_restore 1330 - .quad .Lio_done 1331 - .quad psw_idle 1332 - .quad .Lpsw_idle_end 1333 - .quad save_fpu_regs 1334 - .quad .Lsave_fpu_regs_end 1335 - .quad load_fpu_regs 1336 - .quad .Lload_fpu_regs_end 1337 - 1338 - #if IS_ENABLED(CONFIG_KVM) 1339 - .Lcleanup_table_sie: 1340 - .quad .Lsie_gmap 1341 - .quad .Lsie_done 1342 - 1343 1284 .Lcleanup_sie: 1344 - cghi %r11,__LC_SAVE_AREA_ASYNC #Is this in normal interrupt? 1345 - je 1f 1346 - slg %r9,BASED(.Lsie_crit_mcck_start) 1347 - clg %r9,BASED(.Lsie_crit_mcck_length) 1348 - jh 1f 1349 - oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST 1350 - 1: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 1285 + cghi %r11,__LC_SAVE_AREA_ASYNC #Is this in normal interrupt? 
1286 + je 1f 1287 + larl %r13,.Lsie_entry 1288 + slgr %r9,%r13 1289 + larl %r13,.Lsie_skip 1290 + clgr %r9,%r13 1291 + jh 1f 1292 + oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST 1293 + 1: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST) 1351 1294 lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer 1352 1295 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE 1353 1296 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 1354 1297 larl %r9,sie_exit # skip forward to sie_exit 1355 1298 BR_EX %r14,%r11 1356 - #endif 1357 1299 1358 - .Lcleanup_system_call: 1359 - # check if stpt has been executed 1360 - clg %r9,BASED(.Lcleanup_system_call_insn) 1361 - jh 0f 1362 - mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 1363 - cghi %r11,__LC_SAVE_AREA_ASYNC 1364 - je 0f 1365 - mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER 1366 - 0: # check if stmg has been executed 1367 - clg %r9,BASED(.Lcleanup_system_call_insn+8) 1368 - jh 0f 1369 - mvc __LC_SAVE_AREA_SYNC(64),0(%r11) 1370 - 0: # check if base register setup + TIF bit load has been done 1371 - clg %r9,BASED(.Lcleanup_system_call_insn+16) 1372 - jhe 0f 1373 - # set up saved register r12 task struct pointer 1374 - stg %r12,32(%r11) 1375 - # set up saved register r13 __TASK_thread offset 1376 - mvc 40(8,%r11),BASED(.Lcleanup_system_call_const) 1377 - 0: # check if the user time update has been done 1378 - clg %r9,BASED(.Lcleanup_system_call_insn+24) 1379 - jh 0f 1380 - lg %r15,__LC_EXIT_TIMER 1381 - slg %r15,__LC_SYNC_ENTER_TIMER 1382 - alg %r15,__LC_USER_TIMER 1383 - stg %r15,__LC_USER_TIMER 1384 - 0: # check if the system time update has been done 1385 - clg %r9,BASED(.Lcleanup_system_call_insn+32) 1386 - jh 0f 1387 - lg %r15,__LC_LAST_UPDATE_TIMER 1388 - slg %r15,__LC_EXIT_TIMER 1389 - alg %r15,__LC_SYSTEM_TIMER 1390 - stg %r15,__LC_SYSTEM_TIMER 1391 - 0: # update accounting time stamp 1392 - mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 1393 - BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 1394 - # set up saved register r11 1395 - lg %r15,__LC_KERNEL_STACK 1396 - la %r9,STACK_FRAME_OVERHEAD(%r15) 1397 - stg %r9,24(%r11) # r11 pt_regs pointer 1398 - # fill pt_regs 1399 - mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC 1400 - stmg %r0,%r7,__PT_R0(%r9) 1401 - mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW 1402 - mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC 1403 - xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9) 1404 - mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL 1405 - # setup saved register r15 1406 - stg %r15,56(%r11) # r15 stack pointer 1407 - # set new psw address and exit 1408 - larl %r9,.Lsysc_do_svc 1409 - BR_EX %r14,%r11 1410 - .Lcleanup_system_call_insn: 1411 - .quad system_call 1412 - .quad .Lsysc_stmg 1413 - .quad .Lsysc_per 1414 - .quad .Lsysc_vtime+36 1415 - .quad .Lsysc_vtime+42 1416 - .Lcleanup_system_call_const: 1417 - .quad __TASK_thread 1418 - 1419 - .Lcleanup_sysc_tif: 1420 - larl %r9,.Lsysc_tif 1421 - BR_EX %r14,%r11 1422 - 1423 - .Lcleanup_sysc_restore: 1424 - # check if stpt has been executed 1425 - clg %r9,BASED(.Lcleanup_sysc_restore_insn) 1426 - jh 0f 1427 - mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 1428 - cghi %r11,__LC_SAVE_AREA_ASYNC 1429 - je 0f 1430 - mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER 1431 - 0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8) 1432 - je 1f 1433 - lg %r9,24(%r11) # get saved pointer to pt_regs 1434 - mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 1435 - mvc 0(64,%r11),__PT_R8(%r9) 1436 - lmg %r0,%r7,__PT_R0(%r9) 1437 - .Lcleanup_lpswe: 1438 - 1: lmg %r8,%r9,__LC_RETURN_PSW 1439 - BR_EX %r14,%r11 1440 - .Lcleanup_sysc_restore_insn: 
1441 - .quad .Lsysc_exit_timer 1442 - .quad .Lsysc_done - 4 1443 - 1444 - .Lcleanup_io_tif: 1445 - larl %r9,.Lio_tif 1446 - BR_EX %r14,%r11 1447 - 1448 - .Lcleanup_io_restore: 1449 - # check if stpt has been executed 1450 - clg %r9,BASED(.Lcleanup_io_restore_insn) 1451 - jh 0f 1452 - mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER 1453 - 0: clg %r9,BASED(.Lcleanup_io_restore_insn+8) 1454 - je 1f 1455 - lg %r9,24(%r11) # get saved r11 pointer to pt_regs 1456 - mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 1457 - mvc 0(64,%r11),__PT_R8(%r9) 1458 - lmg %r0,%r7,__PT_R0(%r9) 1459 - 1: lmg %r8,%r9,__LC_RETURN_PSW 1460 - BR_EX %r14,%r11 1461 - .Lcleanup_io_restore_insn: 1462 - .quad .Lio_exit_timer 1463 - .quad .Lio_done - 4 1464 - 1465 - .Lcleanup_idle: 1466 - ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT 1467 - # copy interrupt clock & cpu timer 1468 - mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK 1469 - mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER 1470 - cghi %r11,__LC_SAVE_AREA_ASYNC 1471 - je 0f 1472 - mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK 1473 - mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER 1474 - 0: # check if stck & stpt have been executed 1475 - clg %r9,BASED(.Lcleanup_idle_insn) 1476 - jhe 1f 1477 - mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) 1478 - mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) 1479 - 1: # calculate idle cycles 1480 - clg %r9,BASED(.Lcleanup_idle_insn) 1481 - jl 3f 1482 - larl %r1,smp_cpu_mtid 1483 - llgf %r1,0(%r1) 1484 - ltgr %r1,%r1 1485 - jz 3f 1486 - .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15) 1487 - larl %r3,mt_cycles 1488 - ag %r3,__LC_PERCPU_OFFSET 1489 - la %r4,__SF_EMPTY+16(%r15) 1490 - 2: lg %r0,0(%r3) 1491 - slg %r0,0(%r4) 1492 - alg %r0,64(%r4) 1493 - stg %r0,0(%r3) 1494 - la %r3,8(%r3) 1495 - la %r4,8(%r4) 1496 - brct %r1,2b 1497 - 3: # account system time going idle 1498 - lg %r9,__LC_STEAL_TIMER 1499 - alg %r9,__CLOCK_IDLE_ENTER(%r2) 1500 - slg %r9,__LC_LAST_UPDATE_CLOCK 1501 - stg %r9,__LC_STEAL_TIMER 1502 - mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) 1503 - lg %r9,__LC_SYSTEM_TIMER 1504 - alg %r9,__LC_LAST_UPDATE_TIMER 1505 - slg %r9,__TIMER_IDLE_ENTER(%r2) 1506 - stg %r9,__LC_SYSTEM_TIMER 1507 - mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) 1508 - # prepare return psw 1509 - nihh %r8,0xfcfd # clear irq & wait state bits 1510 - lg %r9,48(%r11) # return from psw_idle 1511 - BR_EX %r14,%r11 1512 - .Lcleanup_idle_insn: 1513 - .quad .Lpsw_idle_lpsw 1514 - 1515 - .Lcleanup_save_fpu_regs: 1516 - larl %r9,save_fpu_regs 1517 - BR_EX %r14,%r11 1518 - 1519 - .Lcleanup_load_fpu_regs: 1520 - larl %r9,load_fpu_regs 1521 - BR_EX %r14,%r11 1522 - 1523 - /* 1524 - * Integer constants 1525 - */ 1526 - .align 8 1527 - .Lcritical_start: 1528 - .quad .L__critical_start 1529 - .Lcritical_length: 1530 - .quad .L__critical_end - .L__critical_start 1531 - #if IS_ENABLED(CONFIG_KVM) 1532 - .Lsie_critical_start: 1533 - .quad .Lsie_gmap 1534 - .Lsie_critical_length: 1535 - .quad .Lsie_done - .Lsie_gmap 1536 - .Lsie_crit_mcck_start: 1537 - .quad .Lsie_entry 1538 - .Lsie_crit_mcck_length: 1539 - .quad .Lsie_skip - .Lsie_entry 1540 1300 #endif 1541 1301 .section .rodata, "a" 1542 1302 #define SYSCALL(esame,emu) .quad __s390x_ ## esame
+4 -10
arch/s390/kernel/idle.c
···
 {
 	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
 	unsigned long long idle_time;
-	unsigned long psw_mask;
+	unsigned long psw_mask, flags;
 
-	trace_hardirqs_on();
 
 	/* Wait for external, I/O or machine check interrupt. */
 	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
 		   PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 	clear_cpu_flag(CIF_NOHZ_DELAY);
 
+	local_irq_save(flags);
 	/* Call the assembler magic in entry.S */
 	psw_idle(idle, psw_mask);
+	local_irq_restore(flags);
 
-	trace_hardirqs_off();
 
 	/* Account time spent with enabled wait psw loaded as idle time. */
 	write_seqcount_begin(&idle->seqcount);
···
 
 void arch_cpu_idle_enter(void)
 {
-	local_mcck_disable();
 }
 
 void arch_cpu_idle(void)
 {
-	if (!test_cpu_flag(CIF_MCCK_PENDING))
-		/* Halt the cpu and keep track of cpu time accounting. */
-		enabled_wait();
+	enabled_wait();
 	local_irq_enable();
 }
 
 void arch_cpu_idle_exit(void)
 {
-	local_mcck_enable();
-	if (test_cpu_flag(CIF_MCCK_PENDING))
-		s390_handle_mcck();
 }
 
 void arch_cpu_idle_dead(void)
+16 -7
arch/s390/kernel/nmi.c
···
 	local_mcck_disable();
 	mcck = *this_cpu_ptr(&cpu_mcck);
 	memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
-	clear_cpu_flag(CIF_MCCK_PENDING);
 	local_mcck_enable();
 	local_irq_restore(flags);
 
···
 /*
  * machine check handler.
  */
-void notrace s390_do_machine_check(struct pt_regs *regs)
+int notrace s390_do_machine_check(struct pt_regs *regs)
 {
 	static int ipd_count;
 	static DEFINE_SPINLOCK(ipd_lock);
···
 	unsigned long long tmp;
 	union mci mci;
 	unsigned long mcck_dam_code;
+	int mcck_pending = 0;
 
 	nmi_enter();
 	inc_irq_stat(NMI_NMI);
···
 		 */
 		mcck->kill_task = 1;
 		mcck->mcck_code = mci.val;
-		set_cpu_flag(CIF_MCCK_PENDING);
+		mcck_pending = 1;
 	}
 
 	/*
···
 			mcck->stp_queue |= stp_sync_check();
 		if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
 			mcck->stp_queue |= stp_island_check();
-		if (mcck->stp_queue)
-			set_cpu_flag(CIF_MCCK_PENDING);
+		mcck_pending = 1;
 	}
 
 	/*
···
 	if (mci.cp) {
 		/* Channel report word pending */
 		mcck->channel_report = 1;
-		set_cpu_flag(CIF_MCCK_PENDING);
+		mcck_pending = 1;
 	}
 	if (mci.w) {
 		/* Warning pending */
 		mcck->warning = 1;
-		set_cpu_flag(CIF_MCCK_PENDING);
+		mcck_pending = 1;
 	}
 
 	/*
···
 		*((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
 	}
 	clear_cpu_flag(CIF_MCCK_GUEST);
+
+	if (user_mode(regs) && mcck_pending) {
+		nmi_exit();
+		return 1;
+	}
+
+	if (mcck_pending)
+		schedule_mcck_handler();
+
 	nmi_exit();
+	return 0;
 }
 NOKPROBE_SYMBOL(s390_do_machine_check);
+1 -2
arch/s390/kernel/setup.c
···
 	lc->restart_psw.addr = (unsigned long) restart_int_handler;
 	lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
 	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
-	lc->svc_new_psw.mask = PSW_KERNEL_BITS |
-		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
+	lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
 	lc->svc_new_psw.addr = (unsigned long) system_call;
 	lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
 	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
-3
arch/s390/kvm/kvm-s390.c
···
 	if (need_resched())
 		schedule();
 
-	if (test_cpu_flag(CIF_MCCK_PENDING))
-		s390_handle_mcck();
-
 	if (!kvm_is_ucontrol(vcpu->kvm)) {
 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
 		if (rc)
-2
arch/s390/kvm/vsie.c
···
 
 	if (need_resched())
 		schedule();
-	if (test_cpu_flag(CIF_MCCK_PENDING))
-		s390_handle_mcck();
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
+3 -1
arch/s390/lib/delay.c
···
 
 static void __udelay_disabled(unsigned long long usecs)
 {
-	unsigned long cr0, cr0_new, psw_mask;
+	unsigned long cr0, cr0_new, psw_mask, flags;
 	struct s390_idle_data idle;
 	u64 end;
 
···
 	psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT;
 	set_clock_comparator(end);
 	set_cpu_flag(CIF_IGNORE_IRQ);
+	local_irq_save(flags);
 	psw_idle(&idle, psw_mask);
+	local_irq_restore(flags);
 	clear_cpu_flag(CIF_IGNORE_IRQ);
 	set_clock_comparator(S390_lowcore.clock_comparator);
 	__ctl_load(cr0, 0, 0);