Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390: convert to generic entry

This patch converts s390 to use the generic entry infrastructure from
kernel/entry/*.

There are a few special things on s390:

- PIF_PER_TRAP is moved to TIF_PER_TRAP as the generic code doesn't
know about our PIF flags in exit_to_user_mode_loop().

- The old code had several ways to restart syscalls:

a) PIF_SYSCALL_RESTART, which was only set during execve to force a
restart after upgrading a process (usually qemu-kvm) to pgste page
table extensions.

b) PIF_SYSCALL, which is set by do_signal() to indicate that the
current syscall should be restarted. This is changed so that
do_signal() now also uses PIF_SYSCALL_RESTART. Continuing to use
PIF_SYSCALL doesn't work with the generic code, and changing it
to PIF_SYSCALL_RESTART makes PIF_SYSCALL and PIF_SYSCALL_RESTART
more unique.

- On s390 calling sys_sigreturn or sys_rt_sigreturn is implemented by
executing a svc instruction on the process stack which causes a fault.
While handling that fault the fault code sets PIF_SYSCALL to hand over
processing to the syscall code on exit to usermode.

The patch introduces PIF_SYSCALL_RET_SET, which is set if ptrace sets
a return value for a syscall. The s390x ptrace ABI uses r2 both for the
syscall number and return value, so ptrace cannot set the syscall number +
return value at the same time. The flag makes handling that a bit easier.
do_syscall() will just skip executing the syscall if PIF_SYSCALL_RET_SET
is set.

CONFIG_DEBUG_USER_ASCE was removed in favour of the generic CONFIG_DEBUG_ENTRY.
CR1/7/13 will be checked both on kernel entry and exit to contain the
correct ASCEs.

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>

authored by

Sven Schnelle and committed by
Vasily Gorbik
56e62a73 ac94a291

+652 -920
+1
arch/s390/Kconfig
··· 123 123 select GENERIC_ALLOCATOR 124 124 select GENERIC_CPU_AUTOPROBE 125 125 select GENERIC_CPU_VULNERABILITIES 126 + select GENERIC_ENTRY 126 127 select GENERIC_FIND_FIRST_BIT 127 128 select GENERIC_GETTIMEOFDAY 128 129 select GENERIC_PTDUMP
+6 -4
arch/s390/Kconfig.debug
··· 6 6 config EARLY_PRINTK 7 7 def_bool y 8 8 9 - config DEBUG_USER_ASCE 10 - bool "Debug User ASCE" 9 + config DEBUG_ENTRY 10 + bool "Debug low-level entry code" 11 + depends on DEBUG_KERNEL 11 12 help 12 - Check on exit to user space that address space control 13 - elements are setup correctly. 13 + This option enables sanity checks in s390 low-level entry code. 14 + Some of these sanity checks may slow down kernel entries and 15 + exits or otherwise impact performance. 14 16 15 17 If unsure, say N.
+1 -1
arch/s390/configs/debug_defconfig
··· 833 833 CONFIG_HIST_TRIGGERS=y 834 834 CONFIG_FTRACE_STARTUP_TEST=y 835 835 # CONFIG_EVENT_TRACE_STARTUP_TEST is not set 836 - CONFIG_DEBUG_USER_ASCE=y 837 836 CONFIG_NOTIFIER_ERROR_INJECTION=m 838 837 CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m 839 838 CONFIG_FAULT_INJECTION=y ··· 856 857 CONFIG_ATOMIC64_SELFTEST=y 857 858 CONFIG_TEST_BITOPS=m 858 859 CONFIG_TEST_BPF=m 860 + CONFIG_DEBUG_ENTRY=y
-1
arch/s390/configs/defconfig
··· 781 781 CONFIG_BLK_DEV_IO_TRACE=y 782 782 CONFIG_BPF_KPROBE_OVERRIDE=y 783 783 CONFIG_HIST_TRIGGERS=y 784 - CONFIG_DEBUG_USER_ASCE=y 785 784 CONFIG_LKDTM=m 786 785 CONFIG_PERCPU_TEST=m 787 786 CONFIG_ATOMIC64_SELFTEST=y
+2
arch/s390/include/asm/cputime.h
··· 35 35 36 36 #define arch_idle_time(cpu) arch_cpu_idle_time(cpu) 37 37 38 + void account_idle_time_irq(void); 39 + 38 40 #endif /* _S390_CPUTIME_H */
+3 -4
arch/s390/include/asm/elf.h
··· 233 233 do { \ 234 234 set_personality(PER_LINUX | \ 235 235 (current->personality & (~PER_MASK))); \ 236 - current->thread.sys_call_table = \ 237 - (unsigned long) &sys_call_table; \ 236 + current->thread.sys_call_table = sys_call_table; \ 238 237 } while (0) 239 238 #else /* CONFIG_COMPAT */ 240 239 #define SET_PERSONALITY(ex) \ ··· 244 245 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \ 245 246 set_thread_flag(TIF_31BIT); \ 246 247 current->thread.sys_call_table = \ 247 - (unsigned long) &sys_call_table_emu; \ 248 + sys_call_table_emu; \ 248 249 } else { \ 249 250 clear_thread_flag(TIF_31BIT); \ 250 251 current->thread.sys_call_table = \ 251 - (unsigned long) &sys_call_table; \ 252 + sys_call_table; \ 252 253 } \ 253 254 } while (0) 254 255 #endif /* CONFIG_COMPAT */
+60
arch/s390/include/asm/entry-common.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef ARCH_S390_ENTRY_COMMON_H 3 + #define ARCH_S390_ENTRY_COMMON_H 4 + 5 + #include <linux/sched.h> 6 + #include <linux/audit.h> 7 + #include <linux/tracehook.h> 8 + #include <linux/processor.h> 9 + #include <linux/uaccess.h> 10 + #include <asm/fpu/api.h> 11 + 12 + #define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP) 13 + 14 + void do_per_trap(struct pt_regs *regs); 15 + void do_syscall(struct pt_regs *regs); 16 + 17 + typedef void (*pgm_check_func)(struct pt_regs *regs); 18 + 19 + extern pgm_check_func pgm_check_table[128]; 20 + 21 + #ifdef CONFIG_DEBUG_ENTRY 22 + static __always_inline void arch_check_user_regs(struct pt_regs *regs) 23 + { 24 + debug_user_asce(0); 25 + } 26 + 27 + #define arch_check_user_regs arch_check_user_regs 28 + #endif /* CONFIG_DEBUG_ENTRY */ 29 + 30 + static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs, 31 + unsigned long ti_work) 32 + { 33 + if (ti_work & _TIF_PER_TRAP) { 34 + clear_thread_flag(TIF_PER_TRAP); 35 + do_per_trap(regs); 36 + } 37 + 38 + if (ti_work & _TIF_GUARDED_STORAGE) 39 + gs_load_bc_cb(regs); 40 + } 41 + 42 + #define arch_exit_to_user_mode_work arch_exit_to_user_mode_work 43 + 44 + static __always_inline void arch_exit_to_user_mode(void) 45 + { 46 + if (test_cpu_flag(CIF_FPU)) 47 + __load_fpu_regs(); 48 + 49 + if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) 50 + debug_user_asce(1); 51 + } 52 + 53 + #define arch_exit_to_user_mode arch_exit_to_user_mode 54 + 55 + static inline bool on_thread_stack(void) 56 + { 57 + return !(((unsigned long)(current->stack) ^ current_stack_pointer()) & ~(THREAD_SIZE - 1)); 58 + } 59 + 60 + #endif
+2
arch/s390/include/asm/fpu/api.h
··· 47 47 #include <linux/preempt.h> 48 48 49 49 void save_fpu_regs(void); 50 + void load_fpu_regs(void); 51 + void __load_fpu_regs(void); 50 52 51 53 static inline int test_fp_ctl(u32 fpc) 52 54 {
+3 -1
arch/s390/include/asm/idle.h
··· 20 20 unsigned long long clock_idle_exit; 21 21 unsigned long long timer_idle_enter; 22 22 unsigned long long timer_idle_exit; 23 + unsigned long mt_cycles_enter[8]; 23 24 }; 24 25 25 26 extern struct device_attribute dev_attr_idle_count; 26 27 extern struct device_attribute dev_attr_idle_time_us; 27 28 28 - void psw_idle(struct s390_idle_data *, unsigned long); 29 + void psw_idle(struct s390_idle_data *data, unsigned long psw_mask); 30 + void psw_idle_exit(void); 29 31 30 32 #endif /* _S390_IDLE_H */
+2 -2
arch/s390/include/asm/lowcore.h
··· 81 81 psw_t return_mcck_psw; /* 0x02a0 */ 82 82 83 83 /* CPU accounting and timing values. */ 84 - __u64 sync_enter_timer; /* 0x02b0 */ 85 - __u64 async_enter_timer; /* 0x02b8 */ 84 + __u64 sys_enter_timer; /* 0x02b0 */ 85 + __u8 pad_0x02b8[0x02c0-0x02b8]; /* 0x02b8 */ 86 86 __u64 mcck_enter_timer; /* 0x02c0 */ 87 87 __u64 exit_timer; /* 0x02c8 */ 88 88 __u64 user_timer; /* 0x02d0 */
+1
arch/s390/include/asm/nmi.h
··· 99 99 void nmi_free_per_cpu(struct lowcore *lc); 100 100 101 101 void s390_handle_mcck(void); 102 + void __s390_handle_mcck(void); 102 103 int s390_do_machine_check(struct pt_regs *regs); 103 104 104 105 #endif /* __ASSEMBLY__ */
+32 -20
arch/s390/include/asm/processor.h
··· 38 38 #include <asm/runtime_instr.h> 39 39 #include <asm/fpu/types.h> 40 40 #include <asm/fpu/internal.h> 41 + #include <asm/irqflags.h> 42 + 43 + typedef long (*sys_call_ptr_t)(unsigned long, unsigned long, 44 + unsigned long, unsigned long, 45 + unsigned long, unsigned long); 41 46 42 47 static inline void set_cpu_flag(int flag) 43 48 { ··· 106 101 */ 107 102 struct thread_struct { 108 103 unsigned int acrs[NUM_ACRS]; 109 - unsigned long ksp; /* kernel stack pointer */ 110 - unsigned long user_timer; /* task cputime in user space */ 111 - unsigned long guest_timer; /* task cputime in kvm guest */ 112 - unsigned long system_timer; /* task cputime in kernel space */ 113 - unsigned long hardirq_timer; /* task cputime in hardirq context */ 114 - unsigned long softirq_timer; /* task cputime in softirq context */ 115 - unsigned long sys_call_table; /* system call table address */ 116 - unsigned long gmap_addr; /* address of last gmap fault. */ 117 - unsigned int gmap_write_flag; /* gmap fault write indication */ 118 - unsigned int gmap_int_code; /* int code of last gmap fault */ 119 - unsigned int gmap_pfault; /* signal of a pending guest pfault */ 104 + unsigned long ksp; /* kernel stack pointer */ 105 + unsigned long user_timer; /* task cputime in user space */ 106 + unsigned long guest_timer; /* task cputime in kvm guest */ 107 + unsigned long system_timer; /* task cputime in kernel space */ 108 + unsigned long hardirq_timer; /* task cputime in hardirq context */ 109 + unsigned long softirq_timer; /* task cputime in softirq context */ 110 + const sys_call_ptr_t *sys_call_table; /* system call table address */ 111 + unsigned long gmap_addr; /* address of last gmap fault. 
*/ 112 + unsigned int gmap_write_flag; /* gmap fault write indication */ 113 + unsigned int gmap_int_code; /* int code of last gmap fault */ 114 + unsigned int gmap_pfault; /* signal of a pending guest pfault */ 115 + 120 116 /* Per-thread information related to debugging */ 121 - struct per_regs per_user; /* User specified PER registers */ 122 - struct per_event per_event; /* Cause of the last PER trap */ 123 - unsigned long per_flags; /* Flags to control debug behavior */ 124 - unsigned int system_call; /* system call number in signal */ 125 - unsigned long last_break; /* last breaking-event-address. */ 126 - /* pfault_wait is used to block the process on a pfault event */ 117 + struct per_regs per_user; /* User specified PER registers */ 118 + struct per_event per_event; /* Cause of the last PER trap */ 119 + unsigned long per_flags; /* Flags to control debug behavior */ 120 + unsigned int system_call; /* system call number in signal */ 121 + unsigned long last_break; /* last breaking-event-address. */ 122 + /* pfault_wait is used to block the process on a pfault event */ 127 123 unsigned long pfault_wait; 128 124 struct list_head list; 129 125 /* cpu runtime instrumentation */ 130 126 struct runtime_instr_cb *ri_cb; 131 - struct gs_cb *gs_cb; /* Current guarded storage cb */ 132 - struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */ 133 - unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ 127 + struct gs_cb *gs_cb; /* Current guarded storage cb */ 128 + struct gs_cb *gs_bc_cb; /* Broadcast guarded storage cb */ 129 + unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ 134 130 /* 135 131 * Warning: 'fpu' is dynamically-sized. It *MUST* be at 136 132 * the end. 
··· 190 184 191 185 /* Free guarded storage control block */ 192 186 void guarded_storage_release(struct task_struct *tsk); 187 + void gs_load_bc_cb(struct pt_regs *regs); 193 188 194 189 unsigned long get_wchan(struct task_struct *p); 195 190 #define task_pt_regs(tsk) ((struct pt_regs *) \ ··· 330 323 331 324 extern int s390_isolate_bp(void); 332 325 extern int s390_isolate_bp_guest(void); 326 + 327 + static __always_inline bool regs_irqs_disabled(struct pt_regs *regs) 328 + { 329 + return arch_irqs_disabled_flags(regs->psw.mask); 330 + } 333 331 334 332 #endif /* __ASSEMBLY__ */ 335 333
+6 -3
arch/s390/include/asm/ptrace.h
··· 11 11 #include <uapi/asm/ptrace.h> 12 12 13 13 #define PIF_SYSCALL 0 /* inside a system call */ 14 - #define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */ 15 - #define PIF_SYSCALL_RESTART 2 /* restart the current system call */ 14 + #define PIF_SYSCALL_RESTART 1 /* restart the current system call */ 15 + #define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */ 16 16 #define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */ 17 17 18 18 #define _PIF_SYSCALL BIT(PIF_SYSCALL) 19 - #define _PIF_PER_TRAP BIT(PIF_PER_TRAP) 20 19 #define _PIF_SYSCALL_RESTART BIT(PIF_SYSCALL_RESTART) 20 + #define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET) 21 21 #define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT) 22 22 23 23 #ifndef __ASSEMBLY__ ··· 67 67 typecheck(psw_t, __psw); \ 68 68 &(*(struct psw_bits *)(&(__psw))); \ 69 69 })) 70 + 71 + #define PGM_INT_CODE_MASK 0x7f 72 + #define PGM_INT_CODE_PER 0x80 70 73 71 74 /* 72 75 * The pt_regs struct defines the way the registers are stored on
+9 -2
arch/s390/include/asm/syscall.h
··· 14 14 #include <linux/err.h> 15 15 #include <asm/ptrace.h> 16 16 17 - extern const unsigned long sys_call_table[]; 18 - extern const unsigned long sys_call_table_emu[]; 17 + extern const sys_call_ptr_t sys_call_table[]; 18 + extern const sys_call_ptr_t sys_call_table_emu[]; 19 19 20 20 static inline long syscall_get_nr(struct task_struct *task, 21 21 struct pt_regs *regs) ··· 56 56 struct pt_regs *regs, 57 57 int error, long val) 58 58 { 59 + set_pt_regs_flag(regs, PIF_SYSCALL_RET_SET); 59 60 regs->gprs[2] = error ? error : val; 60 61 } 61 62 ··· 98 97 #endif 99 98 return AUDIT_ARCH_S390X; 100 99 } 100 + 101 + static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs) 102 + { 103 + return false; 104 + } 105 + 101 106 #endif /* _ASM_SYSCALL_H */
+3
arch/s390/include/asm/thread_info.h
··· 36 36 */ 37 37 struct thread_info { 38 38 unsigned long flags; /* low level flags */ 39 + unsigned long syscall_work; /* SYSCALL_WORK_ flags */ 39 40 }; 40 41 41 42 /* ··· 69 68 #define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ 70 69 #define TIF_ISOLATE_BP 8 /* Run process with isolated BP */ 71 70 #define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */ 71 + #define TIF_PER_TRAP 10 /* Need to handle PER trap on exit to usermode */ 72 72 73 73 #define TIF_31BIT 16 /* 32bit process */ 74 74 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ ··· 93 91 #define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING) 94 92 #define _TIF_ISOLATE_BP BIT(TIF_ISOLATE_BP) 95 93 #define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST) 94 + #define _TIF_PER_TRAP BIT(TIF_PER_TRAP) 96 95 97 96 #define _TIF_31BIT BIT(TIF_31BIT) 98 97 #define _TIF_SINGLE_STEP BIT(TIF_SINGLE_STEP)
+1 -1
arch/s390/include/asm/uaccess.h
··· 18 18 #include <asm/extable.h> 19 19 #include <asm/facility.h> 20 20 21 - void debug_user_asce(void); 21 + void debug_user_asce(int exit); 22 22 23 23 static inline int __range_ok(unsigned long addr, unsigned long size) 24 24 {
+14
arch/s390/include/asm/vtime.h
··· 4 4 5 5 #define __ARCH_HAS_VTIME_TASK_SWITCH 6 6 7 + static inline void update_timer_sys(void) 8 + { 9 + S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer; 10 + S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.sys_enter_timer; 11 + S390_lowcore.last_update_timer = S390_lowcore.sys_enter_timer; 12 + } 13 + 14 + static inline void update_timer_mcck(void) 15 + { 16 + S390_lowcore.system_timer += S390_lowcore.last_update_timer - S390_lowcore.exit_timer; 17 + S390_lowcore.user_timer += S390_lowcore.exit_timer - S390_lowcore.mcck_enter_timer; 18 + S390_lowcore.last_update_timer = S390_lowcore.mcck_enter_timer; 19 + } 20 + 7 21 #endif /* _S390_VTIME_H */
+3 -2
arch/s390/include/uapi/asm/ptrace.h
··· 179 179 #define ACR_SIZE 4 180 180 181 181 182 - #define PTRACE_OLDSETOPTIONS 21 183 - 182 + #define PTRACE_OLDSETOPTIONS 21 183 + #define PTRACE_SYSEMU 31 184 + #define PTRACE_SYSEMU_SINGLESTEP 32 184 185 #ifndef __ASSEMBLY__ 185 186 #include <linux/stddef.h> 186 187 #include <linux/types.h>
+1 -1
arch/s390/kernel/Makefile
··· 34 34 CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls 35 35 36 36 obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o 37 - obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o 37 + obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o 38 38 obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o 39 39 obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o 40 40 obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
+2 -17
arch/s390/kernel/asm-offsets.c
··· 26 26 BLANK(); 27 27 /* thread struct offsets */ 28 28 OFFSET(__THREAD_ksp, thread_struct, ksp); 29 - OFFSET(__THREAD_sysc_table, thread_struct, sys_call_table); 30 - OFFSET(__THREAD_last_break, thread_struct, last_break); 31 - OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc); 32 - OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs); 33 - OFFSET(__THREAD_per_cause, thread_struct, per_event.cause); 34 - OFFSET(__THREAD_per_address, thread_struct, per_event.address); 35 - OFFSET(__THREAD_per_paid, thread_struct, per_event.paid); 36 - OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb); 37 29 BLANK(); 38 30 /* thread info offsets */ 39 31 OFFSET(__TI_flags, task_struct, thread_info.flags); 40 32 BLANK(); 41 33 /* pt_regs offsets */ 42 - OFFSET(__PT_ARGS, pt_regs, args); 43 34 OFFSET(__PT_PSW, pt_regs, psw); 44 35 OFFSET(__PT_GPRS, pt_regs, gprs); 45 36 OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2); 46 - OFFSET(__PT_INT_CODE, pt_regs, int_code); 47 - OFFSET(__PT_INT_PARM, pt_regs, int_parm); 48 - OFFSET(__PT_INT_PARM_LONG, pt_regs, int_parm_long); 49 37 OFFSET(__PT_FLAGS, pt_regs, flags); 50 38 OFFSET(__PT_CR1, pt_regs, cr1); 51 39 DEFINE(__PT_SIZE, sizeof(struct pt_regs)); ··· 52 64 OFFSET(__CLOCK_IDLE_EXIT, s390_idle_data, clock_idle_exit); 53 65 OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter); 54 66 OFFSET(__TIMER_IDLE_EXIT, s390_idle_data, timer_idle_exit); 67 + OFFSET(__MT_CYCLES_ENTER, s390_idle_data, mt_cycles_enter); 55 68 BLANK(); 56 69 /* hardware defined lowcore locations 0x000 - 0x1ff */ 57 70 OFFSET(__LC_EXT_PARAMS, lowcore, ext_params); ··· 104 115 OFFSET(__LC_CPU_FLAGS, lowcore, cpu_flags); 105 116 OFFSET(__LC_RETURN_PSW, lowcore, return_psw); 106 117 OFFSET(__LC_RETURN_MCCK_PSW, lowcore, return_mcck_psw); 107 - OFFSET(__LC_SYNC_ENTER_TIMER, lowcore, sync_enter_timer); 108 - OFFSET(__LC_ASYNC_ENTER_TIMER, lowcore, async_enter_timer); 118 + OFFSET(__LC_SYS_ENTER_TIMER, lowcore, sys_enter_timer); 109 119 OFFSET(__LC_MCCK_ENTER_TIMER, lowcore, 
mcck_enter_timer); 110 120 OFFSET(__LC_EXIT_TIMER, lowcore, exit_timer); 111 - OFFSET(__LC_USER_TIMER, lowcore, user_timer); 112 - OFFSET(__LC_SYSTEM_TIMER, lowcore, system_timer); 113 - OFFSET(__LC_STEAL_TIMER, lowcore, steal_timer); 114 121 OFFSET(__LC_LAST_UPDATE_TIMER, lowcore, last_update_timer); 115 122 OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock); 116 123 OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
+1
arch/s390/kernel/compat_signal.c
··· 118 118 fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu); 119 119 120 120 clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ 121 + clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART); 121 122 return 0; 122 123 } 123 124
+89 -714
arch/s390/kernel/entry.S
··· 51 51 STACK_SIZE = 1 << STACK_SHIFT 52 52 STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE 53 53 54 - _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 55 - _TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING | \ 56 - _TIF_NOTIFY_SIGNAL) 57 - _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ 58 - _TIF_SYSCALL_TRACEPOINT) 59 - _CIF_WORK = (_CIF_FPU) 60 - _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART) 61 - 62 54 _LPP_OFFSET = __LC_LPP 63 - 64 - .macro TRACE_IRQS_ON 65 - #ifdef CONFIG_TRACE_IRQFLAGS 66 - basr %r2,%r0 67 - brasl %r14,trace_hardirqs_on_caller 68 - #endif 69 - .endm 70 - 71 - .macro TRACE_IRQS_OFF 72 - #ifdef CONFIG_TRACE_IRQFLAGS 73 - basr %r2,%r0 74 - brasl %r14,trace_hardirqs_off_caller 75 - #endif 76 - .endm 77 - 78 - .macro LOCKDEP_SYS_EXIT 79 - #ifdef CONFIG_LOCKDEP 80 - tm __PT_PSW+1(%r11),0x01 # returning to user ? 81 - jz .+10 82 - brasl %r14,lockdep_sys_exit 83 - #endif 84 - .endm 85 55 86 56 .macro CHECK_STACK savearea 87 57 #ifdef CONFIG_CHECK_STACK 88 58 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 89 59 lghi %r14,\savearea 90 60 jz stack_overflow 91 - #endif 92 - .endm 93 - 94 - .macro DEBUG_USER_ASCE 95 - #ifdef CONFIG_DEBUG_USER_ASCE 96 - brasl %r14,debug_user_asce 97 61 #endif 98 62 .endm 99 63 ··· 81 117 #endif 82 118 .endm 83 119 84 - .macro SWITCH_ASYNC savearea,timer,clock 120 + .macro SWITCH_KERNEL savearea 85 121 tmhh %r8,0x0001 # interrupting from user ? 
86 - jnz 4f 122 + jnz 1f 87 123 #if IS_ENABLED(CONFIG_KVM) 88 124 lgr %r14,%r9 89 125 larl %r13,.Lsie_gmap ··· 94 130 lghi %r11,\savearea # inside critical section, do cleanup 95 131 brasl %r14,.Lcleanup_sie 96 132 #endif 97 - 0: larl %r13,.Lpsw_idle_exit 98 - cgr %r13,%r9 99 - jne 3f 100 - 101 - larl %r1,smp_cpu_mtid 102 - llgf %r1,0(%r1) 103 - ltgr %r1,%r1 104 - jz 2f # no SMT, skip mt_cycles calculation 105 - .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15) 106 - larl %r3,mt_cycles 107 - ag %r3,__LC_PERCPU_OFFSET 108 - la %r4,__SF_EMPTY+16(%r15) 109 - 1: lg %r0,0(%r3) 110 - slg %r0,0(%r4) 111 - alg %r0,64(%r4) 112 - stg %r0,0(%r3) 113 - la %r3,8(%r3) 114 - la %r4,8(%r4) 115 - brct %r1,1b 116 - 117 - 2: mvc __CLOCK_IDLE_EXIT(8,%r2), \clock 118 - mvc __TIMER_IDLE_EXIT(8,%r2), \timer 119 - # account system time going idle 120 - ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT 121 - 122 - lg %r13,__LC_STEAL_TIMER 123 - alg %r13,__CLOCK_IDLE_ENTER(%r2) 124 - slg %r13,__LC_LAST_UPDATE_CLOCK 125 - stg %r13,__LC_STEAL_TIMER 126 - 127 - mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2) 128 - 129 - lg %r13,__LC_SYSTEM_TIMER 130 - alg %r13,__LC_LAST_UPDATE_TIMER 131 - slg %r13,__TIMER_IDLE_ENTER(%r2) 132 - stg %r13,__LC_SYSTEM_TIMER 133 - mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) 134 - 135 - nihh %r8,0xfcfd # clear wait state and irq bits 136 - 3: lg %r14,__LC_ASYNC_STACK # are we already on the target stack? 
137 - slgr %r14,%r15 138 - srag %r14,%r14,STACK_SHIFT 139 - jnz 5f 140 - CHECK_STACK \savearea 133 + 0: CHECK_STACK \savearea 134 + lgr %r11,%r15 141 135 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 142 - j 6f 143 - 4: UPDATE_VTIME %r14,%r15,\timer 144 - BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 145 - 5: lg %r15,__LC_ASYNC_STACK # load async stack 146 - 6: la %r11,STACK_FRAME_OVERHEAD(%r15) 147 - .endm 148 - 149 - .macro UPDATE_VTIME w1,w2,enter_timer 150 - lg \w1,__LC_EXIT_TIMER 151 - lg \w2,__LC_LAST_UPDATE_TIMER 152 - slg \w1,\enter_timer 153 - slg \w2,__LC_EXIT_TIMER 154 - alg \w1,__LC_USER_TIMER 155 - alg \w2,__LC_SYSTEM_TIMER 156 - stg \w1,__LC_USER_TIMER 157 - stg \w2,__LC_SYSTEM_TIMER 158 - mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer 159 - .endm 160 - 161 - .macro RESTORE_SM_CLEAR_PER 162 - stg %r8,__LC_RETURN_PSW 163 - ni __LC_RETURN_PSW,0xbf 164 - ssm __LC_RETURN_PSW 165 - .endm 166 - 167 - .macro ENABLE_INTS 168 - stosm __SF_EMPTY(%r15),3 169 - .endm 170 - 171 - .macro ENABLE_INTS_TRACE 172 - TRACE_IRQS_ON 173 - ENABLE_INTS 174 - .endm 175 - 176 - .macro DISABLE_INTS 177 - stnsm __SF_EMPTY(%r15),0xfc 178 - .endm 179 - 180 - .macro DISABLE_INTS_TRACE 181 - DISABLE_INTS 182 - TRACE_IRQS_OFF 136 + stg %r11,__SF_BACKCHAIN(%r15) 137 + j 2f 138 + 1: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 139 + lctlg %c1,%c1,__LC_KERNEL_ASCE 140 + lg %r15,__LC_KERNEL_STACK 141 + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 142 + 2: la %r11,STACK_FRAME_OVERHEAD(%r15) 183 143 .endm 184 144 185 145 .macro STCK savearea ··· 155 267 "jnz .+8; .long 0xb2e8d000", 82 156 268 .endm 157 269 158 - GEN_BR_THUNK %r9 159 270 GEN_BR_THUNK %r14 160 271 GEN_BR_THUNK %r14,%r11 161 272 162 273 .section .kprobes.text, "ax" 163 274 .Ldummy: 164 275 /* 165 - * This nop exists only in order to avoid that __switch_to starts at 276 + * This nop exists only in order to avoid that __bpon starts at 166 277 * the beginning of the kprobes text section. 
In that case we would 167 278 * have several symbols at the same address. E.g. objdump would take 168 279 * an arbitrary symbol name when disassembling this code. 169 - * With the added nop in between the __switch_to symbol is unique 280 + * With the added nop in between the __bpon symbol is unique 170 281 * again. 171 282 */ 172 283 nop 0 ··· 214 327 stg %r3,__SF_SIE_SAVEAREA(%r15) # save guest register save area 215 328 xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0 216 329 mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags 217 - TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ? 218 - jno .Lsie_load_guest_gprs 219 - brasl %r14,load_fpu_regs # load guest fp/vx regs 220 - .Lsie_load_guest_gprs: 221 330 lmg %r0,%r13,0(%r3) # load guest gprs 0-13 222 331 lg %r14,__LC_GMAP # get gmap pointer 223 332 ltgr %r14,%r14 ··· 253 370 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 254 371 xgr %r0,%r0 # clear guest registers to 255 372 xgr %r1,%r1 # prevent speculative use 256 - xgr %r2,%r2 257 373 xgr %r3,%r3 258 374 xgr %r4,%r4 259 375 xgr %r5,%r5 ··· 279 397 */ 280 398 281 399 ENTRY(system_call) 282 - stpt __LC_SYNC_ENTER_TIMER 400 + stpt __LC_SYS_ENTER_TIMER 283 401 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 284 402 BPOFF 285 - lg %r12,__LC_CURRENT 286 - lghi %r14,_PIF_SYSCALL 403 + lghi %r14,0 287 404 .Lsysc_per: 288 405 lctlg %c1,%c1,__LC_KERNEL_ASCE 289 - lghi %r13,__TASK_thread 406 + lg %r12,__LC_CURRENT 290 407 lg %r15,__LC_KERNEL_STACK 291 - la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs 292 - UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER 293 - BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 294 - stmg %r0,%r7,__PT_R0(%r11) 295 - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 296 - mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW 297 - mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC 298 - stg %r14,__PT_FLAGS(%r11) 299 408 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 300 - ENABLE_INTS 301 - .Lsysc_do_svc: 409 + stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 
410 + BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 302 411 # clear user controlled register to prevent speculative use 303 412 xgr %r0,%r0 304 - # load address of system call table 305 - lg %r10,__THREAD_sysc_table(%r13,%r12) 306 - llgh %r8,__PT_INT_CODE+2(%r11) 307 - slag %r8,%r8,3 # shift and test for svc 0 308 - jnz .Lsysc_nr_ok 309 - # svc 0: system call number in %r1 310 - llgfr %r1,%r1 # clear high word in r1 311 - sth %r1,__PT_INT_CODE+2(%r11) 312 - cghi %r1,NR_syscalls 313 - jnl .Lsysc_nr_ok 314 - slag %r8,%r1,3 315 - .Lsysc_nr_ok: 316 - stg %r2,__PT_ORIG_GPR2(%r11) 317 - stg %r7,STACK_FRAME_OVERHEAD(%r15) 318 - lg %r9,0(%r8,%r10) # get system call add. 319 - TSTMSK __TI_flags(%r12),_TIF_TRACE 320 - jnz .Lsysc_tracesys 321 - BASR_EX %r14,%r9 # call sys_xxxx 322 - stg %r2,__PT_R2(%r11) # store return value 323 - 324 - .Lsysc_return: 325 - #ifdef CONFIG_DEBUG_RSEQ 326 - lgr %r2,%r11 327 - brasl %r14,rseq_syscall 328 - #endif 329 - LOCKDEP_SYS_EXIT 330 - .Lsysc_tif: 331 - DISABLE_INTS 332 - TSTMSK __PT_FLAGS(%r11),_PIF_WORK 333 - jnz .Lsysc_work 334 - TSTMSK __TI_flags(%r12),_TIF_WORK 335 - jnz .Lsysc_work # check for work 336 - DEBUG_USER_ASCE 413 + xgr %r1,%r1 414 + xgr %r4,%r4 415 + xgr %r5,%r5 416 + xgr %r6,%r6 417 + xgr %r7,%r7 418 + xgr %r8,%r8 419 + xgr %r9,%r9 420 + xgr %r10,%r10 421 + xgr %r11,%r11 422 + la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs 423 + lgr %r3,%r14 424 + brasl %r14,__do_syscall 337 425 lctlg %c1,%c1,__LC_USER_ASCE 338 - BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 339 - TSTMSK __LC_CPU_FLAGS, _CIF_FPU 340 - jz .Lsysc_skip_fpu 341 - brasl %r14,load_fpu_regs 342 - .Lsysc_skip_fpu: 343 - mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 426 + mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) 427 + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 428 + lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 344 429 stpt __LC_EXIT_TIMER 345 - lmg %r0,%r15,__PT_R0(%r11) 346 430 b __LC_RETURN_LPSWE 347 - 348 - # 349 - # One of the work bits is on. 
Find out which one. 350 - # 351 - .Lsysc_work: 352 - ENABLE_INTS 353 - TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 354 - jo .Lsysc_reschedule 355 - TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART 356 - jo .Lsysc_syscall_restart 357 - #ifdef CONFIG_UPROBES 358 - TSTMSK __TI_flags(%r12),_TIF_UPROBE 359 - jo .Lsysc_uprobe_notify 360 - #endif 361 - TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE 362 - jo .Lsysc_guarded_storage 363 - TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP 364 - jo .Lsysc_singlestep 365 - #ifdef CONFIG_LIVEPATCH 366 - TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING 367 - jo .Lsysc_patch_pending # handle live patching just before 368 - # signals and possible syscall restart 369 - #endif 370 - TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART 371 - jo .Lsysc_syscall_restart 372 - TSTMSK __TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL) 373 - jnz .Lsysc_sigpending 374 - TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME 375 - jo .Lsysc_notify_resume 376 - j .Lsysc_return 377 - 378 - # 379 - # _TIF_NEED_RESCHED is set, call schedule 380 - # 381 - .Lsysc_reschedule: 382 - larl %r14,.Lsysc_return 383 - jg schedule 384 - 385 - # 386 - # _TIF_SIGPENDING is set, call do_signal 387 - # 388 - .Lsysc_sigpending: 389 - lgr %r2,%r11 # pass pointer to pt_regs 390 - brasl %r14,do_signal 391 - TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL 392 - jno .Lsysc_return 393 - .Lsysc_do_syscall: 394 - lghi %r13,__TASK_thread 395 - lmg %r2,%r7,__PT_R2(%r11) # load svc arguments 396 - lghi %r1,0 # svc 0 returns -ENOSYS 397 - j .Lsysc_do_svc 398 - 399 - # 400 - # _TIF_NOTIFY_RESUME is set, call do_notify_resume 401 - # 402 - .Lsysc_notify_resume: 403 - lgr %r2,%r11 # pass pointer to pt_regs 404 - larl %r14,.Lsysc_return 405 - jg do_notify_resume 406 - 407 - # 408 - # _TIF_UPROBE is set, call uprobe_notify_resume 409 - # 410 - #ifdef CONFIG_UPROBES 411 - .Lsysc_uprobe_notify: 412 - lgr %r2,%r11 # pass pointer to pt_regs 413 - larl %r14,.Lsysc_return 414 - jg uprobe_notify_resume 415 - #endif 416 - 417 - # 418 - # 
_TIF_GUARDED_STORAGE is set, call guarded_storage_load 419 - # 420 - .Lsysc_guarded_storage: 421 - lgr %r2,%r11 # pass pointer to pt_regs 422 - larl %r14,.Lsysc_return 423 - jg gs_load_bc_cb 424 - # 425 - # _TIF_PATCH_PENDING is set, call klp_update_patch_state 426 - # 427 - #ifdef CONFIG_LIVEPATCH 428 - .Lsysc_patch_pending: 429 - lg %r2,__LC_CURRENT # pass pointer to task struct 430 - larl %r14,.Lsysc_return 431 - jg klp_update_patch_state 432 - #endif 433 - 434 - # 435 - # _PIF_PER_TRAP is set, call do_per_trap 436 - # 437 - .Lsysc_singlestep: 438 - ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP 439 - lgr %r2,%r11 # pass pointer to pt_regs 440 - larl %r14,.Lsysc_return 441 - jg do_per_trap 442 - 443 - # 444 - # _PIF_SYSCALL_RESTART is set, repeat the current system call 445 - # 446 - .Lsysc_syscall_restart: 447 - ni __PT_FLAGS+7(%r11),255-_PIF_SYSCALL_RESTART 448 - lmg %r1,%r7,__PT_R1(%r11) # load svc arguments 449 - lg %r2,__PT_ORIG_GPR2(%r11) 450 - j .Lsysc_do_svc 451 - 452 - # 453 - # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before 454 - # and after the system call 455 - # 456 - .Lsysc_tracesys: 457 - lgr %r2,%r11 # pass pointer to pt_regs 458 - la %r3,0 459 - llgh %r0,__PT_INT_CODE+2(%r11) 460 - stg %r0,__PT_R2(%r11) 461 - brasl %r14,do_syscall_trace_enter 462 - lghi %r0,NR_syscalls 463 - clgr %r0,%r2 464 - jnh .Lsysc_tracenogo 465 - sllg %r8,%r2,3 466 - lg %r9,0(%r8,%r10) 467 - lmg %r3,%r7,__PT_R3(%r11) 468 - stg %r7,STACK_FRAME_OVERHEAD(%r15) 469 - lg %r2,__PT_ORIG_GPR2(%r11) 470 - BASR_EX %r14,%r9 # call sys_xxx 471 - stg %r2,__PT_R2(%r11) # store return value 472 - .Lsysc_tracenogo: 473 - TSTMSK __TI_flags(%r12),_TIF_TRACE 474 - jz .Lsysc_return 475 - lgr %r2,%r11 # pass pointer to pt_regs 476 - larl %r14,.Lsysc_return 477 - jg do_syscall_trace_exit 478 431 ENDPROC(system_call) 479 432 480 433 # 481 434 # a new process exits the kernel with ret_from_fork 482 435 # 483 436 ENTRY(ret_from_fork) 484 - la %r11,STACK_FRAME_OVERHEAD(%r15) 485 
- lg %r12,__LC_CURRENT 486 - brasl %r14,schedule_tail 487 - tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 488 - jne .Lsysc_tracenogo 489 - # it's a kernel thread 490 - lmg %r9,%r10,__PT_R9(%r11) # load gprs 491 - la %r2,0(%r10) 492 - BASR_EX %r14,%r9 493 - j .Lsysc_tracenogo 437 + lgr %r3,%r11 438 + brasl %r14,__ret_from_fork 439 + lctlg %c1,%c1,__LC_USER_ASCE 440 + mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) 441 + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 442 + lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 443 + stpt __LC_EXIT_TIMER 444 + b __LC_RETURN_LPSWE 494 445 ENDPROC(ret_from_fork) 495 - 496 - ENTRY(kernel_thread_starter) 497 - la %r2,0(%r10) 498 - BASR_EX %r14,%r9 499 - j .Lsysc_tracenogo 500 - ENDPROC(kernel_thread_starter) 501 446 502 447 /* 503 448 * Program check handler routine 504 449 */ 505 450 506 451 ENTRY(pgm_check_handler) 507 - stpt __LC_SYNC_ENTER_TIMER 452 + stpt __LC_SYS_ENTER_TIMER 508 453 BPOFF 509 454 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 510 - lg %r10,__LC_LAST_BREAK 511 - srag %r11,%r10,12 512 - jnz 0f 513 - /* if __LC_LAST_BREAK is < 4096, it contains one of 514 - * the lpswe addresses in lowcore. Set it to 1 (initial state) 515 - * to prevent leaking that address to userspace. 516 - */ 517 - lghi %r10,1 518 - 0: lg %r12,__LC_CURRENT 519 - lghi %r11,0 455 + lg %r12,__LC_CURRENT 456 + lghi %r10,0 520 457 lmg %r8,%r9,__LC_PGM_OLD_PSW 521 458 tmhh %r8,0x0001 # coming from user space? 522 459 jno .Lpgm_skip_asce 523 460 lctlg %c1,%c1,__LC_KERNEL_ASCE 524 - j 3f 461 + j 3f # -> fault in user space 525 462 .Lpgm_skip_asce: 526 463 #if IS_ENABLED(CONFIG_KVM) 527 464 # cleanup critical section for program checks in sie64a ··· 354 653 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 355 654 lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce 356 655 larl %r9,sie_exit # skip forward to sie_exit 357 - lghi %r11,_PIF_GUEST_FAULT 656 + lghi %r10,_PIF_GUEST_FAULT 358 657 #endif 359 658 1: tmhh %r8,0x4000 # PER bit set in old PSW ? 
360 659 jnz 2f # -> enabled, can't be a double fault ··· 362 661 jnz .Lpgm_svcper # -> single stepped svc 363 662 2: CHECK_STACK __LC_SAVE_AREA_SYNC 364 663 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 365 - # CHECK_VMAP_STACK branches to stack_overflow or 5f 366 - CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f 367 - 3: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER 368 - BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 664 + # CHECK_VMAP_STACK branches to stack_overflow or 4f 665 + CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f 666 + 3: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP 369 667 lg %r15,__LC_KERNEL_STACK 370 - lgr %r14,%r12 371 - aghi %r14,__TASK_thread # pointer to thread_struct 372 - lghi %r13,__LC_PGM_TDB 373 - tm __LC_PGM_ILC+2,0x02 # check for transaction abort 374 - jz 4f 375 - mvc __THREAD_trap_tdb(256,%r14),0(%r13) 376 - 4: stg %r10,__THREAD_last_break(%r14) 377 - 5: lgr %r13,%r11 378 - la %r11,STACK_FRAME_OVERHEAD(%r15) 668 + 4: la %r11,STACK_FRAME_OVERHEAD(%r15) 669 + stg %r10,__PT_FLAGS(%r11) 670 + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 379 671 stmg %r0,%r7,__PT_R0(%r11) 672 + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 673 + stmg %r8,%r9,__PT_PSW(%r11) 674 + 380 675 # clear user controlled registers to prevent speculative use 381 676 xgr %r0,%r0 382 677 xgr %r1,%r1 383 - xgr %r2,%r2 384 678 xgr %r3,%r3 385 679 xgr %r4,%r4 386 680 xgr %r5,%r5 387 681 xgr %r6,%r6 388 682 xgr %r7,%r7 389 - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 390 - stmg %r8,%r9,__PT_PSW(%r11) 391 - mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC 392 - mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE 393 - stg %r13,__PT_FLAGS(%r11) 394 - stg %r10,__PT_ARGS(%r11) 395 - tm __LC_PGM_ILC+3,0x80 # check for per exception 396 - jz 6f 397 - tmhh %r8,0x0001 # kernel per event ? 
398 - jz .Lpgm_kprobe 399 - oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP 400 - mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS 401 - mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE 402 - mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID 403 - 6: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 404 - RESTORE_SM_CLEAR_PER 405 - larl %r1,pgm_check_table 406 - llgh %r10,__PT_INT_CODE+2(%r11) 407 - nill %r10,0x007f 408 - sll %r10,3 409 - je .Lpgm_return 410 - lg %r9,0(%r10,%r1) # load address of handler routine 411 - lgr %r2,%r11 # pass pointer to pt_regs 412 - BASR_EX %r14,%r9 # branch to interrupt-handler 413 - .Lpgm_return: 414 - LOCKDEP_SYS_EXIT 415 - tm __PT_PSW+1(%r11),0x01 # returning to user ? 416 - jno .Lpgm_restore 417 - TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL 418 - jo .Lsysc_do_syscall 419 - j .Lsysc_tif 420 - .Lpgm_restore: 421 - DISABLE_INTS 422 - TSTMSK __LC_CPU_FLAGS, _CIF_FPU 423 - jz .Lpgm_skip_fpu 424 - brasl %r14,load_fpu_regs 425 - .Lpgm_skip_fpu: 426 - mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 683 + lgr %r2,%r11 684 + brasl %r14,__do_pgm_check 685 + tmhh %r8,0x0001 # returning to user space? 
686 + jno .Lpgm_exit_kernel 687 + lctlg %c1,%c1,__LC_USER_ASCE 688 + BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 427 689 stpt __LC_EXIT_TIMER 428 - lmg %r0,%r15,__PT_R0(%r11) 690 + .Lpgm_exit_kernel: 691 + mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) 692 + lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) 429 693 b __LC_RETURN_LPSWE 430 - 431 - # 432 - # PER event in supervisor state, must be kprobes 433 - # 434 - .Lpgm_kprobe: 435 - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 436 - RESTORE_SM_CLEAR_PER 437 - lgr %r2,%r11 # pass pointer to pt_regs 438 - brasl %r14,do_per_trap 439 - j .Lpgm_return 440 694 441 695 # 442 696 # single stepped system call ··· 400 744 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW 401 745 larl %r14,.Lsysc_per 402 746 stg %r14,__LC_RETURN_PSW+8 403 - lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP 747 + lghi %r14,1 404 748 lpswe __LC_RETURN_PSW # branch to .Lsysc_per 405 749 ENDPROC(pgm_check_handler) 406 750 407 751 /* 408 - * IO interrupt handler routine 752 + * Interrupt handler macro used for external and IO interrupts. 409 753 */ 410 - ENTRY(io_int_handler) 754 + .macro INT_HANDLER name,lc_old_psw,handler 755 + ENTRY(\name) 411 756 STCK __LC_INT_CLOCK 412 - stpt __LC_ASYNC_ENTER_TIMER 757 + stpt __LC_SYS_ENTER_TIMER 413 758 BPOFF 414 759 stmg %r8,%r15,__LC_SAVE_AREA_ASYNC 415 760 lg %r12,__LC_CURRENT 416 - lmg %r8,%r9,__LC_IO_OLD_PSW 417 - SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK 761 + lmg %r8,%r9,\lc_old_psw 762 + SWITCH_KERNEL __LC_SAVE_AREA_ASYNC 418 763 stmg %r0,%r7,__PT_R0(%r11) 419 764 # clear user controlled registers to prevent speculative use 420 765 xgr %r0,%r0 421 766 xgr %r1,%r1 422 - xgr %r2,%r2 423 767 xgr %r3,%r3 424 768 xgr %r4,%r4 425 769 xgr %r5,%r5 ··· 428 772 xgr %r10,%r10 429 773 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC 430 774 stmg %r8,%r9,__PT_PSW(%r11) 431 - tm __PT_PSW+1(%r11),0x01 # coming from user space? 432 - jno .Lio_skip_asce 775 + tm %r8,0x0001 # coming from user space? 
776 + jno 1f 433 777 lctlg %c1,%c1,__LC_KERNEL_ASCE 434 - .Lio_skip_asce: 435 - mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID 436 - xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 437 - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 438 - TRACE_IRQS_OFF 439 - .Lio_loop: 440 - lgr %r2,%r11 # pass pointer to pt_regs 441 - lghi %r3,IO_INTERRUPT 442 - tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? 443 - jz .Lio_call 444 - lghi %r3,THIN_INTERRUPT 445 - .Lio_call: 446 - brasl %r14,do_IRQ 447 - TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR 448 - jz .Lio_return 449 - tpi 0 450 - jz .Lio_return 451 - mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID 452 - j .Lio_loop 453 - .Lio_return: 454 - LOCKDEP_SYS_EXIT 455 - TSTMSK __TI_flags(%r12),_TIF_WORK 456 - jnz .Lio_work # there is work to do (signals etc.) 457 - TSTMSK __LC_CPU_FLAGS,_CIF_WORK 458 - jnz .Lio_work 459 - .Lio_restore: 460 - TRACE_IRQS_ON 778 + 1: lgr %r2,%r11 # pass pointer to pt_regs 779 + brasl %r14,\handler 461 780 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 462 - tm __PT_PSW+1(%r11),0x01 # returning to user ? 463 - jno .Lio_exit_kernel 464 - DEBUG_USER_ASCE 781 + tmhh %r8,0x0001 # returning to user ? 782 + jno 2f 465 783 lctlg %c1,%c1,__LC_USER_ASCE 466 784 BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP 467 785 stpt __LC_EXIT_TIMER 468 - .Lio_exit_kernel: 469 - lmg %r0,%r15,__PT_R0(%r11) 786 + 2: lmg %r0,%r15,__PT_R0(%r11) 470 787 b __LC_RETURN_LPSWE 471 - .Lio_done: 788 + ENDPROC(\name) 789 + .endm 472 790 473 - # 474 - # There is work todo, find out in which context we have been interrupted: 475 - # 1) if we return to user space we can do all _TIF_WORK work 476 - # 2) if we return to kernel code and kvm is enabled check if we need to 477 - # modify the psw to leave SIE 478 - # 3) if we return to kernel code and preemptive scheduling is enabled check 479 - # the preemption counter and if it is zero call preempt_schedule_irq 480 - # Before any work can be done, a switch to the kernel stack is required. 
481 - # 482 - .Lio_work: 483 - tm __PT_PSW+1(%r11),0x01 # returning to user ? 484 - jo .Lio_work_user # yes -> do resched & signal 485 - #ifdef CONFIG_PREEMPTION 486 - # check for preemptive scheduling 487 - icm %r0,15,__LC_PREEMPT_COUNT 488 - jnz .Lio_restore # preemption is disabled 489 - TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 490 - jno .Lio_restore 491 - # switch to kernel stack 492 - lg %r1,__PT_R15(%r11) 493 - aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 494 - mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 495 - xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 496 - la %r11,STACK_FRAME_OVERHEAD(%r1) 497 - lgr %r15,%r1 498 - brasl %r14,preempt_schedule_irq 499 - j .Lio_return 500 - #else 501 - j .Lio_restore 502 - #endif 503 - 504 - # 505 - # Need to do work before returning to userspace, switch to kernel stack 506 - # 507 - .Lio_work_user: 508 - lg %r1,__LC_KERNEL_STACK 509 - mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 510 - xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 511 - la %r11,STACK_FRAME_OVERHEAD(%r1) 512 - lgr %r15,%r1 513 - 514 - # 515 - # One of the work bits is on. Find out which one. 516 - # 517 - TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED 518 - jo .Lio_reschedule 519 - #ifdef CONFIG_LIVEPATCH 520 - TSTMSK __TI_flags(%r12),_TIF_PATCH_PENDING 521 - jo .Lio_patch_pending 522 - #endif 523 - TSTMSK __TI_flags(%r12),(_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL) 524 - jnz .Lio_sigpending 525 - TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME 526 - jo .Lio_notify_resume 527 - TSTMSK __TI_flags(%r12),_TIF_GUARDED_STORAGE 528 - jo .Lio_guarded_storage 529 - TSTMSK __LC_CPU_FLAGS,_CIF_FPU 530 - jo .Lio_vxrs 531 - j .Lio_return 532 - 533 - # 534 - # CIF_FPU is set, restore floating-point controls and floating-point registers. 
535 - # 536 - .Lio_vxrs: 537 - larl %r14,.Lio_return 538 - jg load_fpu_regs 539 - 540 - # 541 - # _TIF_GUARDED_STORAGE is set, call guarded_storage_load 542 - # 543 - .Lio_guarded_storage: 544 - ENABLE_INTS_TRACE 545 - lgr %r2,%r11 # pass pointer to pt_regs 546 - brasl %r14,gs_load_bc_cb 547 - DISABLE_INTS_TRACE 548 - j .Lio_return 549 - 550 - # 551 - # _TIF_NEED_RESCHED is set, call schedule 552 - # 553 - .Lio_reschedule: 554 - ENABLE_INTS_TRACE 555 - brasl %r14,schedule # call scheduler 556 - DISABLE_INTS_TRACE 557 - j .Lio_return 558 - 559 - # 560 - # _TIF_PATCH_PENDING is set, call klp_update_patch_state 561 - # 562 - #ifdef CONFIG_LIVEPATCH 563 - .Lio_patch_pending: 564 - lg %r2,__LC_CURRENT # pass pointer to task struct 565 - larl %r14,.Lio_return 566 - jg klp_update_patch_state 567 - #endif 568 - 569 - # 570 - # _TIF_SIGPENDING or is set, call do_signal 571 - # 572 - .Lio_sigpending: 573 - ENABLE_INTS_TRACE 574 - lgr %r2,%r11 # pass pointer to pt_regs 575 - brasl %r14,do_signal 576 - DISABLE_INTS_TRACE 577 - j .Lio_return 578 - 579 - # 580 - # _TIF_NOTIFY_RESUME or is set, call do_notify_resume 581 - # 582 - .Lio_notify_resume: 583 - ENABLE_INTS_TRACE 584 - lgr %r2,%r11 # pass pointer to pt_regs 585 - brasl %r14,do_notify_resume 586 - DISABLE_INTS_TRACE 587 - j .Lio_return 588 - ENDPROC(io_int_handler) 589 - 590 - /* 591 - * External interrupt handler routine 592 - */ 593 - ENTRY(ext_int_handler) 594 - STCK __LC_INT_CLOCK 595 - stpt __LC_ASYNC_ENTER_TIMER 596 - BPOFF 597 - stmg %r8,%r15,__LC_SAVE_AREA_ASYNC 598 - lg %r12,__LC_CURRENT 599 - lmg %r8,%r9,__LC_EXT_OLD_PSW 600 - SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER,__LC_INT_CLOCK 601 - stmg %r0,%r7,__PT_R0(%r11) 602 - # clear user controlled registers to prevent speculative use 603 - xgr %r0,%r0 604 - xgr %r1,%r1 605 - xgr %r2,%r2 606 - xgr %r3,%r3 607 - xgr %r4,%r4 608 - xgr %r5,%r5 609 - xgr %r6,%r6 610 - xgr %r7,%r7 611 - xgr %r10,%r10 612 - mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC 613 - 
stmg %r8,%r9,__PT_PSW(%r11) 614 - tm __PT_PSW+1(%r11),0x01 # coming from user space? 615 - jno .Lext_skip_asce 616 - lctlg %c1,%c1,__LC_KERNEL_ASCE 617 - .Lext_skip_asce: 618 - lghi %r1,__LC_EXT_PARAMS2 619 - mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR 620 - mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS 621 - mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) 622 - xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 623 - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 624 - TRACE_IRQS_OFF 625 - lgr %r2,%r11 # pass pointer to pt_regs 626 - lghi %r3,EXT_INTERRUPT 627 - brasl %r14,do_IRQ 628 - j .Lio_return 629 - ENDPROC(ext_int_handler) 791 + INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq 792 + INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq 630 793 631 794 /* 632 795 * Load idle PSW. 633 796 */ 634 797 ENTRY(psw_idle) 635 798 stg %r3,__SF_EMPTY(%r15) 636 - larl %r1,.Lpsw_idle_exit 799 + larl %r1,psw_idle_exit 637 800 stg %r1,__SF_EMPTY+8(%r15) 638 801 larl %r1,smp_cpu_mtid 639 802 llgf %r1,0(%r1) 640 803 ltgr %r1,%r1 641 804 jz .Lpsw_idle_stcctm 642 - .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15) 805 + .insn rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2) 643 806 .Lpsw_idle_stcctm: 644 807 oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT 645 808 BPON 646 809 STCK __CLOCK_IDLE_ENTER(%r2) 647 810 stpt __TIMER_IDLE_ENTER(%r2) 648 811 lpswe __SF_EMPTY(%r15) 649 - .Lpsw_idle_exit: 812 + .globl psw_idle_exit 813 + psw_idle_exit: 650 814 BR_EX %r14 651 815 ENDPROC(psw_idle) 652 - 653 - /* 654 - * Store floating-point controls and floating-point or vector register 655 - * depending whether the vector facility is available. A critical section 656 - * cleanup assures that the registers are stored even if interrupted for 657 - * some other work. The CIF_FPU flag is set to trigger a lazy restore 658 - * of the register contents at return from io or a system call. 
659 - */ 660 - ENTRY(save_fpu_regs) 661 - stnsm __SF_EMPTY(%r15),0xfc 662 - lg %r2,__LC_CURRENT 663 - aghi %r2,__TASK_thread 664 - TSTMSK __LC_CPU_FLAGS,_CIF_FPU 665 - jo .Lsave_fpu_regs_exit 666 - stfpc __THREAD_FPU_fpc(%r2) 667 - lg %r3,__THREAD_FPU_regs(%r2) 668 - TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX 669 - jz .Lsave_fpu_regs_fp # no -> store FP regs 670 - VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) 671 - VSTM %v16,%v31,256,%r3 # vstm 16,31,256(3) 672 - j .Lsave_fpu_regs_done # -> set CIF_FPU flag 673 - .Lsave_fpu_regs_fp: 674 - std 0,0(%r3) 675 - std 1,8(%r3) 676 - std 2,16(%r3) 677 - std 3,24(%r3) 678 - std 4,32(%r3) 679 - std 5,40(%r3) 680 - std 6,48(%r3) 681 - std 7,56(%r3) 682 - std 8,64(%r3) 683 - std 9,72(%r3) 684 - std 10,80(%r3) 685 - std 11,88(%r3) 686 - std 12,96(%r3) 687 - std 13,104(%r3) 688 - std 14,112(%r3) 689 - std 15,120(%r3) 690 - .Lsave_fpu_regs_done: 691 - oi __LC_CPU_FLAGS+7,_CIF_FPU 692 - .Lsave_fpu_regs_exit: 693 - ssm __SF_EMPTY(%r15) 694 - BR_EX %r14 695 - .Lsave_fpu_regs_end: 696 - ENDPROC(save_fpu_regs) 697 - EXPORT_SYMBOL(save_fpu_regs) 698 - 699 - /* 700 - * Load floating-point controls and floating-point or vector registers. 701 - * A critical section cleanup assures that the register contents are 702 - * loaded even if interrupted for some other work. 
703 - * 704 - * There are special calling conventions to fit into sysc and io return work: 705 - * %r15: <kernel stack> 706 - * The function requires: 707 - * %r4 708 - */ 709 - load_fpu_regs: 710 - stnsm __SF_EMPTY(%r15),0xfc 711 - lg %r4,__LC_CURRENT 712 - aghi %r4,__TASK_thread 713 - TSTMSK __LC_CPU_FLAGS,_CIF_FPU 714 - jno .Lload_fpu_regs_exit 715 - lfpc __THREAD_FPU_fpc(%r4) 716 - TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX 717 - lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area 718 - jz .Lload_fpu_regs_fp # -> no VX, load FP regs 719 - VLM %v0,%v15,0,%r4 720 - VLM %v16,%v31,256,%r4 721 - j .Lload_fpu_regs_done 722 - .Lload_fpu_regs_fp: 723 - ld 0,0(%r4) 724 - ld 1,8(%r4) 725 - ld 2,16(%r4) 726 - ld 3,24(%r4) 727 - ld 4,32(%r4) 728 - ld 5,40(%r4) 729 - ld 6,48(%r4) 730 - ld 7,56(%r4) 731 - ld 8,64(%r4) 732 - ld 9,72(%r4) 733 - ld 10,80(%r4) 734 - ld 11,88(%r4) 735 - ld 12,96(%r4) 736 - ld 13,104(%r4) 737 - ld 14,112(%r4) 738 - ld 15,120(%r4) 739 - .Lload_fpu_regs_done: 740 - ni __LC_CPU_FLAGS+7,255-_CIF_FPU 741 - .Lload_fpu_regs_exit: 742 - ssm __SF_EMPTY(%r15) 743 - BR_EX %r14 744 - .Lload_fpu_regs_end: 745 - ENDPROC(load_fpu_regs) 746 816 747 817 /* 748 818 * Machine check handler routines ··· 528 1146 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 529 1147 TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID 530 1148 jo 3f 531 - la %r14,__LC_SYNC_ENTER_TIMER 532 - clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER 533 - jl 0f 534 - la %r14,__LC_ASYNC_ENTER_TIMER 535 - 0: clc 0(8,%r14),__LC_EXIT_TIMER 1149 + la %r14,__LC_SYS_ENTER_TIMER 1150 + clc 0(8,%r14),__LC_EXIT_TIMER 536 1151 jl 1f 537 1152 la %r14,__LC_EXIT_TIMER 538 1153 1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER ··· 544 1165 TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID 545 1166 jno .Lmcck_panic 546 1167 4: ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 547 - SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER,__LC_MCCK_CLOCK 1168 + SWITCH_KERNEL __LC_GPREGS_SAVE_AREA+64 548 1169 .Lmcck_skip: 549 1170 lghi 
%r14,__LC_GPREGS_SAVE_AREA+64 550 1171 stmg %r0,%r7,__PT_R0(%r11) 551 1172 # clear user controlled registers to prevent speculative use 552 1173 xgr %r0,%r0 553 1174 xgr %r1,%r1 554 - xgr %r2,%r2 555 1175 xgr %r3,%r3 556 1176 xgr %r4,%r4 557 1177 xgr %r5,%r5 ··· 561 1183 stmg %r8,%r9,__PT_PSW(%r11) 562 1184 la %r14,4095 563 1185 mvc __PT_CR1(8,%r11),__LC_CREGS_SAVE_AREA-4095+8(%r14) 564 - lctlg %c1,%c1,__LC_KERNEL_ASCE 565 1186 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 566 1187 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 567 1188 lgr %r2,%r11 # pass pointer to pt_regs ··· 572 1195 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 573 1196 la %r11,STACK_FRAME_OVERHEAD(%r1) 574 1197 lgr %r15,%r1 575 - TRACE_IRQS_OFF 576 1198 brasl %r14,s390_handle_mcck 577 - TRACE_IRQS_ON 578 1199 .Lmcck_return: 579 1200 lctlg %c1,%c1,__PT_CR1(%r11) 580 1201 lmg %r0,%r10,__PT_R0(%r11)
+5 -7
arch/s390/kernel/entry.h
··· 17 17 void mcck_int_handler(void); 18 18 void restart_int_handler(void); 19 19 20 - asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); 21 - asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); 20 + void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs); 21 + void __do_pgm_check(struct pt_regs *regs); 22 + void __do_syscall(struct pt_regs *regs, int per_trap); 22 23 23 24 void do_protection_exception(struct pt_regs *regs); 24 25 void do_dat_exception(struct pt_regs *regs); ··· 49 48 void vector_exception(struct pt_regs *regs); 50 49 void monitor_event_exception(struct pt_regs *regs); 51 50 52 - void do_per_trap(struct pt_regs *regs); 53 51 void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str); 54 - void syscall_trace(struct pt_regs *regs, int entryexit); 55 52 void kernel_stack_overflow(struct pt_regs * regs); 56 53 void do_signal(struct pt_regs *regs); 57 54 void handle_signal32(struct ksignal *ksig, sigset_t *oldset, ··· 57 58 void do_notify_resume(struct pt_regs *regs); 58 59 59 60 void __init init_IRQ(void); 60 - void do_IRQ(struct pt_regs *regs, int irq); 61 + void do_io_irq(struct pt_regs *regs); 62 + void do_ext_irq(struct pt_regs *regs); 61 63 void do_restart(void); 62 64 void __init startup_init(void); 63 65 void die(struct pt_regs *regs, const char *str); ··· 81 81 long sys_s390_sthyi(unsigned long function_code, void __user *buffer, u64 __user *return_code, unsigned long flags); 82 82 83 83 DECLARE_PER_CPU(u64, mt_cycles[8]); 84 - 85 - void gs_load_bc_cb(struct pt_regs *regs); 86 84 87 85 unsigned long stack_alloc(void); 88 86 void stack_free(unsigned long stack);
+88
arch/s390/kernel/fpu.c
··· 175 175 : "1", "cc"); 176 176 } 177 177 EXPORT_SYMBOL(__kernel_fpu_end); 178 + 179 + void __load_fpu_regs(void) 180 + { 181 + struct fpu *state = &current->thread.fpu; 182 + unsigned long *regs = current->thread.fpu.regs; 183 + 184 + asm volatile("lfpc %0" : : "Q" (state->fpc)); 185 + if (likely(MACHINE_HAS_VX)) { 186 + asm volatile("lgr 1,%0\n" 187 + "VLM 0,15,0,1\n" 188 + "VLM 16,31,256,1\n" 189 + : 190 + : "d" (regs) 191 + : "1", "cc", "memory"); 192 + } else { 193 + asm volatile("ld 0,%0" : : "Q" (regs[0])); 194 + asm volatile("ld 1,%0" : : "Q" (regs[1])); 195 + asm volatile("ld 2,%0" : : "Q" (regs[2])); 196 + asm volatile("ld 3,%0" : : "Q" (regs[3])); 197 + asm volatile("ld 4,%0" : : "Q" (regs[4])); 198 + asm volatile("ld 5,%0" : : "Q" (regs[5])); 199 + asm volatile("ld 6,%0" : : "Q" (regs[6])); 200 + asm volatile("ld 7,%0" : : "Q" (regs[7])); 201 + asm volatile("ld 8,%0" : : "Q" (regs[8])); 202 + asm volatile("ld 9,%0" : : "Q" (regs[9])); 203 + asm volatile("ld 10,%0" : : "Q" (regs[10])); 204 + asm volatile("ld 11,%0" : : "Q" (regs[11])); 205 + asm volatile("ld 12,%0" : : "Q" (regs[12])); 206 + asm volatile("ld 13,%0" : : "Q" (regs[13])); 207 + asm volatile("ld 14,%0" : : "Q" (regs[14])); 208 + asm volatile("ld 15,%0" : : "Q" (regs[15])); 209 + } 210 + clear_cpu_flag(CIF_FPU); 211 + } 212 + EXPORT_SYMBOL(__load_fpu_regs); 213 + 214 + void load_fpu_regs(void) 215 + { 216 + raw_local_irq_disable(); 217 + __load_fpu_regs(); 218 + raw_local_irq_enable(); 219 + } 220 + EXPORT_SYMBOL(load_fpu_regs); 221 + 222 + void save_fpu_regs(void) 223 + { 224 + unsigned long flags, *regs; 225 + struct fpu *state; 226 + 227 + local_irq_save(flags); 228 + 229 + if (test_cpu_flag(CIF_FPU)) 230 + goto out; 231 + 232 + state = &current->thread.fpu; 233 + regs = current->thread.fpu.regs; 234 + 235 + asm volatile("stfpc %0" : "=Q" (state->fpc)); 236 + if (likely(MACHINE_HAS_VX)) { 237 + asm volatile("lgr 1,%0\n" 238 + "VSTM 0,15,0,1\n" 239 + "VSTM 16,31,256,1\n" 240 + : 241 + : 
"d" (regs) 242 + : "1", "cc", "memory"); 243 + } else { 244 + asm volatile("std 0,%0" : "=Q" (regs[0])); 245 + asm volatile("std 1,%0" : "=Q" (regs[1])); 246 + asm volatile("std 2,%0" : "=Q" (regs[2])); 247 + asm volatile("std 3,%0" : "=Q" (regs[3])); 248 + asm volatile("std 4,%0" : "=Q" (regs[4])); 249 + asm volatile("std 5,%0" : "=Q" (regs[5])); 250 + asm volatile("std 6,%0" : "=Q" (regs[6])); 251 + asm volatile("std 7,%0" : "=Q" (regs[7])); 252 + asm volatile("std 8,%0" : "=Q" (regs[8])); 253 + asm volatile("std 9,%0" : "=Q" (regs[9])); 254 + asm volatile("std 10,%0" : "=Q" (regs[10])); 255 + asm volatile("std 11,%0" : "=Q" (regs[11])); 256 + asm volatile("std 12,%0" : "=Q" (regs[12])); 257 + asm volatile("std 13,%0" : "=Q" (regs[13])); 258 + asm volatile("std 14,%0" : "=Q" (regs[14])); 259 + asm volatile("std 15,%0" : "=Q" (regs[15])); 260 + } 261 + set_cpu_flag(CIF_FPU); 262 + out: 263 + local_irq_restore(flags); 264 + } 265 + EXPORT_SYMBOL(save_fpu_regs);
+24
arch/s390/kernel/idle.c
··· 14 14 #include <linux/cpu.h> 15 15 #include <linux/sched/cputime.h> 16 16 #include <trace/events/power.h> 17 + #include <asm/cpu_mf.h> 17 18 #include <asm/nmi.h> 18 19 #include <asm/smp.h> 19 20 #include "entry.h" 20 21 21 22 static DEFINE_PER_CPU(struct s390_idle_data, s390_idle); 23 + 24 + void account_idle_time_irq(void) 25 + { 26 + struct s390_idle_data *idle = this_cpu_ptr(&s390_idle); 27 + u64 cycles_new[8]; 28 + int i; 29 + 30 + clear_cpu_flag(CIF_ENABLED_WAIT); 31 + if (smp_cpu_mtid) { 32 + stcctm(MT_DIAG, smp_cpu_mtid, cycles_new); 33 + for (i = 0; i < smp_cpu_mtid; i++) 34 + this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]); 35 + } 36 + 37 + idle->clock_idle_exit = S390_lowcore.int_clock; 38 + idle->timer_idle_exit = S390_lowcore.sys_enter_timer; 39 + 40 + S390_lowcore.steal_timer += idle->clock_idle_enter - S390_lowcore.last_update_clock; 41 + S390_lowcore.last_update_clock = idle->clock_idle_exit; 42 + 43 + S390_lowcore.system_timer += S390_lowcore.last_update_timer - idle->timer_idle_enter; 44 + S390_lowcore.last_update_timer = idle->timer_idle_exit; 45 + } 22 46 23 47 void arch_cpu_idle(void) 24 48 {
+84 -5
arch/s390/kernel/irq.c
··· 21 21 #include <linux/init.h> 22 22 #include <linux/cpu.h> 23 23 #include <linux/irq.h> 24 + #include <linux/entry-common.h> 24 25 #include <asm/irq_regs.h> 25 26 #include <asm/cputime.h> 26 27 #include <asm/lowcore.h> ··· 96 95 {.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"}, 97 96 }; 98 97 99 - void do_IRQ(struct pt_regs *regs, int irq) 98 + static void do_IRQ(struct pt_regs *regs, int irq) 100 99 { 101 - struct pt_regs *old_regs; 102 - 103 - old_regs = set_irq_regs(regs); 104 - irq_enter(); 105 100 if (tod_after_eq(S390_lowcore.int_clock, 106 101 S390_lowcore.clock_comparator)) 107 102 /* Serve timer interrupts first. */ 108 103 clock_comparator_work(); 109 104 generic_handle_irq(irq); 105 + } 106 + 107 + static int on_async_stack(void) 108 + { 109 + unsigned long frame = current_frame_address(); 110 + 111 + return !!!((S390_lowcore.async_stack - frame) >> (PAGE_SHIFT + THREAD_SIZE_ORDER)); 112 + } 113 + 114 + static void do_irq_async(struct pt_regs *regs, int irq) 115 + { 116 + if (on_async_stack()) 117 + do_IRQ(regs, irq); 118 + else 119 + CALL_ON_STACK(do_IRQ, S390_lowcore.async_stack, 2, regs, irq); 120 + } 121 + 122 + static int irq_pending(struct pt_regs *regs) 123 + { 124 + int cc; 125 + 126 + asm volatile("tpi 0\n" 127 + "ipm %0" : "=d" (cc) : : "cc"); 128 + return cc >> 28; 129 + } 130 + 131 + void noinstr do_io_irq(struct pt_regs *regs) 132 + { 133 + irqentry_state_t state = irqentry_enter(regs); 134 + struct pt_regs *old_regs = set_irq_regs(regs); 135 + int from_idle; 136 + 137 + irq_enter(); 138 + 139 + if (user_mode(regs)) 140 + update_timer_sys(); 141 + 142 + from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit; 143 + if (from_idle) 144 + account_idle_time_irq(); 145 + 146 + do { 147 + memcpy(&regs->int_code, &S390_lowcore.subchannel_id, 12); 148 + if (S390_lowcore.io_int_word & BIT(31)) 149 + do_irq_async(regs, THIN_INTERRUPT); 150 + else 151 + do_irq_async(regs, IO_INTERRUPT); 152 + } while 
(MACHINE_IS_LPAR && irq_pending(regs)); 153 + 110 154 irq_exit(); 111 155 set_irq_regs(old_regs); 156 + irqentry_exit(regs, state); 157 + 158 + if (from_idle) 159 + regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT); 160 + } 161 + 162 + void noinstr do_ext_irq(struct pt_regs *regs) 163 + { 164 + irqentry_state_t state = irqentry_enter(regs); 165 + struct pt_regs *old_regs = set_irq_regs(regs); 166 + int from_idle; 167 + 168 + irq_enter(); 169 + 170 + if (user_mode(regs)) 171 + update_timer_sys(); 172 + 173 + memcpy(&regs->int_code, &S390_lowcore.ext_cpu_addr, 4); 174 + regs->int_parm = S390_lowcore.ext_params; 175 + regs->int_parm_long = *(unsigned long *)S390_lowcore.ext_params2; 176 + 177 + from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit; 178 + if (from_idle) 179 + account_idle_time_irq(); 180 + 181 + do_irq_async(regs, EXT_INTERRUPT); 182 + 183 + irq_exit(); 184 + set_irq_regs(old_regs); 185 + irqentry_exit(regs, state); 186 + 187 + if (from_idle) 188 + regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT); 112 189 } 113 190 114 191 static void show_msi_interrupt(struct seq_file *p, int irq)
+12 -7
arch/s390/kernel/nmi.c
··· 131 131 NOKPROBE_SYMBOL(s390_handle_damage); 132 132 133 133 /* 134 - * Main machine check handler function. Will be called with interrupts enabled 135 - * or disabled and machine checks enabled or disabled. 134 + * Main machine check handler function. Will be called with interrupts disabled 135 + * and machine checks enabled. 136 136 */ 137 - void s390_handle_mcck(void) 137 + void __s390_handle_mcck(void) 138 138 { 139 - unsigned long flags; 140 139 struct mcck_struct mcck; 141 140 142 141 /* ··· 143 144 * machine checks. Afterwards delete the old state and enable machine 144 145 * checks again. 145 146 */ 146 - local_irq_save(flags); 147 147 local_mcck_disable(); 148 148 mcck = *this_cpu_ptr(&cpu_mcck); 149 149 memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck)); 150 150 local_mcck_enable(); 151 - local_irq_restore(flags); 152 151 153 152 if (mcck.channel_report) 154 153 crw_handle_channel_report(); ··· 178 181 do_exit(SIGSEGV); 179 182 } 180 183 } 181 - EXPORT_SYMBOL_GPL(s390_handle_mcck); 182 184 185 + void noinstr s390_handle_mcck(void) 186 + { 187 + trace_hardirqs_off(); 188 + __s390_handle_mcck(); 189 + trace_hardirqs_on(); 190 + } 183 191 /* 184 192 * returns 0 if all required registers are available 185 193 * returns 1 otherwise ··· 346 344 int mcck_pending = 0; 347 345 348 346 nmi_enter(); 347 + 348 + if (user_mode(regs)) 349 + update_timer_mcck(); 349 350 inc_irq_stat(NMI_NMI); 350 351 mci.val = S390_lowcore.mcck_interruption_code; 351 352 mcck = this_cpu_ptr(&cpu_mcck);
+23 -7
arch/s390/kernel/process.c
··· 29 29 #include <linux/random.h> 30 30 #include <linux/export.h> 31 31 #include <linux/init_task.h> 32 + #include <linux/entry-common.h> 32 33 #include <asm/cpu_mf.h> 33 34 #include <asm/io.h> 34 35 #include <asm/processor.h> ··· 44 43 #include <asm/unwind.h> 45 44 #include "entry.h" 46 45 47 - asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); 46 + void ret_from_fork(void) asm("ret_from_fork"); 48 47 49 - extern void kernel_thread_starter(void); 48 + void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs) 49 + { 50 + void (*func)(void *arg); 51 + 52 + schedule_tail(prev); 53 + 54 + if (!user_mode(regs)) { 55 + /* Kernel thread */ 56 + func = (void *)regs->gprs[9]; 57 + func((void *)regs->gprs[10]); 58 + } 59 + clear_pt_regs_flag(regs, PIF_SYSCALL); 60 + syscall_exit_to_user_mode(regs); 61 + } 50 62 51 63 void flush_thread(void) 52 64 { ··· 122 108 p->thread.last_break = 1; 123 109 124 110 frame->sf.back_chain = 0; 111 + frame->sf.gprs[5] = (unsigned long)frame + sizeof(struct stack_frame); 112 + frame->sf.gprs[6] = (unsigned long)p; 125 113 /* new return point is ret_from_fork */ 126 - frame->sf.gprs[8] = (unsigned long) ret_from_fork; 114 + frame->sf.gprs[8] = (unsigned long)ret_from_fork; 127 115 /* fake return stack for resume(), don't go back to schedule */ 128 - frame->sf.gprs[9] = (unsigned long) frame; 116 + frame->sf.gprs[9] = (unsigned long)frame; 129 117 130 118 /* Store access registers to kernel stack of new process. 
*/ 131 119 if (unlikely(p->flags & PF_KTHREAD)) { ··· 136 120 frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | 137 121 PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; 138 122 frame->childregs.psw.addr = 139 - (unsigned long) kernel_thread_starter; 123 + (unsigned long)__ret_from_fork; 140 124 frame->childregs.gprs[9] = new_stackp; /* function */ 141 125 frame->childregs.gprs[10] = arg; 142 - frame->childregs.gprs[11] = (unsigned long) do_exit; 126 + frame->childregs.gprs[11] = (unsigned long)do_exit; 143 127 frame->childregs.orig_gpr2 = -1; 144 128 145 129 return 0; ··· 169 153 return 0; 170 154 } 171 155 172 - asmlinkage void execve_tail(void) 156 + void execve_tail(void) 173 157 { 174 158 current->thread.fpu.fpc = 0; 175 159 asm volatile("sfpc %0" : : "d" (0));
+12 -105
arch/s390/kernel/ptrace.c
··· 7 7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 8 */ 9 9 10 + #include "asm/ptrace.h" 10 11 #include <linux/kernel.h> 11 12 #include <linux/sched.h> 12 13 #include <linux/sched/task_stack.h> ··· 37 36 #ifdef CONFIG_COMPAT 38 37 #include "compat_ptrace.h" 39 38 #endif 40 - 41 - #define CREATE_TRACE_POINTS 42 - #include <trace/events/syscalls.h> 43 39 44 40 void update_cr_regs(struct task_struct *task) 45 41 { ··· 138 140 memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); 139 141 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); 140 142 clear_tsk_thread_flag(task, TIF_SINGLE_STEP); 141 - clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP); 143 + clear_tsk_thread_flag(task, TIF_PER_TRAP); 142 144 task->thread.per_flags = 0; 143 145 } 144 146 ··· 320 322 child->thread.per_user.end = data; 321 323 } 322 324 323 - static void fixup_int_code(struct task_struct *child, addr_t data) 324 - { 325 - struct pt_regs *regs = task_pt_regs(child); 326 - int ilc = regs->int_code >> 16; 327 - u16 insn; 328 - 329 - if (ilc > 6) 330 - return; 331 - 332 - if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16), 333 - &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn)) 334 - return; 335 - 336 - /* double check that tracee stopped on svc instruction */ 337 - if ((insn >> 8) != 0xa) 338 - return; 339 - 340 - regs->int_code = 0x20000 | (data & 0xffff); 341 - } 342 325 /* 343 326 * Write a word to the user area of a process at location addr. This 344 327 * operation does have an additional problem compared to peek_user. 
··· 353 374 } 354 375 355 376 if (test_pt_regs_flag(regs, PIF_SYSCALL) && 356 - addr == offsetof(struct user, regs.gprs[2])) 357 - fixup_int_code(child, data); 358 - *(addr_t *)((addr_t) &regs->psw + addr) = data; 377 + addr == offsetof(struct user, regs.gprs[2])) { 378 + struct pt_regs *regs = task_pt_regs(child); 359 379 380 + regs->int_code = 0x20000 | (data & 0xffff); 381 + } 382 + *(addr_t *)((addr_t) &regs->psw + addr) = data; 360 383 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { 361 384 /* 362 385 * access registers are stored in the thread structure ··· 723 742 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) | 724 743 (__u64)(tmp & PSW32_ADDR_AMODE); 725 744 } else { 726 - 727 745 if (test_pt_regs_flag(regs, PIF_SYSCALL) && 728 - addr == offsetof(struct compat_user, regs.gprs[2])) 729 - fixup_int_code(child, data); 746 + addr == offsetof(struct compat_user, regs.gprs[2])) { 747 + struct pt_regs *regs = task_pt_regs(child); 748 + 749 + regs->int_code = 0x20000 | (data & 0xffff); 750 + } 730 751 /* gpr 0-15 */ 731 752 *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp; 732 753 } ··· 844 861 return compat_ptrace_request(child, request, addr, data); 845 862 } 846 863 #endif 847 - 848 - asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) 849 - { 850 - unsigned long mask = -1UL; 851 - long ret = -1; 852 - 853 - if (is_compat_task()) 854 - mask = 0xffffffff; 855 - 856 - /* 857 - * The sysc_tracesys code in entry.S stored the system 858 - * call number to gprs[2]. 859 - */ 860 - if (test_thread_flag(TIF_SYSCALL_TRACE) && 861 - tracehook_report_syscall_entry(regs)) { 862 - /* 863 - * Tracing decided this syscall should not happen. Skip 864 - * the system call and the system call restart handling. 865 - */ 866 - goto skip; 867 - } 868 - 869 - #ifdef CONFIG_SECCOMP 870 - /* Do the secure computing check after ptrace. 
*/ 871 - if (unlikely(test_thread_flag(TIF_SECCOMP))) { 872 - struct seccomp_data sd; 873 - 874 - if (is_compat_task()) { 875 - sd.instruction_pointer = regs->psw.addr & 0x7fffffff; 876 - sd.arch = AUDIT_ARCH_S390; 877 - } else { 878 - sd.instruction_pointer = regs->psw.addr; 879 - sd.arch = AUDIT_ARCH_S390X; 880 - } 881 - 882 - sd.nr = regs->int_code & 0xffff; 883 - sd.args[0] = regs->orig_gpr2 & mask; 884 - sd.args[1] = regs->gprs[3] & mask; 885 - sd.args[2] = regs->gprs[4] & mask; 886 - sd.args[3] = regs->gprs[5] & mask; 887 - sd.args[4] = regs->gprs[6] & mask; 888 - sd.args[5] = regs->gprs[7] & mask; 889 - 890 - if (__secure_computing(&sd) == -1) 891 - goto skip; 892 - } 893 - #endif /* CONFIG_SECCOMP */ 894 - 895 - if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 896 - trace_sys_enter(regs, regs->int_code & 0xffff); 897 - 898 - 899 - audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask, 900 - regs->gprs[3] &mask, regs->gprs[4] &mask, 901 - regs->gprs[5] &mask); 902 - 903 - if ((signed long)regs->gprs[2] >= NR_syscalls) { 904 - regs->gprs[2] = -ENOSYS; 905 - ret = -ENOSYS; 906 - } 907 - return regs->gprs[2]; 908 - skip: 909 - clear_pt_regs_flag(regs, PIF_SYSCALL); 910 - return ret; 911 - } 912 - 913 - asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) 914 - { 915 - audit_syscall_exit(regs); 916 - 917 - if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 918 - trace_sys_exit(regs, regs->gprs[2]); 919 - 920 - if (test_thread_flag(TIF_SYSCALL_TRACE)) 921 - tracehook_report_syscall_exit(regs, 0); 922 - } 923 864 924 865 /* 925 866 * user_regset definitions.
+1 -2
arch/s390/kernel/setup.c
··· 411 411 memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list, 412 412 sizeof(lc->alt_stfle_fac_list)); 413 413 nmi_alloc_boot_cpu(lc); 414 - lc->sync_enter_timer = S390_lowcore.sync_enter_timer; 415 - lc->async_enter_timer = S390_lowcore.async_enter_timer; 414 + lc->sys_enter_timer = S390_lowcore.sys_enter_timer; 416 415 lc->exit_timer = S390_lowcore.exit_timer; 417 416 lc->user_timer = S390_lowcore.user_timer; 418 417 lc->system_timer = S390_lowcore.system_timer;
+8 -4
arch/s390/kernel/signal.c
··· 170 170 fpregs_load(&user_sregs.fpregs, &current->thread.fpu); 171 171 172 172 clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ 173 + clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART); 173 174 return 0; 174 175 } 175 176 ··· 460 459 * the kernel can handle, and then we build all the user-level signal handling 461 460 * stack-frames in one go after that. 462 461 */ 463 - void do_signal(struct pt_regs *regs) 462 + 463 + void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) 464 464 { 465 465 struct ksignal ksig; 466 466 sigset_t *oldset = sigmask_to_save(); ··· 474 472 current->thread.system_call = 475 473 test_pt_regs_flag(regs, PIF_SYSCALL) ? regs->int_code : 0; 476 474 477 - if (test_thread_flag(TIF_SIGPENDING) && get_signal(&ksig)) { 475 + if (has_signal && get_signal(&ksig)) { 478 476 /* Whee! Actually deliver the signal. */ 479 477 if (current->thread.system_call) { 480 478 regs->int_code = current->thread.system_call; ··· 500 498 } 501 499 /* No longer in a system call */ 502 500 clear_pt_regs_flag(regs, PIF_SYSCALL); 501 + clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART); 503 502 rseq_signal_deliver(&ksig, regs); 504 503 if (is_compat_task()) 505 504 handle_signal32(&ksig, oldset, regs); ··· 511 508 512 509 /* No handlers present - check for system call restart */ 513 510 clear_pt_regs_flag(regs, PIF_SYSCALL); 511 + clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART); 514 512 if (current->thread.system_call) { 515 513 regs->int_code = current->thread.system_call; 516 514 switch (regs->gprs[2]) { ··· 524 520 case -ERESTARTNOINTR: 525 521 /* Restart system call with magic TIF bit. */ 526 522 regs->gprs[2] = regs->orig_gpr2; 527 - set_pt_regs_flag(regs, PIF_SYSCALL); 523 + set_pt_regs_flag(regs, PIF_SYSCALL_RESTART); 528 524 if (test_thread_flag(TIF_SINGLE_STEP)) 529 - clear_pt_regs_flag(regs, PIF_PER_TRAP); 525 + clear_thread_flag(TIF_PER_TRAP); 530 526 break; 531 527 } 532 528 }
+1 -1
arch/s390/kernel/smp.c
··· 499 499 if (test_bit(ec_call_function_single, &bits)) 500 500 generic_smp_call_function_single_interrupt(); 501 501 if (test_bit(ec_mcck_pending, &bits)) 502 - s390_handle_mcck(); 502 + __s390_handle_mcck(); 503 503 } 504 504 505 505 static void do_ext_call_interrupt(struct ext_code ext_code,
+70
arch/s390/kernel/sys_s390.c arch/s390/kernel/syscall.c
··· 29 29 #include <linux/unistd.h> 30 30 #include <linux/ipc.h> 31 31 #include <linux/uaccess.h> 32 + #include <linux/string.h> 33 + #include <linux/thread_info.h> 34 + #include <linux/entry-common.h> 35 + 36 + #include <asm/ptrace.h> 37 + #include <asm/vtime.h> 38 + 32 39 #include "entry.h" 33 40 34 41 /* ··· 106 99 SYSCALL_DEFINE0(ni_syscall) 107 100 { 108 101 return -ENOSYS; 102 + } 103 + 104 + void do_syscall(struct pt_regs *regs) 105 + { 106 + unsigned long nr; 107 + 108 + nr = regs->int_code & 0xffff; 109 + if (!nr) { 110 + nr = regs->gprs[1] & 0xffff; 111 + regs->int_code &= ~0xffffUL; 112 + regs->int_code |= nr; 113 + } 114 + 115 + regs->gprs[2] = nr; 116 + 117 + nr = syscall_enter_from_user_mode_work(regs, nr); 118 + 119 + /* 120 + * In the s390 ptrace ABI, both the syscall number and the return value 121 + * use gpr2. However, userspace puts the syscall number either in the 122 + * svc instruction itself, or uses gpr1. To make at least skipping syscalls 123 + * work, the ptrace code sets PIF_SYSCALL_RET_SET, which is checked here 124 + * and if set, the syscall will be skipped. 
125 + */ 126 + if (!test_pt_regs_flag(regs, PIF_SYSCALL_RET_SET)) { 127 + regs->gprs[2] = -ENOSYS; 128 + if (likely(nr < NR_syscalls)) { 129 + regs->gprs[2] = current->thread.sys_call_table[nr]( 130 + regs->orig_gpr2, regs->gprs[3], 131 + regs->gprs[4], regs->gprs[5], 132 + regs->gprs[6], regs->gprs[7]); 133 + } 134 + } else { 135 + clear_pt_regs_flag(regs, PIF_SYSCALL_RET_SET); 136 + } 137 + syscall_exit_to_user_mode_work(regs); 138 + } 139 + 140 + void noinstr __do_syscall(struct pt_regs *regs, int per_trap) 141 + { 142 + enter_from_user_mode(regs); 143 + 144 + memcpy(&regs->gprs[8], S390_lowcore.save_area_sync, 8 * sizeof(unsigned long)); 145 + memcpy(&regs->int_code, &S390_lowcore.svc_ilc, sizeof(regs->int_code)); 146 + regs->psw = S390_lowcore.svc_old_psw; 147 + 148 + update_timer_sys(); 149 + 150 + local_irq_enable(); 151 + regs->orig_gpr2 = regs->gprs[2]; 152 + 153 + if (per_trap) 154 + set_thread_flag(TIF_PER_TRAP); 155 + 156 + for (;;) { 157 + regs->flags = 0; 158 + set_pt_regs_flag(regs, PIF_SYSCALL); 159 + do_syscall(regs); 160 + if (!test_pt_regs_flag(regs, PIF_SYSCALL_RESTART)) 161 + break; 162 + local_irq_enable(); 163 + } 164 + exit_to_user_mode(); 109 165 }
+65
arch/s390/kernel/traps.c
··· 13 13 * 'Traps.c' handles hardware traps and faults after we have saved some 14 14 * state in 'asm.s'. 15 15 */ 16 + #include "asm/irqflags.h" 17 + #include "asm/ptrace.h" 16 18 #include <linux/kprobes.h> 17 19 #include <linux/kdebug.h> 18 20 #include <linux/extable.h> ··· 25 23 #include <linux/slab.h> 26 24 #include <linux/uaccess.h> 27 25 #include <linux/cpu.h> 26 + #include <linux/entry-common.h> 28 27 #include <asm/fpu/api.h> 28 + #include <asm/vtime.h> 29 29 #include "entry.h" 30 30 31 31 static inline void __user *get_trap_ip(struct pt_regs *regs) ··· 291 287 sort_extable(__start_dma_ex_table, __stop_dma_ex_table); 292 288 local_mcck_enable(); 293 289 test_monitor_call(); 290 + } 291 + 292 + void noinstr __do_pgm_check(struct pt_regs *regs) 293 + { 294 + unsigned long last_break = S390_lowcore.breaking_event_addr; 295 + unsigned int trapnr, syscall_redirect = 0; 296 + irqentry_state_t state; 297 + 298 + regs->int_code = *(u32 *)&S390_lowcore.pgm_ilc; 299 + regs->int_parm_long = S390_lowcore.trans_exc_code; 300 + 301 + state = irqentry_enter(regs); 302 + 303 + if (user_mode(regs)) { 304 + update_timer_sys(); 305 + if (last_break < 4096) 306 + last_break = 1; 307 + current->thread.last_break = last_break; 308 + regs->args[0] = last_break; 309 + } 310 + 311 + if (S390_lowcore.pgm_code & 0x0200) { 312 + /* transaction abort */ 313 + memcpy(&current->thread.trap_tdb, &S390_lowcore.pgm_tdb, 256); 314 + } 315 + 316 + if (S390_lowcore.pgm_code & PGM_INT_CODE_PER) { 317 + if (user_mode(regs)) { 318 + struct per_event *ev = &current->thread.per_event; 319 + 320 + set_thread_flag(TIF_PER_TRAP); 321 + ev->address = S390_lowcore.per_address; 322 + ev->cause = *(u16 *)&S390_lowcore.per_code; 323 + ev->paid = S390_lowcore.per_access_id; 324 + } else { 325 + /* PER event in kernel is kprobes */ 326 + __arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER); 327 + do_per_trap(regs); 328 + goto out; 329 + } 330 + } 331 + 332 + if (!irqs_disabled_flags(regs->psw.mask)) 333 + 
trace_hardirqs_on(); 334 + __arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER); 335 + 336 + trapnr = regs->int_code & PGM_INT_CODE_MASK; 337 + if (trapnr) 338 + pgm_check_table[trapnr](regs); 339 + syscall_redirect = user_mode(regs) && test_pt_regs_flag(regs, PIF_SYSCALL); 340 + out: 341 + local_irq_disable(); 342 + irqentry_exit(regs, state); 343 + 344 + if (syscall_redirect) { 345 + enter_from_user_mode(regs); 346 + local_irq_enable(); 347 + regs->orig_gpr2 = regs->gprs[2]; 348 + do_syscall(regs); 349 + exit_to_user_mode(); 350 + } 294 351 }
+3 -3
arch/s390/kernel/uprobes.c
··· 32 32 return -EINVAL; 33 33 if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) 34 34 return -EINVAL; 35 - clear_pt_regs_flag(regs, PIF_PER_TRAP); 35 + clear_thread_flag(TIF_PER_TRAP); 36 36 auprobe->saved_per = psw_bits(regs->psw).per; 37 37 auprobe->saved_int_code = regs->int_code; 38 38 regs->int_code = UPROBE_TRAP_NR; ··· 103 103 /* fix per address */ 104 104 current->thread.per_event.address = utask->vaddr; 105 105 /* trigger per event */ 106 - set_pt_regs_flag(regs, PIF_PER_TRAP); 106 + set_thread_flag(TIF_PER_TRAP); 107 107 } 108 108 return 0; 109 109 } ··· 259 259 return; 260 260 current->thread.per_event.address = regs->psw.addr; 261 261 current->thread.per_event.cause = PER_EVENT_STORE >> 16; 262 - set_pt_regs_flag(regs, PIF_PER_TRAP); 262 + set_thread_flag(TIF_PER_TRAP); 263 263 } 264 264 265 265 /*
+3
arch/s390/kvm/kvm-s390.c
··· 45 45 #include <asm/timex.h> 46 46 #include <asm/ap.h> 47 47 #include <asm/uv.h> 48 + #include <asm/fpu/api.h> 48 49 #include "kvm-s390.h" 49 50 #include "gaccess.h" 50 51 ··· 4148 4147 vcpu->run->s.regs.gprs, 4149 4148 sizeof(sie_page->pv_grregs)); 4150 4149 } 4150 + if (test_cpu_flag(CIF_FPU)) 4151 + load_fpu_regs(); 4151 4152 exit_reason = sie64a(vcpu->arch.sie_block, 4152 4153 vcpu->run->s.regs.gprs); 4153 4154 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+3
arch/s390/kvm/vsie.c
··· 18 18 #include <asm/sclp.h> 19 19 #include <asm/nmi.h> 20 20 #include <asm/dis.h> 21 + #include <asm/fpu/api.h> 21 22 #include "kvm-s390.h" 22 23 #include "gaccess.h" 23 24 ··· 1029 1028 */ 1030 1029 vcpu->arch.sie_block->prog0c |= PROG_IN_SIE; 1031 1030 barrier(); 1031 + if (test_cpu_flag(CIF_FPU)) 1032 + load_fpu_regs(); 1032 1033 if (!kvm_s390_vcpu_sie_inhibited(vcpu)) 1033 1034 rc = sie64a(scb_s, vcpu->run->s.regs.gprs); 1034 1035 barrier();
+7 -5
arch/s390/lib/uaccess.c
··· 16 16 #include <asm/mmu_context.h> 17 17 #include <asm/facility.h> 18 18 19 - #ifdef CONFIG_DEBUG_USER_ASCE 20 - void debug_user_asce(void) 19 + #ifdef CONFIG_DEBUG_ENTRY 20 + void debug_user_asce(int exit) 21 21 { 22 22 unsigned long cr1, cr7; 23 23 ··· 25 25 __ctl_store(cr7, 7, 7); 26 26 if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce) 27 27 return; 28 - panic("incorrect ASCE on kernel exit\n" 28 + panic("incorrect ASCE on kernel %s\n" 29 29 "cr1: %016lx cr7: %016lx\n" 30 30 "kernel: %016llx user: %016llx\n", 31 - cr1, cr7, S390_lowcore.kernel_asce, S390_lowcore.user_asce); 31 + exit ? "exit" : "entry", cr1, cr7, 32 + S390_lowcore.kernel_asce, S390_lowcore.user_asce); 33 + 32 34 } 33 - #endif /*CONFIG_DEBUG_USER_ASCE */ 35 + #endif /*CONFIG_DEBUG_ENTRY */ 34 36 35 37 #ifndef CONFIG_HAVE_MARCH_Z10_FEATURES 36 38 static DEFINE_STATIC_KEY_FALSE(have_mvcos);
+1 -1
arch/s390/mm/fault.c
··· 385 385 * The instruction that caused the program check has 386 386 * been nullified. Don't signal single step via SIGTRAP. 387 387 */ 388 - clear_pt_regs_flag(regs, PIF_PER_TRAP); 388 + clear_thread_flag(TIF_PER_TRAP); 389 389 390 390 if (kprobe_page_fault(regs, 14)) 391 391 return 0;