Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (30 commits)
[S390] wire up sys_perf_counter_open
[S390] wire up sys_rt_tgsigqueueinfo
[S390] ftrace: add system call tracer support
[S390] ftrace: add function graph tracer support
[S390] ftrace: add function trace mcount test support
[S390] ftrace: add dynamic ftrace support
[S390] kprobes: use probe_kernel_write
[S390] maccess: arch specific probe_kernel_write() implementation
[S390] maccess: add weak attribute to probe_kernel_write
[S390] profile_tick called twice
[S390] dasd: forward internal errors to dasd_sleep_on caller
[S390] dasd: sync after async probe
[S390] dasd: check_characteristics cleanup
[S390] dasd: no High Performance FICON in 31-bit mode
[S390] dcssblk: revert devt conversion
[S390] qdio: fix access beyond ARRAY_SIZE of irq_ptr->{in,out}put_qs
[S390] vmalloc: add vmalloc kernel parameter support
[S390] uaccess: use might_fault() instead of might_sleep()
[S390] 3270: lock dependency fixes
[S390] 3270: do not register with tty_register_device
...

+1328 -344
+23
arch/s390/Kconfig
··· 82 select USE_GENERIC_SMP_HELPERS if SMP 83 select HAVE_SYSCALL_WRAPPERS 84 select HAVE_FUNCTION_TRACER 85 select HAVE_DEFAULT_NO_SPIN_MUTEXES 86 select HAVE_OPROFILE 87 select HAVE_KPROBES ··· 572 the KVM hypervisor. This will add detection for KVM as well as a 573 virtio transport. If KVM is detected, the virtio console will be 574 the default console. 575 endmenu 576 577 source "net/Kconfig"
··· 82 select USE_GENERIC_SMP_HELPERS if SMP 83 select HAVE_SYSCALL_WRAPPERS 84 select HAVE_FUNCTION_TRACER 85 + select HAVE_FUNCTION_TRACE_MCOUNT_TEST 86 + select HAVE_FTRACE_MCOUNT_RECORD 87 + select HAVE_FTRACE_SYSCALLS 88 + select HAVE_DYNAMIC_FTRACE 89 + select HAVE_FUNCTION_GRAPH_TRACER 90 select HAVE_DEFAULT_NO_SPIN_MUTEXES 91 select HAVE_OPROFILE 92 select HAVE_KPROBES ··· 567 the KVM hypervisor. This will add detection for KVM as well as a 568 virtio transport. If KVM is detected, the virtio console will be 569 the default console. 570 + 571 + config SECCOMP 572 + bool "Enable seccomp to safely compute untrusted bytecode" 573 + depends on PROC_FS 574 + default y 575 + help 576 + This kernel feature is useful for number crunching applications 577 + that may need to compute untrusted bytecode during their 578 + execution. By using pipes or other transports made available to 579 + the process as file descriptors supporting the read/write 580 + syscalls, it's possible to isolate those applications in 581 + their own address space using seccomp. Once seccomp is 582 + enabled via /proc/<pid>/seccomp, it cannot be disabled 583 + and the task is only allowed to execute a few safe syscalls 584 + defined by each seccomp mode. 585 + 586 + If unsure, say Y. 587 + 588 endmenu 589 590 source "net/Kconfig"
+18 -1
arch/s390/include/asm/compat.h
··· 5 */ 6 #include <linux/types.h> 7 #include <linux/sched.h> 8 9 #define PSW32_MASK_PER 0x40000000UL 10 #define PSW32_MASK_DAT 0x04000000UL ··· 164 return (u32)(unsigned long)uptr; 165 } 166 167 static inline void __user *compat_alloc_user_space(long len) 168 { 169 unsigned long stack; 170 171 stack = KSTK_ESP(current); 172 - if (test_thread_flag(TIF_31BIT)) 173 stack &= 0x7fffffffUL; 174 return (void __user *) (stack - len); 175 }
··· 5 */ 6 #include <linux/types.h> 7 #include <linux/sched.h> 8 + #include <linux/thread_info.h> 9 10 #define PSW32_MASK_PER 0x40000000UL 11 #define PSW32_MASK_DAT 0x04000000UL ··· 163 return (u32)(unsigned long)uptr; 164 } 165 166 + #ifdef CONFIG_COMPAT 167 + 168 + static inline int is_compat_task(void) 169 + { 170 + return test_thread_flag(TIF_31BIT); 171 + } 172 + 173 + #else 174 + 175 + static inline int is_compat_task(void) 176 + { 177 + return 0; 178 + } 179 + 180 + #endif 181 + 182 static inline void __user *compat_alloc_user_space(long len) 183 { 184 unsigned long stack; 185 186 stack = KSTK_ESP(current); 187 + if (is_compat_task()) 188 stack &= 0x7fffffffUL; 189 return (void __user *) (stack - len); 190 }
-32
arch/s390/include/asm/cpu.h
··· 1 - /* 2 - * include/asm-s390/cpu.h 3 - * 4 - * Copyright IBM Corp. 2007 5 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 6 - */ 7 - 8 - #ifndef _ASM_S390_CPU_H_ 9 - #define _ASM_S390_CPU_H_ 10 - 11 - #include <linux/types.h> 12 - #include <linux/percpu.h> 13 - #include <linux/spinlock.h> 14 - 15 - struct s390_idle_data { 16 - spinlock_t lock; 17 - unsigned long long idle_count; 18 - unsigned long long idle_enter; 19 - unsigned long long idle_time; 20 - }; 21 - 22 - DECLARE_PER_CPU(struct s390_idle_data, s390_idle); 23 - 24 - void vtime_start_cpu(void); 25 - 26 - static inline void s390_idle_check(void) 27 - { 28 - if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL) 29 - vtime_start_cpu(); 30 - } 31 - 32 - #endif /* _ASM_S390_CPU_H_ */
···
+19
arch/s390/include/asm/cputime.h
··· 9 #ifndef _S390_CPUTIME_H 10 #define _S390_CPUTIME_H 11 12 #include <asm/div64.h> 13 14 /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ ··· 177 return __div(cputime, 4096000000ULL / USER_HZ); 178 } 179 180 cputime64_t s390_get_idle_time(int cpu); 181 182 #define arch_idle_time(cpu) s390_get_idle_time(cpu) 183 184 #endif /* _S390_CPUTIME_H */
··· 9 #ifndef _S390_CPUTIME_H 10 #define _S390_CPUTIME_H 11 12 + #include <linux/types.h> 13 + #include <linux/percpu.h> 14 + #include <linux/spinlock.h> 15 #include <asm/div64.h> 16 17 /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ ··· 174 return __div(cputime, 4096000000ULL / USER_HZ); 175 } 176 177 + struct s390_idle_data { 178 + spinlock_t lock; 179 + unsigned long long idle_count; 180 + unsigned long long idle_enter; 181 + unsigned long long idle_time; 182 + }; 183 + 184 + DECLARE_PER_CPU(struct s390_idle_data, s390_idle); 185 + 186 + void vtime_start_cpu(void); 187 cputime64_t s390_get_idle_time(int cpu); 188 189 #define arch_idle_time(cpu) s390_get_idle_time(cpu) 190 + 191 + static inline void s390_idle_check(void) 192 + { 193 + if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL) 194 + vtime_start_cpu(); 195 + } 196 197 #endif /* _S390_CPUTIME_H */
+21
arch/s390/include/asm/ftrace.h
··· 2 #define _ASM_S390_FTRACE_H 3 4 #ifndef __ASSEMBLY__ 5 extern void _mcount(void); 6 #endif 7 8 #endif /* _ASM_S390_FTRACE_H */
··· 2 #define _ASM_S390_FTRACE_H 3 4 #ifndef __ASSEMBLY__ 5 + 6 extern void _mcount(void); 7 + extern unsigned long ftrace_dyn_func; 8 + 9 + struct dyn_arch_ftrace { }; 10 + 11 + #define MCOUNT_ADDR ((long)_mcount) 12 + 13 + #ifdef CONFIG_64BIT 14 + #define MCOUNT_OFFSET_RET 18 15 + #define MCOUNT_INSN_SIZE 24 16 + #define MCOUNT_OFFSET 14 17 + #else 18 + #define MCOUNT_OFFSET_RET 26 19 + #define MCOUNT_INSN_SIZE 30 20 + #define MCOUNT_OFFSET 8 21 #endif 22 23 + static inline unsigned long ftrace_call_adjust(unsigned long addr) 24 + { 25 + return addr - MCOUNT_OFFSET; 26 + } 27 + 28 + #endif /* __ASSEMBLY__ */ 29 #endif /* _ASM_S390_FTRACE_H */
+7 -2
arch/s390/include/asm/lowcore.h
··· 30 #define __LC_SUBCHANNEL_NR 0x00ba 31 #define __LC_IO_INT_PARM 0x00bc 32 #define __LC_IO_INT_WORD 0x00c0 33 #define __LC_MCCK_CODE 0x00e8 34 35 #define __LC_DUMP_REIPL 0x0e00 ··· 68 #define __LC_CPUID 0x02b0 69 #define __LC_INT_CLOCK 0x02c8 70 #define __LC_MACHINE_FLAGS 0x02d8 71 #define __LC_IRB 0x0300 72 #define __LC_PFAULT_INTPARM 0x0080 73 #define __LC_CPU_TIMER_SAVE_AREA 0x00d8 ··· 114 #define __LC_INT_CLOCK 0x0340 115 #define __LC_VDSO_PER_CPU 0x0350 116 #define __LC_MACHINE_FLAGS 0x0358 117 #define __LC_IRB 0x0380 118 #define __LC_PASTE 0x03c0 119 #define __LC_PFAULT_INTPARM 0x11b8 ··· 283 __u64 int_clock; /* 0x02c8 */ 284 __u64 clock_comparator; /* 0x02d0 */ 285 __u32 machine_flags; /* 0x02d8 */ 286 - __u8 pad_0x02dc[0x0300-0x02dc]; /* 0x02dc */ 287 288 /* Interrupt response block */ 289 __u8 irb[64]; /* 0x0300 */ ··· 389 __u64 clock_comparator; /* 0x0348 */ 390 __u64 vdso_per_cpu_data; /* 0x0350 */ 391 __u64 machine_flags; /* 0x0358 */ 392 - __u8 pad_0x0360[0x0380-0x0360]; /* 0x0360 */ 393 394 /* Interrupt response block. */ 395 __u8 irb[64]; /* 0x0380 */
··· 30 #define __LC_SUBCHANNEL_NR 0x00ba 31 #define __LC_IO_INT_PARM 0x00bc 32 #define __LC_IO_INT_WORD 0x00c0 33 + #define __LC_STFL_FAC_LIST 0x00c8 34 #define __LC_MCCK_CODE 0x00e8 35 36 #define __LC_DUMP_REIPL 0x0e00 ··· 67 #define __LC_CPUID 0x02b0 68 #define __LC_INT_CLOCK 0x02c8 69 #define __LC_MACHINE_FLAGS 0x02d8 70 + #define __LC_FTRACE_FUNC 0x02dc 71 #define __LC_IRB 0x0300 72 #define __LC_PFAULT_INTPARM 0x0080 73 #define __LC_CPU_TIMER_SAVE_AREA 0x00d8 ··· 112 #define __LC_INT_CLOCK 0x0340 113 #define __LC_VDSO_PER_CPU 0x0350 114 #define __LC_MACHINE_FLAGS 0x0358 115 + #define __LC_FTRACE_FUNC 0x0360 116 #define __LC_IRB 0x0380 117 #define __LC_PASTE 0x03c0 118 #define __LC_PFAULT_INTPARM 0x11b8 ··· 280 __u64 int_clock; /* 0x02c8 */ 281 __u64 clock_comparator; /* 0x02d0 */ 282 __u32 machine_flags; /* 0x02d8 */ 283 + __u32 ftrace_func; /* 0x02dc */ 284 + __u8 pad_0x02e0[0x0300-0x02e0]; /* 0x02e0 */ 285 286 /* Interrupt response block */ 287 __u8 irb[64]; /* 0x0300 */ ··· 385 __u64 clock_comparator; /* 0x0348 */ 386 __u64 vdso_per_cpu_data; /* 0x0350 */ 387 __u64 machine_flags; /* 0x0358 */ 388 + __u64 ftrace_func; /* 0x0360 */ 389 + __u8 pad_0x0368[0x0380-0x0368]; /* 0x0368 */ 390 391 /* Interrupt response block. */ 392 __u8 irb[64]; /* 0x0380 */
+5 -2
arch/s390/include/asm/pgtable.h
··· 112 * effect, this also makes sure that 64 bit module code cannot be used 113 * as system call address. 114 */ 115 #ifndef __s390x__ 116 - #define VMALLOC_START 0x78000000UL 117 #define VMALLOC_END 0x7e000000UL 118 #define VMEM_MAP_END 0x80000000UL 119 #else /* __s390x__ */ 120 - #define VMALLOC_START 0x3e000000000UL 121 #define VMALLOC_END 0x3e040000000UL 122 #define VMEM_MAP_END 0x40000000000UL 123 #endif /* __s390x__ */
··· 112 * effect, this also makes sure that 64 bit module code cannot be used 113 * as system call address. 114 */ 115 + 116 + extern unsigned long VMALLOC_START; 117 + 118 #ifndef __s390x__ 119 + #define VMALLOC_SIZE (96UL << 20) 120 #define VMALLOC_END 0x7e000000UL 121 #define VMEM_MAP_END 0x80000000UL 122 #else /* __s390x__ */ 123 + #define VMALLOC_SIZE (1UL << 30) 124 #define VMALLOC_END 0x3e040000000UL 125 #define VMEM_MAP_END 0x40000000000UL 126 #endif /* __s390x__ */
+16
arch/s390/include/asm/seccomp.h
···
··· 1 + #ifndef _ASM_S390_SECCOMP_H 2 + #define _ASM_S390_SECCOMP_H 3 + 4 + #include <linux/unistd.h> 5 + 6 + #define __NR_seccomp_read __NR_read 7 + #define __NR_seccomp_write __NR_write 8 + #define __NR_seccomp_exit __NR_exit 9 + #define __NR_seccomp_sigreturn __NR_sigreturn 10 + 11 + #define __NR_seccomp_read_32 __NR_read 12 + #define __NR_seccomp_write_32 __NR_write 13 + #define __NR_seccomp_exit_32 __NR_exit 14 + #define __NR_seccomp_sigreturn_32 __NR_sigreturn 15 + 16 + #endif /* _ASM_S390_SECCOMP_H */
+16 -3
arch/s390/include/asm/spinlock.h
··· 122 #define __raw_write_can_lock(x) ((x)->lock == 0) 123 124 extern void _raw_read_lock_wait(raw_rwlock_t *lp); 125 extern int _raw_read_trylock_retry(raw_rwlock_t *lp); 126 extern void _raw_write_lock_wait(raw_rwlock_t *lp); 127 extern int _raw_write_trylock_retry(raw_rwlock_t *lp); 128 129 static inline void __raw_read_lock(raw_rwlock_t *rw) ··· 134 old = rw->lock & 0x7fffffffU; 135 if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old) 136 _raw_read_lock_wait(rw); 137 } 138 139 static inline void __raw_read_unlock(raw_rwlock_t *rw) ··· 159 { 160 if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) 161 _raw_write_lock_wait(rw); 162 } 163 164 static inline void __raw_write_unlock(raw_rwlock_t *rw) ··· 187 return 1; 188 return _raw_write_trylock_retry(rw); 189 } 190 - 191 - #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 192 - #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 193 194 #define _raw_read_relax(lock) cpu_relax() 195 #define _raw_write_relax(lock) cpu_relax()
··· 122 #define __raw_write_can_lock(x) ((x)->lock == 0) 123 124 extern void _raw_read_lock_wait(raw_rwlock_t *lp); 125 + extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); 126 extern int _raw_read_trylock_retry(raw_rwlock_t *lp); 127 extern void _raw_write_lock_wait(raw_rwlock_t *lp); 128 + extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); 129 extern int _raw_write_trylock_retry(raw_rwlock_t *lp); 130 131 static inline void __raw_read_lock(raw_rwlock_t *rw) ··· 132 old = rw->lock & 0x7fffffffU; 133 if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old) 134 _raw_read_lock_wait(rw); 135 + } 136 + 137 + static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) 138 + { 139 + unsigned int old; 140 + old = rw->lock & 0x7fffffffU; 141 + if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old) 142 + _raw_read_lock_wait_flags(rw, flags); 143 } 144 145 static inline void __raw_read_unlock(raw_rwlock_t *rw) ··· 149 { 150 if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) 151 _raw_write_lock_wait(rw); 152 + } 153 + 154 + static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags) 155 + { 156 + if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) 157 + _raw_write_lock_wait_flags(rw, flags); 158 } 159 160 static inline void __raw_write_unlock(raw_rwlock_t *rw) ··· 171 return 1; 172 return _raw_write_trylock_retry(rw); 173 } 174 175 #define _raw_read_relax(lock) cpu_relax() 176 #define _raw_write_relax(lock) cpu_relax()
+1
arch/s390/include/asm/syscall.h
··· 12 #ifndef _ASM_SYSCALL_H 13 #define _ASM_SYSCALL_H 1 14 15 #include <asm/ptrace.h> 16 17 static inline long syscall_get_nr(struct task_struct *task,
··· 12 #ifndef _ASM_SYSCALL_H 13 #define _ASM_SYSCALL_H 1 14 15 + #include <linux/sched.h> 16 #include <asm/ptrace.h> 17 18 static inline long syscall_get_nr(struct task_struct *task,
+8 -4
arch/s390/include/asm/thread_info.h
··· 83 /* 84 * thread information flags bit numbers 85 */ 86 - #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ 87 #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ 88 #define TIF_SIGPENDING 2 /* signal pending */ 89 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 90 #define TIF_RESTART_SVC 4 /* restart svc with new svc number */ 91 - #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ 92 #define TIF_SINGLE_STEP 6 /* deliver sigtrap on return to user */ 93 #define TIF_MCCK_PENDING 7 /* machine check handling is pending */ 94 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ 95 #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling 96 TIF_NEED_RESCHED */ ··· 101 #define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */ 102 #define TIF_FREEZE 21 /* thread is freezing for suspend */ 103 104 - #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 105 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 106 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 107 #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 108 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 109 #define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC) 110 - #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 111 #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 112 #define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING) 113 #define _TIF_USEDFPU (1<<TIF_USEDFPU) 114 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 115 #define _TIF_31BIT (1<<TIF_31BIT)
··· 83 /* 84 * thread information flags bit numbers 85 */ 86 #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ 87 #define TIF_SIGPENDING 2 /* signal pending */ 88 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 89 #define TIF_RESTART_SVC 4 /* restart svc with new svc number */ 90 #define TIF_SINGLE_STEP 6 /* deliver sigtrap on return to user */ 91 #define TIF_MCCK_PENDING 7 /* machine check handling is pending */ 92 + #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ 93 + #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ 94 + #define TIF_SECCOMP 10 /* secure computing */ 95 + #define TIF_SYSCALL_FTRACE 11 /* ftrace syscall instrumentation */ 96 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ 97 #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling 98 TIF_NEED_RESCHED */ ··· 99 #define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */ 100 #define TIF_FREEZE 21 /* thread is freezing for suspend */ 101 102 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 103 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 104 #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 105 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 106 #define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC) 107 #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 108 #define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING) 109 + #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 110 + #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 111 + #define _TIF_SECCOMP (1<<TIF_SECCOMP) 112 + #define _TIF_SYSCALL_FTRACE (1<<TIF_SYSCALL_FTRACE) 113 #define _TIF_USEDFPU (1<<TIF_USEDFPU) 114 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 115 #define _TIF_31BIT (1<<TIF_31BIT)
+8 -8
arch/s390/include/asm/uaccess.h
··· 131 132 #define put_user(x, ptr) \ 133 ({ \ 134 - might_sleep(); \ 135 __put_user(x, ptr); \ 136 }) 137 ··· 180 181 #define get_user(x, ptr) \ 182 ({ \ 183 - might_sleep(); \ 184 __get_user(x, ptr); \ 185 }) 186 ··· 231 static inline unsigned long __must_check 232 copy_to_user(void __user *to, const void *from, unsigned long n) 233 { 234 - might_sleep(); 235 if (access_ok(VERIFY_WRITE, to, n)) 236 n = __copy_to_user(to, from, n); 237 return n; ··· 282 static inline unsigned long __must_check 283 copy_from_user(void *to, const void __user *from, unsigned long n) 284 { 285 - might_sleep(); 286 if (access_ok(VERIFY_READ, from, n)) 287 n = __copy_from_user(to, from, n); 288 else ··· 299 static inline unsigned long __must_check 300 copy_in_user(void __user *to, const void __user *from, unsigned long n) 301 { 302 - might_sleep(); 303 if (__access_ok(from,n) && __access_ok(to,n)) 304 n = __copy_in_user(to, from, n); 305 return n; ··· 312 strncpy_from_user(char *dst, const char __user *src, long count) 313 { 314 long res = -EFAULT; 315 - might_sleep(); 316 if (access_ok(VERIFY_READ, src, 1)) 317 res = uaccess.strncpy_from_user(count, src, dst); 318 return res; ··· 321 static inline unsigned long 322 strnlen_user(const char __user * src, unsigned long n) 323 { 324 - might_sleep(); 325 return uaccess.strnlen_user(n, src); 326 } 327 ··· 354 static inline unsigned long __must_check 355 clear_user(void __user *to, unsigned long n) 356 { 357 - might_sleep(); 358 if (access_ok(VERIFY_WRITE, to, n)) 359 n = uaccess.clear_user(n, to); 360 return n;
··· 131 132 #define put_user(x, ptr) \ 133 ({ \ 134 + might_fault(); \ 135 __put_user(x, ptr); \ 136 }) 137 ··· 180 181 #define get_user(x, ptr) \ 182 ({ \ 183 + might_fault(); \ 184 __get_user(x, ptr); \ 185 }) 186 ··· 231 static inline unsigned long __must_check 232 copy_to_user(void __user *to, const void *from, unsigned long n) 233 { 234 + might_fault(); 235 if (access_ok(VERIFY_WRITE, to, n)) 236 n = __copy_to_user(to, from, n); 237 return n; ··· 282 static inline unsigned long __must_check 283 copy_from_user(void *to, const void __user *from, unsigned long n) 284 { 285 + might_fault(); 286 if (access_ok(VERIFY_READ, from, n)) 287 n = __copy_from_user(to, from, n); 288 else ··· 299 static inline unsigned long __must_check 300 copy_in_user(void __user *to, const void __user *from, unsigned long n) 301 { 302 + might_fault(); 303 if (__access_ok(from,n) && __access_ok(to,n)) 304 n = __copy_in_user(to, from, n); 305 return n; ··· 312 strncpy_from_user(char *dst, const char __user *src, long count) 313 { 314 long res = -EFAULT; 315 + might_fault(); 316 if (access_ok(VERIFY_READ, src, 1)) 317 res = uaccess.strncpy_from_user(count, src, dst); 318 return res; ··· 321 static inline unsigned long 322 strnlen_user(const char __user * src, unsigned long n) 323 { 324 + might_fault(); 325 return uaccess.strnlen_user(n, src); 326 } 327 ··· 354 static inline unsigned long __must_check 355 clear_user(void __user *to, unsigned long n) 356 { 357 + might_fault(); 358 if (access_ok(VERIFY_WRITE, to, n)) 359 n = uaccess.clear_user(n, to); 360 return n;
+3 -1
arch/s390/include/asm/unistd.h
··· 267 #define __NR_epoll_create1 327 268 #define __NR_preadv 328 269 #define __NR_pwritev 329 270 - #define NR_syscalls 330 271 272 /* 273 * There are some system calls that are not present on 64 bit, some
··· 267 #define __NR_epoll_create1 327 268 #define __NR_preadv 328 269 #define __NR_pwritev 329 270 + #define __NR_rt_tgsigqueueinfo 330 271 + #define __NR_perf_counter_open 331 272 + #define NR_syscalls 332 273 274 /* 275 * There are some system calls that are not present on 64 bit, some
+5 -2
arch/s390/kernel/Makefile
··· 3 # 4 5 ifdef CONFIG_FUNCTION_TRACER 6 - # Do not trace early boot code 7 CFLAGS_REMOVE_early.o = -pg 8 endif 9 10 # ··· 23 obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ 24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 25 s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ 26 - vdso.o vtime.o sysinfo.o nmi.o 27 28 obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 29 obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) ··· 42 obj-$(CONFIG_STACKTRACE) += stacktrace.o 43 obj-$(CONFIG_KPROBES) += kprobes.o 44 obj-$(CONFIG_FUNCTION_TRACER) += mcount.o 45 46 # Kexec part 47 S390_KEXEC_OBJS := machine_kexec.o crash.o
··· 3 # 4 5 ifdef CONFIG_FUNCTION_TRACER 6 + # Don't trace early setup code and tracing code 7 CFLAGS_REMOVE_early.o = -pg 8 + CFLAGS_REMOVE_ftrace.o = -pg 9 endif 10 11 # ··· 22 obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ 23 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 24 s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ 25 + vdso.o vtime.o sysinfo.o nmi.o sclp.o 26 27 obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 28 obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) ··· 41 obj-$(CONFIG_STACKTRACE) += stacktrace.o 42 obj-$(CONFIG_KPROBES) += kprobes.o 43 obj-$(CONFIG_FUNCTION_TRACER) += mcount.o 44 + obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 45 + obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 46 47 # Kexec part 48 S390_KEXEC_OBJS := machine_kexec.o crash.o
+17
arch/s390/kernel/compat_wrapper.S
··· 1823 llgfr %r5,%r5 # u32 1824 llgfr %r6,%r6 # u32 1825 jg compat_sys_pwritev # branch to system call
··· 1823 llgfr %r5,%r5 # u32 1824 llgfr %r6,%r6 # u32 1825 jg compat_sys_pwritev # branch to system call 1826 + 1827 + .globl compat_sys_rt_tgsigqueueinfo_wrapper 1828 + compat_sys_rt_tgsigqueueinfo_wrapper: 1829 + lgfr %r2,%r2 # compat_pid_t 1830 + lgfr %r3,%r3 # compat_pid_t 1831 + lgfr %r4,%r4 # int 1832 + llgtr %r5,%r5 # struct compat_siginfo * 1833 + jg compat_sys_rt_tgsigqueueinfo # branch to system call 1834 + 1835 + .globl sys_perf_counter_open_wrapper 1836 + sys_perf_counter_open_wrapper: 1837 + llgtr %r2,%r2 # const struct perf_counter_attr * 1838 + lgfr %r3,%r3 # pid_t 1839 + lgfr %r4,%r4 # int 1840 + lgfr %r5,%r5 # int 1841 + llgfr %r6,%r6 # unsigned long 1842 + jg sys_perf_counter_open # branch to system call
+4
arch/s390/kernel/early.c
··· 11 #include <linux/errno.h> 12 #include <linux/string.h> 13 #include <linux/ctype.h> 14 #include <linux/lockdep.h> 15 #include <linux/module.h> 16 #include <linux/pfn.h> ··· 411 sclp_facilities_detect(); 412 detect_memory_layout(memory_chunk); 413 S390_lowcore.machine_flags = machine_flags; 414 lockdep_on(); 415 }
··· 11 #include <linux/errno.h> 12 #include <linux/string.h> 13 #include <linux/ctype.h> 14 + #include <linux/ftrace.h> 15 #include <linux/lockdep.h> 16 #include <linux/module.h> 17 #include <linux/pfn.h> ··· 410 sclp_facilities_detect(); 411 detect_memory_layout(memory_chunk); 412 S390_lowcore.machine_flags = machine_flags; 413 + #ifdef CONFIG_DYNAMIC_FTRACE 414 + S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; 415 + #endif 416 lockdep_on(); 417 }
+5 -2
arch/s390/kernel/entry.S
··· 53 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) 54 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 55 _TIF_MCCK_PENDING) 56 57 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 58 STACK_SIZE = 1 << STACK_SHIFT ··· 267 sth %r7,SP_SVCNR(%r15) 268 sll %r7,2 # svc number *4 269 l %r8,BASED(.Lsysc_table) 270 - tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) 271 l %r8,0(%r7,%r8) # get system call addr. 272 bnz BASED(sysc_tracesys) 273 basr %r14,%r8 # call sys_xxxx ··· 407 basr %r14,%r8 # call sys_xxx 408 st %r2,SP_R2(%r15) # store return value 409 sysc_tracenogo: 410 - tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) 411 bz BASED(sysc_return) 412 l %r1,BASED(.Ltrace_exit) 413 la %r2,SP_PTREGS(%r15) # load pt_regs ··· 1109 1110 .section .rodata, "a" 1111 #define SYSCALL(esa,esame,emu) .long esa 1112 sys_call_table: 1113 #include "syscalls.S" 1114 #undef SYSCALL
··· 53 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) 54 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 55 _TIF_MCCK_PENDING) 56 + _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ 57 + _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8) 58 59 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 60 STACK_SIZE = 1 << STACK_SHIFT ··· 265 sth %r7,SP_SVCNR(%r15) 266 sll %r7,2 # svc number *4 267 l %r8,BASED(.Lsysc_table) 268 + tm __TI_flags+2(%r9),_TIF_SYSCALL 269 l %r8,0(%r7,%r8) # get system call addr. 270 bnz BASED(sysc_tracesys) 271 basr %r14,%r8 # call sys_xxxx ··· 405 basr %r14,%r8 # call sys_xxx 406 st %r2,SP_R2(%r15) # store return value 407 sysc_tracenogo: 408 + tm __TI_flags+2(%r9),_TIF_SYSCALL 409 bz BASED(sysc_return) 410 l %r1,BASED(.Ltrace_exit) 411 la %r2,SP_PTREGS(%r15) # load pt_regs ··· 1107 1108 .section .rodata, "a" 1109 #define SYSCALL(esa,esame,emu) .long esa 1110 + .globl sys_call_table 1111 sys_call_table: 1112 #include "syscalls.S" 1113 #undef SYSCALL
+5 -2
arch/s390/kernel/entry64.S
··· 56 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) 57 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 58 _TIF_MCCK_PENDING) 59 60 #define BASED(name) name-system_call(%r13) 61 ··· 262 larl %r10,sys_call_table_emu # use 31 bit emulation system calls 263 sysc_noemu: 264 #endif 265 - tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) 266 lgf %r8,0(%r7,%r10) # load address of system call routine 267 jnz sysc_tracesys 268 basr %r14,%r8 # call sys_xxxx ··· 393 basr %r14,%r8 # call sys_xxx 394 stg %r2,SP_R2(%r15) # store return value 395 sysc_tracenogo: 396 - tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) 397 jz sysc_return 398 la %r2,SP_PTREGS(%r15) # load pt_regs 399 larl %r14,sysc_return # return point is sysc_return ··· 1060 1061 .section .rodata, "a" 1062 #define SYSCALL(esa,esame,emu) .long esame 1063 sys_call_table: 1064 #include "syscalls.S" 1065 #undef SYSCALL
··· 56 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) 57 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 58 _TIF_MCCK_PENDING) 59 + _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ 60 + _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8) 61 62 #define BASED(name) name-system_call(%r13) 63 ··· 260 larl %r10,sys_call_table_emu # use 31 bit emulation system calls 261 sysc_noemu: 262 #endif 263 + tm __TI_flags+6(%r9),_TIF_SYSCALL 264 lgf %r8,0(%r7,%r10) # load address of system call routine 265 jnz sysc_tracesys 266 basr %r14,%r8 # call sys_xxxx ··· 391 basr %r14,%r8 # call sys_xxx 392 stg %r2,SP_R2(%r15) # store return value 393 sysc_tracenogo: 394 + tm __TI_flags+6(%r9),_TIF_SYSCALL 395 jz sysc_return 396 la %r2,SP_PTREGS(%r15) # load pt_regs 397 larl %r14,sysc_return # return point is sysc_return ··· 1058 1059 .section .rodata, "a" 1060 #define SYSCALL(esa,esame,emu) .long esame 1061 + .globl sys_call_table 1062 sys_call_table: 1063 #include "syscalls.S" 1064 #undef SYSCALL
+260
arch/s390/kernel/ftrace.c
···
··· 1 + /* 2 + * Dynamic function tracer architecture backend. 3 + * 4 + * Copyright IBM Corp. 2009 5 + * 6 + * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 7 + * 8 + */ 9 + 10 + #include <linux/hardirq.h> 11 + #include <linux/uaccess.h> 12 + #include <linux/ftrace.h> 13 + #include <linux/kernel.h> 14 + #include <linux/types.h> 15 + #include <trace/syscall.h> 16 + #include <asm/lowcore.h> 17 + 18 + #ifdef CONFIG_DYNAMIC_FTRACE 19 + 20 + void ftrace_disable_code(void); 21 + void ftrace_disable_return(void); 22 + void ftrace_call_code(void); 23 + void ftrace_nop_code(void); 24 + 25 + #define FTRACE_INSN_SIZE 4 26 + 27 + #ifdef CONFIG_64BIT 28 + 29 + asm( 30 + " .align 4\n" 31 + "ftrace_disable_code:\n" 32 + " j 0f\n" 33 + " .word 0x0024\n" 34 + " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n" 35 + " basr %r14,%r1\n" 36 + "ftrace_disable_return:\n" 37 + " lg %r14,8(15)\n" 38 + " lgr %r0,%r0\n" 39 + "0:\n"); 40 + 41 + asm( 42 + " .align 4\n" 43 + "ftrace_nop_code:\n" 44 + " j .+"__stringify(MCOUNT_INSN_SIZE)"\n"); 45 + 46 + asm( 47 + " .align 4\n" 48 + "ftrace_call_code:\n" 49 + " stg %r14,8(%r15)\n"); 50 + 51 + #else /* CONFIG_64BIT */ 52 + 53 + asm( 54 + " .align 4\n" 55 + "ftrace_disable_code:\n" 56 + " j 0f\n" 57 + " l %r1,"__stringify(__LC_FTRACE_FUNC)"\n" 58 + " basr %r14,%r1\n" 59 + "ftrace_disable_return:\n" 60 + " l %r14,4(%r15)\n" 61 + " j 0f\n" 62 + " bcr 0,%r7\n" 63 + " bcr 0,%r7\n" 64 + " bcr 0,%r7\n" 65 + " bcr 0,%r7\n" 66 + " bcr 0,%r7\n" 67 + " bcr 0,%r7\n" 68 + "0:\n"); 69 + 70 + asm( 71 + " .align 4\n" 72 + "ftrace_nop_code:\n" 73 + " j .+"__stringify(MCOUNT_INSN_SIZE)"\n"); 74 + 75 + asm( 76 + " .align 4\n" 77 + "ftrace_call_code:\n" 78 + " st %r14,4(%r15)\n"); 79 + 80 + #endif /* CONFIG_64BIT */ 81 + 82 + static int ftrace_modify_code(unsigned long ip, 83 + void *old_code, int old_size, 84 + void *new_code, int new_size) 85 + { 86 + unsigned char replaced[MCOUNT_INSN_SIZE]; 87 + 88 + /* 89 + * Note: Due to modules code can disappear and change. 
90 + * We need to protect against faulting as well as code 91 + * changing. We do this by using the probe_kernel_* 92 + * functions. 93 + * This however is just a simple sanity check. 94 + */ 95 + if (probe_kernel_read(replaced, (void *)ip, old_size)) 96 + return -EFAULT; 97 + if (memcmp(replaced, old_code, old_size) != 0) 98 + return -EINVAL; 99 + if (probe_kernel_write((void *)ip, new_code, new_size)) 100 + return -EPERM; 101 + return 0; 102 + } 103 + 104 + static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec, 105 + unsigned long addr) 106 + { 107 + return ftrace_modify_code(rec->ip, 108 + ftrace_call_code, FTRACE_INSN_SIZE, 109 + ftrace_disable_code, MCOUNT_INSN_SIZE); 110 + } 111 + 112 + int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 113 + unsigned long addr) 114 + { 115 + if (addr == MCOUNT_ADDR) 116 + return ftrace_make_initial_nop(mod, rec, addr); 117 + return ftrace_modify_code(rec->ip, 118 + ftrace_call_code, FTRACE_INSN_SIZE, 119 + ftrace_nop_code, FTRACE_INSN_SIZE); 120 + } 121 + 122 + int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 123 + { 124 + return ftrace_modify_code(rec->ip, 125 + ftrace_nop_code, FTRACE_INSN_SIZE, 126 + ftrace_call_code, FTRACE_INSN_SIZE); 127 + } 128 + 129 + int ftrace_update_ftrace_func(ftrace_func_t func) 130 + { 131 + ftrace_dyn_func = (unsigned long)func; 132 + return 0; 133 + } 134 + 135 + int __init ftrace_dyn_arch_init(void *data) 136 + { 137 + *(unsigned long *)data = 0; 138 + return 0; 139 + } 140 + 141 + #endif /* CONFIG_DYNAMIC_FTRACE */ 142 + 143 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 144 + #ifdef CONFIG_DYNAMIC_FTRACE 145 + /* 146 + * Patch the kernel code at ftrace_graph_caller location: 147 + * The instruction there is branch relative on condition. The condition mask 148 + * is either all ones (always branch aka disable ftrace_graph_caller) or all 149 + * zeroes (nop aka enable ftrace_graph_caller). 
150 + * Instruction format for brc is a7m4xxxx where m is the condition mask. 151 + */ 152 + int ftrace_enable_ftrace_graph_caller(void) 153 + { 154 + unsigned short opcode = 0xa704; 155 + 156 + return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode)); 157 + } 158 + 159 + int ftrace_disable_ftrace_graph_caller(void) 160 + { 161 + unsigned short opcode = 0xa7f4; 162 + 163 + return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode)); 164 + } 165 + 166 + static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr) 167 + { 168 + return addr - (ftrace_disable_return - ftrace_disable_code); 169 + } 170 + 171 + #else /* CONFIG_DYNAMIC_FTRACE */ 172 + 173 + static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr) 174 + { 175 + return addr - MCOUNT_OFFSET_RET; 176 + } 177 + 178 + #endif /* CONFIG_DYNAMIC_FTRACE */ 179 + 180 + /* 181 + * Hook the return address and push it in the stack of return addresses 182 + * in current thread info. 183 + */ 184 + unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent) 185 + { 186 + struct ftrace_graph_ent trace; 187 + 188 + /* Nmi's are currently unsupported. */ 189 + if (unlikely(in_nmi())) 190 + goto out; 191 + if (unlikely(atomic_read(&current->tracing_graph_pause))) 192 + goto out; 193 + if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY) 194 + goto out; 195 + trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN; 196 + /* Only trace if the calling function expects to. 
*/ 197 + if (!ftrace_graph_entry(&trace)) { 198 + current->curr_ret_stack--; 199 + goto out; 200 + } 201 + parent = (unsigned long)return_to_handler; 202 + out: 203 + return parent; 204 + } 205 + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 206 + 207 + #ifdef CONFIG_FTRACE_SYSCALLS 208 + 209 + extern unsigned long __start_syscalls_metadata[]; 210 + extern unsigned long __stop_syscalls_metadata[]; 211 + extern unsigned int sys_call_table[]; 212 + 213 + static struct syscall_metadata **syscalls_metadata; 214 + 215 + struct syscall_metadata *syscall_nr_to_meta(int nr) 216 + { 217 + if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) 218 + return NULL; 219 + 220 + return syscalls_metadata[nr]; 221 + } 222 + 223 + static struct syscall_metadata *find_syscall_meta(unsigned long syscall) 224 + { 225 + struct syscall_metadata *start; 226 + struct syscall_metadata *stop; 227 + char str[KSYM_SYMBOL_LEN]; 228 + 229 + start = (struct syscall_metadata *)__start_syscalls_metadata; 230 + stop = (struct syscall_metadata *)__stop_syscalls_metadata; 231 + kallsyms_lookup(syscall, NULL, NULL, NULL, str); 232 + 233 + for ( ; start < stop; start++) { 234 + if (start->name && !strcmp(start->name + 3, str + 3)) 235 + return start; 236 + } 237 + return NULL; 238 + } 239 + 240 + void arch_init_ftrace_syscalls(void) 241 + { 242 + struct syscall_metadata *meta; 243 + int i; 244 + static atomic_t refs; 245 + 246 + if (atomic_inc_return(&refs) != 1) 247 + goto out; 248 + syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls, 249 + GFP_KERNEL); 250 + if (!syscalls_metadata) 251 + goto out; 252 + for (i = 0; i < NR_syscalls; i++) { 253 + meta = find_syscall_meta((unsigned long)sys_call_table[i]); 254 + syscalls_metadata[i] = meta; 255 + } 256 + return; 257 + out: 258 + atomic_dec(&refs); 259 + } 260 + #endif
+47 -18
arch/s390/kernel/head.S
··· 1 /* 2 - * arch/s390/kernel/head.S 3 - * 4 - * Copyright (C) IBM Corp. 1999,2006 5 * 6 * Author(s): Hartmut Penner <hp@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com> ··· 477 mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) 478 mvc __LC_EXIT_TIMER(8),5f-.LPG0(%r13) 479 #ifndef CONFIG_MARCH_G5 480 - # check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10} 481 - stidp __LC_CPUID # store cpuid 482 - lhi %r0,(3f-2f) / 2 483 - la %r1,2f-.LPG0(%r13) 484 - 0: clc __LC_CPUID+4(2),0(%r1) 485 - jne 3f 486 - lpsw 1f-.LPG0(13) # machine type not good enough, crash 487 .align 16 488 - 1: .long 0x000a0000,0x00000000 489 - 2: 490 #if defined(CONFIG_MARCH_Z10) 491 - .short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086, 0x2094, 0x2096 492 #elif defined(CONFIG_MARCH_Z9_109) 493 - .short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086 494 #elif defined(CONFIG_MARCH_Z990) 495 - .short 0x9672, 0x2064, 0x2066 496 #elif defined(CONFIG_MARCH_Z900) 497 - .short 0x9672 498 #endif 499 - 3: la %r1,2(%r1) 500 - brct %r0,0b 501 #endif 502 503 l %r13,4f-.LPG0(%r13)
··· 1 /* 2 + * Copyright IBM Corp. 1999,2009 3 * 4 * Author(s): Hartmut Penner <hp@de.ibm.com> 5 * Martin Schwidefsky <schwidefsky@de.ibm.com> ··· 479 mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) 480 mvc __LC_EXIT_TIMER(8),5f-.LPG0(%r13) 481 #ifndef CONFIG_MARCH_G5 482 + # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} 483 + xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST 484 + stfl __LC_STFL_FAC_LIST # store facility list 485 + tm __LC_STFL_FAC_LIST,0x01 # stfle available ? 486 + jz 0f 487 + la %r0,0 488 + .insn s,0xb2b00000,__LC_STFL_FAC_LIST # store facility list extended 489 + 0: l %r0,__LC_STFL_FAC_LIST 490 + n %r0,2f+8-.LPG0(%r13) 491 + cl %r0,2f+8-.LPG0(%r13) 492 + jne 1f 493 + l %r0,__LC_STFL_FAC_LIST+4 494 + n %r0,2f+12-.LPG0(%r13) 495 + cl %r0,2f+12-.LPG0(%r13) 496 + je 3f 497 + 1: l %r15,.Lstack-.LPG0(%r13) 498 + ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE 499 + ahi %r15,-96 500 + la %r2,.Lals_string-.LPG0(%r13) 501 + l %r3,.Lsclp_print-.LPG0(%r13) 502 + basr %r14,%r3 503 + lpsw 2f-.LPG0(%r13) # machine type not good enough, crash 504 + .Lals_string: 505 + .asciz "The Linux kernel requires more recent processor hardware" 506 + .Lsclp_print: 507 + .long _sclp_print_early 508 + .Lstack: 509 + .long init_thread_union 510 .align 16 511 + 2: .long 0x000a0000,0x8badcccc 512 + #if defined(CONFIG_64BIT) 513 #if defined(CONFIG_MARCH_Z10) 514 + .long 0xc100efe3, 0xf0680000 515 #elif defined(CONFIG_MARCH_Z9_109) 516 + .long 0xc100efc3, 0x00000000 517 #elif defined(CONFIG_MARCH_Z990) 518 + .long 0xc0002000, 0x00000000 519 #elif defined(CONFIG_MARCH_Z900) 520 + .long 0xc0000000, 0x00000000 521 #endif 522 + #else 523 + #if defined(CONFIG_MARCH_Z10) 524 + .long 0x8100c880, 0x00000000 525 + #elif defined(CONFIG_MARCH_Z9_109) 526 + .long 0x8100c880, 0x00000000 527 + #elif defined(CONFIG_MARCH_Z990) 528 + .long 0x80002000, 0x00000000 529 + #elif defined(CONFIG_MARCH_Z900) 530 + .long 0x80000000, 0x00000000 531 + #endif 532 + #endif 533 + 3: 
534 #endif 535 536 l %r13,4f-.LPG0(%r13)
+2 -29
arch/s390/kernel/kprobes.c
··· 25 #include <linux/preempt.h> 26 #include <linux/stop_machine.h> 27 #include <linux/kdebug.h> 28 #include <asm/cacheflush.h> 29 #include <asm/sections.h> 30 - #include <asm/uaccess.h> 31 #include <linux/module.h> 32 33 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; ··· 155 static int __kprobes swap_instruction(void *aref) 156 { 157 struct ins_replace_args *args = aref; 158 - u32 *addr; 159 - u32 instr; 160 - int err = -EFAULT; 161 162 - /* 163 - * Text segment is read-only, hence we use stura to bypass dynamic 164 - * address translation to exchange the instruction. Since stura 165 - * always operates on four bytes, but we only want to exchange two 166 - * bytes do some calculations to get things right. In addition we 167 - * shall not cross any page boundaries (vmalloc area!) when writing 168 - * the new instruction. 169 - */ 170 - addr = (u32 *)((unsigned long)args->ptr & -4UL); 171 - if ((unsigned long)args->ptr & 2) 172 - instr = ((*addr) & 0xffff0000) | args->new; 173 - else 174 - instr = ((*addr) & 0x0000ffff) | args->new << 16; 175 - 176 - asm volatile( 177 - " lra %1,0(%1)\n" 178 - "0: stura %2,%1\n" 179 - "1: la %0,0\n" 180 - "2:\n" 181 - EX_TABLE(0b,2b) 182 - : "+d" (err) 183 - : "a" (addr), "d" (instr) 184 - : "memory", "cc"); 185 - 186 - return err; 187 } 188 189 void __kprobes arch_arm_kprobe(struct kprobe *p)
··· 25 #include <linux/preempt.h> 26 #include <linux/stop_machine.h> 27 #include <linux/kdebug.h> 28 + #include <linux/uaccess.h> 29 #include <asm/cacheflush.h> 30 #include <asm/sections.h> 31 #include <linux/module.h> 32 33 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; ··· 155 static int __kprobes swap_instruction(void *aref) 156 { 157 struct ins_replace_args *args = aref; 158 159 + return probe_kernel_write(args->ptr, &args->new, sizeof(args->new)); 160 } 161 162 void __kprobes arch_arm_kprobe(struct kprobe *p)
+187 -29
arch/s390/kernel/mcount.S
··· 1 /* 2 - * Copyright IBM Corp. 2008 3 * 4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 5 * ··· 7 8 #include <asm/asm-offsets.h> 9 10 - #ifndef CONFIG_64BIT 11 - .globl _mcount 12 - _mcount: 13 - stm %r0,%r5,8(%r15) 14 - st %r14,56(%r15) 15 - lr %r1,%r15 16 - ahi %r15,-96 17 - l %r3,100(%r15) 18 - la %r2,0(%r14) 19 - st %r1,__SF_BACKCHAIN(%r15) 20 - la %r3,0(%r3) 21 - bras %r14,0f 22 - .long ftrace_trace_function 23 - 0: l %r14,0(%r14) 24 - l %r14,0(%r14) 25 - basr %r14,%r14 26 - ahi %r15,96 27 - lm %r0,%r5,8(%r15) 28 - l %r14,56(%r15) 29 - br %r14 30 - 31 - .globl ftrace_stub 32 ftrace_stub: 33 br %r14 34 35 - #else /* CONFIG_64BIT */ 36 37 - .globl _mcount 38 _mcount: 39 - stmg %r0,%r5,16(%r15) 40 stg %r14,112(%r15) 41 lgr %r1,%r15 42 aghi %r15,-160 ··· 74 larl %r14,ftrace_trace_function 75 lg %r14,0(%r14) 76 basr %r14,%r14 77 aghi %r15,160 78 - lmg %r0,%r5,16(%r15) 79 lg %r14,112(%r15) 80 br %r14 81 82 - .globl ftrace_stub 83 - ftrace_stub: 84 br %r14 85 86 #endif /* CONFIG_64BIT */
··· 1 /* 2 + * Copyright IBM Corp. 2008,2009 3 * 4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 5 * ··· 7 8 #include <asm/asm-offsets.h> 9 10 + .globl ftrace_stub 11 ftrace_stub: 12 br %r14 13 14 + #ifdef CONFIG_64BIT 15 16 + #ifdef CONFIG_DYNAMIC_FTRACE 17 + 18 + .globl _mcount 19 _mcount: 20 + br %r14 21 + 22 + .globl ftrace_caller 23 + ftrace_caller: 24 + larl %r1,function_trace_stop 25 + icm %r1,0xf,0(%r1) 26 + bnzr %r14 27 + stmg %r2,%r5,32(%r15) 28 + stg %r14,112(%r15) 29 + lgr %r1,%r15 30 + aghi %r15,-160 31 + stg %r1,__SF_BACKCHAIN(%r15) 32 + lgr %r2,%r14 33 + lg %r3,168(%r15) 34 + larl %r14,ftrace_dyn_func 35 + lg %r14,0(%r14) 36 + basr %r14,%r14 37 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 38 + .globl ftrace_graph_caller 39 + ftrace_graph_caller: 40 + # This unconditional branch gets runtime patched. Change only if 41 + # you know what you are doing. See ftrace_enable_graph_caller(). 42 + j 0f 43 + lg %r2,272(%r15) 44 + lg %r3,168(%r15) 45 + brasl %r14,prepare_ftrace_return 46 + stg %r2,168(%r15) 47 + 0: 48 + #endif 49 + aghi %r15,160 50 + lmg %r2,%r5,32(%r15) 51 + lg %r14,112(%r15) 52 + br %r14 53 + 54 + .data 55 + .globl ftrace_dyn_func 56 + ftrace_dyn_func: 57 + .quad ftrace_stub 58 + .previous 59 + 60 + #else /* CONFIG_DYNAMIC_FTRACE */ 61 + 62 + .globl _mcount 63 + _mcount: 64 + larl %r1,function_trace_stop 65 + icm %r1,0xf,0(%r1) 66 + bnzr %r14 67 + stmg %r2,%r5,32(%r15) 68 stg %r14,112(%r15) 69 lgr %r1,%r15 70 aghi %r15,-160 ··· 46 larl %r14,ftrace_trace_function 47 lg %r14,0(%r14) 48 basr %r14,%r14 49 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 50 + lg %r2,272(%r15) 51 + lg %r3,168(%r15) 52 + brasl %r14,prepare_ftrace_return 53 + stg %r2,168(%r15) 54 + #endif 55 aghi %r15,160 56 + lmg %r2,%r5,32(%r15) 57 lg %r14,112(%r15) 58 br %r14 59 60 + #endif /* CONFIG_DYNAMIC_FTRACE */ 61 + 62 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 63 + 64 + .globl return_to_handler 65 + return_to_handler: 66 + stmg %r2,%r5,32(%r15) 67 + lgr %r1,%r15 68 + aghi %r15,-160 69 
+ stg %r1,__SF_BACKCHAIN(%r15) 70 + brasl %r14,ftrace_return_to_handler 71 + aghi %r15,160 72 + lgr %r14,%r2 73 + lmg %r2,%r5,32(%r15) 74 br %r14 75 + 76 + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 77 + 78 + #else /* CONFIG_64BIT */ 79 + 80 + #ifdef CONFIG_DYNAMIC_FTRACE 81 + 82 + .globl _mcount 83 + _mcount: 84 + br %r14 85 + 86 + .globl ftrace_caller 87 + ftrace_caller: 88 + stm %r2,%r5,16(%r15) 89 + bras %r1,2f 90 + 0: .long ftrace_trace_function 91 + 1: .long function_trace_stop 92 + 2: l %r2,1b-0b(%r1) 93 + icm %r2,0xf,0(%r2) 94 + jnz 3f 95 + st %r14,56(%r15) 96 + lr %r0,%r15 97 + ahi %r15,-96 98 + l %r3,100(%r15) 99 + la %r2,0(%r14) 100 + st %r0,__SF_BACKCHAIN(%r15) 101 + la %r3,0(%r3) 102 + l %r14,0b-0b(%r1) 103 + l %r14,0(%r14) 104 + basr %r14,%r14 105 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 106 + .globl ftrace_graph_caller 107 + ftrace_graph_caller: 108 + # This unconditional branch gets runtime patched. Change only if 109 + # you know what you are doing. See ftrace_enable_graph_caller(). 
110 + j 1f 111 + bras %r1,0f 112 + .long prepare_ftrace_return 113 + 0: l %r2,152(%r15) 114 + l %r4,0(%r1) 115 + l %r3,100(%r15) 116 + basr %r14,%r4 117 + st %r2,100(%r15) 118 + 1: 119 + #endif 120 + ahi %r15,96 121 + l %r14,56(%r15) 122 + 3: lm %r2,%r5,16(%r15) 123 + br %r14 124 + 125 + .data 126 + .globl ftrace_dyn_func 127 + ftrace_dyn_func: 128 + .long ftrace_stub 129 + .previous 130 + 131 + #else /* CONFIG_DYNAMIC_FTRACE */ 132 + 133 + .globl _mcount 134 + _mcount: 135 + stm %r2,%r5,16(%r15) 136 + bras %r1,2f 137 + 0: .long ftrace_trace_function 138 + 1: .long function_trace_stop 139 + 2: l %r2,1b-0b(%r1) 140 + icm %r2,0xf,0(%r2) 141 + jnz 3f 142 + st %r14,56(%r15) 143 + lr %r0,%r15 144 + ahi %r15,-96 145 + l %r3,100(%r15) 146 + la %r2,0(%r14) 147 + st %r0,__SF_BACKCHAIN(%r15) 148 + la %r3,0(%r3) 149 + l %r14,0b-0b(%r1) 150 + l %r14,0(%r14) 151 + basr %r14,%r14 152 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 153 + bras %r1,0f 154 + .long prepare_ftrace_return 155 + 0: l %r2,152(%r15) 156 + l %r4,0(%r1) 157 + l %r3,100(%r15) 158 + basr %r14,%r4 159 + st %r2,100(%r15) 160 + #endif 161 + ahi %r15,96 162 + l %r14,56(%r15) 163 + 3: lm %r2,%r5,16(%r15) 164 + br %r14 165 + 166 + #endif /* CONFIG_DYNAMIC_FTRACE */ 167 + 168 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 169 + 170 + .globl return_to_handler 171 + return_to_handler: 172 + stm %r2,%r5,16(%r15) 173 + st %r14,56(%r15) 174 + lr %r0,%r15 175 + ahi %r15,-96 176 + st %r0,__SF_BACKCHAIN(%r15) 177 + bras %r1,0f 178 + .long ftrace_return_to_handler 179 + 0: l %r2,0b-0b(%r1) 180 + basr %r14,%r2 181 + lr %r14,%r2 182 + ahi %r15,96 183 + lm %r2,%r5,16(%r15) 184 + br %r14 185 + 186 + #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 187 188 #endif /* CONFIG_64BIT */
+1 -1
arch/s390/kernel/nmi.c
··· 16 #include <asm/lowcore.h> 17 #include <asm/smp.h> 18 #include <asm/etr.h> 19 - #include <asm/cpu.h> 20 #include <asm/nmi.h> 21 #include <asm/crw.h> 22
··· 16 #include <asm/lowcore.h> 17 #include <asm/smp.h> 18 #include <asm/etr.h> 19 + #include <asm/cputime.h> 20 #include <asm/nmi.h> 21 #include <asm/crw.h> 22
+2 -1
arch/s390/kernel/process.c
··· 32 #include <linux/elfcore.h> 33 #include <linux/kernel_stat.h> 34 #include <linux/syscalls.h> 35 #include <asm/uaccess.h> 36 #include <asm/pgtable.h> 37 #include <asm/system.h> ··· 205 save_fp_regs(&p->thread.fp_regs); 206 /* Set a new TLS ? */ 207 if (clone_flags & CLONE_SETTLS) { 208 - if (test_thread_flag(TIF_31BIT)) { 209 p->thread.acrs[0] = (unsigned int) regs->gprs[6]; 210 } else { 211 p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32);
··· 32 #include <linux/elfcore.h> 33 #include <linux/kernel_stat.h> 34 #include <linux/syscalls.h> 35 + #include <asm/compat.h> 36 #include <asm/uaccess.h> 37 #include <asm/pgtable.h> 38 #include <asm/system.h> ··· 204 save_fp_regs(&p->thread.fp_regs); 205 /* Set a new TLS ? */ 206 if (clone_flags & CLONE_SETTLS) { 207 + if (is_compat_task()) { 208 p->thread.acrs[0] = (unsigned int) regs->gprs[6]; 209 } else { 210 p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32);
+16 -7
arch/s390/kernel/ptrace.c
··· 36 #include <linux/elf.h> 37 #include <linux/regset.h> 38 #include <linux/tracehook.h> 39 - 40 #include <asm/segment.h> 41 #include <asm/page.h> 42 #include <asm/pgtable.h> ··· 71 if (per_info->single_step) { 72 per_info->control_regs.bits.starting_addr = 0; 73 #ifdef CONFIG_COMPAT 74 - if (test_thread_flag(TIF_31BIT)) 75 per_info->control_regs.bits.ending_addr = 0x7fffffffUL; 76 else 77 #endif ··· 484 { 485 __u32 tmp; 486 487 - if (!test_thread_flag(TIF_31BIT) || 488 - (addr & 3) || addr > sizeof(struct user) - 3) 489 return -EIO; 490 491 tmp = __peek_user_compat(child, addr); ··· 585 static int poke_user_compat(struct task_struct *child, 586 addr_t addr, addr_t data) 587 { 588 - if (!test_thread_flag(TIF_31BIT) || 589 - (addr & 3) || addr > sizeof(struct user32) - 3) 590 return -EIO; 591 592 return __poke_user_compat(child, addr, data); ··· 642 { 643 long ret; 644 645 /* 646 * The sysc_tracesys code in entry.S stored the system 647 * call number to gprs[2]. ··· 662 ret = -1; 663 } 664 665 if (unlikely(current->audit_context)) 666 - audit_syscall_entry(test_thread_flag(TIF_31BIT) ? 667 AUDIT_ARCH_S390 : AUDIT_ARCH_S390X, 668 regs->gprs[2], regs->orig_gpr2, 669 regs->gprs[3], regs->gprs[4], ··· 679 if (unlikely(current->audit_context)) 680 audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), 681 regs->gprs[2]); 682 683 if (test_thread_flag(TIF_SYSCALL_TRACE)) 684 tracehook_report_syscall_exit(regs, 0);
··· 36 #include <linux/elf.h> 37 #include <linux/regset.h> 38 #include <linux/tracehook.h> 39 + #include <linux/seccomp.h> 40 + #include <trace/syscall.h> 41 + #include <asm/compat.h> 42 #include <asm/segment.h> 43 #include <asm/page.h> 44 #include <asm/pgtable.h> ··· 69 if (per_info->single_step) { 70 per_info->control_regs.bits.starting_addr = 0; 71 #ifdef CONFIG_COMPAT 72 + if (is_compat_task()) 73 per_info->control_regs.bits.ending_addr = 0x7fffffffUL; 74 else 75 #endif ··· 482 { 483 __u32 tmp; 484 485 + if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3) 486 return -EIO; 487 488 tmp = __peek_user_compat(child, addr); ··· 584 static int poke_user_compat(struct task_struct *child, 585 addr_t addr, addr_t data) 586 { 587 + if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3) 588 return -EIO; 589 590 return __poke_user_compat(child, addr, data); ··· 642 { 643 long ret; 644 645 + /* Do the secure computing check first. */ 646 + secure_computing(regs->gprs[2]); 647 + 648 /* 649 * The sysc_tracesys code in entry.S stored the system 650 * call number to gprs[2]. ··· 659 ret = -1; 660 } 661 662 + if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) 663 + ftrace_syscall_enter(regs); 664 + 665 if (unlikely(current->audit_context)) 666 + audit_syscall_entry(is_compat_task() ? 667 AUDIT_ARCH_S390 : AUDIT_ARCH_S390X, 668 regs->gprs[2], regs->orig_gpr2, 669 regs->gprs[3], regs->gprs[4], ··· 673 if (unlikely(current->audit_context)) 674 audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), 675 regs->gprs[2]); 676 + 677 + if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) 678 + ftrace_syscall_exit(regs); 679 680 if (test_thread_flag(TIF_SYSCALL_TRACE)) 681 tracehook_report_syscall_exit(regs, 0);
+3 -2
arch/s390/kernel/s390_ext.c
··· 10 #include <linux/module.h> 11 #include <linux/kernel.h> 12 #include <linux/slab.h> 13 #include <linux/errno.h> 14 #include <linux/kernel_stat.h> 15 #include <linux/interrupt.h> 16 - #include <asm/cpu.h> 17 #include <asm/lowcore.h> 18 #include <asm/s390_ext.h> 19 #include <asm/irq_regs.h> ··· 113 return 0; 114 } 115 116 - void do_extint(struct pt_regs *regs, unsigned short code) 117 { 118 ext_int_info_t *p; 119 int index;
··· 10 #include <linux/module.h> 11 #include <linux/kernel.h> 12 #include <linux/slab.h> 13 + #include <linux/ftrace.h> 14 #include <linux/errno.h> 15 #include <linux/kernel_stat.h> 16 #include <linux/interrupt.h> 17 + #include <asm/cputime.h> 18 #include <asm/lowcore.h> 19 #include <asm/s390_ext.h> 20 #include <asm/irq_regs.h> ··· 112 return 0; 113 } 114 115 + void __irq_entry do_extint(struct pt_regs *regs, unsigned short code) 116 { 117 ext_int_info_t *p; 118 int index;
+327
arch/s390/kernel/sclp.S
···
··· 1 + /* 2 + * Mini SCLP driver. 3 + * 4 + * Copyright IBM Corp. 2004,2009 5 + * 6 + * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>, 7 + * Heiko Carstens <heiko.carstens@de.ibm.com>, 8 + * 9 + */ 10 + 11 + LC_EXT_NEW_PSW = 0x58 # addr of ext int handler 12 + LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter 13 + LC_EXT_INT_CODE = 0x86 # addr of ext int code 14 + 15 + # 16 + # Subroutine which waits synchronously until either an external interruption 17 + # or a timeout occurs. 18 + # 19 + # Parameters: 20 + # R2 = 0 for no timeout, non-zero for timeout in (approximated) seconds 21 + # 22 + # Returns: 23 + # R2 = 0 on interrupt, 2 on timeout 24 + # R3 = external interruption parameter if R2=0 25 + # 26 + 27 + .section ".init.text","ax" 28 + 29 + _sclp_wait_int: 30 + stm %r6,%r15,24(%r15) # save registers 31 + basr %r13,0 # get base register 32 + .LbaseS1: 33 + ahi %r15,-96 # create stack frame 34 + la %r8,LC_EXT_NEW_PSW # register int handler 35 + mvc .LoldpswS1-.LbaseS1(8,%r13),0(%r8) 36 + mvc 0(8,%r8),.LextpswS1-.LbaseS1(%r13) 37 + lhi %r6,0x0200 # cr mask for ext int (cr0.54) 38 + ltr %r2,%r2 39 + jz .LsetctS1 40 + ahi %r6,0x0800 # cr mask for clock int (cr0.52) 41 + stck .LtimeS1-.LbaseS1(%r13) # initiate timeout 42 + al %r2,.LtimeS1-.LbaseS1(%r13) 43 + st %r2,.LtimeS1-.LbaseS1(%r13) 44 + sckc .LtimeS1-.LbaseS1(%r13) 45 + 46 + .LsetctS1: 47 + stctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # enable required interrupts 48 + l %r0,.LctlS1-.LbaseS1(%r13) 49 + lhi %r1,~(0x200 | 0x800) # clear old values 50 + nr %r1,%r0 51 + or %r1,%r6 # set new value 52 + st %r1,.LctlS1-.LbaseS1(%r13) 53 + lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) 54 + st %r0,.LctlS1-.LbaseS1(%r13) 55 + lhi %r2,2 # return code for timeout 56 + .LloopS1: 57 + lpsw .LwaitpswS1-.LbaseS1(%r13) # wait until interrupt 58 + .LwaitS1: 59 + lh %r7,LC_EXT_INT_CODE 60 + chi %r7,0x1004 # timeout? 61 + je .LtimeoutS1 62 + chi %r7,0x2401 # service int? 
63 + jne .LloopS1 64 + sr %r2,%r2 65 + l %r3,LC_EXT_INT_PARAM 66 + .LtimeoutS1: 67 + lctl %c0,%c0,.LctlS1-.LbaseS1(%r13) # restore interrupt setting 68 + # restore old handler 69 + mvc 0(8,%r8),.LoldpswS1-.LbaseS1(%r13) 70 + lm %r6,%r15,120(%r15) # restore registers 71 + br %r14 # return to caller 72 + 73 + .align 8 74 + .LoldpswS1: 75 + .long 0, 0 # old ext int PSW 76 + .LextpswS1: 77 + .long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int 78 + .LwaitpswS1: 79 + .long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int 80 + .LtimeS1: 81 + .quad 0 # current time 82 + .LctlS1: 83 + .long 0 # CT0 contents 84 + 85 + # 86 + # Subroutine to synchronously issue a service call. 87 + # 88 + # Parameters: 89 + # R2 = command word 90 + # R3 = sccb address 91 + # 92 + # Returns: 93 + # R2 = 0 on success, 1 on failure 94 + # R3 = sccb response code if R2 = 0 95 + # 96 + 97 + _sclp_servc: 98 + stm %r6,%r15,24(%r15) # save registers 99 + ahi %r15,-96 # create stack frame 100 + lr %r6,%r2 # save command word 101 + lr %r7,%r3 # save sccb address 102 + .LretryS2: 103 + lhi %r2,1 # error return code 104 + .insn rre,0xb2200000,%r6,%r7 # servc 105 + brc 1,.LendS2 # exit if not operational 106 + brc 8,.LnotbusyS2 # go on if not busy 107 + sr %r2,%r2 # wait until no longer busy 108 + bras %r14,_sclp_wait_int 109 + j .LretryS2 # retry 110 + .LnotbusyS2: 111 + sr %r2,%r2 # wait until result 112 + bras %r14,_sclp_wait_int 113 + sr %r2,%r2 114 + lh %r3,6(%r7) 115 + .LendS2: 116 + lm %r6,%r15,120(%r15) # restore registers 117 + br %r14 118 + 119 + # 120 + # Subroutine to set up the SCLP interface. 
121 + # 122 + # Parameters: 123 + # R2 = 0 to activate, non-zero to deactivate 124 + # 125 + # Returns: 126 + # R2 = 0 on success, non-zero on failure 127 + # 128 + 129 + _sclp_setup: 130 + stm %r6,%r15,24(%r15) # save registers 131 + ahi %r15,-96 # create stack frame 132 + basr %r13,0 # get base register 133 + .LbaseS3: 134 + l %r6,.LsccbS0-.LbaseS3(%r13) # prepare init mask sccb 135 + mvc 0(.LinitendS3-.LinitsccbS3,%r6),.LinitsccbS3-.LbaseS3(%r13) 136 + ltr %r2,%r2 # initialization? 137 + jz .LdoinitS3 # go ahead 138 + # clear masks 139 + xc .LinitmaskS3-.LinitsccbS3(8,%r6),.LinitmaskS3-.LinitsccbS3(%r6) 140 + .LdoinitS3: 141 + l %r2,.LwritemaskS3-.LbaseS3(%r13)# get command word 142 + lr %r3,%r6 # get sccb address 143 + bras %r14,_sclp_servc # issue service call 144 + ltr %r2,%r2 # servc successful? 145 + jnz .LerrorS3 146 + chi %r3,0x20 # write mask successful? 147 + jne .LerrorS3 148 + # check masks 149 + la %r2,.LinitmaskS3-.LinitsccbS3(%r6) 150 + l %r1,0(%r2) # receive mask ok? 151 + n %r1,12(%r2) 152 + cl %r1,0(%r2) 153 + jne .LerrorS3 154 + l %r1,4(%r2) # send mask ok? 155 + n %r1,8(%r2) 156 + cl %r1,4(%r2) 157 + sr %r2,%r2 158 + je .LendS3 159 + .LerrorS3: 160 + lhi %r2,1 # error return code 161 + .LendS3: 162 + lm %r6,%r15,120(%r15) # restore registers 163 + br %r14 164 + .LwritemaskS3: 165 + .long 0x00780005 # SCLP command for write mask 166 + .LinitsccbS3: 167 + .word .LinitendS3-.LinitsccbS3 168 + .byte 0,0,0,0 169 + .word 0 170 + .word 0 171 + .word 4 172 + .LinitmaskS3: 173 + .long 0x80000000 174 + .long 0x40000000 175 + .long 0 176 + .long 0 177 + .LinitendS3: 178 + 179 + # 180 + # Subroutine which prints a given text to the SCLP console. 
181 + # 182 + # Parameters: 183 + # R2 = address of nil-terminated ASCII text 184 + # 185 + # Returns: 186 + # R2 = 0 on success, 1 on failure 187 + # 188 + 189 + _sclp_print: 190 + stm %r6,%r15,24(%r15) # save registers 191 + ahi %r15,-96 # create stack frame 192 + basr %r13,0 # get base register 193 + .LbaseS4: 194 + l %r8,.LsccbS0-.LbaseS4(%r13) # prepare write data sccb 195 + mvc 0(.LmtoS4-.LwritesccbS4,%r8),.LwritesccbS4-.LbaseS4(%r13) 196 + la %r7,.LmtoS4-.LwritesccbS4(%r8) # current mto addr 197 + sr %r0,%r0 198 + l %r10,.Lascebc-.LbaseS4(%r13) # address of translation table 199 + .LinitmtoS4: 200 + # initialize mto 201 + mvc 0(.LmtoendS4-.LmtoS4,%r7),.LmtoS4-.LbaseS4(%r13) 202 + lhi %r6,.LmtoendS4-.LmtoS4 # current mto length 203 + .LloopS4: 204 + ic %r0,0(%r2) # get character 205 + ahi %r2,1 206 + ltr %r0,%r0 # end of string? 207 + jz .LfinalizemtoS4 208 + chi %r0,0x15 # end of line (NL)? 209 + jz .LfinalizemtoS4 210 + stc %r0,0(%r6,%r7) # copy to mto 211 + la %r11,0(%r6,%r7) 212 + tr 0(1,%r11),0(%r10) # translate to EBCDIC 213 + ahi %r6,1 214 + j .LloopS4 215 + .LfinalizemtoS4: 216 + sth %r6,0(%r7) # update mto length 217 + lh %r9,.LmdbS4-.LwritesccbS4(%r8) # update mdb length 218 + ar %r9,%r6 219 + sth %r9,.LmdbS4-.LwritesccbS4(%r8) 220 + lh %r9,.LevbufS4-.LwritesccbS4(%r8)# update evbuf length 221 + ar %r9,%r6 222 + sth %r9,.LevbufS4-.LwritesccbS4(%r8) 223 + lh %r9,0(%r8) # update sccb length 224 + ar %r9,%r6 225 + sth %r9,0(%r8) 226 + ar %r7,%r6 # update current mto adress 227 + ltr %r0,%r0 # more characters? 228 + jnz .LinitmtoS4 229 + l %r2,.LwritedataS4-.LbaseS4(%r13)# write data 230 + lr %r3,%r8 231 + bras %r14,_sclp_servc 232 + ltr %r2,%r2 # servc successful? 233 + jnz .LendS4 234 + chi %r3,0x20 # write data successful? 235 + je .LendS4 236 + lhi %r2,1 # error return code 237 + .LendS4: 238 + lm %r6,%r15,120(%r15) # restore registers 239 + br %r14 240 + 241 + # 242 + # Function which prints a given text to the SCLP console. 
243 + # 244 + # Parameters: 245 + # R2 = address of nil-terminated ASCII text 246 + # 247 + # Returns: 248 + # R2 = 0 on success, 1 on failure 249 + # 250 + 251 + .globl _sclp_print_early 252 + _sclp_print_early: 253 + stm %r6,%r15,24(%r15) # save registers 254 + ahi %r15,-96 # create stack frame 255 + lr %r10,%r2 # save string pointer 256 + lhi %r2,0 257 + bras %r14,_sclp_setup # enable console 258 + ltr %r2,%r2 259 + jnz .LendS5 260 + lr %r2,%r10 261 + bras %r14,_sclp_print # print string 262 + ltr %r2,%r2 263 + jnz .LendS5 264 + lhi %r2,1 265 + bras %r14,_sclp_setup # disable console 266 + .LendS5: 267 + lm %r6,%r15,120(%r15) # restore registers 268 + br %r14 269 + 270 + .LwritedataS4: 271 + .long 0x00760005 # SCLP command for write data 272 + .LwritesccbS4: 273 + # sccb 274 + .word .LmtoS4-.LwritesccbS4 275 + .byte 0 276 + .byte 0,0,0 277 + .word 0 278 + 279 + # evbuf 280 + .LevbufS4: 281 + .word .LmtoS4-.LevbufS4 282 + .byte 0x02 283 + .byte 0 284 + .word 0 285 + 286 + .LmdbS4: 287 + # mdb 288 + .word .LmtoS4-.LmdbS4 289 + .word 1 290 + .long 0xd4c4c240 291 + .long 1 292 + 293 + # go 294 + .LgoS4: 295 + .word .LmtoS4-.LgoS4 296 + .word 1 297 + .long 0 298 + .byte 0,0,0,0,0,0,0,0 299 + .byte 0,0,0 300 + .byte 0 301 + .byte 0,0,0,0,0,0,0 302 + .byte 0 303 + .word 0 304 + .byte 0,0,0,0,0,0,0,0,0,0 305 + .byte 0,0,0,0,0,0,0,0 306 + .byte 0,0,0,0,0,0,0,0 307 + 308 + .LmtoS4: 309 + .word .LmtoendS4-.LmtoS4 310 + .word 4 311 + .word 0x1000 312 + .byte 0 313 + .byte 0,0,0 314 + .LmtoendS4: 315 + 316 + # Global constants 317 + .LsccbS0: 318 + .long _sclp_work_area 319 + .Lascebc: 320 + .long _ascebc 321 + .previous 322 + 323 + .section ".init.data","a" 324 + .balign 4096 325 + _sclp_work_area: 326 + .fill 4096 327 + .previous
+2
arch/s390/kernel/setup.c
··· 42 #include <linux/ctype.h> 43 #include <linux/reboot.h> 44 #include <linux/topology.h> 45 46 #include <asm/ipl.h> 47 #include <asm/uaccess.h> ··· 443 lc->steal_timer = S390_lowcore.steal_timer; 444 lc->last_update_timer = S390_lowcore.last_update_timer; 445 lc->last_update_clock = S390_lowcore.last_update_clock; 446 set_prefix((u32)(unsigned long) lc); 447 lowcore_ptr[0] = lc; 448 }
··· 42 #include <linux/ctype.h> 43 #include <linux/reboot.h> 44 #include <linux/topology.h> 45 + #include <linux/ftrace.h> 46 47 #include <asm/ipl.h> 48 #include <asm/uaccess.h> ··· 442 lc->steal_timer = S390_lowcore.steal_timer; 443 lc->last_update_timer = S390_lowcore.last_update_timer; 444 lc->last_update_clock = S390_lowcore.last_update_clock; 445 + lc->ftrace_func = S390_lowcore.ftrace_func; 446 set_prefix((u32)(unsigned long) lc); 447 lowcore_ptr[0] = lc; 448 }
+2 -1
arch/s390/kernel/signal.c
··· 26 #include <linux/binfmts.h> 27 #include <linux/tracehook.h> 28 #include <linux/syscalls.h> 29 #include <asm/ucontext.h> 30 #include <asm/uaccess.h> 31 #include <asm/lowcore.h> ··· 483 /* Whee! Actually deliver the signal. */ 484 int ret; 485 #ifdef CONFIG_COMPAT 486 - if (test_thread_flag(TIF_31BIT)) { 487 ret = handle_signal32(signr, &ka, &info, oldset, regs); 488 } 489 else
··· 26 #include <linux/binfmts.h> 27 #include <linux/tracehook.h> 28 #include <linux/syscalls.h> 29 + #include <linux/compat.h> 30 #include <asm/ucontext.h> 31 #include <asm/uaccess.h> 32 #include <asm/lowcore.h> ··· 482 /* Whee! Actually deliver the signal. */ 483 int ret; 484 #ifdef CONFIG_COMPAT 485 + if (is_compat_task()) { 486 ret = handle_signal32(signr, &ka, &info, oldset, regs); 487 } 488 else
+2 -1
arch/s390/kernel/smp.c
··· 47 #include <asm/timer.h> 48 #include <asm/lowcore.h> 49 #include <asm/sclp.h> 50 - #include <asm/cpu.h> 51 #include <asm/vdso.h> 52 #include "entry.h" 53 ··· 572 cpu_lowcore->cpu_nr = cpu; 573 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; 574 cpu_lowcore->machine_flags = S390_lowcore.machine_flags; 575 eieio(); 576 577 while (signal_processor(cpu, sigp_restart) == sigp_busy)
··· 47 #include <asm/timer.h> 48 #include <asm/lowcore.h> 49 #include <asm/sclp.h> 50 + #include <asm/cputime.h> 51 #include <asm/vdso.h> 52 #include "entry.h" 53 ··· 572 cpu_lowcore->cpu_nr = cpu; 573 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; 574 cpu_lowcore->machine_flags = S390_lowcore.machine_flags; 575 + cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; 576 eieio(); 577 578 while (signal_processor(cpu, sigp_restart) == sigp_busy)
+2
arch/s390/kernel/syscalls.S
··· 338 SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper) 339 SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) 340 SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
··· 338 SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper) 339 SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) 340 SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) 341 + SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */ 342 + SYSCALL(sys_perf_counter_open,sys_perf_counter_open,sys_perf_counter_open_wrapper)
+1 -8
arch/s390/kernel/time.c
··· 70 /* 71 * Scheduler clock - returns current time in nanosec units. 72 */ 73 - unsigned long long sched_clock(void) 74 { 75 return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9; 76 } ··· 95 xtime->tv_nsec = ((todval * 1000) >> 12); 96 } 97 98 - #ifdef CONFIG_PROFILING 99 - #define s390_do_profile() profile_tick(CPU_PROFILING) 100 - #else 101 - #define s390_do_profile() do { ; } while(0) 102 - #endif /* CONFIG_PROFILING */ 103 - 104 void clock_comparator_work(void) 105 { 106 struct clock_event_device *cd; ··· 103 set_clock_comparator(S390_lowcore.clock_comparator); 104 cd = &__get_cpu_var(comparators); 105 cd->event_handler(cd); 106 - s390_do_profile(); 107 } 108 109 /*
··· 70 /* 71 * Scheduler clock - returns current time in nanosec units. 72 */ 73 + unsigned long long notrace sched_clock(void) 74 { 75 return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9; 76 } ··· 95 xtime->tv_nsec = ((todval * 1000) >> 12); 96 } 97 98 void clock_comparator_work(void) 99 { 100 struct clock_event_device *cd; ··· 109 set_clock_comparator(S390_lowcore.clock_comparator); 110 cd = &__get_cpu_var(comparators); 111 cd->event_handler(cd); 112 } 113 114 /*
+15 -4
arch/s390/kernel/vdso.c
··· 22 #include <linux/elf.h> 23 #include <linux/security.h> 24 #include <linux/bootmem.h> 25 - 26 #include <asm/pgtable.h> 27 #include <asm/system.h> 28 #include <asm/processor.h> ··· 53 54 static int __init vdso_setup(char *s) 55 { 56 - vdso_enabled = simple_strtoul(s, NULL, 0); 57 - return 1; 58 } 59 __setup("vdso=", vdso_setup); 60 ··· 214 vdso_pagelist = vdso64_pagelist; 215 vdso_pages = vdso64_pages; 216 #ifdef CONFIG_COMPAT 217 - if (test_thread_flag(TIF_31BIT)) { 218 vdso_pagelist = vdso32_pagelist; 219 vdso_pages = vdso32_pages; 220 }
··· 22 #include <linux/elf.h> 23 #include <linux/security.h> 24 #include <linux/bootmem.h> 25 + #include <linux/compat.h> 26 #include <asm/pgtable.h> 27 #include <asm/system.h> 28 #include <asm/processor.h> ··· 53 54 static int __init vdso_setup(char *s) 55 { 56 + unsigned long val; 57 + int rc; 58 + 59 + rc = 0; 60 + if (strncmp(s, "on", 3) == 0) 61 + vdso_enabled = 1; 62 + else if (strncmp(s, "off", 4) == 0) 63 + vdso_enabled = 0; 64 + else { 65 + rc = strict_strtoul(s, 0, &val); 66 + vdso_enabled = rc ? 0 : !!val; 67 + } 68 + return !rc; 69 } 70 __setup("vdso=", vdso_setup); 71 ··· 203 vdso_pagelist = vdso64_pagelist; 204 vdso_pages = vdso64_pages; 205 #ifdef CONFIG_COMPAT 206 + if (is_compat_task()) { 207 vdso_pagelist = vdso32_pagelist; 208 vdso_pages = vdso32_pages; 209 }
+1
arch/s390/kernel/vmlinux.lds.S
··· 34 SCHED_TEXT 35 LOCK_TEXT 36 KPROBES_TEXT 37 *(.fixup) 38 *(.gnu.warning) 39 } :text = 0x0700
··· 34 SCHED_TEXT 35 LOCK_TEXT 36 KPROBES_TEXT 37 + IRQENTRY_TEXT 38 *(.fixup) 39 *(.gnu.warning) 40 } :text = 0x0700
+1 -1
arch/s390/kernel/vtime.c
··· 23 #include <asm/s390_ext.h> 24 #include <asm/timer.h> 25 #include <asm/irq_regs.h> 26 - #include <asm/cpu.h> 27 28 static ext_int_info_t ext_int_info_timer; 29
··· 23 #include <asm/s390_ext.h> 24 #include <asm/timer.h> 25 #include <asm/irq_regs.h> 26 + #include <asm/cputime.h> 27 28 static ext_int_info_t ext_int_info_timer; 29
+1 -1
arch/s390/kvm/kvm-s390.c
··· 512 BUG(); 513 } 514 515 - might_sleep(); 516 517 do { 518 __vcpu_run(vcpu);
··· 512 BUG(); 513 } 514 515 + might_fault(); 516 517 do { 518 __vcpu_run(vcpu);
+40
arch/s390/lib/spinlock.c
··· 124 } 125 EXPORT_SYMBOL(_raw_read_lock_wait); 126 127 int _raw_read_trylock_retry(raw_rwlock_t *rw) 128 { 129 unsigned int old; ··· 177 } 178 } 179 EXPORT_SYMBOL(_raw_write_lock_wait); 180 181 int _raw_write_trylock_retry(raw_rwlock_t *rw) 182 {
··· 124 } 125 EXPORT_SYMBOL(_raw_read_lock_wait); 126 127 + void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) 128 + { 129 + unsigned int old; 130 + int count = spin_retry; 131 + 132 + local_irq_restore(flags); 133 + while (1) { 134 + if (count-- <= 0) { 135 + _raw_yield(); 136 + count = spin_retry; 137 + } 138 + if (!__raw_read_can_lock(rw)) 139 + continue; 140 + old = rw->lock & 0x7fffffffU; 141 + local_irq_disable(); 142 + if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) 143 + return; 144 + } 145 + } 146 + EXPORT_SYMBOL(_raw_read_lock_wait_flags); 147 + 148 int _raw_read_trylock_retry(raw_rwlock_t *rw) 149 { 150 unsigned int old; ··· 156 } 157 } 158 EXPORT_SYMBOL(_raw_write_lock_wait); 159 + 160 + void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) 161 + { 162 + int count = spin_retry; 163 + 164 + local_irq_restore(flags); 165 + while (1) { 166 + if (count-- <= 0) { 167 + _raw_yield(); 168 + count = spin_retry; 169 + } 170 + if (!__raw_write_can_lock(rw)) 171 + continue; 172 + local_irq_disable(); 173 + if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) 174 + return; 175 + } 176 + } 177 + EXPORT_SYMBOL(_raw_write_lock_wait_flags); 178 179 int _raw_write_trylock_retry(raw_rwlock_t *rw) 180 {
+1 -1
arch/s390/mm/Makefile
··· 2 # Makefile for the linux s390-specific parts of the memory manager. 3 # 4 5 - obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o 6 obj-$(CONFIG_CMM) += cmm.o 7 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 8 obj-$(CONFIG_PAGE_STATES) += page-states.o
··· 2 # Makefile for the linux s390-specific parts of the memory manager. 3 # 4 5 + obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o 6 obj-$(CONFIG_CMM) += cmm.o 7 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 8 obj-$(CONFIG_PAGE_STATES) += page-states.o
+2 -1
arch/s390/mm/fault.c
··· 19 #include <linux/ptrace.h> 20 #include <linux/mman.h> 21 #include <linux/mm.h> 22 #include <linux/smp.h> 23 #include <linux/kdebug.h> 24 #include <linux/smp_lock.h> ··· 240 up_read(&mm->mmap_sem); 241 clear_tsk_thread_flag(current, TIF_SINGLE_STEP); 242 #ifdef CONFIG_COMPAT 243 - compat = test_tsk_thread_flag(current, TIF_31BIT); 244 if (compat && instruction == 0x0a77) 245 sys32_sigreturn(); 246 else if (compat && instruction == 0x0aad)
··· 19 #include <linux/ptrace.h> 20 #include <linux/mman.h> 21 #include <linux/mm.h> 22 + #include <linux/compat.h> 23 #include <linux/smp.h> 24 #include <linux/kdebug.h> 25 #include <linux/smp_lock.h> ··· 239 up_read(&mm->mmap_sem); 240 clear_tsk_thread_flag(current, TIF_SINGLE_STEP); 241 #ifdef CONFIG_COMPAT 242 + compat = is_compat_task(); 243 if (compat && instruction == 0x0a77) 244 sys32_sigreturn(); 245 else if (compat && instruction == 0x0aad)
+61
arch/s390/mm/maccess.c
···
··· 1 + /* 2 + * Access kernel memory without faulting -- s390 specific implementation. 3 + * 4 + * Copyright IBM Corp. 2009 5 + * 6 + * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 7 + * 8 + */ 9 + 10 + #include <linux/uaccess.h> 11 + #include <linux/kernel.h> 12 + #include <linux/types.h> 13 + #include <linux/errno.h> 14 + #include <asm/system.h> 15 + 16 + /* 17 + * This function writes to kernel memory bypassing DAT and possible 18 + * write protection. It copies one to four bytes from src to dst 19 + * using the stura instruction. 20 + * Returns the number of bytes copied or -EFAULT. 21 + */ 22 + static long probe_kernel_write_odd(void *dst, void *src, size_t size) 23 + { 24 + unsigned long count, aligned; 25 + int offset, mask; 26 + int rc = -EFAULT; 27 + 28 + aligned = (unsigned long) dst & ~3UL; 29 + offset = (unsigned long) dst & 3; 30 + count = min_t(unsigned long, 4 - offset, size); 31 + mask = (0xf << (4 - count)) & 0xf; 32 + mask >>= offset; 33 + asm volatile( 34 + " bras 1,0f\n" 35 + " icm 0,0,0(%3)\n" 36 + "0: l 0,0(%1)\n" 37 + " lra %1,0(%1)\n" 38 + "1: ex %2,0(1)\n" 39 + "2: stura 0,%1\n" 40 + " la %0,0\n" 41 + "3:\n" 42 + EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b) 43 + : "+d" (rc), "+a" (aligned) 44 + : "a" (mask), "a" (src) : "cc", "memory", "0", "1"); 45 + return rc ? rc : count; 46 + } 47 + 48 + long probe_kernel_write(void *dst, void *src, size_t size) 49 + { 50 + long copied = 0; 51 + 52 + while (size) { 53 + copied = probe_kernel_write_odd(dst, src, size); 54 + if (copied < 0) 55 + break; 56 + dst += copied; 57 + src += copied; 58 + size -= copied; 59 + } 60 + return copied < 0 ? -EFAULT : 0; 61 + }
+5 -6
arch/s390/mm/mmap.c
··· 28 #include <linux/mm.h> 29 #include <linux/module.h> 30 #include <asm/pgalloc.h> 31 32 /* 33 * Top of mmap area (just below the process stack). ··· 56 /* 57 * Force standard allocation for 64 bit programs. 58 */ 59 - if (!test_thread_flag(TIF_31BIT)) 60 return 1; 61 #endif 62 return sysctl_legacy_va_layout || ··· 92 93 int s390_mmap_check(unsigned long addr, unsigned long len) 94 { 95 - if (!test_thread_flag(TIF_31BIT) && 96 len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) 97 return crst_table_upgrade(current->mm, 1UL << 53); 98 return 0; ··· 109 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); 110 if (!(area & ~PAGE_MASK)) 111 return area; 112 - if (area == -ENOMEM && 113 - !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) { 114 /* Upgrade the page table to 4 levels and retry. */ 115 rc = crst_table_upgrade(mm, 1UL << 53); 116 if (rc) ··· 131 area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); 132 if (!(area & ~PAGE_MASK)) 133 return area; 134 - if (area == -ENOMEM && 135 - !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) { 136 /* Upgrade the page table to 4 levels and retry. */ 137 rc = crst_table_upgrade(mm, 1UL << 53); 138 if (rc)
··· 28 #include <linux/mm.h> 29 #include <linux/module.h> 30 #include <asm/pgalloc.h> 31 + #include <asm/compat.h> 32 33 /* 34 * Top of mmap area (just below the process stack). ··· 55 /* 56 * Force standard allocation for 64 bit programs. 57 */ 58 + if (!is_compat_task()) 59 return 1; 60 #endif 61 return sysctl_legacy_va_layout || ··· 91 92 int s390_mmap_check(unsigned long addr, unsigned long len) 93 { 94 + if (!is_compat_task() && 95 len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) 96 return crst_table_upgrade(current->mm, 1UL << 53); 97 return 0; ··· 108 area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); 109 if (!(area & ~PAGE_MASK)) 110 return area; 111 + if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) { 112 /* Upgrade the page table to 4 levels and retry. */ 113 rc = crst_table_upgrade(mm, 1UL << 53); 114 if (rc) ··· 131 area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); 132 if (!(area & ~PAGE_MASK)) 133 return area; 134 + if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) { 135 /* Upgrade the page table to 4 levels and retry. */ 136 rc = crst_table_upgrade(mm, 1UL << 53); 137 if (rc)
+13 -3
arch/s390/mm/pgtable.c
··· 1 /* 2 - * arch/s390/mm/pgtable.c 3 - * 4 - * Copyright IBM Corp. 2007 5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 6 */ 7 ··· 50 } 51 52 #endif 53 54 unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) 55 {
··· 1 /* 2 + * Copyright IBM Corp. 2007,2009 3 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 4 */ 5 ··· 52 } 53 54 #endif 55 + 56 + unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE; 57 + EXPORT_SYMBOL(VMALLOC_START); 58 + 59 + static int __init parse_vmalloc(char *arg) 60 + { 61 + if (!arg) 62 + return -EINVAL; 63 + VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK; 64 + return 0; 65 + } 66 + early_param("vmalloc", parse_vmalloc); 67 68 unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) 69 {
+26 -8
drivers/s390/block/dasd.c
··· 851 852 /* Check the cqr */ 853 rc = dasd_check_cqr(cqr); 854 - if (rc) 855 return rc; 856 device = (struct dasd_device *) cqr->startdev; 857 if (cqr->retries < 0) { 858 /* internal error 14 - start_IO run out of retries */ ··· 917 BUG(); 918 break; 919 } 920 return rc; 921 } 922 ··· 1457 dasd_add_request_tail(cqr); 1458 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 1459 1460 - /* Request status is either done or failed. */ 1461 - rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; 1462 return rc; 1463 } 1464 ··· 1484 dasd_cancel_req(cqr); 1485 /* wait (non-interruptible) for final status */ 1486 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 1487 } 1488 - rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; 1489 return rc; 1490 } 1491 ··· 1537 1538 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 1539 1540 - /* Request status is either done or failed. */ 1541 - rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; 1542 return rc; 1543 } 1544 ··· 2445 2446 2447 int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic, 2448 - void **rdc_buffer, int rdc_buffer_size) 2449 { 2450 int ret; 2451 struct dasd_ccw_req *cqr; 2452 2453 - cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size, 2454 magic); 2455 if (IS_ERR(cqr)) 2456 return PTR_ERR(cqr);
··· 851 852 /* Check the cqr */ 853 rc = dasd_check_cqr(cqr); 854 + if (rc) { 855 + cqr->intrc = rc; 856 return rc; 857 + } 858 device = (struct dasd_device *) cqr->startdev; 859 if (cqr->retries < 0) { 860 /* internal error 14 - start_IO run out of retries */ ··· 915 BUG(); 916 break; 917 } 918 + cqr->intrc = rc; 919 return rc; 920 } 921 ··· 1454 dasd_add_request_tail(cqr); 1455 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 1456 1457 + if (cqr->status == DASD_CQR_DONE) 1458 + rc = 0; 1459 + else if (cqr->intrc) 1460 + rc = cqr->intrc; 1461 + else 1462 + rc = -EIO; 1463 return rc; 1464 } 1465 ··· 1477 dasd_cancel_req(cqr); 1478 /* wait (non-interruptible) for final status */ 1479 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 1480 + cqr->intrc = rc; 1481 } 1482 + 1483 + if (cqr->status == DASD_CQR_DONE) 1484 + rc = 0; 1485 + else if (cqr->intrc) 1486 + rc = cqr->intrc; 1487 + else 1488 + rc = -EIO; 1489 return rc; 1490 } 1491 ··· 1523 1524 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 1525 1526 + if (cqr->status == DASD_CQR_DONE) 1527 + rc = 0; 1528 + else if (cqr->intrc) 1529 + rc = cqr->intrc; 1530 + else 1531 + rc = -EIO; 1532 return rc; 1533 } 1534 ··· 2427 2428 2429 int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic, 2430 + void *rdc_buffer, int rdc_buffer_size) 2431 { 2432 int ret; 2433 struct dasd_ccw_req *cqr; 2434 2435 + cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size, 2436 magic); 2437 if (IS_ERR(cqr)) 2438 return PTR_ERR(cqr);
+1
drivers/s390/block/dasd_diag.c
··· 202 rc = -EIO; 203 break; 204 } 205 return rc; 206 } 207
··· 202 rc = -EIO; 203 break; 204 } 205 + cqr->intrc = rc; 206 return rc; 207 } 208
+28 -16
drivers/s390/block/dasd_eckd.c
··· 1097 { 1098 struct dasd_eckd_private *private; 1099 struct dasd_block *block; 1100 - void *rdc_data; 1101 int is_known, rc; 1102 1103 private = (struct dasd_eckd_private *) device->private; 1104 - if (private == NULL) { 1105 - private = kzalloc(sizeof(struct dasd_eckd_private), 1106 - GFP_KERNEL | GFP_DMA); 1107 - if (private == NULL) { 1108 dev_warn(&device->cdev->dev, 1109 "Allocating memory for private DASD data " 1110 "failed\n"); 1111 return -ENOMEM; 1112 } 1113 device->private = (void *) private; 1114 } 1115 /* Invalidate status of initial analysis. */ 1116 private->init_cqr_status = -1; ··· 1161 goto out_err3; 1162 1163 /* Read Device Characteristics */ 1164 - rdc_data = (void *) &(private->rdc_data); 1165 - memset(rdc_data, 0, sizeof(rdc_data)); 1166 - rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64); 1167 if (rc) { 1168 DBF_EVENT(DBF_WARNING, 1169 "Read device characteristics failed, rc=%d for " ··· 1182 private->rdc_data.dev_model, 1183 private->rdc_data.cu_type, 1184 private->rdc_data.cu_model.model, 1185 - private->real_cyl, 1186 private->rdc_data.trk_per_cyl, 1187 private->rdc_data.sec_per_trk); 1188 return 0; ··· 2335 { 2336 int tpm, cmdrtd, cmdwtd; 2337 int use_prefix; 2338 - 2339 - struct dasd_eckd_private *private; 2340 int fcx_in_css, fcx_in_gneq, fcx_in_features; 2341 struct dasd_device *basedev; 2342 sector_t first_rec, last_rec; 2343 sector_t first_trk, last_trk; ··· 2361 last_offs = sector_div(last_trk, blk_per_trk); 2362 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 2363 2364 - /* is transport mode supported ? */ 2365 fcx_in_css = css_general_characteristics.fcx; 2366 fcx_in_gneq = private->gneq->reserved2[7] & 0x04; 2367 fcx_in_features = private->features.feature[40] & 0x80; 2368 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features; 2369 2370 /* is read track data and write track data in command mode supported? 
*/ 2371 cmdrtd = private->features.feature[9] & 0x20; ··· 3017 " I/O status report for device %s:\n", 3018 dev_name(&device->cdev->dev)); 3019 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3020 - " in req: %p CS: 0x%02X DS: 0x%02X\n", req, 3021 - scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw)); 3022 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3023 " device %s: Failing CCW: %p\n", 3024 dev_name(&device->cdev->dev), ··· 3120 " I/O status report for device %s:\n", 3121 dev_name(&device->cdev->dev)); 3122 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3123 - " in req: %p CS: 0x%02X DS: 0x%02X " 3124 "fcxs: 0x%02X schxs: 0x%02X\n", req, 3125 scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), 3126 irb->scsw.tm.fcxs, irb->scsw.tm.schxs); 3127 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3128 " device %s: Failing TCW: %p\n", ··· 3279 static int __init 3280 dasd_eckd_init(void) 3281 { 3282 ASCEBC(dasd_eckd_discipline.ebcname, 4); 3283 - return ccw_driver_register(&dasd_eckd_driver); 3284 } 3285 3286 static void __exit
··· 1097 { 1098 struct dasd_eckd_private *private; 1099 struct dasd_block *block; 1100 int is_known, rc; 1101 1102 private = (struct dasd_eckd_private *) device->private; 1103 + if (!private) { 1104 + private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); 1105 + if (!private) { 1106 dev_warn(&device->cdev->dev, 1107 "Allocating memory for private DASD data " 1108 "failed\n"); 1109 return -ENOMEM; 1110 } 1111 device->private = (void *) private; 1112 + } else { 1113 + memset(private, 0, sizeof(*private)); 1114 } 1115 /* Invalidate status of initial analysis. */ 1116 private->init_cqr_status = -1; ··· 1161 goto out_err3; 1162 1163 /* Read Device Characteristics */ 1164 + rc = dasd_generic_read_dev_chars(device, "ECKD", &private->rdc_data, 1165 + 64); 1166 if (rc) { 1167 DBF_EVENT(DBF_WARNING, 1168 "Read device characteristics failed, rc=%d for " ··· 1183 private->rdc_data.dev_model, 1184 private->rdc_data.cu_type, 1185 private->rdc_data.cu_model.model, 1186 + private->real_cyl, 1187 private->rdc_data.trk_per_cyl, 1188 private->rdc_data.sec_per_trk); 1189 return 0; ··· 2336 { 2337 int tpm, cmdrtd, cmdwtd; 2338 int use_prefix; 2339 + #if defined(CONFIG_64BIT) 2340 int fcx_in_css, fcx_in_gneq, fcx_in_features; 2341 + #endif 2342 + struct dasd_eckd_private *private; 2343 struct dasd_device *basedev; 2344 sector_t first_rec, last_rec; 2345 sector_t first_trk, last_trk; ··· 2361 last_offs = sector_div(last_trk, blk_per_trk); 2362 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 2363 2364 + /* is transport mode supported? */ 2365 + #if defined(CONFIG_64BIT) 2366 fcx_in_css = css_general_characteristics.fcx; 2367 fcx_in_gneq = private->gneq->reserved2[7] & 0x04; 2368 fcx_in_features = private->features.feature[40] & 0x80; 2369 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features; 2370 + #else 2371 + tpm = 0; 2372 + #endif 2373 2374 /* is read track data and write track data in command mode supported? 
*/ 2375 cmdrtd = private->features.feature[9] & 0x20; ··· 3013 " I/O status report for device %s:\n", 3014 dev_name(&device->cdev->dev)); 3015 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3016 + " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", 3017 + req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), 3018 + scsw_cc(&irb->scsw), req->intrc); 3019 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3020 " device %s: Failing CCW: %p\n", 3021 dev_name(&device->cdev->dev), ··· 3115 " I/O status report for device %s:\n", 3116 dev_name(&device->cdev->dev)); 3117 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3118 + " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d " 3119 "fcxs: 0x%02X schxs: 0x%02X\n", req, 3120 scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), 3121 + scsw_cc(&irb->scsw), req->intrc, 3122 irb->scsw.tm.fcxs, irb->scsw.tm.schxs); 3123 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3124 " device %s: Failing TCW: %p\n", ··· 3273 static int __init 3274 dasd_eckd_init(void) 3275 { 3276 + int ret; 3277 + 3278 ASCEBC(dasd_eckd_discipline.ebcname, 4); 3279 + ret = ccw_driver_register(&dasd_eckd_driver); 3280 + if (!ret) 3281 + wait_for_device_probe(); 3282 + 3283 + return ret; 3284 } 3285 3286 static void __exit
+14 -8
drivers/s390/block/dasd_fba.c
··· 122 struct dasd_block *block; 123 struct dasd_fba_private *private; 124 struct ccw_device *cdev = device->cdev; 125 - void *rdc_data; 126 int rc; 127 128 private = (struct dasd_fba_private *) device->private; 129 - if (private == NULL) { 130 - private = kzalloc(sizeof(struct dasd_fba_private), 131 - GFP_KERNEL | GFP_DMA); 132 - if (private == NULL) { 133 dev_warn(&device->cdev->dev, 134 "Allocating memory for private DASD " 135 "data failed\n"); 136 return -ENOMEM; 137 } 138 device->private = (void *) private; 139 } 140 block = dasd_alloc_block(); 141 if (IS_ERR(block)) { ··· 150 block->base = device; 151 152 /* Read Device Characteristics */ 153 - rdc_data = (void *) &(private->rdc_data); 154 - rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32); 155 if (rc) { 156 DBF_EVENT(DBF_WARNING, "Read device characteristics returned " 157 "error %d for device: %s", ··· 604 static int __init 605 dasd_fba_init(void) 606 { 607 ASCEBC(dasd_fba_discipline.ebcname, 4); 608 - return ccw_driver_register(&dasd_fba_driver); 609 } 610 611 static void __exit
··· 122 struct dasd_block *block; 123 struct dasd_fba_private *private; 124 struct ccw_device *cdev = device->cdev; 125 int rc; 126 127 private = (struct dasd_fba_private *) device->private; 128 + if (!private) { 129 + private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); 130 + if (!private) { 131 dev_warn(&device->cdev->dev, 132 "Allocating memory for private DASD " 133 "data failed\n"); 134 return -ENOMEM; 135 } 136 device->private = (void *) private; 137 + } else { 138 + memset(private, 0, sizeof(*private)); 139 } 140 block = dasd_alloc_block(); 141 if (IS_ERR(block)) { ··· 150 block->base = device; 151 152 /* Read Device Characteristics */ 153 + rc = dasd_generic_read_dev_chars(device, "FBA ", &private->rdc_data, 154 + 32); 155 if (rc) { 156 DBF_EVENT(DBF_WARNING, "Read device characteristics returned " 157 "error %d for device: %s", ··· 604 static int __init 605 dasd_fba_init(void) 606 { 607 + int ret; 608 + 609 ASCEBC(dasd_fba_discipline.ebcname, 4); 610 + ret = ccw_driver_register(&dasd_fba_driver); 611 + if (!ret) 612 + wait_for_device_probe(); 613 + 614 + return ret; 615 } 616 617 static void __exit
+2 -1
drivers/s390/block/dasd_int.h
··· 173 void *data; /* pointer to data area */ 174 175 /* these are important for recovering erroneous requests */ 176 struct irb irb; /* device status in case of an error */ 177 struct dasd_ccw_req *refers; /* ERP-chain queueing. */ 178 void *function; /* originating ERP action */ ··· 579 int dasd_generic_notify(struct ccw_device *, int); 580 void dasd_generic_handle_state_change(struct dasd_device *); 581 582 - int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int); 583 char *dasd_get_sense(struct irb *); 584 585 /* externals in dasd_devmap.c */
··· 173 void *data; /* pointer to data area */ 174 175 /* these are important for recovering erroneous requests */ 176 + int intrc; /* internal error, e.g. from start_IO */ 177 struct irb irb; /* device status in case of an error */ 178 struct dasd_ccw_req *refers; /* ERP-chain queueing. */ 179 void *function; /* originating ERP action */ ··· 578 int dasd_generic_notify(struct ccw_device *, int); 579 void dasd_generic_handle_state_change(struct dasd_device *); 580 581 + int dasd_generic_read_dev_chars(struct dasd_device *, char *, void *, int); 582 char *dasd_get_sense(struct irb *); 583 584 /* externals in dasd_devmap.c */
+2 -2
drivers/s390/block/dcssblk.c
··· 127 found = 0; 128 // test if minor available 129 list_for_each_entry(entry, &dcssblk_devices, lh) 130 - if (minor == MINOR(disk_devt(entry->gd))) 131 found++; 132 if (!found) break; // got unused minor 133 } ··· 625 if (rc) 626 goto release_gd; 627 sprintf(dev_info->gd->disk_name, "dcssblk%d", 628 - MINOR(disk_devt(dev_info->gd))); 629 list_add_tail(&dev_info->lh, &dcssblk_devices); 630 631 if (!try_module_get(THIS_MODULE)) {
··· 127 found = 0; 128 // test if minor available 129 list_for_each_entry(entry, &dcssblk_devices, lh) 130 + if (minor == entry->gd->first_minor) 131 found++; 132 if (!found) break; // got unused minor 133 } ··· 625 if (rc) 626 goto release_gd; 627 sprintf(dev_info->gd->disk_name, "dcssblk%d", 628 + dev_info->gd->first_minor); 629 list_add_tail(&dev_info->lh, &dcssblk_devices); 630 631 if (!try_module_get(THIS_MODULE)) {
+14 -24
drivers/s390/char/con3270.c
··· 64 #define CON_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */ 65 #define CON_UPDATE_LIST 2 /* Update lines in tty3270->update. */ 66 #define CON_UPDATE_STATUS 4 /* Update status line. */ 67 - #define CON_UPDATE_ALL 7 68 69 static void con3270_update(struct con3270 *); 70 ··· 73 */ 74 static void con3270_set_timer(struct con3270 *cp, int expires) 75 { 76 - if (expires == 0) { 77 - if (timer_pending(&cp->timer)) 78 - del_timer(&cp->timer); 79 - return; 80 - } 81 - if (timer_pending(&cp->timer) && 82 - mod_timer(&cp->timer, jiffies + expires)) 83 - return; 84 - cp->timer.function = (void (*)(unsigned long)) con3270_update; 85 - cp->timer.data = (unsigned long) cp; 86 - cp->timer.expires = jiffies + expires; 87 - add_timer(&cp->timer); 88 } 89 90 /* ··· 217 218 spin_lock_irqsave(&cp->view.lock, flags); 219 updated = 0; 220 if (cp->update_flags & CON_UPDATE_ERASE) { 221 /* Use erase write alternate to initialize display. */ 222 raw3270_request_set_cmd(wrq, TC_EWRITEA); ··· 300 deactivate = 1; 301 break; 302 case 0x6d: /* clear: start from scratch. 
*/ 303 - con3270_rebuild_update(cp); 304 cp->update_flags = CON_UPDATE_ALL; 305 con3270_set_timer(cp, 1); 306 break; ··· 379 static int 380 con3270_activate(struct raw3270_view *view) 381 { 382 - unsigned long flags; 383 struct con3270 *cp; 384 385 cp = (struct con3270 *) view; 386 - spin_lock_irqsave(&cp->view.lock, flags); 387 - cp->nr_up = 0; 388 - con3270_rebuild_update(cp); 389 - con3270_update_status(cp); 390 cp->update_flags = CON_UPDATE_ALL; 391 con3270_set_timer(cp, 1); 392 - spin_unlock_irqrestore(&cp->view.lock, flags); 393 return 0; 394 } 395 396 static void 397 con3270_deactivate(struct raw3270_view *view) 398 { 399 - unsigned long flags; 400 struct con3270 *cp; 401 402 cp = (struct con3270 *) view; 403 - spin_lock_irqsave(&cp->view.lock, flags); 404 del_timer(&cp->timer); 405 - spin_unlock_irqrestore(&cp->view.lock, flags); 406 } 407 408 static int ··· 492 con3270_cline_end(cp); 493 } 494 /* Setup timer to output current console buffer after 1/10 second */ 495 if (cp->view.dev && !timer_pending(&cp->timer)) 496 con3270_set_timer(cp, HZ/10); 497 spin_unlock_irqrestore(&cp->view.lock,flags); ··· 613 614 INIT_LIST_HEAD(&condev->lines); 615 INIT_LIST_HEAD(&condev->update); 616 - init_timer(&condev->timer); 617 tasklet_init(&condev->readlet, 618 (void (*)(unsigned long)) con3270_read_tasklet, 619 (unsigned long) condev->read);
··· 64 #define CON_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */ 65 #define CON_UPDATE_LIST 2 /* Update lines in tty3270->update. */ 66 #define CON_UPDATE_STATUS 4 /* Update status line. */ 67 + #define CON_UPDATE_ALL 8 /* Recreate screen. */ 68 69 static void con3270_update(struct con3270 *); 70 ··· 73 */ 74 static void con3270_set_timer(struct con3270 *cp, int expires) 75 { 76 + if (expires == 0) 77 + del_timer(&cp->timer); 78 + else 79 + mod_timer(&cp->timer, jiffies + expires); 80 } 81 82 /* ··· 225 226 spin_lock_irqsave(&cp->view.lock, flags); 227 updated = 0; 228 + if (cp->update_flags & CON_UPDATE_ALL) { 229 + con3270_rebuild_update(cp); 230 + con3270_update_status(cp); 231 + cp->update_flags = CON_UPDATE_ERASE | CON_UPDATE_LIST | 232 + CON_UPDATE_STATUS; 233 + } 234 if (cp->update_flags & CON_UPDATE_ERASE) { 235 /* Use erase write alternate to initialize display. */ 236 raw3270_request_set_cmd(wrq, TC_EWRITEA); ··· 302 deactivate = 1; 303 break; 304 case 0x6d: /* clear: start from scratch. 
*/ 305 cp->update_flags = CON_UPDATE_ALL; 306 con3270_set_timer(cp, 1); 307 break; ··· 382 static int 383 con3270_activate(struct raw3270_view *view) 384 { 385 struct con3270 *cp; 386 387 cp = (struct con3270 *) view; 388 cp->update_flags = CON_UPDATE_ALL; 389 con3270_set_timer(cp, 1); 390 return 0; 391 } 392 393 static void 394 con3270_deactivate(struct raw3270_view *view) 395 { 396 struct con3270 *cp; 397 398 cp = (struct con3270 *) view; 399 del_timer(&cp->timer); 400 } 401 402 static int ··· 504 con3270_cline_end(cp); 505 } 506 /* Setup timer to output current console buffer after 1/10 second */ 507 + cp->nr_up = 0; 508 if (cp->view.dev && !timer_pending(&cp->timer)) 509 con3270_set_timer(cp, HZ/10); 510 spin_unlock_irqrestore(&cp->view.lock,flags); ··· 624 625 INIT_LIST_HEAD(&condev->lines); 626 INIT_LIST_HEAD(&condev->update); 627 + setup_timer(&condev->timer, (void (*)(unsigned long)) con3270_update, 628 + (unsigned long) condev); 629 tasklet_init(&condev->readlet, 630 (void (*)(unsigned long)) con3270_read_tasklet, 631 (unsigned long) condev->read);
+18 -39
drivers/s390/char/tty3270.c
··· 112 #define TTY_UPDATE_LIST 2 /* Update lines in tty3270->update. */ 113 #define TTY_UPDATE_INPUT 4 /* Update input line. */ 114 #define TTY_UPDATE_STATUS 8 /* Update status line. */ 115 - #define TTY_UPDATE_ALL 15 116 117 static void tty3270_update(struct tty3270 *); 118 ··· 121 */ 122 static void tty3270_set_timer(struct tty3270 *tp, int expires) 123 { 124 - if (expires == 0) { 125 - if (timer_pending(&tp->timer) && del_timer(&tp->timer)) 126 - raw3270_put_view(&tp->view); 127 - return; 128 - } 129 - if (timer_pending(&tp->timer) && 130 - mod_timer(&tp->timer, jiffies + expires)) 131 - return; 132 - raw3270_get_view(&tp->view); 133 - tp->timer.function = (void (*)(unsigned long)) tty3270_update; 134 - tp->timer.data = (unsigned long) tp; 135 - tp->timer.expires = jiffies + expires; 136 - add_timer(&tp->timer); 137 } 138 139 /* ··· 328 tp = (struct tty3270 *) rq->view; 329 if (rq->rc != 0) { 330 /* Write wasn't successfull. Refresh all. */ 331 - tty3270_rebuild_update(tp); 332 tp->update_flags = TTY_UPDATE_ALL; 333 tty3270_set_timer(tp, 1); 334 } ··· 356 357 spin_lock(&tp->view.lock); 358 updated = 0; 359 if (tp->update_flags & TTY_UPDATE_ERASE) { 360 /* Use erase write alternate to erase display. */ 361 raw3270_request_set_cmd(wrq, TC_EWRITEA); ··· 421 xchg(&tp->write, wrq); 422 } 423 spin_unlock(&tp->view.lock); 424 - raw3270_put_view(&tp->view); 425 } 426 427 /* ··· 565 tty3270_set_timer(tp, 1); 566 } else if (tp->input->string[0] == 0x6d) { 567 /* Display has been cleared. Redraw. 
*/ 568 - tty3270_rebuild_update(tp); 569 tp->update_flags = TTY_UPDATE_ALL; 570 tty3270_set_timer(tp, 1); 571 } ··· 635 tty3270_activate(struct raw3270_view *view) 636 { 637 struct tty3270 *tp; 638 - unsigned long flags; 639 640 tp = (struct tty3270 *) view; 641 - spin_lock_irqsave(&tp->view.lock, flags); 642 - tp->nr_up = 0; 643 - tty3270_rebuild_update(tp); 644 - tty3270_update_status(tp); 645 tp->update_flags = TTY_UPDATE_ALL; 646 tty3270_set_timer(tp, 1); 647 - spin_unlock_irqrestore(&tp->view.lock, flags); 648 return 0; 649 } 650 651 static void 652 tty3270_deactivate(struct raw3270_view *view) 653 { 654 } 655 656 static int ··· 735 { 736 int pages; 737 738 kbd_free(tp->kbd); 739 raw3270_request_free(tp->kreset); 740 raw3270_request_free(tp->read); ··· 882 INIT_LIST_HEAD(&tp->update); 883 INIT_LIST_HEAD(&tp->rcl_lines); 884 tp->rcl_max = 20; 885 - init_timer(&tp->timer); 886 tasklet_init(&tp->readlet, 887 (void (*)(unsigned long)) tty3270_read_tasklet, 888 (unsigned long) tp->read); ··· 1748 .set_termios = tty3270_set_termios 1749 }; 1750 1751 - static void tty3270_notifier(int index, int active) 1752 - { 1753 - if (active) 1754 - tty_register_device(tty3270_driver, index, NULL); 1755 - else 1756 - tty_unregister_device(tty3270_driver, index); 1757 - } 1758 - 1759 /* 1760 * 3270 tty registration code called from tty_init(). 1761 * Most kernel services (incl. kmalloc) are available at this poimt. ··· 1782 return ret; 1783 } 1784 tty3270_driver = driver; 1785 - ret = raw3270_register_notifier(tty3270_notifier); 1786 - if (ret) { 1787 - put_tty_driver(driver); 1788 - return ret; 1789 - 1790 - } 1791 return 0; 1792 } 1793 ··· 1790 { 1791 struct tty_driver *driver; 1792 1793 - raw3270_unregister_notifier(tty3270_notifier); 1794 driver = tty3270_driver; 1795 tty3270_driver = NULL; 1796 tty_unregister_driver(driver);
··· 112 #define TTY_UPDATE_LIST 2 /* Update lines in tty3270->update. */ 113 #define TTY_UPDATE_INPUT 4 /* Update input line. */ 114 #define TTY_UPDATE_STATUS 8 /* Update status line. */ 115 + #define TTY_UPDATE_ALL 16 /* Recreate screen. */ 116 117 static void tty3270_update(struct tty3270 *); 118 ··· 121 */ 122 static void tty3270_set_timer(struct tty3270 *tp, int expires) 123 { 124 + if (expires == 0) 125 + del_timer(&tp->timer); 126 + else 127 + mod_timer(&tp->timer, jiffies + expires); 128 } 129 130 /* ··· 337 tp = (struct tty3270 *) rq->view; 338 if (rq->rc != 0) { 339 /* Write wasn't successfull. Refresh all. */ 340 tp->update_flags = TTY_UPDATE_ALL; 341 tty3270_set_timer(tp, 1); 342 } ··· 366 367 spin_lock(&tp->view.lock); 368 updated = 0; 369 + if (tp->update_flags & TTY_UPDATE_ALL) { 370 + tty3270_rebuild_update(tp); 371 + tty3270_update_status(tp); 372 + tp->update_flags = TTY_UPDATE_ERASE | TTY_UPDATE_LIST | 373 + TTY_UPDATE_INPUT | TTY_UPDATE_STATUS; 374 + } 375 if (tp->update_flags & TTY_UPDATE_ERASE) { 376 /* Use erase write alternate to erase display. */ 377 raw3270_request_set_cmd(wrq, TC_EWRITEA); ··· 425 xchg(&tp->write, wrq); 426 } 427 spin_unlock(&tp->view.lock); 428 } 429 430 /* ··· 570 tty3270_set_timer(tp, 1); 571 } else if (tp->input->string[0] == 0x6d) { 572 /* Display has been cleared. Redraw. 
*/ 573 tp->update_flags = TTY_UPDATE_ALL; 574 tty3270_set_timer(tp, 1); 575 } ··· 641 tty3270_activate(struct raw3270_view *view) 642 { 643 struct tty3270 *tp; 644 645 tp = (struct tty3270 *) view; 646 tp->update_flags = TTY_UPDATE_ALL; 647 tty3270_set_timer(tp, 1); 648 return 0; 649 } 650 651 static void 652 tty3270_deactivate(struct raw3270_view *view) 653 { 654 + struct tty3270 *tp; 655 + 656 + tp = (struct tty3270 *) view; 657 + del_timer(&tp->timer); 658 } 659 660 static int ··· 743 { 744 int pages; 745 746 + del_timer_sync(&tp->timer); 747 kbd_free(tp->kbd); 748 raw3270_request_free(tp->kreset); 749 raw3270_request_free(tp->read); ··· 889 INIT_LIST_HEAD(&tp->update); 890 INIT_LIST_HEAD(&tp->rcl_lines); 891 tp->rcl_max = 20; 892 + setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update, 893 + (unsigned long) tp); 894 tasklet_init(&tp->readlet, 895 (void (*)(unsigned long)) tty3270_read_tasklet, 896 (unsigned long) tp->read); ··· 1754 .set_termios = tty3270_set_termios 1755 }; 1756 1757 /* 1758 * 3270 tty registration code called from tty_init(). 1759 * Most kernel services (incl. kmalloc) are available at this poimt. ··· 1796 return ret; 1797 } 1798 tty3270_driver = driver; 1799 return 0; 1800 } 1801 ··· 1810 { 1811 struct tty_driver *driver; 1812 1813 driver = tty3270_driver; 1814 tty3270_driver = NULL; 1815 tty_unregister_driver(driver);
+3 -3
drivers/s390/cio/cio.c
··· 12 #define KMSG_COMPONENT "cio" 13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14 15 #include <linux/module.h> 16 #include <linux/init.h> 17 #include <linux/slab.h> ··· 29 #include <asm/chpid.h> 30 #include <asm/airq.h> 31 #include <asm/isc.h> 32 - #include <asm/cpu.h> 33 #include <asm/fcx.h> 34 #include <asm/nmi.h> 35 #include <asm/crw.h> ··· 627 * handlers). 628 * 629 */ 630 - void 631 - do_IRQ (struct pt_regs *regs) 632 { 633 struct tpi_info *tpi_info; 634 struct subchannel *sch;
··· 12 #define KMSG_COMPONENT "cio" 13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14 15 + #include <linux/ftrace.h> 16 #include <linux/module.h> 17 #include <linux/init.h> 18 #include <linux/slab.h> ··· 28 #include <asm/chpid.h> 29 #include <asm/airq.h> 30 #include <asm/isc.h> 31 + #include <asm/cputime.h> 32 #include <asm/fcx.h> 33 #include <asm/nmi.h> 34 #include <asm/crw.h> ··· 626 * handlers). 627 * 628 */ 629 + void __irq_entry do_IRQ(struct pt_regs *regs) 630 { 631 struct tpi_info *tpi_info; 632 struct subchannel *sch;
+8 -16
drivers/s390/cio/device_ops.c
··· 114 struct subchannel *sch; 115 int ret; 116 117 - if (!cdev) 118 return -ENODEV; 119 if (cdev->private->state == DEV_STATE_NOT_OPER) 120 return -ENODEV; ··· 122 cdev->private->state != DEV_STATE_W4SENSE) 123 return -EINVAL; 124 sch = to_subchannel(cdev->dev.parent); 125 - if (!sch) 126 - return -ENODEV; 127 ret = cio_clear(sch); 128 if (ret == 0) 129 cdev->private->intparm = intparm; ··· 159 struct subchannel *sch; 160 int ret; 161 162 - if (!cdev) 163 return -ENODEV; 164 sch = to_subchannel(cdev->dev.parent); 165 - if (!sch) 166 - return -ENODEV; 167 if (cdev->private->state == DEV_STATE_NOT_OPER) 168 return -ENODEV; 169 if (cdev->private->state == DEV_STATE_VERIFY || ··· 335 struct subchannel *sch; 336 int ret; 337 338 - if (!cdev) 339 return -ENODEV; 340 if (cdev->private->state == DEV_STATE_NOT_OPER) 341 return -ENODEV; ··· 343 cdev->private->state != DEV_STATE_W4SENSE) 344 return -EINVAL; 345 sch = to_subchannel(cdev->dev.parent); 346 - if (!sch) 347 - return -ENODEV; 348 ret = cio_halt(sch); 349 if (ret == 0) 350 cdev->private->intparm = intparm; ··· 366 { 367 struct subchannel *sch; 368 369 - if (!cdev) 370 return -ENODEV; 371 sch = to_subchannel(cdev->dev.parent); 372 - if (!sch) 373 - return -ENODEV; 374 if (cdev->private->state == DEV_STATE_NOT_OPER) 375 return -ENODEV; 376 if (cdev->private->state != DEV_STATE_ONLINE || ··· 463 { 464 struct subchannel *sch; 465 466 - sch = to_subchannel(cdev->dev.parent); 467 - if (!sch) 468 return 0; 469 - else 470 - return sch->lpm; 471 } 472 473 /*
··· 114 struct subchannel *sch; 115 int ret; 116 117 + if (!cdev || !cdev->dev.parent) 118 return -ENODEV; 119 if (cdev->private->state == DEV_STATE_NOT_OPER) 120 return -ENODEV; ··· 122 cdev->private->state != DEV_STATE_W4SENSE) 123 return -EINVAL; 124 sch = to_subchannel(cdev->dev.parent); 125 ret = cio_clear(sch); 126 if (ret == 0) 127 cdev->private->intparm = intparm; ··· 161 struct subchannel *sch; 162 int ret; 163 164 + if (!cdev || !cdev->dev.parent) 165 return -ENODEV; 166 sch = to_subchannel(cdev->dev.parent); 167 if (cdev->private->state == DEV_STATE_NOT_OPER) 168 return -ENODEV; 169 if (cdev->private->state == DEV_STATE_VERIFY || ··· 339 struct subchannel *sch; 340 int ret; 341 342 + if (!cdev || !cdev->dev.parent) 343 return -ENODEV; 344 if (cdev->private->state == DEV_STATE_NOT_OPER) 345 return -ENODEV; ··· 347 cdev->private->state != DEV_STATE_W4SENSE) 348 return -EINVAL; 349 sch = to_subchannel(cdev->dev.parent); 350 ret = cio_halt(sch); 351 if (ret == 0) 352 cdev->private->intparm = intparm; ··· 372 { 373 struct subchannel *sch; 374 375 + if (!cdev || !cdev->dev.parent) 376 return -ENODEV; 377 sch = to_subchannel(cdev->dev.parent); 378 if (cdev->private->state == DEV_STATE_NOT_OPER) 379 return -ENODEV; 380 if (cdev->private->state != DEV_STATE_ONLINE || ··· 471 { 472 struct subchannel *sch; 473 474 + if (!cdev->dev.parent) 475 return 0; 476 + 477 + sch = to_subchannel(cdev->dev.parent); 478 + return sch->lpm; 479 } 480 481 /*
+17 -37
drivers/s390/cio/qdio_main.c
··· 881 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 882 } 883 884 - static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, 885 - int dstat) 886 - { 887 - struct qdio_irq *irq_ptr = cdev->private->qdio_data; 888 - 889 - if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { 890 - DBF_ERROR("EQ:ck con"); 891 - goto error; 892 - } 893 - 894 - if (!(dstat & DEV_STAT_DEV_END)) { 895 - DBF_ERROR("EQ:no dev"); 896 - goto error; 897 - } 898 - 899 - if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) { 900 - DBF_ERROR("EQ: bad io"); 901 - goto error; 902 - } 903 - return 0; 904 - error: 905 - DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); 906 - DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); 907 - 908 - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 909 - return 1; 910 - } 911 - 912 static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, 913 int dstat) 914 { 915 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 916 917 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); 918 - if (!qdio_establish_check_errors(cdev, cstat, dstat)) 919 - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); 920 } 921 922 /* qdio interrupt handler */ ··· 930 } 931 } 932 qdio_irq_check_sense(irq_ptr, irb); 933 - 934 cstat = irb->scsw.cmd.cstat; 935 dstat = irb->scsw.cmd.dstat; 936 ··· 937 case QDIO_IRQ_STATE_INACTIVE: 938 qdio_establish_handle_irq(cdev, cstat, dstat); 939 break; 940 - 941 case QDIO_IRQ_STATE_CLEANUP: 942 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 943 break; 944 - 945 case QDIO_IRQ_STATE_ESTABLISHED: 946 case QDIO_IRQ_STATE_ACTIVE: 947 if (cstat & SCHN_STAT_PCI) { 948 qdio_int_handler_pci(irq_ptr); 949 return; 950 } 951 - if ((cstat & ~SCHN_STAT_PCI) || dstat) { 952 qdio_handle_activate_check(cdev, intparm, cstat, 953 dstat); 954 - break; 955 - } 956 default: 957 WARN_ON(1); 958 } ··· 1494 1495 if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || 1496 (count > QDIO_MAX_BUFFERS_PER_Q) || 1497 - (q_nr > QDIO_MAX_QUEUES_PER_IRQ)) 1498 return 
-EINVAL; 1499 1500 if (!count)
··· 881 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 882 } 883 884 static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, 885 int dstat) 886 { 887 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 888 889 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); 890 + 891 + if (cstat) 892 + goto error; 893 + if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END)) 894 + goto error; 895 + if (!(dstat & DEV_STAT_DEV_END)) 896 + goto error; 897 + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); 898 + return; 899 + 900 + error: 901 + DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); 902 + DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); 903 + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 904 } 905 906 /* qdio interrupt handler */ ··· 946 } 947 } 948 qdio_irq_check_sense(irq_ptr, irb); 949 cstat = irb->scsw.cmd.cstat; 950 dstat = irb->scsw.cmd.dstat; 951 ··· 954 case QDIO_IRQ_STATE_INACTIVE: 955 qdio_establish_handle_irq(cdev, cstat, dstat); 956 break; 957 case QDIO_IRQ_STATE_CLEANUP: 958 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 959 break; 960 case QDIO_IRQ_STATE_ESTABLISHED: 961 case QDIO_IRQ_STATE_ACTIVE: 962 if (cstat & SCHN_STAT_PCI) { 963 qdio_int_handler_pci(irq_ptr); 964 return; 965 } 966 + if (cstat || dstat) 967 qdio_handle_activate_check(cdev, intparm, cstat, 968 dstat); 969 + break; 970 default: 971 WARN_ON(1); 972 } ··· 1514 1515 if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || 1516 (count > QDIO_MAX_BUFFERS_PER_Q) || 1517 + (q_nr >= QDIO_MAX_QUEUES_PER_IRQ)) 1518 return -EINVAL; 1519 1520 if (!count)
-12
drivers/s390/cio/qdio_perf.c
··· 25 static struct proc_dir_entry *qdio_perf_pde; 26 #endif 27 28 - inline void qdio_perf_stat_inc(atomic_long_t *count) 29 - { 30 - if (qdio_performance_stats) 31 - atomic_long_inc(count); 32 - } 33 - 34 - inline void qdio_perf_stat_dec(atomic_long_t *count) 35 - { 36 - if (qdio_performance_stats) 37 - atomic_long_dec(count); 38 - } 39 - 40 /* 41 * procfs functions 42 */
··· 25 static struct proc_dir_entry *qdio_perf_pde; 26 #endif 27 28 /* 29 * procfs functions 30 */
+6 -4
drivers/s390/cio/qdio_perf.h
··· 9 #define QDIO_PERF_H 10 11 #include <linux/types.h> 12 - #include <linux/device.h> 13 #include <asm/atomic.h> 14 15 struct qdio_perf_stats { ··· 49 extern struct qdio_perf_stats perf_stats; 50 extern int qdio_performance_stats; 51 52 int qdio_setup_perf_stats(void); 53 void qdio_remove_perf_stats(void); 54 - 55 - extern void qdio_perf_stat_inc(atomic_long_t *count); 56 - extern void qdio_perf_stat_dec(atomic_long_t *count); 57 58 #endif
··· 9 #define QDIO_PERF_H 10 11 #include <linux/types.h> 12 #include <asm/atomic.h> 13 14 struct qdio_perf_stats { ··· 50 extern struct qdio_perf_stats perf_stats; 51 extern int qdio_performance_stats; 52 53 + static inline void qdio_perf_stat_inc(atomic_long_t *count) 54 + { 55 + if (qdio_performance_stats) 56 + atomic_long_inc(count); 57 + } 58 + 59 int qdio_setup_perf_stats(void); 60 void qdio_remove_perf_stats(void); 61 62 #endif
+1 -1
mm/maccess.c
··· 39 * Safely write to address @dst from the buffer at @src. If a kernel fault 40 * happens, handle that and return -EFAULT. 41 */ 42 - long probe_kernel_write(void *dst, void *src, size_t size) 43 { 44 long ret; 45 mm_segment_t old_fs = get_fs();
··· 39 * Safely write to address @dst from the buffer at @src. If a kernel fault 40 * happens, handle that and return -EFAULT. 41 */ 42 + long notrace __weak probe_kernel_write(void *dst, void *src, size_t size) 43 { 44 long ret; 45 mm_segment_t old_fs = get_fs();
+13
scripts/recordmcount.pl
··· 185 $objcopy .= " -O elf32-i386"; 186 $cc .= " -m32"; 187 188 } elsif ($arch eq "sh") { 189 $alignment = 2; 190
··· 185 $objcopy .= " -O elf32-i386"; 186 $cc .= " -m32"; 187 188 + } elsif ($arch eq "s390" && $bits == 32) { 189 + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_32\\s+_mcount\$"; 190 + $alignment = 4; 191 + $ld .= " -m elf_s390"; 192 + $cc .= " -m31"; 193 + 194 + } elsif ($arch eq "s390" && $bits == 64) { 195 + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$"; 196 + $alignment = 8; 197 + $type = ".quad"; 198 + $ld .= " -m elf64_s390"; 199 + $cc .= " -m64"; 200 + 201 } elsif ($arch eq "sh") { 202 $alignment = 2; 203