Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
sparc64: Fix clock event multiplier printf format.
sparc64: Use clock{source,events}_calc_mult_shift().
sparc64: Use free_bootmem_late() in mdesc_lmb_free().
sparc: Add alignment and emulation fault perf events.
sparc64: Add syscall tracepoint support.
sparc: Stop trying to be so fancy and use __builtin_{memcpy,memset}()
sparc: Use __builtin_object_size() to validate the buffer size for copy_from_user()
sparc64: Add some missing __kprobes annotations to kernel fault paths.
sparc64: Use kprobes_built_in() to avoid ifdefs in fault_64.c
sparc: Validate that kprobe address is 4-byte aligned.
sparc64: Don't specify IRQF_SHARED for LDC interrupts.
sparc64: Fix stack debugging IRQ stack regression.
sparc64: Fix overly strict range type matching for PCI devices.

+153 -209
+1
arch/sparc/Kconfig
··· 43 43 select HAVE_SYSCALL_WRAPPERS 44 44 select HAVE_DYNAMIC_FTRACE 45 45 select HAVE_FTRACE_MCOUNT_RECORD 46 + select HAVE_SYSCALL_TRACEPOINTS 46 47 select USE_GENERIC_SMP_HELPERS if SMP 47 48 select RTC_DRV_CMOS 48 49 select RTC_DRV_BQ4802
+14
arch/sparc/Kconfig.debug
··· 33 33 depends on MCOUNT 34 34 default y 35 35 36 + config DEBUG_STRICT_USER_COPY_CHECKS 37 + bool "Strict copy size checks" 38 + depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING 39 + ---help--- 40 + Enabling this option turns a certain set of sanity checks for user 41 + copy operations into compile time failures. 42 + 43 + The copy_from_user() etc checks are there to help test if there 44 + are sufficient security checks on the length argument of 45 + the copy operation, by having gcc prove that the argument is 46 + within bounds. 47 + 48 + If unsure, or if you run an older (pre 4.4) gcc, say N. 49 + 36 50 endmenu
+2 -76
arch/sparc/include/asm/string_32.h
··· 16 16 #ifdef __KERNEL__ 17 17 18 18 extern void __memmove(void *,const void *,__kernel_size_t); 19 - extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t); 20 - extern __kernel_size_t __memset(void *,int,__kernel_size_t); 21 19 22 20 #ifndef EXPORT_SYMTAB_STROPS 23 21 ··· 30 32 }) 31 33 32 34 #define __HAVE_ARCH_MEMCPY 33 - 34 - static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n) 35 - { 36 - extern void __copy_1page(void *, const void *); 37 - 38 - if(n <= 32) { 39 - __builtin_memcpy(to, from, n); 40 - } else if (((unsigned int) to & 7) != 0) { 41 - /* Destination is not aligned on the double-word boundary */ 42 - __memcpy(to, from, n); 43 - } else { 44 - switch(n) { 45 - case PAGE_SIZE: 46 - __copy_1page(to, from); 47 - break; 48 - default: 49 - __memcpy(to, from, n); 50 - break; 51 - } 52 - } 53 - return to; 54 - } 55 - 56 - static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n) 57 - { 58 - __memcpy(to, from, n); 59 - return to; 60 - } 61 - 62 - #undef memcpy 63 - #define memcpy(t, f, n) \ 64 - (__builtin_constant_p(n) ? \ 65 - __constant_memcpy((t),(f),(n)) : \ 66 - __nonconstant_memcpy((t),(f),(n))) 35 + #define memcpy(t, f, n) __builtin_memcpy(t, f, n) 67 36 68 37 #define __HAVE_ARCH_MEMSET 69 - 70 - static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count) 71 - { 72 - extern void bzero_1page(void *); 73 - extern __kernel_size_t __bzero(void *, __kernel_size_t); 74 - 75 - if(!c) { 76 - if(count == PAGE_SIZE) 77 - bzero_1page(s); 78 - else 79 - __bzero(s, count); 80 - } else { 81 - __memset(s, c, count); 82 - } 83 - return s; 84 - } 85 - 86 - static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count) 87 - { 88 - extern __kernel_size_t __bzero(void *, __kernel_size_t); 89 - 90 - if(!c) 91 - __bzero(s, count); 92 - else 93 - __memset(s, c, count); 94 - return s; 95 - } 96 - 97 - static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count) 98 - { 99 - __memset(s, c, count); 100 - return s; 101 - } 102 - 103 - #undef memset 104 - #define memset(s, c, count) \ 105 - (__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \ 106 - __constant_c_and_count_memset((s), (c), (count)) : \ 107 - __constant_c_memset((s), (c), (count))) \ 108 - : __nonconstant_memset((s), (c), (count))) 38 + #define memset(s, c, count) __builtin_memset(s, c, count) 109 39 110 40 #define __HAVE_ARCH_MEMSCAN 111 41
+2 -23
arch/sparc/include/asm/string_64.h
··· 15 15 16 16 #include <asm/asi.h> 17 17 18 - extern void *__memset(void *,int,__kernel_size_t); 19 - 20 18 #ifndef EXPORT_SYMTAB_STROPS 21 19 22 20 /* First the mem*() things. */ ··· 22 24 extern void *memmove(void *, const void *, __kernel_size_t); 23 25 24 26 #define __HAVE_ARCH_MEMCPY 25 - extern void *memcpy(void *, const void *, __kernel_size_t); 27 + #define memcpy(t, f, n) __builtin_memcpy(t, f, n) 26 28 27 29 #define __HAVE_ARCH_MEMSET 28 - extern void *__builtin_memset(void *,int,__kernel_size_t); 29 - 30 - static inline void *__constant_memset(void *s, int c, __kernel_size_t count) 31 - { 32 - extern __kernel_size_t __bzero(void *, __kernel_size_t); 33 - 34 - if (!c) { 35 - __bzero(s, count); 36 - return s; 37 - } else 38 - return __memset(s, c, count); 39 - } 40 - 41 - #undef memset 42 - #define memset(s, c, count) \ 43 - ((__builtin_constant_p(count) && (count) <= 32) ? \ 44 - __builtin_memset((s), (c), (count)) : \ 45 - (__builtin_constant_p(c) ? \ 46 - __constant_memset((s), (c), (count)) : \ 47 - __memset((s), (c), (count)))) 30 + #define memset(s, c, count) __builtin_memset(s, c, count) 48 31 49 32 #define __HAVE_ARCH_MEMSCAN 50 33
+2
arch/sparc/include/asm/thread_info_64.h
··· 227 227 /* flag bit 8 is available */ 228 228 #define TIF_SECCOMP 9 /* secure computing */ 229 229 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */ 230 + #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ 230 231 /* flag bit 11 is available */ 231 232 /* NOTE: Thread flags >= 12 should be ones we have no interest 232 233 * in using in assembly, else we can't use the mask as ··· 247 246 #define _TIF_32BIT (1<<TIF_32BIT) 248 247 #define _TIF_SECCOMP (1<<TIF_SECCOMP) 249 248 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 249 + #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 250 250 #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) 251 251 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 252 252 #define _TIF_FREEZE (1<<TIF_FREEZE)
+15
arch/sparc/include/asm/uaccess_32.h
··· 260 260 return __copy_user(to, (__force void __user *) from, n); 261 261 } 262 262 263 + extern void copy_from_user_overflow(void) 264 + #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS 265 + __compiletime_error("copy_from_user() buffer size is not provably correct") 266 + #else 267 + __compiletime_warning("copy_from_user() buffer size is not provably correct") 268 + #endif 269 + ; 270 + 263 271 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) 264 272 { 273 + int sz = __compiletime_object_size(to); 274 + 275 + if (unlikely(sz != -1 && sz < n)) { 276 + copy_from_user_overflow(); 277 + return -EFAULT; 278 + } 279 + 265 280 if (n && __access_ok((unsigned long) from, n)) 266 281 return __copy_user((__force void __user *) to, from, n); 267 282 else
+18 -3
arch/sparc/include/asm/uaccess_64.h
··· 6 6 */ 7 7 8 8 #ifdef __KERNEL__ 9 + #include <linux/errno.h> 9 10 #include <linux/compiler.h> 10 11 #include <linux/string.h> 11 12 #include <linux/thread_info.h> ··· 205 204 206 205 extern int __get_user_bad(void); 207 206 207 + extern void copy_from_user_overflow(void) 208 + #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS 209 + __compiletime_error("copy_from_user() buffer size is not provably correct") 210 + #else 211 + __compiletime_warning("copy_from_user() buffer size is not provably correct") 212 + #endif 213 + ; 214 + 208 215 extern unsigned long __must_check ___copy_from_user(void *to, 209 216 const void __user *from, 210 217 unsigned long size); ··· 221 212 static inline unsigned long __must_check 222 213 copy_from_user(void *to, const void __user *from, unsigned long size) 223 214 { 224 - unsigned long ret = ___copy_from_user(to, from, size); 215 + unsigned long ret = (unsigned long) -EFAULT; 216 + int sz = __compiletime_object_size(to); 225 217 226 - if (unlikely(ret)) 227 - ret = copy_from_user_fixup(to, from, size); 218 + if (likely(sz == -1 || sz >= size)) { 219 + ret = ___copy_from_user(to, from, size); 220 + if (unlikely(ret)) 221 + ret = copy_from_user_fixup(to, from, size); 222 + } else { 223 + copy_from_user_overflow(); 224 + } 228 225 return ret; 229 226 } 230 227 #define __copy_from_user copy_from_user
+1 -1
arch/sparc/include/asm/unistd.h
··· 398 398 #define __NR_perf_event_open 327 399 399 #define __NR_recvmmsg 328 400 400 401 - #define NR_SYSCALLS 329 401 + #define NR_syscalls 329 402 402 403 403 #ifdef __32bit_syscall_numbers__ 404 404 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
+1 -1
arch/sparc/kernel/entry.S
··· 1294 1294 sethi %hi(PSR_SYSCALL), %l4 1295 1295 or %l0, %l4, %l0 1296 1296 /* Direct access to user regs, must faster. */ 1297 - cmp %g1, NR_SYSCALLS 1297 + cmp %g1, NR_syscalls 1298 1298 bgeu linux_sparc_ni_syscall 1299 1299 sll %g1, 2, %l4 1300 1300 ld [%l7 + %l4], %l7
+11
arch/sparc/kernel/ftrace.c
··· 4 4 #include <linux/percpu.h> 5 5 #include <linux/init.h> 6 6 #include <linux/list.h> 7 + #include <trace/syscall.h> 7 8 8 9 #include <asm/ftrace.h> 9 10 ··· 92 91 } 93 92 #endif 94 93 94 + #ifdef CONFIG_FTRACE_SYSCALLS 95 + 96 + extern unsigned int sys_call_table[]; 97 + 98 + unsigned long __init arch_syscall_addr(int nr) 99 + { 100 + return (unsigned long)sys_call_table[nr]; 101 + } 102 + 103 + #endif
+3
arch/sparc/kernel/kprobes.c
··· 46 46 47 47 int __kprobes arch_prepare_kprobe(struct kprobe *p) 48 48 { 49 + if ((unsigned long) p->addr & 0x3UL) 50 + return -EILSEQ; 51 + 49 52 p->ainsn.insn[0] = *p->addr; 50 53 flushi(&p->ainsn.insn[0]); 51 54
+2 -2
arch/sparc/kernel/ldc.c
··· 1242 1242 snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); 1243 1243 1244 1244 err = request_irq(lp->cfg.rx_irq, ldc_rx, 1245 - IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED, 1245 + IRQF_SAMPLE_RANDOM | IRQF_DISABLED, 1246 1246 lp->rx_irq_name, lp); 1247 1247 if (err) 1248 1248 return err; 1249 1249 1250 1250 err = request_irq(lp->cfg.tx_irq, ldc_tx, 1251 - IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED, 1251 + IRQF_SAMPLE_RANDOM | IRQF_DISABLED, 1252 1252 lp->tx_irq_name, lp); 1253 1253 if (err) { 1254 1254 free_irq(lp->cfg.rx_irq, lp);
+6 -15
arch/sparc/kernel/mdesc.c
··· 10 10 #include <linux/slab.h> 11 11 #include <linux/mm.h> 12 12 #include <linux/miscdevice.h> 13 + #include <linux/bootmem.h> 13 14 14 15 #include <asm/cpudata.h> 15 16 #include <asm/hypervisor.h> ··· 109 108 110 109 static void mdesc_lmb_free(struct mdesc_handle *hp) 111 110 { 112 - unsigned int alloc_size, handle_size = hp->handle_size; 113 - unsigned long start, end; 111 + unsigned int alloc_size; 112 + unsigned long start; 114 113 115 114 BUG_ON(atomic_read(&hp->refcnt) != 0); 116 115 BUG_ON(!list_empty(&hp->list)); 117 116 118 - alloc_size = PAGE_ALIGN(handle_size); 119 - 120 - start = (unsigned long) hp; 121 - end = start + alloc_size; 122 - 123 - while (start < end) { 124 - struct page *p; 125 - 126 - p = virt_to_page(start); 127 - ClearPageReserved(p); 128 - __free_page(p); 129 - start += PAGE_SIZE; 130 - } 117 + alloc_size = PAGE_ALIGN(hp->handle_size); 118 + start = __pa(hp); 119 + free_bootmem_late(start, alloc_size); 131 120 } 132 121 133 122 static struct mdesc_mem_ops lmb_mdesc_ops = {
+12 -2
arch/sparc/kernel/of_device_64.c
··· 104 104 int i; 105 105 106 106 /* Check address type match */ 107 - if ((addr[0] ^ range[0]) & 0x03000000) 108 - return -EINVAL; 107 + if (!((addr[0] ^ range[0]) & 0x03000000)) 108 + goto type_match; 109 109 110 + /* Special exception, we can map a 64-bit address into 111 + * a 32-bit range. 112 + */ 113 + if ((addr[0] & 0x03000000) == 0x03000000 && 114 + (range[0] & 0x03000000) == 0x02000000) 115 + goto type_match; 116 + 117 + return -EINVAL; 118 + 119 + type_match: 110 120 if (of_out_of_range(addr + 1, range + 1, range + na + pna, 111 121 na - 1, ns)) 112 122 return -EINVAL;
+10
arch/sparc/kernel/ptrace_64.c
··· 23 23 #include <linux/signal.h> 24 24 #include <linux/regset.h> 25 25 #include <linux/tracehook.h> 26 + #include <trace/syscall.h> 26 27 #include <linux/compat.h> 27 28 #include <linux/elf.h> 28 29 ··· 37 36 #include <asm/page.h> 38 37 #include <asm/cpudata.h> 39 38 #include <asm/cacheflush.h> 39 + 40 + #define CREATE_TRACE_POINTS 41 + #include <trace/events/syscalls.h> 40 42 41 43 #include "entry.h" 42 44 ··· 1063 1059 if (test_thread_flag(TIF_SYSCALL_TRACE)) 1064 1060 ret = tracehook_report_syscall_entry(regs); 1065 1061 1062 + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 1063 + trace_sys_enter(regs, regs->u_regs[UREG_G1]); 1064 + 1066 1065 if (unlikely(current->audit_context) && !ret) 1067 1066 audit_syscall_entry((test_thread_flag(TIF_32BIT) ? 1068 1067 AUDIT_ARCH_SPARC : ··· 1090 1083 1091 1084 audit_syscall_exit(result, regs->u_regs[UREG_I0]); 1092 1085 } 1086 + 1087 + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 1088 + trace_sys_exit(regs, regs->u_regs[UREG_G1]); 1093 1089 1094 1090 if (test_thread_flag(TIF_SYSCALL_TRACE)) 1095 1091 tracehook_report_syscall_exit(regs, 0);
+7 -7
arch/sparc/kernel/syscalls.S
··· 62 62 #endif 63 63 .align 32 64 64 1: ldx [%g6 + TI_FLAGS], %l5 65 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 65 + andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 66 66 be,pt %icc, rtrap 67 67 nop 68 68 call syscall_trace_leave ··· 187 187 .globl linux_sparc_syscall32 188 188 linux_sparc_syscall32: 189 189 /* Direct access to user regs, much faster. */ 190 - cmp %g1, NR_SYSCALLS ! IEU1 Group 190 + cmp %g1, NR_syscalls ! IEU1 Group 191 191 bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI 192 192 srl %i0, 0, %o0 ! IEU0 193 193 sll %g1, 2, %l4 ! IEU0 Group ··· 198 198 199 199 srl %i5, 0, %o5 ! IEU1 200 200 srl %i2, 0, %o2 ! IEU0 Group 201 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 201 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 202 202 bne,pn %icc, linux_syscall_trace32 ! CTI 203 203 mov %i0, %l5 ! IEU1 204 204 call %l7 ! CTI Group brk forced ··· 210 210 .globl linux_sparc_syscall 211 211 linux_sparc_syscall: 212 212 /* Direct access to user regs, much faster. */ 213 - cmp %g1, NR_SYSCALLS ! IEU1 Group 213 + cmp %g1, NR_syscalls ! IEU1 Group 214 214 bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI 215 215 mov %i0, %o0 ! IEU0 216 216 sll %g1, 2, %l4 ! IEU0 Group ··· 221 221 222 222 mov %i3, %o3 ! IEU1 223 223 mov %i4, %o4 ! IEU0 Group 224 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0 224 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 225 225 bne,pn %icc, linux_syscall_trace ! CTI Group 226 226 mov %i0, %l5 ! IEU0 227 227 2: call %l7 ! CTI Group brk forced ··· 245 245 246 246 cmp %o0, -ERESTART_RESTARTBLOCK 247 247 bgeu,pn %xcc, 1f 248 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6 248 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 249 249 80: 250 250 /* System call success, clear Carry condition code. */ 251 251 andn %g3, %g2, %g3 ··· 260 260 /* System call failure, set Carry condition code. 261 261 * Also, get abs(errno) to return to the process. 262 262 */ 263 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6 263 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 264 264 sub %g0, %o0, %o0 265 265 or %g3, %g2, %g3 266 266 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+3 -23
arch/sparc/kernel/time_64.c
··· 774 774 static struct clocksource clocksource_tick = { 775 775 .rating = 100, 776 776 .mask = CLOCKSOURCE_MASK(64), 777 - .shift = 16, 778 777 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 779 778 }; 780 - 781 - static void __init setup_clockevent_multiplier(unsigned long hz) 782 - { 783 - unsigned long mult, shift = 32; 784 - 785 - while (1) { 786 - mult = div_sc(hz, NSEC_PER_SEC, shift); 787 - if (mult && (mult >> 32UL) == 0UL) 788 - break; 789 - 790 - shift--; 791 - } 792 - 793 - sparc64_clockevent.shift = shift; 794 - sparc64_clockevent.mult = mult; 795 - } 796 779 797 780 static unsigned long tb_ticks_per_usec __read_mostly; 798 781 ··· 811 828 clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT); 812 829 813 830 clocksource_tick.name = tick_ops->name; 814 - clocksource_tick.mult = 815 - clocksource_hz2mult(freq, 816 - clocksource_tick.shift); 831 + clocksource_calc_mult_shift(&clocksource_tick, freq, 4); 817 832 clocksource_tick.read = clocksource_tick_read; 818 833 819 834 printk("clocksource: mult[%x] shift[%d]\n", ··· 820 839 clocksource_register(&clocksource_tick); 821 840 822 841 sparc64_clockevent.name = tick_ops->name; 823 - 824 - setup_clockevent_multiplier(freq); 842 + clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4); 825 843 826 844 sparc64_clockevent.max_delta_ns = 827 845 clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent); 828 846 sparc64_clockevent.min_delta_ns = 829 847 clockevent_delta2ns(0xF, &sparc64_clockevent); 830 848 831 - printk("clockevent: mult[%ux] shift[%d]\n", 849 + printk("clockevent: mult[%x] shift[%d]\n", 832 850 sparc64_clockevent.mult, sparc64_clockevent.shift); 833 851 834 852 setup_sparc64_timer();
+3 -12
arch/sparc/kernel/unaligned_32.c
··· 17 17 #include <asm/uaccess.h> 18 18 #include <linux/smp.h> 19 19 #include <linux/smp_lock.h> 20 - 21 - /* #define DEBUG_MNA */ 20 + #include <linux/perf_event.h> 22 21 23 22 enum direction { 24 23 load, /* ld, ldd, ldh, ldsh */ ··· 27 28 fpstore, 28 29 invalid, 29 30 }; 30 - 31 - #ifdef DEBUG_MNA 32 - static char *dirstrings[] = { 33 - "load", "store", "both", "fpload", "fpstore", "invalid" 34 - }; 35 - #endif 36 31 37 32 static inline enum direction decode_direction(unsigned int insn) 38 33 { ··· 248 255 unsigned long addr = compute_effective_address(regs, insn); 249 256 int err; 250 257 251 - #ifdef DEBUG_MNA 252 - printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n", 253 - regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]); 254 - #endif 258 + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); 255 259 switch (dir) { 256 260 case load: 257 261 err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), ··· 340 350 } 341 351 342 352 addr = compute_effective_address(regs, insn); 353 + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); 343 354 switch(dir) { 344 355 case load: 345 356 err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
+9 -14
arch/sparc/kernel/unaligned_64.c
··· 20 20 #include <asm/uaccess.h> 21 21 #include <linux/smp.h> 22 22 #include <linux/bitops.h> 23 + #include <linux/perf_event.h> 23 24 #include <asm/fpumacro.h> 24 - 25 - /* #define DEBUG_MNA */ 26 25 27 26 enum direction { 28 27 load, /* ld, ldd, ldh, ldsh */ ··· 31 32 fpst, 32 33 invalid, 33 34 }; 34 - 35 - #ifdef DEBUG_MNA 36 - static char *dirstrings[] = { 37 - "load", "store", "both", "fpload", "fpstore", "invalid" 38 - }; 39 - #endif 40 35 41 36 static inline enum direction decode_direction(unsigned int insn) 42 37 { ··· 320 327 321 328 addr = compute_effective_address(regs, insn, 322 329 ((insn >> 25) & 0x1f)); 323 - #ifdef DEBUG_MNA 324 - printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] " 325 - "retpc[%016lx]\n", 326 - regs->tpc, dirstrings[dir], addr, size, 327 - regs->u_regs[UREG_RETPC]); 328 - #endif 330 + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); 329 331 switch (asi) { 330 332 case ASI_NL: 331 333 case ASI_AIUPL: ··· 387 399 int ret, i, rd = ((insn >> 25) & 0x1f); 388 400 int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; 389 401 402 + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); 390 403 if (insn & 0x2000) { 391 404 maybe_flush_windows(0, 0, rd, from_kernel); 392 405 value = sign_extend_imm13(insn); ··· 433 444 struct fpustate *f = FPUSTATE; 434 445 int asi = decode_asi(insn, regs); 435 446 int flag = (freg < 32) ? FPRS_DL : FPRS_DU; 447 + 448 + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); 436 449 437 450 save_and_clear_fpu(); 438 451 current_thread_info()->xfsr[0] &= ~0x1c000; ··· 557 566 int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; 558 567 unsigned long *reg; 559 568 569 + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); 570 + 560 571 maybe_flush_windows(0, 0, rd, from_kernel); 561 572 reg = fetch_reg_addr(rd, regs); 562 573 if (from_kernel || rd < 16) { ··· 589 596 590 597 if (tstate & TSTATE_PRIV) 591 598 die_if_kernel("lddfmna from kernel", regs); 599 + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); 592 600 if (test_thread_flag(TIF_32BIT)) 593 601 pc = (u32)pc; 594 602 if (get_user(insn, (u32 __user *) pc) != -EFAULT) { ··· 651 657 652 658 if (tstate & TSTATE_PRIV) 653 659 die_if_kernel("stdfmna from kernel", regs); 660 + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); 654 661 if (test_thread_flag(TIF_32BIT)) 655 662 pc = (u32)pc; 656 663 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
+3
arch/sparc/kernel/visemul.c
··· 5 5 #include <linux/kernel.h> 6 6 #include <linux/errno.h> 7 7 #include <linux/thread_info.h> 8 + #include <linux/perf_event.h> 8 9 9 10 #include <asm/ptrace.h> 10 11 #include <asm/pstate.h> ··· 801 800 unsigned int opf; 802 801 803 802 BUG_ON(regs->tstate & TSTATE_PRIV); 803 + 804 + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); 804 805 805 806 if (test_thread_flag(TIF_32BIT)) 806 807 pc = (u32)pc;
+1
arch/sparc/lib/Makefile
··· 44 44 obj-$(CONFIG_SPARC32) += atomic32.o 45 45 obj-y += ksyms.o 46 46 obj-$(CONFIG_SPARC64) += PeeCeeI.o 47 + obj-y += usercopy.o
-5
arch/sparc/lib/bzero.S
··· 6 6 7 7 .text 8 8 9 - .globl __memset 10 - .type __memset, #function 11 - __memset: /* %o0=buf, %o1=pat, %o2=len */ 12 - 13 9 .globl memset 14 10 .type memset, #function 15 11 memset: /* %o0=buf, %o1=pat, %o2=len */ ··· 79 83 retl 80 84 mov %o3, %o0 81 85 .size __bzero, .-__bzero 82 - .size __memset, .-__memset 83 86 .size memset, .-memset 84 87 85 88 #define EX_ST(x,y) \
+1 -1
arch/sparc/lib/checksum_32.S
··· 560 560 mov %i0, %o1 561 561 mov %i1, %o0 562 562 5: 563 - call __memcpy 563 + call memcpy 564 564 mov %i2, %o2 565 565 tst %o0 566 566 bne,a 2f
-2
arch/sparc/lib/ksyms.c
··· 30 30 EXPORT_SYMBOL(memcmp); 31 31 EXPORT_SYMBOL(memcpy); 32 32 EXPORT_SYMBOL(memset); 33 - EXPORT_SYMBOL(__memset); 34 33 EXPORT_SYMBOL(memmove); 35 34 EXPORT_SYMBOL(__bzero); 36 35 ··· 80 81 81 82 /* Special internal versions of library functions. */ 82 83 EXPORT_SYMBOL(__copy_1page); 83 - EXPORT_SYMBOL(__memcpy); 84 84 EXPORT_SYMBOL(__memmove); 85 85 EXPORT_SYMBOL(bzero_1page); 86 86
+3 -2
arch/sparc/lib/mcount.S
··· 64 64 2: sethi %hi(softirq_stack), %g3 65 65 or %g3, %lo(softirq_stack), %g3 66 66 ldx [%g3 + %g1], %g7 67 + sub %g7, STACK_BIAS, %g7 67 68 cmp %sp, %g7 68 - bleu,pt %xcc, 2f 69 + bleu,pt %xcc, 3f 69 70 sethi %hi(THREAD_SIZE), %g3 70 71 add %g7, %g3, %g7 71 72 cmp %sp, %g7 ··· 76 75 * again, we are already trying to output the stack overflow 77 76 * message. 78 77 */ 79 - sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough 78 + 3: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough 80 79 or %g7, %lo(ovstack), %g7 81 80 add %g7, OVSTACKSIZE, %g3 82 81 sub %g3, STACK_BIAS + 192, %g3
-3
arch/sparc/lib/memcpy.S
··· 543 543 b 3f 544 544 add %o0, 2, %o0 545 545 546 - #ifdef __KERNEL__ 547 - FUNC(__memcpy) 548 - #endif 549 546 FUNC(memcpy) /* %o0=dst %o1=src %o2=len */ 550 547 551 548 sub %o0, %o1, %o4
+1 -2
arch/sparc/lib/memset.S
··· 60 60 .globl __bzero_begin 61 61 __bzero_begin: 62 62 63 - .globl __bzero, __memset, 63 + .globl __bzero 64 64 .globl memset 65 65 .globl __memset_start, __memset_end 66 66 __memset_start: 67 - __memset: 68 67 memset: 69 68 and %o1, 0xff, %g3 70 69 sll %g3, 8, %g2
+8
arch/sparc/lib/usercopy.c
··· 1 + #include <linux/module.h> 2 + #include <linux/bug.h> 3 + 4 + void copy_from_user_overflow(void) 5 + { 6 + WARN(1, "Buffer overflow detected!\n"); 7 + } 8 + EXPORT_SYMBOL(copy_from_user_overflow);
+3
arch/sparc/math-emu/math_32.c
··· 67 67 #include <linux/types.h> 68 68 #include <linux/sched.h> 69 69 #include <linux/mm.h> 70 + #include <linux/perf_event.h> 70 71 #include <asm/uaccess.h> 71 72 72 73 #include "sfp-util_32.h" ··· 163 162 int i; 164 163 int retcode = 0; /* assume all succeed */ 165 164 unsigned long insn; 165 + 166 + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); 166 167 167 168 #ifdef DEBUG_MATHEMU 168 169 printk("In do_mathemu()... pc is %08lx\n", regs->pc);
+2
arch/sparc/math-emu/math_64.c
··· 11 11 #include <linux/types.h> 12 12 #include <linux/sched.h> 13 13 #include <linux/errno.h> 14 + #include <linux/perf_event.h> 14 15 15 16 #include <asm/fpumacro.h> 16 17 #include <asm/ptrace.h> ··· 184 183 185 184 if (tstate & TSTATE_PRIV) 186 185 die_if_kernel("unfinished/unimplemented FPop from kernel", regs); 186 + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); 187 187 if (test_thread_flag(TIF_32BIT)) 188 188 pc = (u32)pc; 189 189 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
+9 -15
arch/sparc/mm/fault_64.c
··· 31 31 #include <asm/sections.h> 32 32 #include <asm/mmu_context.h> 33 33 34 - #ifdef CONFIG_KPROBES 35 - static inline int notify_page_fault(struct pt_regs *regs) 34 + static inline __kprobes int notify_page_fault(struct pt_regs *regs) 36 35 { 37 36 int ret = 0; 38 37 39 38 /* kprobe_running() needs smp_processor_id() */ 40 - if (!user_mode(regs)) { 39 + if (kprobes_built_in() && !user_mode(regs)) { 41 40 preempt_disable(); 42 41 if (kprobe_running() && kprobe_fault_handler(regs, 0)) 43 42 ret = 1; ··· 44 45 } 45 46 return ret; 46 47 } 47 - #else 48 - static inline int notify_page_fault(struct pt_regs *regs) 49 - { 50 - return 0; 51 - } 52 - #endif 53 48 54 49 static void __kprobes unhandled_fault(unsigned long address, 55 50 struct task_struct *tsk, ··· 66 73 die_if_kernel("Oops", regs); 67 74 } 68 75 69 - static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) 76 + static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) 70 77 { 71 78 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", 72 79 regs->tpc); ··· 163 170 return insn; 164 171 } 165 172 166 - static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, 167 - unsigned int insn, unsigned long address) 173 + static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code, 174 + int fault_code, unsigned int insn, 175 + unsigned long address) 168 176 { 169 177 unsigned char asi = ASI_P; 170 178 ··· 219 225 unhandled_fault (address, current, regs); 220 226 } 221 227 222 - static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs) 228 + static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs) 223 229 { 224 230 static int times; 225 231 ··· 231 237 show_regs(regs); 232 238 } 233 239 234 - static void noinline bogus_32bit_fault_address(struct pt_regs *regs, 235 - unsigned long addr) 240 + static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs, 241 + unsigned long addr) 236 242 { 237 243 static int times; 238 244