Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: stacktrace/lockdep/irqflags tracing support.

Wire up all of the essentials for lockdep.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

+289 -103
+8
arch/sh/Kconfig
··· 51 51 config ARCH_MAY_HAVE_PC_FDC 52 52 bool 53 53 54 + config STACKTRACE_SUPPORT 55 + bool 56 + default y 57 + 58 + config LOCKDEP_SUPPORT 59 + bool 60 + default y 61 + 54 62 source "init/Kconfig" 55 63 56 64 menu "System type"
+4
arch/sh/Kconfig.debug
··· 1 1 menu "Kernel hacking" 2 2 3 + config TRACE_IRQFLAGS_SUPPORT 4 + bool 5 + default y 6 + 3 7 source "lib/Kconfig.debug" 4 8 5 9 config SH_STANDARD_BIOS
+1
arch/sh/kernel/Makefile
··· 21 21 obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o 22 22 obj-$(CONFIG_APM) += apm.o 23 23 obj-$(CONFIG_PM) += pm.o 24 + obj-$(CONFIG_STACKTRACE) += stacktrace.o
+16
arch/sh/kernel/cpu/sh2/entry.S
··· 184 184 add r15,r8 185 185 mov.l r9,@r8 186 186 mov r9,r8 187 + #ifdef CONFIG_TRACE_IRQFLAGS 188 + mov.l 5f, r9 189 + jsr @r9 190 + nop 191 + #endif 187 192 sti 188 193 bra system_call 189 194 nop ··· 198 193 2: .long break_point_trap_software 199 194 3: .long NR_syscalls 200 195 4: .long sys_call_table 196 + #ifdef CONFIG_TRACE_IRQFLAGS 197 + 5: .long trace_hardirqs_on 198 + #endif 201 199 202 200 #if defined(CONFIG_SH_STANDARD_BIOS) 203 201 /* Unwind the stack and jmp to the debug entry */ ··· 263 255 264 256 restore_all: 265 257 cli 258 + #ifdef CONFIG_TRACE_IRQFLAGS 259 + mov.l 3f, r0 260 + jsr @r0 261 + nop 262 + #endif 266 263 mov r15,r0 267 264 mov.l $cpu_mode,r2 268 265 mov #OFF_SR,r3 ··· 320 307 .long __current_thread_info 321 308 $cpu_mode: 322 309 .long __cpu_mode 310 + #ifdef CONFIG_TRACE_IRQFLAGS 311 + 3: .long trace_hardirqs_off 312 + #endif 323 313 324 314 ! common exception handler 325 315 #include "../../entry-common.S"
+1 -1
arch/sh/kernel/cpu/sh3/entry.S
··· 140 140 mov.l 1f, r0 141 141 mov.l @r0, r6 ! address 142 142 mov.l 3f, r0 143 - sti 143 + 144 144 jmp @r0 145 145 mov r15, r4 ! regs 146 146
+63 -1
arch/sh/kernel/entry-common.S
··· 100 100 .align 2 101 101 ENTRY(exception_error) 102 102 ! 103 + #ifdef CONFIG_TRACE_IRQFLAGS 104 + mov.l 3f, r0 105 + jsr @r0 106 + nop 107 + #endif 103 108 sti 104 109 mov.l 2f, r0 105 110 jmp @r0 ··· 114 109 .align 2 115 110 1: .long break_point_trap_software 116 111 2: .long do_exception_error 112 + #ifdef CONFIG_TRACE_IRQFLAGS 113 + 3: .long trace_hardirqs_on 114 + #endif 117 115 118 116 .align 2 119 117 ret_from_exception: 120 118 preempt_stop() 119 + #ifdef CONFIG_TRACE_IRQFLAGS 120 + mov.l 4f, r0 121 + jsr @r0 122 + nop 123 + #endif 121 124 ENTRY(ret_from_irq) 122 125 ! 123 126 mov #OFF_SR, r0 ··· 156 143 mov.l 1f, r0 157 144 mov.l r0, @(TI_PRE_COUNT,r8) 158 145 146 + #ifdef CONFIG_TRACE_IRQFLAGS 147 + mov.l 3f, r0 148 + jsr @r0 149 + nop 150 + #endif 159 151 sti 160 152 mov.l 2f, r0 161 153 jsr @r0 ··· 168 150 mov #0, r0 169 151 mov.l r0, @(TI_PRE_COUNT,r8) 170 152 cli 153 + #ifdef CONFIG_TRACE_IRQFLAGS 154 + mov.l 4f, r0 155 + jsr @r0 156 + nop 157 + #endif 171 158 172 159 bra need_resched 173 160 nop 161 + 174 162 noresched: 175 163 bra __restore_all 176 164 nop ··· 184 160 .align 2 185 161 1: .long PREEMPT_ACTIVE 186 162 2: .long schedule 163 + #ifdef CONFIG_TRACE_IRQFLAGS 164 + 3: .long trace_hardirqs_on 165 + 4: .long trace_hardirqs_off 166 + #endif 187 167 #endif 188 168 189 169 ENTRY(resume_userspace) 190 170 ! r8: current_thread_info 191 171 cli 172 + #ifdef CONFIG_TRACE_IRQFLAGS 173 + mov.l 5f, r0 174 + jsr @r0 175 + nop 176 + #endif 192 177 mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags 193 178 tst #_TIF_WORK_MASK, r0 194 179 bt/s __restore_all ··· 243 210 jsr @r1 ! schedule 244 211 nop 245 212 cli 213 + #ifdef CONFIG_TRACE_IRQFLAGS 214 + mov.l 5f, r0 215 + jsr @r0 216 + nop 217 + #endif 246 218 ! 247 219 mov.l @(TI_FLAGS,r8), r0 ! 
current_thread_info->flags 248 220 tst #_TIF_WORK_MASK, r0 ··· 259 221 1: .long schedule 260 222 2: .long do_notify_resume 261 223 3: .long restore_all 224 + #ifdef CONFIG_TRACE_IRQFLAGS 225 + 4: .long trace_hardirqs_on 226 + 5: .long trace_hardirqs_off 227 + #endif 262 228 263 229 .align 2 264 230 syscall_exit_work: ··· 271 229 tst #_TIF_SYSCALL_TRACE, r0 272 230 bt/s work_pending 273 231 tst #_TIF_NEED_RESCHED, r0 232 + #ifdef CONFIG_TRACE_IRQFLAGS 233 + mov.l 5f, r0 234 + jsr @r0 235 + nop 236 + #endif 274 237 sti 275 238 ! XXX setup arguments... 276 239 mov.l 4f, r0 ! do_syscall_trace ··· 312 265 mov.l r0, @(OFF_R0,r15) ! Return value 313 266 314 267 __restore_all: 315 - mov.l 1f,r0 268 + mov.l 1f, r0 316 269 jmp @r0 317 270 nop 318 271 ··· 378 331 mov #OFF_TRA, r9 379 332 add r15, r9 380 333 mov.l r8, @r9 ! set TRA value to tra 334 + #ifdef CONFIG_TRACE_IRQFLAGS 335 + mov.l 5f, r10 336 + jsr @r10 337 + nop 338 + #endif 381 339 sti 340 + 382 341 ! 383 342 get_current_thread_info r8, r10 384 343 mov.l @(TI_FLAGS,r8), r8 ··· 408 355 ! 409 356 syscall_exit: 410 357 cli 358 + #ifdef CONFIG_TRACE_IRQFLAGS 359 + mov.l 6f, r0 360 + jsr @r0 361 + nop 362 + #endif 411 363 ! 412 364 get_current_thread_info r8, r0 413 365 mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags ··· 427 369 2: .long NR_syscalls 428 370 3: .long sys_call_table 429 371 4: .long do_syscall_trace 372 + #ifdef CONFIG_TRACE_IRQFLAGS 373 + 5: .long trace_hardirqs_on 374 + 6: .long trace_hardirqs_off 375 + #endif
+43
arch/sh/kernel/stacktrace.c
··· 1 + /* 2 + * arch/sh/kernel/stacktrace.c 3 + * 4 + * Stack trace management functions 5 + * 6 + * Copyright (C) 2006 Paul Mundt 7 + * 8 + * This file is subject to the terms and conditions of the GNU General Public 9 + * License. See the file "COPYING" in the main directory of this archive 10 + * for more details. 11 + */ 12 + #include <linux/sched.h> 13 + #include <linux/stacktrace.h> 14 + #include <linux/thread_info.h> 15 + #include <asm/ptrace.h> 16 + 17 + /* 18 + * Save stack-backtrace addresses into a stack_trace buffer. 19 + */ 20 + void save_stack_trace(struct stack_trace *trace, struct task_struct *task) 21 + { 22 + unsigned long *sp; 23 + 24 + if (!task) 25 + task = current; 26 + if (task == current) 27 + sp = (unsigned long *)current_stack_pointer; 28 + else 29 + sp = (unsigned long *)task->thread.sp; 30 + 31 + while (!kstack_end(sp)) { 32 + unsigned long addr = *sp++; 33 + 34 + if (__kernel_text_address(addr)) { 35 + if (trace->skip > 0) 36 + trace->skip--; 37 + else 38 + trace->entries[trace->nr_entries++] = addr; 39 + if (trace->nr_entries >= trace->max_entries) 40 + break; 41 + } 42 + } 43 + }
+3
arch/sh/mm/fault.c
··· 37 37 int si_code; 38 38 siginfo_t info; 39 39 40 + trace_hardirqs_on(); 41 + local_irq_enable(); 42 + 40 43 #ifdef CONFIG_SH_KGDB 41 44 if (kgdb_nofault && kgdb_bus_err_hook) 42 45 kgdb_bus_err_hook();
+123
include/asm-sh/irqflags.h
··· 1 + #ifndef __ASM_SH_IRQFLAGS_H 2 + #define __ASM_SH_IRQFLAGS_H 3 + 4 + static inline void raw_local_irq_enable(void) 5 + { 6 + unsigned long __dummy0, __dummy1; 7 + 8 + __asm__ __volatile__ ( 9 + "stc sr, %0\n\t" 10 + "and %1, %0\n\t" 11 + #ifdef CONFIG_CPU_HAS_SR_RB 12 + "stc r6_bank, %1\n\t" 13 + "or %1, %0\n\t" 14 + #endif 15 + "ldc %0, sr\n\t" 16 + : "=&r" (__dummy0), "=r" (__dummy1) 17 + : "1" (~0x000000f0) 18 + : "memory" 19 + ); 20 + } 21 + 22 + static inline void raw_local_irq_disable(void) 23 + { 24 + unsigned long flags; 25 + 26 + __asm__ __volatile__ ( 27 + "stc sr, %0\n\t" 28 + "or #0xf0, %0\n\t" 29 + "ldc %0, sr\n\t" 30 + : "=&z" (flags) 31 + : /* no inputs */ 32 + : "memory" 33 + ); 34 + } 35 + 36 + static inline void set_bl_bit(void) 37 + { 38 + unsigned long __dummy0, __dummy1; 39 + 40 + __asm__ __volatile__ ( 41 + "stc sr, %0\n\t" 42 + "or %2, %0\n\t" 43 + "and %3, %0\n\t" 44 + "ldc %0, sr\n\t" 45 + : "=&r" (__dummy0), "=r" (__dummy1) 46 + : "r" (0x10000000), "r" (0xffffff0f) 47 + : "memory" 48 + ); 49 + } 50 + 51 + static inline void clear_bl_bit(void) 52 + { 53 + unsigned long __dummy0, __dummy1; 54 + 55 + __asm__ __volatile__ ( 56 + "stc sr, %0\n\t" 57 + "and %2, %0\n\t" 58 + "ldc %0, sr\n\t" 59 + : "=&r" (__dummy0), "=r" (__dummy1) 60 + : "1" (~0x10000000) 61 + : "memory" 62 + ); 63 + } 64 + 65 + static inline unsigned long __raw_local_save_flags(void) 66 + { 67 + unsigned long flags; 68 + 69 + __asm__ __volatile__ ( 70 + "stc sr, %0\n\t" 71 + "and #0xf0, %0\n\t" 72 + : "=&z" (flags) 73 + : /* no inputs */ 74 + : "memory" 75 + ); 76 + 77 + return flags; 78 + } 79 + 80 + #define raw_local_save_flags(flags) \ 81 + do { (flags) = __raw_local_save_flags(); } while (0) 82 + 83 + static inline int raw_irqs_disabled_flags(unsigned long flags) 84 + { 85 + return (flags != 0); 86 + } 87 + 88 + static inline int raw_irqs_disabled(void) 89 + { 90 + unsigned long flags = __raw_local_save_flags(); 91 + 92 + return raw_irqs_disabled_flags(flags); 93 + } 
94 + 95 + static inline unsigned long __raw_local_irq_save(void) 96 + { 97 + unsigned long flags, __dummy; 98 + 99 + __asm__ __volatile__ ( 100 + "stc sr, %1\n\t" 101 + "mov %1, %0\n\t" 102 + "or #0xf0, %0\n\t" 103 + "ldc %0, sr\n\t" 104 + "mov %1, %0\n\t" 105 + "and #0xf0, %0\n\t" 106 + : "=&z" (flags), "=&r" (__dummy) 107 + : /* no inputs */ 108 + : "memory" 109 + ); 110 + 111 + return flags; 112 + } 113 + 114 + #define raw_local_irq_save(flags) \ 115 + do { (flags) = __raw_local_irq_save(); } while (0) 116 + 117 + static inline void raw_local_irq_restore(unsigned long flags) 118 + { 119 + if ((flags & 0xf0) != 0xf0) 120 + raw_local_irq_enable(); 121 + } 122 + 123 + #endif /* __ASM_SH_IRQFLAGS_H */
+26 -1
include/asm-sh/rwsem.h
··· 25 25 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 26 26 spinlock_t wait_lock; 27 27 struct list_head wait_list; 28 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 29 + struct lockdep_map dep_map; 30 + #endif 28 31 }; 32 + 33 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 34 + # define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } 35 + #else 36 + # define __RWSEM_DEP_MAP_INIT(lockname) 37 + #endif 29 38 30 39 #define __RWSEM_INITIALIZER(name) \ 31 40 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ 32 - LIST_HEAD_INIT((name).wait_list) } 41 + LIST_HEAD_INIT((name).wait_list) \ 42 + __RWSEM_DEP_MAP_INIT(name) } 33 43 34 44 #define DECLARE_RWSEM(name) \ 35 45 struct rw_semaphore name = __RWSEM_INITIALIZER(name) ··· 48 38 extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); 49 39 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); 50 40 extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); 41 + 42 + extern void __init_rwsem(struct rw_semaphore *sem, const char *name, 43 + struct lock_class_key *key); 44 + 45 + #define init_rwsem(sem) \ 46 + do { \ 47 + static struct lock_class_key __key; \ 48 + \ 49 + __init_rwsem((sem), #sem, &__key); \ 50 + } while (0) 51 51 52 52 static inline void init_rwsem(struct rw_semaphore *sem) 53 53 { ··· 159 139 tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count)); 160 140 if (tmp < 0) 161 141 rwsem_downgrade_wake(sem); 142 + } 143 + 144 + static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) 145 + { 146 + __down_write(sem); 162 147 } 163 148 164 149 /*
+1 -100
include/asm-sh/system.h
··· 6 6 * Copyright (C) 2002 Paul Mundt 7 7 */ 8 8 9 + #include <linux/irqflags.h> 9 10 #include <asm/types.h> 10 11 11 12 /* ··· 132 131 133 132 #define set_mb(var, value) do { xchg(&var, value); } while (0) 134 133 135 - /* Interrupt Control */ 136 - #ifdef CONFIG_CPU_HAS_SR_RB 137 - static inline void local_irq_enable(void) 138 - { 139 - unsigned long __dummy0, __dummy1; 140 - 141 - __asm__ __volatile__("stc sr, %0\n\t" 142 - "and %1, %0\n\t" 143 - "stc r6_bank, %1\n\t" 144 - "or %1, %0\n\t" 145 - "ldc %0, sr" 146 - : "=&r" (__dummy0), "=r" (__dummy1) 147 - : "1" (~0x000000f0) 148 - : "memory"); 149 - } 150 - #else 151 - static inline void local_irq_enable(void) 152 - { 153 - unsigned long __dummy0, __dummy1; 154 - 155 - __asm__ __volatile__ ( 156 - "stc sr, %0\n\t" 157 - "and %1, %0\n\t" 158 - "ldc %0, sr\n\t" 159 - : "=&r" (__dummy0), "=r" (__dummy1) 160 - : "1" (~0x000000f0) 161 - : "memory"); 162 - } 163 - #endif 164 - 165 - static inline void local_irq_disable(void) 166 - { 167 - unsigned long __dummy; 168 - __asm__ __volatile__("stc sr, %0\n\t" 169 - "or #0xf0, %0\n\t" 170 - "ldc %0, sr" 171 - : "=&z" (__dummy) 172 - : /* no inputs */ 173 - : "memory"); 174 - } 175 - 176 - static inline void set_bl_bit(void) 177 - { 178 - unsigned long __dummy0, __dummy1; 179 - 180 - __asm__ __volatile__ ("stc sr, %0\n\t" 181 - "or %2, %0\n\t" 182 - "and %3, %0\n\t" 183 - "ldc %0, sr" 184 - : "=&r" (__dummy0), "=r" (__dummy1) 185 - : "r" (0x10000000), "r" (0xffffff0f) 186 - : "memory"); 187 - } 188 - 189 - static inline void clear_bl_bit(void) 190 - { 191 - unsigned long __dummy0, __dummy1; 192 - 193 - __asm__ __volatile__ ("stc sr, %0\n\t" 194 - "and %2, %0\n\t" 195 - "ldc %0, sr" 196 - : "=&r" (__dummy0), "=r" (__dummy1) 197 - : "1" (~0x10000000) 198 - : "memory"); 199 - } 200 - 201 - #define local_save_flags(x) \ 202 - __asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" ) 203 - 204 - #define irqs_disabled() \ 205 - ({ \ 206 - unsigned long flags; \ 207 - 
local_save_flags(flags); \ 208 - (flags != 0); \ 209 - }) 210 - 211 - static inline unsigned long local_irq_save(void) 212 - { 213 - unsigned long flags, __dummy; 214 - 215 - __asm__ __volatile__("stc sr, %1\n\t" 216 - "mov %1, %0\n\t" 217 - "or #0xf0, %0\n\t" 218 - "ldc %0, sr\n\t" 219 - "mov %1, %0\n\t" 220 - "and #0xf0, %0" 221 - : "=&z" (flags), "=&r" (__dummy) 222 - :/**/ 223 - : "memory" ); 224 - return flags; 225 - } 226 - 227 - #define local_irq_restore(x) do { \ 228 - if ((x & 0x000000f0) != 0x000000f0) \ 229 - local_irq_enable(); \ 230 - } while (0) 231 - 232 134 /* 233 135 * Jump to P2 area. 234 136 * When handling TLB or caches, we need to do it from P2 area. ··· 167 263 "2:" \ 168 264 : "=&r" (__dummy)); \ 169 265 } while (0) 170 - 171 - /* For spinlocks etc */ 172 - #define local_irq_save(x) x = local_irq_save() 173 266 174 267 static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) 175 268 {