Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
ftrace: fix modular build
ftrace: disable tracing on acpi idle calls
ftrace: remove latency-tracer leftover
ftrace: only trace preempt off with preempt tracer
ftrace: fix 4d3702b6 (post-v2.6.26): WARNING: at kernel/lockdep.c:2731 check_flags (ftrace)

+28 -16
+6
drivers/acpi/processor_idle.c
··· 272 272 /* Common C-state entry for C2, C3, .. */ 273 273 static void acpi_cstate_enter(struct acpi_processor_cx *cstate) 274 274 { 275 + /* Don't trace irqs off for idle */ 276 + stop_critical_timings(); 275 277 if (cstate->entry_method == ACPI_CSTATE_FFH) { 276 278 /* Call into architectural FFH based C-state */ 277 279 acpi_processor_ffh_cstate_enter(cstate); ··· 286 284 gets asserted in time to freeze execution properly. */ 287 285 unused = inl(acpi_gbl_FADT.xpm_timer_block.address); 288 286 } 287 + start_critical_timings(); 289 288 } 290 289 #endif /* !CONFIG_CPU_IDLE */ 291 290 ··· 1421 1418 */ 1422 1419 static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) 1423 1420 { 1421 + /* Don't trace irqs off for idle */ 1422 + stop_critical_timings(); 1424 1423 if (cx->entry_method == ACPI_CSTATE_FFH) { 1425 1424 /* Call into architectural FFH based C-state */ 1426 1425 acpi_processor_ffh_cstate_enter(cx); ··· 1437 1432 gets asserted in time to freeze execution properly. */ 1438 1433 unused = inl(acpi_gbl_FADT.xpm_timer_block.address); 1439 1434 } 1435 + start_critical_timings(); 1440 1436 } 1441 1437 1442 1438 /**
-3
kernel/trace/trace.c
··· 1203 1203 1204 1204 iter->pos = *pos; 1205 1205 1206 - if (last_ent && !ent) 1207 - seq_puts(m, "\n\nvim:ft=help\n"); 1208 - 1209 1206 return ent; 1210 1207 } 1211 1208
+6 -2
kernel/trace/trace_irqsoff.c
··· 253 253 if (preempt_trace() || irq_trace()) 254 254 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 255 255 } 256 + EXPORT_SYMBOL_GPL(start_critical_timings); 256 257 257 258 void stop_critical_timings(void) 258 259 { 259 260 if (preempt_trace() || irq_trace()) 260 261 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 261 262 } 263 + EXPORT_SYMBOL_GPL(stop_critical_timings); 262 264 263 265 #ifdef CONFIG_IRQSOFF_TRACER 264 266 #ifdef CONFIG_PROVE_LOCKING ··· 339 337 #ifdef CONFIG_PREEMPT_TRACER 340 338 void trace_preempt_on(unsigned long a0, unsigned long a1) 341 339 { 342 - stop_critical_timing(a0, a1); 340 + if (preempt_trace()) 341 + stop_critical_timing(a0, a1); 343 342 } 344 343 345 344 void trace_preempt_off(unsigned long a0, unsigned long a1) 346 345 { 347 - start_critical_timing(a0, a1); 346 + if (preempt_trace()) 347 + start_critical_timing(a0, a1); 348 348 } 349 349 #endif /* CONFIG_PREEMPT_TRACER */ 350 350
+16 -11
kernel/trace/trace_sched_wakeup.c
··· 26 26 static int wakeup_cpu; 27 27 static unsigned wakeup_prio = -1; 28 28 29 - static DEFINE_SPINLOCK(wakeup_lock); 29 + static raw_spinlock_t wakeup_lock = 30 + (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 30 31 31 32 static void __wakeup_reset(struct trace_array *tr); 32 33 ··· 57 56 if (unlikely(disabled != 1)) 58 57 goto out; 59 58 60 - spin_lock_irqsave(&wakeup_lock, flags); 59 + local_irq_save(flags); 60 + __raw_spin_lock(&wakeup_lock); 61 61 62 62 if (unlikely(!wakeup_task)) 63 63 goto unlock; ··· 73 71 trace_function(tr, data, ip, parent_ip, flags); 74 72 75 73 unlock: 76 - spin_unlock_irqrestore(&wakeup_lock, flags); 74 + __raw_spin_unlock(&wakeup_lock); 75 + local_irq_restore(flags); 77 76 78 77 out: 79 78 atomic_dec(&data->disabled); ··· 148 145 if (likely(disabled != 1)) 149 146 goto out; 150 147 151 - spin_lock_irqsave(&wakeup_lock, flags); 148 + local_irq_save(flags); 149 + __raw_spin_lock(&wakeup_lock); 152 150 153 151 /* We could race with grabbing wakeup_lock */ 154 152 if (unlikely(!tracer_enabled || next != wakeup_task)) ··· 178 174 179 175 out_unlock: 180 176 __wakeup_reset(tr); 181 - spin_unlock_irqrestore(&wakeup_lock, flags); 177 + __raw_spin_unlock(&wakeup_lock); 178 + local_irq_restore(flags); 182 179 out: 183 180 atomic_dec(&tr->data[cpu]->disabled); 184 181 } ··· 214 209 struct trace_array_cpu *data; 215 210 int cpu; 216 211 217 - assert_spin_locked(&wakeup_lock); 218 - 219 212 for_each_possible_cpu(cpu) { 220 213 data = tr->data[cpu]; 221 214 tracing_reset(data); ··· 232 229 { 233 230 unsigned long flags; 234 231 235 - spin_lock_irqsave(&wakeup_lock, flags); 232 + local_irq_save(flags); 233 + __raw_spin_lock(&wakeup_lock); 236 234 __wakeup_reset(tr); 237 - spin_unlock_irqrestore(&wakeup_lock, flags); 235 + __raw_spin_unlock(&wakeup_lock); 236 + local_irq_restore(flags); 238 237 } 239 238 240 239 static void ··· 257 252 goto out; 258 253 259 254 /* interrupts should be off from try_to_wake_up */ 260 - spin_lock(&wakeup_lock); 255 + __raw_spin_lock(&wakeup_lock); 261 256 262 257 /* check for races. */ 263 258 if (!tracer_enabled || p->prio >= wakeup_prio) ··· 279 274 CALLER_ADDR1, CALLER_ADDR2, flags); 280 275 281 276 out_locked: 282 - spin_unlock(&wakeup_lock); 277 + __raw_spin_unlock(&wakeup_lock); 283 278 out: 284 279 atomic_dec(&tr->data[cpu]->disabled); 285 280 }