Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/urgent

+187 -107
+2
include/linux/ftrace_event.h
··· 144 144 enum trace_reg { 145 145 TRACE_REG_REGISTER, 146 146 TRACE_REG_UNREGISTER, 147 + #ifdef CONFIG_PERF_EVENTS 147 148 TRACE_REG_PERF_REGISTER, 148 149 TRACE_REG_PERF_UNREGISTER, 149 150 TRACE_REG_PERF_OPEN, 150 151 TRACE_REG_PERF_CLOSE, 151 152 TRACE_REG_PERF_ADD, 152 153 TRACE_REG_PERF_DEL, 154 + #endif 153 155 }; 154 156 155 157 struct ftrace_event_call;
+9 -6
include/linux/kernel.h
··· 427 427 * Most likely, you want to use tracing_on/tracing_off. 428 428 */ 429 429 #ifdef CONFIG_RING_BUFFER 430 - void tracing_on(void); 431 - void tracing_off(void); 432 430 /* trace_off_permanent stops recording with no way to bring it back */ 433 431 void tracing_off_permanent(void); 434 - int tracing_is_on(void); 435 432 #else 436 - static inline void tracing_on(void) { } 437 - static inline void tracing_off(void) { } 438 433 static inline void tracing_off_permanent(void) { } 439 - static inline int tracing_is_on(void) { return 0; } 440 434 #endif 441 435 442 436 enum ftrace_dump_mode { ··· 440 446 }; 441 447 442 448 #ifdef CONFIG_TRACING 449 + void tracing_on(void); 450 + void tracing_off(void); 451 + int tracing_is_on(void); 452 + 443 453 extern void tracing_start(void); 444 454 extern void tracing_stop(void); 445 455 extern void ftrace_off_permanent(void); ··· 528 530 static inline void tracing_stop(void) { } 529 531 static inline void ftrace_off_permanent(void) { } 530 532 static inline void trace_dump_stack(void) { } 533 + 534 + static inline void tracing_on(void) { } 535 + static inline void tracing_off(void) { } 536 + static inline int tracing_is_on(void) { return 0; } 537 + 531 538 static inline int 532 539 trace_printk(const char *fmt, ...) 533 540 {
+3
include/linux/ring_buffer.h
··· 151 151 152 152 void ring_buffer_record_disable(struct ring_buffer *buffer); 153 153 void ring_buffer_record_enable(struct ring_buffer *buffer); 154 + void ring_buffer_record_off(struct ring_buffer *buffer); 155 + void ring_buffer_record_on(struct ring_buffer *buffer); 156 + int ring_buffer_record_is_on(struct ring_buffer *buffer); 154 157 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); 155 158 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu); 156 159
+1 -1
kernel/trace/Kconfig
··· 141 141 config FUNCTION_TRACER 142 142 bool "Kernel Function Tracer" 143 143 depends on HAVE_FUNCTION_TRACER 144 - select FRAME_POINTER if !ARM_UNWIND && !S390 && !MICROBLAZE 144 + select FRAME_POINTER if !ARM_UNWIND && !PPC && !S390 && !MICROBLAZE 145 145 select KALLSYMS 146 146 select GENERIC_TRACER 147 147 select CONTEXT_SWITCH_TRACER
+2 -1
kernel/trace/ftrace.c
··· 249 249 #else 250 250 __ftrace_trace_function = func; 251 251 #endif 252 - ftrace_trace_function = ftrace_test_stop_func; 252 + ftrace_trace_function = 253 + (func == ftrace_stub) ? func : ftrace_test_stop_func; 253 254 #endif 254 255 } 255 256
+60 -97
kernel/trace/ring_buffer.c
··· 154 154 155 155 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; 156 156 157 + /* Used for individual buffers (after the counter) */ 158 + #define RB_BUFFER_OFF (1 << 20) 159 + 157 160 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) 158 - 159 - /** 160 - * tracing_on - enable all tracing buffers 161 - * 162 - * This function enables all tracing buffers that may have been 163 - * disabled with tracing_off. 164 - */ 165 - void tracing_on(void) 166 - { 167 - set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); 168 - } 169 - EXPORT_SYMBOL_GPL(tracing_on); 170 - 171 - /** 172 - * tracing_off - turn off all tracing buffers 173 - * 174 - * This function stops all tracing buffers from recording data. 175 - * It does not disable any overhead the tracers themselves may 176 - * be causing. This function simply causes all recording to 177 - * the ring buffers to fail. 178 - */ 179 - void tracing_off(void) 180 - { 181 - clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); 182 - } 183 - EXPORT_SYMBOL_GPL(tracing_off); 184 161 185 162 /** 186 163 * tracing_off_permanent - permanently disable ring buffers ··· 169 192 { 170 193 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); 171 194 } 172 - 173 - /** 174 - * tracing_is_on - show state of ring buffers enabled 175 - */ 176 - int tracing_is_on(void) 177 - { 178 - return ring_buffer_flags == RB_BUFFERS_ON; 179 - } 180 - EXPORT_SYMBOL_GPL(tracing_is_on); 181 195 182 196 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) 183 197 #define RB_ALIGNMENT 4U ··· 2587 2619 EXPORT_SYMBOL_GPL(ring_buffer_record_enable); 2588 2620 2589 2621 /** 2622 + * ring_buffer_record_off - stop all writes into the buffer 2623 + * @buffer: The ring buffer to stop writes to. 2624 + * 2625 + * This prevents all writes to the buffer. Any attempt to write 2626 + * to the buffer after this will fail and return NULL. 
2627 + * 2628 + * This is different than ring_buffer_record_disable() as 2629 + * it works like an on/off switch, whereas the disable() version 2630 + * must be paired with an enable(). 2631 + */ 2632 + void ring_buffer_record_off(struct ring_buffer *buffer) 2633 + { 2634 + unsigned int rd; 2635 + unsigned int new_rd; 2636 + 2637 + do { 2638 + rd = atomic_read(&buffer->record_disabled); 2639 + new_rd = rd | RB_BUFFER_OFF; 2640 + } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 2641 + } 2642 + EXPORT_SYMBOL_GPL(ring_buffer_record_off); 2643 + 2644 + /** 2645 + * ring_buffer_record_on - restart writes into the buffer 2646 + * @buffer: The ring buffer to start writes to. 2647 + * 2648 + * This enables all writes to the buffer that was disabled by 2649 + * ring_buffer_record_off(). 2650 + * 2651 + * This is different than ring_buffer_record_enable() as 2652 + * it works like an on/off switch, whereas the enable() version 2653 + * must be paired with a disable(). 2654 + */ 2655 + void ring_buffer_record_on(struct ring_buffer *buffer) 2656 + { 2657 + unsigned int rd; 2658 + unsigned int new_rd; 2659 + 2660 + do { 2661 + rd = atomic_read(&buffer->record_disabled); 2662 + new_rd = rd & ~RB_BUFFER_OFF; 2663 + } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); 2664 + } 2665 + EXPORT_SYMBOL_GPL(ring_buffer_record_on); 2666 + 2667 + /** 2668 + * ring_buffer_record_is_on - return true if the ring buffer can write 2669 + * @buffer: The ring buffer to see if write is enabled 2670 + * 2671 + * Returns true if the ring buffer is in a state that it accepts writes. 2672 + */ 2673 + int ring_buffer_record_is_on(struct ring_buffer *buffer) 2674 + { 2675 + return !atomic_read(&buffer->record_disabled); 2676 + } 2677 + 2678 + /** 2590 2679 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 2591 2680 * @buffer: The ring buffer to stop writes to. 
2592 2681 * @cpu: The CPU buffer to stop ··· 4063 4038 return ret; 4064 4039 } 4065 4040 EXPORT_SYMBOL_GPL(ring_buffer_read_page); 4066 - 4067 - #ifdef CONFIG_TRACING 4068 - static ssize_t 4069 - rb_simple_read(struct file *filp, char __user *ubuf, 4070 - size_t cnt, loff_t *ppos) 4071 - { 4072 - unsigned long *p = filp->private_data; 4073 - char buf[64]; 4074 - int r; 4075 - 4076 - if (test_bit(RB_BUFFERS_DISABLED_BIT, p)) 4077 - r = sprintf(buf, "permanently disabled\n"); 4078 - else 4079 - r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p)); 4080 - 4081 - return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 4082 - } 4083 - 4084 - static ssize_t 4085 - rb_simple_write(struct file *filp, const char __user *ubuf, 4086 - size_t cnt, loff_t *ppos) 4087 - { 4088 - unsigned long *p = filp->private_data; 4089 - unsigned long val; 4090 - int ret; 4091 - 4092 - ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 4093 - if (ret) 4094 - return ret; 4095 - 4096 - if (val) 4097 - set_bit(RB_BUFFERS_ON_BIT, p); 4098 - else 4099 - clear_bit(RB_BUFFERS_ON_BIT, p); 4100 - 4101 - (*ppos)++; 4102 - 4103 - return cnt; 4104 - } 4105 - 4106 - static const struct file_operations rb_simple_fops = { 4107 - .open = tracing_open_generic, 4108 - .read = rb_simple_read, 4109 - .write = rb_simple_write, 4110 - .llseek = default_llseek, 4111 - }; 4112 - 4113 - 4114 - static __init int rb_init_debugfs(void) 4115 - { 4116 - struct dentry *d_tracer; 4117 - 4118 - d_tracer = tracing_init_dentry(); 4119 - 4120 - trace_create_file("tracing_on", 0644, d_tracer, 4121 - &ring_buffer_flags, &rb_simple_fops); 4122 - 4123 - return 0; 4124 - } 4125 - 4126 - fs_initcall(rb_init_debugfs); 4127 - #endif 4128 4041 4129 4042 #ifdef CONFIG_HOTPLUG_CPU 4130 4043 static int rb_cpu_notify(struct notifier_block *self,
+109
kernel/trace/trace.c
··· 36 36 #include <linux/ctype.h> 37 37 #include <linux/init.h> 38 38 #include <linux/poll.h> 39 + #include <linux/nmi.h> 39 40 #include <linux/fs.h> 40 41 41 42 #include "trace.h" ··· 351 350 } 352 351 353 352 static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler); 353 + 354 + /** 355 + * tracing_on - enable tracing buffers 356 + * 357 + * This function enables tracing buffers that may have been 358 + * disabled with tracing_off. 359 + */ 360 + void tracing_on(void) 361 + { 362 + if (global_trace.buffer) 363 + ring_buffer_record_on(global_trace.buffer); 364 + /* 365 + * This flag is only looked at when buffers haven't been 366 + * allocated yet. We don't really care about the race 367 + * between setting this flag and actually turning 368 + * on the buffer. 369 + */ 370 + global_trace.buffer_disabled = 0; 371 + } 372 + EXPORT_SYMBOL_GPL(tracing_on); 373 + 374 + /** 375 + * tracing_off - turn off tracing buffers 376 + * 377 + * This function stops the tracing buffers from recording data. 378 + * It does not disable any overhead the tracers themselves may 379 + * be causing. This function simply causes all recording to 380 + * the ring buffers to fail. 381 + */ 382 + void tracing_off(void) 383 + { 384 + if (global_trace.buffer) 385 + ring_buffer_record_off(global_trace.buffer); 386 + /* 387 + * This flag is only looked at when buffers haven't been 388 + * allocated yet. We don't really care about the race 389 + * between setting this flag and actually turning 390 + * on the buffer. 
391 + */ 392 + global_trace.buffer_disabled = 1; 393 + } 394 + EXPORT_SYMBOL_GPL(tracing_off); 395 + 396 + /** 397 + * tracing_is_on - show state of ring buffers enabled 398 + */ 399 + int tracing_is_on(void) 400 + { 401 + if (global_trace.buffer) 402 + return ring_buffer_record_is_on(global_trace.buffer); 403 + return !global_trace.buffer_disabled; 404 + } 405 + EXPORT_SYMBOL_GPL(tracing_is_on); 354 406 355 407 /** 356 408 * trace_wake_up - wake up tasks waiting for trace input ··· 4621 4567 create_trace_option_core_file(trace_options[i], i); 4622 4568 } 4623 4569 4570 + static ssize_t 4571 + rb_simple_read(struct file *filp, char __user *ubuf, 4572 + size_t cnt, loff_t *ppos) 4573 + { 4574 + struct ring_buffer *buffer = filp->private_data; 4575 + char buf[64]; 4576 + int r; 4577 + 4578 + if (buffer) 4579 + r = ring_buffer_record_is_on(buffer); 4580 + else 4581 + r = 0; 4582 + 4583 + r = sprintf(buf, "%d\n", r); 4584 + 4585 + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 4586 + } 4587 + 4588 + static ssize_t 4589 + rb_simple_write(struct file *filp, const char __user *ubuf, 4590 + size_t cnt, loff_t *ppos) 4591 + { 4592 + struct ring_buffer *buffer = filp->private_data; 4593 + unsigned long val; 4594 + int ret; 4595 + 4596 + ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 4597 + if (ret) 4598 + return ret; 4599 + 4600 + if (buffer) { 4601 + if (val) 4602 + ring_buffer_record_on(buffer); 4603 + else 4604 + ring_buffer_record_off(buffer); 4605 + } 4606 + 4607 + (*ppos)++; 4608 + 4609 + return cnt; 4610 + } 4611 + 4612 + static const struct file_operations rb_simple_fops = { 4613 + .open = tracing_open_generic, 4614 + .read = rb_simple_read, 4615 + .write = rb_simple_write, 4616 + .llseek = default_llseek, 4617 + }; 4618 + 4624 4619 static __init int tracer_init_debugfs(void) 4625 4620 { 4626 4621 struct dentry *d_tracer; ··· 4728 4625 4729 4626 trace_create_file("trace_clock", 0644, d_tracer, NULL, 4730 4627 &trace_clock_fops); 4628 + 4629 + 
trace_create_file("tracing_on", 0644, d_tracer, 4630 + global_trace.buffer, &rb_simple_fops); 4731 4631 4732 4632 #ifdef CONFIG_DYNAMIC_FTRACE 4733 4633 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, ··· 4904 4798 if (ret != TRACE_TYPE_NO_CONSUME) 4905 4799 trace_consume(&iter); 4906 4800 } 4801 + touch_nmi_watchdog(); 4907 4802 4908 4803 trace_printk_seq(&iter.seq); 4909 4804 } ··· 4970 4863 goto out_free_cpumask; 4971 4864 } 4972 4865 global_trace.entries = ring_buffer_size(global_trace.buffer); 4866 + if (global_trace.buffer_disabled) 4867 + tracing_off(); 4973 4868 4974 4869 4975 4870 #ifdef CONFIG_TRACER_MAX_TRACE
+1 -2
kernel/trace/trace.h
··· 154 154 struct ring_buffer *buffer; 155 155 unsigned long entries; 156 156 int cpu; 157 + int buffer_disabled; 157 158 cycle_t time_start; 158 159 struct task_struct *waiter; 159 160 struct trace_array_cpu *data[NR_CPUS]; ··· 836 835 filter) 837 836 #include "trace_entries.h" 838 837 839 - #ifdef CONFIG_PERF_EVENTS 840 838 #ifdef CONFIG_FUNCTION_TRACER 841 839 int perf_ftrace_event_register(struct ftrace_event_call *call, 842 840 enum trace_reg type, void *data); 843 841 #else 844 842 #define perf_ftrace_event_register NULL 845 843 #endif /* CONFIG_FUNCTION_TRACER */ 846 - #endif /* CONFIG_PERF_EVENTS */ 847 844 848 845 #endif /* _LINUX_KERNEL_TRACE_H */