Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core

+118 -44
+7
Documentation/trace/ftrace.txt
··· 226 226 Traces and records the max latency that it takes for 227 227 the highest priority task to get scheduled after 228 228 it has been woken up. 229 + Traces all tasks as an average developer would expect. 230 + 231 + "wakeup_rt" 232 + 233 + Traces and records the max latency that it takes for just 234 + RT tasks (as the current "wakeup" does). This is useful 235 + for those interested in wake up timings of RT tasks. 229 236 230 237 "hw-branch-tracer" 231 238
+12 -12
arch/x86/kernel/process.c
··· 377 377 void default_idle(void) 378 378 { 379 379 if (hlt_use_halt()) { 380 - trace_power_start(POWER_CSTATE, 1, smp_processor_id()); 381 - trace_cpu_idle(1, smp_processor_id()); 380 + trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id()); 381 + trace_cpu_idle_rcuidle(1, smp_processor_id()); 382 382 current_thread_info()->status &= ~TS_POLLING; 383 383 /* 384 384 * TS_POLLING-cleared state must be visible before we ··· 391 391 else 392 392 local_irq_enable(); 393 393 current_thread_info()->status |= TS_POLLING; 394 - trace_power_end(smp_processor_id()); 395 - trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); 394 + trace_power_end_rcuidle(smp_processor_id()); 395 + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 396 396 } else { 397 397 local_irq_enable(); 398 398 /* loop is done by the caller */ ··· 450 450 static void mwait_idle(void) 451 451 { 452 452 if (!need_resched()) { 453 - trace_power_start(POWER_CSTATE, 1, smp_processor_id()); 454 - trace_cpu_idle(1, smp_processor_id()); 453 + trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id()); 454 + trace_cpu_idle_rcuidle(1, smp_processor_id()); 455 455 if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) 456 456 clflush((void *)&current_thread_info()->flags); 457 457 ··· 461 461 __sti_mwait(0, 0); 462 462 else 463 463 local_irq_enable(); 464 - trace_power_end(smp_processor_id()); 465 - trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); 464 + trace_power_end_rcuidle(smp_processor_id()); 465 + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 466 466 } else 467 467 local_irq_enable(); 468 468 } ··· 474 474 */ 475 475 static void poll_idle(void) 476 476 { 477 - trace_power_start(POWER_CSTATE, 0, smp_processor_id()); 478 - trace_cpu_idle(0, smp_processor_id()); 477 + trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id()); 478 + trace_cpu_idle_rcuidle(0, smp_processor_id()); 479 479 local_irq_enable(); 480 480 while (!need_resched()) 481 481 cpu_relax(); 482 - trace_power_end(smp_processor_id()); 483 - trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id()); 482 + trace_power_end_rcuidle(smp_processor_id()); 483 + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 484 484 } 485 485 486 486 /*
+4 -4
drivers/cpuidle/cpuidle.c
··· 94 94 95 95 target_state = &drv->states[next_state]; 96 96 97 - trace_power_start(POWER_CSTATE, next_state, dev->cpu); 98 - trace_cpu_idle(next_state, dev->cpu); 97 + trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu); 98 + trace_cpu_idle_rcuidle(next_state, dev->cpu); 99 99 100 100 entered_state = target_state->enter(dev, drv, next_state); 101 101 102 - trace_power_end(dev->cpu); 103 - trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); 102 + trace_power_end_rcuidle(dev->cpu); 103 + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); 104 104 105 105 if (entered_state >= 0) { 106 106 /* Update cpuidle counters */
+2 -2
include/linux/ftrace.h
··· 178 178 }; 179 179 180 180 int ftrace_force_update(void); 181 - void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 181 + int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 182 182 int len, int reset); 183 - void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 183 + int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 184 184 int len, int reset); 185 185 void ftrace_set_global_filter(unsigned char *buf, int len, int reset); 186 186 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
+1 -6
include/linux/interrupt.h
··· 20 20 #include <linux/atomic.h> 21 21 #include <asm/ptrace.h> 22 22 #include <asm/system.h> 23 - #include <trace/events/irq.h> 24 23 25 24 /* 26 25 * These correspond to the IORESOURCE_IRQ_* defines in ··· 455 456 asmlinkage void __do_softirq(void); 456 457 extern void open_softirq(int nr, void (*action)(struct softirq_action *)); 457 458 extern void softirq_init(void); 458 - static inline void __raise_softirq_irqoff(unsigned int nr) 459 - { 460 - trace_softirq_raise(nr); 461 - or_softirq_pending(1UL << nr); 462 - } 459 + extern void __raise_softirq_irqoff(unsigned int nr); 463 460 464 461 extern void raise_softirq_irqoff(unsigned int nr); 465 462 extern void raise_softirq(unsigned int nr);
+18 -4
include/linux/tracepoint.h
··· 114 114 * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just 115 115 * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto". 116 116 */ 117 - #define __DO_TRACE(tp, proto, args, cond) \ 117 + #define __DO_TRACE(tp, proto, args, cond, prercu, postrcu) \ 118 118 do { \ 119 119 struct tracepoint_func *it_func_ptr; \ 120 120 void *it_func; \ ··· 122 122 \ 123 123 if (!(cond)) \ 124 124 return; \ 125 + prercu; \ 125 126 rcu_read_lock_sched_notrace(); \ 126 127 it_func_ptr = rcu_dereference_sched((tp)->funcs); \ 127 128 if (it_func_ptr) { \ ··· 133 132 } while ((++it_func_ptr)->func); \ 134 133 } \ 135 134 rcu_read_unlock_sched_notrace(); \ 135 + postrcu; \ 136 136 } while (0) 137 137 138 138 /* ··· 141 139 * not add unwanted padding between the beginning of the section and the 142 140 * structure. Force alignment to the same alignment as the section start. 143 141 */ 144 - #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ 142 + #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ 145 143 extern struct tracepoint __tracepoint_##name; \ 146 144 static inline void trace_##name(proto) \ 147 145 { \ ··· 149 147 __DO_TRACE(&__tracepoint_##name, \ 150 148 TP_PROTO(data_proto), \ 151 149 TP_ARGS(data_args), \ 152 - TP_CONDITION(cond)); \ 150 + TP_CONDITION(cond),,); \ 151 + } \ 152 + static inline void trace_##name##_rcuidle(proto) \ 153 + { \ 154 + if (static_branch(&__tracepoint_##name.key)) \ 155 + __DO_TRACE(&__tracepoint_##name, \ 156 + TP_PROTO(data_proto), \ 157 + TP_ARGS(data_args), \ 158 + TP_CONDITION(cond), \ 159 + rcu_idle_exit(), \ 160 + rcu_idle_enter()); \ 153 161 } \ 154 162 static inline int \ 155 163 register_trace_##name(void (*probe)(data_proto), void *data) \ ··· 202 190 EXPORT_SYMBOL(__tracepoint_##name) 203 191 204 192 #else /* !CONFIG_TRACEPOINTS */ 205 193 #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ 206 194 static inline void trace_##name(proto) \ 195 + { } \ 196 + static inline void trace_##name##_rcuidle(proto) \ 207 197 { } \ 208 198 static inline int \ 209 199 register_trace_##name(void (*probe)(data_proto), \
+2
include/trace/events/power.h
··· 151 151 events get removed */ 152 152 static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {}; 153 153 static inline void trace_power_end(u64 cpuid) {}; 154 + static inline void trace_power_start_rcuidle(u64 type, u64 state, u64 cpuid) {}; 155 + static inline void trace_power_end_rcuidle(u64 cpuid) {}; 154 156 static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {}; 155 157 #endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */ 156 158
+41
include/trace/events/printk.h
··· 1 + #undef TRACE_SYSTEM 2 + #define TRACE_SYSTEM printk 3 + 4 + #if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ) 5 + #define _TRACE_PRINTK_H 6 + 7 + #include <linux/tracepoint.h> 8 + 9 + TRACE_EVENT_CONDITION(console, 10 + TP_PROTO(const char *log_buf, unsigned start, unsigned end, 11 + unsigned log_buf_len), 12 + 13 + TP_ARGS(log_buf, start, end, log_buf_len), 14 + 15 + TP_CONDITION(start != end), 16 + 17 + TP_STRUCT__entry( 18 + __dynamic_array(char, msg, end - start + 1) 19 + ), 20 + 21 + TP_fast_assign( 22 + if ((start & (log_buf_len - 1)) > (end & (log_buf_len - 1))) { 23 + memcpy(__get_dynamic_array(msg), 24 + log_buf + (start & (log_buf_len - 1)), 25 + log_buf_len - (start & (log_buf_len - 1))); 26 + memcpy((char *)__get_dynamic_array(msg) + 27 + log_buf_len - (start & (log_buf_len - 1)), 28 + log_buf, end & (log_buf_len - 1)); 29 + } else 30 + memcpy(__get_dynamic_array(msg), 31 + log_buf + (start & (log_buf_len - 1)), 32 + end - start); 33 + ((char *)__get_dynamic_array(msg))[end - start] = 0; 34 + ), 35 + 36 + TP_printk("%s", __get_str(msg)) 37 + ); 38 + #endif /* _TRACE_PRINTK_H */ 39 + 40 + /* This part must be outside protection */ 41 + #include <trace/define_trace.h>
+2
kernel/irq/chip.c
··· 16 16 #include <linux/interrupt.h> 17 17 #include <linux/kernel_stat.h> 18 18 19 + #include <trace/events/irq.h> 20 + 19 21 #include "internals.h" 20 22 21 23 /**
+5
kernel/printk.c
··· 44 44 45 45 #include <asm/uaccess.h> 46 46 47 + #define CREATE_TRACE_POINTS 48 + #include <trace/events/printk.h> 49 + 47 50 /* 48 51 * Architectures can override it: 49 52 */ ··· 545 542 static void _call_console_drivers(unsigned start, 546 543 unsigned end, int msg_log_level) 547 544 { 545 + trace_console(&LOG_BUF(0), start, end, log_buf_len); 546 + 548 547 if ((msg_log_level < console_loglevel || ignore_loglevel) && 549 548 console_drivers && start != end) { 550 549 if ((start & LOG_BUF_MASK) > (end & LOG_BUF_MASK)) {
+6
kernel/softirq.c
··· 385 385 local_irq_restore(flags); 386 386 } 387 387 388 + void __raise_softirq_irqoff(unsigned int nr) 389 + { 390 + trace_softirq_raise(nr); 391 + or_softirq_pending(1UL << nr); 392 + } 393 + 388 394 void open_softirq(int nr, void (*action)(struct softirq_action *)) 389 395 { 390 396 softirq_vec[nr].action = action;
+10 -7
kernel/trace/ftrace.c
··· 1129 1129 return NULL; 1130 1130 1131 1131 size = 1 << size_bits; 1132 - hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL); 1132 + hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); 1133 1133 1134 1134 if (!hash->buckets) { 1135 1135 kfree(hash); ··· 3146 3146 mutex_lock(&ftrace_regex_lock); 3147 3147 if (reset) 3148 3148 ftrace_filter_reset(hash); 3149 - if (buf) 3150 - ftrace_match_records(hash, buf, len); 3149 + if (buf && !ftrace_match_records(hash, buf, len)) { 3150 + ret = -EINVAL; 3151 + goto out_regex_unlock; 3152 + } 3151 3153 3152 3154 mutex_lock(&ftrace_lock); 3153 3155 ret = ftrace_hash_move(ops, enable, orig_hash, hash); ··· 3159 3157 3160 3158 mutex_unlock(&ftrace_lock); 3161 3159 3160 + out_regex_unlock: 3162 3161 mutex_unlock(&ftrace_regex_lock); 3163 3162 3164 3163 free_ftrace_hash(hash); ··· 3176 3173 * Filters denote which functions should be enabled when tracing is enabled. 3177 3174 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 3178 3175 */ 3179 - void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 3176 + int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 3180 3177 int len, int reset) 3181 3178 { 3182 - ftrace_set_regex(ops, buf, len, reset, 1); 3179 + return ftrace_set_regex(ops, buf, len, reset, 1); 3183 3180 } 3184 3181 EXPORT_SYMBOL_GPL(ftrace_set_filter); 3185 3182 ··· 3194 3191 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 3195 3192 * for tracing. 3196 3193 */ 3197 - void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 3194 + int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 3198 3195 int len, int reset) 3199 3196 { 3200 - ftrace_set_regex(ops, buf, len, reset, 0); 3197 + return ftrace_set_regex(ops, buf, len, reset, 0); 3201 3198 } 3202 3199 EXPORT_SYMBOL_GPL(ftrace_set_notrace); 3203 3200 /**
+3 -3
kernel/trace/trace.c
··· 2764 2764 "tracing mini-HOWTO:\n\n" 2765 2765 "# mount -t debugfs nodev /sys/kernel/debug\n\n" 2766 2766 "# cat /sys/kernel/debug/tracing/available_tracers\n" 2767 - "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n" 2767 + "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n" 2768 2768 "# cat /sys/kernel/debug/tracing/current_tracer\n" 2769 2769 "nop\n" 2770 - "# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n" 2770 + "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n" 2771 2771 "# cat /sys/kernel/debug/tracing/current_tracer\n" 2772 - "sched_switch\n" 2772 + "wakeup\n" 2773 2773 "# cat /sys/kernel/debug/tracing/trace_options\n" 2774 2774 "noprint-parent nosym-offset nosym-addr noverbose\n" 2775 2775 "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
+3 -4
kernel/trace/trace_events_filter.c
··· 685 685 686 686 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds) 687 687 { 688 - stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL); 688 + stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL); 689 689 if (!stack->preds) 690 690 return -ENOMEM; 691 691 stack->index = n_preds; ··· 826 826 if (filter->preds) 827 827 __free_preds(filter); 828 828 829 - filter->preds = 830 - kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL); 829 + filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL); 831 830 832 831 if (!filter->preds) 833 832 return -ENOMEM; ··· 1485 1486 children = count_leafs(preds, &preds[root->left]); 1486 1487 children += count_leafs(preds, &preds[root->right]); 1487 1488 1488 - root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL); 1489 + root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL); 1489 1490 if (!root->ops) 1490 1491 return -ENOMEM; 1491 1492
+2 -2
kernel/trace/trace_syscalls.c
··· 468 468 unsigned long addr; 469 469 int i; 470 470 471 - syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * 472 - NR_syscalls, GFP_KERNEL); 471 + syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata), 472 + GFP_KERNEL); 473 473 if (!syscalls_metadata) { 474 474 WARN_ON(1); 475 475 return -ENOMEM;