Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'core' of git://amd64.org/linux/rric into perf/core

+252 -94
+3
Documentation/kernel-parameters.txt
··· 1885 1885 arch_perfmon: [X86] Force use of architectural 1886 1886 perfmon on Intel CPUs instead of the 1887 1887 CPU specific event set. 1888 + timer: [X86] Force use of architectural NMI 1889 + timer mode (see also oprofile.timer 1890 + for generic hr timer mode) 1888 1891 1889 1892 oops=panic Always panic on oopses. Default is to just kill the 1890 1893 process, but there is a small probability of
+4
arch/Kconfig
··· 30 30 config HAVE_OPROFILE 31 31 bool 32 32 33 + config OPROFILE_NMI_TIMER 34 + def_bool y 35 + depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI 36 + 33 37 config KPROBES 34 38 bool "Kprobes" 35 39 depends on MODULES
+1 -2
arch/x86/oprofile/Makefile
··· 4 4 oprof.o cpu_buffer.o buffer_sync.o \ 5 5 event_buffer.o oprofile_files.o \ 6 6 oprofilefs.o oprofile_stats.o \ 7 - timer_int.o ) 7 + timer_int.o nmi_timer_int.o ) 8 8 9 9 oprofile-y := $(DRIVER_OBJS) init.o backtrace.o 10 10 oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_amd.o \ 11 11 op_model_ppro.o op_model_p4.o 12 - oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o
+7 -18
arch/x86/oprofile/init.c
··· 16 16 * with the NMI mode driver. 17 17 */ 18 18 19 + #ifdef CONFIG_X86_LOCAL_APIC 19 20 extern int op_nmi_init(struct oprofile_operations *ops); 20 - extern int op_nmi_timer_init(struct oprofile_operations *ops); 21 21 extern void op_nmi_exit(void); 22 - extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth); 22 + #else 23 + static int op_nmi_init(struct oprofile_operations *ops) { return -ENODEV; } 24 + static void op_nmi_exit(void) { } 25 + #endif 23 26 27 + extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth); 24 28 25 29 int __init oprofile_arch_init(struct oprofile_operations *ops) 26 30 { 27 - int ret; 28 - 29 - ret = -ENODEV; 30 - 31 - #ifdef CONFIG_X86_LOCAL_APIC 32 - ret = op_nmi_init(ops); 33 - #endif 34 - #ifdef CONFIG_X86_IO_APIC 35 - if (ret < 0) 36 - ret = op_nmi_timer_init(ops); 37 - #endif 38 31 ops->backtrace = x86_backtrace; 39 - 40 - return ret; 32 + return op_nmi_init(ops); 41 33 } 42 - 43 34 44 35 void oprofile_arch_exit(void) 45 36 { 46 - #ifdef CONFIG_X86_LOCAL_APIC 47 37 op_nmi_exit(); 48 - #endif 49 38 }
+21 -6
arch/x86/oprofile/nmi_int.c
··· 595 595 return 0; 596 596 } 597 597 598 - static int force_arch_perfmon; 599 - static int force_cpu_type(const char *str, struct kernel_param *kp) 598 + enum __force_cpu_type { 599 + reserved = 0, /* do not force */ 600 + timer, 601 + arch_perfmon, 602 + }; 603 + 604 + static int force_cpu_type; 605 + 606 + static int set_cpu_type(const char *str, struct kernel_param *kp) 600 607 { 601 - if (!strcmp(str, "arch_perfmon")) { 602 - force_arch_perfmon = 1; 608 + if (!strcmp(str, "timer")) { 609 + force_cpu_type = timer; 610 + printk(KERN_INFO "oprofile: forcing NMI timer mode\n"); 611 + } else if (!strcmp(str, "arch_perfmon")) { 612 + force_cpu_type = arch_perfmon; 603 613 printk(KERN_INFO "oprofile: forcing architectural perfmon\n"); 614 + } else { 615 + force_cpu_type = 0; 604 616 } 605 617 606 618 return 0; 607 619 } 608 - module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0); 620 + module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0); 609 621 610 622 static int __init ppro_init(char **cpu_type) 611 623 { 612 624 __u8 cpu_model = boot_cpu_data.x86_model; 613 625 struct op_x86_model_spec *spec = &op_ppro_spec; /* default */ 614 626 615 - if (force_arch_perfmon && cpu_has_arch_perfmon) 627 + if (force_cpu_type == arch_perfmon && cpu_has_arch_perfmon) 616 628 return 0; 617 629 618 630 /* ··· 689 677 int ret = 0; 690 678 691 679 if (!cpu_has_apic) 680 + return -ENODEV; 681 + 682 + if (force_cpu_type == timer) 692 683 return -ENODEV; 693 684 694 685 switch (vendor) {
-50
arch/x86/oprofile/nmi_timer_int.c
··· 1 - /** 2 - * @file nmi_timer_int.c 3 - * 4 - * @remark Copyright 2003 OProfile authors 5 - * @remark Read the file COPYING 6 - * 7 - * @author Zwane Mwaikambo <zwane@linuxpower.ca> 8 - */ 9 - 10 - #include <linux/init.h> 11 - #include <linux/smp.h> 12 - #include <linux/errno.h> 13 - #include <linux/oprofile.h> 14 - #include <linux/rcupdate.h> 15 - #include <linux/kdebug.h> 16 - 17 - #include <asm/nmi.h> 18 - #include <asm/apic.h> 19 - #include <asm/ptrace.h> 20 - 21 - static int profile_timer_exceptions_notify(unsigned int val, struct pt_regs *regs) 22 - { 23 - oprofile_add_sample(regs, 0); 24 - return NMI_HANDLED; 25 - } 26 - 27 - static int timer_start(void) 28 - { 29 - if (register_nmi_handler(NMI_LOCAL, profile_timer_exceptions_notify, 30 - 0, "oprofile-timer")) 31 - return 1; 32 - return 0; 33 - } 34 - 35 - 36 - static void timer_stop(void) 37 - { 38 - unregister_nmi_handler(NMI_LOCAL, "oprofile-timer"); 39 - synchronize_sched(); /* Allow already-started NMIs to complete. */ 40 - } 41 - 42 - 43 - int __init op_nmi_timer_init(struct oprofile_operations *ops) 44 - { 45 - ops->start = timer_start; 46 - ops->stop = timer_stop; 47 - ops->cpu_type = "timer"; 48 - printk(KERN_INFO "oprofile: using NMI timer interrupt.\n"); 49 - return 0; 50 - }
+173
drivers/oprofile/nmi_timer_int.c
··· 1 + /** 2 + * @file nmi_timer_int.c 3 + * 4 + * @remark Copyright 2011 Advanced Micro Devices, Inc. 5 + * 6 + * @author Robert Richter <robert.richter@amd.com> 7 + */ 8 + 9 + #include <linux/init.h> 10 + #include <linux/smp.h> 11 + #include <linux/errno.h> 12 + #include <linux/oprofile.h> 13 + #include <linux/perf_event.h> 14 + 15 + #ifdef CONFIG_OPROFILE_NMI_TIMER 16 + 17 + static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events); 18 + static int ctr_running; 19 + 20 + static struct perf_event_attr nmi_timer_attr = { 21 + .type = PERF_TYPE_HARDWARE, 22 + .config = PERF_COUNT_HW_CPU_CYCLES, 23 + .size = sizeof(struct perf_event_attr), 24 + .pinned = 1, 25 + .disabled = 1, 26 + }; 27 + 28 + static void nmi_timer_callback(struct perf_event *event, 29 + struct perf_sample_data *data, 30 + struct pt_regs *regs) 31 + { 32 + event->hw.interrupts = 0; /* don't throttle interrupts */ 33 + oprofile_add_sample(regs, 0); 34 + } 35 + 36 + static int nmi_timer_start_cpu(int cpu) 37 + { 38 + struct perf_event *event = per_cpu(nmi_timer_events, cpu); 39 + 40 + if (!event) { 41 + event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL, 42 + nmi_timer_callback, NULL); 43 + if (IS_ERR(event)) 44 + return PTR_ERR(event); 45 + per_cpu(nmi_timer_events, cpu) = event; 46 + } 47 + 48 + if (event && ctr_running) 49 + perf_event_enable(event); 50 + 51 + return 0; 52 + } 53 + 54 + static void nmi_timer_stop_cpu(int cpu) 55 + { 56 + struct perf_event *event = per_cpu(nmi_timer_events, cpu); 57 + 58 + if (event && ctr_running) 59 + perf_event_disable(event); 60 + } 61 + 62 + static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action, 63 + void *data) 64 + { 65 + int cpu = (unsigned long)data; 66 + switch (action) { 67 + case CPU_DOWN_FAILED: 68 + case CPU_ONLINE: 69 + nmi_timer_start_cpu(cpu); 70 + break; 71 + case CPU_DOWN_PREPARE: 72 + nmi_timer_stop_cpu(cpu); 73 + break; 74 + } 75 + return NOTIFY_DONE; 76 + } 77 + 78 + static struct notifier_block nmi_timer_cpu_nb = { 79 + .notifier_call = nmi_timer_cpu_notifier 80 + }; 81 + 82 + static int nmi_timer_start(void) 83 + { 84 + int cpu; 85 + 86 + get_online_cpus(); 87 + ctr_running = 1; 88 + for_each_online_cpu(cpu) 89 + nmi_timer_start_cpu(cpu); 90 + put_online_cpus(); 91 + 92 + return 0; 93 + } 94 + 95 + static void nmi_timer_stop(void) 96 + { 97 + int cpu; 98 + 99 + get_online_cpus(); 100 + for_each_online_cpu(cpu) 101 + nmi_timer_stop_cpu(cpu); 102 + ctr_running = 0; 103 + put_online_cpus(); 104 + } 105 + 106 + static void nmi_timer_shutdown(void) 107 + { 108 + struct perf_event *event; 109 + int cpu; 110 + 111 + get_online_cpus(); 112 + unregister_cpu_notifier(&nmi_timer_cpu_nb); 113 + for_each_possible_cpu(cpu) { 114 + event = per_cpu(nmi_timer_events, cpu); 115 + if (!event) 116 + continue; 117 + perf_event_disable(event); 118 + per_cpu(nmi_timer_events, cpu) = NULL; 119 + perf_event_release_kernel(event); 120 + } 121 + 122 + put_online_cpus(); 123 + } 124 + 125 + static int nmi_timer_setup(void) 126 + { 127 + int cpu, err; 128 + u64 period; 129 + 130 + /* clock cycles per tick: */ 131 + period = (u64)cpu_khz * 1000; 132 + do_div(period, HZ); 133 + nmi_timer_attr.sample_period = period; 134 + 135 + get_online_cpus(); 136 + err = register_cpu_notifier(&nmi_timer_cpu_nb); 137 + if (err) 138 + goto out; 139 + /* can't attach events to offline cpus: */ 140 + for_each_online_cpu(cpu) { 141 + err = nmi_timer_start_cpu(cpu); 142 + if (err) 143 + break; 144 + } 145 + if (err) 146 + nmi_timer_shutdown(); 147 + out: 148 + put_online_cpus(); 149 + return err; 150 + } 151 + 152 + int __init op_nmi_timer_init(struct oprofile_operations *ops) 153 + { 154 + int err = 0; 155 + 156 + err = nmi_timer_setup(); 157 + if (err) 158 + return err; 159 + nmi_timer_shutdown(); /* only check, don't alloc */ 160 + 161 + ops->create_files = NULL; 162 + ops->setup = nmi_timer_setup; 163 + ops->shutdown = nmi_timer_shutdown; 164 + ops->start = nmi_timer_start; 165 + ops->stop = nmi_timer_stop; 166 + ops->cpu_type = "timer"; 167 + 168 + printk(KERN_INFO "oprofile: using NMI timer interrupt.\n"); 169 + 170 + return 0; 171 + } 172 + 173 + #endif
+17 -4
drivers/oprofile/oprof.c
··· 239 239 return err; 240 240 } 241 241 242 + static int timer_mode; 243 + 242 244 static int __init oprofile_init(void) 243 245 { 244 246 int err; 245 247 248 + /* always init architecture to setup backtrace support */ 249 + timer_mode = 0; 246 250 err = oprofile_arch_init(&oprofile_ops); 247 - if (err < 0 || timer) { 248 - printk(KERN_INFO "oprofile: using timer interrupt.\n"); 251 + if (!err) { 252 + if (!timer && !oprofilefs_register()) 253 + return 0; 254 + oprofile_arch_exit(); 255 + } 256 + 257 + /* setup timer mode: */ 258 + timer_mode = 1; 259 + /* no nmi timer mode if oprofile.timer is set */ 260 + if (timer || op_nmi_timer_init(&oprofile_ops)) { 249 261 err = oprofile_timer_init(&oprofile_ops); 250 262 if (err) 251 263 return err; 252 264 } 265 + 253 266 return oprofilefs_register(); 254 267 } 255 268 256 269 257 270 static void __exit oprofile_exit(void) 258 271 { 259 - oprofile_timer_exit(); 260 272 oprofilefs_unregister(); 261 - oprofile_arch_exit(); 273 + if (!timer_mode) 274 + oprofile_arch_exit(); 262 275 } 263 276 264 277
+9
drivers/oprofile/oprof.h
··· 36 36 void oprofile_create_files(struct super_block *sb, struct dentry *root); 37 37 int oprofile_timer_init(struct oprofile_operations *ops); 38 38 void oprofile_timer_exit(void); 39 + #ifdef CONFIG_OPROFILE_NMI_TIMER 40 + int op_nmi_timer_init(struct oprofile_operations *ops); 41 + #else 42 + static inline int op_nmi_timer_init(struct oprofile_operations *ops) 43 + { 44 + return -ENODEV; 45 + } 46 + #endif 47 + 39 48 40 49 int oprofile_set_ulong(unsigned long *addr, unsigned long val); 41 50 int oprofile_set_timeout(unsigned long time);
+15 -14
drivers/oprofile/timer_int.c
··· 97 97 .notifier_call = oprofile_cpu_notify, 98 98 }; 99 99 100 - int oprofile_timer_init(struct oprofile_operations *ops) 100 + static int oprofile_hrtimer_setup(void) 101 101 { 102 - int rc; 103 - 104 - rc = register_hotcpu_notifier(&oprofile_cpu_notifier); 105 - if (rc) 106 - return rc; 107 - ops->create_files = NULL; 108 - ops->setup = NULL; 109 - ops->shutdown = NULL; 110 - ops->start = oprofile_hrtimer_start; 111 - ops->stop = oprofile_hrtimer_stop; 112 - ops->cpu_type = "timer"; 113 - return 0; 102 + return register_hotcpu_notifier(&oprofile_cpu_notifier); 114 103 } 115 104 116 - void oprofile_timer_exit(void) 105 + static void oprofile_hrtimer_shutdown(void) 117 106 { 118 107 unregister_hotcpu_notifier(&oprofile_cpu_notifier); 108 + } 109 + 110 + int oprofile_timer_init(struct oprofile_operations *ops) 111 + { 112 + ops->create_files = NULL; 113 + ops->setup = oprofile_hrtimer_setup; 114 + ops->shutdown = oprofile_hrtimer_shutdown; 115 + ops->start = oprofile_hrtimer_start; 116 + ops->stop = oprofile_hrtimer_stop; 117 + ops->cpu_type = "timer"; 118 + printk(KERN_INFO "oprofile: using timer interrupt.\n"); 119 + return 0; 119 120 }
+2
kernel/events/core.c
··· 1322 1322 } 1323 1323 raw_spin_unlock_irq(&ctx->lock); 1324 1324 } 1325 + EXPORT_SYMBOL_GPL(perf_event_disable); 1325 1326 1326 1327 static void perf_set_shadow_time(struct perf_event *event, 1327 1328 struct perf_event_context *ctx, ··· 1807 1806 out: 1808 1807 raw_spin_unlock_irq(&ctx->lock); 1809 1808 } 1809 + EXPORT_SYMBOL_GPL(perf_event_enable); 1810 1810 1811 1811 int perf_event_refresh(struct perf_event *event, int refresh) 1812 1812 {