Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'perf-urgent-for-linus', 'x86-urgent-for-linus' and 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf, x86 and scheduler updates from Ingo Molnar.

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
tracing: Do not enable function event with enable
perf stat: handle ENXIO error for perf_event_open
perf: Turn off compiler warnings for flex and bison generated files
perf stat: Fix case where guest/host monitoring is not supported by kernel
perf build-id: Fix filename size calculation

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86, kvm: KVM paravirt kernels don't check for CPUID being unavailable
x86: Fix section annotation of acpi_map_cpu2node()
x86/microcode: Ensure that module is only loaded on supported Intel CPUs

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched: Fix KVM and ia64 boot crash due to sched_groups circular linked list assumption

+59 -16
+3
arch/x86/include/asm/kvm_para.h
···
170 170 	unsigned int eax, ebx, ecx, edx;
171 171 	char signature[13];
172 172 
    173 +	if (boot_cpu_data.cpuid_level < 0)
    174 +		return 0;	/* So we don't blow up on old processors */
    175 +
173 176 	cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
174 177 	memcpy(signature + 0, &ebx, 4);
175 178 	memcpy(signature + 4, &ecx, 4);
+1 -1
arch/x86/kernel/acpi/boot.c
···
593 593 #ifdef CONFIG_ACPI_HOTPLUG_CPU
594 594 #include <acpi/processor.h>
595 595 
596     -static void __cpuinitdata acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
    596 +static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
597 597 {
598 598 #ifdef CONFIG_ACPI_NUMA
599 599 	int nid;
+8 -6
arch/x86/kernel/microcode_intel.c
···
147 147 
148 148 	memset(csig, 0, sizeof(*csig));
149 149 
150     -	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
151     -	    cpu_has(c, X86_FEATURE_IA64)) {
152     -		pr_err("CPU%d not a capable Intel processor\n", cpu_num);
153     -		return -1;
154     -	}
155     -
156 150 	csig->sig = cpuid_eax(0x00000001);
157 151 
158 152 	if ((c->x86_model >= 5) || (c->x86 > 6)) {
···
457 463 
458 464 struct microcode_ops * __init init_intel_microcode(void)
459 465 {
    466 +	struct cpuinfo_x86 *c = &cpu_data(0);
    467 +
    468 +	if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
    469 +	    cpu_has(c, X86_FEATURE_IA64)) {
    470 +		pr_err("Intel CPU family 0x%x not supported\n", c->x86);
    471 +		return NULL;
    472 +	}
    473 +
460 474 	return &microcode_intel_ops;
461 475 }
462 476 
+2
include/linux/ftrace_event.h
···
179 179 	TRACE_EVENT_FL_RECORDED_CMD_BIT,
180 180 	TRACE_EVENT_FL_CAP_ANY_BIT,
181 181 	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
    182 +	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
182 183 };
183 184 
184 185 enum {
···
188 187 	TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
189 188 	TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
190 189 	TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
    190 +	TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
191 191 };
192 192 
193 193 struct ftrace_event_call {
+2
kernel/sched/core.c
···
6382 6382 		if (!sg)
6383 6383 			return -ENOMEM;
6384 6384 
     6385 +		sg->next = sg;
     6386 +
6385 6387 		*per_cpu_ptr(sdd->sg, j) = sg;
6386 6388 
6387 6389 		sgp = kzalloc_node(sizeof(struct sched_group_power),
+4 -1
kernel/trace/trace_events.c
···
294 294 		if (!call->name || !call->class || !call->class->reg)
295 295 			continue;
296 296 
    297 +		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
    298 +			continue;
    299 +
297 300 		if (match &&
298 301 		    strcmp(match, call->name) != 0 &&
299 302 		    strcmp(match, call->class->system) != 0)
···
1167 1164 		return -1;
1168 1165 	}
1169 1166 
1170      -	if (call->class->reg)
     1167 +	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
1171 1168 		trace_create_file("enable", 0644, call->dir, call,
1172 1169 				  enable);
1173 1170 
+1
kernel/trace/trace_export.c
···
180 180 	.event.type		= etype,			\
181 181 	.class			= &event_class_ftrace_##call,	\
182 182 	.print_fmt		= print,			\
    183 +	.flags			= TRACE_EVENT_FL_IGNORE_ENABLE,	\
183 184 };								\
184 185 struct ftrace_event_call __used					\
185 186 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
+2 -2
tools/perf/Makefile
···
774 774 # over the general rule for .o
775 775 
776 776 $(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
777     -	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
    777 +	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -w $<
778 778 
779 779 $(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
780     -	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
    780 +	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -w $<
781 781 
782 782 $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
783 783 	$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
+35 -5
tools/perf/builtin-stat.c
···
283 283 {
284 284 	struct perf_event_attr *attr = &evsel->attr;
285 285 	struct xyarray *group_fd = NULL;
    286 +	bool exclude_guest_missing = false;
    287 +	int ret;
286 288 
287 289 	if (group && evsel != first)
288 290 		group_fd = first->fd;
···
295 293 
296 294 	attr->inherit = !no_inherit;
297 295 
298     -	if (system_wide)
299     -		return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
    296 +retry:
    297 +	if (exclude_guest_missing)
    298 +		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
    299 +
    300 +	if (system_wide) {
    301 +		ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
300 302 						group, group_fd);
    303 +		if (ret)
    304 +			goto check_ret;
    305 +		return 0;
    306 +	}
    307 +
301 308 	if (!target_pid && !target_tid && (!group || evsel == first)) {
302 309 		attr->disabled = 1;
303 310 		attr->enable_on_exec = 1;
304 311 	}
305 312 
306     -	return perf_evsel__open_per_thread(evsel, evsel_list->threads,
307     -					   group, group_fd);
    313 +	ret = perf_evsel__open_per_thread(evsel, evsel_list->threads,
    314 +					  group, group_fd);
    315 +	if (!ret)
    316 +		return 0;
    317 +	/* fall through */
    318 +check_ret:
    319 +	if (ret && errno == EINVAL) {
    320 +		if (!exclude_guest_missing &&
    321 +		    (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
    322 +			pr_debug("Old kernel, cannot exclude "
    323 +				 "guest or host samples.\n");
    324 +			exclude_guest_missing = true;
    325 +			goto retry;
    326 +		}
    327 +	}
    328 +	return ret;
308 329 }
309 330 
310 331 /*
···
488 463 
489 464 	list_for_each_entry(counter, &evsel_list->entries, node) {
490 465 		if (create_perf_stat_counter(counter, first) < 0) {
    466 +			/*
    467 +			 * PPC returns ENXIO for HW counters until 2.6.37
    468 +			 * (behavior changed with commit b0a873e).
    469 +			 */
491 470 			if (errno == EINVAL || errno == ENOSYS ||
492     -			    errno == ENOENT || errno == EOPNOTSUPP) {
    471 +			    errno == ENOENT || errno == EOPNOTSUPP ||
    472 +			    errno == ENXIO) {
493 473 				if (verbose)
494 474 					ui__warning("%s event is not supported by the kernel.\n",
495 475 						    event_name(counter));
+1 -1
tools/perf/util/header.c
···
296 296 	if (mkdir_p(filename, 0755))
297 297 		goto out_free;
298 298 
299     -	snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id);
    299 +	snprintf(filename + len, size - len, "/%s", sbuild_id);
300 300 
301 301 	if (access(filename, F_OK)) {
302 302 		if (is_kallsyms) {