Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
"Two fixes on the kernel side: fix an over-eager condition that failed
larger perf ring-buffer sizes, plus fix crashes in the Intel BTS code
for a corner case, found by fuzzing"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/core: Fix impossible ring-buffer sizes warning
perf/x86: Add check_period PMU callback

+59 -3
+14
arch/x86/events/core.c
···
        x86_pmu.check_microcode();
}

+ static int x86_pmu_check_period(struct perf_event *event, u64 value)
+ {
+         if (x86_pmu.check_period && x86_pmu.check_period(event, value))
+                 return -EINVAL;
+
+         if (value && x86_pmu.limit_period) {
+                 if (x86_pmu.limit_period(event, value) > value)
+                         return -EINVAL;
+         }
+
+         return 0;
+ }
+
static struct pmu pmu = {
        .pmu_enable = x86_pmu_enable,
        .pmu_disable = x86_pmu_disable,
···
        .event_idx = x86_pmu_event_idx,
        .sched_task = x86_pmu_sched_task,
        .task_ctx_size = sizeof(struct x86_perf_task_context),
+         .check_period = x86_pmu_check_period,
};

void arch_perf_update_userpage(struct perf_event *event,
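Note that x86_pmu_check_period() also refuses a period that the vendor limit_period() hook would have to raise, instead of letting the ioctl succeed and the hardware silently run at a different period. A minimal user-space model of that decision, assuming a hypothetical hardware minimum period of 128; demo_limit_period() and demo_check_period() are illustrative stand-ins, not kernel symbols:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical vendor hook: raise any requested period below 128. */
static uint64_t demo_limit_period(uint64_t left)
{
        return left < 128 ? 128 : left;
}

/* Mirrors the x86_pmu_check_period() logic in the hunk above. */
static int demo_check_period(uint64_t value)
{
        if (value && demo_limit_period(value) > value)
                return -EINVAL; /* requested period below the hardware minimum */
        return 0;
}

int main(void)
{
        printf("period 64:  %d\n", demo_check_period(64));  /* -EINVAL */
        printf("period 256: %d\n", demo_check_period(256)); /* 0 */
        return 0;
}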
+9
arch/x86/events/intel/core.c
···
        intel_pmu_lbr_sched_task(ctx, sched_in);
}

+ static int intel_pmu_check_period(struct perf_event *event, u64 value)
+ {
+         return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
+ }
+
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

PMU_FORMAT_ATTR(ldlat, "config1:0-15");
···
        .cpu_starting = intel_pmu_cpu_starting,
        .cpu_dying = intel_pmu_cpu_dying,
        .cpu_dead = intel_pmu_cpu_dead,
+
+         .check_period = intel_pmu_check_period,
};

static struct attribute *intel_pmu_attrs[];
···
        .guest_get_msrs = intel_guest_get_msrs,
        .sched_task = intel_pmu_sched_task,
+
+         .check_period = intel_pmu_check_period,
};

static __init void intel_clovertown_quirk(void)
+14 -2
arch/x86/events/perf_event.h
···
         * Intel host/guest support (KVM)
         */
        struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
+
+         /*
+          * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+          */
+         int (*check_period) (struct perf_event *event, u64 period);
};

struct x86_perf_task_context {
···

#ifdef CONFIG_CPU_SUP_INTEL

- static inline bool intel_pmu_has_bts(struct perf_event *event)
+ static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int hw_event, bts_event;
···
        hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
        bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

-         return hw_event == bts_event && hwc->sample_period == 1;
+         return hw_event == bts_event && period == 1;
+ }
+
+ static inline bool intel_pmu_has_bts(struct perf_event *event)
+ {
+         struct hw_perf_event *hwc = &event->hw;
+
+         return intel_pmu_has_bts_period(event, hwc->sample_period);
}

int intel_pmu_save_and_restart(struct perf_event *event);
+5
include/linux/perf_event.h
···
        /*
         * Filter events for PMU-specific reasons.
         */
        int (*filter_match) (struct perf_event *event); /* optional */
+
+         /*
+          * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+          */
+         int (*check_period) (struct perf_event *event, u64 value); /* optional */
};

enum perf_addr_filter_action_t {
+16
kernel/events/core.c
···
        }
}

+ static int perf_event_check_period(struct perf_event *event, u64 value)
+ {
+         return event->pmu->check_period(event, value);
+ }
+
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
        u64 value;
···
                return -EINVAL;

        if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+                 return -EINVAL;
+
+         if (perf_event_check_period(event, value))
                return -EINVAL;

        event_function_call(event, __perf_event_period, &value);
···
        return 0;
}

+ static int perf_event_nop_int(struct perf_event *event, u64 value)
+ {
+         return 0;
+ }
+
static DEFINE_PER_CPU(unsigned int, nop_txn_flags);

static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
···
                pmu->pmu_enable = perf_pmu_nop_void;
                pmu->pmu_disable = perf_pmu_nop_void;
        }
+
+         if (!pmu->check_period)
+                 pmu->check_period = perf_event_nop_int;

        if (!pmu->event_idx)
                pmu->event_idx = perf_event_idx_default;
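Taken together, these hooks make the PERF_EVENT_IOC_PERIOD path fail cleanly for the fuzzed BTS corner case: an Intel branch-instructions event opened with a normal sampling period can no longer be flipped into a BTS event (period == 1) after the fact. A rough user-space sketch of that sequence, assuming an x86 host with BTS and sufficient perf_event_paranoid privileges; error handling is minimal and the period values are only illustrative:

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        unsigned long long period = 1;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
        attr.sample_period = 100000;    /* opened as a normal sampling event */

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        /*
         * Asking for period == 1 would turn this into a BTS event after
         * creation; with the check_period callback in place the ioctl
         * now returns -EINVAL instead of letting the kernel reach the
         * crashing BTS corner case described in the pull message.
         */
        if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period))
                perror("PERF_EVENT_IOC_PERIOD");

        close(fd);
        return 0;
}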
+1 -1
kernel/events/ring_buffer.c
···
        size = sizeof(struct ring_buffer);
        size += nr_pages * sizeof(void *);

-         if (order_base_2(size) >= MAX_ORDER)
+         if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
                goto fail;

        rb = kzalloc(size, GFP_KERNEL);
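The old check compared a size in bytes against MAX_ORDER, which is a limit expressed in page orders, so reasonable ring-buffer sizes tripped the "impossible size" path. The size being tested is only the metadata kzalloc() (struct ring_buffer plus one void * per data page), as the surrounding context shows. A quick way to see the difference in cutoffs, assuming the common x86-64 configuration of PAGE_SHIFT == 12 and MAX_ORDER == 11 (both are config-dependent values):

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed: 4 KiB pages */
#define MAX_ORDER  11   /* assumed: default buddy-allocator limit */

int main(void)
{
        /* order_base_2(size) >= N rejects any size larger than 1 << (N - 1). */
        unsigned long old_cutoff = 1UL << (MAX_ORDER - 1);
        unsigned long new_cutoff = 1UL << (PAGE_SHIFT + MAX_ORDER - 1);

        printf("old cutoff: %lu bytes of metadata\n", old_cutoff);  /* 1024 */
        printf("new cutoff: %lu bytes of metadata\n", new_cutoff);  /* 4194304 */
        return 0;
}

With 8-byte page pointers, the old 1 KiB cap left room for only on the order of a hundred data pages, which is the "failed larger perf ring-buffer sizes" regression mentioned in the pull message; the corrected bound expresses the same MAX_ORDER limit in bytes, matching what the allocator can actually be asked for.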