Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
"Two fixes on the kernel side: fix an over-eager condition that failed
larger perf ring-buffer sizes, plus fix crashes in the Intel BTS code
for a corner case, found by fuzzing"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/core: Fix impossible ring-buffer sizes warning
perf/x86: Add check_period PMU callback

Changed files (+59 -3)
arch/x86/events/core.c (+14)
···
                 x86_pmu.check_microcode();
 }
 
+static int x86_pmu_check_period(struct perf_event *event, u64 value)
+{
+        if (x86_pmu.check_period && x86_pmu.check_period(event, value))
+                return -EINVAL;
+
+        if (value && x86_pmu.limit_period) {
+                if (x86_pmu.limit_period(event, value) > value)
+                        return -EINVAL;
+        }
+
+        return 0;
+}
+
 static struct pmu pmu = {
         .pmu_enable = x86_pmu_enable,
         .pmu_disable = x86_pmu_disable,
···
         .event_idx = x86_pmu_event_idx,
         .sched_task = x86_pmu_sched_task,
         .task_ctx_size = sizeof(struct x86_perf_task_context),
+        .check_period = x86_pmu_check_period,
 };
 
 void arch_perf_update_userpage(struct perf_event *event,
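As a concrete illustration of the rule this wrapper enforces, here is a small stand-alone model of x86_pmu_check_period(): the fake_event and fake_x86_pmu types and the min128_limit_period quirk are invented for the sketch and only mimic the shape of the real x86_pmu callbacks. It shows that when a limit_period quirk would silently raise the requested value, the new check refuses the period with -EINVAL instead.

/*
 * Stand-alone model of the x86_pmu_check_period() logic above; the types and
 * the minimum-period quirk are hypothetical, not kernel code.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct fake_event {
        uint64_t config;
};

struct fake_x86_pmu {
        int (*check_period)(struct fake_event *event, uint64_t value);
        uint64_t (*limit_period)(struct fake_event *event, uint64_t value);
};

/* Hypothetical quirk: the counter cannot run with a period below 128. */
static uint64_t min128_limit_period(struct fake_event *event, uint64_t value)
{
        (void)event;
        return value < 128 ? 128 : value;
}

static struct fake_x86_pmu x86_pmu = {
        .check_period = NULL,                   /* no vendor-specific check here */
        .limit_period = min128_limit_period,
};

/* Mirrors the kernel-side x86_pmu_check_period() from the hunk above. */
static int x86_pmu_check_period(struct fake_event *event, uint64_t value)
{
        if (x86_pmu.check_period && x86_pmu.check_period(event, value))
                return -EINVAL;

        if (value && x86_pmu.limit_period) {
                if (x86_pmu.limit_period(event, value) > value)
                        return -EINVAL;
        }

        return 0;
}

int main(void)
{
        struct fake_event ev = { .config = 0 };

        /* 1000 is accepted (0); 1 would be raised to 128, so it is refused (-EINVAL). */
        printf("period 1000 -> %d\n", x86_pmu_check_period(&ev, 1000));
        printf("period    1 -> %d\n", x86_pmu_check_period(&ev, 1));
        return 0;
}
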
arch/x86/events/intel/core.c (+9)
···
         intel_pmu_lbr_sched_task(ctx, sched_in);
 }
 
+static int intel_pmu_check_period(struct perf_event *event, u64 value)
+{
+        return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
+}
+
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
···
         .cpu_starting = intel_pmu_cpu_starting,
         .cpu_dying = intel_pmu_cpu_dying,
         .cpu_dead = intel_pmu_cpu_dead,
+
+        .check_period = intel_pmu_check_period,
 };
 
 static struct attribute *intel_pmu_attrs[];
···
 
         .guest_get_msrs = intel_guest_get_msrs,
         .sched_task = intel_pmu_sched_task,
+
+        .check_period = intel_pmu_check_period,
 };
 
 static __init void intel_clovertown_quirk(void)
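From user space, the corner case this closes looks roughly like the sketch below: open a hardware branch-instructions counter with an ordinary sampling period, then use PERF_EVENT_IOC_PERIOD to drop the period to 1, which is exactly the condition intel_pmu_has_bts_period() tests for. This is an illustration, not part of the patch; it assumes BTS-capable Intel hardware and a permissive perf_event_paranoid setting, and on a kernel carrying this fix the ioctl is expected to fail with EINVAL instead of letting the event turn into a BTS event behind the driver's back.

/*
 * User-space sketch of the rejected sequence; error handling is minimal and
 * the expected outcome depends on the hardware and kernel version.
 */
#include <errno.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                           int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        __u64 period = 1;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_HARDWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
        attr.sample_period = 100000;            /* ordinary sampling period */
        attr.exclude_kernel = 1;

        fd = perf_event_open(&attr, 0, -1, -1, 0);      /* current task, any CPU */
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        /* A fixed kernel rejects this instead of morphing the event into BTS. */
        if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period) < 0)
                printf("PERF_EVENT_IOC_PERIOD(1) rejected: %s\n", strerror(errno));
        else
                printf("PERF_EVENT_IOC_PERIOD(1) accepted\n");

        close(fd);
        return 0;
}
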
arch/x86/events/perf_event.h (+14 -2)
···
          * Intel host/guest support (KVM)
          */
         struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
+
+        /*
+         * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+         */
+        int (*check_period) (struct perf_event *event, u64 period);
 };
 
 struct x86_perf_task_context {
···
 
 #ifdef CONFIG_CPU_SUP_INTEL
 
-static inline bool intel_pmu_has_bts(struct perf_event *event)
+static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
 {
         struct hw_perf_event *hwc = &event->hw;
         unsigned int hw_event, bts_event;
···
         hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
         bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
-        return hw_event == bts_event && hwc->sample_period == 1;
+        return hw_event == bts_event && period == 1;
+}
+
+static inline bool intel_pmu_has_bts(struct perf_event *event)
+{
+        struct hw_perf_event *hwc = &event->hw;
+
+        return intel_pmu_has_bts_period(event, hwc->sample_period);
 }
 
 int intel_pmu_save_and_restart(struct perf_event *event);
include/linux/perf_event.h (+5)
···
          * Filter events for PMU-specific reasons.
          */
         int (*filter_match) (struct perf_event *event); /* optional */
+
+        /*
+         * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+         */
+        int (*check_period) (struct perf_event *event, u64 value); /* optional */
 };
 
 enum perf_addr_filter_action_t {
kernel/events/core.c (+16)
···
         }
 }
 
+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+        return event->pmu->check_period(event, value);
+}
+
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
         u64 value;
···
                 return -EINVAL;
 
         if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+                return -EINVAL;
+
+        if (perf_event_check_period(event, value))
                 return -EINVAL;
 
         event_function_call(event, __perf_event_period, &value);
···
         return 0;
 }
 
+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+        return 0;
+}
+
 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
···
                 pmu->pmu_enable = perf_pmu_nop_void;
                 pmu->pmu_disable = perf_pmu_nop_void;
         }
+
+        if (!pmu->check_period)
+                pmu->check_period = perf_event_nop_int;
 
         if (!pmu->event_idx)
                 pmu->event_idx = perf_event_idx_default;
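The generic side uses the usual pattern of filling optional callbacks with a no-op default at registration time, so perf_event_check_period() can call pmu->check_period() without a NULL test. The stand-alone model below illustrates just that pattern; fake_pmu, fake_pmu_register() and fake_check_period() are simplified stand-ins, not the real perf core types.

/*
 * Model of the "no-op default for optional callbacks" pattern used above.
 * Everything here is a stand-in; only the shape matches the kernel code.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_event;

struct fake_pmu {
        const char *name;
        int (*check_period)(struct fake_event *event, uint64_t value);
};

/* Default: accept any period, like perf_event_nop_int(). */
static int fake_nop_int(struct fake_event *event, uint64_t value)
{
        (void)event;
        (void)value;
        return 0;
}

/* Registration fills in the default when the driver left the hook NULL. */
static void fake_pmu_register(struct fake_pmu *pmu)
{
        if (!pmu->check_period)
                pmu->check_period = fake_nop_int;
}

/* Call site never needs a NULL check, mirroring perf_event_check_period(). */
static int fake_check_period(struct fake_pmu *pmu, struct fake_event *event,
                             uint64_t value)
{
        return pmu->check_period(event, value);
}

int main(void)
{
        struct fake_pmu software_pmu = { .name = "software" };

        fake_pmu_register(&software_pmu);
        printf("%s: period 1 -> %d\n", software_pmu.name,
               fake_check_period(&software_pmu, NULL, 1));
        return 0;
}
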
kernel/events/ring_buffer.c (+1 -1)
···
         size = sizeof(struct ring_buffer);
         size += nr_pages * sizeof(void *);
 
-        if (order_base_2(size) >= MAX_ORDER)
+        if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
                 goto fail;
 
         rb = kzalloc(size, GFP_KERNEL);
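The size being checked here is the kzalloc() of the ring_buffer struct plus one void * per data page, and order_base_2() is taken over that size in bytes, while MAX_ORDER bounds page-allocation orders; the old comparison was therefore off by PAGE_SHIFT. The sketch below puts rough numbers on the two bounds. It assumes PAGE_SHIFT == 12, MAX_ORDER == 11 and a 256-byte stand-in for sizeof(struct ring_buffer); all three are assumptions for illustration, not values read from a particular build.

/*
 * Back-of-the-envelope comparison of the old and new bounds in rb_alloc().
 * PAGE_SHIFT, MAX_ORDER and RB_HEADER are assumed values, and order_base_2()
 * below is a simple reimplementation of the kernel helper's rounding-up
 * behaviour.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define MAX_ORDER       11
#define RB_HEADER       256     /* stand-in for sizeof(struct ring_buffer) */

static unsigned int order_base_2(uint64_t size)
{
        unsigned int order = 0;

        while ((1ULL << order) < size)
                order++;
        return order;
}

/* Largest number of data-page pointers whose metadata still passes the check. */
static uint64_t max_nr_pages(unsigned int order_limit)
{
        uint64_t nr_pages = 0;

        while (order_base_2(RB_HEADER + (nr_pages + 1) * sizeof(void *)) < order_limit)
                nr_pages++;
        return nr_pages;
}

int main(void)
{
        printf("old bound (MAX_ORDER):              %llu data pages\n",
               (unsigned long long)max_nr_pages(MAX_ORDER));
        printf("new bound (PAGE_SHIFT + MAX_ORDER): %llu data pages\n",
               (unsigned long long)max_nr_pages(PAGE_SHIFT + MAX_ORDER));
        return 0;
}

Under those assumptions the old bound stops at 96 data pages, so even a 128-page (512 KiB) ring buffer's metadata was refused, while the new bound allows roughly half a million page pointers and only fails for allocations around kzalloc()'s own upper limit.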