Merge tag 'perf-urgent-2025-05-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 perf event fix from Ingo Molnar:
"Fix PEBS-via-PT crash"

* tag 'perf-urgent-2025-05-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/x86/intel: Fix segfault with PEBS-via-PT with sample_freq

+5 -4
arch/x86/events/intel/ds.c
···
 		setup_pebs_fixed_sample_data);
 }
 
-static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
+static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, u64 mask)
 {
+	u64 pebs_enabled = cpuc->pebs_enabled & mask;
 	struct perf_event *event;
 	int bit;
 
···
 	 * It needs to call intel_pmu_save_and_restart_reload() to
 	 * update the event->count for this case.
 	 */
-	for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
+	for_each_set_bit(bit, (unsigned long *)&pebs_enabled, X86_PMC_IDX_MAX) {
 		event = cpuc->events[bit];
 		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
 			intel_pmu_save_and_restart_reload(event, 0);
···
 	}
 
 	if (unlikely(base >= top)) {
-		intel_pmu_pebs_event_update_no_drain(cpuc, size);
+		intel_pmu_pebs_event_update_no_drain(cpuc, mask);
 		return;
 	}
 
···
 		(hybrid(cpuc->pmu, fixed_cntr_mask64) << INTEL_PMC_IDX_FIXED);
 
 	if (unlikely(base >= top)) {
-		intel_pmu_pebs_event_update_no_drain(cpuc, X86_PMC_IDX_MAX);
+		intel_pmu_pebs_event_update_no_drain(cpuc, mask);
 		return;
 	}
 
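The sketch below is a minimal, standalone userspace illustration (not kernel code) of the failure mode the patch above guards against: walking every set bit of pebs_enabled can land on a control bit that has no counter, and therefore no cpuc->events[] entry, behind it. The bit positions, the fake_event struct, the events[] layout, and the counter_mask value are all illustrative assumptions; in particular, bit 61 is only assumed here to stand in for the PEBS-via-PT output-enable control bit. Only the masking idea mirrors the actual change.

/*
 * Standalone sketch of why the PEBS-enable bitmap must be masked down to
 * counter bits before its set bits are walked. All constants are made up
 * for illustration; only the mask-before-walk pattern follows the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define IDX_MAX		64	/* stand-in for X86_PMC_IDX_MAX                    */
#define OUTPUT_PT_BIT	61	/* assumed position of a non-counter control bit   */

struct fake_event { const char *name; };

int main(void)
{
	/* events[] only has entries for real counters; high bits have none */
	struct fake_event gp0 = { "cycles" };
	struct fake_event *events[IDX_MAX] = { [0] = &gp0 };

	/* PEBS enabled on counter 0, plus a PEBS-via-PT-style output control bit */
	uint64_t pebs_enabled = (1ULL << 0) | (1ULL << OUTPUT_PT_BIT);

	/* hypothetical mask covering only counter bits (one GP, one fixed) */
	uint64_t counter_mask = (1ULL << 0) | (1ULL << 32);

	/*
	 * Unmasked walk: every set bit is visited, including bit 61, which has
	 * no event behind it. The kernel code would dereference a NULL
	 * cpuc->events[] entry at this point; the sketch just reports it.
	 */
	for (int bit = 0; bit < IDX_MAX; bit++) {
		if (!(pebs_enabled & (1ULL << bit)))
			continue;
		printf("unmasked walk hits bit %2d: %s\n",
		       bit, events[bit] ? events[bit]->name : "(no event!)");
	}

	/* Masked walk: only bits that correspond to real counters remain */
	uint64_t masked = pebs_enabled & counter_mask;

	for (int bit = 0; bit < IDX_MAX; bit++) {
		if (!(masked & (1ULL << bit)))
			continue;
		printf("masked walk hits bit   %2d: %s\n", bit, events[bit]->name);
	}
	return 0;
}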