Merge tag 'perf_urgent_for_v5.15_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Borislav Petkov:

- Make sure the destroy callback is reset when an event initialization
  fails

- Update the event constraints for Icelake

- Make sure an event's enabled time is kept up to date even while the
  event is inactive

* tag 'perf_urgent_for_v5.15_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: fix userpage->time_enabled of inactive events
  perf/x86/intel: Update event constraints for ICX
  perf/x86: Reset destroy callback on event init failure

 arch/x86/events/core.c       |  1 +
 arch/x86/events/intel/core.c |  1 +
 include/linux/perf_event.h   |  4 +++-
 kernel/events/core.c         | 34 ++++++++++++++++++++++++++++++----
 4 files changed, 35 insertions(+), 5 deletions(-)

--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2465,6 +2465,7 @@
         if (err) {
                 if (event->destroy)
                         event->destroy(event);
+                event->destroy = NULL;
         }
 
         if (READ_ONCE(x86_pmu.attr_rdpmc) &&
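The hunk above is small but load-bearing: x86_pmu_event_init() has already run the destroy callback on its own error path, so leaving the stale pointer behind risks a later, generic error path invoking the same cleanup a second time. A minimal userspace sketch of that pattern, with purely hypothetical names (struct fake_event, fake_init, fake_destroy), not kernel code:

/* Illustrates why an init path that runs its own cleanup must also
 * clear the cleanup pointer it registered. */
#include <stdio.h>
#include <stdlib.h>

struct fake_event {
        void (*destroy)(struct fake_event *ev);
        void *priv;
};

static void fake_destroy(struct fake_event *ev)
{
        free(ev->priv);                 /* running this twice = double free */
        ev->priv = NULL;
}

static int fake_init(struct fake_event *ev, int fail)
{
        ev->priv = malloc(64);
        ev->destroy = fake_destroy;

        if (fail) {                     /* a later init step failed */
                if (ev->destroy)
                        ev->destroy(ev);
                ev->destroy = NULL;     /* the fix: drop the stale callback */
                return -1;
        }
        return 0;
}

int main(void)
{
        struct fake_event ev = { 0 };

        if (fake_init(&ev, 1) && ev.destroy)
                ev.destroy(&ev);        /* generic error path in the caller */

        puts("destroy ran exactly once");
        return 0;
}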
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -263,6 +263,7 @@
         INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
         INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
         INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
+        INTEL_EVENT_CONSTRAINT(0xef, 0xf),
         INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
         EVENT_CONSTRAINT_END
 };
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -683,7 +683,9 @@
         /*
          * timestamp shadows the actual context timing but it can
          * be safely used in NMI interrupt context. It reflects the
-         * context time as it was when the event was last scheduled in.
+         * context time as it was when the event was last scheduled in,
+         * or when ctx_sched_in failed to schedule the event because we
+         * run out of PMC.
          *
          * ctx_time already accounts for ctx->timestamp. Therefore to
          * compute ctx_time for a sample, simply add perf_clock().
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3707,6 +3707,29 @@
         return 0;
 }
 
+static inline bool event_update_userpage(struct perf_event *event)
+{
+        if (likely(!atomic_read(&event->mmap_count)))
+                return false;
+
+        perf_event_update_time(event);
+        perf_set_shadow_time(event, event->ctx);
+        perf_event_update_userpage(event);
+
+        return true;
+}
+
+static inline void group_update_userpage(struct perf_event *group_event)
+{
+        struct perf_event *event;
+
+        if (!event_update_userpage(group_event))
+                return;
+
+        for_each_sibling_event(event, group_event)
+                event_update_userpage(event);
+}
+
 static int merge_sched_in(struct perf_event *event, void *data)
 {
         struct perf_event_context *ctx = event->ctx;
@@ -3725,14 +3748,15 @@
         }
 
         if (event->state == PERF_EVENT_STATE_INACTIVE) {
+                *can_add_hw = 0;
                 if (event->attr.pinned) {
                         perf_cgroup_event_disable(event, ctx);
                         perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+                } else {
+                        ctx->rotate_necessary = 1;
+                        perf_mux_hrtimer_restart(cpuctx);
+                        group_update_userpage(event);
                 }
-
-                *can_add_hw = 0;
-                ctx->rotate_necessary = 1;
-                perf_mux_hrtimer_restart(cpuctx);
         }
 
         return 0;
@@ -6324,6 +6348,8 @@
 
         ring_buffer_attach(event, rb);
 
+        perf_event_update_time(event);
+        perf_set_shadow_time(event, event->ctx);
         perf_event_init_userpage(event);
         perf_event_update_userpage(event);
 } else {
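For context on the kernel/events/core.c change: the new event_update_userpage()/group_update_userpage() helpers refresh the self-monitoring page, but only for events that are actually mmap'ed. A hedged userspace sketch (not part of the patch) of how a self-monitoring reader consumes time_enabled/time_running from the mapped perf_event_mmap_page, which is exactly what used to go stale for events that could not be scheduled because the PMCs ran out:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
        struct perf_event_attr attr;
        struct perf_event_mmap_page *pc;
        long page = sysconf(_SC_PAGESIZE);
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_HARDWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.exclude_kernel = 1;

        /* count CPU cycles for the calling thread on any CPU */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        /* map only the metadata page; no ring buffer is needed */
        pc = mmap(NULL, page, PROT_READ, MAP_SHARED, fd, 0);
        if (pc == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /*
         * A real reader should loop on the pc->lock seqcount; one racy
         * read is enough to illustrate the exported fields here.
         */
        printf("time_enabled=%llu ns, time_running=%llu ns\n",
               (unsigned long long)pc->time_enabled,
               (unsigned long long)pc->time_running);

        munmap(pc, page);
        close(fd);
        return 0;
}

The likely(!atomic_read(&event->mmap_count)) early return in the patch keeps this extra bookkeeping off the scheduling path for events that nobody has mapped.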