Merge tag 'perf_urgent_for_v6.1_rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fix from Borislav Petkov:

- Fix a use-after-free where the perf_pending_task() callback could see
  an already freed event (a sketch of the pattern is included after the
  shortlog below)

* tag 'perf_urgent_for_v6.1_rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf: Fix perf_pending_task() UaF

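The race being closed: perf_pending_task() is queued with task_work_add() and runs later, when the task returns to user space; by then the last reference to the event may already have been dropped, so the callback dereferences freed memory. The heart of the fix is to pin the event with an extra reference while the callback is outstanding and to drop that reference at the end of the callback. Below is a minimal userspace sketch of that pattern, using hypothetical names (struct obj, obj_put, deferred_cb) rather than the kernel API:

/*
 * Sketch only: hold a reference across deferred work and drop it in the
 * callback, so the object cannot be freed underneath the callback.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
    atomic_long refcount;
    long value;
};

static void obj_put(struct obj *o)
{
    /* free only when the last reference goes away */
    if (atomic_fetch_sub(&o->refcount, 1) == 1)
        free(o);
}

/* stands in for perf_pending_task(): runs some time after being queued */
static void *deferred_cb(void *arg)
{
    struct obj *o = arg;

    printf("deferred work sees value=%ld\n", o->value);
    obj_put(o);     /* mirrors the put_event() added to the callback */
    return NULL;
}

int main(void)
{
    struct obj *o = calloc(1, sizeof(*o));
    pthread_t t;

    atomic_init(&o->refcount, 1);
    o->value = 42;

    /* take a reference before deferring, as the patch does before task_work_add() */
    atomic_fetch_add(&o->refcount, 1);
    pthread_create(&t, NULL, deferred_cb, o);

    obj_put(o);     /* release path: object lives until the callback is done */
    pthread_join(t, NULL);
    return 0;
}

(Build with cc -pthread. The kernel side uses atomic_long_inc_not_zero() because it must also cope with a refcount that has already dropped to zero.)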
Changed files: kernel/events/core.c (+13 -4)
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2291,6 +2291,7 @@
 		    !event->pending_work) {
 			event->pending_work = 1;
 			dec = false;
+			WARN_ON_ONCE(!atomic_long_inc_not_zero(&event->refcount));
 			task_work_add(current, &event->pending_task, TWA_RESUME);
 		}
 		if (dec)
@@ -2337,6 +2336,7 @@
 
 #define DETACH_GROUP	0x01UL
 #define DETACH_CHILD	0x02UL
+#define DETACH_DEAD	0x04UL
 
 /*
  * Cross CPU call to remove a performance event
@@ -2358,12 +2356,20 @@
 		update_cgrp_time_from_cpuctx(cpuctx, false);
 	}
 
+	/*
+	 * Ensure event_sched_out() switches to OFF, at the very least
+	 * this avoids raising perf_pending_task() at this time.
+	 */
+	if (flags & DETACH_DEAD)
+		event->pending_disable = 1;
 	event_sched_out(event, cpuctx, ctx);
 	if (flags & DETACH_GROUP)
 		perf_group_detach(event);
 	if (flags & DETACH_CHILD)
 		perf_child_detach(event);
 	list_del_event(event, ctx);
+	if (flags & DETACH_DEAD)
+		event->state = PERF_EVENT_STATE_DEAD;
 
 	if (!ctx->nr_events && ctx->is_active) {
 		if (ctx == &cpuctx->ctx)
@@ -5131,9 +5121,7 @@
 
 	ctx = perf_event_ctx_lock(event);
 	WARN_ON_ONCE(ctx->parent_ctx);
-	perf_remove_from_context(event, DETACH_GROUP);
 
-	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * Mark this event as STATE_DEAD, there is no external reference to it
 	 * anymore.
@@ -5143,7 +5135,6 @@
 	 * Thus this guarantees that we will in fact observe and kill _ALL_
 	 * child events.
 	 */
-	event->state = PERF_EVENT_STATE_DEAD;
-	raw_spin_unlock_irq(&ctx->lock);
+	perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD);
 
 	perf_event_ctx_unlock(event, ctx);
@@ -6584,6 +6577,8 @@
 	if (rctx >= 0)
 		perf_swevent_put_recursion_context(rctx);
 	preempt_enable_notrace();
+
+	put_event(event);
 }
 
 #ifdef CONFIG_GUEST_PERF_EVENTS
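The other half of the change is the new DETACH_DEAD flag: __perf_remove_from_context() now sets event->pending_disable ahead of event_sched_out() and switches the event to PERF_EVENT_STATE_DEAD while ctx->lock is held, so once teardown has started no further pending work is raised for the event. That is also why the explicit raw_spin_lock_irq()/STATE_DEAD section in perf_event_release_kernel() could be dropped in favour of passing DETACH_GROUP|DETACH_DEAD. A small userspace sketch of that ordering, with hypothetical names (obj_queue_work, obj_kill, a dead flag) and a plain mutex standing in for ctx->lock:

/*
 * Sketch only: mark the object dead under the same lock the fast path
 * takes before queueing deferred work, so nothing new can be queued
 * once teardown has begun.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
    pthread_mutex_t lock;
    bool dead;      /* analogous to PERF_EVENT_STATE_DEAD / pending_disable */
    int pending;    /* deferred callbacks still queued */
};

/* fast path: only queues work while the object is still live */
static bool obj_queue_work(struct obj *o)
{
    bool queued = false;

    pthread_mutex_lock(&o->lock);
    if (!o->dead) {
        o->pending++;   /* a real version would also take a reference here */
        queued = true;
    }
    pthread_mutex_unlock(&o->lock);
    return queued;
}

/* teardown: flip to dead first; after this nothing new can be queued */
static void obj_kill(struct obj *o)
{
    pthread_mutex_lock(&o->lock);
    o->dead = true;
    pthread_mutex_unlock(&o->lock);
}

int main(void)
{
    struct obj o = { .dead = false, .pending = 0 };

    pthread_mutex_init(&o.lock, NULL);
    printf("before kill: queued=%d\n", obj_queue_work(&o));   /* prints 1 */
    obj_kill(&o);
    printf("after kill:  queued=%d\n", obj_queue_work(&o));    /* prints 0 */
    pthread_mutex_destroy(&o.lock);
    return 0;
}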