perf/core: Fix WARN in perf_cgroup_switch()

There may be concurrency between perf_cgroup_switch and
perf_cgroup_event_disable. Consider the following scenario: after a new
perf cgroup event is created on CPU0, the new event may not trigger
a reprogramming, causing ctx->is_active to be 0. In this case, when CPU1
disables this perf event, it executes __perf_remove_from_context->
list_del_event->perf_cgroup_event_disable on CPU1, which causes a race
with perf_cgroup_switch running on CPU0.
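
CPU1 can run __perf_remove_from_context() against CPU0's context at all
because of the !ctx->is_active fast path in perf_remove_from_context():
when the context is inactive, the removal is done locally under ctx->lock
instead of via an IPI to the event's CPU. Paraphrased from
kernel/events/core.c (details vary by kernel version):

	raw_spin_lock_irq(&ctx->lock);
	if (!ctx->is_active) {
		/*
		 * Inactive context: remove the event right here on the
		 * calling CPU instead of via event_function_call()/IPI;
		 * this is the path that races with perf_cgroup_switch().
		 */
		__perf_remove_from_context(event, this_cpu_ptr(&perf_cpu_context),
					   ctx, (void *)flags);
		raw_spin_unlock_irq(&ctx->lock);
		return;
	}
	raw_spin_unlock_irq(&ctx->lock);

	event_function_call(event, __perf_remove_from_context, (void *)flags);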

The following describes the details of this concurrency scenario:

CPU0                                            CPU1

perf_cgroup_switch:
    ...
    # cpuctx->cgrp is not NULL here
    if (READ_ONCE(cpuctx->cgrp) == NULL)
        return;

                                                perf_remove_from_context:
                                                    ...
                                                    raw_spin_lock_irq(&ctx->lock);
                                                    ...
                                                    # ctx->is_active == 0 because the
                                                    # reprogramming was not triggered,
                                                    # so CPU1 can do
                                                    # __perf_remove_from_context for CPU0
                                                    __perf_remove_from_context:
                                                        perf_cgroup_event_disable:
                                                            ...
                                                            if (--ctx->nr_cgroups)
                                                            ...

    # this warning triggers because CPU1 changed
    # ctx.nr_cgroups to 0.
    WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);

[peterz: use guard instead of goto unlock]
Fixes: db4a835601b7 ("perf/core: Set cgroup in CPU contexts for new cgroup events")
Signed-off-by: Luo Gengkun <luogengkun@huaweicloud.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250604033924.3914647-3-luogengkun@huaweicloud.com
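
The guard() referenced in the [peterz: ...] note comes from
include/linux/cleanup.h: it declares a variable of type class_<name>_t
whose constructor takes the lock and whose __attribute__((cleanup))
destructor releases it when the variable goes out of scope, so the early
return added in perf_cgroup_switch() (see the diff below) drops the lock
automatically. The patch hand-rolls the class_perf_ctx_lock_t triplet
because perf_ctx_lock() takes two arguments. A simplified sketch of the
macros, not the verbatim kernel definitions:

	#define __cleanup(func)	__attribute__((__cleanup__(func)))

	#define CLASS(_name, var)						\
		class_##_name##_t var __cleanup(class_##_name##_destructor) =	\
			class_##_name##_constructor

	/* the kernel uses __UNIQUE_ID(guard) for the variable name */
	#define guard(_name)	CLASS(_name, __guard)

	/*
	 * Hence:
	 *	guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
	 * expands roughly to:
	 *	class_perf_ctx_lock_t __guard
	 *		__cleanup(class_perf_ctx_lock_destructor) =
	 *		class_perf_ctx_lock_constructor(cpuctx, cpuctx->task_ctx);
	 */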

+20 -2
kernel/events/core.c
@@ -207,6 +207,19 @@
 	__perf_ctx_unlock(&cpuctx->ctx);
 }
 
+typedef struct {
+	struct perf_cpu_context *cpuctx;
+	struct perf_event_context *ctx;
+} class_perf_ctx_lock_t;
+
+static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
+{ perf_ctx_unlock(_T->cpuctx, _T->ctx); }
+
+static inline class_perf_ctx_lock_t
+class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx,
+				struct perf_event_context *ctx)
+{ perf_ctx_lock(cpuctx, ctx); return (class_perf_ctx_lock_t){ cpuctx, ctx }; }
+
 #define TASK_TOMBSTONE ((void *)-1L)
 
 static bool is_kernel_event(struct perf_event *event)
@@ -944,7 +957,13 @@
 	if (READ_ONCE(cpuctx->cgrp) == cgrp)
 		return;
 
-	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+	guard(perf_ctx_lock)(cpuctx, cpuctx->task_ctx);
+	/*
+	 * Re-check, could've raced vs perf_remove_from_context().
+	 */
+	if (READ_ONCE(cpuctx->cgrp) == NULL)
+		return;
+
 	perf_ctx_disable(&cpuctx->ctx, true);
 
 	ctx_sched_out(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
@@ -962,7 +981,6 @@
 	ctx_sched_in(&cpuctx->ctx, NULL, EVENT_ALL|EVENT_CGROUP);
 
 	perf_ctx_enable(&cpuctx->ctx, true);
-	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 }
 
 static int perf_cgroup_ensure_storage(struct perf_event *event,
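
For readers who want to poke at the pattern outside the kernel, here is a
minimal self-contained userspace analogue (hypothetical names, requires
GCC/Clang for __attribute__((cleanup)); build with: cc -pthread guard_demo.c):

	#include <pthread.h>
	#include <stdio.h>

	typedef struct { pthread_mutex_t *m; } mutex_guard_t;

	static void mutex_guard_destructor(mutex_guard_t *g)
	{
		pthread_mutex_unlock(g->m);
		printf("unlocked on scope exit\n");
	}

	static mutex_guard_t mutex_guard_constructor(pthread_mutex_t *m)
	{
		pthread_mutex_lock(m);
		return (mutex_guard_t){ m };
	}

	#define mutex_guard(var, m)						\
		mutex_guard_t var __attribute__((cleanup(mutex_guard_destructor))) = \
			mutex_guard_constructor(m)

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int cgrp_present;	/* stand-in for cpuctx->cgrp */

	static void cgroup_switch(void)
	{
		mutex_guard(g, &lock);

		/* Re-check under the lock; the early return still unlocks. */
		if (!cgrp_present)
			return;

		/* ... reprogram events with the lock held ... */
	}

	int main(void)
	{
		cgroup_switch();	/* prints "unlocked on scope exit" */
		return 0;
	}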