kernel/events/core.c: +19 -4

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2462,7 +2462,11 @@
 	 * event_function_call() user.
 	 */
 	raw_spin_lock_irq(&ctx->lock);
-	if (!ctx->is_active) {
+	/*
+	 * Cgroup events are per-cpu events, and must IPI because of
+	 * cgrp_cpuctx_list.
+	 */
+	if (!ctx->is_active && !is_cgroup_event(event)) {
 		__perf_remove_from_context(event, __get_cpu_context(ctx),
 					   ctx, (void *)flags);
 		raw_spin_unlock_irq(&ctx->lock);
@@ -2899,11 +2895,14 @@
 	 * perf_event_attr::disabled events will not run and can be initialized
 	 * without IPI. Except when this is the first event for the context, in
 	 * that case we need the magic of the IPI to set ctx->is_active.
+	 * Similarly, cgroup events for the context also needs the IPI to
+	 * manipulate the cgrp_cpuctx_list.
 	 *
 	 * The IOC_ENABLE that is sure to follow the creation of a disabled
 	 * event will issue the IPI and reprogram the hardware.
 	 */
-	if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && ctx->nr_events) {
+	if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF &&
+	    ctx->nr_events && !is_cgroup_event(event)) {
 		raw_spin_lock_irq(&ctx->lock);
 		if (ctx->task == TASK_TOMBSTONE) {
 			raw_spin_unlock_irq(&ctx->lock);
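The two hunks above close the same hole from both directions: perf_remove_from_context() and perf_install_in_context() each have a fast path that skips the cross-CPU IPI when the context looks quiescent, but cgroup events are per-CPU events that also sit on cgrp_cpuctx_list, so they must always take the IPI path to keep that list consistent. For orientation only, the sketch below shows how such a cgroup-scoped event is created from userspace; it is not part of the patch, and the cgroup path and file names are made-up examples (PERF_FLAG_PID_CGROUP and the syscall itself are real).

/*
 * Illustrative userspace sketch, not from the patch: opening a
 * cgroup-scoped event. The cgroup directory path is an example.
 */
#include <fcntl.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int cgroup_fd, fd;

	/* Any perf_event cgroup directory works; this path is an example. */
	cgroup_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
	if (cgroup_fd < 0) {
		perror("open cgroup");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;

	/*
	 * For cgroup events "pid" is the cgroup fd and "cpu" must name a
	 * real CPU: the event is per-CPU and lands on cgrp_cpuctx_list,
	 * which is why the kernel side above must not skip the IPI.
	 */
	fd = sys_perf_event_open(&attr, cgroup_fd, 0, -1, PERF_FLAG_PID_CGROUP);
	if (fd < 0) {
		perror("perf_event_open");
		close(cgroup_fd);
		return 1;
	}

	close(fd);
	close(cgroup_fd);
	return 0;
}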
@@ -5992,6 +5985,8 @@
 	struct perf_buffer *old_rb = NULL;
 	unsigned long flags;
 
+	WARN_ON_ONCE(event->parent);
+
 	if (event->rb) {
 		/*
 		 * Should be impossible, we set this when removing
@@ -6051,6 +6042,9 @@
 {
 	struct perf_buffer *rb;
 
+	if (event->parent)
+		event = event->parent;
+
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
 	if (rb) {
@@ -6066,6 +6054,9 @@
 struct perf_buffer *ring_buffer_get(struct perf_event *event)
 {
 	struct perf_buffer *rb;
+
+	if (event->parent)
+		event = event->parent;
 
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
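The ring-buffer helpers now canonicalize an inherited child event to its parent before dereferencing event->rb, and ring_buffer_attach() above warns if it is ever handed a child, since only the parent event owns a buffer. The fragment below is a simplified stand-in for that resolve-to-parent pattern; struct evt, struct buf and buf_get() are invented for illustration and are not the kernel's perf_event/perf_buffer, and there is no RCU or error handling here.

/*
 * Minimal sketch of "resolve a child to its parent before touching the
 * shared buffer"; types and names are stand-ins, not kernel structures.
 */
#include <stdatomic.h>
#include <stddef.h>

struct buf {
	atomic_int refcount;
};

struct evt {
	struct evt *parent;	/* NULL for the parent (original) event */
	struct buf *rb;		/* only the parent ever owns a buffer   */
};

static struct buf *buf_get(struct evt *e)
{
	/* Inherited children carry no buffer of their own; use the parent's. */
	if (e->parent)
		e = e->parent;

	if (e->rb)
		atomic_fetch_add(&e->rb->refcount, 1);
	return e->rb;
}

int main(void)
{
	struct buf shared = { .refcount = 1 };
	struct evt parent = { .parent = NULL, .rb = &shared };
	struct evt child  = { .parent = &parent, .rb = NULL };

	/* Both resolve to the same, parent-owned buffer. */
	return buf_get(&child) == buf_get(&parent) ? 0 : 1;
}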
@@ -6778,7 +6763,7 @@
 	if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id()))
 		goto out;
 
-	rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler);
+	rb = ring_buffer_get(sampler);
 	if (!rb)
 		goto out;
 
@@ -6844,7 +6829,7 @@
 	if (WARN_ON_ONCE(!sampler || !data->aux_size))
 		return;
 
-	rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler);
+	rb = ring_buffer_get(sampler);
 	if (!rb)
 		return;
 
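With the parent resolution folded into ring_buffer_get(), the two AUX-sampling call sites above can simply pass the sampler itself. The case being handled is an inherited sampler: with perf_event_attr::inherit set, children of the traced task get per-child copies of the event that own no ring buffer, so the parent's buffer must be used. As a rough sketch of where such events come from, the snippet below sets up an inheriting, AUX-sampling attribute; it is not from the patch, the numeric values and the init_aux_sampler_attr() helper are illustrative, and a complete setup also needs an AUX-producing event (e.g. a PMU trace event) in the same group.

/*
 * Illustrative userspace sketch, not from the patch: attribute setup for
 * an inheriting, AUX-sampling event. Values are examples only.
 */
#include <linux/perf_event.h>
#include <string.h>

static void init_aux_sampler_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->sample_period = 100000;
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_AUX;
	attr->aux_sample_size = 4096;	/* bytes of AUX data copied per sample */
	attr->inherit = 1;		/* children get per-child copies */
	attr->disabled = 1;
}

int main(void)
{
	struct perf_event_attr attr;

	init_aux_sampler_attr(&attr);
	return attr.inherit ? 0 : 1;
}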