perf: Fix scaling vs. perf_event_enable()

Similar to perf_event_enable_on_exec(), ensure that event timings are
consistent across perf_event_enable(): update the context time before
the event is marked enabled, and schedule EVENT_TIME back in on the
early-return paths, so the enabled/running times used to scale the
count do not go stale.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dvyukov@google.com
Cc: eranian@google.com
Cc: oleg@redhat.com
Cc: panand@redhat.com
Cc: sasha.levin@oracle.com
Cc: vince@deater.net
Link: http://lkml.kernel.org/r/20160224174948.218288698@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

---
 kernel/events/core.c | 42 +++++++++++++++++++++++-------------------
 1 file changed, 23 insertions(+), 19 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2069,13 +2069,26 @@
 	event->tstamp_stopped = tstamp;
 }
 
-static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
-			       struct perf_event_context *ctx);
+static void ctx_sched_out(struct perf_event_context *ctx,
+			  struct perf_cpu_context *cpuctx,
+			  enum event_type_t event_type);
 static void
 ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
 	     enum event_type_t event_type,
 	     struct task_struct *task);
+
+static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
+			       struct perf_event_context *ctx)
+{
+	if (!cpuctx->task_ctx)
+		return;
+
+	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
+		return;
+
+	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
+}
 
 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
 				struct perf_event_context *ctx,
@@ -2240,16 +2253,17 @@
 	    event->state <= PERF_EVENT_STATE_ERROR)
 		return;
 
-	update_context_time(ctx);
+	if (ctx->is_active)
+		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+
 	__perf_event_mark_enabled(event);
 
 	if (!ctx->is_active)
 		return;
 
 	if (!event_filter_match(event)) {
-		if (is_cgroup_event(event)) {
-			perf_cgroup_set_timestamp(current, ctx); // XXX ?
+		if (is_cgroup_event(event))
 			perf_cgroup_defer_enabled(event);
-		}
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
 		return;
 	}
@@ -2259,8 +2273,10 @@
 	 * If the event is in a group and isn't the group leader,
 	 * then don't put it on unless the group is on.
 	 */
-	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
+	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
 		return;
+	}
 
 	task_ctx = cpuctx->task_ctx;
 	if (ctx->task)
@@ -2672,18 +2688,6 @@
 	 */
 	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
 		perf_cgroup_sched_out(task, next);
-}
-
-static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
-			       struct perf_event_context *ctx)
-{
-	if (!cpuctx->task_ctx)
-		return;
-
-	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
-		return;
-
-	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
 }
 
 /*
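For context, the enabled/running times this patch keeps consistent are the
ones user space reads to scale counts when events are multiplexed. Below is
a minimal user-space sketch (illustrative only, not part of the patch;
error handling and the measured workload are elided) showing where those
times surface around the ioctl that ends up in perf_event_enable():

/*
 * Illustrative sketch, not from this patch: read a counter with
 * PERF_FORMAT_TOTAL_TIME_{ENABLED,RUNNING} and scale it. The enable
 * ioctl below reaches perf_event_enable(); inconsistent time updates
 * there skew the time_enabled/time_running ratio used for scaling.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	struct { uint64_t value, time_enabled, time_running; } rf;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	/* measure the calling task, on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* -> perf_event_enable() */
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &rf, sizeof(rf)) == sizeof(rf) && rf.time_running)
		/* scale for multiplexing: count * enabled/running */
		printf("scaled count: %.0f\n",
		       rf.value * (double)rf.time_enabled / rf.time_running);

	close(fd);
	return 0;
}

If time_enabled advances while time_running does not (as could happen when
an event was enabled while its context was inactive), the scale factor
above inflates the estimated count; that is the "scaling" the subject line
refers to.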