perf_counter, ftrace: Fix perf_counter integration

Adds a possible second part to the assign argument of TRACE_EVENT().

  TP_perf_assign(
	__perf_count(foo);
	__perf_addr(bar);
  )

Which, when specified, makes the swcounter increment by @foo instead
of the usual 1, and reports @bar for PERF_SAMPLE_ADDR (the data address
associated with the event) when the event triggers a counter overflow.
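
For illustration, a tracepoint that wants to count bytes rather than
calls could use it like this (the event name, prototype and fields
below are made up for this example; only TP_perf_assign(),
__perf_count() and __perf_addr() come from this patch). Note that
TP_perf_assign() follows TP_fast_assign() without a separating comma,
so both end up in the single assign argument of TRACE_EVENT():

  TRACE_EVENT(foo_copy,

	TP_PROTO(void *dst, size_t bytes),

	TP_ARGS(dst, bytes),

	TP_STRUCT__entry(
		__field(void *, dst)
		__field(size_t, bytes)
	),

	TP_fast_assign(
		__entry->dst	= dst;
		__entry->bytes	= bytes;
	)
	/* no comma: still part of the assign argument */
	TP_perf_assign(
		__perf_count(bytes);			/* increment by @bytes */
		__perf_addr((unsigned long)dst);	/* PERF_SAMPLE_ADDR */
	),

	TP_printk("dst=%p bytes=%zu", __entry->dst, __entry->bytes)
  );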

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Peter Zijlstra, committed by Ingo Molnar · 3a659305 e3560336

2 files changed: +88 -28

include/trace/ftrace.h (+85 -25)
···
 #undef TP_fast_assign
 #define TP_fast_assign(args...) args
 
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, func, print)	\
 static int							\
···
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
+#ifdef CONFIG_EVENT_PROFILE
+
+/*
+ * Generate the functions needed for tracepoint perf_counter support.
+ *
+ * static void ftrace_profile_<call>(proto)
+ * {
+ *	extern void perf_tpcounter_event(int, u64, u64);
+ *	u64 __addr = 0, __count = 1;
+ *
+ *	<assign>  <-- here we expand the TP_perf_assign() macro
+ *
+ *	perf_tpcounter_event(event_<call>.id, __addr, __count);
+ * }
+ *
+ * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *	int ret = 0;
+ *
+ *	if (!atomic_inc_return(&event_call->profile_count))
+ *		ret = register_trace_<call>(ftrace_profile_<call>);
+ *
+ *	return ret;
+ * }
+ *
+ * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
+ * {
+ *	if (atomic_add_negative(-1, &event_call->profile_count))
+ *		unregister_trace_<call>(ftrace_profile_<call>);
+ * }
+ *
+ */
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...)
+
+#undef TP_perf_assign
+#define TP_perf_assign(args...) args
+
+#undef __perf_addr
+#define __perf_addr(a) __addr = (a)
+
+#undef __perf_count
+#define __perf_count(c) __count = (c)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
+								\
+static void ftrace_profile_##call(proto)			\
+{								\
+	extern void perf_tpcounter_event(int, u64, u64);	\
+	u64 __addr = 0, __count = 1;				\
+	{ assign; }						\
+	perf_tpcounter_event(event_##call.id, __addr, __count);\
+}								\
+								\
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{								\
+	int ret = 0;						\
+								\
+	if (!atomic_inc_return(&event_call->profile_count))	\
+		ret = register_trace_##call(ftrace_profile_##call); \
+								\
+	return ret;						\
+}								\
+								\
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{								\
+	if (atomic_add_negative(-1, &event_call->profile_count))\
+		unregister_trace_##call(ftrace_profile_##call);	\
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef TP_perf_assign
+#define TP_perf_assign(args...)
+
+#endif
+
 /*
  * Stage 4 of the trace events.
  *
···
 #define TP_FMT(fmt, args...)	fmt "\n", ##args
 
 #ifdef CONFIG_EVENT_PROFILE
-#define _TRACE_PROFILE(call, proto, args)			\
-static void ftrace_profile_##call(proto)			\
-{								\
-	extern void perf_tpcounter_event(int);			\
-	perf_tpcounter_event(event_##call.id);			\
-}								\
-								\
-static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
-{								\
-	int ret = 0;						\
-								\
-	if (!atomic_inc_return(&event_call->profile_count))	\
-		ret = register_trace_##call(ftrace_profile_##call); \
-								\
-	return ret;						\
-}								\
-								\
-static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
-{								\
-	if (atomic_add_negative(-1, &event_call->profile_count))\
-		unregister_trace_##call(ftrace_profile_##call);	\
-}
 
 #define _TRACE_PROFILE_INIT(call)				\
 	.profile_count = ATOMIC_INIT(-1),			\
···
 	.profile_disable = ftrace_profile_disable_##call,
 
 #else
-#define _TRACE_PROFILE(call, proto, args)
 #define _TRACE_PROFILE_INIT(call)
 #endif
···
 #undef TRACE_EVENT
 #define TRACE_EVENT(call, proto, args, tstruct, assign, print)	\
-_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args))		\
 								\
 static struct ftrace_event_call event_##call;			\
 								\
···
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
-#undef _TRACE_PROFILE
 #undef _TRACE_PROFILE_INIT
 
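
To make the generated code concrete: for a hypothetical tracepoint
foo_copy declared with

  TP_perf_assign(
	__perf_count(bytes);
	__perf_addr((unsigned long)dst);
  )

the profile stage above would expand to roughly the following (the
names are illustrative; TP_fast_assign() expands to nothing in this
stage, TP_perf_assign() to its arguments):

  static void ftrace_profile_foo_copy(void *dst, size_t bytes)
  {
	extern void perf_tpcounter_event(int, u64, u64);
	u64 __addr = 0, __count = 1;
	{ __count = (bytes); __addr = ((unsigned long)dst); }
	perf_tpcounter_event(event_foo_copy.id, __addr, __count);
  }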
kernel/perf_counter.c (+3 -3)
···
 };
 
 #ifdef CONFIG_EVENT_PROFILE
-void perf_tpcounter_event(int event_id)
+void perf_tpcounter_event(int event_id, u64 addr, u64 count)
 {
 	struct perf_sample_data data = {
 		.regs = get_irq_regs(),
-		.addr = 0,
+		.addr = addr,
 	};
 
 	if (!data.regs)
 		data.regs = task_pt_regs(current);
 
-	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
+	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
···
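
A side note on the enable/disable helpers kept in the ftrace.h hunk
above: profile_count is a -1-based refcount. It starts at
ATOMIC_INIT(-1), so the first atomic_inc_return() returns 0 (hence the
negation) and registers the probe, and atomic_add_negative(-1, ...) is
only true when the last user drops the count back to -1. A minimal
userspace sketch of the same idiom, using the GCC/Clang __atomic
builtins in place of the kernel's atomic_t API:

  #include <stdio.h>

  /* mirrors .profile_count = ATOMIC_INIT(-1): no users yet */
  static int profile_count = -1;

  static void profile_enable(void)
  {
	/* first user takes the count -1 -> 0; register exactly once */
	if (!__atomic_add_fetch(&profile_count, 1, __ATOMIC_SEQ_CST))
		puts("register probe");
  }

  static void profile_disable(void)
  {
	/* like atomic_add_negative(-1, ...): true only on 0 -> -1,
	 * i.e. when the last user goes away */
	if (__atomic_add_fetch(&profile_count, -1, __ATOMIC_SEQ_CST) < 0)
		puts("unregister probe");
  }

  int main(void)
  {
	profile_enable();	/* registers */
	profile_enable();	/* already registered: no-op */
	profile_disable();	/* one user left: no-op */
	profile_disable();	/* unregisters */
	return 0;
  }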