Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf: Remove the 'match' callback for auxiliary events processing

It gives the following benefits:

- only one function pointer is passed along the way

- the 'match' function is called within the output function
  and can be inlined by the compiler

Suggested-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1373388991-9711-1-git-send-email-jolsa@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Jiri Olsa and committed by Ingo Molnar
67516844 058ebd0e

+39 -40
kernel/events/core.c
··· 4680 4680 perf_output_end(&handle); 4681 4681 } 4682 4682 4683 - typedef int (perf_event_aux_match_cb)(struct perf_event *event, void *data); 4684 4683 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data); 4685 4684 4686 4685 static void 4687 4686 perf_event_aux_ctx(struct perf_event_context *ctx, 4688 - perf_event_aux_match_cb match, 4689 4687 perf_event_aux_output_cb output, 4690 4688 void *data) 4691 4689 { ··· 4694 4696 continue; 4695 4697 if (!event_filter_match(event)) 4696 4698 continue; 4697 - if (match(event, data)) 4698 - output(event, data); 4699 + output(event, data); 4699 4700 } 4700 4701 } 4701 4702 4702 4703 static void 4703 - perf_event_aux(perf_event_aux_match_cb match, 4704 - perf_event_aux_output_cb output, 4705 - void *data, 4704 + perf_event_aux(perf_event_aux_output_cb output, void *data, 4706 4705 struct perf_event_context *task_ctx) 4707 4706 { 4708 4707 struct perf_cpu_context *cpuctx; ··· 4712 4717 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 4713 4718 if (cpuctx->unique_pmu != pmu) 4714 4719 goto next; 4715 - perf_event_aux_ctx(&cpuctx->ctx, match, output, data); 4720 + perf_event_aux_ctx(&cpuctx->ctx, output, data); 4716 4721 if (task_ctx) 4717 4722 goto next; 4718 4723 ctxn = pmu->task_ctx_nr; ··· 4720 4725 goto next; 4721 4726 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); 4722 4727 if (ctx) 4723 - perf_event_aux_ctx(ctx, match, output, data); 4728 + perf_event_aux_ctx(ctx, output, data); 4724 4729 next: 4725 4730 put_cpu_ptr(pmu->pmu_cpu_context); 4726 4731 } 4727 4732 4728 4733 if (task_ctx) { 4729 4734 preempt_disable(); 4730 - perf_event_aux_ctx(task_ctx, match, output, data); 4735 + perf_event_aux_ctx(task_ctx, output, data); 4731 4736 preempt_enable(); 4732 4737 } 4733 4738 rcu_read_unlock(); ··· 4754 4759 } event_id; 4755 4760 }; 4756 4761 4762 + static int perf_event_task_match(struct perf_event *event) 4763 + { 4764 + return event->attr.comm || event->attr.mmap || 4765 + 
event->attr.mmap_data || event->attr.task; 4766 + } 4767 + 4757 4768 static void perf_event_task_output(struct perf_event *event, 4758 4769 void *data) 4759 4770 { ··· 4768 4767 struct perf_sample_data sample; 4769 4768 struct task_struct *task = task_event->task; 4770 4769 int ret, size = task_event->event_id.header.size; 4770 + 4771 + if (!perf_event_task_match(event)) 4772 + return; 4771 4773 4772 4774 perf_event_header__init_id(&task_event->event_id.header, &sample, event); 4773 4775 ··· 4792 4788 perf_output_end(&handle); 4793 4789 out: 4794 4790 task_event->event_id.header.size = size; 4795 - } 4796 - 4797 - static int perf_event_task_match(struct perf_event *event, 4798 - void *data __maybe_unused) 4799 - { 4800 - return event->attr.comm || event->attr.mmap || 4801 - event->attr.mmap_data || event->attr.task; 4802 4791 } 4803 4792 4804 4793 static void perf_event_task(struct task_struct *task, ··· 4822 4825 }, 4823 4826 }; 4824 4827 4825 - perf_event_aux(perf_event_task_match, 4826 - perf_event_task_output, 4828 + perf_event_aux(perf_event_task_output, 4827 4829 &task_event, 4828 4830 task_ctx); 4829 4831 } ··· 4849 4853 } event_id; 4850 4854 }; 4851 4855 4856 + static int perf_event_comm_match(struct perf_event *event) 4857 + { 4858 + return event->attr.comm; 4859 + } 4860 + 4852 4861 static void perf_event_comm_output(struct perf_event *event, 4853 4862 void *data) 4854 4863 { ··· 4862 4861 struct perf_sample_data sample; 4863 4862 int size = comm_event->event_id.header.size; 4864 4863 int ret; 4864 + 4865 + if (!perf_event_comm_match(event)) 4866 + return; 4865 4867 4866 4868 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); 4867 4869 ret = perf_output_begin(&handle, event, ··· 4887 4883 comm_event->event_id.header.size = size; 4888 4884 } 4889 4885 4890 - static int perf_event_comm_match(struct perf_event *event, 4891 - void *data __maybe_unused) 4892 - { 4893 - return event->attr.comm; 4894 - } 4895 - 4896 4886 static void 
perf_event_comm_event(struct perf_comm_event *comm_event) 4897 4887 { 4898 4888 char comm[TASK_COMM_LEN]; ··· 4901 4903 4902 4904 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 4903 4905 4904 - perf_event_aux(perf_event_comm_match, 4905 - perf_event_comm_output, 4906 + perf_event_aux(perf_event_comm_output, 4906 4907 comm_event, 4907 4908 NULL); 4908 4909 } ··· 4964 4967 } event_id; 4965 4968 }; 4966 4969 4970 + static int perf_event_mmap_match(struct perf_event *event, 4971 + void *data) 4972 + { 4973 + struct perf_mmap_event *mmap_event = data; 4974 + struct vm_area_struct *vma = mmap_event->vma; 4975 + int executable = vma->vm_flags & VM_EXEC; 4976 + 4977 + return (!executable && event->attr.mmap_data) || 4978 + (executable && event->attr.mmap); 4979 + } 4980 + 4967 4981 static void perf_event_mmap_output(struct perf_event *event, 4968 4982 void *data) 4969 4983 { ··· 4983 4975 struct perf_sample_data sample; 4984 4976 int size = mmap_event->event_id.header.size; 4985 4977 int ret; 4978 + 4979 + if (!perf_event_mmap_match(event, data)) 4980 + return; 4986 4981 4987 4982 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); 4988 4983 ret = perf_output_begin(&handle, event, ··· 5005 4994 perf_output_end(&handle); 5006 4995 out: 5007 4996 mmap_event->event_id.header.size = size; 5008 - } 5009 - 5010 - static int perf_event_mmap_match(struct perf_event *event, 5011 - void *data) 5012 - { 5013 - struct perf_mmap_event *mmap_event = data; 5014 - struct vm_area_struct *vma = mmap_event->vma; 5015 - int executable = vma->vm_flags & VM_EXEC; 5016 - 5017 - return (!executable && event->attr.mmap_data) || 5018 - (executable && event->attr.mmap); 5019 4997 } 5020 4998 5021 4999 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) ··· 5070 5070 5071 5071 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 5072 5072 5073 - perf_event_aux(perf_event_mmap_match, 5074 - perf_event_mmap_output, 5073 
+ perf_event_aux(perf_event_mmap_output, 5075 5074 mmap_event, 5076 5075 NULL); 5077 5076