Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf: Cleanup {start,commit,cancel}_txn details

Clarify some of the transactional group scheduling API details
and change it so that a successful ->commit_txn also closes
the transaction.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <1274803086.5882.1752.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

authored by

Peter Zijlstra and committed by
Ingo Molnar
8d2cacbb 3af9e859

+36 -28
+4 -3
arch/powerpc/kernel/perf_event.c
··· 754 754 * skip the schedulability test here, it will be peformed 755 755 * at commit time(->commit_txn) as a whole 756 756 */ 757 - if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED) 757 + if (cpuhw->group_flag & PERF_EVENT_TXN) 758 758 goto nocheck; 759 759 760 760 if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) ··· 858 858 { 859 859 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 860 860 861 - cpuhw->group_flag |= PERF_EVENT_TXN_STARTED; 861 + cpuhw->group_flag |= PERF_EVENT_TXN; 862 862 cpuhw->n_txn_start = cpuhw->n_events; 863 863 } 864 864 ··· 871 871 { 872 872 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 873 873 874 - cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED; 874 + cpuhw->group_flag &= ~PERF_EVENT_TXN; 875 875 } 876 876 877 877 /* ··· 897 897 for (i = cpuhw->n_txn_start; i < n; ++i) 898 898 cpuhw->event[i]->hw.config = cpuhw->events[i]; 899 899 900 + cpuhw->group_flag &= ~PERF_EVENT_TXN; 900 901 return 0; 901 902 } 902 903
+4 -3
arch/sparc/kernel/perf_event.c
··· 1005 1005 * skip the schedulability test here, it will be peformed 1006 1006 * at commit time(->commit_txn) as a whole 1007 1007 */ 1008 - if (cpuc->group_flag & PERF_EVENT_TXN_STARTED) 1008 + if (cpuc->group_flag & PERF_EVENT_TXN) 1009 1009 goto nocheck; 1010 1010 1011 1011 if (check_excludes(cpuc->event, n0, 1)) ··· 1102 1102 { 1103 1103 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 1104 1104 1105 - cpuhw->group_flag |= PERF_EVENT_TXN_STARTED; 1105 + cpuhw->group_flag |= PERF_EVENT_TXN; 1106 1106 } 1107 1107 1108 1108 /* ··· 1114 1114 { 1115 1115 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 1116 1116 1117 - cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED; 1117 + cpuhw->group_flag &= ~PERF_EVENT_TXN; 1118 1118 } 1119 1119 1120 1120 /* ··· 1137 1137 if (sparc_check_constraints(cpuc->event, cpuc->events, n)) 1138 1138 return -EAGAIN; 1139 1139 1140 + cpuc->group_flag &= ~PERF_EVENT_TXN; 1140 1141 return 0; 1141 1142 } 1142 1143
+5 -9
arch/x86/kernel/cpu/perf_event.c
··· 969 969 * skip the schedulability test here, it will be peformed 970 970 * at commit time(->commit_txn) as a whole 971 971 */ 972 - if (cpuc->group_flag & PERF_EVENT_TXN_STARTED) 972 + if (cpuc->group_flag & PERF_EVENT_TXN) 973 973 goto out; 974 974 975 975 ret = x86_pmu.schedule_events(cpuc, n, assign); ··· 1096 1096 * The events never got scheduled and ->cancel_txn will truncate 1097 1097 * the event_list. 1098 1098 */ 1099 - if (cpuc->group_flag & PERF_EVENT_TXN_STARTED) 1099 + if (cpuc->group_flag & PERF_EVENT_TXN) 1100 1100 return; 1101 1101 1102 1102 x86_pmu_stop(event); ··· 1388 1388 { 1389 1389 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1390 1390 1391 - cpuc->group_flag |= PERF_EVENT_TXN_STARTED; 1391 + cpuc->group_flag |= PERF_EVENT_TXN; 1392 1392 cpuc->n_txn = 0; 1393 1393 } 1394 1394 ··· 1401 1401 { 1402 1402 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1403 1403 1404 - cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED; 1404 + cpuc->group_flag &= ~PERF_EVENT_TXN; 1405 1405 /* 1406 1406 * Truncate the collected events. 1407 1407 */ ··· 1435 1435 */ 1436 1436 memcpy(cpuc->assign, assign, n*sizeof(int)); 1437 1437 1438 - /* 1439 - * Clear out the txn count so that ->cancel_txn() which gets 1440 - * run after ->commit_txn() doesn't undo things. 1441 - */ 1442 - cpuc->n_txn = 0; 1438 + cpuc->group_flag &= ~PERF_EVENT_TXN; 1443 1439 1444 1440 return 0; 1445 1441 }
+22 -5
include/linux/perf_event.h
··· 549 549 550 550 struct perf_event; 551 551 552 - #define PERF_EVENT_TXN_STARTED 1 552 + /* 553 + * Common implementation detail of pmu::{start,commit,cancel}_txn 554 + */ 555 + #define PERF_EVENT_TXN 0x1 553 556 554 557 /** 555 558 * struct pmu - generic performance monitoring unit ··· 566 563 void (*unthrottle) (struct perf_event *event); 567 564 568 565 /* 569 - * group events scheduling is treated as a transaction, 570 - * add group events as a whole and perform one schedulability test. 571 - * If test fails, roll back the whole group 566 + * Group events scheduling is treated as a transaction, add group 567 + * events as a whole and perform one schedulability test. If the test 568 + * fails, roll back the whole group 572 569 */ 573 570 571 + /* 572 + * Start the transaction, after this ->enable() doesn't need 573 + * to do schedulability tests. 574 + */ 574 575 void (*start_txn) (const struct pmu *pmu); 575 - void (*cancel_txn) (const struct pmu *pmu); 576 + /* 577 + * If ->start_txn() disabled the ->enable() schedulability test 578 + * then ->commit_txn() is required to perform one. On success 579 + * the transaction is closed. On error the transaction is kept 580 + * open until ->cancel_txn() is called. 581 + */ 576 582 int (*commit_txn) (const struct pmu *pmu); 583 + /* 584 + * Will cancel the transaction, assumes ->disable() is called for 585 + * each successfull ->enable() during the transaction. 586 + */ 587 + void (*cancel_txn) (const struct pmu *pmu); 577 588 }; 578 589 579 590 /**
+1 -8
kernel/perf_event.c
··· 675 675 struct perf_event *event, *partial_group = NULL; 676 676 const struct pmu *pmu = group_event->pmu; 677 677 bool txn = false; 678 - int ret; 679 678 680 679 if (group_event->state == PERF_EVENT_STATE_OFF) 681 680 return 0; ··· 702 703 } 703 704 } 704 705 705 - if (!txn) 706 + if (!txn || !pmu->commit_txn(pmu)) 706 707 return 0; 707 - 708 - ret = pmu->commit_txn(pmu); 709 - if (!ret) { 710 - pmu->cancel_txn(pmu); 711 - return 0; 712 - } 713 708 714 709 group_error: 715 710 /*