Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm_pmu: Tidy up clear_event_idx callbacks

The armpmu uses get_event_idx callback to allocate an event
counter for a given event, which marks the selected counter
as "used". Now, when we delete the counter, the arm_pmu goes
ahead and clears the "used" bit and then invokes the "clear_event_idx"
callback, which splits the job between the core code
and the backend. To keep things tidy, mandate the implementation
of clear_event_idx() and add it for existing backends.
This will be useful for adding the chained event support, where
we leave the event idx maintenance to the backend.

Also, when an event is removed from the PMU, reset the hw.idx
to indicate that a counter is not allocated for this event,
to help the backends do better checks. This will be also used
for the chain counter support.

Cc: Will Deacon <will.deacon@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>

authored by

Suzuki K Poulose and committed by
Will Deacon
7dfc8db1 e2da97d3

+35 -4
+8
arch/arm/kernel/perf_event_v6.c
··· 411 411 } 412 412 } 413 413 414 + static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc, 415 + struct perf_event *event) 416 + { 417 + clear_bit(event->hw.idx, cpuc->used_mask); 418 + } 419 + 414 420 static void armv6pmu_disable_event(struct perf_event *event) 415 421 { 416 422 unsigned long val, mask, evt, flags; ··· 497 491 cpu_pmu->read_counter = armv6pmu_read_counter; 498 492 cpu_pmu->write_counter = armv6pmu_write_counter; 499 493 cpu_pmu->get_event_idx = armv6pmu_get_event_idx; 494 + cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx; 500 495 cpu_pmu->start = armv6pmu_start; 501 496 cpu_pmu->stop = armv6pmu_stop; 502 497 cpu_pmu->map_event = armv6_map_event; ··· 548 541 cpu_pmu->read_counter = armv6pmu_read_counter; 549 542 cpu_pmu->write_counter = armv6pmu_write_counter; 550 543 cpu_pmu->get_event_idx = armv6pmu_get_event_idx; 544 + cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx; 551 545 cpu_pmu->start = armv6pmu_start; 552 546 cpu_pmu->stop = armv6pmu_stop; 553 547 cpu_pmu->map_event = armv6mpcore_map_event;
+9
arch/arm/kernel/perf_event_v7.c
··· 1058 1058 return -EAGAIN; 1059 1059 } 1060 1060 1061 + static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc, 1062 + struct perf_event *event) 1063 + { 1064 + clear_bit(event->hw.idx, cpuc->used_mask); 1065 + } 1066 + 1061 1067 /* 1062 1068 * Add an event filter to a given event. This will only work for PMUv2 PMUs. 1063 1069 */ ··· 1173 1167 cpu_pmu->read_counter = armv7pmu_read_counter; 1174 1168 cpu_pmu->write_counter = armv7pmu_write_counter; 1175 1169 cpu_pmu->get_event_idx = armv7pmu_get_event_idx; 1170 + cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx; 1176 1171 cpu_pmu->start = armv7pmu_start; 1177 1172 cpu_pmu->stop = armv7pmu_stop; 1178 1173 cpu_pmu->reset = armv7pmu_reset; ··· 1644 1637 bool venum_event = EVENT_VENUM(hwc->config_base); 1645 1638 bool krait_event = EVENT_CPU(hwc->config_base); 1646 1639 1640 + armv7pmu_clear_event_idx(cpuc, event); 1647 1641 if (venum_event || krait_event) { 1648 1642 bit = krait_event_to_bit(event, region, group); 1649 1643 clear_bit(bit, cpuc->used_mask); ··· 1974 1966 bool venum_event = EVENT_VENUM(hwc->config_base); 1975 1967 bool scorpion_event = EVENT_CPU(hwc->config_base); 1976 1968 1969 + armv7pmu_clear_event_idx(cpuc, event); 1977 1970 if (venum_event || scorpion_event) { 1978 1971 bit = scorpion_event_to_bit(event, region, group); 1979 1972 clear_bit(bit, cpuc->used_mask);
+8
arch/arm/kernel/perf_event_xscale.c
··· 292 292 } 293 293 } 294 294 295 + static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc, 296 + struct perf_event *event) 297 + { 298 + clear_bit(event->hw.idx, cpuc->used_mask); 299 + } 300 + 295 301 static void xscale1pmu_start(struct arm_pmu *cpu_pmu) 296 302 { 297 303 unsigned long flags, val; ··· 376 370 cpu_pmu->read_counter = xscale1pmu_read_counter; 377 371 cpu_pmu->write_counter = xscale1pmu_write_counter; 378 372 cpu_pmu->get_event_idx = xscale1pmu_get_event_idx; 373 + cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx; 379 374 cpu_pmu->start = xscale1pmu_start; 380 375 cpu_pmu->stop = xscale1pmu_stop; 381 376 cpu_pmu->map_event = xscale_map_event; ··· 745 738 cpu_pmu->read_counter = xscale2pmu_read_counter; 746 739 cpu_pmu->write_counter = xscale2pmu_write_counter; 747 740 cpu_pmu->get_event_idx = xscale2pmu_get_event_idx; 741 + cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx; 748 742 cpu_pmu->start = xscale2pmu_start; 749 743 cpu_pmu->stop = xscale2pmu_stop; 750 744 cpu_pmu->map_event = xscale_map_event;
+7
arch/arm64/kernel/perf_event.c
··· 778 778 return -EAGAIN; 779 779 } 780 780 781 + static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc, 782 + struct perf_event *event) 783 + { 784 + clear_bit(event->hw.idx, cpuc->used_mask); 785 + } 786 + 781 787 /* 782 788 * Add an event filter to a given event. This will only work for PMUv2 PMUs. 783 789 */ ··· 962 956 cpu_pmu->read_counter = armv8pmu_read_counter, 963 957 cpu_pmu->write_counter = armv8pmu_write_counter, 964 958 cpu_pmu->get_event_idx = armv8pmu_get_event_idx, 959 + cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx, 965 960 cpu_pmu->start = armv8pmu_start, 966 961 cpu_pmu->stop = armv8pmu_stop, 967 962 cpu_pmu->reset = armv8pmu_reset,
+3 -4
drivers/perf/arm_pmu.c
··· 238 238 239 239 armpmu_stop(event, PERF_EF_UPDATE); 240 240 hw_events->events[idx] = NULL; 241 - clear_bit(idx, hw_events->used_mask); 242 - if (armpmu->clear_event_idx) 243 - armpmu->clear_event_idx(hw_events, event); 244 - 241 + armpmu->clear_event_idx(hw_events, event); 245 242 perf_event_update_userpage(event); 243 + /* Clear the allocated counter */ 244 + hwc->idx = -1; 246 245 } 247 246 248 247 static int