Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

arm64: perf: Clean up enable/disable calls

Reading this code bordered on painful, what with all the repetition and
pointless return values. More fundamentally, dribbling the hardware
enables and disables in one bit at a time incurs needless system
register overhead for chained events and on reset. We already use
bitmask values for the KVM hooks, so consolidate all the register
accesses to match, and make a reasonable saving in both source and
object code.
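
To make the saving concrete, here is a rough stand-alone model (not part
of the patch; the write_pmcntenset() stub and the counter index are
invented for illustration). A chained event occupies two adjacent
counters, so building the mask up front halves the enable-path sysreg
traffic:

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Stand-in for write_sysreg(..., pmcntenset_el0); it just logs the access. */
  static unsigned int nr_writes;

  static void write_pmcntenset(uint32_t bits)
  {
  	nr_writes++;
  	printf("pmcntenset_el0 <- 0x%08" PRIx32 "\n", bits);
  }

  int main(void)
  {
  	int counter = 5;	/* hypothetical index of a chained event */
  	uint32_t mask = (1u << counter) | (1u << (counter - 1));

  	/* Old scheme: one sysreg write per counter of the chained pair. */
  	write_pmcntenset(1u << counter);
  	write_pmcntenset(1u << (counter - 1));

  	/* New scheme: build the mask first, then write once. */
  	write_pmcntenset(mask);

  	printf("accesses: 2 old vs 1 new (%u logged in total)\n", nr_writes);
  	return 0;
  }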

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>

Authored by Robin Murphy, committed by Will Deacon (29227d6e 06236821)

+35 -52
arch/arm64/kernel/perf_event.c
@@ -450,86 +450,74 @@
 	}
 }
 
-static inline int armv8pmu_enable_counter(int idx)
+static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
 {
-	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	write_sysreg(BIT(counter), pmcntenset_el0);
-	return idx;
+	int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+	u32 mask = BIT(counter);
+
+	if (armv8pmu_event_is_chained(event))
+		mask |= BIT(counter - 1);
+	return mask;
+}
+
+static inline void armv8pmu_enable_counter(u32 mask)
+{
+	write_sysreg(mask, pmcntenset_el0);
 }
 
 static inline void armv8pmu_enable_event_counter(struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
-	int idx = event->hw.idx;
-	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
+	u32 mask = armv8pmu_event_cnten_mask(event);
 
-	if (armv8pmu_event_is_chained(event))
-		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
-
-	kvm_set_pmu_events(counter_bits, attr);
+	kvm_set_pmu_events(mask, attr);
 
 	/* We rely on the hypervisor switch code to enable guest counters */
-	if (!kvm_pmu_counter_deferred(attr)) {
-		armv8pmu_enable_counter(idx);
-		if (armv8pmu_event_is_chained(event))
-			armv8pmu_enable_counter(idx - 1);
-	}
+	if (!kvm_pmu_counter_deferred(attr))
+		armv8pmu_enable_counter(mask);
 }
 
-static inline int armv8pmu_disable_counter(int idx)
+static inline void armv8pmu_disable_counter(u32 mask)
 {
-	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	write_sysreg(BIT(counter), pmcntenclr_el0);
-	return idx;
+	write_sysreg(mask, pmcntenclr_el0);
 }
 
 static inline void armv8pmu_disable_event_counter(struct perf_event *event)
 {
-	struct hw_perf_event *hwc = &event->hw;
 	struct perf_event_attr *attr = &event->attr;
-	int idx = hwc->idx;
-	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
+	u32 mask = armv8pmu_event_cnten_mask(event);
 
-	if (armv8pmu_event_is_chained(event))
-		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
-
-	kvm_clr_pmu_events(counter_bits);
+	kvm_clr_pmu_events(mask);
 
 	/* We rely on the hypervisor switch code to disable guest counters */
-	if (!kvm_pmu_counter_deferred(attr)) {
-		if (armv8pmu_event_is_chained(event))
-			armv8pmu_disable_counter(idx - 1);
-		armv8pmu_disable_counter(idx);
-	}
+	if (!kvm_pmu_counter_deferred(attr))
+		armv8pmu_disable_counter(mask);
 }
 
-static inline int armv8pmu_enable_intens(int idx)
+static inline void armv8pmu_enable_intens(u32 mask)
 {
-	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	write_sysreg(BIT(counter), pmintenset_el1);
-	return idx;
+	write_sysreg(mask, pmintenset_el1);
 }
 
-static inline int armv8pmu_enable_event_irq(struct perf_event *event)
+static inline void armv8pmu_enable_event_irq(struct perf_event *event)
 {
-	return armv8pmu_enable_intens(event->hw.idx);
+	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+	armv8pmu_enable_intens(BIT(counter));
 }
 
-static inline int armv8pmu_disable_intens(int idx)
+static inline void armv8pmu_disable_intens(u32 mask)
 {
-	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	write_sysreg(BIT(counter), pmintenclr_el1);
+	write_sysreg(mask, pmintenclr_el1);
 	isb();
 	/* Clear the overflow flag in case an interrupt is pending. */
-	write_sysreg(BIT(counter), pmovsclr_el0);
+	write_sysreg(mask, pmovsclr_el0);
 	isb();
-
-	return idx;
 }
 
-static inline int armv8pmu_disable_event_irq(struct perf_event *event)
+static inline void armv8pmu_disable_event_irq(struct perf_event *event)
 {
-	return armv8pmu_disable_intens(event->hw.idx);
+	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
+	armv8pmu_disable_intens(BIT(counter));
 }
 
 static inline u32 armv8pmu_getreset_flags(void)
@@ -802,14 +814,9 @@
 
 static void armv8pmu_reset(void *info)
 {
-	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
-	u32 idx, nb_cnt = cpu_pmu->num_events;
-
 	/* The counter and interrupt enable registers are unknown at reset. */
-	for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
-		armv8pmu_disable_counter(idx);
-		armv8pmu_disable_intens(idx);
-	}
+	armv8pmu_disable_counter(U32_MAX);
+	armv8pmu_disable_intens(U32_MAX);
 
 	/* Clear the counters we flip at guest entry/exit */
 	kvm_clr_pmu_events(U32_MAX);
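
One observation on the armv8pmu_reset() hunk (an editorial note, not part
of the commit message): writing U32_MAX to pmcntenclr_el0 and
pmintenclr_el1 clears every counter-enable and interrupt-enable bit in a
single access apiece, where the old loop issued two sysreg writes per
implemented counter. Bits for unimplemented counters are RES0 in these
registers, so the all-ones mask is harmless regardless of PMCR_EL0.N.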