Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf/marvell: Refactor to extract PMU operations

Introduce a refactor to the Marvell DDR PMU driver to extract
PMU operations ("pmu ops") from the existing driver.

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@Huawei.com>
Signed-off-by: Gowthami Thiagarajan <gthiagarajan@marvell.com>
Link: https://lore.kernel.org/r/20241108040619.753343-3-gthiagarajan@marvell.com
Signed-off-by: Will Deacon <will@kernel.org>

Authored by Gowthami Thiagarajan; committed by Will Deacon.
0045de7e 349f77e1

+83 -22
drivers/perf/marvell_cn10k_ddr_pmu.c
··· 127 127 struct pmu pmu; 128 128 void __iomem *base; 129 129 const struct ddr_pmu_platform_data *p_data; 130 + const struct ddr_pmu_ops *ops; 130 131 unsigned int cpu; 131 132 struct device *dev; 132 133 int active_events; 133 134 struct perf_event *events[DDRC_PERF_NUM_COUNTERS]; 134 135 struct hrtimer hrtimer; 135 136 struct hlist_node node; 137 + }; 138 + 139 + struct ddr_pmu_ops { 140 + void (*enable_read_freerun_counter)(struct cn10k_ddr_pmu *pmu, 141 + bool enable); 142 + void (*enable_write_freerun_counter)(struct cn10k_ddr_pmu *pmu, 143 + bool enable); 144 + void (*clear_read_freerun_counter)(struct cn10k_ddr_pmu *pmu); 145 + void (*clear_write_freerun_counter)(struct cn10k_ddr_pmu *pmu); 146 + void (*pmu_overflow_handler)(struct cn10k_ddr_pmu *pmu, int evt_idx); 136 147 }; 137 148 138 149 #define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu) ··· 386 375 int counter, bool enable) 387 376 { 388 377 const struct ddr_pmu_platform_data *p_data = pmu->p_data; 378 + const struct ddr_pmu_ops *ops = pmu->ops; 389 379 u32 reg; 390 380 u64 val; 391 381 ··· 406 394 407 395 writeq_relaxed(val, pmu->base + reg); 408 396 } else { 409 - val = readq_relaxed(pmu->base + 410 - p_data->cnt_freerun_en); 411 - if (enable) { 412 - if (counter == DDRC_PERF_READ_COUNTER_IDX) 413 - val |= DDRC_PERF_FREERUN_READ_EN; 414 - else 415 - val |= DDRC_PERF_FREERUN_WRITE_EN; 416 - } else { 417 - if (counter == DDRC_PERF_READ_COUNTER_IDX) 418 - val &= ~DDRC_PERF_FREERUN_READ_EN; 419 - else 420 - val &= ~DDRC_PERF_FREERUN_WRITE_EN; 421 - } 422 - writeq_relaxed(val, pmu->base + 423 - p_data->cnt_freerun_en); 397 + if (counter == DDRC_PERF_READ_COUNTER_IDX) 398 + ops->enable_read_freerun_counter(pmu, enable); 399 + else 400 + ops->enable_write_freerun_counter(pmu, enable); 424 401 } 425 402 } 426 403 ··· 465 464 { 466 465 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); 467 466 const struct ddr_pmu_platform_data *p_data = pmu->p_data; 467 + const struct ddr_pmu_ops 
*ops = pmu->ops; 468 468 struct hw_perf_event *hwc = &event->hw; 469 469 u8 config = event->attr.config; 470 470 int counter, ret; ··· 494 492 } else { 495 493 /* fixed event counter, clear counter value */ 496 494 if (counter == DDRC_PERF_READ_COUNTER_IDX) 497 - val = DDRC_FREERUN_READ_CNT_CLR; 495 + ops->clear_read_freerun_counter(pmu); 498 496 else 499 - val = DDRC_FREERUN_WRITE_CNT_CLR; 500 - 501 - writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl); 497 + ops->clear_write_freerun_counter(pmu); 502 498 } 503 499 504 500 hwc->state |= PERF_HES_STOPPED; ··· 578 578 } 579 579 } 580 580 581 + static void ddr_pmu_enable_read_freerun(struct cn10k_ddr_pmu *pmu, bool enable) 582 + { 583 + const struct ddr_pmu_platform_data *p_data = pmu->p_data; 584 + u64 val; 585 + 586 + val = readq_relaxed(pmu->base + p_data->cnt_freerun_en); 587 + if (enable) 588 + val |= DDRC_PERF_FREERUN_READ_EN; 589 + else 590 + val &= ~DDRC_PERF_FREERUN_READ_EN; 591 + 592 + writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en); 593 + } 594 + 595 + static void ddr_pmu_enable_write_freerun(struct cn10k_ddr_pmu *pmu, bool enable) 596 + { 597 + const struct ddr_pmu_platform_data *p_data = pmu->p_data; 598 + u64 val; 599 + 600 + val = readq_relaxed(pmu->base + p_data->cnt_freerun_en); 601 + if (enable) 602 + val |= DDRC_PERF_FREERUN_WRITE_EN; 603 + else 604 + val &= ~DDRC_PERF_FREERUN_WRITE_EN; 605 + 606 + writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en); 607 + } 608 + 609 + static void ddr_pmu_read_clear_freerun(struct cn10k_ddr_pmu *pmu) 610 + { 611 + const struct ddr_pmu_platform_data *p_data = pmu->p_data; 612 + u64 val; 613 + 614 + val = DDRC_FREERUN_READ_CNT_CLR; 615 + writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl); 616 + } 617 + 618 + static void ddr_pmu_write_clear_freerun(struct cn10k_ddr_pmu *pmu) 619 + { 620 + const struct ddr_pmu_platform_data *p_data = pmu->p_data; 621 + u64 val; 622 + 623 + val = DDRC_FREERUN_WRITE_CNT_CLR; 624 + writeq_relaxed(val, pmu->base + 
p_data->cnt_freerun_ctrl); 625 + } 626 + 627 + static void ddr_pmu_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx) 628 + { 629 + cn10k_ddr_perf_event_update_all(pmu); 630 + cn10k_ddr_perf_pmu_disable(&pmu->pmu); 631 + cn10k_ddr_perf_pmu_enable(&pmu->pmu); 632 + } 633 + 581 634 static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu) 582 635 { 583 636 const struct ddr_pmu_platform_data *p_data = pmu->p_data; 637 + const struct ddr_pmu_ops *ops = pmu->ops; 584 638 struct perf_event *event; 585 639 struct hw_perf_event *hwc; 586 640 u64 prev_count, new_count; ··· 674 620 value = cn10k_ddr_perf_read_counter(pmu, i); 675 621 if (value == p_data->counter_max_val) { 676 622 pr_info("Counter-(%d) reached max value\n", i); 677 - cn10k_ddr_perf_event_update_all(pmu); 678 - cn10k_ddr_perf_pmu_disable(&pmu->pmu); 679 - cn10k_ddr_perf_pmu_enable(&pmu->pmu); 623 + ops->pmu_overflow_handler(pmu, i); 680 624 } 681 625 } 682 626 ··· 712 660 pmu->cpu = target; 713 661 return 0; 714 662 } 663 + 664 + static const struct ddr_pmu_ops ddr_pmu_ops = { 665 + .enable_read_freerun_counter = ddr_pmu_enable_read_freerun, 666 + .enable_write_freerun_counter = ddr_pmu_enable_write_freerun, 667 + .clear_read_freerun_counter = ddr_pmu_read_clear_freerun, 668 + .clear_write_freerun_counter = ddr_pmu_write_clear_freerun, 669 + .pmu_overflow_handler = ddr_pmu_overflow_hander, 670 + }; 715 671 716 672 #if defined(CONFIG_ACPI) || defined(CONFIG_OF) 717 673 static const struct ddr_pmu_platform_data cn10k_ddr_pmu_pdata = { ··· 773 713 is_cn10k = ddr_pmu->p_data->is_cn10k; 774 714 775 715 if (is_cn10k) { 716 + ddr_pmu->ops = &ddr_pmu_ops; 776 717 /* Setup the PMU counter to work in manual mode */ 777 718 writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL, ddr_pmu->base + 778 719 ddr_pmu->p_data->cnt_op_mode_ctrl);