Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf/marvell: Refactor to extract platform data

Introduce a refactor to the Marvell DDR PMU driver to extract platform
data ("pdata") from the existing driver. Prepare for the upcoming
support of the next version of the Performance Monitoring Unit (PMU) in
this driver.

No functional changes are made; this refactor solely improves code
organization and prepares for future enhancements.

While at it, fix a typo.

Signed-off-by: Gowthami Thiagarajan <gthiagarajan@marvell.com>
Link: https://lore.kernel.org/r/20241108040619.753343-2-gthiagarajan@marvell.com
Signed-off-by: Will Deacon <will@kernel.org>

Authored by Gowthami Thiagarajan; committed by Will Deacon.
Commit 349f77e1 (parent 8632306e)

+112 -48
drivers/perf/marvell_cn10k_ddr_pmu.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /* Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver 2 + /* 3 + * Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver 3 4 * 4 - * Copyright (C) 2021 Marvell. 5 + * Copyright (C) 2021-2024 Marvell. 5 6 */ 6 7 7 8 #include <linux/init.h> ··· 15 14 #include <linux/platform_device.h> 16 15 17 16 /* Performance Counters Operating Mode Control Registers */ 18 - #define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020 19 - #define OP_MODE_CTRL_VAL_MANNUAL 0x1 17 + #define CN10K_DDRC_PERF_CNT_OP_MODE_CTRL 0x8020 18 + #define OP_MODE_CTRL_VAL_MANUAL 0x1 20 19 21 20 /* Performance Counters Start Operation Control Registers */ 22 - #define DDRC_PERF_CNT_START_OP_CTRL 0x8028 21 + #define CN10K_DDRC_PERF_CNT_START_OP_CTRL 0x8028 23 22 #define START_OP_CTRL_VAL_START 0x1ULL 24 23 #define START_OP_CTRL_VAL_ACTIVE 0x2 25 24 26 25 /* Performance Counters End Operation Control Registers */ 27 - #define DDRC_PERF_CNT_END_OP_CTRL 0x8030 26 + #define CN10K_DDRC_PERF_CNT_END_OP_CTRL 0x8030 28 27 #define END_OP_CTRL_VAL_END 0x1ULL 29 28 30 29 /* Performance Counters End Status Registers */ 31 - #define DDRC_PERF_CNT_END_STATUS 0x8038 30 + #define CN10K_DDRC_PERF_CNT_END_STATUS 0x8038 32 31 #define END_STATUS_VAL_END_TIMER_MODE_END 0x1 33 32 34 33 /* Performance Counters Configuration Registers */ 35 - #define DDRC_PERF_CFG_BASE 0x8040 34 + #define CN10K_DDRC_PERF_CFG_BASE 0x8040 36 35 37 36 /* 8 Generic event counter + 2 fixed event counters */ 38 37 #define DDRC_PERF_NUM_GEN_COUNTERS 8 ··· 43 42 DDRC_PERF_NUM_FIX_COUNTERS) 44 43 45 44 /* Generic event counter registers */ 46 - #define DDRC_PERF_CFG(n) (DDRC_PERF_CFG_BASE + 8 * (n)) 45 + #define DDRC_PERF_CFG(base, n) ((base) + 8 * (n)) 47 46 #define EVENT_ENABLE BIT_ULL(63) 48 47 49 48 /* Two dedicated event counters for DDR reads and writes */ 50 49 #define EVENT_DDR_READS 101 51 50 #define EVENT_DDR_WRITES 100 52 51 52 + #define DDRC_PERF_REG(base, n) ((base) + 8 * (n)) 53 53 /* 
54 54 * programmable events IDs in programmable event counters. 55 55 * DO NOT change these event-id numbers, they are used to ··· 104 102 #define EVENT_HIF_RD_OR_WR 1 105 103 106 104 /* Event counter value registers */ 107 - #define DDRC_PERF_CNT_VALUE_BASE 0x8080 108 - #define DDRC_PERF_CNT_VALUE(n) (DDRC_PERF_CNT_VALUE_BASE + 8 * (n)) 105 + #define CN10K_DDRC_PERF_CNT_VALUE_BASE 0x8080 109 106 110 107 /* Fixed event counter enable/disable register */ 111 - #define DDRC_PERF_CNT_FREERUN_EN 0x80C0 108 + #define CN10K_DDRC_PERF_CNT_FREERUN_EN 0x80C0 112 109 #define DDRC_PERF_FREERUN_WRITE_EN 0x1 113 110 #define DDRC_PERF_FREERUN_READ_EN 0x2 114 111 115 112 /* Fixed event counter control register */ 116 - #define DDRC_PERF_CNT_FREERUN_CTRL 0x80C8 113 + #define CN10K_DDRC_PERF_CNT_FREERUN_CTRL 0x80C8 117 114 #define DDRC_FREERUN_WRITE_CNT_CLR 0x1 118 115 #define DDRC_FREERUN_READ_CNT_CLR 0x2 119 116 120 - /* Fixed event counter value register */ 121 - #define DDRC_PERF_CNT_VALUE_WR_OP 0x80D0 122 - #define DDRC_PERF_CNT_VALUE_RD_OP 0x80D8 123 117 #define DDRC_PERF_CNT_VALUE_OVERFLOW BIT_ULL(48) 124 118 #define DDRC_PERF_CNT_MAX_VALUE GENMASK_ULL(48, 0) 119 + 120 + /* Fixed event counter value register */ 121 + #define CN10K_DDRC_PERF_CNT_VALUE_WR_OP 0x80D0 122 + #define CN10K_DDRC_PERF_CNT_VALUE_RD_OP 0x80D8 125 123 126 124 struct cn10k_ddr_pmu { 127 125 struct pmu pmu; 128 126 void __iomem *base; 127 + const struct ddr_pmu_platform_data *p_data; 129 128 unsigned int cpu; 130 129 struct device *dev; 131 130 int active_events; ··· 136 133 }; 137 134 138 135 #define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu) 136 + 137 + struct ddr_pmu_platform_data { 138 + u64 counter_overflow_val; 139 + u64 counter_max_val; 140 + u64 cnt_base; 141 + u64 cfg_base; 142 + u64 cnt_op_mode_ctrl; 143 + u64 cnt_start_op_ctrl; 144 + u64 cnt_end_op_ctrl; 145 + u64 cnt_end_status; 146 + u64 cnt_freerun_en; 147 + u64 cnt_freerun_ctrl; 148 + u64 cnt_freerun_clr; 149 + u64 
cnt_value_wr_op; 150 + u64 cnt_value_rd_op; 151 + bool is_cn10k; 152 + }; 139 153 140 154 static ssize_t cn10k_ddr_pmu_event_show(struct device *dev, 141 155 struct device_attribute *attr, ··· 374 354 static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu, 375 355 int counter, bool enable) 376 356 { 357 + const struct ddr_pmu_platform_data *p_data = pmu->p_data; 377 358 u32 reg; 378 359 u64 val; 379 360 ··· 384 363 } 385 364 386 365 if (counter < DDRC_PERF_NUM_GEN_COUNTERS) { 387 - reg = DDRC_PERF_CFG(counter); 366 + reg = DDRC_PERF_CFG(p_data->cfg_base, counter); 388 367 val = readq_relaxed(pmu->base + reg); 389 368 390 369 if (enable) ··· 394 373 395 374 writeq_relaxed(val, pmu->base + reg); 396 375 } else { 397 - val = readq_relaxed(pmu->base + DDRC_PERF_CNT_FREERUN_EN); 376 + val = readq_relaxed(pmu->base + 377 + p_data->cnt_freerun_en); 398 378 if (enable) { 399 379 if (counter == DDRC_PERF_READ_COUNTER_IDX) 400 380 val |= DDRC_PERF_FREERUN_READ_EN; ··· 407 385 else 408 386 val &= ~DDRC_PERF_FREERUN_WRITE_EN; 409 387 } 410 - writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_EN); 388 + writeq_relaxed(val, pmu->base + 389 + p_data->cnt_freerun_en); 411 390 } 412 391 } 413 392 414 393 static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter) 415 394 { 395 + const struct ddr_pmu_platform_data *p_data = pmu->p_data; 416 396 u64 val; 417 397 418 398 if (counter == DDRC_PERF_READ_COUNTER_IDX) 419 - return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_RD_OP); 399 + return readq_relaxed(pmu->base + 400 + p_data->cnt_value_rd_op); 420 401 421 402 if (counter == DDRC_PERF_WRITE_COUNTER_IDX) 422 - return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_WR_OP); 403 + return readq_relaxed(pmu->base + 404 + p_data->cnt_value_wr_op); 423 405 424 - val = readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE(counter)); 406 + val = readq_relaxed(pmu->base + 407 + DDRC_PERF_REG(p_data->cnt_base, counter)); 425 408 return val; 426 409 } 427 410 428 411 
static void cn10k_ddr_perf_event_update(struct perf_event *event) 429 412 { 430 413 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); 414 + const struct ddr_pmu_platform_data *p_data = pmu->p_data; 431 415 struct hw_perf_event *hwc = &event->hw; 432 416 u64 prev_count, new_count, mask; 433 417 ··· 442 414 new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx); 443 415 } while (local64_xchg(&hwc->prev_count, new_count) != prev_count); 444 416 445 - mask = DDRC_PERF_CNT_MAX_VALUE; 417 + mask = p_data->counter_max_val; 446 418 447 419 local64_add((new_count - prev_count) & mask, &event->count); 448 420 } ··· 463 435 static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags) 464 436 { 465 437 struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu); 438 + const struct ddr_pmu_platform_data *p_data = pmu->p_data; 466 439 struct hw_perf_event *hwc = &event->hw; 467 440 u8 config = event->attr.config; 468 441 int counter, ret; ··· 483 454 484 455 if (counter < DDRC_PERF_NUM_GEN_COUNTERS) { 485 456 /* Generic counters, configure event id */ 486 - reg_offset = DDRC_PERF_CFG(counter); 457 + reg_offset = DDRC_PERF_CFG(p_data->cfg_base, counter); 487 458 ret = ddr_perf_get_event_bitmap(config, &val); 488 459 if (ret) 489 460 return ret; ··· 496 467 else 497 468 val = DDRC_FREERUN_WRITE_CNT_CLR; 498 469 499 - writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_CTRL); 470 + writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl); 500 471 } 501 472 502 473 hwc->state |= PERF_HES_STOPPED; ··· 541 512 static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu) 542 513 { 543 514 struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu); 515 + const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data; 544 516 545 517 writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base + 546 - DDRC_PERF_CNT_START_OP_CTRL); 518 + p_data->cnt_start_op_ctrl); 547 519 } 548 520 549 521 static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu) 550 522 { 551 523 struct cn10k_ddr_pmu 
*ddr_pmu = to_cn10k_ddr_pmu(pmu); 524 + const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data; 552 525 553 526 writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base + 554 - DDRC_PERF_CNT_END_OP_CTRL); 527 + p_data->cnt_end_op_ctrl); 555 528 } 556 529 557 530 static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu) ··· 580 549 581 550 static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu) 582 551 { 552 + const struct ddr_pmu_platform_data *p_data = pmu->p_data; 583 553 struct perf_event *event; 584 554 struct hw_perf_event *hwc; 585 555 u64 prev_count, new_count; ··· 618 586 continue; 619 587 620 588 value = cn10k_ddr_perf_read_counter(pmu, i); 621 - if (value == DDRC_PERF_CNT_MAX_VALUE) { 589 + if (value == p_data->counter_max_val) { 622 590 pr_info("Counter-(%d) reached max value\n", i); 623 591 cn10k_ddr_perf_event_update_all(pmu); 624 592 cn10k_ddr_perf_pmu_disable(&pmu->pmu); ··· 661 629 return 0; 662 630 } 663 631 632 + #if defined(CONFIG_ACPI) || defined(CONFIG_OF) 633 + static const struct ddr_pmu_platform_data cn10k_ddr_pmu_pdata = { 634 + .counter_overflow_val = BIT_ULL(48), 635 + .counter_max_val = GENMASK_ULL(48, 0), 636 + .cnt_base = CN10K_DDRC_PERF_CNT_VALUE_BASE, 637 + .cfg_base = CN10K_DDRC_PERF_CFG_BASE, 638 + .cnt_op_mode_ctrl = CN10K_DDRC_PERF_CNT_OP_MODE_CTRL, 639 + .cnt_start_op_ctrl = CN10K_DDRC_PERF_CNT_START_OP_CTRL, 640 + .cnt_end_op_ctrl = CN10K_DDRC_PERF_CNT_END_OP_CTRL, 641 + .cnt_end_status = CN10K_DDRC_PERF_CNT_END_STATUS, 642 + .cnt_freerun_en = CN10K_DDRC_PERF_CNT_FREERUN_EN, 643 + .cnt_freerun_ctrl = CN10K_DDRC_PERF_CNT_FREERUN_CTRL, 644 + .cnt_freerun_clr = 0, 645 + .cnt_value_wr_op = CN10K_DDRC_PERF_CNT_VALUE_WR_OP, 646 + .cnt_value_rd_op = CN10K_DDRC_PERF_CNT_VALUE_RD_OP, 647 + .is_cn10k = TRUE, 648 + }; 649 + #endif 650 + 664 651 static int cn10k_ddr_perf_probe(struct platform_device *pdev) 665 652 { 653 + const struct ddr_pmu_platform_data *dev_data; 666 654 struct cn10k_ddr_pmu *ddr_pmu; 
667 655 struct resource *res; 668 656 void __iomem *base; 657 + bool is_cn10k; 669 658 char *name; 670 659 int ret; 671 660 ··· 697 644 ddr_pmu->dev = &pdev->dev; 698 645 platform_set_drvdata(pdev, ddr_pmu); 699 646 647 + dev_data = device_get_match_data(&pdev->dev); 648 + if (!dev_data) { 649 + dev_err(&pdev->dev, "Error: No device match data found\n"); 650 + return -ENODEV; 651 + } 652 + 700 653 base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 701 654 if (IS_ERR(base)) 702 655 return PTR_ERR(base); 703 656 704 657 ddr_pmu->base = base; 705 658 706 - /* Setup the PMU counter to work in manual mode */ 707 - writeq_relaxed(OP_MODE_CTRL_VAL_MANNUAL, ddr_pmu->base + 708 - DDRC_PERF_CNT_OP_MODE_CTRL); 659 + ddr_pmu->p_data = dev_data; 660 + is_cn10k = ddr_pmu->p_data->is_cn10k; 709 661 710 - ddr_pmu->pmu = (struct pmu) { 711 - .module = THIS_MODULE, 712 - .capabilities = PERF_PMU_CAP_NO_EXCLUDE, 713 - .task_ctx_nr = perf_invalid_context, 714 - .attr_groups = cn10k_attr_groups, 715 - .event_init = cn10k_ddr_perf_event_init, 716 - .add = cn10k_ddr_perf_event_add, 717 - .del = cn10k_ddr_perf_event_del, 718 - .start = cn10k_ddr_perf_event_start, 719 - .stop = cn10k_ddr_perf_event_stop, 720 - .read = cn10k_ddr_perf_event_update, 721 - .pmu_enable = cn10k_ddr_perf_pmu_enable, 722 - .pmu_disable = cn10k_ddr_perf_pmu_disable, 723 - }; 662 + if (is_cn10k) { 663 + /* Setup the PMU counter to work in manual mode */ 664 + writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL, ddr_pmu->base + 665 + ddr_pmu->p_data->cnt_op_mode_ctrl); 666 + 667 + ddr_pmu->pmu = (struct pmu) { 668 + .module = THIS_MODULE, 669 + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, 670 + .task_ctx_nr = perf_invalid_context, 671 + .attr_groups = cn10k_attr_groups, 672 + .event_init = cn10k_ddr_perf_event_init, 673 + .add = cn10k_ddr_perf_event_add, 674 + .del = cn10k_ddr_perf_event_del, 675 + .start = cn10k_ddr_perf_event_start, 676 + .stop = cn10k_ddr_perf_event_stop, 677 + .read = cn10k_ddr_perf_event_update, 678 + 
.pmu_enable = cn10k_ddr_perf_pmu_enable, 679 + .pmu_disable = cn10k_ddr_perf_pmu_disable, 680 + }; 681 + } 724 682 725 683 /* Choose this cpu to collect perf data */ 726 684 ddr_pmu->cpu = raw_smp_processor_id(); ··· 752 688 if (ret) 753 689 goto error; 754 690 755 - pr_info("CN10K DDR PMU Driver for ddrc@%llx\n", res->start); 691 + pr_info("DDR PMU Driver for ddrc@%llx\n", res->start); 756 692 return 0; 757 693 error: 758 694 cpuhp_state_remove_instance_nocalls( ··· 774 710 775 711 #ifdef CONFIG_OF 776 712 static const struct of_device_id cn10k_ddr_pmu_of_match[] = { 777 - { .compatible = "marvell,cn10k-ddr-pmu", }, 713 + { .compatible = "marvell,cn10k-ddr-pmu", .data = &cn10k_ddr_pmu_pdata }, 778 714 { }, 779 715 }; 780 716 MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match); ··· 782 718 783 719 #ifdef CONFIG_ACPI 784 720 static const struct acpi_device_id cn10k_ddr_pmu_acpi_match[] = { 785 - {"MRVL000A", 0}, 721 + {"MRVL000A", (kernel_ulong_t)&cn10k_ddr_pmu_pdata }, 786 722 {}, 787 723 }; 788 724 MODULE_DEVICE_TABLE(acpi, cn10k_ddr_pmu_acpi_match);