Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf/marvell: Odyssey DDR Performance monitor support

Odyssey DRAM Subsystem supports eight counters for monitoring performance
and software can program those counters to monitor any of the defined
performance events. Supported performance events include those counted
at the interface between the DDR controller and the PHY, interface between
the DDR Controller and the CHI interconnect, or within the DDR Controller.

Additionally, the DSS supports two fixed performance event counters, one
for DDR reads and the other for DDR writes.

Signed-off-by: Gowthami Thiagarajan <gthiagarajan@marvell.com>
Link: https://lore.kernel.org/r/20241108040619.753343-4-gthiagarajan@marvell.com
Signed-off-by: Will Deacon <will@kernel.org>

authored by

Gowthami Thiagarajan and committed by
Will Deacon
d950c381 0045de7e

+349 -5
+1
Documentation/admin-guide/perf/index.rst
··· 14 14 qcom_l2_pmu 15 15 qcom_l3_pmu 16 16 starfive_starlink_pmu 17 + mrvl-odyssey-ddr-pmu 17 18 arm-ccn 18 19 arm-cmn 19 20 arm-ni
+80
Documentation/admin-guide/perf/mrvl-odyssey-ddr-pmu.rst
··· 1 + =================================================================== 2 + Marvell Odyssey DDR PMU Performance Monitoring Unit (PMU UNCORE) 3 + =================================================================== 4 + 5 + Odyssey DRAM Subsystem supports eight counters for monitoring performance 6 + and software can program those counters to monitor any of the defined 7 + performance events. Supported performance events include those counted 8 + at the interface between the DDR controller and the PHY, interface between 9 + the DDR Controller and the CHI interconnect, or within the DDR Controller. 10 + 11 + Additionally DSS also supports two fixed performance event counters, one 12 + for ddr reads and the other for ddr writes. 13 + 14 + The counter will be operating in either manual or auto mode. 15 + 16 + The PMU driver exposes the available events and format options under sysfs:: 17 + 18 + /sys/bus/event_source/devices/mrvl_ddr_pmu_<>/events/ 19 + /sys/bus/event_source/devices/mrvl_ddr_pmu_<>/format/ 20 + 21 + Examples:: 22 + 23 + $ perf list | grep ddr 24 + mrvl_ddr_pmu_<>/ddr_act_bypass_access/ [Kernel PMU event] 25 + mrvl_ddr_pmu_<>/ddr_bsm_alloc/ [Kernel PMU event] 26 + mrvl_ddr_pmu_<>/ddr_bsm_starvation/ [Kernel PMU event] 27 + mrvl_ddr_pmu_<>/ddr_cam_active_access/ [Kernel PMU event] 28 + mrvl_ddr_pmu_<>/ddr_cam_mwr/ [Kernel PMU event] 29 + mrvl_ddr_pmu_<>/ddr_cam_rd_active_access/ [Kernel PMU event] 30 + mrvl_ddr_pmu_<>/ddr_cam_rd_or_wr_access/ [Kernel PMU event] 31 + mrvl_ddr_pmu_<>/ddr_cam_read/ [Kernel PMU event] 32 + mrvl_ddr_pmu_<>/ddr_cam_wr_access/ [Kernel PMU event] 33 + mrvl_ddr_pmu_<>/ddr_cam_write/ [Kernel PMU event] 34 + mrvl_ddr_pmu_<>/ddr_capar_error/ [Kernel PMU event] 35 + mrvl_ddr_pmu_<>/ddr_crit_ref/ [Kernel PMU event] 36 + mrvl_ddr_pmu_<>/ddr_ddr_reads/ [Kernel PMU event] 37 + mrvl_ddr_pmu_<>/ddr_ddr_writes/ [Kernel PMU event] 38 + mrvl_ddr_pmu_<>/ddr_dfi_cmd_is_retry/ [Kernel PMU event] 39 + mrvl_ddr_pmu_<>/ddr_dfi_cycles/ [Kernel PMU 
event] 40 + mrvl_ddr_pmu_<>/ddr_dfi_parity_poison/ [Kernel PMU event] 41 + mrvl_ddr_pmu_<>/ddr_dfi_rd_data_access/ [Kernel PMU event] 42 + mrvl_ddr_pmu_<>/ddr_dfi_wr_data_access/ [Kernel PMU event] 43 + mrvl_ddr_pmu_<>/ddr_dqsosc_mpc/ [Kernel PMU event] 44 + mrvl_ddr_pmu_<>/ddr_dqsosc_mrr/ [Kernel PMU event] 45 + mrvl_ddr_pmu_<>/ddr_enter_mpsm/ [Kernel PMU event] 46 + mrvl_ddr_pmu_<>/ddr_enter_powerdown/ [Kernel PMU event] 47 + mrvl_ddr_pmu_<>/ddr_enter_selfref/ [Kernel PMU event] 48 + mrvl_ddr_pmu_<>/ddr_hif_pri_rdaccess/ [Kernel PMU event] 49 + mrvl_ddr_pmu_<>/ddr_hif_rd_access/ [Kernel PMU event] 50 + mrvl_ddr_pmu_<>/ddr_hif_rd_or_wr_access/ [Kernel PMU event] 51 + mrvl_ddr_pmu_<>/ddr_hif_rmw_access/ [Kernel PMU event] 52 + mrvl_ddr_pmu_<>/ddr_hif_wr_access/ [Kernel PMU event] 53 + mrvl_ddr_pmu_<>/ddr_hpri_sched_rd_crit_access/ [Kernel PMU event] 54 + mrvl_ddr_pmu_<>/ddr_load_mode/ [Kernel PMU event] 55 + mrvl_ddr_pmu_<>/ddr_lpri_sched_rd_crit_access/ [Kernel PMU event] 56 + mrvl_ddr_pmu_<>/ddr_precharge/ [Kernel PMU event] 57 + mrvl_ddr_pmu_<>/ddr_precharge_for_other/ [Kernel PMU event] 58 + mrvl_ddr_pmu_<>/ddr_precharge_for_rdwr/ [Kernel PMU event] 59 + mrvl_ddr_pmu_<>/ddr_raw_hazard/ [Kernel PMU event] 60 + mrvl_ddr_pmu_<>/ddr_rd_bypass_access/ [Kernel PMU event] 61 + mrvl_ddr_pmu_<>/ddr_rd_crc_error/ [Kernel PMU event] 62 + mrvl_ddr_pmu_<>/ddr_rd_uc_ecc_error/ [Kernel PMU event] 63 + mrvl_ddr_pmu_<>/ddr_rdwr_transitions/ [Kernel PMU event] 64 + mrvl_ddr_pmu_<>/ddr_refresh/ [Kernel PMU event] 65 + mrvl_ddr_pmu_<>/ddr_retry_fifo_full/ [Kernel PMU event] 66 + mrvl_ddr_pmu_<>/ddr_spec_ref/ [Kernel PMU event] 67 + mrvl_ddr_pmu_<>/ddr_tcr_mrr/ [Kernel PMU event] 68 + mrvl_ddr_pmu_<>/ddr_war_hazard/ [Kernel PMU event] 69 + mrvl_ddr_pmu_<>/ddr_waw_hazard/ [Kernel PMU event] 70 + mrvl_ddr_pmu_<>/ddr_win_limit_reached_rd/ [Kernel PMU event] 71 + mrvl_ddr_pmu_<>/ddr_win_limit_reached_wr/ [Kernel PMU event] 72 + mrvl_ddr_pmu_<>/ddr_wr_crc_error/ [Kernel PMU event] 73 + 
mrvl_ddr_pmu_<>/ddr_wr_trxn_crit_access/ [Kernel PMU event] 74 + mrvl_ddr_pmu_<>/ddr_write_combine/ [Kernel PMU event] 75 + mrvl_ddr_pmu_<>/ddr_zqcl/ [Kernel PMU event] 76 + mrvl_ddr_pmu_<>/ddr_zqlatch/ [Kernel PMU event] 77 + mrvl_ddr_pmu_<>/ddr_zqstart/ [Kernel PMU event] 78 + 79 + $ perf stat -e ddr_cam_read,ddr_cam_write,ddr_cam_active_access,ddr_cam 80 + rd_or_wr_access,ddr_cam_rd_active_access,ddr_cam_mwr <workload>
+268 -5
drivers/perf/marvell_cn10k_ddr_pmu.c
··· 16 16 17 17 /* Performance Counters Operating Mode Control Registers */ 18 18 #define CN10K_DDRC_PERF_CNT_OP_MODE_CTRL 0x8020 19 + #define ODY_DDRC_PERF_CNT_OP_MODE_CTRL 0x20020 19 20 #define OP_MODE_CTRL_VAL_MANUAL 0x1 20 21 21 22 /* Performance Counters Start Operation Control Registers */ 22 23 #define CN10K_DDRC_PERF_CNT_START_OP_CTRL 0x8028 24 + #define ODY_DDRC_PERF_CNT_START_OP_CTRL 0x200A0 23 25 #define START_OP_CTRL_VAL_START 0x1ULL 24 26 #define START_OP_CTRL_VAL_ACTIVE 0x2 25 27 26 28 /* Performance Counters End Operation Control Registers */ 27 29 #define CN10K_DDRC_PERF_CNT_END_OP_CTRL 0x8030 30 + #define ODY_DDRC_PERF_CNT_END_OP_CTRL 0x200E0 28 31 #define END_OP_CTRL_VAL_END 0x1ULL 29 32 30 33 /* Performance Counters End Status Registers */ 31 34 #define CN10K_DDRC_PERF_CNT_END_STATUS 0x8038 35 + #define ODY_DDRC_PERF_CNT_END_STATUS 0x20120 32 36 #define END_STATUS_VAL_END_TIMER_MODE_END 0x1 33 37 34 38 /* Performance Counters Configuration Registers */ 35 39 #define CN10K_DDRC_PERF_CFG_BASE 0x8040 40 + #define ODY_DDRC_PERF_CFG_BASE 0x20160 36 41 37 42 /* 8 Generic event counter + 2 fixed event counters */ 38 43 #define DDRC_PERF_NUM_GEN_COUNTERS 8 ··· 61 56 * DO NOT change these event-id numbers, they are used to 62 57 * program event bitmap in h/w. 
63 58 */ 59 + #define EVENT_DFI_CMD_IS_RETRY 61 60 + #define EVENT_RD_UC_ECC_ERROR 60 61 + #define EVENT_RD_CRC_ERROR 59 62 + #define EVENT_CAPAR_ERROR 58 63 + #define EVENT_WR_CRC_ERROR 57 64 + #define EVENT_DFI_PARITY_POISON 56 65 + #define EVENT_RETRY_FIFO_FULL 46 66 + #define EVENT_DFI_CYCLES 45 67 + 64 68 #define EVENT_OP_IS_ZQLATCH 55 65 69 #define EVENT_OP_IS_ZQSTART 54 66 70 #define EVENT_OP_IS_TCR_MRR 53 ··· 119 105 120 106 /* Event counter value registers */ 121 107 #define CN10K_DDRC_PERF_CNT_VALUE_BASE 0x8080 108 + #define ODY_DDRC_PERF_CNT_VALUE_BASE 0x201C0 122 109 123 110 /* Fixed event counter enable/disable register */ 124 111 #define CN10K_DDRC_PERF_CNT_FREERUN_EN 0x80C0 ··· 128 113 129 114 /* Fixed event counter control register */ 130 115 #define CN10K_DDRC_PERF_CNT_FREERUN_CTRL 0x80C8 116 + #define ODY_DDRC_PERF_CNT_FREERUN_CTRL 0x20240 131 117 #define DDRC_FREERUN_WRITE_CNT_CLR 0x1 132 118 #define DDRC_FREERUN_READ_CNT_CLR 0x2 119 + 120 + /* Fixed event counter clear register, defined only for Odyssey */ 121 + #define ODY_DDRC_PERF_CNT_FREERUN_CLR 0x20248 133 122 134 123 #define DDRC_PERF_CNT_VALUE_OVERFLOW BIT_ULL(48) 135 124 #define DDRC_PERF_CNT_MAX_VALUE GENMASK_ULL(48, 0) ··· 141 122 /* Fixed event counter value register */ 142 123 #define CN10K_DDRC_PERF_CNT_VALUE_WR_OP 0x80D0 143 124 #define CN10K_DDRC_PERF_CNT_VALUE_RD_OP 0x80D8 125 + #define ODY_DDRC_PERF_CNT_VALUE_WR_OP 0x20250 126 + #define ODY_DDRC_PERF_CNT_VALUE_RD_OP 0x20258 144 127 145 128 struct cn10k_ddr_pmu { 146 129 struct pmu pmu; ··· 184 163 u64 cnt_value_wr_op; 185 164 u64 cnt_value_rd_op; 186 165 bool is_cn10k; 166 + bool is_ody; 187 167 }; 188 168 189 169 static ssize_t cn10k_ddr_pmu_event_show(struct device *dev, ··· 262 240 NULL 263 241 }; 264 242 243 + static struct attribute *odyssey_ddr_perf_events_attrs[] = { 244 + /* Programmable */ 245 + CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_or_wr_access, EVENT_HIF_RD_OR_WR), 246 + CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_wr_access, 
EVENT_HIF_WR), 247 + CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_access, EVENT_HIF_RD), 248 + CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rmw_access, EVENT_HIF_RMW), 249 + CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_pri_rdaccess, EVENT_HIF_HI_PRI_RD), 250 + CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_bypass_access, EVENT_READ_BYPASS), 251 + CN10K_DDR_PMU_EVENT_ATTR(ddr_act_bypass_access, EVENT_ACT_BYPASS), 252 + CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_wr_data_access, 253 + EVENT_DFI_WR_DATA_CYCLES), 254 + CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_rd_data_access, 255 + EVENT_DFI_RD_DATA_CYCLES), 256 + CN10K_DDR_PMU_EVENT_ATTR(ddr_hpri_sched_rd_crit_access, 257 + EVENT_HPR_XACT_WHEN_CRITICAL), 258 + CN10K_DDR_PMU_EVENT_ATTR(ddr_lpri_sched_rd_crit_access, 259 + EVENT_LPR_XACT_WHEN_CRITICAL), 260 + CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_trxn_crit_access, 261 + EVENT_WR_XACT_WHEN_CRITICAL), 262 + CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_active_access, EVENT_OP_IS_ACTIVATE), 263 + CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_or_wr_access, 264 + EVENT_OP_IS_RD_OR_WR), 265 + CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_active_access, 266 + EVENT_OP_IS_RD_ACTIVATE), 267 + CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_read, EVENT_OP_IS_RD), 268 + CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_write, EVENT_OP_IS_WR), 269 + CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_mwr, EVENT_OP_IS_MWR), 270 + CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge, EVENT_OP_IS_PRECHARGE), 271 + CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_rdwr, 272 + EVENT_PRECHARGE_FOR_RDWR), 273 + CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_other, 274 + EVENT_PRECHARGE_FOR_OTHER), 275 + CN10K_DDR_PMU_EVENT_ATTR(ddr_rdwr_transitions, EVENT_RDWR_TRANSITIONS), 276 + CN10K_DDR_PMU_EVENT_ATTR(ddr_write_combine, EVENT_WRITE_COMBINE), 277 + CN10K_DDR_PMU_EVENT_ATTR(ddr_war_hazard, EVENT_WAR_HAZARD), 278 + CN10K_DDR_PMU_EVENT_ATTR(ddr_raw_hazard, EVENT_RAW_HAZARD), 279 + CN10K_DDR_PMU_EVENT_ATTR(ddr_waw_hazard, EVENT_WAW_HAZARD), 280 + CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_selfref, EVENT_OP_IS_ENTER_SELFREF), 281 + CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_powerdown, 
282 + EVENT_OP_IS_ENTER_POWERDOWN), 283 + CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_mpsm, EVENT_OP_IS_ENTER_MPSM), 284 + CN10K_DDR_PMU_EVENT_ATTR(ddr_refresh, EVENT_OP_IS_REFRESH), 285 + CN10K_DDR_PMU_EVENT_ATTR(ddr_crit_ref, EVENT_OP_IS_CRIT_REF), 286 + CN10K_DDR_PMU_EVENT_ATTR(ddr_spec_ref, EVENT_OP_IS_SPEC_REF), 287 + CN10K_DDR_PMU_EVENT_ATTR(ddr_load_mode, EVENT_OP_IS_LOAD_MODE), 288 + CN10K_DDR_PMU_EVENT_ATTR(ddr_zqcl, EVENT_OP_IS_ZQCL), 289 + CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_wr_access, EVENT_OP_IS_ZQCS), 290 + CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_cycles, EVENT_DFI_CYCLES), 291 + CN10K_DDR_PMU_EVENT_ATTR(ddr_retry_fifo_full, 292 + EVENT_RETRY_FIFO_FULL), 293 + CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_alloc, EVENT_BSM_ALLOC), 294 + CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_starvation, EVENT_BSM_STARVATION), 295 + CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_rd, 296 + EVENT_VISIBLE_WIN_LIMIT_REACHED_RD), 297 + CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_wr, 298 + EVENT_VISIBLE_WIN_LIMIT_REACHED_WR), 299 + CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mpc, EVENT_OP_IS_DQSOSC_MPC), 300 + CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mrr, EVENT_OP_IS_DQSOSC_MRR), 301 + CN10K_DDR_PMU_EVENT_ATTR(ddr_tcr_mrr, EVENT_OP_IS_TCR_MRR), 302 + CN10K_DDR_PMU_EVENT_ATTR(ddr_zqstart, EVENT_OP_IS_ZQSTART), 303 + CN10K_DDR_PMU_EVENT_ATTR(ddr_zqlatch, EVENT_OP_IS_ZQLATCH), 304 + CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_parity_poison, 305 + EVENT_DFI_PARITY_POISON), 306 + CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_crc_error, EVENT_WR_CRC_ERROR), 307 + CN10K_DDR_PMU_EVENT_ATTR(ddr_capar_error, EVENT_CAPAR_ERROR), 308 + CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_crc_error, EVENT_RD_CRC_ERROR), 309 + CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_uc_ecc_error, EVENT_RD_UC_ECC_ERROR), 310 + CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_cmd_is_retry, EVENT_DFI_CMD_IS_RETRY), 311 + /* Free run event counters */ 312 + CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_reads, EVENT_DDR_READS), 313 + CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_writes, EVENT_DDR_WRITES), 314 + NULL 315 + }; 316 + 317 + static struct 
attribute_group odyssey_ddr_perf_events_attr_group = { 318 + .name = "events", 319 + .attrs = odyssey_ddr_perf_events_attrs, 320 + }; 321 + 265 322 static struct attribute_group cn10k_ddr_perf_events_attr_group = { 266 323 .name = "events", 267 324 .attrs = cn10k_ddr_perf_events_attrs, ··· 386 285 NULL, 387 286 }; 388 287 288 + static const struct attribute_group *odyssey_attr_groups[] = { 289 + &odyssey_ddr_perf_events_attr_group, 290 + &cn10k_ddr_perf_format_attr_group, 291 + &cn10k_ddr_perf_cpumask_attr_group, 292 + NULL 293 + }; 294 + 389 295 /* Default poll timeout is 100 sec, which is very sufficient for 390 296 * 48 bit counter incremented max at 5.6 GT/s, which may take many 391 297 * hours to overflow. ··· 405 297 return ms_to_ktime((u64)cn10k_ddr_pmu_poll_period_sec * USEC_PER_SEC); 406 298 } 407 299 408 - static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap) 300 + static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap, 301 + struct cn10k_ddr_pmu *ddr_pmu) 409 302 { 303 + int err = 0; 304 + 410 305 switch (eventid) { 306 + case EVENT_DFI_PARITY_POISON ...EVENT_DFI_CMD_IS_RETRY: 307 + if (!ddr_pmu->p_data->is_ody) { 308 + err = -EINVAL; 309 + break; 310 + } 311 + fallthrough; 411 312 case EVENT_HIF_RD_OR_WR ... EVENT_WAW_HAZARD: 412 313 case EVENT_OP_IS_REFRESH ... 
EVENT_OP_IS_ZQLATCH: 413 314 *event_bitmap = (1ULL << (eventid - 1)); ··· 427 310 *event_bitmap = (0xFULL << (eventid - 1)); 428 311 break; 429 312 default: 430 - pr_err("%s Invalid eventid %d\n", __func__, eventid); 431 - return -EINVAL; 313 + err = -EINVAL; 432 314 } 433 315 434 - return 0; 316 + if (err) 317 + pr_err("%s Invalid eventid %d\n", __func__, eventid); 318 + return err; 435 319 } 436 320 437 321 static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu, ··· 500 382 return 0; 501 383 } 502 384 385 + static void cn10k_ddr_perf_counter_start(struct cn10k_ddr_pmu *ddr_pmu, 386 + int counter) 387 + { 388 + const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data; 389 + u64 ctrl_reg = p_data->cnt_start_op_ctrl; 390 + 391 + writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base + 392 + DDRC_PERF_REG(ctrl_reg, counter)); 393 + } 394 + 395 + static void cn10k_ddr_perf_counter_stop(struct cn10k_ddr_pmu *ddr_pmu, 396 + int counter) 397 + { 398 + const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data; 399 + u64 ctrl_reg = p_data->cnt_end_op_ctrl; 400 + 401 + writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base + 402 + DDRC_PERF_REG(ctrl_reg, counter)); 403 + } 404 + 503 405 static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu, 504 406 int counter, bool enable) 505 407 { 506 408 const struct ddr_pmu_platform_data *p_data = pmu->p_data; 409 + u64 ctrl_reg = pmu->p_data->cnt_op_mode_ctrl; 507 410 const struct ddr_pmu_ops *ops = pmu->ops; 411 + bool is_ody = pmu->p_data->is_ody; 508 412 u32 reg; 509 413 u64 val; 510 414 ··· 545 405 val &= ~EVENT_ENABLE; 546 406 547 407 writeq_relaxed(val, pmu->base + reg); 408 + 409 + if (is_ody) { 410 + if (enable) { 411 + /* 412 + * Setup the PMU counter to work in 413 + * manual mode 414 + */ 415 + reg = DDRC_PERF_REG(ctrl_reg, counter); 416 + writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL, 417 + pmu->base + reg); 418 + 419 + cn10k_ddr_perf_counter_start(pmu, counter); 420 + } else { 421 + 
cn10k_ddr_perf_counter_stop(pmu, counter); 422 + } 423 + } 548 424 } else { 549 425 if (counter == DDRC_PERF_READ_COUNTER_IDX) 550 426 ops->enable_read_freerun_counter(pmu, enable); ··· 642 486 if (counter < DDRC_PERF_NUM_GEN_COUNTERS) { 643 487 /* Generic counters, configure event id */ 644 488 reg_offset = DDRC_PERF_CFG(p_data->cfg_base, counter); 645 - ret = ddr_perf_get_event_bitmap(config, &val); 489 + ret = ddr_perf_get_event_bitmap(config, &val, pmu); 646 490 if (ret) 647 491 return ret; 648 492 ··· 787 631 cn10k_ddr_perf_pmu_enable(&pmu->pmu); 788 632 } 789 633 634 + static void ddr_pmu_ody_enable_read_freerun(struct cn10k_ddr_pmu *pmu, 635 + bool enable) 636 + { 637 + const struct ddr_pmu_platform_data *p_data = pmu->p_data; 638 + u64 val; 639 + 640 + val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl); 641 + if (enable) 642 + val |= DDRC_PERF_FREERUN_READ_EN; 643 + else 644 + val &= ~DDRC_PERF_FREERUN_READ_EN; 645 + 646 + writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl); 647 + } 648 + 649 + static void ddr_pmu_ody_enable_write_freerun(struct cn10k_ddr_pmu *pmu, 650 + bool enable) 651 + { 652 + const struct ddr_pmu_platform_data *p_data = pmu->p_data; 653 + u64 val; 654 + 655 + val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl); 656 + if (enable) 657 + val |= DDRC_PERF_FREERUN_WRITE_EN; 658 + else 659 + val &= ~DDRC_PERF_FREERUN_WRITE_EN; 660 + 661 + writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl); 662 + } 663 + 664 + static void ddr_pmu_ody_read_clear_freerun(struct cn10k_ddr_pmu *pmu) 665 + { 666 + const struct ddr_pmu_platform_data *p_data = pmu->p_data; 667 + u64 val; 668 + 669 + val = DDRC_FREERUN_READ_CNT_CLR; 670 + writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr); 671 + } 672 + 673 + static void ddr_pmu_ody_write_clear_freerun(struct cn10k_ddr_pmu *pmu) 674 + { 675 + const struct ddr_pmu_platform_data *p_data = pmu->p_data; 676 + u64 val; 677 + 678 + val = DDRC_FREERUN_WRITE_CNT_CLR; 679 + writeq_relaxed(val, 
pmu->base + p_data->cnt_freerun_clr); 680 + } 681 + 682 + static void ddr_pmu_ody_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx) 683 + { 684 + /* 685 + * On reaching the maximum value of the counter, the counter freezes 686 + * there. The particular event is updated and the respective counter 687 + * is stopped and started again so that it starts counting from zero 688 + */ 689 + cn10k_ddr_perf_event_update(pmu->events[evt_idx]); 690 + cn10k_ddr_perf_counter_stop(pmu, evt_idx); 691 + cn10k_ddr_perf_counter_start(pmu, evt_idx); 692 + } 693 + 790 694 static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu) 791 695 { 792 696 const struct ddr_pmu_platform_data *p_data = pmu->p_data; ··· 956 740 }; 957 741 #endif 958 742 743 + static const struct ddr_pmu_ops ddr_pmu_ody_ops = { 744 + .enable_read_freerun_counter = ddr_pmu_ody_enable_read_freerun, 745 + .enable_write_freerun_counter = ddr_pmu_ody_enable_write_freerun, 746 + .clear_read_freerun_counter = ddr_pmu_ody_read_clear_freerun, 747 + .clear_write_freerun_counter = ddr_pmu_ody_write_clear_freerun, 748 + .pmu_overflow_handler = ddr_pmu_ody_overflow_hander, 749 + }; 750 + 751 + #ifdef CONFIG_ACPI 752 + static const struct ddr_pmu_platform_data odyssey_ddr_pmu_pdata = { 753 + .counter_overflow_val = 0, 754 + .counter_max_val = GENMASK_ULL(63, 0), 755 + .cnt_base = ODY_DDRC_PERF_CNT_VALUE_BASE, 756 + .cfg_base = ODY_DDRC_PERF_CFG_BASE, 757 + .cnt_op_mode_ctrl = ODY_DDRC_PERF_CNT_OP_MODE_CTRL, 758 + .cnt_start_op_ctrl = ODY_DDRC_PERF_CNT_START_OP_CTRL, 759 + .cnt_end_op_ctrl = ODY_DDRC_PERF_CNT_END_OP_CTRL, 760 + .cnt_end_status = ODY_DDRC_PERF_CNT_END_STATUS, 761 + .cnt_freerun_en = 0, 762 + .cnt_freerun_ctrl = ODY_DDRC_PERF_CNT_FREERUN_CTRL, 763 + .cnt_freerun_clr = ODY_DDRC_PERF_CNT_FREERUN_CLR, 764 + .cnt_value_wr_op = ODY_DDRC_PERF_CNT_VALUE_WR_OP, 765 + .cnt_value_rd_op = ODY_DDRC_PERF_CNT_VALUE_RD_OP, 766 + .is_ody = TRUE, 767 + }; 768 + #endif 769 + 959 770 static int 
cn10k_ddr_perf_probe(struct platform_device *pdev) 960 771 { 961 772 const struct ddr_pmu_platform_data *dev_data; ··· 990 747 struct resource *res; 991 748 void __iomem *base; 992 749 bool is_cn10k; 750 + bool is_ody; 993 751 char *name; 994 752 int ret; 995 753 ··· 1015 771 1016 772 ddr_pmu->p_data = dev_data; 1017 773 is_cn10k = ddr_pmu->p_data->is_cn10k; 774 + is_ody = ddr_pmu->p_data->is_ody; 1018 775 1019 776 if (is_cn10k) { 1020 777 ddr_pmu->ops = &ddr_pmu_ops; ··· 1036 791 .read = cn10k_ddr_perf_event_update, 1037 792 .pmu_enable = cn10k_ddr_perf_pmu_enable, 1038 793 .pmu_disable = cn10k_ddr_perf_pmu_disable, 794 + }; 795 + } 796 + 797 + if (is_ody) { 798 + ddr_pmu->ops = &ddr_pmu_ody_ops; 799 + 800 + ddr_pmu->pmu = (struct pmu) { 801 + .module = THIS_MODULE, 802 + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, 803 + .task_ctx_nr = perf_invalid_context, 804 + .attr_groups = odyssey_attr_groups, 805 + .event_init = cn10k_ddr_perf_event_init, 806 + .add = cn10k_ddr_perf_event_add, 807 + .del = cn10k_ddr_perf_event_del, 808 + .start = cn10k_ddr_perf_event_start, 809 + .stop = cn10k_ddr_perf_event_stop, 810 + .read = cn10k_ddr_perf_event_update, 1039 811 }; 1040 812 } 1041 813 ··· 1106 844 #ifdef CONFIG_ACPI 1107 845 static const struct acpi_device_id cn10k_ddr_pmu_acpi_match[] = { 1108 846 {"MRVL000A", (kernel_ulong_t)&cn10k_ddr_pmu_pdata }, 847 + {"MRVL000C", (kernel_ulong_t)&odyssey_ddr_pmu_pdata}, 1109 848 {}, 1110 849 }; 1111 850 MODULE_DEVICE_TABLE(acpi, cn10k_ddr_pmu_acpi_match);