Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf/imx_ddr: Add support for PMU in DB (system interconnects)

There is a PMU in the DB, which has the same function as the PMU in the DDR
subsystem; the difference is that the PMU in the DB only supports the cycles,
axid-read, and axid-write events.

e.g.
perf stat -a -e imx8_db0/axid-read,axi_mask=0xMMMM,axi_id=0xDDDD,axi_port=0xPP,axi_channel=0xH/ cmd
perf stat -a -e imx8_db0/axid-write,axi_mask=0xMMMM,axi_id=0xDDDD,axi_port=0xPP,axi_channel=0xH/ cmd

Signed-off-by: Joakim Zhang <qiangqing.zhang@nxp.com>
Signed-off-by: Frank Li <Frank.Li@nxp.com>
Signed-off-by: Will Deacon <will@kernel.org>

authored by

Joakim Zhang and committed by
Will Deacon
11abb4e8 037e8cf6

+60 -7
+60 -7
drivers/perf/fsl_imx8_ddr_perf.c
··· 53 53 #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu) 54 54 55 55 #define DDR_PERF_DEV_NAME "imx8_ddr" 56 + #define DB_PERF_DEV_NAME "imx8_db" 56 57 #define DDR_CPUHP_CB_NAME DDR_PERF_DEV_NAME "_perf_pmu" 57 58 58 59 static DEFINE_IDA(ddr_ida); 60 + static DEFINE_IDA(db_ida); 59 61 60 62 /* DDR Perf hardware feature */ 61 63 #define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */ 62 64 #define DDR_CAP_AXI_ID_FILTER_ENHANCED 0x3 /* support enhanced AXI ID filter */ 63 65 #define DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER 0x4 /* support AXI ID PORT CHANNEL filter */ 64 66 67 + /* Perf type */ 68 + enum fsl_ddr_type { 69 + DDR_PERF_TYPE = 0, /* ddr Perf (default) */ 70 + DB_PERF_TYPE, /* db Perf */ 71 + }; 72 + 65 73 struct fsl_ddr_devtype_data { 66 74 unsigned int quirks; /* quirks needed for different DDR Perf core */ 67 75 const char *identifier; /* system PMU identifier for userspace */ 76 + enum fsl_ddr_type type; /* types of Perf, ddr or db */ 68 77 }; 69 78 70 79 static const struct fsl_ddr_devtype_data imx8_devtype_data; ··· 107 98 .identifier = "i.MX8DXL", 108 99 }; 109 100 101 + static const struct fsl_ddr_devtype_data imx8dxl_db_devtype_data = { 102 + .quirks = DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER, 103 + .identifier = "i.MX8DXL", 104 + .type = DB_PERF_TYPE, 105 + }; 106 + 110 107 static const struct of_device_id imx_ddr_pmu_dt_ids[] = { 111 108 { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data}, 112 109 { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data}, ··· 121 106 { .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data}, 122 107 { .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data}, 123 108 { .compatible = "fsl,imx8dxl-ddr-pmu", .data = &imx8dxl_devtype_data}, 109 + { .compatible = "fsl,imx8dxl-db-pmu", .data = &imx8dxl_db_devtype_data}, 124 110 { /* sentinel */ } 125 111 }; 126 112 MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids); ··· 301 285 NULL, 302 286 }; 303 287 288 + static const int 
ddr_perf_db_visible_event_list[] = { 289 + EVENT_CYCLES_ID, 290 + 0x41, 291 + 0x42, 292 + }; 293 + 294 + static umode_t ddr_perf_events_attrs_is_visible(struct kobject *kobj, 295 + struct attribute *attr, int n) 296 + { 297 + struct device *dev = kobj_to_dev(kobj); 298 + struct ddr_pmu *pmu = dev_get_drvdata(dev); 299 + struct perf_pmu_events_attr *pmu_attr; 300 + unsigned int i; 301 + 302 + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr); 303 + 304 + if (pmu->devtype_data->type == DDR_PERF_TYPE) 305 + return attr->mode; 306 + 307 + /* DB Type */ 308 + for (i = 0; i < ARRAY_SIZE(ddr_perf_db_visible_event_list); i++) 309 + if (pmu_attr->id == ddr_perf_db_visible_event_list[i]) 310 + return attr->mode; 311 + 312 + return 0; 313 + } 314 + 304 315 static const struct attribute_group ddr_perf_events_attr_group = { 305 316 .name = "events", 306 317 .attrs = ddr_perf_events_attrs, 318 + .is_visible = ddr_perf_events_attrs_is_visible, 307 319 }; 308 320 309 321 PMU_FORMAT_ATTR(event, "config:0-7"); ··· 781 737 struct ddr_pmu *pmu; 782 738 struct device_node *np; 783 739 void __iomem *base; 740 + struct ida *ida; 784 741 char *name; 785 742 int nclks; 786 743 int num; ··· 806 761 if (nclks < 0) 807 762 return dev_err_probe(&pdev->dev, nclks, "Failure get clks\n"); 808 763 809 - num = ida_alloc(&ddr_ida, GFP_KERNEL); 764 + pmu->devtype_data = of_device_get_match_data(&pdev->dev); 765 + 766 + ida = pmu->devtype_data->type == DDR_PERF_TYPE ? 
&ddr_ida : &db_ida; 767 + num = ida_alloc(ida, GFP_KERNEL); 810 768 if (num < 0) 811 769 return num; 812 770 813 771 pmu->id = num; 814 772 815 - name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", 816 - num); 773 + if (pmu->devtype_data->type == DDR_PERF_TYPE) 774 + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", num); 775 + else 776 + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DB_PERF_DEV_NAME "%d", num); 777 + 817 778 if (!name) { 818 779 ret = -ENOMEM; 819 780 goto idr_free; 820 781 } 821 - 822 - pmu->devtype_data = of_device_get_match_data(&pdev->dev); 823 782 824 783 pmu->cpu = raw_smp_processor_id(); 825 784 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, ··· 881 832 cpuhp_instance_err: 882 833 cpuhp_remove_multi_state(pmu->cpuhp_state); 883 834 idr_free: 884 - ida_free(&ddr_ida, pmu->id); 835 + ida_free(ida, pmu->id); 885 836 dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret); 886 837 return ret; 887 838 } ··· 895 846 896 847 perf_pmu_unregister(&pmu->pmu); 897 848 898 - ida_free(&ddr_ida, pmu->id); 849 + if (pmu->devtype_data->type == DDR_PERF_TYPE) 850 + ida_free(&ddr_ida, pmu->id); 851 + else 852 + ida_free(&db_ida, pmu->id); 853 + 899 854 } 900 855 901 856 static struct platform_driver imx_ddr_pmu_driver = {