Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

hwtracing: hisi_ptt: Add trace function support for HiSilicon PCIe Tune and Trace device

HiSilicon PCIe tune and trace device (PTT) is a PCIe Root Complex integrated
Endpoint (RCiEP) device, providing the capability to dynamically monitor and
tune the PCIe traffic and trace the TLP headers.

Add the driver for the device to enable the trace function. Register the PMU
device of PTT trace, so that users can use trace through the perf command. The
driver makes use of the perf AUX trace function and supports the following
events to configure the trace:

- filter: select Root port or Endpoint to trace
- type: select the type of traced TLP headers
- direction: select the direction of traced TLP headers
- format: select the data format of the traced TLP headers

This patch initially adds basic trace support of the PTT device.

Acked-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Reviewed-by: John Garry <john.garry@huawei.com>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Link: https://lore.kernel.org/r/20220816114414.4092-3-yangyicong@huawei.com
Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>

authored by

Yicong Yang and committed by
Mathieu Poirier
ff0de066 24b6c779

+1110
+1
drivers/Makefile
··· 175 175 obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/ 176 176 obj-y += hwtracing/intel_th/ 177 177 obj-$(CONFIG_STM) += hwtracing/stm/ 178 + obj-$(CONFIG_HISI_PTT) += hwtracing/ptt/ 178 179 obj-y += android/ 179 180 obj-$(CONFIG_NVMEM) += nvmem/ 180 181 obj-$(CONFIG_FPGA) += fpga/
+2
drivers/hwtracing/Kconfig
··· 5 5 6 6 source "drivers/hwtracing/intel_th/Kconfig" 7 7 8 + source "drivers/hwtracing/ptt/Kconfig" 9 + 8 10 endmenu
+12
drivers/hwtracing/ptt/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + config HISI_PTT 3 + tristate "HiSilicon PCIe Tune and Trace Device" 4 + depends on ARM64 || (COMPILE_TEST && 64BIT) 5 + depends on PCI && HAS_DMA && HAS_IOMEM && PERF_EVENTS 6 + help 7 + HiSilicon PCIe Tune and Trace device exists as a PCIe RCiEP 8 + device, and it provides support for PCIe traffic tuning and 9 + tracing TLP headers to the memory. 10 + 11 + This driver can also be built as a module. If so, the module 12 + will be called hisi_ptt.
+2
drivers/hwtracing/ptt/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + obj-$(CONFIG_HISI_PTT) += hisi_ptt.o
+916
drivers/hwtracing/ptt/hisi_ptt.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Driver for HiSilicon PCIe tune and trace device 4 + * 5 + * Copyright (c) 2022 HiSilicon Technologies Co., Ltd. 6 + * Author: Yicong Yang <yangyicong@hisilicon.com> 7 + */ 8 + 9 + #include <linux/bitfield.h> 10 + #include <linux/bitops.h> 11 + #include <linux/cpuhotplug.h> 12 + #include <linux/delay.h> 13 + #include <linux/dma-iommu.h> 14 + #include <linux/dma-mapping.h> 15 + #include <linux/interrupt.h> 16 + #include <linux/io.h> 17 + #include <linux/iommu.h> 18 + #include <linux/iopoll.h> 19 + #include <linux/module.h> 20 + #include <linux/sysfs.h> 21 + #include <linux/vmalloc.h> 22 + 23 + #include "hisi_ptt.h" 24 + 25 + /* Dynamic CPU hotplug state used by PTT */ 26 + static enum cpuhp_state hisi_ptt_pmu_online; 27 + 28 + static u16 hisi_ptt_get_filter_val(u16 devid, bool is_port) 29 + { 30 + if (is_port) 31 + return BIT(HISI_PCIE_CORE_PORT_ID(devid & 0xff)); 32 + 33 + return devid; 34 + } 35 + 36 + static bool hisi_ptt_wait_trace_hw_idle(struct hisi_ptt *hisi_ptt) 37 + { 38 + u32 val; 39 + 40 + return !readl_poll_timeout_atomic(hisi_ptt->iobase + HISI_PTT_TRACE_STS, 41 + val, val & HISI_PTT_TRACE_IDLE, 42 + HISI_PTT_WAIT_POLL_INTERVAL_US, 43 + HISI_PTT_WAIT_TRACE_TIMEOUT_US); 44 + } 45 + 46 + static void hisi_ptt_wait_dma_reset_done(struct hisi_ptt *hisi_ptt) 47 + { 48 + u32 val; 49 + 50 + readl_poll_timeout_atomic(hisi_ptt->iobase + HISI_PTT_TRACE_WR_STS, 51 + val, !val, HISI_PTT_RESET_POLL_INTERVAL_US, 52 + HISI_PTT_RESET_TIMEOUT_US); 53 + } 54 + 55 + static void hisi_ptt_trace_end(struct hisi_ptt *hisi_ptt) 56 + { 57 + writel(0, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL); 58 + hisi_ptt->trace_ctrl.started = false; 59 + } 60 + 61 + static int hisi_ptt_trace_start(struct hisi_ptt *hisi_ptt) 62 + { 63 + struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl; 64 + u32 val; 65 + int i; 66 + 67 + /* Check device idle before start trace */ 68 + if (!hisi_ptt_wait_trace_hw_idle(hisi_ptt)) { 69 + 
pci_err(hisi_ptt->pdev, "Failed to start trace, the device is still busy\n"); 70 + return -EBUSY; 71 + } 72 + 73 + ctrl->started = true; 74 + 75 + /* Reset the DMA before start tracing */ 76 + val = readl(hisi_ptt->iobase + HISI_PTT_TRACE_CTRL); 77 + val |= HISI_PTT_TRACE_CTRL_RST; 78 + writel(val, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL); 79 + 80 + hisi_ptt_wait_dma_reset_done(hisi_ptt); 81 + 82 + val = readl(hisi_ptt->iobase + HISI_PTT_TRACE_CTRL); 83 + val &= ~HISI_PTT_TRACE_CTRL_RST; 84 + writel(val, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL); 85 + 86 + /* Reset the index of current buffer */ 87 + hisi_ptt->trace_ctrl.buf_index = 0; 88 + 89 + /* Zero the trace buffers */ 90 + for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; i++) 91 + memset(ctrl->trace_buf[i].addr, 0, HISI_PTT_TRACE_BUF_SIZE); 92 + 93 + /* Clear the interrupt status */ 94 + writel(HISI_PTT_TRACE_INT_STAT_MASK, hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT); 95 + writel(0, hisi_ptt->iobase + HISI_PTT_TRACE_INT_MASK); 96 + 97 + /* Set the trace control register */ 98 + val = FIELD_PREP(HISI_PTT_TRACE_CTRL_TYPE_SEL, ctrl->type); 99 + val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_RXTX_SEL, ctrl->direction); 100 + val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_DATA_FORMAT, ctrl->format); 101 + val |= FIELD_PREP(HISI_PTT_TRACE_CTRL_TARGET_SEL, hisi_ptt->trace_ctrl.filter); 102 + if (!hisi_ptt->trace_ctrl.is_port) 103 + val |= HISI_PTT_TRACE_CTRL_FILTER_MODE; 104 + 105 + /* Start the Trace */ 106 + val |= HISI_PTT_TRACE_CTRL_EN; 107 + writel(val, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL); 108 + 109 + return 0; 110 + } 111 + 112 + static int hisi_ptt_update_aux(struct hisi_ptt *hisi_ptt, int index, bool stop) 113 + { 114 + struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl; 115 + struct perf_output_handle *handle = &ctrl->handle; 116 + struct perf_event *event = handle->event; 117 + struct hisi_ptt_pmu_buf *buf; 118 + size_t size; 119 + void *addr; 120 + 121 + buf = perf_get_aux(handle); 122 + if (!buf || !handle->size) 123 + 
return -EINVAL; 124 + 125 + addr = ctrl->trace_buf[ctrl->buf_index].addr; 126 + 127 + /* 128 + * If we're going to stop, read the size of already traced data from 129 + * HISI_PTT_TRACE_WR_STS. Otherwise we're coming from the interrupt, 130 + * the data size is always HISI_PTT_TRACE_BUF_SIZE. 131 + */ 132 + if (stop) { 133 + u32 reg; 134 + 135 + reg = readl(hisi_ptt->iobase + HISI_PTT_TRACE_WR_STS); 136 + size = FIELD_GET(HISI_PTT_TRACE_WR_STS_WRITE, reg); 137 + } else { 138 + size = HISI_PTT_TRACE_BUF_SIZE; 139 + } 140 + 141 + memcpy(buf->base + buf->pos, addr, size); 142 + buf->pos += size; 143 + 144 + /* 145 + * Just commit the traced data if we're going to stop. Otherwise if the 146 + * resident AUX buffer cannot contain the data of next trace buffer, 147 + * apply a new one. 148 + */ 149 + if (stop) { 150 + perf_aux_output_end(handle, buf->pos); 151 + } else if (buf->length - buf->pos < HISI_PTT_TRACE_BUF_SIZE) { 152 + perf_aux_output_end(handle, buf->pos); 153 + 154 + buf = perf_aux_output_begin(handle, event); 155 + if (!buf) 156 + return -EINVAL; 157 + 158 + buf->pos = handle->head % buf->length; 159 + if (buf->length - buf->pos < HISI_PTT_TRACE_BUF_SIZE) { 160 + perf_aux_output_end(handle, 0); 161 + return -EINVAL; 162 + } 163 + } 164 + 165 + return 0; 166 + } 167 + 168 + static irqreturn_t hisi_ptt_isr(int irq, void *context) 169 + { 170 + struct hisi_ptt *hisi_ptt = context; 171 + u32 status, buf_idx; 172 + 173 + status = readl(hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT); 174 + if (!(status & HISI_PTT_TRACE_INT_STAT_MASK)) 175 + return IRQ_NONE; 176 + 177 + buf_idx = ffs(status) - 1; 178 + 179 + /* Clear the interrupt status of buffer @buf_idx */ 180 + writel(status, hisi_ptt->iobase + HISI_PTT_TRACE_INT_STAT); 181 + 182 + /* 183 + * Update the AUX buffer and cache the current buffer index, 184 + * as we need to know this and save the data when the trace 185 + * is ended out of the interrupt handler. End the trace 186 + * if the updating fails. 
187 + */ 188 + if (hisi_ptt_update_aux(hisi_ptt, buf_idx, false)) 189 + hisi_ptt_trace_end(hisi_ptt); 190 + else 191 + hisi_ptt->trace_ctrl.buf_index = (buf_idx + 1) % HISI_PTT_TRACE_BUF_CNT; 192 + 193 + return IRQ_HANDLED; 194 + } 195 + 196 + static void hisi_ptt_irq_free_vectors(void *pdev) 197 + { 198 + pci_free_irq_vectors(pdev); 199 + } 200 + 201 + static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt) 202 + { 203 + struct pci_dev *pdev = hisi_ptt->pdev; 204 + int ret; 205 + 206 + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); 207 + if (ret < 0) { 208 + pci_err(pdev, "failed to allocate irq vector, ret = %d\n", ret); 209 + return ret; 210 + } 211 + 212 + ret = devm_add_action_or_reset(&pdev->dev, hisi_ptt_irq_free_vectors, pdev); 213 + if (ret < 0) 214 + return ret; 215 + 216 + ret = devm_request_threaded_irq(&pdev->dev, 217 + pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ), 218 + NULL, hisi_ptt_isr, 0, 219 + DRV_NAME, hisi_ptt); 220 + if (ret) { 221 + pci_err(pdev, "failed to request irq %d, ret = %d\n", 222 + pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ), ret); 223 + return ret; 224 + } 225 + 226 + return 0; 227 + } 228 + 229 + static int hisi_ptt_init_filters(struct pci_dev *pdev, void *data) 230 + { 231 + struct hisi_ptt_filter_desc *filter; 232 + struct hisi_ptt *hisi_ptt = data; 233 + 234 + /* 235 + * We won't fail the probe if filter allocation failed here. The filters 236 + * should be partial initialized and users would know which filter fails 237 + * through the log. Other functions of PTT device are still available. 
238 + */ 239 + filter = kzalloc(sizeof(*filter), GFP_KERNEL); 240 + if (!filter) { 241 + pci_err(hisi_ptt->pdev, "failed to add filter %s\n", pci_name(pdev)); 242 + return -ENOMEM; 243 + } 244 + 245 + filter->devid = PCI_DEVID(pdev->bus->number, pdev->devfn); 246 + 247 + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) { 248 + filter->is_port = true; 249 + list_add_tail(&filter->list, &hisi_ptt->port_filters); 250 + 251 + /* Update the available port mask */ 252 + hisi_ptt->port_mask |= hisi_ptt_get_filter_val(filter->devid, true); 253 + } else { 254 + list_add_tail(&filter->list, &hisi_ptt->req_filters); 255 + } 256 + 257 + return 0; 258 + } 259 + 260 + static void hisi_ptt_release_filters(void *data) 261 + { 262 + struct hisi_ptt_filter_desc *filter, *tmp; 263 + struct hisi_ptt *hisi_ptt = data; 264 + 265 + list_for_each_entry_safe(filter, tmp, &hisi_ptt->req_filters, list) { 266 + list_del(&filter->list); 267 + kfree(filter); 268 + } 269 + 270 + list_for_each_entry_safe(filter, tmp, &hisi_ptt->port_filters, list) { 271 + list_del(&filter->list); 272 + kfree(filter); 273 + } 274 + } 275 + 276 + static int hisi_ptt_config_trace_buf(struct hisi_ptt *hisi_ptt) 277 + { 278 + struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl; 279 + struct device *dev = &hisi_ptt->pdev->dev; 280 + int i; 281 + 282 + ctrl->trace_buf = devm_kcalloc(dev, HISI_PTT_TRACE_BUF_CNT, 283 + sizeof(*ctrl->trace_buf), GFP_KERNEL); 284 + if (!ctrl->trace_buf) 285 + return -ENOMEM; 286 + 287 + for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; ++i) { 288 + ctrl->trace_buf[i].addr = dmam_alloc_coherent(dev, HISI_PTT_TRACE_BUF_SIZE, 289 + &ctrl->trace_buf[i].dma, 290 + GFP_KERNEL); 291 + if (!ctrl->trace_buf[i].addr) 292 + return -ENOMEM; 293 + } 294 + 295 + /* Configure the trace DMA buffer */ 296 + for (i = 0; i < HISI_PTT_TRACE_BUF_CNT; i++) { 297 + writel(lower_32_bits(ctrl->trace_buf[i].dma), 298 + hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_BASE_LO_0 + 299 + i * HISI_PTT_TRACE_ADDR_STRIDE); 300 + 
writel(upper_32_bits(ctrl->trace_buf[i].dma), 301 + hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_BASE_HI_0 + 302 + i * HISI_PTT_TRACE_ADDR_STRIDE); 303 + } 304 + writel(HISI_PTT_TRACE_BUF_SIZE, hisi_ptt->iobase + HISI_PTT_TRACE_ADDR_SIZE); 305 + 306 + return 0; 307 + } 308 + 309 + static int hisi_ptt_init_ctrls(struct hisi_ptt *hisi_ptt) 310 + { 311 + struct pci_dev *pdev = hisi_ptt->pdev; 312 + struct pci_bus *bus; 313 + int ret; 314 + u32 reg; 315 + 316 + INIT_LIST_HEAD(&hisi_ptt->port_filters); 317 + INIT_LIST_HEAD(&hisi_ptt->req_filters); 318 + 319 + ret = hisi_ptt_config_trace_buf(hisi_ptt); 320 + if (ret) 321 + return ret; 322 + 323 + /* 324 + * The device range register provides the information about the root 325 + * ports which the RCiEP can control and trace. The RCiEP and the root 326 + * ports which it supports are on the same PCIe core, with same domain 327 + * number but maybe different bus number. The device range register 328 + * will tell us which root ports we can support, Bit[31:16] indicates 329 + * the upper BDF numbers of the root port, while Bit[15:0] indicates 330 + * the lower. 
331 + */ 332 + reg = readl(hisi_ptt->iobase + HISI_PTT_DEVICE_RANGE); 333 + hisi_ptt->upper_bdf = FIELD_GET(HISI_PTT_DEVICE_RANGE_UPPER, reg); 334 + hisi_ptt->lower_bdf = FIELD_GET(HISI_PTT_DEVICE_RANGE_LOWER, reg); 335 + 336 + bus = pci_find_bus(pci_domain_nr(pdev->bus), PCI_BUS_NUM(hisi_ptt->upper_bdf)); 337 + if (bus) 338 + pci_walk_bus(bus, hisi_ptt_init_filters, hisi_ptt); 339 + 340 + ret = devm_add_action_or_reset(&pdev->dev, hisi_ptt_release_filters, hisi_ptt); 341 + if (ret) 342 + return ret; 343 + 344 + hisi_ptt->trace_ctrl.on_cpu = -1; 345 + return 0; 346 + } 347 + 348 + static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, 349 + char *buf) 350 + { 351 + struct hisi_ptt *hisi_ptt = to_hisi_ptt(dev_get_drvdata(dev)); 352 + const cpumask_t *cpumask = cpumask_of_node(dev_to_node(&hisi_ptt->pdev->dev)); 353 + 354 + return cpumap_print_to_pagebuf(true, buf, cpumask); 355 + } 356 + static DEVICE_ATTR_RO(cpumask); 357 + 358 + static struct attribute *hisi_ptt_cpumask_attrs[] = { 359 + &dev_attr_cpumask.attr, 360 + NULL 361 + }; 362 + 363 + static const struct attribute_group hisi_ptt_cpumask_attr_group = { 364 + .attrs = hisi_ptt_cpumask_attrs, 365 + }; 366 + 367 + /* 368 + * Bit 19 indicates the filter type, 1 for Root Port filter and 0 for Requester 369 + * filter. Bit[15:0] indicates the filter value, for Root Port filter it's 370 + * a bit mask of desired ports and for Requester filter it's the Requester ID 371 + * of the desired PCIe function. Bit[18:16] is reserved for extension. 372 + * 373 + * See hisi_ptt.rst documentation for detailed information. 
374 + */ 375 + PMU_FORMAT_ATTR(filter, "config:0-19"); 376 + PMU_FORMAT_ATTR(direction, "config:20-23"); 377 + PMU_FORMAT_ATTR(type, "config:24-31"); 378 + PMU_FORMAT_ATTR(format, "config:32-35"); 379 + 380 + static struct attribute *hisi_ptt_pmu_format_attrs[] = { 381 + &format_attr_filter.attr, 382 + &format_attr_direction.attr, 383 + &format_attr_type.attr, 384 + &format_attr_format.attr, 385 + NULL 386 + }; 387 + 388 + static struct attribute_group hisi_ptt_pmu_format_group = { 389 + .name = "format", 390 + .attrs = hisi_ptt_pmu_format_attrs, 391 + }; 392 + 393 + static const struct attribute_group *hisi_ptt_pmu_groups[] = { 394 + &hisi_ptt_cpumask_attr_group, 395 + &hisi_ptt_pmu_format_group, 396 + NULL 397 + }; 398 + 399 + static int hisi_ptt_trace_valid_direction(u32 val) 400 + { 401 + /* 402 + * The direction values have different effects according to the data 403 + * format (specified in the parentheses). TLP set A/B means different 404 + * set of TLP types. See hisi_ptt.rst documentation for more details. 405 + */ 406 + static const u32 hisi_ptt_trace_available_direction[] = { 407 + 0, /* inbound(4DW) or reserved(8DW) */ 408 + 1, /* outbound(4DW) */ 409 + 2, /* {in, out}bound(4DW) or inbound(8DW), TLP set A */ 410 + 3, /* {in, out}bound(4DW) or inbound(8DW), TLP set B */ 411 + }; 412 + int i; 413 + 414 + for (i = 0; i < ARRAY_SIZE(hisi_ptt_trace_available_direction); i++) { 415 + if (val == hisi_ptt_trace_available_direction[i]) 416 + return 0; 417 + } 418 + 419 + return -EINVAL; 420 + } 421 + 422 + static int hisi_ptt_trace_valid_type(u32 val) 423 + { 424 + /* Different types can be set simultaneously */ 425 + static const u32 hisi_ptt_trace_available_type[] = { 426 + 1, /* posted_request */ 427 + 2, /* non-posted_request */ 428 + 4, /* completion */ 429 + }; 430 + int i; 431 + 432 + if (!val) 433 + return -EINVAL; 434 + 435 + /* 436 + * Walk the available list and clear the valid bits of 437 + * the config. 
If there is any resident bit after the 438 + * walk then the config is invalid. 439 + */ 440 + for (i = 0; i < ARRAY_SIZE(hisi_ptt_trace_available_type); i++) 441 + val &= ~hisi_ptt_trace_available_type[i]; 442 + 443 + if (val) 444 + return -EINVAL; 445 + 446 + return 0; 447 + } 448 + 449 + static int hisi_ptt_trace_valid_format(u32 val) 450 + { 451 + static const u32 hisi_ptt_trace_availble_format[] = { 452 + 0, /* 4DW */ 453 + 1, /* 8DW */ 454 + }; 455 + int i; 456 + 457 + for (i = 0; i < ARRAY_SIZE(hisi_ptt_trace_availble_format); i++) { 458 + if (val == hisi_ptt_trace_availble_format[i]) 459 + return 0; 460 + } 461 + 462 + return -EINVAL; 463 + } 464 + 465 + static int hisi_ptt_trace_valid_filter(struct hisi_ptt *hisi_ptt, u64 config) 466 + { 467 + unsigned long val, port_mask = hisi_ptt->port_mask; 468 + struct hisi_ptt_filter_desc *filter; 469 + 470 + hisi_ptt->trace_ctrl.is_port = FIELD_GET(HISI_PTT_PMU_FILTER_IS_PORT, config); 471 + val = FIELD_GET(HISI_PTT_PMU_FILTER_VAL_MASK, config); 472 + 473 + /* 474 + * Port filters are defined as bit mask. For port filters, check 475 + * the bits in the @val are within the range of hisi_ptt->port_mask 476 + * and whether it's empty or not, otherwise user has specified 477 + * some unsupported root ports. 478 + * 479 + * For Requester ID filters, walk the available filter list to see 480 + * whether we have one matched. 
481 + */ 482 + if (!hisi_ptt->trace_ctrl.is_port) { 483 + list_for_each_entry(filter, &hisi_ptt->req_filters, list) { 484 + if (val == hisi_ptt_get_filter_val(filter->devid, filter->is_port)) 485 + return 0; 486 + } 487 + } else if (bitmap_subset(&val, &port_mask, BITS_PER_LONG)) { 488 + return 0; 489 + } 490 + 491 + return -EINVAL; 492 + } 493 + 494 + static void hisi_ptt_pmu_init_configs(struct hisi_ptt *hisi_ptt, struct perf_event *event) 495 + { 496 + struct hisi_ptt_trace_ctrl *ctrl = &hisi_ptt->trace_ctrl; 497 + u32 val; 498 + 499 + val = FIELD_GET(HISI_PTT_PMU_FILTER_VAL_MASK, event->attr.config); 500 + hisi_ptt->trace_ctrl.filter = val; 501 + 502 + val = FIELD_GET(HISI_PTT_PMU_DIRECTION_MASK, event->attr.config); 503 + ctrl->direction = val; 504 + 505 + val = FIELD_GET(HISI_PTT_PMU_TYPE_MASK, event->attr.config); 506 + ctrl->type = val; 507 + 508 + val = FIELD_GET(HISI_PTT_PMU_FORMAT_MASK, event->attr.config); 509 + ctrl->format = val; 510 + } 511 + 512 + static int hisi_ptt_pmu_event_init(struct perf_event *event) 513 + { 514 + struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu); 515 + int ret; 516 + u32 val; 517 + 518 + if (event->cpu < 0) { 519 + dev_dbg(event->pmu->dev, "Per-task mode not supported\n"); 520 + return -EOPNOTSUPP; 521 + } 522 + 523 + if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type) 524 + return -ENOENT; 525 + 526 + ret = hisi_ptt_trace_valid_filter(hisi_ptt, event->attr.config); 527 + if (ret < 0) 528 + return ret; 529 + 530 + val = FIELD_GET(HISI_PTT_PMU_DIRECTION_MASK, event->attr.config); 531 + ret = hisi_ptt_trace_valid_direction(val); 532 + if (ret < 0) 533 + return ret; 534 + 535 + val = FIELD_GET(HISI_PTT_PMU_TYPE_MASK, event->attr.config); 536 + ret = hisi_ptt_trace_valid_type(val); 537 + if (ret < 0) 538 + return ret; 539 + 540 + val = FIELD_GET(HISI_PTT_PMU_FORMAT_MASK, event->attr.config); 541 + return hisi_ptt_trace_valid_format(val); 542 + } 543 + 544 + static void *hisi_ptt_pmu_setup_aux(struct perf_event *event, void 
**pages, 545 + int nr_pages, bool overwrite) 546 + { 547 + struct hisi_ptt_pmu_buf *buf; 548 + struct page **pagelist; 549 + int i; 550 + 551 + if (overwrite) { 552 + dev_warn(event->pmu->dev, "Overwrite mode is not supported\n"); 553 + return NULL; 554 + } 555 + 556 + /* If the pages size less than buffers, we cannot start trace */ 557 + if (nr_pages < HISI_PTT_TRACE_TOTAL_BUF_SIZE / PAGE_SIZE) 558 + return NULL; 559 + 560 + buf = kzalloc(sizeof(*buf), GFP_KERNEL); 561 + if (!buf) 562 + return NULL; 563 + 564 + pagelist = kcalloc(nr_pages, sizeof(*pagelist), GFP_KERNEL); 565 + if (!pagelist) 566 + goto err; 567 + 568 + for (i = 0; i < nr_pages; i++) 569 + pagelist[i] = virt_to_page(pages[i]); 570 + 571 + buf->base = vmap(pagelist, nr_pages, VM_MAP, PAGE_KERNEL); 572 + if (!buf->base) { 573 + kfree(pagelist); 574 + goto err; 575 + } 576 + 577 + buf->nr_pages = nr_pages; 578 + buf->length = nr_pages * PAGE_SIZE; 579 + buf->pos = 0; 580 + 581 + kfree(pagelist); 582 + return buf; 583 + err: 584 + kfree(buf); 585 + return NULL; 586 + } 587 + 588 + static void hisi_ptt_pmu_free_aux(void *aux) 589 + { 590 + struct hisi_ptt_pmu_buf *buf = aux; 591 + 592 + vunmap(buf->base); 593 + kfree(buf); 594 + } 595 + 596 + static void hisi_ptt_pmu_start(struct perf_event *event, int flags) 597 + { 598 + struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu); 599 + struct perf_output_handle *handle = &hisi_ptt->trace_ctrl.handle; 600 + struct hw_perf_event *hwc = &event->hw; 601 + struct device *dev = event->pmu->dev; 602 + struct hisi_ptt_pmu_buf *buf; 603 + int cpu = event->cpu; 604 + int ret; 605 + 606 + hwc->state = 0; 607 + 608 + /* Serialize the perf process if user specified several CPUs */ 609 + spin_lock(&hisi_ptt->pmu_lock); 610 + if (hisi_ptt->trace_ctrl.started) { 611 + dev_dbg(dev, "trace has already started\n"); 612 + goto stop; 613 + } 614 + 615 + /* 616 + * Handle the interrupt on the same cpu which starts the trace to avoid 617 + * context mismatch. 
Otherwise we'll trigger the WARN from the perf 618 + * core in event_function_local(). If CPU passed is offline we'll fail 619 + * here, just log it since we can do nothing here. 620 + */ 621 + ret = irq_set_affinity(pci_irq_vector(hisi_ptt->pdev, HISI_PTT_TRACE_DMA_IRQ), 622 + cpumask_of(cpu)); 623 + if (ret) 624 + dev_warn(dev, "failed to set the affinity of trace interrupt\n"); 625 + 626 + hisi_ptt->trace_ctrl.on_cpu = cpu; 627 + 628 + buf = perf_aux_output_begin(handle, event); 629 + if (!buf) { 630 + dev_dbg(dev, "aux output begin failed\n"); 631 + goto stop; 632 + } 633 + 634 + buf->pos = handle->head % buf->length; 635 + 636 + hisi_ptt_pmu_init_configs(hisi_ptt, event); 637 + 638 + ret = hisi_ptt_trace_start(hisi_ptt); 639 + if (ret) { 640 + dev_dbg(dev, "trace start failed, ret = %d\n", ret); 641 + perf_aux_output_end(handle, 0); 642 + goto stop; 643 + } 644 + 645 + spin_unlock(&hisi_ptt->pmu_lock); 646 + return; 647 + stop: 648 + event->hw.state |= PERF_HES_STOPPED; 649 + spin_unlock(&hisi_ptt->pmu_lock); 650 + } 651 + 652 + static void hisi_ptt_pmu_stop(struct perf_event *event, int flags) 653 + { 654 + struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu); 655 + struct hw_perf_event *hwc = &event->hw; 656 + 657 + if (hwc->state & PERF_HES_STOPPED) 658 + return; 659 + 660 + spin_lock(&hisi_ptt->pmu_lock); 661 + if (hisi_ptt->trace_ctrl.started) { 662 + hisi_ptt_trace_end(hisi_ptt); 663 + 664 + if (!hisi_ptt_wait_trace_hw_idle(hisi_ptt)) 665 + dev_warn(event->pmu->dev, "Device is still busy\n"); 666 + 667 + hisi_ptt_update_aux(hisi_ptt, hisi_ptt->trace_ctrl.buf_index, true); 668 + } 669 + spin_unlock(&hisi_ptt->pmu_lock); 670 + 671 + hwc->state |= PERF_HES_STOPPED; 672 + perf_event_update_userpage(event); 673 + hwc->state |= PERF_HES_UPTODATE; 674 + } 675 + 676 + static int hisi_ptt_pmu_add(struct perf_event *event, int flags) 677 + { 678 + struct hisi_ptt *hisi_ptt = to_hisi_ptt(event->pmu); 679 + struct hw_perf_event *hwc = &event->hw; 680 + int cpu = 
event->cpu; 681 + 682 + /* Only allow the cpus on the device's node to add the event */ 683 + if (!cpumask_test_cpu(cpu, cpumask_of_node(dev_to_node(&hisi_ptt->pdev->dev)))) 684 + return 0; 685 + 686 + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; 687 + 688 + if (flags & PERF_EF_START) { 689 + hisi_ptt_pmu_start(event, PERF_EF_RELOAD); 690 + if (hwc->state & PERF_HES_STOPPED) 691 + return -EINVAL; 692 + } 693 + 694 + return 0; 695 + } 696 + 697 + static void hisi_ptt_pmu_del(struct perf_event *event, int flags) 698 + { 699 + hisi_ptt_pmu_stop(event, PERF_EF_UPDATE); 700 + } 701 + 702 + static void hisi_ptt_remove_cpuhp_instance(void *hotplug_node) 703 + { 704 + cpuhp_state_remove_instance_nocalls(hisi_ptt_pmu_online, hotplug_node); 705 + } 706 + 707 + static void hisi_ptt_unregister_pmu(void *pmu) 708 + { 709 + perf_pmu_unregister(pmu); 710 + } 711 + 712 + static int hisi_ptt_register_pmu(struct hisi_ptt *hisi_ptt) 713 + { 714 + u16 core_id, sicl_id; 715 + char *pmu_name; 716 + u32 reg; 717 + int ret; 718 + 719 + ret = cpuhp_state_add_instance_nocalls(hisi_ptt_pmu_online, 720 + &hisi_ptt->hotplug_node); 721 + if (ret) 722 + return ret; 723 + 724 + ret = devm_add_action_or_reset(&hisi_ptt->pdev->dev, 725 + hisi_ptt_remove_cpuhp_instance, 726 + &hisi_ptt->hotplug_node); 727 + if (ret) 728 + return ret; 729 + 730 + spin_lock_init(&hisi_ptt->pmu_lock); 731 + 732 + hisi_ptt->hisi_ptt_pmu = (struct pmu) { 733 + .module = THIS_MODULE, 734 + .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE, 735 + .task_ctx_nr = perf_sw_context, 736 + .attr_groups = hisi_ptt_pmu_groups, 737 + .event_init = hisi_ptt_pmu_event_init, 738 + .setup_aux = hisi_ptt_pmu_setup_aux, 739 + .free_aux = hisi_ptt_pmu_free_aux, 740 + .start = hisi_ptt_pmu_start, 741 + .stop = hisi_ptt_pmu_stop, 742 + .add = hisi_ptt_pmu_add, 743 + .del = hisi_ptt_pmu_del, 744 + }; 745 + 746 + reg = readl(hisi_ptt->iobase + HISI_PTT_LOCATION); 747 + core_id = FIELD_GET(HISI_PTT_CORE_ID, reg); 748 + sicl_id 
= FIELD_GET(HISI_PTT_SICL_ID, reg); 749 + 750 + pmu_name = devm_kasprintf(&hisi_ptt->pdev->dev, GFP_KERNEL, "hisi_ptt%u_%u", 751 + sicl_id, core_id); 752 + if (!pmu_name) 753 + return -ENOMEM; 754 + 755 + ret = perf_pmu_register(&hisi_ptt->hisi_ptt_pmu, pmu_name, -1); 756 + if (ret) 757 + return ret; 758 + 759 + return devm_add_action_or_reset(&hisi_ptt->pdev->dev, 760 + hisi_ptt_unregister_pmu, 761 + &hisi_ptt->hisi_ptt_pmu); 762 + } 763 + 764 + /* 765 + * The DMA of PTT trace can only use direct mappings due to some 766 + * hardware restriction. Check whether there is no IOMMU or the 767 + * policy of the IOMMU domain is passthrough, otherwise the trace 768 + * cannot work. 769 + * 770 + * The PTT device is supposed to behind an ARM SMMUv3, which 771 + * should have passthrough the device by a quirk. 772 + */ 773 + static int hisi_ptt_check_iommu_mapping(struct pci_dev *pdev) 774 + { 775 + struct iommu_domain *iommu_domain; 776 + 777 + iommu_domain = iommu_get_domain_for_dev(&pdev->dev); 778 + if (!iommu_domain || iommu_domain->type == IOMMU_DOMAIN_IDENTITY) 779 + return 0; 780 + 781 + return -EOPNOTSUPP; 782 + } 783 + 784 + static int hisi_ptt_probe(struct pci_dev *pdev, 785 + const struct pci_device_id *id) 786 + { 787 + struct hisi_ptt *hisi_ptt; 788 + int ret; 789 + 790 + ret = hisi_ptt_check_iommu_mapping(pdev); 791 + if (ret) { 792 + pci_err(pdev, "requires direct DMA mappings\n"); 793 + return ret; 794 + } 795 + 796 + hisi_ptt = devm_kzalloc(&pdev->dev, sizeof(*hisi_ptt), GFP_KERNEL); 797 + if (!hisi_ptt) 798 + return -ENOMEM; 799 + 800 + hisi_ptt->pdev = pdev; 801 + pci_set_drvdata(pdev, hisi_ptt); 802 + 803 + ret = pcim_enable_device(pdev); 804 + if (ret) { 805 + pci_err(pdev, "failed to enable device, ret = %d\n", ret); 806 + return ret; 807 + } 808 + 809 + ret = pcim_iomap_regions(pdev, BIT(2), DRV_NAME); 810 + if (ret) { 811 + pci_err(pdev, "failed to remap io memory, ret = %d\n", ret); 812 + return ret; 813 + } 814 + 815 + hisi_ptt->iobase = 
pcim_iomap_table(pdev)[2]; 816 + 817 + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 818 + if (ret) { 819 + pci_err(pdev, "failed to set 64 bit dma mask, ret = %d\n", ret); 820 + return ret; 821 + } 822 + 823 + pci_set_master(pdev); 824 + 825 + ret = hisi_ptt_register_irq(hisi_ptt); 826 + if (ret) 827 + return ret; 828 + 829 + ret = hisi_ptt_init_ctrls(hisi_ptt); 830 + if (ret) { 831 + pci_err(pdev, "failed to init controls, ret = %d\n", ret); 832 + return ret; 833 + } 834 + 835 + ret = hisi_ptt_register_pmu(hisi_ptt); 836 + if (ret) { 837 + pci_err(pdev, "failed to register PMU device, ret = %d", ret); 838 + return ret; 839 + } 840 + 841 + return 0; 842 + } 843 + 844 + static const struct pci_device_id hisi_ptt_id_tbl[] = { 845 + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa12e) }, 846 + { } 847 + }; 848 + MODULE_DEVICE_TABLE(pci, hisi_ptt_id_tbl); 849 + 850 + static struct pci_driver hisi_ptt_driver = { 851 + .name = DRV_NAME, 852 + .id_table = hisi_ptt_id_tbl, 853 + .probe = hisi_ptt_probe, 854 + }; 855 + 856 + static int hisi_ptt_cpu_teardown(unsigned int cpu, struct hlist_node *node) 857 + { 858 + struct hisi_ptt *hisi_ptt; 859 + struct device *dev; 860 + int target, src; 861 + 862 + hisi_ptt = hlist_entry_safe(node, struct hisi_ptt, hotplug_node); 863 + src = hisi_ptt->trace_ctrl.on_cpu; 864 + dev = hisi_ptt->hisi_ptt_pmu.dev; 865 + 866 + if (!hisi_ptt->trace_ctrl.started || src != cpu) 867 + return 0; 868 + 869 + target = cpumask_any_but(cpumask_of_node(dev_to_node(&hisi_ptt->pdev->dev)), cpu); 870 + if (target >= nr_cpu_ids) { 871 + dev_err(dev, "no available cpu for perf context migration\n"); 872 + return 0; 873 + } 874 + 875 + perf_pmu_migrate_context(&hisi_ptt->hisi_ptt_pmu, src, target); 876 + 877 + /* 878 + * Also make sure the interrupt bind to the migrated CPU as well. Warn 879 + * the user on failure here. 
880 + */ 881 + if (irq_set_affinity(pci_irq_vector(hisi_ptt->pdev, HISI_PTT_TRACE_DMA_IRQ), 882 + cpumask_of(target))) 883 + dev_warn(dev, "failed to set the affinity of trace interrupt\n"); 884 + 885 + hisi_ptt->trace_ctrl.on_cpu = target; 886 + return 0; 887 + } 888 + 889 + static int __init hisi_ptt_init(void) 890 + { 891 + int ret; 892 + 893 + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRV_NAME, NULL, 894 + hisi_ptt_cpu_teardown); 895 + if (ret < 0) 896 + return ret; 897 + hisi_ptt_pmu_online = ret; 898 + 899 + ret = pci_register_driver(&hisi_ptt_driver); 900 + if (ret) 901 + cpuhp_remove_multi_state(hisi_ptt_pmu_online); 902 + 903 + return ret; 904 + } 905 + module_init(hisi_ptt_init); 906 + 907 + static void __exit hisi_ptt_exit(void) 908 + { 909 + pci_unregister_driver(&hisi_ptt_driver); 910 + cpuhp_remove_multi_state(hisi_ptt_pmu_online); 911 + } 912 + module_exit(hisi_ptt_exit); 913 + 914 + MODULE_LICENSE("GPL"); 915 + MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>"); 916 + MODULE_DESCRIPTION("Driver for HiSilicon PCIe tune and trace device");
+177
drivers/hwtracing/ptt/hisi_ptt.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Driver for HiSilicon PCIe tune and trace device
 *
 * Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
 * Author: Yicong Yang <yangyicong@hisilicon.com>
 */

#ifndef _HISI_PTT_H
#define _HISI_PTT_H

#include <linux/bits.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/perf_event.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DRV_NAME "hisi_ptt"

/*
 * The definition of the device registers and register fields.
 * These are hardware register offsets/bit layouts — do not change.
 */
#define HISI_PTT_TRACE_ADDR_SIZE	0x0800
#define HISI_PTT_TRACE_ADDR_BASE_LO_0	0x0810
#define HISI_PTT_TRACE_ADDR_BASE_HI_0	0x0814
#define HISI_PTT_TRACE_ADDR_STRIDE	0x8
#define HISI_PTT_TRACE_CTRL		0x0850
#define HISI_PTT_TRACE_CTRL_EN		BIT(0)
#define HISI_PTT_TRACE_CTRL_RST		BIT(1)
#define HISI_PTT_TRACE_CTRL_RXTX_SEL	GENMASK(3, 2)
#define HISI_PTT_TRACE_CTRL_TYPE_SEL	GENMASK(7, 4)
#define HISI_PTT_TRACE_CTRL_DATA_FORMAT	BIT(14)
#define HISI_PTT_TRACE_CTRL_FILTER_MODE	BIT(15)
#define HISI_PTT_TRACE_CTRL_TARGET_SEL	GENMASK(31, 16)
#define HISI_PTT_TRACE_INT_STAT		0x0890
#define HISI_PTT_TRACE_INT_STAT_MASK	GENMASK(3, 0)
#define HISI_PTT_TRACE_INT_MASK		0x0894
#define HISI_PTT_TRACE_WR_STS		0x08a0
#define HISI_PTT_TRACE_WR_STS_WRITE	GENMASK(27, 0)
#define HISI_PTT_TRACE_WR_STS_BUFFER	GENMASK(29, 28)
#define HISI_PTT_TRACE_STS		0x08b0
#define HISI_PTT_TRACE_IDLE		BIT(0)
#define HISI_PTT_DEVICE_RANGE		0x0fe0
#define HISI_PTT_DEVICE_RANGE_UPPER	GENMASK(31, 16)
#define HISI_PTT_DEVICE_RANGE_LOWER	GENMASK(15, 0)
#define HISI_PTT_LOCATION		0x0fe8
#define HISI_PTT_CORE_ID		GENMASK(15, 0)
#define HISI_PTT_SICL_ID		GENMASK(31, 16)

/* Parameters of PTT trace DMA part. */
#define HISI_PTT_TRACE_DMA_IRQ			0
#define HISI_PTT_TRACE_BUF_CNT			4
#define HISI_PTT_TRACE_BUF_SIZE			SZ_4M
#define HISI_PTT_TRACE_TOTAL_BUF_SIZE		(HISI_PTT_TRACE_BUF_SIZE * \
						 HISI_PTT_TRACE_BUF_CNT)
/* Wait time for hardware DMA to reset */
#define HISI_PTT_RESET_TIMEOUT_US	10UL
#define HISI_PTT_RESET_POLL_INTERVAL_US	1UL
/* Poll timeout and interval for waiting hardware work to finish */
#define HISI_PTT_WAIT_TRACE_TIMEOUT_US	100UL
#define HISI_PTT_WAIT_POLL_INTERVAL_US	10UL

/* Map a devfn to the core port ID: low 3 bits of the slot, shifted left by 1. */
#define HISI_PCIE_CORE_PORT_ID(devfn)	((PCI_SLOT(devfn) & 0x7) << 1)

/* Definition of the PMU configs (bit layout of perf_event_attr::config) */
#define HISI_PTT_PMU_FILTER_IS_PORT	BIT(19)
#define HISI_PTT_PMU_FILTER_VAL_MASK	GENMASK(15, 0)
#define HISI_PTT_PMU_DIRECTION_MASK	GENMASK(23, 20)
#define HISI_PTT_PMU_TYPE_MASK		GENMASK(31, 24)
#define HISI_PTT_PMU_FORMAT_MASK	GENMASK(35, 32)

/**
 * struct hisi_ptt_dma_buffer - Describe a single trace buffer of PTT trace.
 *                              The detail of the data format is described
 *                              in the documentation of PTT device.
 * @dma:  DMA address of this buffer visible to the device
 * @addr: virtual address of this buffer visible to the cpu
 */
struct hisi_ptt_dma_buffer {
	dma_addr_t dma;
	void *addr;
};

/**
 * struct hisi_ptt_trace_ctrl - Control and status of PTT trace
 * @trace_buf: array of the trace buffers for holding the trace data.
 *             the length will be HISI_PTT_TRACE_BUF_CNT.
 * @handle:    perf output handle of current trace session
 * @buf_index: the index of current using trace buffer
 * @on_cpu:    current tracing cpu
 * @started:   current trace status, true for started
 * @is_port:   whether we're tracing root port or not
 * @direction: direction of the TLP headers to trace
 * @filter:    filter value for tracing the TLP headers
 * @format:    format of the TLP headers to trace
 * @type:      type of the TLP headers to trace
 */
struct hisi_ptt_trace_ctrl {
	struct hisi_ptt_dma_buffer *trace_buf;
	struct perf_output_handle handle;
	u32 buf_index;
	int on_cpu;
	bool started;
	bool is_port;
	/* Bitfield widths match the HISI_PTT_TRACE_CTRL_* register fields. */
	u32 direction:2;
	u32 filter:16;
	u32 format:1;
	u32 type:4;
};

/**
 * struct hisi_ptt_filter_desc - Descriptor of the PTT trace filter
 * @list:    entry of this descriptor in the filter list
 * @is_port: the PCI device of the filter is a Root Port or not
 * @devid:   the PCI device's devid of the filter
 */
struct hisi_ptt_filter_desc {
	struct list_head list;
	bool is_port;
	u16 devid;
};

/**
 * struct hisi_ptt_pmu_buf - Descriptor of the AUX buffer of PTT trace
 * @length:   size of the AUX buffer
 * @nr_pages: number of pages of the AUX buffer
 * @base:     start address of AUX buffer
 * @pos:      position in the AUX buffer to commit traced data
 */
struct hisi_ptt_pmu_buf {
	size_t length;
	int nr_pages;
	void *base;
	long pos;
};

/**
 * struct hisi_ptt - Per PTT device data
 * @trace_ctrl:   the control information of PTT trace
 * @hotplug_node: node for register cpu hotplug event
 * @hisi_ptt_pmu: the PMU device of trace
 * @iobase:       base IO address of the device
 * @pdev:         pci_dev of this PTT device
 * @pmu_lock:     lock to serialize the perf process
 * @upper_bdf:    the upper BDF range of the PCI devices managed by this PTT device
 * @lower_bdf:    the lower BDF range of the PCI devices managed by this PTT device
 * @port_filters: the filter list of root ports
 * @req_filters:  the filter list of requester ID
 * @port_mask:    port mask of the managed root ports
 */
struct hisi_ptt {
	struct hisi_ptt_trace_ctrl trace_ctrl;
	struct hlist_node hotplug_node;
	struct pmu hisi_ptt_pmu;
	void __iomem *iobase;
	struct pci_dev *pdev;
	spinlock_t pmu_lock;
	u32 upper_bdf;
	u32 lower_bdf;

	/*
	 * The trace TLP headers can either be filtered by certain
	 * root port, or by the requester ID. Organize the filters
	 * by @port_filters and @req_filters here. The mask of all
	 * the valid ports is also cached for doing sanity check
	 * of user input.
	 */
	struct list_head port_filters;
	struct list_head req_filters;
	u16 port_mask;
};

/* Recover the containing hisi_ptt from its embedded struct pmu. */
#define to_hisi_ptt(pmu) container_of(pmu, struct hisi_ptt, hisi_ptt_pmu)

#endif /* _HISI_PTT_H */