Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drivers/perf: hisi: Add support for L3C PMU v3

This patch adds support for L3C PMU v3. The v3 L3C PMU supports
an extended events space which can be controlled in up to 2 extra
address spaces with separate overflow interrupts. The layout
of the control/event registers are kept the same. The extended events
with original ones together cover the monitoring job of all transactions
on L3C.

The extended events are specified with the `ext=[1|2]` option for the
driver to distinguish, like below:

perf stat -e hisi_sccl0_l3c0_0/event=<event_id>,ext=1/

Currently only the event option uses config bits [7, 0], leaving
plenty of unused space. Make ext use config bits [17, 16] and
reserve bits [15, 8] for the event option for future extension.

With the capability of extra counters, the number of counters for a
HiSilicon uncore PMU can reach up to 24, so the used mask is extended
accordingly.

The hw_perf_event::event_base is initialized to the base MMIO
address of the event and will be used for later control,
overflow handling and counts readout.

We still make use of the Uncore PMU framework for handling the
events and interrupt migration on CPU hotplug. The framework's
cpuhp callback will handle the event migration and interrupt
migration of original events; if the PMU supports extended events,
then the interrupt of the extended events is migrated to the same
CPU chosen by the framework.

A new HID of HISI0215 is used for this version of L3C PMU.

Acked-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Co-developed-by: Yushan Wang <wangyushan12@huawei.com>
Signed-off-by: Yushan Wang <wangyushan12@huawei.com>
Signed-off-by: Will Deacon <will@kernel.org>

authored by

Yicong Yang and committed by
Will Deacon
475d94df b3abb08d

+324 -30
+323 -29
drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
··· 39 39 40 40 /* L3C has 8-counters */ 41 41 #define L3C_NR_COUNTERS 0x8 42 + #define L3C_MAX_EXT 2 42 43 43 44 #define L3C_PERF_CTRL_EN 0x10000 44 45 #define L3C_TRACETAG_EN BIT(31) ··· 56 55 #define L3C_V1_NR_EVENTS 0x59 57 56 #define L3C_V2_NR_EVENTS 0xFF 58 57 58 + HISI_PMU_EVENT_ATTR_EXTRACTOR(ext, config, 17, 16); 59 59 HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_req, config1, 10, 8); 60 60 HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_cfg, config1, 15, 11); 61 61 HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 16, 16); 62 62 HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core, config2, 15, 0); 63 63 64 + struct hisi_l3c_pmu { 65 + struct hisi_pmu l3c_pmu; 66 + 67 + /* MMIO and IRQ resources for extension events */ 68 + void __iomem *ext_base[L3C_MAX_EXT]; 69 + int ext_irq[L3C_MAX_EXT]; 70 + int ext_num; 71 + }; 72 + 73 + #define to_hisi_l3c_pmu(_l3c_pmu) \ 74 + container_of(_l3c_pmu, struct hisi_l3c_pmu, l3c_pmu) 75 + 76 + /* 77 + * The hardware counter idx used in counter enable/disable, 78 + * interrupt enable/disable and status check, etc. 79 + */ 80 + #define L3C_HW_IDX(_cntr_idx) ((_cntr_idx) % L3C_NR_COUNTERS) 81 + 82 + /* Range of ext counters in used mask. 
*/ 83 + #define L3C_CNTR_EXT_L(_ext) (((_ext) + 1) * L3C_NR_COUNTERS) 84 + #define L3C_CNTR_EXT_H(_ext) (((_ext) + 2) * L3C_NR_COUNTERS) 85 + 86 + struct hisi_l3c_pmu_ext { 87 + bool support_ext; 88 + }; 89 + 90 + static bool support_ext(struct hisi_l3c_pmu *pmu) 91 + { 92 + struct hisi_l3c_pmu_ext *l3c_pmu_ext = pmu->l3c_pmu.dev_info->private; 93 + 94 + return l3c_pmu_ext->support_ext; 95 + } 96 + 64 97 static int hisi_l3c_pmu_get_event_idx(struct perf_event *event) 65 98 { 66 99 struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu); 100 + struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu); 67 101 unsigned long *used_mask = l3c_pmu->pmu_events.used_mask; 68 - u32 num_counters = l3c_pmu->num_counters; 102 + int ext = hisi_get_ext(event); 69 103 int idx; 70 104 71 - idx = find_first_zero_bit(used_mask, num_counters); 72 - if (idx == num_counters) 105 + /* 106 + * For an L3C PMU that supports extension events, we can monitor 107 + * maximum 2 * num_counters to 3 * num_counters events, depending on 108 + * the number of ext regions supported by hardware. Thus use bit 109 + * [0, num_counters - 1] for normal events and bit 110 + * [ext * num_counters, (ext + 1) * num_counters - 1] for extension 111 + * events. The idx allocation will keep unchanged for normal events and 112 + * we can also use the idx to distinguish whether it's an extension 113 + * event or not. 114 + * 115 + * Since normal events and extension events locates on the different 116 + * address space, save the base address to the event->hw.event_base. 
117 + */ 118 + if (ext && !support_ext(hisi_l3c_pmu)) 119 + return -EOPNOTSUPP; 120 + 121 + if (ext) 122 + event->hw.event_base = (unsigned long)hisi_l3c_pmu->ext_base[ext - 1]; 123 + else 124 + event->hw.event_base = (unsigned long)l3c_pmu->base; 125 + 126 + ext -= 1; 127 + idx = find_next_zero_bit(used_mask, L3C_CNTR_EXT_H(ext), L3C_CNTR_EXT_L(ext)); 128 + 129 + if (idx >= L3C_CNTR_EXT_H(ext)) 73 130 return -EAGAIN; 74 131 75 132 set_bit(idx, used_mask); 76 - event->hw.event_base = (unsigned long)l3c_pmu->base; 77 133 78 134 return idx; 79 135 } ··· 201 143 { 202 144 struct hw_perf_event *hwc = &event->hw; 203 145 u32 reg, reg_idx, shift, val; 204 - int idx = hwc->idx; 146 + int idx = L3C_HW_IDX(hwc->idx); 205 147 206 148 /* 207 149 * Select the appropriate datasource register(L3C_DATSRC_TYPE0/1). ··· 322 264 } 323 265 } 324 266 267 + static int hisi_l3c_pmu_check_filter(struct perf_event *event) 268 + { 269 + struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu); 270 + struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu); 271 + int ext = hisi_get_ext(event); 272 + 273 + if (ext < 0 || ext > hisi_l3c_pmu->ext_num) 274 + return -EINVAL; 275 + 276 + return 0; 277 + } 278 + 325 279 /* 326 280 * Select the counter register offset using the counter index 327 281 */ 328 282 static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx) 329 283 { 330 - return (L3C_CNTR0_LOWER + (cntr_idx * 8)); 284 + return L3C_CNTR0_LOWER + L3C_HW_IDX(cntr_idx) * 8; 331 285 } 332 286 333 287 static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu, ··· 360 290 struct hw_perf_event *hwc = &l3c_pmu->pmu_events.hw_events[idx]->hw; 361 291 u32 reg, reg_idx, shift, val; 362 292 293 + idx = L3C_HW_IDX(idx); 294 + 363 295 /* 364 296 * Select the appropriate event select register(L3C_EVENT_TYPE0/1). 365 297 * There are 2 event select registers for the 8 hardware counters. 
··· 376 304 /* Write event code to L3C_EVENT_TYPEx Register */ 377 305 val = hisi_l3c_pmu_event_readl(hwc, reg); 378 306 val &= ~(L3C_EVTYPE_NONE << shift); 379 - val |= (type << shift); 307 + val |= type << shift; 380 308 hisi_l3c_pmu_event_writel(hwc, reg, val); 381 309 } 382 310 383 311 static void hisi_l3c_pmu_start_counters(struct hisi_pmu *l3c_pmu) 384 312 { 313 + struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu); 314 + unsigned long *used_mask = l3c_pmu->pmu_events.used_mask; 315 + unsigned long used_cntr = find_first_bit(used_mask, l3c_pmu->num_counters); 385 316 u32 val; 317 + int i; 386 318 387 319 /* 388 - * Set perf_enable bit in L3C_PERF_CTRL register to start counting 389 - * for all enabled counters. 320 + * Check if any counter belongs to the normal range (instead of ext 321 + * range). If so, enable it. 390 322 */ 391 - val = readl(l3c_pmu->base + L3C_PERF_CTRL); 392 - val |= L3C_PERF_CTRL_EN; 393 - writel(val, l3c_pmu->base + L3C_PERF_CTRL); 323 + if (used_cntr < L3C_NR_COUNTERS) { 324 + val = readl(l3c_pmu->base + L3C_PERF_CTRL); 325 + val |= L3C_PERF_CTRL_EN; 326 + writel(val, l3c_pmu->base + L3C_PERF_CTRL); 327 + } 328 + 329 + /* If not, do enable it on ext ranges. */ 330 + for (i = 0; i < hisi_l3c_pmu->ext_num; i++) { 331 + /* Find used counter in this ext range, skip the range if not. 
*/ 332 + used_cntr = find_next_bit(used_mask, L3C_CNTR_EXT_H(i), L3C_CNTR_EXT_L(i)); 333 + if (used_cntr >= L3C_CNTR_EXT_H(i)) 334 + continue; 335 + 336 + val = readl(hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL); 337 + val |= L3C_PERF_CTRL_EN; 338 + writel(val, hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL); 339 + } 394 340 } 395 341 396 342 static void hisi_l3c_pmu_stop_counters(struct hisi_pmu *l3c_pmu) 397 343 { 344 + struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu); 345 + unsigned long *used_mask = l3c_pmu->pmu_events.used_mask; 346 + unsigned long used_cntr = find_first_bit(used_mask, l3c_pmu->num_counters); 398 347 u32 val; 348 + int i; 399 349 400 350 /* 401 - * Clear perf_enable bit in L3C_PERF_CTRL register to stop counting 402 - * for all enabled counters. 351 + * Check if any counter belongs to the normal range (instead of ext 352 + * range). If so, stop it. 403 353 */ 404 - val = readl(l3c_pmu->base + L3C_PERF_CTRL); 405 - val &= ~(L3C_PERF_CTRL_EN); 406 - writel(val, l3c_pmu->base + L3C_PERF_CTRL); 354 + if (used_cntr < L3C_NR_COUNTERS) { 355 + val = readl(l3c_pmu->base + L3C_PERF_CTRL); 356 + val &= ~L3C_PERF_CTRL_EN; 357 + writel(val, l3c_pmu->base + L3C_PERF_CTRL); 358 + } 359 + 360 + /* If not, do stop it on ext ranges. */ 361 + for (i = 0; i < hisi_l3c_pmu->ext_num; i++) { 362 + /* Find used counter in this ext range, skip the range if not. 
*/ 363 + used_cntr = find_next_bit(used_mask, L3C_CNTR_EXT_H(i), L3C_CNTR_EXT_L(i)); 364 + if (used_cntr >= L3C_CNTR_EXT_H(i)) 365 + continue; 366 + 367 + val = readl(hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL); 368 + val &= ~L3C_PERF_CTRL_EN; 369 + writel(val, hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL); 370 + } 407 371 } 408 372 409 373 static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu, ··· 449 341 450 342 /* Enable counter index in L3C_EVENT_CTRL register */ 451 343 val = hisi_l3c_pmu_event_readl(hwc, L3C_EVENT_CTRL); 452 - val |= (1 << hwc->idx); 344 + val |= 1 << L3C_HW_IDX(hwc->idx); 453 345 hisi_l3c_pmu_event_writel(hwc, L3C_EVENT_CTRL, val); 454 346 } 455 347 ··· 460 352 461 353 /* Clear counter index in L3C_EVENT_CTRL register */ 462 354 val = hisi_l3c_pmu_event_readl(hwc, L3C_EVENT_CTRL); 463 - val &= ~(1 << hwc->idx); 355 + val &= ~(1 << L3C_HW_IDX(hwc->idx)); 464 356 hisi_l3c_pmu_event_writel(hwc, L3C_EVENT_CTRL, val); 465 357 } 466 358 ··· 471 363 472 364 val = hisi_l3c_pmu_event_readl(hwc, L3C_INT_MASK); 473 365 /* Write 0 to enable interrupt */ 474 - val &= ~(1 << hwc->idx); 366 + val &= ~(1 << L3C_HW_IDX(hwc->idx)); 475 367 hisi_l3c_pmu_event_writel(hwc, L3C_INT_MASK, val); 476 368 } 477 369 ··· 482 374 483 375 val = hisi_l3c_pmu_event_readl(hwc, L3C_INT_MASK); 484 376 /* Write 1 to mask interrupt */ 485 - val |= (1 << hwc->idx); 377 + val |= 1 << L3C_HW_IDX(hwc->idx); 486 378 hisi_l3c_pmu_event_writel(hwc, L3C_INT_MASK, val); 487 379 } 488 380 489 381 static u32 hisi_l3c_pmu_get_int_status(struct hisi_pmu *l3c_pmu) 490 382 { 491 - return readl(l3c_pmu->base + L3C_INT_STATUS); 383 + struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu); 384 + u32 ext_int, status, status_ext = 0; 385 + int i; 386 + 387 + status = readl(l3c_pmu->base + L3C_INT_STATUS); 388 + 389 + if (!support_ext(hisi_l3c_pmu)) 390 + return status; 391 + 392 + for (i = 0; i < hisi_l3c_pmu->ext_num; i++) { 393 + ext_int = readl(hisi_l3c_pmu->ext_base[i] + 
L3C_INT_STATUS); 394 + status_ext |= ext_int << (L3C_NR_COUNTERS * i); 395 + } 396 + 397 + return status | (status_ext << L3C_NR_COUNTERS); 492 398 } 493 399 494 400 static void hisi_l3c_pmu_clear_int_status(struct hisi_pmu *l3c_pmu, int idx) 495 401 { 496 402 struct hw_perf_event *hwc = &l3c_pmu->pmu_events.hw_events[idx]->hw; 497 403 498 - hisi_l3c_pmu_event_writel(hwc, L3C_INT_CLEAR, 1 << idx); 404 + hisi_l3c_pmu_event_writel(hwc, L3C_INT_CLEAR, 1 << L3C_HW_IDX(idx)); 499 405 } 500 406 501 407 static int hisi_l3c_pmu_init_data(struct platform_device *pdev, ··· 546 424 return 0; 547 425 } 548 426 427 + static int hisi_l3c_pmu_init_ext(struct hisi_pmu *l3c_pmu, struct platform_device *pdev) 428 + { 429 + struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu); 430 + int ret, irq, ext_num, i; 431 + char *irqname; 432 + 433 + /* HiSilicon L3C PMU supporting ext should have more than 1 irq resources. */ 434 + ext_num = platform_irq_count(pdev); 435 + if (ext_num < L3C_MAX_EXT) 436 + return -ENODEV; 437 + 438 + /* 439 + * The number of ext supported equals the number of irq - 1, since one 440 + * of the irqs belongs to the normal part of PMU. 
441 + */ 442 + hisi_l3c_pmu->ext_num = ext_num - 1; 443 + 444 + for (i = 0; i < hisi_l3c_pmu->ext_num; i++) { 445 + hisi_l3c_pmu->ext_base[i] = devm_platform_ioremap_resource(pdev, i + 1); 446 + if (IS_ERR(hisi_l3c_pmu->ext_base[i])) 447 + return PTR_ERR(hisi_l3c_pmu->ext_base[i]); 448 + 449 + irq = platform_get_irq(pdev, i + 1); 450 + if (irq < 0) 451 + return irq; 452 + 453 + irqname = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s ext%d", 454 + dev_name(&pdev->dev), i + 1); 455 + if (!irqname) 456 + return -ENOMEM; 457 + 458 + ret = devm_request_irq(&pdev->dev, irq, hisi_uncore_pmu_isr, 459 + IRQF_NOBALANCING | IRQF_NO_THREAD, 460 + irqname, l3c_pmu); 461 + if (ret < 0) 462 + return dev_err_probe(&pdev->dev, ret, 463 + "Fail to request EXT IRQ: %d.\n", irq); 464 + 465 + hisi_l3c_pmu->ext_irq[i] = irq; 466 + } 467 + 468 + return 0; 469 + } 470 + 549 471 static struct attribute *hisi_l3c_pmu_v1_format_attr[] = { 550 472 HISI_PMU_FORMAT_ATTR(event, "config:0-7"), 551 473 NULL, ··· 612 446 static const struct attribute_group hisi_l3c_pmu_v2_format_group = { 613 447 .name = "format", 614 448 .attrs = hisi_l3c_pmu_v2_format_attr, 449 + }; 450 + 451 + static struct attribute *hisi_l3c_pmu_v3_format_attr[] = { 452 + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), 453 + HISI_PMU_FORMAT_ATTR(ext, "config:16-17"), 454 + HISI_PMU_FORMAT_ATTR(tt_req, "config1:8-10"), 455 + HISI_PMU_FORMAT_ATTR(tt_core, "config2:0-15"), 456 + NULL 457 + }; 458 + 459 + static const struct attribute_group hisi_l3c_pmu_v3_format_group = { 460 + .name = "format", 461 + .attrs = hisi_l3c_pmu_v3_format_attr, 615 462 }; 616 463 617 464 static struct attribute *hisi_l3c_pmu_v1_events_attr[] = { ··· 662 483 .attrs = hisi_l3c_pmu_v2_events_attr, 663 484 }; 664 485 486 + static struct attribute *hisi_l3c_pmu_v3_events_attr[] = { 487 + HISI_PMU_EVENT_ATTR(rd_spipe, 0x18), 488 + HISI_PMU_EVENT_ATTR(rd_hit_spipe, 0x19), 489 + HISI_PMU_EVENT_ATTR(wr_spipe, 0x1a), 490 + HISI_PMU_EVENT_ATTR(wr_hit_spipe, 0x1b), 491 + 
HISI_PMU_EVENT_ATTR(io_rd_spipe, 0x1c), 492 + HISI_PMU_EVENT_ATTR(io_rd_hit_spipe, 0x1d), 493 + HISI_PMU_EVENT_ATTR(io_wr_spipe, 0x1e), 494 + HISI_PMU_EVENT_ATTR(io_wr_hit_spipe, 0x1f), 495 + HISI_PMU_EVENT_ATTR(cycles, 0x7f), 496 + HISI_PMU_EVENT_ATTR(l3c_ref, 0xbc), 497 + HISI_PMU_EVENT_ATTR(l3c2ring, 0xbd), 498 + NULL 499 + }; 500 + 501 + static const struct attribute_group hisi_l3c_pmu_v3_events_group = { 502 + .name = "events", 503 + .attrs = hisi_l3c_pmu_v3_events_attr, 504 + }; 505 + 665 506 static const struct attribute_group *hisi_l3c_pmu_v1_attr_groups[] = { 666 507 &hisi_l3c_pmu_v1_format_group, 667 508 &hisi_l3c_pmu_v1_events_group, ··· 698 499 NULL 699 500 }; 700 501 502 + static const struct attribute_group *hisi_l3c_pmu_v3_attr_groups[] = { 503 + &hisi_l3c_pmu_v3_format_group, 504 + &hisi_l3c_pmu_v3_events_group, 505 + &hisi_pmu_cpumask_attr_group, 506 + &hisi_pmu_identifier_group, 507 + NULL 508 + }; 509 + 510 + static struct hisi_l3c_pmu_ext hisi_l3c_pmu_support_ext = { 511 + .support_ext = true, 512 + }; 513 + 514 + static struct hisi_l3c_pmu_ext hisi_l3c_pmu_not_support_ext = { 515 + .support_ext = false, 516 + }; 517 + 701 518 static const struct hisi_pmu_dev_info hisi_l3c_pmu_v1 = { 702 519 .attr_groups = hisi_l3c_pmu_v1_attr_groups, 703 520 .counter_bits = 48, 704 521 .check_event = L3C_V1_NR_EVENTS, 522 + .private = &hisi_l3c_pmu_not_support_ext, 705 523 }; 706 524 707 525 static const struct hisi_pmu_dev_info hisi_l3c_pmu_v2 = { 708 526 .attr_groups = hisi_l3c_pmu_v2_attr_groups, 709 527 .counter_bits = 64, 710 528 .check_event = L3C_V2_NR_EVENTS, 529 + .private = &hisi_l3c_pmu_not_support_ext, 530 + }; 531 + 532 + static const struct hisi_pmu_dev_info hisi_l3c_pmu_v3 = { 533 + .attr_groups = hisi_l3c_pmu_v3_attr_groups, 534 + .counter_bits = 64, 535 + .check_event = L3C_V2_NR_EVENTS, 536 + .private = &hisi_l3c_pmu_support_ext, 711 537 }; 712 538 713 539 static const struct hisi_uncore_ops hisi_uncore_l3c_ops = { ··· 750 526 
.clear_int_status = hisi_l3c_pmu_clear_int_status, 751 527 .enable_filter = hisi_l3c_pmu_enable_filter, 752 528 .disable_filter = hisi_l3c_pmu_disable_filter, 529 + .check_filter = hisi_l3c_pmu_check_filter, 753 530 }; 754 531 755 532 static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev, 756 533 struct hisi_pmu *l3c_pmu) 757 534 { 535 + struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu); 536 + struct hisi_l3c_pmu_ext *l3c_pmu_dev_ext; 758 537 int ret; 759 538 760 539 ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu); ··· 776 549 l3c_pmu->dev = &pdev->dev; 777 550 l3c_pmu->on_cpu = -1; 778 551 552 + l3c_pmu_dev_ext = l3c_pmu->dev_info->private; 553 + if (l3c_pmu_dev_ext->support_ext) { 554 + ret = hisi_l3c_pmu_init_ext(l3c_pmu, pdev); 555 + if (ret) 556 + return ret; 557 + /* 558 + * The extension events have their own counters with the 559 + * same number of the normal events counters. So we can 560 + * have at maximum num_counters * ext events monitored. 561 + */ 562 + l3c_pmu->num_counters += hisi_l3c_pmu->ext_num * L3C_NR_COUNTERS; 563 + } 564 + 779 565 return 0; 780 566 } 781 567 782 568 static int hisi_l3c_pmu_probe(struct platform_device *pdev) 783 569 { 570 + struct hisi_l3c_pmu *hisi_l3c_pmu; 784 571 struct hisi_pmu *l3c_pmu; 785 572 char *name; 786 573 int ret; 787 574 788 - l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*l3c_pmu), GFP_KERNEL); 789 - if (!l3c_pmu) 575 + hisi_l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*hisi_l3c_pmu), GFP_KERNEL); 576 + if (!hisi_l3c_pmu) 790 577 return -ENOMEM; 791 578 579 + l3c_pmu = &hisi_l3c_pmu->l3c_pmu; 792 580 platform_set_drvdata(pdev, l3c_pmu); 793 581 794 582 ret = hisi_l3c_pmu_dev_probe(pdev, l3c_pmu); 795 583 if (ret) 796 584 return ret; 797 585 798 - name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d", 799 - l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id); 586 + if (l3c_pmu->topo.sub_id >= 0) 587 + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d_%d", 588 + 
l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id, 589 + l3c_pmu->topo.sub_id); 590 + else 591 + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d", 592 + l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id); 800 593 if (!name) 801 594 return -ENOMEM; 802 595 ··· 851 604 static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = { 852 605 { "HISI0213", (kernel_ulong_t)&hisi_l3c_pmu_v1 }, 853 606 { "HISI0214", (kernel_ulong_t)&hisi_l3c_pmu_v2 }, 607 + { "HISI0215", (kernel_ulong_t)&hisi_l3c_pmu_v3 }, 854 608 {} 855 609 }; 856 610 MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match); ··· 866 618 .remove = hisi_l3c_pmu_remove, 867 619 }; 868 620 621 + static int hisi_l3c_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) 622 + { 623 + struct hisi_pmu *l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node); 624 + struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu); 625 + int ret, i; 626 + 627 + ret = hisi_uncore_pmu_online_cpu(cpu, node); 628 + if (ret) 629 + return ret; 630 + 631 + /* Avoid L3C pmu not supporting ext from ext irq migrating. */ 632 + if (!support_ext(hisi_l3c_pmu)) 633 + return 0; 634 + 635 + for (i = 0; i < hisi_l3c_pmu->ext_num; i++) 636 + WARN_ON(irq_set_affinity(hisi_l3c_pmu->ext_irq[i], 637 + cpumask_of(l3c_pmu->on_cpu))); 638 + 639 + return 0; 640 + } 641 + 642 + static int hisi_l3c_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) 643 + { 644 + struct hisi_pmu *l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node); 645 + struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu); 646 + int ret, i; 647 + 648 + ret = hisi_uncore_pmu_offline_cpu(cpu, node); 649 + if (ret) 650 + return ret; 651 + 652 + /* If failed to find any available CPU, skip irq migration. */ 653 + if (l3c_pmu->on_cpu < 0) 654 + return 0; 655 + 656 + /* Avoid L3C pmu not supporting ext from ext irq migrating. 
*/ 657 + if (!support_ext(hisi_l3c_pmu)) 658 + return 0; 659 + 660 + for (i = 0; i < hisi_l3c_pmu->ext_num; i++) 661 + WARN_ON(irq_set_affinity(hisi_l3c_pmu->ext_irq[i], 662 + cpumask_of(l3c_pmu->on_cpu))); 663 + 664 + return 0; 665 + } 666 + 869 667 static int __init hisi_l3c_pmu_module_init(void) 870 668 { 871 669 int ret; 872 670 873 671 ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, 874 672 "AP_PERF_ARM_HISI_L3_ONLINE", 875 - hisi_uncore_pmu_online_cpu, 876 - hisi_uncore_pmu_offline_cpu); 673 + hisi_l3c_pmu_online_cpu, 674 + hisi_l3c_pmu_offline_cpu); 877 675 if (ret) { 878 676 pr_err("L3C PMU: Error setup hotplug, ret = %d\n", ret); 879 677 return ret;
+1 -1
drivers/perf/hisilicon/hisi_uncore_pmu.h
··· 24 24 #define pr_fmt(fmt) "hisi_pmu: " fmt 25 25 26 26 #define HISI_PMU_V2 0x30 27 - #define HISI_MAX_COUNTERS 0x10 27 + #define HISI_MAX_COUNTERS 0x18 28 28 #define to_hisi_pmu(p) (container_of(p, struct hisi_pmu, pmu)) 29 29 30 30 #define HISI_PMU_ATTR(_name, _func, _config) \