Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf/x86/intel/uncore: Add support for the Intel Skylake client uncore PMU

This patch adds full support for Intel SKL client uncore PMU:

- Add support for SKL client CPU uncore PMU, which is similar to the
BDW client PMU driver. (There are some differences in CBOX numbering
and uncore control MSR.)
- Add new support for Skylake Mobile uncore PMUs, for both CPU and PCI
uncore functionality.

Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: http://lkml.kernel.org/r/1467208912-8179-1-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Kan Liang and committed by Ingo Molnar
46866b59 aefbc4d0

+69 -1
+2
arch/x86/events/intel/uncore.c
··· 1379 1379 }; 1380 1380 1381 1381 static const struct intel_uncore_init_fun skl_uncore_init __initconst = { 1382 + .cpu_init = skl_uncore_cpu_init, 1382 1383 .pci_init = skl_uncore_pci_init, 1383 1384 }; 1384 1385 ··· 1404 1403 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init), 1405 1404 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init), 1406 1405 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init), 1406 + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init), 1407 1407 {}, 1408 1408 }; 1409 1409
+1
arch/x86/events/intel/uncore.h
··· 364 364 int skl_uncore_pci_init(void); 365 365 void snb_uncore_cpu_init(void); 366 366 void nhm_uncore_cpu_init(void); 367 + void skl_uncore_cpu_init(void); 367 368 int snb_pci2phy_map_init(int devid); 368 369 369 370 /* perf_event_intel_uncore_snbep.c */
+66 -1
arch/x86/events/intel/uncore_snb.c
··· 1 - /* Nehalem/SandBridge/Haswell uncore support */ 1 + /* Nehalem/SandBridge/Haswell/Broadwell/Skylake uncore support */ 2 2 #include "uncore.h" 3 3 4 4 /* Uncore IMC PCI IDs */ ··· 9 9 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04 10 10 #define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604 11 11 #define PCI_DEVICE_ID_INTEL_SKL_IMC 0x191f 12 + #define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x190c 12 13 13 14 /* SNB event control */ 14 15 #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff ··· 64 63 /* NHM uncore register */ 65 64 #define NHM_UNC_PERFEVTSEL0 0x3c0 66 65 #define NHM_UNC_UNCORE_PMC0 0x3b0 66 + 67 + /* SKL uncore global control */ 68 + #define SKL_UNC_PERF_GLOBAL_CTL 0xe01 69 + #define SKL_UNC_GLOBAL_CTL_CORE_ALL ((1 << 5) - 1) 67 70 68 71 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); 69 72 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); ··· 182 177 uncore_msr_uncores = snb_msr_uncores; 183 178 if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) 184 179 snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; 180 + } 181 + 182 + static void skl_uncore_msr_init_box(struct intel_uncore_box *box) 183 + { 184 + if (box->pmu->pmu_idx == 0) { 185 + wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 186 + SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL); 187 + } 188 + } 189 + 190 + static void skl_uncore_msr_exit_box(struct intel_uncore_box *box) 191 + { 192 + if (box->pmu->pmu_idx == 0) 193 + wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0); 194 + } 195 + 196 + static struct intel_uncore_ops skl_uncore_msr_ops = { 197 + .init_box = skl_uncore_msr_init_box, 198 + .exit_box = skl_uncore_msr_exit_box, 199 + .disable_event = snb_uncore_msr_disable_event, 200 + .enable_event = snb_uncore_msr_enable_event, 201 + .read_counter = uncore_msr_read_counter, 202 + }; 203 + 204 + static struct intel_uncore_type skl_uncore_cbox = { 205 + .name = "cbox", 206 + .num_counters = 4, 207 + .num_boxes = 5, 208 + .perf_ctr_bits = 44, 209 + .fixed_ctr_bits = 48, 210 + .perf_ctr = SNB_UNC_CBO_0_PER_CTR0, 211 
+ .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0, 212 + .fixed_ctr = SNB_UNC_FIXED_CTR, 213 + .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL, 214 + .single_fixed = 1, 215 + .event_mask = SNB_UNC_RAW_EVENT_MASK, 216 + .msr_offset = SNB_UNC_CBO_MSR_OFFSET, 217 + .ops = &skl_uncore_msr_ops, 218 + .format_group = &snb_uncore_format_group, 219 + .event_descs = snb_uncore_events, 220 + }; 221 + 222 + static struct intel_uncore_type *skl_msr_uncores[] = { 223 + &skl_uncore_cbox, 224 + &snb_uncore_arb, 225 + NULL, 226 + }; 227 + 228 + void skl_uncore_cpu_init(void) 229 + { 230 + uncore_msr_uncores = skl_msr_uncores; 231 + if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) 232 + skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; 233 + snb_uncore_arb.ops = &skl_uncore_msr_ops; 185 234 } 186 235 187 236 enum { ··· 603 544 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC), 604 545 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), 605 546 }, 547 + { /* IMC */ 548 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC), 549 + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), 550 + }, 551 + 606 552 { /* end: all zeroes */ }, 607 553 }; 608 554 ··· 651 587 IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */ 652 588 IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */ 653 589 IMC_DEV(SKL_IMC, &skl_uncore_pci_driver), /* 6th Gen Core */ 590 + IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */ 654 591 { /* end marker */ } 655 592 }; 656 593