Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf/x86/intel/uncore: Add Skylake server uncore support

This patch implements the uncore monitoring driver for Skylake server.
The uncore subsystem in Skylake server is similar to that of the
previous server generation. There are some differences in config
register encoding and PCI device IDs. Besides, Skylake introduces many
new boxes to reflect the MESH architecture changes.

The control registers for IIO and UPI have been extended to 64 bits. This
patch also introduces event_mask_ext to handle the upper 32-bit mask.

The CHA box number could vary for different machines. This patch gets
the CHA box number by counting the CHA register space during
initialization at runtime.

Signed-off-by: Kan Liang <kan.liang@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: http://lkml.kernel.org/r/1471378190-17276-3-git-send-email-kan.liang@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Kan Liang and committed by
Ingo Molnar
cd34cd97 2668c619

+600 -1
+8 -1
arch/x86/events/intel/uncore.c
··· 685 685 /* fixed counters have event field hardcoded to zero */ 686 686 hwc->config = 0ULL; 687 687 } else { 688 - hwc->config = event->attr.config & pmu->type->event_mask; 688 + hwc->config = event->attr.config & 689 + (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32)); 689 690 if (pmu->type->ops->hw_config) { 690 691 ret = pmu->type->ops->hw_config(box, event); 691 692 if (ret) ··· 1324 1323 .pci_init = skl_uncore_pci_init, 1325 1324 }; 1326 1325 1326 + static const struct intel_uncore_init_fun skx_uncore_init __initconst = { 1327 + .cpu_init = skx_uncore_cpu_init, 1328 + .pci_init = skx_uncore_pci_init, 1329 + }; 1330 + 1327 1331 static const struct x86_cpu_id intel_uncore_match[] __initconst = { 1328 1332 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init), 1329 1333 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init), ··· 1351 1345 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init), 1352 1346 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init), 1353 1347 X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init), 1348 + X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init), 1354 1349 {}, 1355 1350 }; 1356 1351
+3
arch/x86/events/intel/uncore.h
··· 44 44 unsigned perf_ctr; 45 45 unsigned event_ctl; 46 46 unsigned event_mask; 47 + unsigned event_mask_ext; 47 48 unsigned fixed_ctr; 48 49 unsigned fixed_ctl; 49 50 unsigned box_ctl; ··· 382 381 void bdx_uncore_cpu_init(void); 383 382 int knl_uncore_pci_init(void); 384 383 void knl_uncore_cpu_init(void); 384 + int skx_uncore_pci_init(void); 385 + void skx_uncore_cpu_init(void); 385 386 386 387 /* perf_event_intel_uncore_nhmex.c */ 387 388 void nhmex_uncore_cpu_init(void);
+589
arch/x86/events/intel/uncore_snbep.c
··· 268 268 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ 269 269 SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET) 270 270 271 + /* SKX pci bus to socket mapping */ 272 + #define SKX_CPUNODEID 0xc0 273 + #define SKX_GIDNIDMAP 0xd4 274 + 275 + /* SKX CHA */ 276 + #define SKX_CHA_MSR_PMON_BOX_FILTER_TID (0x1ffULL << 0) 277 + #define SKX_CHA_MSR_PMON_BOX_FILTER_LINK (0xfULL << 9) 278 + #define SKX_CHA_MSR_PMON_BOX_FILTER_STATE (0x3ffULL << 17) 279 + #define SKX_CHA_MSR_PMON_BOX_FILTER_REM (0x1ULL << 32) 280 + #define SKX_CHA_MSR_PMON_BOX_FILTER_LOC (0x1ULL << 33) 281 + #define SKX_CHA_MSR_PMON_BOX_FILTER_ALL_OPC (0x1ULL << 35) 282 + #define SKX_CHA_MSR_PMON_BOX_FILTER_NM (0x1ULL << 36) 283 + #define SKX_CHA_MSR_PMON_BOX_FILTER_NOT_NM (0x1ULL << 37) 284 + #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC0 (0x3ffULL << 41) 285 + #define SKX_CHA_MSR_PMON_BOX_FILTER_OPC1 (0x3ffULL << 51) 286 + #define SKX_CHA_MSR_PMON_BOX_FILTER_C6 (0x1ULL << 61) 287 + #define SKX_CHA_MSR_PMON_BOX_FILTER_NC (0x1ULL << 62) 288 + #define SKX_CHA_MSR_PMON_BOX_FILTER_ISOC (0x1ULL << 63) 289 + 290 + /* SKX IIO */ 291 + #define SKX_IIO0_MSR_PMON_CTL0 0xa48 292 + #define SKX_IIO0_MSR_PMON_CTR0 0xa41 293 + #define SKX_IIO0_MSR_PMON_BOX_CTL 0xa40 294 + #define SKX_IIO_MSR_OFFSET 0x20 295 + 296 + #define SKX_PMON_CTL_TRESH_MASK (0xff << 24) 297 + #define SKX_PMON_CTL_TRESH_MASK_EXT (0xf) 298 + #define SKX_PMON_CTL_CH_MASK (0xff << 4) 299 + #define SKX_PMON_CTL_FC_MASK (0x7 << 12) 300 + #define SKX_IIO_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \ 301 + SNBEP_PMON_CTL_UMASK_MASK | \ 302 + SNBEP_PMON_CTL_EDGE_DET | \ 303 + SNBEP_PMON_CTL_INVERT | \ 304 + SKX_PMON_CTL_TRESH_MASK) 305 + #define SKX_IIO_PMON_RAW_EVENT_MASK_EXT (SKX_PMON_CTL_TRESH_MASK_EXT | \ 306 + SKX_PMON_CTL_CH_MASK | \ 307 + SKX_PMON_CTL_FC_MASK) 308 + 309 + /* SKX IRP */ 310 + #define SKX_IRP0_MSR_PMON_CTL0 0xa5b 311 + #define SKX_IRP0_MSR_PMON_CTR0 0xa59 312 + #define SKX_IRP0_MSR_PMON_BOX_CTL 0xa58 313 + #define SKX_IRP_MSR_OFFSET 0x20 314 + 315 + /* 
SKX UPI */ 316 + #define SKX_UPI_PCI_PMON_CTL0 0x350 317 + #define SKX_UPI_PCI_PMON_CTR0 0x318 318 + #define SKX_UPI_PCI_PMON_BOX_CTL 0x378 319 + #define SKX_PMON_CTL_UMASK_EXT 0xff 320 + 321 + /* SKX M2M */ 322 + #define SKX_M2M_PCI_PMON_CTL0 0x228 323 + #define SKX_M2M_PCI_PMON_CTR0 0x200 324 + #define SKX_M2M_PCI_PMON_BOX_CTL 0x258 325 + 271 326 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); 272 327 DEFINE_UNCORE_FORMAT_ATTR(event2, event, "config:0-6"); 273 328 DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21"); 274 329 DEFINE_UNCORE_FORMAT_ATTR(use_occ_ctr, use_occ_ctr, "config:7"); 275 330 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); 331 + DEFINE_UNCORE_FORMAT_ATTR(umask_ext, umask, "config:8-15,32-39"); 276 332 DEFINE_UNCORE_FORMAT_ATTR(qor, qor, "config:16"); 277 333 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); 278 334 DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19"); 279 335 DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); 336 + DEFINE_UNCORE_FORMAT_ATTR(thresh9, thresh, "config:24-35"); 280 337 DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); 281 338 DEFINE_UNCORE_FORMAT_ATTR(thresh6, thresh, "config:24-29"); 282 339 DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28"); ··· 341 284 DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30"); 342 285 DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51"); 343 286 DEFINE_UNCORE_FORMAT_ATTR(occ_edge_det, occ_edge_det, "config:31"); 287 + DEFINE_UNCORE_FORMAT_ATTR(ch_mask, ch_mask, "config:36-43"); 288 + DEFINE_UNCORE_FORMAT_ATTR(fc_mask, fc_mask, "config:44-46"); 344 289 DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4"); 345 290 DEFINE_UNCORE_FORMAT_ATTR(filter_tid2, filter_tid, "config1:0"); 346 291 DEFINE_UNCORE_FORMAT_ATTR(filter_tid3, filter_tid, "config1:0-5"); ··· 351 292 DEFINE_UNCORE_FORMAT_ATTR(filter_link, filter_link, "config1:5-8"); 352 293 DEFINE_UNCORE_FORMAT_ATTR(filter_link2, filter_link, "config1:6-8"); 353 
294 DEFINE_UNCORE_FORMAT_ATTR(filter_link3, filter_link, "config1:12"); 295 + DEFINE_UNCORE_FORMAT_ATTR(filter_link4, filter_link, "config1:9-12"); 354 296 DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17"); 355 297 DEFINE_UNCORE_FORMAT_ATTR(filter_nid2, filter_nid, "config1:32-47"); 356 298 DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22"); 357 299 DEFINE_UNCORE_FORMAT_ATTR(filter_state2, filter_state, "config1:17-22"); 358 300 DEFINE_UNCORE_FORMAT_ATTR(filter_state3, filter_state, "config1:17-23"); 359 301 DEFINE_UNCORE_FORMAT_ATTR(filter_state4, filter_state, "config1:18-20"); 302 + DEFINE_UNCORE_FORMAT_ATTR(filter_state5, filter_state, "config1:17-26"); 303 + DEFINE_UNCORE_FORMAT_ATTR(filter_rem, filter_rem, "config1:32"); 304 + DEFINE_UNCORE_FORMAT_ATTR(filter_loc, filter_loc, "config1:33"); 305 + DEFINE_UNCORE_FORMAT_ATTR(filter_nm, filter_nm, "config1:36"); 306 + DEFINE_UNCORE_FORMAT_ATTR(filter_not_nm, filter_not_nm, "config1:37"); 360 307 DEFINE_UNCORE_FORMAT_ATTR(filter_local, filter_local, "config1:33"); 361 308 DEFINE_UNCORE_FORMAT_ATTR(filter_all_op, filter_all_op, "config1:35"); 362 309 DEFINE_UNCORE_FORMAT_ATTR(filter_nnm, filter_nnm, "config1:37"); 363 310 DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31"); 364 311 DEFINE_UNCORE_FORMAT_ATTR(filter_opc2, filter_opc, "config1:52-60"); 365 312 DEFINE_UNCORE_FORMAT_ATTR(filter_opc3, filter_opc, "config1:41-60"); 313 + DEFINE_UNCORE_FORMAT_ATTR(filter_opc_0, filter_opc0, "config1:41-50"); 314 + DEFINE_UNCORE_FORMAT_ATTR(filter_opc_1, filter_opc1, "config1:51-60"); 366 315 DEFINE_UNCORE_FORMAT_ATTR(filter_nc, filter_nc, "config1:62"); 367 316 DEFINE_UNCORE_FORMAT_ATTR(filter_c6, filter_c6, "config1:61"); 368 317 DEFINE_UNCORE_FORMAT_ATTR(filter_isoc, filter_isoc, "config1:63"); ··· 3276 3209 } 3277 3210 3278 3211 /* end of BDX uncore support */ 3212 + 3213 + /* SKX uncore support */ 3214 + 3215 + static struct intel_uncore_type skx_uncore_ubox = { 3216 
+ .name = "ubox", 3217 + .num_counters = 2, 3218 + .num_boxes = 1, 3219 + .perf_ctr_bits = 48, 3220 + .fixed_ctr_bits = 48, 3221 + .perf_ctr = HSWEP_U_MSR_PMON_CTR0, 3222 + .event_ctl = HSWEP_U_MSR_PMON_CTL0, 3223 + .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK, 3224 + .fixed_ctr = HSWEP_U_MSR_PMON_UCLK_FIXED_CTR, 3225 + .fixed_ctl = HSWEP_U_MSR_PMON_UCLK_FIXED_CTL, 3226 + .ops = &ivbep_uncore_msr_ops, 3227 + .format_group = &ivbep_uncore_ubox_format_group, 3228 + }; 3229 + 3230 + static struct attribute *skx_uncore_cha_formats_attr[] = { 3231 + &format_attr_event.attr, 3232 + &format_attr_umask.attr, 3233 + &format_attr_edge.attr, 3234 + &format_attr_tid_en.attr, 3235 + &format_attr_inv.attr, 3236 + &format_attr_thresh8.attr, 3237 + &format_attr_filter_tid4.attr, 3238 + &format_attr_filter_link4.attr, 3239 + &format_attr_filter_state5.attr, 3240 + &format_attr_filter_rem.attr, 3241 + &format_attr_filter_loc.attr, 3242 + &format_attr_filter_nm.attr, 3243 + &format_attr_filter_all_op.attr, 3244 + &format_attr_filter_not_nm.attr, 3245 + &format_attr_filter_opc_0.attr, 3246 + &format_attr_filter_opc_1.attr, 3247 + &format_attr_filter_nc.attr, 3248 + &format_attr_filter_c6.attr, 3249 + &format_attr_filter_isoc.attr, 3250 + NULL, 3251 + }; 3252 + 3253 + static struct attribute_group skx_uncore_chabox_format_group = { 3254 + .name = "format", 3255 + .attrs = skx_uncore_cha_formats_attr, 3256 + }; 3257 + 3258 + static struct event_constraint skx_uncore_chabox_constraints[] = { 3259 + UNCORE_EVENT_CONSTRAINT(0x11, 0x1), 3260 + UNCORE_EVENT_CONSTRAINT(0x36, 0x1), 3261 + EVENT_CONSTRAINT_END 3262 + }; 3263 + 3264 + static struct extra_reg skx_uncore_cha_extra_regs[] = { 3265 + SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4), 3266 + SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4), 3267 + SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4), 3268 + SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), 3269 + SNBEP_CBO_EVENT_EXTRA_REG(0x2134, 0xffff, 0x4), 3270 + 
SNBEP_CBO_EVENT_EXTRA_REG(0x8134, 0xffff, 0x4), 3271 + }; 3272 + 3273 + static u64 skx_cha_filter_mask(int fields) 3274 + { 3275 + u64 mask = 0; 3276 + 3277 + if (fields & 0x1) 3278 + mask |= SKX_CHA_MSR_PMON_BOX_FILTER_TID; 3279 + if (fields & 0x2) 3280 + mask |= SKX_CHA_MSR_PMON_BOX_FILTER_LINK; 3281 + if (fields & 0x4) 3282 + mask |= SKX_CHA_MSR_PMON_BOX_FILTER_STATE; 3283 + return mask; 3284 + } 3285 + 3286 + static struct event_constraint * 3287 + skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event) 3288 + { 3289 + return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask); 3290 + } 3291 + 3292 + static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event) 3293 + { 3294 + struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; 3295 + struct extra_reg *er; 3296 + int idx = 0; 3297 + 3298 + for (er = skx_uncore_cha_extra_regs; er->msr; er++) { 3299 + if (er->event != (event->hw.config & er->config_mask)) 3300 + continue; 3301 + idx |= er->idx; 3302 + } 3303 + 3304 + if (idx) { 3305 + reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 + 3306 + HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx; 3307 + reg1->config = event->attr.config1 & skx_cha_filter_mask(idx); 3308 + reg1->idx = idx; 3309 + } 3310 + return 0; 3311 + } 3312 + 3313 + static struct intel_uncore_ops skx_uncore_chabox_ops = { 3314 + /* There is no frz_en for chabox ctl */ 3315 + .init_box = ivbep_uncore_msr_init_box, 3316 + .disable_box = snbep_uncore_msr_disable_box, 3317 + .enable_box = snbep_uncore_msr_enable_box, 3318 + .disable_event = snbep_uncore_msr_disable_event, 3319 + .enable_event = hswep_cbox_enable_event, 3320 + .read_counter = uncore_msr_read_counter, 3321 + .hw_config = skx_cha_hw_config, 3322 + .get_constraint = skx_cha_get_constraint, 3323 + .put_constraint = snbep_cbox_put_constraint, 3324 + }; 3325 + 3326 + static struct intel_uncore_type skx_uncore_chabox = { 3327 + .name = "cha", 3328 + .num_counters = 4, 3329 + .perf_ctr_bits = 48, 
3330 + .event_ctl = HSWEP_C0_MSR_PMON_CTL0, 3331 + .perf_ctr = HSWEP_C0_MSR_PMON_CTR0, 3332 + .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, 3333 + .box_ctl = HSWEP_C0_MSR_PMON_BOX_CTL, 3334 + .msr_offset = HSWEP_CBO_MSR_OFFSET, 3335 + .num_shared_regs = 1, 3336 + .constraints = skx_uncore_chabox_constraints, 3337 + .ops = &skx_uncore_chabox_ops, 3338 + .format_group = &skx_uncore_chabox_format_group, 3339 + }; 3340 + 3341 + static struct attribute *skx_uncore_iio_formats_attr[] = { 3342 + &format_attr_event.attr, 3343 + &format_attr_umask.attr, 3344 + &format_attr_edge.attr, 3345 + &format_attr_inv.attr, 3346 + &format_attr_thresh9.attr, 3347 + &format_attr_ch_mask.attr, 3348 + &format_attr_fc_mask.attr, 3349 + NULL, 3350 + }; 3351 + 3352 + static struct attribute_group skx_uncore_iio_format_group = { 3353 + .name = "format", 3354 + .attrs = skx_uncore_iio_formats_attr, 3355 + }; 3356 + 3357 + static struct event_constraint skx_uncore_iio_constraints[] = { 3358 + UNCORE_EVENT_CONSTRAINT(0x83, 0x3), 3359 + UNCORE_EVENT_CONSTRAINT(0x88, 0xc), 3360 + UNCORE_EVENT_CONSTRAINT(0x95, 0xc), 3361 + UNCORE_EVENT_CONSTRAINT(0xc0, 0xc), 3362 + UNCORE_EVENT_CONSTRAINT(0xc5, 0xc), 3363 + UNCORE_EVENT_CONSTRAINT(0xd4, 0xc), 3364 + EVENT_CONSTRAINT_END 3365 + }; 3366 + 3367 + static void skx_iio_enable_event(struct intel_uncore_box *box, 3368 + struct perf_event *event) 3369 + { 3370 + struct hw_perf_event *hwc = &event->hw; 3371 + 3372 + wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); 3373 + } 3374 + 3375 + static struct intel_uncore_ops skx_uncore_iio_ops = { 3376 + .init_box = ivbep_uncore_msr_init_box, 3377 + .disable_box = snbep_uncore_msr_disable_box, 3378 + .enable_box = snbep_uncore_msr_enable_box, 3379 + .disable_event = snbep_uncore_msr_disable_event, 3380 + .enable_event = skx_iio_enable_event, 3381 + .read_counter = uncore_msr_read_counter, 3382 + }; 3383 + 3384 + static struct intel_uncore_type skx_uncore_iio = { 3385 + .name = "iio", 3386 + .num_counters 
= 4, 3387 + .num_boxes = 5, 3388 + .perf_ctr_bits = 48, 3389 + .event_ctl = SKX_IIO0_MSR_PMON_CTL0, 3390 + .perf_ctr = SKX_IIO0_MSR_PMON_CTR0, 3391 + .event_mask = SKX_IIO_PMON_RAW_EVENT_MASK, 3392 + .event_mask_ext = SKX_IIO_PMON_RAW_EVENT_MASK_EXT, 3393 + .box_ctl = SKX_IIO0_MSR_PMON_BOX_CTL, 3394 + .msr_offset = SKX_IIO_MSR_OFFSET, 3395 + .constraints = skx_uncore_iio_constraints, 3396 + .ops = &skx_uncore_iio_ops, 3397 + .format_group = &skx_uncore_iio_format_group, 3398 + }; 3399 + 3400 + static struct attribute *skx_uncore_formats_attr[] = { 3401 + &format_attr_event.attr, 3402 + &format_attr_umask.attr, 3403 + &format_attr_edge.attr, 3404 + &format_attr_inv.attr, 3405 + &format_attr_thresh8.attr, 3406 + NULL, 3407 + }; 3408 + 3409 + static struct attribute_group skx_uncore_format_group = { 3410 + .name = "format", 3411 + .attrs = skx_uncore_formats_attr, 3412 + }; 3413 + 3414 + static struct intel_uncore_type skx_uncore_irp = { 3415 + .name = "irp", 3416 + .num_counters = 2, 3417 + .num_boxes = 5, 3418 + .perf_ctr_bits = 48, 3419 + .event_ctl = SKX_IRP0_MSR_PMON_CTL0, 3420 + .perf_ctr = SKX_IRP0_MSR_PMON_CTR0, 3421 + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 3422 + .box_ctl = SKX_IRP0_MSR_PMON_BOX_CTL, 3423 + .msr_offset = SKX_IRP_MSR_OFFSET, 3424 + .ops = &skx_uncore_iio_ops, 3425 + .format_group = &skx_uncore_format_group, 3426 + }; 3427 + 3428 + static struct intel_uncore_ops skx_uncore_pcu_ops = { 3429 + IVBEP_UNCORE_MSR_OPS_COMMON_INIT(), 3430 + .hw_config = hswep_pcu_hw_config, 3431 + .get_constraint = snbep_pcu_get_constraint, 3432 + .put_constraint = snbep_pcu_put_constraint, 3433 + }; 3434 + 3435 + static struct intel_uncore_type skx_uncore_pcu = { 3436 + .name = "pcu", 3437 + .num_counters = 4, 3438 + .num_boxes = 1, 3439 + .perf_ctr_bits = 48, 3440 + .perf_ctr = HSWEP_PCU_MSR_PMON_CTR0, 3441 + .event_ctl = HSWEP_PCU_MSR_PMON_CTL0, 3442 + .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK, 3443 + .box_ctl = HSWEP_PCU_MSR_PMON_BOX_CTL, 3444 + 
.num_shared_regs = 1, 3445 + .ops = &skx_uncore_pcu_ops, 3446 + .format_group = &snbep_uncore_pcu_format_group, 3447 + }; 3448 + 3449 + static struct intel_uncore_type *skx_msr_uncores[] = { 3450 + &skx_uncore_ubox, 3451 + &skx_uncore_chabox, 3452 + &skx_uncore_iio, 3453 + &skx_uncore_irp, 3454 + &skx_uncore_pcu, 3455 + NULL, 3456 + }; 3457 + 3458 + static int skx_count_chabox(void) 3459 + { 3460 + struct pci_dev *chabox_dev = NULL; 3461 + int bus, count = 0; 3462 + 3463 + while (1) { 3464 + chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev); 3465 + if (!chabox_dev) 3466 + break; 3467 + if (count == 0) 3468 + bus = chabox_dev->bus->number; 3469 + if (bus != chabox_dev->bus->number) 3470 + break; 3471 + count++; 3472 + } 3473 + 3474 + pci_dev_put(chabox_dev); 3475 + return count; 3476 + } 3477 + 3478 + void skx_uncore_cpu_init(void) 3479 + { 3480 + skx_uncore_chabox.num_boxes = skx_count_chabox(); 3481 + uncore_msr_uncores = skx_msr_uncores; 3482 + } 3483 + 3484 + static struct intel_uncore_type skx_uncore_imc = { 3485 + .name = "imc", 3486 + .num_counters = 4, 3487 + .num_boxes = 6, 3488 + .perf_ctr_bits = 48, 3489 + .fixed_ctr_bits = 48, 3490 + .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR, 3491 + .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL, 3492 + .event_descs = hswep_uncore_imc_events, 3493 + .perf_ctr = SNBEP_PCI_PMON_CTR0, 3494 + .event_ctl = SNBEP_PCI_PMON_CTL0, 3495 + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 3496 + .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 3497 + .ops = &ivbep_uncore_pci_ops, 3498 + .format_group = &skx_uncore_format_group, 3499 + }; 3500 + 3501 + static struct attribute *skx_upi_uncore_formats_attr[] = { 3502 + &format_attr_event_ext.attr, 3503 + &format_attr_umask_ext.attr, 3504 + &format_attr_edge.attr, 3505 + &format_attr_inv.attr, 3506 + &format_attr_thresh8.attr, 3507 + NULL, 3508 + }; 3509 + 3510 + static struct attribute_group skx_upi_uncore_format_group = { 3511 + .name = "format", 3512 + .attrs = 
skx_upi_uncore_formats_attr, 3513 + }; 3514 + 3515 + static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box) 3516 + { 3517 + struct pci_dev *pdev = box->pci_dev; 3518 + 3519 + __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); 3520 + pci_write_config_dword(pdev, SKX_UPI_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT); 3521 + } 3522 + 3523 + static struct intel_uncore_ops skx_upi_uncore_pci_ops = { 3524 + .init_box = skx_upi_uncore_pci_init_box, 3525 + .disable_box = snbep_uncore_pci_disable_box, 3526 + .enable_box = snbep_uncore_pci_enable_box, 3527 + .disable_event = snbep_uncore_pci_disable_event, 3528 + .enable_event = snbep_uncore_pci_enable_event, 3529 + .read_counter = snbep_uncore_pci_read_counter, 3530 + }; 3531 + 3532 + static struct intel_uncore_type skx_uncore_upi = { 3533 + .name = "upi", 3534 + .num_counters = 4, 3535 + .num_boxes = 3, 3536 + .perf_ctr_bits = 48, 3537 + .perf_ctr = SKX_UPI_PCI_PMON_CTR0, 3538 + .event_ctl = SKX_UPI_PCI_PMON_CTL0, 3539 + .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, 3540 + .event_mask_ext = SKX_PMON_CTL_UMASK_EXT, 3541 + .box_ctl = SKX_UPI_PCI_PMON_BOX_CTL, 3542 + .ops = &skx_upi_uncore_pci_ops, 3543 + .format_group = &skx_upi_uncore_format_group, 3544 + }; 3545 + 3546 + static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box) 3547 + { 3548 + struct pci_dev *pdev = box->pci_dev; 3549 + 3550 + __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); 3551 + pci_write_config_dword(pdev, SKX_M2M_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT); 3552 + } 3553 + 3554 + static struct intel_uncore_ops skx_m2m_uncore_pci_ops = { 3555 + .init_box = skx_m2m_uncore_pci_init_box, 3556 + .disable_box = snbep_uncore_pci_disable_box, 3557 + .enable_box = snbep_uncore_pci_enable_box, 3558 + .disable_event = snbep_uncore_pci_disable_event, 3559 + .enable_event = snbep_uncore_pci_enable_event, 3560 + .read_counter = snbep_uncore_pci_read_counter, 3561 + }; 3562 + 3563 + static struct intel_uncore_type skx_uncore_m2m = { 3564 + 
.name = "m2m", 3565 + .num_counters = 4, 3566 + .num_boxes = 2, 3567 + .perf_ctr_bits = 48, 3568 + .perf_ctr = SKX_M2M_PCI_PMON_CTR0, 3569 + .event_ctl = SKX_M2M_PCI_PMON_CTL0, 3570 + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 3571 + .box_ctl = SKX_M2M_PCI_PMON_BOX_CTL, 3572 + .ops = &skx_m2m_uncore_pci_ops, 3573 + .format_group = &skx_uncore_format_group, 3574 + }; 3575 + 3576 + static struct event_constraint skx_uncore_m2pcie_constraints[] = { 3577 + UNCORE_EVENT_CONSTRAINT(0x23, 0x3), 3578 + EVENT_CONSTRAINT_END 3579 + }; 3580 + 3581 + static struct intel_uncore_type skx_uncore_m2pcie = { 3582 + .name = "m2pcie", 3583 + .num_counters = 4, 3584 + .num_boxes = 4, 3585 + .perf_ctr_bits = 48, 3586 + .constraints = skx_uncore_m2pcie_constraints, 3587 + .perf_ctr = SNBEP_PCI_PMON_CTR0, 3588 + .event_ctl = SNBEP_PCI_PMON_CTL0, 3589 + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 3590 + .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 3591 + .ops = &ivbep_uncore_pci_ops, 3592 + .format_group = &skx_uncore_format_group, 3593 + }; 3594 + 3595 + static struct event_constraint skx_uncore_m3upi_constraints[] = { 3596 + UNCORE_EVENT_CONSTRAINT(0x1d, 0x1), 3597 + UNCORE_EVENT_CONSTRAINT(0x1e, 0x1), 3598 + UNCORE_EVENT_CONSTRAINT(0x40, 0x7), 3599 + UNCORE_EVENT_CONSTRAINT(0x4e, 0x7), 3600 + UNCORE_EVENT_CONSTRAINT(0x4f, 0x7), 3601 + UNCORE_EVENT_CONSTRAINT(0x50, 0x7), 3602 + UNCORE_EVENT_CONSTRAINT(0x51, 0x7), 3603 + UNCORE_EVENT_CONSTRAINT(0x52, 0x7), 3604 + EVENT_CONSTRAINT_END 3605 + }; 3606 + 3607 + static struct intel_uncore_type skx_uncore_m3upi = { 3608 + .name = "m3upi", 3609 + .num_counters = 3, 3610 + .num_boxes = 3, 3611 + .perf_ctr_bits = 48, 3612 + .constraints = skx_uncore_m3upi_constraints, 3613 + .perf_ctr = SNBEP_PCI_PMON_CTR0, 3614 + .event_ctl = SNBEP_PCI_PMON_CTL0, 3615 + .event_mask = SNBEP_PMON_RAW_EVENT_MASK, 3616 + .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 3617 + .ops = &ivbep_uncore_pci_ops, 3618 + .format_group = &skx_uncore_format_group, 3619 + }; 3620 + 3621 + enum { 3622 + 
SKX_PCI_UNCORE_IMC, 3623 + SKX_PCI_UNCORE_M2M, 3624 + SKX_PCI_UNCORE_UPI, 3625 + SKX_PCI_UNCORE_M2PCIE, 3626 + SKX_PCI_UNCORE_M3UPI, 3627 + }; 3628 + 3629 + static struct intel_uncore_type *skx_pci_uncores[] = { 3630 + [SKX_PCI_UNCORE_IMC] = &skx_uncore_imc, 3631 + [SKX_PCI_UNCORE_M2M] = &skx_uncore_m2m, 3632 + [SKX_PCI_UNCORE_UPI] = &skx_uncore_upi, 3633 + [SKX_PCI_UNCORE_M2PCIE] = &skx_uncore_m2pcie, 3634 + [SKX_PCI_UNCORE_M3UPI] = &skx_uncore_m3upi, 3635 + NULL, 3636 + }; 3637 + 3638 + static const struct pci_device_id skx_uncore_pci_ids[] = { 3639 + { /* MC0 Channel 0 */ 3640 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042), 3641 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 2, SKX_PCI_UNCORE_IMC, 0), 3642 + }, 3643 + { /* MC0 Channel 1 */ 3644 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046), 3645 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 6, SKX_PCI_UNCORE_IMC, 1), 3646 + }, 3647 + { /* MC0 Channel 2 */ 3648 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a), 3649 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 2, SKX_PCI_UNCORE_IMC, 2), 3650 + }, 3651 + { /* MC1 Channel 0 */ 3652 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2042), 3653 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 2, SKX_PCI_UNCORE_IMC, 3), 3654 + }, 3655 + { /* MC1 Channel 1 */ 3656 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2046), 3657 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(12, 6, SKX_PCI_UNCORE_IMC, 4), 3658 + }, 3659 + { /* MC1 Channel 2 */ 3660 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204a), 3661 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(13, 2, SKX_PCI_UNCORE_IMC, 5), 3662 + }, 3663 + { /* M2M0 */ 3664 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066), 3665 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 0, SKX_PCI_UNCORE_M2M, 0), 3666 + }, 3667 + { /* M2M1 */ 3668 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2066), 3669 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 0, SKX_PCI_UNCORE_M2M, 1), 3670 + }, 3671 + { /* UPI0 Link 0 */ 3672 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058), 3673 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(14, 0, 
SKX_PCI_UNCORE_UPI, 0), 3674 + }, 3675 + { /* UPI0 Link 1 */ 3676 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058), 3677 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, SKX_PCI_UNCORE_UPI, 1), 3678 + }, 3679 + { /* UPI1 Link 2 */ 3680 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2058), 3681 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, SKX_PCI_UNCORE_UPI, 2), 3682 + }, 3683 + { /* M2PCIe 0 */ 3684 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), 3685 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 1, SKX_PCI_UNCORE_M2PCIE, 0), 3686 + }, 3687 + { /* M2PCIe 1 */ 3688 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), 3689 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 1, SKX_PCI_UNCORE_M2PCIE, 1), 3690 + }, 3691 + { /* M2PCIe 2 */ 3692 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), 3693 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(23, 1, SKX_PCI_UNCORE_M2PCIE, 2), 3694 + }, 3695 + { /* M2PCIe 3 */ 3696 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2088), 3697 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3), 3698 + }, 3699 + { /* M3UPI0 Link 0 */ 3700 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C), 3701 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0), 3702 + }, 3703 + { /* M3UPI0 Link 1 */ 3704 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D), 3705 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1), 3706 + }, 3707 + { /* M3UPI1 Link 2 */ 3708 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C), 3709 + .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2), 3710 + }, 3711 + { /* end: all zeroes */ } 3712 + }; 3713 + 3714 + 3715 + static struct pci_driver skx_uncore_pci_driver = { 3716 + .name = "skx_uncore", 3717 + .id_table = skx_uncore_pci_ids, 3718 + }; 3719 + 3720 + int skx_uncore_pci_init(void) 3721 + { 3722 + /* need to double check pci address */ 3723 + int ret = snbep_pci2phy_map_init(0x2014, SKX_CPUNODEID, SKX_GIDNIDMAP, false); 3724 + 3725 + if (ret) 3726 + return ret; 3727 + 3728 + uncore_pci_uncores = skx_pci_uncores; 3729 + 
uncore_pci_driver = &skx_uncore_pci_driver; 3730 + return 0; 3731 + } 3732 + 3733 + /* end of SKX uncore support */