Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf/x86/intel/cstate: Clean up cpumask and hotplug

There are three cstate PMUs with different scopes: core, die and module.
The scopes are supported by the generic perf_event subsystem now.

Set the scope for each PMU and remove all the cpumask and hotplug code.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240802151643.1691631-4-kan.liang@linux.intel.com

authored by

Kan Liang and committed by
Peter Zijlstra
08155c7f a48a36b3

+5 -139
+5 -137
arch/x86/events/intel/cstate.c
··· 128 128 static struct device_attribute format_attr_##_var = \ 129 129 __ATTR(_name, 0444, __cstate_##_var##_show, NULL) 130 130 131 - static ssize_t cstate_get_attr_cpumask(struct device *dev, 132 - struct device_attribute *attr, 133 - char *buf); 134 - 135 131 /* Model -> events mapping */ 136 132 struct cstate_model { 137 133 unsigned long core_events; ··· 202 206 .attrs = cstate_format_attrs, 203 207 }; 204 208 205 - static cpumask_t cstate_core_cpu_mask; 206 - static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL); 207 - 208 - static struct attribute *cstate_cpumask_attrs[] = { 209 - &dev_attr_cpumask.attr, 210 - NULL, 211 - }; 212 - 213 - static struct attribute_group cpumask_attr_group = { 214 - .attrs = cstate_cpumask_attrs, 215 - }; 216 - 217 209 static const struct attribute_group *cstate_attr_groups[] = { 218 210 &cstate_events_attr_group, 219 211 &cstate_format_attr_group, 220 - &cpumask_attr_group, 221 212 NULL, 222 213 }; 223 214 ··· 252 269 [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &group_cstate_pkg_c10, test_msr }, 253 270 }; 254 271 255 - static cpumask_t cstate_pkg_cpu_mask; 256 - 257 272 /* cstate_module PMU */ 258 273 static struct pmu cstate_module_pmu; 259 274 static bool has_cstate_module; ··· 272 291 [PERF_CSTATE_MODULE_C6_RES] = { MSR_MODULE_C6_RES_MS, &group_cstate_module_c6, test_msr }, 273 292 }; 274 293 275 - static cpumask_t cstate_module_cpu_mask; 276 - 277 - static ssize_t cstate_get_attr_cpumask(struct device *dev, 278 - struct device_attribute *attr, 279 - char *buf) 280 - { 281 - struct pmu *pmu = dev_get_drvdata(dev); 282 - 283 - if (pmu == &cstate_core_pmu) 284 - return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask); 285 - else if (pmu == &cstate_pkg_pmu) 286 - return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask); 287 - else if (pmu == &cstate_module_pmu) 288 - return cpumap_print_to_pagebuf(true, buf, &cstate_module_cpu_mask); 289 - else 290 - return 0; 291 - } 292 - 293 294 
static int cstate_pmu_event_init(struct perf_event *event) 294 295 { 295 296 u64 cfg = event->attr.config; 296 - int cpu; 297 297 298 298 if (event->attr.type != event->pmu->type) 299 299 return -ENOENT; ··· 293 331 if (!(core_msr_mask & (1 << cfg))) 294 332 return -EINVAL; 295 333 event->hw.event_base = core_msr[cfg].msr; 296 - cpu = cpumask_any_and(&cstate_core_cpu_mask, 297 - topology_sibling_cpumask(event->cpu)); 298 334 } else if (event->pmu == &cstate_pkg_pmu) { 299 335 if (cfg >= PERF_CSTATE_PKG_EVENT_MAX) 300 336 return -EINVAL; 301 337 cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX); 302 338 if (!(pkg_msr_mask & (1 << cfg))) 303 339 return -EINVAL; 304 - 305 - event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; 306 - 307 340 event->hw.event_base = pkg_msr[cfg].msr; 308 - cpu = cpumask_any_and(&cstate_pkg_cpu_mask, 309 - topology_die_cpumask(event->cpu)); 310 341 } else if (event->pmu == &cstate_module_pmu) { 311 342 if (cfg >= PERF_CSTATE_MODULE_EVENT_MAX) 312 343 return -EINVAL; ··· 307 352 if (!(module_msr_mask & (1 << cfg))) 308 353 return -EINVAL; 309 354 event->hw.event_base = module_msr[cfg].msr; 310 - cpu = cpumask_any_and(&cstate_module_cpu_mask, 311 - topology_cluster_cpumask(event->cpu)); 312 355 } else { 313 356 return -ENOENT; 314 357 } 315 358 316 - if (cpu >= nr_cpu_ids) 317 - return -ENODEV; 318 - 319 - event->cpu = cpu; 320 359 event->hw.config = cfg; 321 360 event->hw.idx = -1; 322 361 return 0; ··· 361 412 return 0; 362 413 } 363 414 364 - /* 365 - * Check if exiting cpu is the designated reader. 
If so migrate the 366 - * events when there is a valid target available 367 - */ 368 - static int cstate_cpu_exit(unsigned int cpu) 369 - { 370 - unsigned int target; 371 - 372 - if (has_cstate_core && 373 - cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask)) { 374 - 375 - target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu); 376 - /* Migrate events if there is a valid target */ 377 - if (target < nr_cpu_ids) { 378 - cpumask_set_cpu(target, &cstate_core_cpu_mask); 379 - perf_pmu_migrate_context(&cstate_core_pmu, cpu, target); 380 - } 381 - } 382 - 383 - if (has_cstate_pkg && 384 - cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask)) { 385 - 386 - target = cpumask_any_but(topology_die_cpumask(cpu), cpu); 387 - /* Migrate events if there is a valid target */ 388 - if (target < nr_cpu_ids) { 389 - cpumask_set_cpu(target, &cstate_pkg_cpu_mask); 390 - perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target); 391 - } 392 - } 393 - 394 - if (has_cstate_module && 395 - cpumask_test_and_clear_cpu(cpu, &cstate_module_cpu_mask)) { 396 - 397 - target = cpumask_any_but(topology_cluster_cpumask(cpu), cpu); 398 - /* Migrate events if there is a valid target */ 399 - if (target < nr_cpu_ids) { 400 - cpumask_set_cpu(target, &cstate_module_cpu_mask); 401 - perf_pmu_migrate_context(&cstate_module_pmu, cpu, target); 402 - } 403 - } 404 - return 0; 405 - } 406 - 407 - static int cstate_cpu_init(unsigned int cpu) 408 - { 409 - unsigned int target; 410 - 411 - /* 412 - * If this is the first online thread of that core, set it in 413 - * the core cpu mask as the designated reader. 414 - */ 415 - target = cpumask_any_and(&cstate_core_cpu_mask, 416 - topology_sibling_cpumask(cpu)); 417 - 418 - if (has_cstate_core && target >= nr_cpu_ids) 419 - cpumask_set_cpu(cpu, &cstate_core_cpu_mask); 420 - 421 - /* 422 - * If this is the first online thread of that package, set it 423 - * in the package cpu mask as the designated reader. 
424 - */ 425 - target = cpumask_any_and(&cstate_pkg_cpu_mask, 426 - topology_die_cpumask(cpu)); 427 - if (has_cstate_pkg && target >= nr_cpu_ids) 428 - cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask); 429 - 430 - /* 431 - * If this is the first online thread of that cluster, set it 432 - * in the cluster cpu mask as the designated reader. 433 - */ 434 - target = cpumask_any_and(&cstate_module_cpu_mask, 435 - topology_cluster_cpumask(cpu)); 436 - if (has_cstate_module && target >= nr_cpu_ids) 437 - cpumask_set_cpu(cpu, &cstate_module_cpu_mask); 438 - 439 - return 0; 440 - } 441 - 442 415 static const struct attribute_group *core_attr_update[] = { 443 416 &group_cstate_core_c1, 444 417 &group_cstate_core_c3, ··· 397 526 .stop = cstate_pmu_event_stop, 398 527 .read = cstate_pmu_event_update, 399 528 .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, 529 + .scope = PERF_PMU_SCOPE_CORE, 400 530 .module = THIS_MODULE, 401 531 }; 402 532 ··· 413 541 .stop = cstate_pmu_event_stop, 414 542 .read = cstate_pmu_event_update, 415 543 .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, 544 + .scope = PERF_PMU_SCOPE_PKG, 416 545 .module = THIS_MODULE, 417 546 }; 418 547 ··· 429 556 .stop = cstate_pmu_event_stop, 430 557 .read = cstate_pmu_event_update, 431 558 .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, 559 + .scope = PERF_PMU_SCOPE_CLUSTER, 432 560 .module = THIS_MODULE, 433 561 }; 434 562 ··· 684 810 685 811 static inline void cstate_cleanup(void) 686 812 { 687 - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE); 688 - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING); 689 - 690 813 if (has_cstate_core) 691 814 perf_pmu_unregister(&cstate_core_pmu); 692 815 ··· 698 827 { 699 828 int err; 700 829 701 - cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING, 702 - "perf/x86/cstate:starting", cstate_cpu_init, NULL); 703 - cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE, 704 - "perf/x86/cstate:online", 
NULL, cstate_cpu_exit); 705 - 706 830 if (has_cstate_core) { 707 831 err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1); 708 832 if (err) { ··· 710 844 711 845 if (has_cstate_pkg) { 712 846 if (topology_max_dies_per_package() > 1) { 847 + /* CLX-AP is multi-die and the cstate is die-scope */ 848 + cstate_pkg_pmu.scope = PERF_PMU_SCOPE_DIE; 713 849 err = perf_pmu_register(&cstate_pkg_pmu, 714 850 "cstate_die", -1); 715 851 } else {
-2
include/linux/cpuhotplug.h
··· 152 152 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, 153 153 CPUHP_AP_PERF_X86_STARTING, 154 154 CPUHP_AP_PERF_X86_AMD_IBS_STARTING, 155 - CPUHP_AP_PERF_X86_CSTATE_STARTING, 156 155 CPUHP_AP_PERF_XTENSA_STARTING, 157 156 CPUHP_AP_ARM_VFP_STARTING, 158 157 CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, ··· 208 209 CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, 209 210 CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, 210 211 CPUHP_AP_PERF_X86_RAPL_ONLINE, 211 - CPUHP_AP_PERF_X86_CSTATE_ONLINE, 212 212 CPUHP_AP_PERF_S390_CF_ONLINE, 213 213 CPUHP_AP_PERF_S390_SF_ONLINE, 214 214 CPUHP_AP_PERF_ARM_CCI_ONLINE,