Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/x86/kvm: Fix Host-Only/Guest-Only counting with SVM disabled

Changed files: +54 -4
arch/x86/include/asm/perf_event.h (+8)
···
  static inline void perf_events_lapic_init(void)	{ }
  #endif

+ #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
+ extern void amd_pmu_enable_virt(void);
+ extern void amd_pmu_disable_virt(void);
+ #else
+ static inline void amd_pmu_enable_virt(void) { }
+ static inline void amd_pmu_disable_virt(void) { }
+ #endif
+
  #endif /* _ASM_X86_PERF_EVENT_H */
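The #else branch supplies empty inline stubs, so KVM can call both hooks without any configuration guards of its own. A minimal sketch of the same pattern; CONFIG_DEMO and caller() are hypothetical stand-ins for the real config test and call site, not part of the patch:

#ifdef CONFIG_DEMO			/* hypothetical config symbol */
extern void amd_pmu_enable_virt(void);	/* real implementation elsewhere */
#else
static inline void amd_pmu_enable_virt(void) { }	/* compiles to nothing */
#endif

static inline void caller(void)		/* hypothetical call site */
{
	/* Same source either way: a function call or a no-op. */
	amd_pmu_enable_virt();
}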
arch/x86/kernel/cpu/perf_event.h (+6 -2)
···
  	/*
  	 * AMD specific bits
  	 */
- 	struct amd_nb		*amd_nb;
+ 	struct amd_nb			*amd_nb;
+ 	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
+ 	u64				perf_ctr_virt_mask;

  	void				*kfree_on_online;
  };
···
  static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
  					  u64 enable_mask)
  {
+ 	u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
+
  	if (hwc->extra_reg.reg)
  		wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config);
- 	wrmsrl(hwc->config_base, hwc->config | enable_mask);
+ 	wrmsrl(hwc->config_base, (hwc->config | enable_mask) & ~disable_mask);
  }

  void x86_pmu_enable_all(int added);
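The second hunk is the heart of the fix: every event-select write now clears whatever bits the per-cpu perf_ctr_virt_mask holds. A minimal user-space sketch of that arithmetic, assuming the usual HG bit positions (41:40) and enable bit (22) of the AMD event-select register rather than quoting the kernel's constants:

#include <stdint.h>
#include <stdio.h>

#define EVENTSEL_GUESTONLY	(1ULL << 40)	/* assumed bit position */
#define EVENTSEL_HOSTONLY	(1ULL << 41)	/* assumed bit position */
#define EVENTSEL_ENABLE		(1ULL << 22)	/* assumed bit position */

int main(void)
{
	/* A host-only event programmed while SVM is disabled. */
	uint64_t config    = 0x76 | EVENTSEL_HOSTONLY;
	uint64_t virt_mask = EVENTSEL_HOSTONLY;	/* amd_pmu_disable_virt() state */

	/* Mirrors (hwc->config | enable_mask) & ~disable_mask: */
	uint64_t msr_val = (config | EVENTSEL_ENABLE) & ~virt_mask;

	printf("written: %#llx, host-only bit %s\n",
	       (unsigned long long)msr_val,
	       (msr_val & EVENTSEL_HOSTONLY) ? "kept" : "stripped");
	return 0;
}

With the bit stripped, the counter counts unconditionally, which is the correct behaviour for a host-only event while SVM is off and no guest can run.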
arch/x86/kernel/cpu/perf_event_amd.c (+35 -2)
···
  #include <linux/perf_event.h>
+ #include <linux/export.h>
  #include <linux/types.h>
  #include <linux/init.h>
  #include <linux/slab.h>
···
  	struct amd_nb *nb;
  	int i, nb_id;

- 	if (boot_cpu_data.x86_max_cores < 2)
+ 	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+ 	if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
  		return;

  	nb_id = amd_get_nb_id(cpu);
···
  	.put_event_constraints	= amd_put_event_constraints,

  	.cpu_prepare		= amd_pmu_cpu_prepare,
- 	.cpu_starting		= amd_pmu_cpu_starting,
  	.cpu_dead		= amd_pmu_cpu_dead,
  #endif
+ 	.cpu_starting		= amd_pmu_cpu_starting,
  };
···
  	return 0;
  }
+
+ void amd_pmu_enable_virt(void)
+ {
+ 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ 	cpuc->perf_ctr_virt_mask = 0;
+
+ 	/* Reload all events */
+ 	x86_pmu_disable_all();
+ 	x86_pmu_enable_all(0);
+ }
+ EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+
+ void amd_pmu_disable_virt(void)
+ {
+ 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+ 	/*
+ 	 * We only mask out the Host-only bit so that host-only counting works
+ 	 * when SVM is disabled. If someone sets up a guest-only counter when
+ 	 * SVM is disabled the Guest-only bit still gets set and the counter
+ 	 * will not count anything.
+ 	 */
+ 	cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
+
+ 	/* Reload all events */
+ 	x86_pmu_disable_all();
+ 	x86_pmu_enable_all(0);
+ }
+ EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);
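Both hooks end with a full disable/enable cycle because the mask is consulted only when a counter is (re)programmed: an already-running counter keeps whatever HG bits it was written with until it is rewritten. A toy model of that ordering; fake_msr, program_event and reload_all are illustrative stand-ins, not kernel symbols:

#include <stdint.h>
#include <stdio.h>

#define HOSTONLY (1ULL << 41)		/* assumed bit position */

static uint64_t virt_mask = HOSTONLY;	/* boot state: SVM off */
static uint64_t fake_msr[2];		/* stand-in event-select MSRs */
static const uint64_t config[2] = { 0x76 | HOSTONLY, 0xc0 | HOSTONLY };

/* Models __x86_pmu_enable_event(): mask applied at write time only. */
static void program_event(int i)
{
	fake_msr[i] = config[i] & ~virt_mask;
}

/* Models the x86_pmu_disable_all()/x86_pmu_enable_all(0) pair. */
static void reload_all(void)
{
	for (int i = 0; i < 2; i++)
		program_event(i);
}

int main(void)
{
	reload_all();		/* host-only bit stripped while SVM is off */
	virt_mask = 0;		/* amd_pmu_enable_virt(): SVM turned on */
	reload_all();		/* without this, fake_msr would stay stale */
	printf("msr0 = %#llx\n", (unsigned long long)fake_msr[0]);
	return 0;
}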
arch/x86/kvm/svm.c (+5)
···
  #include <linux/ftrace_event.h>
  #include <linux/slab.h>

+ #include <asm/perf_event.h>
  #include <asm/tlbflush.h>
  #include <asm/desc.h>
  #include <asm/kvm_para.h>
···
  	wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

  	cpu_svm_disable();
+
+ 	amd_pmu_disable_virt();
  }

  static int svm_hardware_enable(void *garbage)
···
  	}

  	svm_init_erratum_383();
+
+ 	amd_pmu_enable_virt();

  	return 0;
  }