Merge tag 'perf-tools-fixes-for-v5.19-2022-07-17' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

Pull perf tools fixes from Arnaldo Carvalho de Melo:

- Fix SIGSEGV in 'perf trace' when processing syscall args from
  perf.data files

- Sync kvm, msr-index and cpufeatures headers with the kernel sources

- Fix the 'Convert perf time to TSC' 'perf test':
  - No need to open events twice
  - Fix finding the correct event on hybrid systems

* tag 'perf-tools-fixes-for-v5.19-2022-07-17' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
perf trace: Fix SIGSEGV when processing syscall args
perf tests: Fix Convert perf time to TSC test for hybrid
perf tests: Stop Convert perf time to TSC test opening events twice
tools arch x86: Sync the msr-index.h copy with the kernel sources
tools headers cpufeatures: Sync with the kernel sources
tools headers UAPI: Sync linux/kvm.h with the kernel sources

Changed files: +47 -20

tools/arch/x86/include/asm/cpufeatures.h (+10 -2)
···
 #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_XCOMPACTED		( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
 #define X86_FEATURE_PTI		( 7*32+11) /* Kernel Page Table Isolation enabled */
-#define X86_FEATURE_RETPOLINE		( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_LFENCE	( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_KERNEL_IBRS	( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
+#define X86_FEATURE_RSB_VMEXIT		( 7*32+13) /* "" Fill RSB on VM-Exit */
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL	( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
···
 #define X86_FEATURE_PER_THREAD_MBA	(11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
 #define X86_FEATURE_SGX1		(11*32+ 8) /* "" Basic SGX */
 #define X86_FEATURE_SGX2		(11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
+#define X86_FEATURE_ENTRY_IBPB		(11*32+10) /* "" Issue an IBPB on kernel entry */
+#define X86_FEATURE_RRSBA_CTRL		(11*32+11) /* "" RET prediction control */
+#define X86_FEATURE_RETPOLINE		(11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE	(11*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_RETHUNK		(11*32+14) /* "" Use REturn THUNK */
+#define X86_FEATURE_UNRET		(11*32+15) /* "" AMD BTB untrain return */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */
···
 #define X86_FEATURE_VIRT_SSBD		(13*32+25) /* Virtualized Speculative Store Bypass Disable */
 #define X86_FEATURE_AMD_SSB_NO		(13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 #define X86_FEATURE_CPPC		(13*32+27) /* Collaborative Processor Performance Control */
+#define X86_FEATURE_BTC_NO		(13*32+29) /* "" Not vulnerable to Branch Type Confusion */
 #define X86_FEATURE_BRS		(13*32+31) /* Branch Sampling available */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
···
 #define X86_BUG_ITLB_MULTIHIT		X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 #define X86_BUG_SRBDS			X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
 #define X86_BUG_MMIO_STALE_DATA	X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
+#define X86_BUG_RETBLEED		X86_BUG(26) /* CPU is affected by RETBleed */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
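
Each X86_FEATURE_* value packs a word index into the kernel's capability
bitmap together with a bit position, as (word*32 + bit); the sync above
moves RETPOLINE and RETPOLINE_LFENCE from word 7 to word 11, freeing the
word-7 slots for KERNEL_IBRS and RSB_VMEXIT. A minimal standalone C
illustration of that packing (the macro value is copied from the header
above; the decoder itself is only illustrative):

/* Decode the (word*32 + bit) packing used by X86_FEATURE_* defines. */
#include <stdio.h>

#define X86_FEATURE_RETPOLINE (11*32 + 12)	/* value as synced above */

int main(void)
{
	unsigned int feature = X86_FEATURE_RETPOLINE;

	/* Each feature is one bit in an array of 32-bit "words". */
	printf("word %u, bit %u\n", feature / 32, feature % 32);
	return 0;
}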

tools/arch/x86/include/asm/disabled-features.h (+20 -1)
···
 # define DISABLE_PTI		(1 << (X86_FEATURE_PTI & 31))
 #endif
 
+#ifdef CONFIG_RETPOLINE
+# define DISABLE_RETPOLINE	0
+#else
+# define DISABLE_RETPOLINE	((1 << (X86_FEATURE_RETPOLINE & 31)) | \
+				 (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
+#endif
+
+#ifdef CONFIG_RETHUNK
+# define DISABLE_RETHUNK	0
+#else
+# define DISABLE_RETHUNK	(1 << (X86_FEATURE_RETHUNK & 31))
+#endif
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
+# define DISABLE_UNRET		0
+#else
+# define DISABLE_UNRET		(1 << (X86_FEATURE_UNRET & 31))
+#endif
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 # define DISABLE_ENQCMD	0
 #else
···
 #define DISABLED_MASK8	(DISABLE_TDX_GUEST)
 #define DISABLED_MASK9	(DISABLE_SGX)
 #define DISABLED_MASK10	0
-#define DISABLED_MASK11	0
+#define DISABLED_MASK11	(DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)
 #define DISABLED_MASK12	0
 #define DISABLED_MASK13	0
 #define DISABLED_MASK14	0
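
The DISABLED_MASK* words let features that are turned off in Kconfig be
rejected at compile time instead of probed at runtime: with the config
symbol unset, the feature's bit lands in the disabled mask for its word,
and any check against it can fold to a constant 0. A minimal sketch of
that pattern, with illustrative names rather than the kernel's actual
cpu_feature_enabled() machinery:

#include <stdio.h>

#define FEATURE_RETHUNK_BIT	14		/* bit within word 11 */

#ifdef CONFIG_RETHUNK
# define DISABLE_RETHUNK	0
#else
# define DISABLE_RETHUNK	(1u << FEATURE_RETHUNK_BIT)
#endif

#define DISABLED_MASK11		(DISABLE_RETHUNK)

/* With the mask a compile-time constant, a check like this can be
 * folded to 0 by the compiler, dropping the runtime test entirely. */
static int feature_enabled(unsigned int bit, unsigned int runtime_word)
{
	if (DISABLED_MASK11 & (1u << bit))
		return 0;			/* compiled out */
	return !!(runtime_word & (1u << bit));	/* runtime CPUID bit */
}

int main(void)
{
	/* CONFIG_RETHUNK is not defined here, so this prints 0. */
	printf("rethunk enabled: %d\n",
	       feature_enabled(FEATURE_RETHUNK_BIT, ~0u));
	return 0;
}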

tools/arch/x86/include/asm/msr-index.h (+4)
···
 #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
 #define ARCH_CAP_RDCL_NO		BIT(0)	/* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL		BIT(1)	/* Enhanced IBRS support */
+#define ARCH_CAP_RSBA			BIT(2)	/* RET may use alternative branch predictors */
 #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH	BIT(3)	/* Skip L1D flush on vmentry */
 #define ARCH_CAP_SSB_NO			BIT(4)	/*
						 * Not susceptible to Speculative Store Bypass
···
 
 /* Fam 17h MSRs */
 #define MSR_F17H_IRPERF			0xc00000e9
+
+#define MSR_ZEN2_SPECTRAL_CHICKEN	0xc00110e3
+#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT	BIT_ULL(1)
 
 /* Fam 16h MSRs */
 #define MSR_F16H_L2I_PERF_CTL		0xc0010230
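
These constants can be exercised from userspace through the msr driver,
which exposes model-specific registers as /dev/cpu/N/msr with the MSR
number as the file offset. A small sketch reading IA32_ARCH_CAPABILITIES
and testing the newly synced ARCH_CAP_RSBA bit (assumes root and a loaded
msr module; error handling kept minimal):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
#define ARCH_CAP_RSBA			(1ULL << 2)

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* The msr driver reads 8 bytes at offset == MSR number. */
	if (fd < 0 || pread(fd, &val, sizeof(val),
			    MSR_IA32_ARCH_CAPABILITIES) != sizeof(val)) {
		perror("rdmsr");
		return 1;
	}
	printf("ARCH_CAP_RSBA: %s\n", (val & ARCH_CAP_RSBA) ? "set" : "clear");
	close(fd);
	return 0;
}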

tools/include/uapi/linux/kvm.h (+1)
···
 #define KVM_STATS_UNIT_BYTES		(0x1 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_SECONDS		(0x2 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_CYCLES		(0x3 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_BOOLEAN		(0x4 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_MAX		KVM_STATS_UNIT_CYCLES
 
 #define KVM_STATS_BASE_SHIFT		8
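
Note the sync adds KVM_STATS_UNIT_BOOLEAN while KVM_STATS_UNIT_MAX still
names KVM_STATS_UNIT_CYCLES, matching the kernel header at this point. A
consumer of KVM's binary stats ABI decodes the unit by masking the
descriptor flags; a minimal sketch (the constants are copied from the
uapi header, the unit_name() helper is illustrative):

#include <stdio.h>

#define KVM_STATS_UNIT_SHIFT	4
#define KVM_STATS_UNIT_MASK	(0xF << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_NONE	(0x0 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_BYTES	(0x1 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_SECONDS	(0x2 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_CYCLES	(0x3 << KVM_STATS_UNIT_SHIFT)
#define KVM_STATS_UNIT_BOOLEAN	(0x4 << KVM_STATS_UNIT_SHIFT)

static const char *unit_name(unsigned int flags)
{
	/* The unit lives in bits 4-7 of the descriptor flags. */
	switch (flags & KVM_STATS_UNIT_MASK) {
	case KVM_STATS_UNIT_BYTES:	return "bytes";
	case KVM_STATS_UNIT_SECONDS:	return "seconds";
	case KVM_STATS_UNIT_CYCLES:	return "cycles";
	case KVM_STATS_UNIT_BOOLEAN:	return "boolean";
	default:			return "none";
	}
}

int main(void)
{
	printf("%s\n", unit_name(KVM_STATS_UNIT_BOOLEAN));
	return 0;
}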

tools/perf/builtin-trace.c (+2)
···
 		goto out;
 
 	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
+	trace->syscalls.events.sys_enter = evsel;
 	/* older kernels have syscalls tp versus raw_syscalls */
 	if (evsel == NULL)
 		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");
···
 	}
 
 	evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
+	trace->syscalls.events.sys_exit = evsel;
 	if (evsel == NULL)
 		evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
 	if (evsel &&
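
The two added lines cache the looked-up evsels in trace->syscalls.events,
so the perf.data processing path no longer leaves those pointers NULL for
later syscall-args code to dereference. A standalone sketch of the
lookup-and-cache pattern (hypothetical types and names, not perf's actual
API):

#include <stdio.h>
#include <string.h>

struct evsel { const char *name; };

/* Pretend this session only recorded the sys_enter tracepoint. */
static struct evsel events[] = { { "raw_syscalls:sys_enter" } };

static struct evsel *find_tracepoint(const char *name)
{
	for (size_t i = 0; i < sizeof(events) / sizeof(events[0]); i++)
		if (!strcmp(events[i].name, name))
			return &events[i];
	return NULL;	/* tracepoint absent from this perf.data */
}

struct trace_cache { struct evsel *sys_enter, *sys_exit; };

int main(void)
{
	struct trace_cache cache = { 0 };

	/* Cache the lookup results immediately, as the fix above does,
	 * so later code never reads a never-initialized slot. */
	cache.sys_enter = find_tracepoint("raw_syscalls:sys_enter");
	cache.sys_exit  = find_tracepoint("raw_syscalls:sys_exit");

	/* Guarded use: an unguarded dereference of a NULL slot is the
	 * kind of access that SIGSEGVs. */
	if (cache.sys_exit)
		printf("exit tp: %s\n", cache.sys_exit->name);
	else
		printf("sys_exit tracepoint not found\n");
	return 0;
}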

tools/perf/tests/perf-time-to-tsc.c (+10 -17)
···
 #include "tsc.h"
 #include "mmap.h"
 #include "tests.h"
-#include "pmu.h"
-#include "pmu-hybrid.h"
 
 /*
  * Except x86_64/i386 and Arm64, other archs don't support TSC in perf. Just
···
 
 	evlist__config(evlist, &opts, NULL);
 
-	evsel = evlist__first(evlist);
-
-	evsel->core.attr.comm = 1;
-	evsel->core.attr.disabled = 1;
-	evsel->core.attr.enable_on_exec = 0;
-
-	/*
-	 * For hybrid "cycles:u", it creates two events.
-	 * Init the second evsel here.
-	 */
-	if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) {
-		evsel = evsel__next(evsel);
+	/* For hybrid "cycles:u", it creates two events */
+	evlist__for_each_entry(evlist, evsel) {
 		evsel->core.attr.comm = 1;
 		evsel->core.attr.disabled = 1;
 		evsel->core.attr.enable_on_exec = 0;
 	}
 
-	if (evlist__open(evlist) == -ENOENT) {
-		err = TEST_SKIP;
+	ret = evlist__open(evlist);
+	if (ret < 0) {
+		if (ret == -ENOENT)
+			err = TEST_SKIP;
+		else
+			pr_debug("evlist__open() failed\n");
 		goto out_err;
 	}
-	CHECK__(evlist__open(evlist));
 
 	CHECK__(evlist__mmap(evlist, UINT_MAX));
 
···
 			goto next_event;
 
 		if (strcmp(event->comm.comm, comm1) == 0) {
+			CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
 			CHECK__(evsel__parse_sample(evsel, event, &sample));
 			comm1_time = sample.time;
 		}
 		if (strcmp(event->comm.comm, comm2) == 0) {
+			CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
 			CHECK__(evsel__parse_sample(evsel, event, &sample));
 			comm2_time = sample.time;
 		}
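
Two of the fixes are visible here: on hybrid systems, parsing "cycles:u"
creates one evsel per hybrid PMU, so the test now configures every evsel
via evlist__for_each_entry() and resolves each mmap'd event back to its
own evsel with evlist__event2evsel() before parsing the sample; and
evlist__open() is now called once, with its return value saved, rather
than once in the skip check and again inside CHECK__(). A standalone
sketch of that call-once idiom (the hypothetical resource_open() stands
in for evlist__open()):

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for evlist__open(): returns 0 or -errno,
 * and fails if called a second time, like the original bug did. */
static int resource_open(void)
{
	static int calls;
	return ++calls > 1 ? -EBUSY : 0;
}

int main(void)
{
	/* Before the fix the function ran once in the skip check and
	 * again in the checked call, i.e. two opens. After: call once,
	 * keep the return value, branch on it. */
	int ret = resource_open();
	if (ret < 0) {
		if (ret == -ENOENT)
			fprintf(stderr, "skipping: event not supported\n");
		else
			fprintf(stderr, "open failed: %d\n", ret);
		return 1;
	}
	printf("opened once\n");
	return 0;
}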