Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/x86/include/asm/msr.h at v5.17 (393 lines, 11 kB)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>

struct msr {
	union {
		struct {
			u32 l;
			u32 h;
		};
		u64 q;
	};
};

struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

/*
 * Both i386 and x86_64 return a 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
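/*
 * Editor's illustration (not part of the original header), assuming the
 * macros above: on CONFIG_X86_64, __rdmsr() below effectively expands to
 *
 *	unsigned long low, high;
 *	asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
 *	return (low) | (high) << 32;
 *
 * where the 64-bit "low"/"high" spare the compiler an explicit zeroing of
 * the upper 32 bits, while on 32-bit the single "=A" output binds the
 * edx:eax register pair directly to one unsigned long long.
 */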
/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

#ifdef CONFIG_TRACEPOINTS
DECLARE_TRACEPOINT(read_msr);
DECLARE_TRACEPOINT(write_msr);
DECLARE_TRACEPOINT(rdpmc);
extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
#else
static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
#endif

/*
 * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
 * accessors and should not have any tracing or other functionality piggybacking
 * on them - those are *purely* for accessing MSRs and nothing more. So don't even
 * think of extending them - you will be slapped with a stinking trout or a frozen
 * shark will reach you, wherever you are! You've been warned.
 */
static __always_inline unsigned long long __rdmsr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));

	return EAX_EDX_VAL(val, low, high);
}

static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
		     : : "c" (msr), "a"(low), "d" (high) : "memory");
}

#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = __rdmsr((msr));			\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)

#define native_wrmsr(msr, low, high)			\
	__wrmsr(msr, low, high)

#define native_wrmsrl(msr, val)				\
	__wrmsr((msr), (u32)((u64)(val)),		\
		       (u32)((u64)(val) >> 32))

static inline unsigned long long native_read_msr(unsigned int msr)
{
	unsigned long long val;

	val = __rdmsr(msr);

	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, val, 0);

	return val;
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr));
	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}

/* Can be uninlined because referenced by paravirt */
static inline void notrace
native_write_msr(unsigned int msr, u32 low, u32 high)
{
	__wrmsr(msr, low, high);

	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
static inline int notrace
native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
	int err;

	asm volatile("1: wrmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high)
		     : "memory");
	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect.  The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}

/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter.  It should
 * be impossible to observe non-monotonic rdtsc() behavior across
 * multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	DECLARE_ARGS(val, low, high);

	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access.  The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads.  An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 *
	 * Thus, use the preferred barrier on the respective CPU, aiming for
	 * RDTSCP as the default.
	 */
	asm volatile(ALTERNATIVE_2("rdtsc",
				   "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
				   "rdtscp", X86_FEATURE_RDTSCP)
			: EAX_EDX_RET(val, low, high)
			/* RDTSCP clobbers ECX with MSR_TSC_AUX. */
			:: "ecx");

	return EAX_EDX_VAL(val, low, high);
}
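/*
 * Editor's illustration (not part of the original header): a minimal,
 * hedged sketch of timing a short region with rdtsc_ordered().  The delta
 * is only meaningful when taken on one CPU with a constant TSC, so
 * preemption is disabled around the measurement; work_fn() is a
 * hypothetical callee.
 *
 *	u64 start, cycles;
 *
 *	preempt_disable();
 *	start = rdtsc_ordered();
 *	work_fn();
 *	cycles = rdtsc_ordered() - start;
 *	preempt_enable();
 *	pr_info("work_fn: %llu TSC cycles\n", cycles);
 */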
static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (tracepoint_enabled(rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to model-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), which allows gcc to optimize better.
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned int msr, u32 low, u32 high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned int msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}

#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT_XXL */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
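/*
 * Editor's illustration (not part of the original header): a hedged sketch
 * of the wrappers above.  MSR_IA32_TSC comes from msr-index.h.  A faulting
 * rdmsrl() trips the "unchecked MSR access error" warning and yields 0,
 * while rdmsrl_safe() instead reports the fault via its return value:
 *
 *	u64 tsc;
 *
 *	rdmsrl(MSR_IA32_TSC, tsc);
 *
 *	if (rdmsrl_safe(MSR_IA32_TSC, &tsc))
 *		pr_warn("MSR_IA32_TSC is not readable\n");
 */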
#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else  /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif	/* CONFIG_SMP */
#endif	/* __ASSEMBLY__ */
#endif	/* _ASM_X86_MSR_H */
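A brief usage sketch (editor's addition, not part of the header): with
CONFIG_SMP, the *_on_cpu() helpers run the MSR access on the target CPU
(via smp_call_function_single() in arch/x86/lib/msr-smp.c); without it they
fall back to the local accessors above. MSR_IA32_MPERF is defined in
msr-index.h; the function name and error handling here are illustrative only.

#include <linux/printk.h>
#include <asm/msr.h>

static u64 example_read_mperf(unsigned int cpu)
{
	u64 mperf = 0;

	/* Executes RDMSR on @cpu; returns non-zero if the read faulted. */
	if (rdmsrl_safe_on_cpu(cpu, MSR_IA32_MPERF, &mperf))
		pr_warn("CPU%u: MSR_IA32_MPERF not readable\n", cpu);

	return mperf;
}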