-#ifdef __KERNEL__
-# ifdef CONFIG_X86_32
-# include "msr_32.h"
-# else
-# include "msr_64.h"
-# endif
-#else
-# ifdef __i386__
-# include "msr_32.h"
-# else
-# include "msr_64.h"
-# endif
-#endif
+#ifndef __ASM_X86_MSR_H_
+#define __ASM_X86_MSR_H_
+
+#include <asm/msr-index.h>
+
+#ifdef __i386__
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <asm/errno.h>
+
+static inline unsigned long long native_read_msr(unsigned int msr)
+{
+	unsigned long long val;
+
+	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
+	return val;
+}
+
+static inline unsigned long long native_read_msr_safe(unsigned int msr,
+						       int *err)
+{
+	unsigned long long val;
+
+	asm volatile("2: rdmsr ; xorl %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3: movl %3,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     ".section __ex_table,\"a\"\n"
+		     " .align 4\n\t"
+		     " .long 2b,3b\n\t"
+		     ".previous"
+		     : "=r" (*err), "=A" (val)
+		     : "c" (msr), "i" (-EFAULT));
+
+	return val;
+}
+
+static inline void native_write_msr(unsigned int msr, unsigned long long val)
+{
+	asm volatile("wrmsr" : : "c" (msr), "A"(val));
+}
+
+static inline int native_write_msr_safe(unsigned int msr,
+					 unsigned long long val)
+{
+	int err;
+	asm volatile("2: wrmsr ; xorl %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3: movl %4,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     ".section __ex_table,\"a\"\n"
+		     " .align 4\n\t"
+		     " .long 2b,3b\n\t"
+		     ".previous"
+		     : "=a" (err)
+		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
+		       "i" (-EFAULT));
+	return err;
+}
+
+static inline unsigned long long native_read_tsc(void)
+{
+	unsigned long long val;
+	asm volatile("rdtsc" : "=A" (val));
+	return val;
+}
+
+static inline unsigned long long native_read_pmc(void)
+{
+	unsigned long long val;
+	asm volatile("rdpmc" : "=A" (val));
+	return val;
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#include <linux/errno.h>
+/*
+ * Access to machine-specific registers (available on 586 and better only)
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection), this allows gcc to optimize better
+ */
+
+#define rdmsr(msr,val1,val2) \
+	do { \
+		u64 __val = native_read_msr(msr); \
+		(val1) = (u32)__val; \
+		(val2) = (u32)(__val >> 32); \
+	} while(0)
+
+static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
+{
+	native_write_msr(__msr, ((u64)__high << 32) | __low);
+}
+
+#define rdmsrl(msr,val) \
+	((val) = native_read_msr(msr))
+
+#define wrmsrl(msr,val) native_write_msr(msr, val)
+
+/* wrmsr with exception handling */
+static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
+{
+	return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
+}
+
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr,p1,p2) \
+	({ \
+		int __err; \
+		u64 __val = native_read_msr_safe(msr, &__err); \
+		(*p1) = (u32)__val; \
+		(*p2) = (u32)(__val >> 32); \
+		__err; \
+	})
+
+#define rdtscl(low) \
+	((low) = (u32)native_read_tsc())
+
+#define rdtscll(val) \
+	((val) = native_read_tsc())
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) \
+	do { \
+		u64 _l = native_read_pmc(); \
+		(low) = (u32)_l; \
+		(high) = (u32)(_l >> 32); \
+	} while(0)
+#endif /* !CONFIG_PARAVIRT */
+
+#ifdef CONFIG_SMP
+void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+#else /* CONFIG_SMP */
+static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	rdmsr(msr_no, *l, *h);
+}
+static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	wrmsr(msr_no, l, h);
+}
+static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	return rdmsr_safe(msr_no, l, h);
+}
+static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	return wrmsr_safe(msr_no, l, h);
+}
+#endif /* CONFIG_SMP */
+#endif /* ! __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#else /* __i386__ */
+
+#ifndef __ASSEMBLY__
+#include <linux/errno.h>
+/*
+ * Access to machine-specific registers (available on 586 and better only)
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection), this allows gcc to optimize better
+ */
+
+#define rdmsr(msr,val1,val2) \
+	__asm__ __volatile__("rdmsr" \
+			     : "=a" (val1), "=d" (val2) \
+			     : "c" (msr))
+
+
+#define rdmsrl(msr,val) do { unsigned long a__,b__; \
+	__asm__ __volatile__("rdmsr" \
+			     : "=a" (a__), "=d" (b__) \
+			     : "c" (msr)); \
+	val = a__ | (b__<<32); \
+} while(0)
+
+#define wrmsr(msr,val1,val2) \
+	__asm__ __volatile__("wrmsr" \
+			     : /* no outputs */ \
+			     : "c" (msr), "a" (val1), "d" (val2))
+
+#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
+
+/* wrmsr with exception handling */
+#define wrmsr_safe(msr,a,b) ({ int ret__; \
+	asm volatile("2: wrmsr ; xorl %0,%0\n" \
+		     "1:\n\t" \
+		     ".section .fixup,\"ax\"\n\t" \
+		     "3: movl %4,%0 ; jmp 1b\n\t" \
+		     ".previous\n\t" \
+		     ".section __ex_table,\"a\"\n" \
+		     " .align 8\n\t" \
+		     " .quad 2b,3b\n\t" \
+		     ".previous" \
+		     : "=a" (ret__) \
+		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
+	ret__; })
+
+#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
+
+#define rdmsr_safe(msr,a,b) \
+	({ int ret__; \
+	  asm volatile ("1: rdmsr\n" \
+			"2:\n" \
+			".section .fixup,\"ax\"\n" \
+			"3: movl %4,%0\n" \
+			" jmp 2b\n" \
+			".previous\n" \
+			".section __ex_table,\"a\"\n" \
+			" .align 8\n" \
+			" .quad 1b,3b\n" \
+			".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
+			:"c"(msr), "i"(-EIO), "0"(0)); \
+	  ret__; })
+
+#define rdtsc(low,high) \
+	__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#define rdtscl(low) \
+	__asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
+
+#define rdtscp(low,high,aux) \
+	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
+
+#define rdtscll(val) do { \
+	unsigned int __a,__d; \
+	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+	(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
+} while(0)
+
+#define rdtscpll(val, aux) do { \
+	unsigned long __a, __d; \
+	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
+	(val) = (__d << 32) | __a; \
+} while (0)
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
+
+#define rdpmc(counter,low,high) \
+	__asm__ __volatile__("rdpmc" \
+			     : "=a" (low), "=d" (high) \
+			     : "c" (counter))
+
+static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
+			 unsigned int *ecx, unsigned int *edx)
+{
+	__asm__("cpuid"
+		: "=a" (*eax),
+		  "=b" (*ebx),
+		  "=c" (*ecx),
+		  "=d" (*edx)
+		: "0" (op));
+}
+
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+			       int *edx)
+{
+	__asm__("cpuid"
+		: "=a" (*eax),
+		  "=b" (*ebx),
+		  "=c" (*ecx),
+		  "=d" (*edx)
+		: "0" (op), "c" (count));
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+	unsigned int eax;
+
+	__asm__("cpuid"
+		: "=a" (eax)
+		: "0" (op)
+		: "bx", "cx", "dx");
+	return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+	unsigned int eax, ebx;
+
+	__asm__("cpuid"
+		: "=a" (eax), "=b" (ebx)
+		: "0" (op)
+		: "cx", "dx");
+	return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+	unsigned int eax, ecx;
+
+	__asm__("cpuid"
+		: "=a" (eax), "=c" (ecx)
+		: "0" (op)
+		: "bx", "dx");
+	return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+	unsigned int eax, edx;
+
+	__asm__("cpuid"
+		: "=a" (eax), "=d" (edx)
+		: "0" (op)
+		: "bx", "cx");
+	return edx;
+}
+
+#ifdef CONFIG_SMP
+void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+#else /* CONFIG_SMP */
+static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	rdmsr(msr_no, *l, *h);
+}
+static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	wrmsr(msr_no, l, h);
+}
+static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	return rdmsr_safe(msr_no, l, h);
+}
+static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	return wrmsr_safe(msr_no, l, h);
+}
+#endif /* CONFIG_SMP */
+#endif /* __ASSEMBLY__ */
+
+#endif /* !__i386__ */
+
+#endif
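For reference, a minimal usage sketch, not part of the patch: it assumes only the accessors shown above plus an example register (MSR_IA32_APICBASE) from <asm/msr-index.h>, and it is written the same way whether the 32-bit or the 64-bit half of the header is in effect.

#include <linux/kernel.h>
#include <asm/msr.h>

static void msr_example(void)
{
	u32 lo, hi;
	u64 val;

	/* rd* helpers fill the arguments directly, no pointer indirection */
	rdmsr(MSR_IA32_APICBASE, lo, hi);

	/* full 64-bit read into a single variable */
	rdmsrl(MSR_IA32_APICBASE, val);
	printk(KERN_DEBUG "APIC base MSR: 0x%016llx\n", (unsigned long long)val);

	/* the *_safe variants return non-zero instead of faulting on a bad MSR */
	if (rdmsr_safe(MSR_IA32_APICBASE, &lo, &hi))
		printk(KERN_DEBUG "rdmsr_safe faulted\n");

	/* write the two halves back unchanged (illustration only) */
	wrmsr(MSR_IA32_APICBASE, lo, hi);
}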
include/asm-x86/msr_32.h (deleted, 161 lines):
-#ifndef __ASM_MSR_H
-#define __ASM_MSR_H
-
-#include <asm/msr-index.h>
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <asm/errno.h>
-
-static inline unsigned long long native_read_msr(unsigned int msr)
-{
-	unsigned long long val;
-
-	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
-	return val;
-}
-
-static inline unsigned long long native_read_msr_safe(unsigned int msr,
-						       int *err)
-{
-	unsigned long long val;
-
-	asm volatile("2: rdmsr ; xorl %0,%0\n"
-		     "1:\n\t"
-		     ".section .fixup,\"ax\"\n\t"
-		     "3: movl %3,%0 ; jmp 1b\n\t"
-		     ".previous\n\t"
-		     ".section __ex_table,\"a\"\n"
-		     " .align 4\n\t"
-		     " .long 2b,3b\n\t"
-		     ".previous"
-		     : "=r" (*err), "=A" (val)
-		     : "c" (msr), "i" (-EFAULT));
-
-	return val;
-}
-
-static inline void native_write_msr(unsigned int msr, unsigned long long val)
-{
-	asm volatile("wrmsr" : : "c" (msr), "A"(val));
-}
-
-static inline int native_write_msr_safe(unsigned int msr,
-					 unsigned long long val)
-{
-	int err;
-	asm volatile("2: wrmsr ; xorl %0,%0\n"
-		     "1:\n\t"
-		     ".section .fixup,\"ax\"\n\t"
-		     "3: movl %4,%0 ; jmp 1b\n\t"
-		     ".previous\n\t"
-		     ".section __ex_table,\"a\"\n"
-		     " .align 4\n\t"
-		     " .long 2b,3b\n\t"
-		     ".previous"
-		     : "=a" (err)
-		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
-		       "i" (-EFAULT));
-	return err;
-}
-
-static inline unsigned long long native_read_tsc(void)
-{
-	unsigned long long val;
-	asm volatile("rdtsc" : "=A" (val));
-	return val;
-}
-
-static inline unsigned long long native_read_pmc(void)
-{
-	unsigned long long val;
-	asm volatile("rdpmc" : "=A" (val));
-	return val;
-}
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#include <linux/errno.h>
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-
-#define rdmsr(msr,val1,val2) \
-	do { \
-		u64 __val = native_read_msr(msr); \
-		(val1) = (u32)__val; \
-		(val2) = (u32)(__val >> 32); \
-	} while(0)
-
-static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
-{
-	native_write_msr(__msr, ((u64)__high << 32) | __low);
-}
-
-#define rdmsrl(msr,val) \
-	((val) = native_read_msr(msr))
-
-#define wrmsrl(msr,val) native_write_msr(msr, val)
-
-/* wrmsr with exception handling */
-static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
-{
-	return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
-}
-
-/* rdmsr with exception handling */
-#define rdmsr_safe(msr,p1,p2) \
-	({ \
-		int __err; \
-		u64 __val = native_read_msr_safe(msr, &__err); \
-		(*p1) = (u32)__val; \
-		(*p2) = (u32)(__val >> 32); \
-		__err; \
-	})
-
-#define rdtscl(low) \
-	((low) = (u32)native_read_tsc())
-
-#define rdtscll(val) \
-	((val) = native_read_tsc())
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define rdpmc(counter,low,high) \
-	do { \
-		u64 _l = native_read_pmc(); \
-		(low) = (u32)_l; \
-		(high) = (u32)(_l >> 32); \
-	} while(0)
-#endif /* !CONFIG_PARAVIRT */
-
-#ifdef CONFIG_SMP
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-#else /* CONFIG_SMP */
-static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-	rdmsr(msr_no, *l, *h);
-}
-static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-	wrmsr(msr_no, l, h);
-}
-static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-	return rdmsr_safe(msr_no, l, h);
-}
-static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-	return wrmsr_safe(msr_no, l, h);
-}
-#endif /* CONFIG_SMP */
-#endif
-#endif
-#endif /* __ASM_MSR_H */
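A second sketch, also not part of the patch: the *_on_cpu helpers declared in both halves of the header provide cross-CPU MSR access on SMP kernels and collapse to the local accessors on UP builds. MSR_IA32_UCODE_REV is only an example register from <asm/msr-index.h>.

#include <asm/msr.h>

/* Read the microcode revision reported by the given CPU (local CPU on UP). */
static u32 ucode_rev_on_cpu(unsigned int cpu)
{
	u32 lo, hi;

	rdmsr_on_cpu(cpu, MSR_IA32_UCODE_REV, &lo, &hi);
	return hi;	/* the update signature sits in the high 32 bits */
}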