x86: merge msr_32/64.h

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

---
 include/asm-x86/Kbuild   |   2 -
 include/asm-x86/msr.h    | 357 ++++++++++++++++++++++++++++++++++++++++++++--
 include/asm-x86/msr_32.h | 161 ---------------------
 include/asm-x86/msr_64.h | 187 -----------------------
 4 files changed, 347 insertions(+), 360 deletions(-)

--- a/include/asm-x86/Kbuild
+++ b/include/asm-x86/Kbuild
@@ -14,8 +14,6 @@
 unifdef-y += e820.h
 unifdef-y += ist.h
 unifdef-y += mce.h
-unifdef-y += msr_32.h
-unifdef-y += msr_64.h
 unifdef-y += msr.h
 unifdef-y += mtrr.h
 unifdef-y += page_32.h
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -1,13 +1,350 @@
+#ifndef __ASM_X86_MSR_H_
+#define __ASM_X86_MSR_H_
+
+#include <asm/msr-index.h>
+
+#ifdef __i386__
+
 #ifdef __KERNEL__
-# ifdef CONFIG_X86_32
-# include "msr_32.h"
-# else
-# include "msr_64.h"
-# endif
+#ifndef __ASSEMBLY__
+
+#include <asm/errno.h>
+
+static inline unsigned long long native_read_msr(unsigned int msr)
+{
+        unsigned long long val;
+
+        asm volatile("rdmsr" : "=A" (val) : "c" (msr));
+        return val;
+}
+
+static inline unsigned long long native_read_msr_safe(unsigned int msr,
+                int *err)
+{
+        unsigned long long val;
+
+        asm volatile("2: rdmsr ; xorl %0,%0\n"
+                "1:\n\t"
+                ".section .fixup,\"ax\"\n\t"
+                "3: movl %3,%0 ; jmp 1b\n\t"
+                ".previous\n\t"
+                ".section __ex_table,\"a\"\n"
+                " .align 4\n\t"
+                " .long 2b,3b\n\t"
+                ".previous"
+                : "=r" (*err), "=A" (val)
+                : "c" (msr), "i" (-EFAULT));
+
+        return val;
+}
+
+static inline void native_write_msr(unsigned int msr, unsigned long long val)
+{
+        asm volatile("wrmsr" : : "c" (msr), "A"(val));
+}
+
+static inline int native_write_msr_safe(unsigned int msr,
+                unsigned long long val)
+{
+        int err;
+        asm volatile("2: wrmsr ; xorl %0,%0\n"
+                "1:\n\t"
+                ".section .fixup,\"ax\"\n\t"
+                "3: movl %4,%0 ; jmp 1b\n\t"
+                ".previous\n\t"
+                ".section __ex_table,\"a\"\n"
+                " .align 4\n\t"
+                " .long 2b,3b\n\t"
+                ".previous"
+                : "=a" (err)
+                : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
+                  "i" (-EFAULT));
+        return err;
+}
+
+static inline unsigned long long native_read_tsc(void)
+{
+        unsigned long long val;
+        asm volatile("rdtsc" : "=A" (val));
+        return val;
+}
+
+static inline unsigned long long native_read_pmc(void)
+{
+        unsigned long long val;
+        asm volatile("rdpmc" : "=A" (val));
+        return val;
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
 #else
-# ifdef __i386__
-# include "msr_32.h"
-# else
-# include "msr_64.h"
-# endif
+#include <linux/errno.h>
+/*
+ * Access to machine-specific registers (available on 586 and better only)
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection), this allows gcc to optimize better
+ */
+
+#define rdmsr(msr,val1,val2) \
+        do { \
+                u64 __val = native_read_msr(msr); \
+                (val1) = (u32)__val; \
+                (val2) = (u32)(__val >> 32); \
+        } while(0)
+
+static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
+{
+        native_write_msr(__msr, ((u64)__high << 32) | __low);
+}
+
+#define rdmsrl(msr,val) \
+        ((val) = native_read_msr(msr))
+
+#define wrmsrl(msr,val) native_write_msr(msr, val)
+
+/* wrmsr with exception handling */
+static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
+{
+        return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
+}
+
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr,p1,p2) \
+        ({ \
+                int __err; \
+                u64 __val = native_read_msr_safe(msr, &__err); \
+                (*p1) = (u32)__val; \
+                (*p2) = (u32)(__val >> 32); \
+                __err; \
+        })
+
+#define rdtscl(low) \
+        ((low) = (u32)native_read_tsc())
+
+#define rdtscll(val) \
+        ((val) = native_read_tsc())
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) \
+        do { \
+                u64 _l = native_read_pmc(); \
+                (low) = (u32)_l; \
+                (high) = (u32)(_l >> 32); \
+        } while(0)
+#endif /* !CONFIG_PARAVIRT */
+
+#ifdef CONFIG_SMP
+void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+#else /* CONFIG_SMP */
+static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+        rdmsr(msr_no, *l, *h);
+}
+static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+        wrmsr(msr_no, l, h);
+}
+static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+        return rdmsr_safe(msr_no, l, h);
+}
+static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+        return wrmsr_safe(msr_no, l, h);
+}
+#endif /* CONFIG_SMP */
+#endif /* ! __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#else /* __i386__ */
+
+#ifndef __ASSEMBLY__
+#include <linux/errno.h>
+/*
+ * Access to machine-specific registers (available on 586 and better only)
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection), this allows gcc to optimize better
+ */
+
+#define rdmsr(msr,val1,val2) \
+        __asm__ __volatile__("rdmsr" \
+                : "=a" (val1), "=d" (val2) \
+                : "c" (msr))
+
+
+#define rdmsrl(msr,val) do { unsigned long a__,b__; \
+        __asm__ __volatile__("rdmsr" \
+                : "=a" (a__), "=d" (b__) \
+                : "c" (msr)); \
+        val = a__ | (b__<<32); \
+} while(0)
+
+#define wrmsr(msr,val1,val2) \
+        __asm__ __volatile__("wrmsr" \
+                : /* no outputs */ \
+                : "c" (msr), "a" (val1), "d" (val2))
+
+#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
+
+/* wrmsr with exception handling */
+#define wrmsr_safe(msr,a,b) ({ int ret__; \
+        asm volatile("2: wrmsr ; xorl %0,%0\n" \
+                "1:\n\t" \
+                ".section .fixup,\"ax\"\n\t" \
+                "3: movl %4,%0 ; jmp 1b\n\t" \
+                ".previous\n\t" \
+                ".section __ex_table,\"a\"\n" \
+                " .align 8\n\t" \
+                " .quad 2b,3b\n\t" \
+                ".previous" \
+                : "=a" (ret__) \
+                : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
+        ret__; })
+
+#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
+
+#define rdmsr_safe(msr,a,b) \
+        ({ int ret__; \
+        asm volatile ("1: rdmsr\n" \
+                "2:\n" \
+                ".section .fixup,\"ax\"\n" \
+                "3: movl %4,%0\n" \
+                " jmp 2b\n" \
+                ".previous\n" \
+                ".section __ex_table,\"a\"\n" \
+                " .align 8\n" \
+                " .quad 1b,3b\n" \
+                ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
+                :"c"(msr), "i"(-EIO), "0"(0)); \
+        ret__; })
+
+#define rdtsc(low,high) \
+        __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#define rdtscl(low) \
+        __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
+
+#define rdtscp(low,high,aux) \
+        asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
+
+#define rdtscll(val) do { \
+        unsigned int __a,__d; \
+        asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+        (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
+} while(0)
+
+#define rdtscpll(val, aux) do { \
+        unsigned long __a, __d; \
+        asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
+        (val) = (__d << 32) | __a; \
+} while (0)
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
+
+#define rdpmc(counter,low,high) \
+        __asm__ __volatile__("rdpmc" \
+                : "=a" (low), "=d" (high) \
+                : "c" (counter))
+
+static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
+                unsigned int *ecx, unsigned int *edx)
+{
+        __asm__("cpuid"
+                : "=a" (*eax),
+                  "=b" (*ebx),
+                  "=c" (*ecx),
+                  "=d" (*edx)
+                : "0" (op));
+}
+
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+                int *edx)
+{
+        __asm__("cpuid"
+                : "=a" (*eax),
+                  "=b" (*ebx),
+                  "=c" (*ecx),
+                  "=d" (*edx)
+                : "0" (op), "c" (count));
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+        unsigned int eax;
+
+        __asm__("cpuid"
+                : "=a" (eax)
+                : "0" (op)
+                : "bx", "cx", "dx");
+        return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+        unsigned int eax, ebx;
+
+        __asm__("cpuid"
+                : "=a" (eax), "=b" (ebx)
+                : "0" (op)
+                : "cx", "dx" );
+        return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+        unsigned int eax, ecx;
+
+        __asm__("cpuid"
+                : "=a" (eax), "=c" (ecx)
+                : "0" (op)
+                : "bx", "dx" );
+        return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+        unsigned int eax, edx;
+
+        __asm__("cpuid"
+                : "=a" (eax), "=d" (edx)
+                : "0" (op)
+                : "bx", "cx");
+        return edx;
+}
+
+#ifdef CONFIG_SMP
+void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+#else /* CONFIG_SMP */
+static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+        rdmsr(msr_no, *l, *h);
+}
+static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+        wrmsr(msr_no, l, h);
+}
+static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+        return rdmsr_safe(msr_no, l, h);
+}
+static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+        return wrmsr_safe(msr_no, l, h);
+}
+#endif /* CONFIG_SMP */
+#endif /* __ASSEMBLY__ */
+
+#endif /* !__i386__ */
+
 #endif
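
The point of the merge is that callers keep a single include and a single API: the same rdmsrl()/wrmsrl() and *_safe() helpers build on both 32-bit and 64-bit kernels, and only the implementation behind the #ifdef __i386__ split above differs. The following usage sketch is not part of the patch; show_msr_example() is an invented name, and it assumes MSR_IA32_MISC_ENABLE and MSR_IA32_PLATFORM_ID are available from <asm/msr-index.h>.

    /* Usage sketch, not part of the patch. */
    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <asm/msr.h>

    static void show_msr_example(void)
    {
            u64 misc;
            u32 lo, hi;

            /* Same call on 32-bit and 64-bit: read a full 64-bit MSR. */
            rdmsrl(MSR_IA32_MISC_ENABLE, misc);
            printk(KERN_INFO "MISC_ENABLE: %016llx\n",
                   (unsigned long long)misc);

            /*
             * The _safe variant returns a nonzero error code instead of
             * faulting when the MSR is not implemented on this CPU.
             */
            if (rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi))
                    printk(KERN_INFO "PLATFORM_ID not readable\n");
            else
                    printk(KERN_INFO "PLATFORM_ID: %08x%08x\n", hi, lo);
    }
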
--- a/include/asm-x86/msr_32.h
+++ /dev/null
@@ -1,161 +0,0 @@
-#ifndef __ASM_MSR_H
-#define __ASM_MSR_H
-
-#include <asm/msr-index.h>
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <asm/errno.h>
-
-static inline unsigned long long native_read_msr(unsigned int msr)
-{
-        unsigned long long val;
-
-        asm volatile("rdmsr" : "=A" (val) : "c" (msr));
-        return val;
-}
-
-static inline unsigned long long native_read_msr_safe(unsigned int msr,
-                int *err)
-{
-        unsigned long long val;
-
-        asm volatile("2: rdmsr ; xorl %0,%0\n"
-                "1:\n\t"
-                ".section .fixup,\"ax\"\n\t"
-                "3: movl %3,%0 ; jmp 1b\n\t"
-                ".previous\n\t"
-                ".section __ex_table,\"a\"\n"
-                " .align 4\n\t"
-                " .long 2b,3b\n\t"
-                ".previous"
-                : "=r" (*err), "=A" (val)
-                : "c" (msr), "i" (-EFAULT));
-
-        return val;
-}
-
-static inline void native_write_msr(unsigned int msr, unsigned long long val)
-{
-        asm volatile("wrmsr" : : "c" (msr), "A"(val));
-}
-
-static inline int native_write_msr_safe(unsigned int msr,
-                unsigned long long val)
-{
-        int err;
-        asm volatile("2: wrmsr ; xorl %0,%0\n"
-                "1:\n\t"
-                ".section .fixup,\"ax\"\n\t"
-                "3: movl %4,%0 ; jmp 1b\n\t"
-                ".previous\n\t"
-                ".section __ex_table,\"a\"\n"
-                " .align 4\n\t"
-                " .long 2b,3b\n\t"
-                ".previous"
-                : "=a" (err)
-                : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
-                  "i" (-EFAULT));
-        return err;
-}
-
-static inline unsigned long long native_read_tsc(void)
-{
-        unsigned long long val;
-        asm volatile("rdtsc" : "=A" (val));
-        return val;
-}
-
-static inline unsigned long long native_read_pmc(void)
-{
-        unsigned long long val;
-        asm volatile("rdpmc" : "=A" (val));
-        return val;
-}
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#include <linux/errno.h>
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-
-#define rdmsr(msr,val1,val2) \
-        do { \
-                u64 __val = native_read_msr(msr); \
-                (val1) = (u32)__val; \
-                (val2) = (u32)(__val >> 32); \
-        } while(0)
-
-static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
-{
-        native_write_msr(__msr, ((u64)__high << 32) | __low);
-}
-
-#define rdmsrl(msr,val) \
-        ((val) = native_read_msr(msr))
-
-#define wrmsrl(msr,val) native_write_msr(msr, val)
-
-/* wrmsr with exception handling */
-static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
-{
-        return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
-}
-
-/* rdmsr with exception handling */
-#define rdmsr_safe(msr,p1,p2) \
-        ({ \
-                int __err; \
-                u64 __val = native_read_msr_safe(msr, &__err); \
-                (*p1) = (u32)__val; \
-                (*p2) = (u32)(__val >> 32); \
-                __err; \
-        })
-
-#define rdtscl(low) \
-        ((low) = (u32)native_read_tsc())
-
-#define rdtscll(val) \
-        ((val) = native_read_tsc())
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define rdpmc(counter,low,high) \
-        do { \
-                u64 _l = native_read_pmc(); \
-                (low) = (u32)_l; \
-                (high) = (u32)(_l >> 32); \
-        } while(0)
-#endif /* !CONFIG_PARAVIRT */
-
-#ifdef CONFIG_SMP
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-#else /* CONFIG_SMP */
-static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-        rdmsr(msr_no, *l, *h);
-}
-static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-        wrmsr(msr_no, l, h);
-}
-static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-        return rdmsr_safe(msr_no, l, h);
-}
-static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-        return wrmsr_safe(msr_no, l, h);
-}
-#endif /* CONFIG_SMP */
-#endif
-#endif
-#endif /* __ASM_MSR_H */
--- a/include/asm-x86/msr_64.h
+++ /dev/null
@@ -1,187 +0,0 @@
-#ifndef X86_64_MSR_H
-#define X86_64_MSR_H 1
-
-#include <asm/msr-index.h>
-
-#ifndef __ASSEMBLY__
-#include <linux/errno.h>
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-
-#define rdmsr(msr,val1,val2) \
-        __asm__ __volatile__("rdmsr" \
-                : "=a" (val1), "=d" (val2) \
-                : "c" (msr))
-
-
-#define rdmsrl(msr,val) do { unsigned long a__,b__; \
-        __asm__ __volatile__("rdmsr" \
-                : "=a" (a__), "=d" (b__) \
-                : "c" (msr)); \
-        val = a__ | (b__<<32); \
-} while(0)
-
-#define wrmsr(msr,val1,val2) \
-        __asm__ __volatile__("wrmsr" \
-                : /* no outputs */ \
-                : "c" (msr), "a" (val1), "d" (val2))
-
-#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
-
-/* wrmsr with exception handling */
-#define wrmsr_safe(msr,a,b) ({ int ret__; \
-        asm volatile("2: wrmsr ; xorl %0,%0\n" \
-                "1:\n\t" \
-                ".section .fixup,\"ax\"\n\t" \
-                "3: movl %4,%0 ; jmp 1b\n\t" \
-                ".previous\n\t" \
-                ".section __ex_table,\"a\"\n" \
-                " .align 8\n\t" \
-                " .quad 2b,3b\n\t" \
-                ".previous" \
-                : "=a" (ret__) \
-                : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
-        ret__; })
-
-#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
-
-#define rdmsr_safe(msr,a,b) \
-        ({ int ret__; \
-        asm volatile ("1: rdmsr\n" \
-                "2:\n" \
-                ".section .fixup,\"ax\"\n" \
-                "3: movl %4,%0\n" \
-                " jmp 2b\n" \
-                ".previous\n" \
-                ".section __ex_table,\"a\"\n" \
-                " .align 8\n" \
-                " .quad 1b,3b\n" \
-                ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
-                :"c"(msr), "i"(-EIO), "0"(0)); \
-        ret__; })
-
-#define rdtsc(low,high) \
-        __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
-#define rdtscl(low) \
-        __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
-
-#define rdtscp(low,high,aux) \
-        asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
-
-#define rdtscll(val) do { \
-        unsigned int __a,__d; \
-        asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
-        (val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
-} while(0)
-
-#define rdtscpll(val, aux) do { \
-        unsigned long __a, __d; \
-        asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
-        (val) = (__d << 32) | __a; \
-} while (0)
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
-
-#define rdpmc(counter,low,high) \
-        __asm__ __volatile__("rdpmc" \
-                : "=a" (low), "=d" (high) \
-                : "c" (counter))
-
-static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
-                unsigned int *ecx, unsigned int *edx)
-{
-        __asm__("cpuid"
-                : "=a" (*eax),
-                  "=b" (*ebx),
-                  "=c" (*ecx),
-                  "=d" (*edx)
-                : "0" (op));
-}
-
-/* Some CPUID calls want 'count' to be placed in ecx */
-static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
-                int *edx)
-{
-        __asm__("cpuid"
-                : "=a" (*eax),
-                  "=b" (*ebx),
-                  "=c" (*ecx),
-                  "=d" (*edx)
-                : "0" (op), "c" (count));
-}
-
-/*
- * CPUID functions returning a single datum
- */
-static inline unsigned int cpuid_eax(unsigned int op)
-{
-        unsigned int eax;
-
-        __asm__("cpuid"
-                : "=a" (eax)
-                : "0" (op)
-                : "bx", "cx", "dx");
-        return eax;
-}
-static inline unsigned int cpuid_ebx(unsigned int op)
-{
-        unsigned int eax, ebx;
-
-        __asm__("cpuid"
-                : "=a" (eax), "=b" (ebx)
-                : "0" (op)
-                : "cx", "dx" );
-        return ebx;
-}
-static inline unsigned int cpuid_ecx(unsigned int op)
-{
-        unsigned int eax, ecx;
-
-        __asm__("cpuid"
-                : "=a" (eax), "=c" (ecx)
-                : "0" (op)
-                : "bx", "dx" );
-        return ecx;
-}
-static inline unsigned int cpuid_edx(unsigned int op)
-{
-        unsigned int eax, edx;
-
-        __asm__("cpuid"
-                : "=a" (eax), "=d" (edx)
-                : "0" (op)
-                : "bx", "cx");
-        return edx;
-}
-
-#ifdef CONFIG_SMP
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-#else /* CONFIG_SMP */
-static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-        rdmsr(msr_no, *l, *h);
-}
-static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-        wrmsr(msr_no, l, h);
-}
-static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-        return rdmsr_safe(msr_no, l, h);
-}
-static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-        return wrmsr_safe(msr_no, l, h);
-}
-#endif /* CONFIG_SMP */
-#endif /* __ASSEMBLY__ */
-#endif /* X86_64_MSR_H */
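
Both halves of the merged header also declare the same rdmsr_on_cpu()/wrmsr_on_cpu() helpers, with UP stubs that fall back to plain rdmsr()/wrmsr(), so per-CPU MSR access stays architecture-neutral as well. A minimal sketch of that interface follows; it is illustrative only, print_ucode_rev() is an invented name, and it assumes MSR_IA32_UCODE_REV is provided by <asm/msr-index.h>.

    /* Usage sketch, not part of the patch. */
    #include <linux/kernel.h>
    #include <linux/cpumask.h>
    #include <linux/types.h>
    #include <asm/msr.h>

    static void print_ucode_rev(void)
    {
            unsigned int cpu;
            u32 lo, hi;

            for_each_online_cpu(cpu) {
                    /*
                     * On SMP this performs the read on the target CPU;
                     * the UP stub above simply calls rdmsr() locally.
                     */
                    rdmsr_on_cpu(cpu, MSR_IA32_UCODE_REV, &lo, &hi);
                    printk(KERN_INFO "CPU%u: microcode revision %#x\n",
                           cpu, hi);
            }
    }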