#ifndef __ASM_X86_MSR_H_
#define __ASM_X86_MSR_H_

#include <asm/msr-index.h>

#ifndef __ASSEMBLY__
# include <linux/types.h>
#endif

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>

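/*
 * native_read_tscp() executes RDTSCP, emitted as a raw byte sequence
 * (0x0f 0x01 0xf9) so the file still assembles with binutils that do
 * not know the mnemonic. It returns the 64-bit TSC and stores the
 * contents of the TSC_AUX MSR (typically programmed by the kernel
 * with an identifier for the current CPU) in *aux.
 */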
static inline unsigned long long native_read_tscp(unsigned int *aux)
{
        unsigned long low, high;
        asm volatile(".byte 0x0f,0x01,0xf9"
                     : "=a" (low), "=d" (high), "=c" (*aux));
        return low | ((u64)high << 32);
}

/*
 * The i386 calling convention returns a 64-bit value in edx:eax, while
 * x86_64 returns it in rax. Also, the "A" constraint does not really
 * mean rdx:rax on x86_64, so we need specialized behaviour for each
 * architecture. (See the illustrative expansion below the macros.)
 */
#ifdef CONFIG_X86_64
#define DECLARE_ARGS(val, low, high)	unsigned low, high
#define EAX_EDX_VAL(val, low, high)	(low | ((u64)(high) << 32))
#define EAX_EDX_ARGS(val, low, high)	"a" (low), "d" (high)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_ARGS(val, low, high)	"A" (val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
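
/*
 * Illustrative expansion (a sketch, not part of the original header):
 * with the macros above, native_read_msr() below reduces on i386 to
 *
 *	unsigned long long val;
 *	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
 *	return val;
 *
 * and on x86_64, where "A" cannot name rdx:rax, to
 *
 *	unsigned low, high;
 *	asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
 *	return low | ((u64)high << 32);
 */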

static inline unsigned long long native_read_msr(unsigned int msr)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
        return EAX_EDX_VAL(val, low, high);
}

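/*
 * The _safe variant may be used on MSRs that are not guaranteed to
 * exist: a faulting RDMSR raises #GP, and the exception-table entry
 * built below (".section __ex_table") redirects the fault from label
 * 2: to the fixup code at 3:, which stores -EFAULT in *err instead of
 * letting the kernel oops.
 */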
static inline unsigned long long native_read_msr_safe(unsigned int msr,
                                                      int *err)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("2: rdmsr ; xor %0,%0\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3: mov %3,%0 ; jmp 1b\n\t"
                     ".previous\n\t"
                     ".section __ex_table,\"a\"\n"
                     _ASM_ALIGN "\n\t"
                     _ASM_PTR " 2b,3b\n\t"
                     ".previous"
                     : "=r" (*err), EAX_EDX_RET(val, low, high)
                     : "c" (msr), "i" (-EFAULT));
        return EAX_EDX_VAL(val, low, high);
}

static inline void native_write_msr(unsigned int msr,
                                    unsigned low, unsigned high)
{
        asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high));
}

static inline int native_write_msr_safe(unsigned int msr,
                                        unsigned low, unsigned high)
{
        int err;
        asm volatile("2: wrmsr ; xor %0,%0\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3: mov %4,%0 ; jmp 1b\n\t"
                     ".previous\n\t"
                     ".section __ex_table,\"a\"\n"
                     _ASM_ALIGN "\n\t"
                     _ASM_PTR " 2b,3b\n\t"
                     ".previous"
                     : "=a" (err)
                     : "c" (msr), "0" (low), "d" (high),
                       "i" (-EFAULT));
        return err;
}

extern unsigned long long native_read_tsc(void);

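/*
 * RDTSC is not a serializing instruction; the rdtsc_barrier() calls
 * keep it from being speculated ahead of, or reordered behind, the
 * surrounding code, so the timestamp is taken where program order
 * suggests.
 */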
static __always_inline unsigned long long __native_read_tsc(void)
{
        DECLARE_ARGS(val, low, high);

        rdtsc_barrier();
        asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
        rdtsc_barrier();

        return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_pmc(int counter)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
        return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better
 * only). Note that the rd* operations modify their parameters directly
 * rather than through pointer indirection, which lets gcc optimize
 * better. (A usage sketch follows wrmsr() below.)
 */

#define rdmsr(msr, val1, val2)					\
	do {							\
		u64 __val = native_read_msr(msr);		\
		(val1) = (u32)__val;				\
		(val2) = (u32)(__val >> 32);			\
	} while (0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
        native_write_msr(msr, low, high);
}

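/*
 * Usage sketch (illustrative only; MSR_EFER and EFER_SCE come from
 * <asm/msr-index.h>): set the SYSCALL-enable bit in EFER.
 *
 *	u32 lo, hi;
 *	rdmsr(MSR_EFER, lo, hi);
 *	wrmsr(MSR_EFER, lo | EFER_SCE, hi);
 */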
#define rdmsrl(msr, val)					\
	((val) = native_read_msr(msr))

#define wrmsrl(msr, val)					\
	native_write_msr(msr, (u32)((u64)(val)), (u32)((u64)(val) >> 32))

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
        return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, p1, p2)					\
	({							\
		int __err;					\
		u64 __val = native_read_msr_safe(msr, &__err);	\
		(*p1) = (u32)__val;				\
		(*p2) = (u32)(__val >> 32);			\
		__err;						\
	})

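/*
 * Usage sketch (illustrative): probe an MSR that may not exist on the
 * current CPU; a non-zero return means the read faulted with #GP.
 * MSR_IA32_PLATFORM_ID is defined in <asm/msr-index.h>.
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi))
 *		return -EIO;
 */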
#define rdtscl(low)						\
	((low) = (u32)native_read_tsc())

#define rdtscll(val)						\
	((val) = native_read_tsc())

#define rdpmc(counter, low, high)				\
	do {							\
		u64 _l = native_read_pmc(counter);		\
		(low) = (u32)_l;				\
		(high) = (u32)(_l >> 32);			\
	} while (0)

#define rdtscp(low, high, aux)					\
	do {							\
		unsigned long long _val = native_read_tscp(&(aux));	\
		(low) = (u32)_val;				\
		(high) = (u32)(_val >> 32);			\
	} while (0)

#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))

#endif /* !CONFIG_PARAVIRT */


#define checking_wrmsrl(msr, val) wrmsr_safe(msr, (u32)(val), (u32)((val) >> 32))

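/*
 * MSR 0x10 is IA32_TIME_STAMP_COUNTER; 0xc0000103 is IA32_TSC_AUX,
 * the value that RDTSCP returns in ECX.
 */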
#define write_tsc(val1, val2) wrmsr(0x10, val1, val2)

#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)

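/*
 * The *_on_cpu() variants access an MSR on an arbitrary CPU. On SMP
 * kernels they are implemented out of line and run the access on the
 * target CPU (via an IPI, e.g. smp_call_function_single()); on UP they
 * degenerate to the local accessors below.
 */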
#ifdef CONFIG_SMP
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else /* CONFIG_SMP */
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
        rdmsr(msr_no, *l, *h);
}
static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
        wrmsr(msr_no, l, h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
        return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
        return wrmsr_safe(msr_no, l, h);
}
#endif /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif