Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.9 208 lines 5.2 kB view raw
1#ifndef ARCH_X86_KVM_CPUID_H 2#define ARCH_X86_KVM_CPUID_H 3 4#include "x86.h" 5#include <asm/cpu.h> 6 7int kvm_update_cpuid(struct kvm_vcpu *vcpu); 8bool kvm_mpx_supported(void); 9struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, 10 u32 function, u32 index); 11int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, 12 struct kvm_cpuid_entry2 __user *entries, 13 unsigned int type); 14int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, 15 struct kvm_cpuid *cpuid, 16 struct kvm_cpuid_entry __user *entries); 17int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, 18 struct kvm_cpuid2 *cpuid, 19 struct kvm_cpuid_entry2 __user *entries); 20int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, 21 struct kvm_cpuid2 *cpuid, 22 struct kvm_cpuid_entry2 __user *entries); 23void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); 24 25int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu); 26 27static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu) 28{ 29 return vcpu->arch.maxphyaddr; 30} 31 32static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) 33{ 34 struct kvm_cpuid_entry2 *best; 35 36 if (!static_cpu_has(X86_FEATURE_XSAVE)) 37 return false; 38 39 best = kvm_find_cpuid_entry(vcpu, 1, 0); 40 return best && (best->ecx & bit(X86_FEATURE_XSAVE)); 41} 42 43static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu) 44{ 45 struct kvm_cpuid_entry2 *best; 46 47 best = kvm_find_cpuid_entry(vcpu, 1, 0); 48 return best && (best->edx & bit(X86_FEATURE_MTRR)); 49} 50 51static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu) 52{ 53 struct kvm_cpuid_entry2 *best; 54 55 best = kvm_find_cpuid_entry(vcpu, 7, 0); 56 return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST)); 57} 58 59static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu) 60{ 61 struct kvm_cpuid_entry2 *best; 62 63 best = kvm_find_cpuid_entry(vcpu, 7, 0); 64 return best && (best->ebx & bit(X86_FEATURE_SMEP)); 65} 66 67static inline bool 
guest_cpuid_has_smap(struct kvm_vcpu *vcpu) 68{ 69 struct kvm_cpuid_entry2 *best; 70 71 best = kvm_find_cpuid_entry(vcpu, 7, 0); 72 return best && (best->ebx & bit(X86_FEATURE_SMAP)); 73} 74 75static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu) 76{ 77 struct kvm_cpuid_entry2 *best; 78 79 best = kvm_find_cpuid_entry(vcpu, 7, 0); 80 return best && (best->ebx & bit(X86_FEATURE_FSGSBASE)); 81} 82 83static inline bool guest_cpuid_has_pku(struct kvm_vcpu *vcpu) 84{ 85 struct kvm_cpuid_entry2 *best; 86 87 best = kvm_find_cpuid_entry(vcpu, 7, 0); 88 return best && (best->ecx & bit(X86_FEATURE_PKU)); 89} 90 91static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu) 92{ 93 struct kvm_cpuid_entry2 *best; 94 95 best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); 96 return best && (best->edx & bit(X86_FEATURE_LM)); 97} 98 99static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu) 100{ 101 struct kvm_cpuid_entry2 *best; 102 103 best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); 104 return best && (best->ecx & bit(X86_FEATURE_OSVW)); 105} 106 107static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu) 108{ 109 struct kvm_cpuid_entry2 *best; 110 111 best = kvm_find_cpuid_entry(vcpu, 1, 0); 112 return best && (best->ecx & bit(X86_FEATURE_PCID)); 113} 114 115static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu) 116{ 117 struct kvm_cpuid_entry2 *best; 118 119 best = kvm_find_cpuid_entry(vcpu, 1, 0); 120 return best && (best->ecx & bit(X86_FEATURE_X2APIC)); 121} 122 123static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu) 124{ 125 struct kvm_cpuid_entry2 *best; 126 127 best = kvm_find_cpuid_entry(vcpu, 0, 0); 128 return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx; 129} 130 131static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu) 132{ 133 struct kvm_cpuid_entry2 *best; 134 135 best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); 136 return best && (best->edx & bit(X86_FEATURE_GBPAGES)); 137} 138 139static 
inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu) 140{ 141 struct kvm_cpuid_entry2 *best; 142 143 best = kvm_find_cpuid_entry(vcpu, 7, 0); 144 return best && (best->ebx & bit(X86_FEATURE_RTM)); 145} 146 147static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu) 148{ 149 struct kvm_cpuid_entry2 *best; 150 151 best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); 152 return best && (best->edx & bit(X86_FEATURE_RDTSCP)); 153} 154 155/* 156 * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3 157 */ 158#define BIT_NRIPS 3 159 160static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu) 161{ 162 struct kvm_cpuid_entry2 *best; 163 164 best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0); 165 166 /* 167 * NRIPS is a scattered cpuid feature, so we can't use 168 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit 169 * position 8, not 3). 170 */ 171 return best && (best->edx & bit(BIT_NRIPS)); 172} 173#undef BIT_NRIPS 174 175static inline int guest_cpuid_family(struct kvm_vcpu *vcpu) 176{ 177 struct kvm_cpuid_entry2 *best; 178 179 best = kvm_find_cpuid_entry(vcpu, 0x1, 0); 180 if (!best) 181 return -1; 182 183 return x86_family(best->eax); 184} 185 186static inline int guest_cpuid_model(struct kvm_vcpu *vcpu) 187{ 188 struct kvm_cpuid_entry2 *best; 189 190 best = kvm_find_cpuid_entry(vcpu, 0x1, 0); 191 if (!best) 192 return -1; 193 194 return x86_model(best->eax); 195} 196 197static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu) 198{ 199 struct kvm_cpuid_entry2 *best; 200 201 best = kvm_find_cpuid_entry(vcpu, 0x1, 0); 202 if (!best) 203 return -1; 204 205 return x86_stepping(best->eax); 206} 207 208#endif