Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

hyperv-tlfs: Change prefix of generic HV_REGISTER_* MSRs to HV_MSR_*

The HV_REGISTER_ defines are used as arguments to hv_set/get_register(),
which delegate to arch-specific mechanisms for getting/setting synthetic
Hyper-V MSRs.

On arm64, HV_REGISTER_ defines are synthetic VP registers accessed via
the get/set vp registers hypercalls. The naming matches the TLFS
document, although these register names are not specific to arm64.

However, on x86 the prefix HV_REGISTER_ indicates Hyper-V MSRs accessed
via rdmsrl()/wrmsrl(). This is not consistent with the TLFS doc, where
HV_REGISTER_ is *only* used for VP register names used by
the get/set register hypercalls.

To fix this inconsistency and prevent future confusion, change the
arch-generic aliases used by callers of hv_set/get_register() to have
the prefix HV_MSR_ instead of HV_REGISTER_.

Use the prefix HV_X64_MSR_ for the x86-only Hyper-V MSRs. On x86, the
generic HV_MSR_'s point to the corresponding HV_X64_MSR_.

Move the arm64 HV_REGISTER_* defines to the asm-generic hyperv-tlfs.h,
since these are not specific to arm64. On arm64, the generic HV_MSR_'s
point to the corresponding HV_REGISTER_.

While at it, rename hv_get/set_registers() and related functions to
hv_get/set_msr(), hv_get/set_nested_msr(), etc. These are only used for
Hyper-V MSRs and this naming makes that clear.

Signed-off-by: Nuno Das Neves <nunodasneves@linux.microsoft.com>
Reviewed-by: Wei Liu <wei.liu@kernel.org>
Reviewed-by: Michael Kelley <mhklinux@outlook.com>
Link: https://lore.kernel.org/r/1708440933-27125-1-git-send-email-nunodasneves@linux.microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
Message-ID: <1708440933-27125-1-git-send-email-nunodasneves@linux.microsoft.com>

authored by

Nuno Das Neves and committed by
Wei Liu
0e3f7d12 d206a76d

+216 -190
+19 -26
arch/arm64/include/asm/hyperv-tlfs.h
··· 22 22 */ 23 23 24 24 /* 25 - * These Hyper-V registers provide information equivalent to the CPUID 26 - * instruction on x86/x64. 27 - */ 28 - #define HV_REGISTER_HYPERVISOR_VERSION 0x00000100 /*CPUID 0x40000002 */ 29 - #define HV_REGISTER_FEATURES 0x00000200 /*CPUID 0x40000003 */ 30 - #define HV_REGISTER_ENLIGHTENMENTS 0x00000201 /*CPUID 0x40000004 */ 31 - 32 - /* 33 25 * Group C Features. See the asm-generic version of hyperv-tlfs.h 34 26 * for a description of Feature Groups. 35 27 */ ··· 33 41 #define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(13) 34 42 35 43 /* 36 - * Synthetic register definitions equivalent to MSRs on x86/x64 44 + * To support arch-generic code calling hv_set/get_register: 45 + * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrl/wrmsrl 46 + * - On ARM, HV_MSR_ indicates a VP register accessed via hypercall 37 47 */ 38 - #define HV_REGISTER_CRASH_P0 0x00000210 39 - #define HV_REGISTER_CRASH_P1 0x00000211 40 - #define HV_REGISTER_CRASH_P2 0x00000212 41 - #define HV_REGISTER_CRASH_P3 0x00000213 42 - #define HV_REGISTER_CRASH_P4 0x00000214 43 - #define HV_REGISTER_CRASH_CTL 0x00000215 48 + #define HV_MSR_CRASH_P0 (HV_REGISTER_CRASH_P0) 49 + #define HV_MSR_CRASH_P1 (HV_REGISTER_CRASH_P1) 50 + #define HV_MSR_CRASH_P2 (HV_REGISTER_CRASH_P2) 51 + #define HV_MSR_CRASH_P3 (HV_REGISTER_CRASH_P3) 52 + #define HV_MSR_CRASH_P4 (HV_REGISTER_CRASH_P4) 53 + #define HV_MSR_CRASH_CTL (HV_REGISTER_CRASH_CTL) 44 54 45 - #define HV_REGISTER_GUEST_OSID 0x00090002 46 - #define HV_REGISTER_VP_INDEX 0x00090003 47 - #define HV_REGISTER_TIME_REF_COUNT 0x00090004 48 - #define HV_REGISTER_REFERENCE_TSC 0x00090017 55 + #define HV_MSR_VP_INDEX (HV_REGISTER_VP_INDEX) 56 + #define HV_MSR_TIME_REF_COUNT (HV_REGISTER_TIME_REF_COUNT) 57 + #define HV_MSR_REFERENCE_TSC (HV_REGISTER_REFERENCE_TSC) 49 58 50 - #define HV_REGISTER_SINT0 0x000A0000 51 - #define HV_REGISTER_SCONTROL 0x000A0010 52 - #define HV_REGISTER_SIEFP 0x000A0012 53 - #define HV_REGISTER_SIMP 0x000A0013 54 - 
#define HV_REGISTER_EOM 0x000A0014 59 + #define HV_MSR_SINT0 (HV_REGISTER_SINT0) 60 + #define HV_MSR_SCONTROL (HV_REGISTER_SCONTROL) 61 + #define HV_MSR_SIEFP (HV_REGISTER_SIEFP) 62 + #define HV_MSR_SIMP (HV_REGISTER_SIMP) 63 + #define HV_MSR_EOM (HV_REGISTER_EOM) 55 64 56 - #define HV_REGISTER_STIMER0_CONFIG 0x000B0000 57 - #define HV_REGISTER_STIMER0_COUNT 0x000B0001 65 + #define HV_MSR_STIMER0_CONFIG (HV_REGISTER_STIMER0_CONFIG) 66 + #define HV_MSR_STIMER0_COUNT (HV_REGISTER_STIMER0_COUNT) 58 67 59 68 union hv_msi_entry { 60 69 u64 as_uint64[2];
+2 -2
arch/arm64/include/asm/mshyperv.h
··· 31 31 u64 hv_get_vpreg(u32 reg); 32 32 void hv_get_vpreg_128(u32 reg, struct hv_get_vp_registers_output *result); 33 33 34 - static inline void hv_set_register(unsigned int reg, u64 value) 34 + static inline void hv_set_msr(unsigned int reg, u64 value) 35 35 { 36 36 hv_set_vpreg(reg, value); 37 37 } 38 38 39 - static inline u64 hv_get_register(unsigned int reg) 39 + static inline u64 hv_get_msr(unsigned int reg) 40 40 { 41 41 return hv_get_vpreg(reg); 42 42 }
+4 -4
arch/x86/hyperv/hv_init.c
··· 667 667 hv_hypercall_pg = NULL; 668 668 669 669 /* Reset the hypercall page */ 670 - hypercall_msr.as_uint64 = hv_get_register(HV_X64_MSR_HYPERCALL); 670 + hypercall_msr.as_uint64 = hv_get_msr(HV_X64_MSR_HYPERCALL); 671 671 hypercall_msr.enable = 0; 672 - hv_set_register(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 672 + hv_set_msr(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 673 673 674 674 /* Reset the TSC page */ 675 - tsc_msr.as_uint64 = hv_get_register(HV_X64_MSR_REFERENCE_TSC); 675 + tsc_msr.as_uint64 = hv_get_msr(HV_X64_MSR_REFERENCE_TSC); 676 676 tsc_msr.enable = 0; 677 - hv_set_register(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64); 677 + hv_set_msr(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64); 678 678 } 679 679 680 680 void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
+76 -69
arch/x86/include/asm/hyperv-tlfs.h
··· 182 182 #define HV_X64_MSR_HYPERCALL 0x40000001 183 183 184 184 /* MSR used to provide vcpu index */ 185 - #define HV_REGISTER_VP_INDEX 0x40000002 185 + #define HV_X64_MSR_VP_INDEX 0x40000002 186 186 187 187 /* MSR used to reset the guest OS. */ 188 188 #define HV_X64_MSR_RESET 0x40000003 ··· 191 191 #define HV_X64_MSR_VP_RUNTIME 0x40000010 192 192 193 193 /* MSR used to read the per-partition time reference counter */ 194 - #define HV_REGISTER_TIME_REF_COUNT 0x40000020 194 + #define HV_X64_MSR_TIME_REF_COUNT 0x40000020 195 195 196 196 /* A partition's reference time stamp counter (TSC) page */ 197 - #define HV_REGISTER_REFERENCE_TSC 0x40000021 197 + #define HV_X64_MSR_REFERENCE_TSC 0x40000021 198 198 199 199 /* MSR used to retrieve the TSC frequency */ 200 200 #define HV_X64_MSR_TSC_FREQUENCY 0x40000022 ··· 209 209 #define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073 210 210 211 211 /* Define synthetic interrupt controller model specific registers. */ 212 - #define HV_REGISTER_SCONTROL 0x40000080 213 - #define HV_REGISTER_SVERSION 0x40000081 214 - #define HV_REGISTER_SIEFP 0x40000082 215 - #define HV_REGISTER_SIMP 0x40000083 216 - #define HV_REGISTER_EOM 0x40000084 217 - #define HV_REGISTER_SINT0 0x40000090 218 - #define HV_REGISTER_SINT1 0x40000091 219 - #define HV_REGISTER_SINT2 0x40000092 220 - #define HV_REGISTER_SINT3 0x40000093 221 - #define HV_REGISTER_SINT4 0x40000094 222 - #define HV_REGISTER_SINT5 0x40000095 223 - #define HV_REGISTER_SINT6 0x40000096 224 - #define HV_REGISTER_SINT7 0x40000097 225 - #define HV_REGISTER_SINT8 0x40000098 226 - #define HV_REGISTER_SINT9 0x40000099 227 - #define HV_REGISTER_SINT10 0x4000009A 228 - #define HV_REGISTER_SINT11 0x4000009B 229 - #define HV_REGISTER_SINT12 0x4000009C 230 - #define HV_REGISTER_SINT13 0x4000009D 231 - #define HV_REGISTER_SINT14 0x4000009E 232 - #define HV_REGISTER_SINT15 0x4000009F 212 + #define HV_X64_MSR_SCONTROL 0x40000080 213 + #define HV_X64_MSR_SVERSION 0x40000081 214 + #define HV_X64_MSR_SIEFP 
0x40000082 215 + #define HV_X64_MSR_SIMP 0x40000083 216 + #define HV_X64_MSR_EOM 0x40000084 217 + #define HV_X64_MSR_SINT0 0x40000090 218 + #define HV_X64_MSR_SINT1 0x40000091 219 + #define HV_X64_MSR_SINT2 0x40000092 220 + #define HV_X64_MSR_SINT3 0x40000093 221 + #define HV_X64_MSR_SINT4 0x40000094 222 + #define HV_X64_MSR_SINT5 0x40000095 223 + #define HV_X64_MSR_SINT6 0x40000096 224 + #define HV_X64_MSR_SINT7 0x40000097 225 + #define HV_X64_MSR_SINT8 0x40000098 226 + #define HV_X64_MSR_SINT9 0x40000099 227 + #define HV_X64_MSR_SINT10 0x4000009A 228 + #define HV_X64_MSR_SINT11 0x4000009B 229 + #define HV_X64_MSR_SINT12 0x4000009C 230 + #define HV_X64_MSR_SINT13 0x4000009D 231 + #define HV_X64_MSR_SINT14 0x4000009E 232 + #define HV_X64_MSR_SINT15 0x4000009F 233 233 234 234 /* 235 235 * Define synthetic interrupt controller model specific registers for 236 236 * nested hypervisor. 237 237 */ 238 - #define HV_REGISTER_NESTED_SCONTROL 0x40001080 239 - #define HV_REGISTER_NESTED_SVERSION 0x40001081 240 - #define HV_REGISTER_NESTED_SIEFP 0x40001082 241 - #define HV_REGISTER_NESTED_SIMP 0x40001083 242 - #define HV_REGISTER_NESTED_EOM 0x40001084 243 - #define HV_REGISTER_NESTED_SINT0 0x40001090 238 + #define HV_X64_MSR_NESTED_SCONTROL 0x40001080 239 + #define HV_X64_MSR_NESTED_SVERSION 0x40001081 240 + #define HV_X64_MSR_NESTED_SIEFP 0x40001082 241 + #define HV_X64_MSR_NESTED_SIMP 0x40001083 242 + #define HV_X64_MSR_NESTED_EOM 0x40001084 243 + #define HV_X64_MSR_NESTED_SINT0 0x40001090 244 244 245 245 /* 246 246 * Synthetic Timer MSRs. Four timers per vcpu. 
247 247 */ 248 - #define HV_REGISTER_STIMER0_CONFIG 0x400000B0 249 - #define HV_REGISTER_STIMER0_COUNT 0x400000B1 250 - #define HV_REGISTER_STIMER1_CONFIG 0x400000B2 251 - #define HV_REGISTER_STIMER1_COUNT 0x400000B3 252 - #define HV_REGISTER_STIMER2_CONFIG 0x400000B4 253 - #define HV_REGISTER_STIMER2_COUNT 0x400000B5 254 - #define HV_REGISTER_STIMER3_CONFIG 0x400000B6 255 - #define HV_REGISTER_STIMER3_COUNT 0x400000B7 248 + #define HV_X64_MSR_STIMER0_CONFIG 0x400000B0 249 + #define HV_X64_MSR_STIMER0_COUNT 0x400000B1 250 + #define HV_X64_MSR_STIMER1_CONFIG 0x400000B2 251 + #define HV_X64_MSR_STIMER1_COUNT 0x400000B3 252 + #define HV_X64_MSR_STIMER2_CONFIG 0x400000B4 253 + #define HV_X64_MSR_STIMER2_COUNT 0x400000B5 254 + #define HV_X64_MSR_STIMER3_CONFIG 0x400000B6 255 + #define HV_X64_MSR_STIMER3_COUNT 0x400000B7 256 256 257 257 /* Hyper-V guest idle MSR */ 258 258 #define HV_X64_MSR_GUEST_IDLE 0x400000F0 259 259 260 260 /* Hyper-V guest crash notification MSR's */ 261 - #define HV_REGISTER_CRASH_P0 0x40000100 262 - #define HV_REGISTER_CRASH_P1 0x40000101 263 - #define HV_REGISTER_CRASH_P2 0x40000102 264 - #define HV_REGISTER_CRASH_P3 0x40000103 265 - #define HV_REGISTER_CRASH_P4 0x40000104 266 - #define HV_REGISTER_CRASH_CTL 0x40000105 261 + #define HV_X64_MSR_CRASH_P0 0x40000100 262 + #define HV_X64_MSR_CRASH_P1 0x40000101 263 + #define HV_X64_MSR_CRASH_P2 0x40000102 264 + #define HV_X64_MSR_CRASH_P3 0x40000103 265 + #define HV_X64_MSR_CRASH_P4 0x40000104 266 + #define HV_X64_MSR_CRASH_CTL 0x40000105 267 267 268 268 /* TSC emulation after migration */ 269 269 #define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 ··· 276 276 /* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */ 277 277 #define HV_EXPOSE_INVARIANT_TSC BIT_ULL(0) 278 278 279 - /* Register name aliases for temporary compatibility */ 280 - #define HV_X64_MSR_STIMER0_COUNT HV_REGISTER_STIMER0_COUNT 281 - #define HV_X64_MSR_STIMER0_CONFIG HV_REGISTER_STIMER0_CONFIG 282 - #define HV_X64_MSR_STIMER1_COUNT 
HV_REGISTER_STIMER1_COUNT 283 - #define HV_X64_MSR_STIMER1_CONFIG HV_REGISTER_STIMER1_CONFIG 284 - #define HV_X64_MSR_STIMER2_COUNT HV_REGISTER_STIMER2_COUNT 285 - #define HV_X64_MSR_STIMER2_CONFIG HV_REGISTER_STIMER2_CONFIG 286 - #define HV_X64_MSR_STIMER3_COUNT HV_REGISTER_STIMER3_COUNT 287 - #define HV_X64_MSR_STIMER3_CONFIG HV_REGISTER_STIMER3_CONFIG 288 - #define HV_X64_MSR_SCONTROL HV_REGISTER_SCONTROL 289 - #define HV_X64_MSR_SVERSION HV_REGISTER_SVERSION 290 - #define HV_X64_MSR_SIMP HV_REGISTER_SIMP 291 - #define HV_X64_MSR_SIEFP HV_REGISTER_SIEFP 292 - #define HV_X64_MSR_VP_INDEX HV_REGISTER_VP_INDEX 293 - #define HV_X64_MSR_EOM HV_REGISTER_EOM 294 - #define HV_X64_MSR_SINT0 HV_REGISTER_SINT0 295 - #define HV_X64_MSR_SINT15 HV_REGISTER_SINT15 296 - #define HV_X64_MSR_CRASH_P0 HV_REGISTER_CRASH_P0 297 - #define HV_X64_MSR_CRASH_P1 HV_REGISTER_CRASH_P1 298 - #define HV_X64_MSR_CRASH_P2 HV_REGISTER_CRASH_P2 299 - #define HV_X64_MSR_CRASH_P3 HV_REGISTER_CRASH_P3 300 - #define HV_X64_MSR_CRASH_P4 HV_REGISTER_CRASH_P4 301 - #define HV_X64_MSR_CRASH_CTL HV_REGISTER_CRASH_CTL 302 - #define HV_X64_MSR_TIME_REF_COUNT HV_REGISTER_TIME_REF_COUNT 303 - #define HV_X64_MSR_REFERENCE_TSC HV_REGISTER_REFERENCE_TSC 279 + /* 280 + * To support arch-generic code calling hv_set/get_register: 281 + * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrl/wrmsrl 282 + * - On ARM, HV_MSR_ indicates a VP register accessed via hypercall 283 + */ 284 + #define HV_MSR_CRASH_P0 (HV_X64_MSR_CRASH_P0) 285 + #define HV_MSR_CRASH_P1 (HV_X64_MSR_CRASH_P1) 286 + #define HV_MSR_CRASH_P2 (HV_X64_MSR_CRASH_P2) 287 + #define HV_MSR_CRASH_P3 (HV_X64_MSR_CRASH_P3) 288 + #define HV_MSR_CRASH_P4 (HV_X64_MSR_CRASH_P4) 289 + #define HV_MSR_CRASH_CTL (HV_X64_MSR_CRASH_CTL) 290 + 291 + #define HV_MSR_VP_INDEX (HV_X64_MSR_VP_INDEX) 292 + #define HV_MSR_TIME_REF_COUNT (HV_X64_MSR_TIME_REF_COUNT) 293 + #define HV_MSR_REFERENCE_TSC (HV_X64_MSR_REFERENCE_TSC) 294 + 295 + #define HV_MSR_SINT0 
(HV_X64_MSR_SINT0) 296 + #define HV_MSR_SVERSION (HV_X64_MSR_SVERSION) 297 + #define HV_MSR_SCONTROL (HV_X64_MSR_SCONTROL) 298 + #define HV_MSR_SIEFP (HV_X64_MSR_SIEFP) 299 + #define HV_MSR_SIMP (HV_X64_MSR_SIMP) 300 + #define HV_MSR_EOM (HV_X64_MSR_EOM) 301 + 302 + #define HV_MSR_NESTED_SCONTROL (HV_X64_MSR_NESTED_SCONTROL) 303 + #define HV_MSR_NESTED_SVERSION (HV_X64_MSR_NESTED_SVERSION) 304 + #define HV_MSR_NESTED_SIEFP (HV_X64_MSR_NESTED_SIEFP) 305 + #define HV_MSR_NESTED_SIMP (HV_X64_MSR_NESTED_SIMP) 306 + #define HV_MSR_NESTED_EOM (HV_X64_MSR_NESTED_EOM) 307 + #define HV_MSR_NESTED_SINT0 (HV_X64_MSR_NESTED_SINT0) 308 + 309 + #define HV_MSR_STIMER0_CONFIG (HV_X64_MSR_STIMER0_CONFIG) 310 + #define HV_MSR_STIMER0_COUNT (HV_X64_MSR_STIMER0_COUNT) 304 311 305 312 /* 306 313 * Registers are only accessible via HVCALL_GET_VP_REGISTERS hvcall and
+15 -15
arch/x86/include/asm/mshyperv.h
··· 293 293 static inline void hv_ivm_msr_read(u64 msr, u64 *value) {} 294 294 #endif 295 295 296 - static inline bool hv_is_synic_reg(unsigned int reg) 296 + static inline bool hv_is_synic_msr(unsigned int reg) 297 297 { 298 - return (reg >= HV_REGISTER_SCONTROL) && 299 - (reg <= HV_REGISTER_SINT15); 298 + return (reg >= HV_X64_MSR_SCONTROL) && 299 + (reg <= HV_X64_MSR_SINT15); 300 300 } 301 301 302 - static inline bool hv_is_sint_reg(unsigned int reg) 302 + static inline bool hv_is_sint_msr(unsigned int reg) 303 303 { 304 - return (reg >= HV_REGISTER_SINT0) && 305 - (reg <= HV_REGISTER_SINT15); 304 + return (reg >= HV_X64_MSR_SINT0) && 305 + (reg <= HV_X64_MSR_SINT15); 306 306 } 307 307 308 - u64 hv_get_register(unsigned int reg); 309 - void hv_set_register(unsigned int reg, u64 value); 310 - u64 hv_get_non_nested_register(unsigned int reg); 311 - void hv_set_non_nested_register(unsigned int reg, u64 value); 308 + u64 hv_get_msr(unsigned int reg); 309 + void hv_set_msr(unsigned int reg, u64 value); 310 + u64 hv_get_non_nested_msr(unsigned int reg); 311 + void hv_set_non_nested_msr(unsigned int reg, u64 value); 312 312 313 - static __always_inline u64 hv_raw_get_register(unsigned int reg) 313 + static __always_inline u64 hv_raw_get_msr(unsigned int reg) 314 314 { 315 315 return __rdmsr(reg); 316 316 } ··· 331 331 { 332 332 return -1; 333 333 } 334 - static inline void hv_set_register(unsigned int reg, u64 value) { } 335 - static inline u64 hv_get_register(unsigned int reg) { return 0; } 336 - static inline void hv_set_non_nested_register(unsigned int reg, u64 value) { } 337 - static inline u64 hv_get_non_nested_register(unsigned int reg) { return 0; } 334 + static inline void hv_set_msr(unsigned int reg, u64 value) { } 335 + static inline u64 hv_get_msr(unsigned int reg) { return 0; } 336 + static inline void hv_set_non_nested_msr(unsigned int reg, u64 value) { } 337 + static inline u64 hv_get_non_nested_msr(unsigned int reg) { return 0; } 338 338 #endif /* 
CONFIG_HYPERV */ 339 339 340 340
+28 -28
arch/x86/kernel/cpu/mshyperv.c
··· 45 45 EXPORT_SYMBOL_GPL(hyperv_paravisor_present); 46 46 47 47 #if IS_ENABLED(CONFIG_HYPERV) 48 - static inline unsigned int hv_get_nested_reg(unsigned int reg) 48 + static inline unsigned int hv_get_nested_msr(unsigned int reg) 49 49 { 50 - if (hv_is_sint_reg(reg)) 51 - return reg - HV_REGISTER_SINT0 + HV_REGISTER_NESTED_SINT0; 50 + if (hv_is_sint_msr(reg)) 51 + return reg - HV_X64_MSR_SINT0 + HV_X64_MSR_NESTED_SINT0; 52 52 53 53 switch (reg) { 54 - case HV_REGISTER_SIMP: 55 - return HV_REGISTER_NESTED_SIMP; 56 - case HV_REGISTER_SIEFP: 57 - return HV_REGISTER_NESTED_SIEFP; 58 - case HV_REGISTER_SVERSION: 59 - return HV_REGISTER_NESTED_SVERSION; 60 - case HV_REGISTER_SCONTROL: 61 - return HV_REGISTER_NESTED_SCONTROL; 62 - case HV_REGISTER_EOM: 63 - return HV_REGISTER_NESTED_EOM; 54 + case HV_X64_MSR_SIMP: 55 + return HV_X64_MSR_NESTED_SIMP; 56 + case HV_X64_MSR_SIEFP: 57 + return HV_X64_MSR_NESTED_SIEFP; 58 + case HV_X64_MSR_SVERSION: 59 + return HV_X64_MSR_NESTED_SVERSION; 60 + case HV_X64_MSR_SCONTROL: 61 + return HV_X64_MSR_NESTED_SCONTROL; 62 + case HV_X64_MSR_EOM: 63 + return HV_X64_MSR_NESTED_EOM; 64 64 default: 65 65 return reg; 66 66 } 67 67 } 68 68 69 - u64 hv_get_non_nested_register(unsigned int reg) 69 + u64 hv_get_non_nested_msr(unsigned int reg) 70 70 { 71 71 u64 value; 72 72 73 - if (hv_is_synic_reg(reg) && ms_hyperv.paravisor_present) 73 + if (hv_is_synic_msr(reg) && ms_hyperv.paravisor_present) 74 74 hv_ivm_msr_read(reg, &value); 75 75 else 76 76 rdmsrl(reg, value); 77 77 return value; 78 78 } 79 - EXPORT_SYMBOL_GPL(hv_get_non_nested_register); 79 + EXPORT_SYMBOL_GPL(hv_get_non_nested_msr); 80 80 81 - void hv_set_non_nested_register(unsigned int reg, u64 value) 81 + void hv_set_non_nested_msr(unsigned int reg, u64 value) 82 82 { 83 - if (hv_is_synic_reg(reg) && ms_hyperv.paravisor_present) { 83 + if (hv_is_synic_msr(reg) && ms_hyperv.paravisor_present) { 84 84 hv_ivm_msr_write(reg, value); 85 85 86 86 /* Write proxy bit via wrmsl instruction */ 
87 - if (hv_is_sint_reg(reg)) 87 + if (hv_is_sint_msr(reg)) 88 88 wrmsrl(reg, value | 1 << 20); 89 89 } else { 90 90 wrmsrl(reg, value); 91 91 } 92 92 } 93 - EXPORT_SYMBOL_GPL(hv_set_non_nested_register); 93 + EXPORT_SYMBOL_GPL(hv_set_non_nested_msr); 94 94 95 - u64 hv_get_register(unsigned int reg) 95 + u64 hv_get_msr(unsigned int reg) 96 96 { 97 97 if (hv_nested) 98 - reg = hv_get_nested_reg(reg); 98 + reg = hv_get_nested_msr(reg); 99 99 100 - return hv_get_non_nested_register(reg); 100 + return hv_get_non_nested_msr(reg); 101 101 } 102 - EXPORT_SYMBOL_GPL(hv_get_register); 102 + EXPORT_SYMBOL_GPL(hv_get_msr); 103 103 104 - void hv_set_register(unsigned int reg, u64 value) 104 + void hv_set_msr(unsigned int reg, u64 value) 105 105 { 106 106 if (hv_nested) 107 - reg = hv_get_nested_reg(reg); 107 + reg = hv_get_nested_msr(reg); 108 108 109 - hv_set_non_nested_register(reg, value); 109 + hv_set_non_nested_msr(reg, value); 110 110 } 111 - EXPORT_SYMBOL_GPL(hv_set_register); 111 + EXPORT_SYMBOL_GPL(hv_set_msr); 112 112 113 113 static void (*vmbus_handler)(void); 114 114 static void (*hv_stimer0_handler)(void);
+13 -13
drivers/clocksource/hyperv_timer.c
··· 81 81 82 82 current_tick = hv_read_reference_counter(); 83 83 current_tick += delta; 84 - hv_set_register(HV_REGISTER_STIMER0_COUNT, current_tick); 84 + hv_set_msr(HV_MSR_STIMER0_COUNT, current_tick); 85 85 return 0; 86 86 } 87 87 88 88 static int hv_ce_shutdown(struct clock_event_device *evt) 89 89 { 90 - hv_set_register(HV_REGISTER_STIMER0_COUNT, 0); 91 - hv_set_register(HV_REGISTER_STIMER0_CONFIG, 0); 90 + hv_set_msr(HV_MSR_STIMER0_COUNT, 0); 91 + hv_set_msr(HV_MSR_STIMER0_CONFIG, 0); 92 92 if (direct_mode_enabled && stimer0_irq >= 0) 93 93 disable_percpu_irq(stimer0_irq); 94 94 ··· 119 119 timer_cfg.direct_mode = 0; 120 120 timer_cfg.sintx = stimer0_message_sint; 121 121 } 122 - hv_set_register(HV_REGISTER_STIMER0_CONFIG, timer_cfg.as_uint64); 122 + hv_set_msr(HV_MSR_STIMER0_CONFIG, timer_cfg.as_uint64); 123 123 return 0; 124 124 } 125 125 ··· 372 372 * is set to 0 when the partition is created and is incremented in 100 373 373 * nanosecond units. 374 374 * 375 - * Use hv_raw_get_register() because this function is used from 376 - * noinstr. Notable; while HV_REGISTER_TIME_REF_COUNT is a synthetic 375 + * Use hv_raw_get_msr() because this function is used from 376 + * noinstr. Notable; while HV_MSR_TIME_REF_COUNT is a synthetic 377 377 * register it doesn't need the GHCB path. 
378 378 */ 379 - return hv_raw_get_register(HV_REGISTER_TIME_REF_COUNT); 379 + return hv_raw_get_msr(HV_MSR_TIME_REF_COUNT); 380 380 } 381 381 382 382 /* ··· 439 439 union hv_reference_tsc_msr tsc_msr; 440 440 441 441 /* Disable the TSC page */ 442 - tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC); 442 + tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC); 443 443 tsc_msr.enable = 0; 444 - hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64); 444 + hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64); 445 445 } 446 446 447 447 ··· 450 450 union hv_reference_tsc_msr tsc_msr; 451 451 452 452 /* Re-enable the TSC page */ 453 - tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC); 453 + tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC); 454 454 tsc_msr.enable = 1; 455 455 tsc_msr.pfn = tsc_pfn; 456 - hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64); 456 + hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64); 457 457 } 458 458 459 459 #ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK ··· 555 555 * thus TSC clocksource will work even without the real TSC page 556 556 * mapped. 557 557 */ 558 - tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC); 558 + tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC); 559 559 if (hv_root_partition) 560 560 tsc_pfn = tsc_msr.pfn; 561 561 else 562 562 tsc_pfn = HVPFN_DOWN(virt_to_phys(tsc_page)); 563 563 tsc_msr.enable = 1; 564 564 tsc_msr.pfn = tsc_pfn; 565 - hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64); 565 + hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64); 566 566 567 567 clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100); 568 568
+16 -20
drivers/hv/hv.c
··· 270 270 union hv_synic_scontrol sctrl; 271 271 272 272 /* Setup the Synic's message page */ 273 - simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP); 273 + simp.as_uint64 = hv_get_msr(HV_MSR_SIMP); 274 274 simp.simp_enabled = 1; 275 275 276 276 if (ms_hyperv.paravisor_present || hv_root_partition) { ··· 286 286 >> HV_HYP_PAGE_SHIFT; 287 287 } 288 288 289 - hv_set_register(HV_REGISTER_SIMP, simp.as_uint64); 289 + hv_set_msr(HV_MSR_SIMP, simp.as_uint64); 290 290 291 291 /* Setup the Synic's event page */ 292 - siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP); 292 + siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP); 293 293 siefp.siefp_enabled = 1; 294 294 295 295 if (ms_hyperv.paravisor_present || hv_root_partition) { ··· 305 305 >> HV_HYP_PAGE_SHIFT; 306 306 } 307 307 308 - hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64); 308 + hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64); 309 309 310 310 /* Setup the shared SINT. */ 311 311 if (vmbus_irq != -1) 312 312 enable_percpu_irq(vmbus_irq, 0); 313 - shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 + 314 - VMBUS_MESSAGE_SINT); 313 + shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT); 315 314 316 315 shared_sint.vector = vmbus_interrupt; 317 316 shared_sint.masked = false; ··· 325 326 #else 326 327 shared_sint.auto_eoi = 0; 327 328 #endif 328 - hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT, 329 - shared_sint.as_uint64); 329 + hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64); 330 330 331 331 /* Enable the global synic bit */ 332 - sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL); 332 + sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL); 333 333 sctrl.enable = 1; 334 334 335 - hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64); 335 + hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64); 336 336 } 337 337 338 338 int hv_synic_init(unsigned int cpu) ··· 355 357 union hv_synic_siefp siefp; 356 358 union hv_synic_scontrol sctrl; 357 359 358 - shared_sint.as_uint64 = 
hv_get_register(HV_REGISTER_SINT0 + 359 - VMBUS_MESSAGE_SINT); 360 + shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT); 360 361 361 362 shared_sint.masked = 1; 362 363 363 364 /* Need to correctly cleanup in the case of SMP!!! */ 364 365 /* Disable the interrupt */ 365 - hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT, 366 - shared_sint.as_uint64); 366 + hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64); 367 367 368 - simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP); 368 + simp.as_uint64 = hv_get_msr(HV_MSR_SIMP); 369 369 /* 370 370 * In Isolation VM, sim and sief pages are allocated by 371 371 * paravisor. These pages also will be used by kdump ··· 378 382 simp.base_simp_gpa = 0; 379 383 } 380 384 381 - hv_set_register(HV_REGISTER_SIMP, simp.as_uint64); 385 + hv_set_msr(HV_MSR_SIMP, simp.as_uint64); 382 386 383 - siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP); 387 + siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP); 384 388 siefp.siefp_enabled = 0; 385 389 386 390 if (ms_hyperv.paravisor_present || hv_root_partition) { ··· 390 394 siefp.base_siefp_gpa = 0; 391 395 } 392 396 393 - hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64); 397 + hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64); 394 398 395 399 /* Disable the global synic bit */ 396 - sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL); 400 + sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL); 397 401 sctrl.enable = 0; 398 - hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64); 402 + hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64); 399 403 400 404 if (vmbus_irq != -1) 401 405 disable_percpu_irq(vmbus_irq);
+11 -11
drivers/hv/hv_common.c
··· 227 227 * contain the size of the panic data in that page. Rest of the 228 228 * registers are no-op when the NOTIFY_MSG flag is set. 229 229 */ 230 - hv_set_register(HV_REGISTER_CRASH_P0, 0); 231 - hv_set_register(HV_REGISTER_CRASH_P1, 0); 232 - hv_set_register(HV_REGISTER_CRASH_P2, 0); 233 - hv_set_register(HV_REGISTER_CRASH_P3, virt_to_phys(hv_panic_page)); 234 - hv_set_register(HV_REGISTER_CRASH_P4, bytes_written); 230 + hv_set_msr(HV_MSR_CRASH_P0, 0); 231 + hv_set_msr(HV_MSR_CRASH_P1, 0); 232 + hv_set_msr(HV_MSR_CRASH_P2, 0); 233 + hv_set_msr(HV_MSR_CRASH_P3, virt_to_phys(hv_panic_page)); 234 + hv_set_msr(HV_MSR_CRASH_P4, bytes_written); 235 235 236 236 /* 237 237 * Let Hyper-V know there is crash data available along with 238 238 * the panic message. 239 239 */ 240 - hv_set_register(HV_REGISTER_CRASH_CTL, 241 - (HV_CRASH_CTL_CRASH_NOTIFY | 242 - HV_CRASH_CTL_CRASH_NOTIFY_MSG)); 240 + hv_set_msr(HV_MSR_CRASH_CTL, 241 + (HV_CRASH_CTL_CRASH_NOTIFY | 242 + HV_CRASH_CTL_CRASH_NOTIFY_MSG)); 243 243 } 244 244 245 245 static struct kmsg_dumper hv_kmsg_dumper = { ··· 310 310 * Register for panic kmsg callback only if the right 311 311 * capability is supported by the hypervisor. 312 312 */ 313 - hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL); 313 + hyperv_crash_ctl = hv_get_msr(HV_MSR_CRASH_CTL); 314 314 if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) 315 315 hv_kmsg_dump_register(); 316 316 ··· 409 409 *inputarg = mem; 410 410 } 411 411 412 - msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX); 412 + msr_vp_index = hv_get_msr(HV_MSR_VP_INDEX); 413 413 414 414 hv_vp_index[cpu] = msr_vp_index; 415 415 ··· 506 506 */ 507 507 static u64 __hv_read_ref_counter(void) 508 508 { 509 - return hv_get_register(HV_REGISTER_TIME_REF_COUNT); 509 + return hv_get_msr(HV_MSR_TIME_REF_COUNT); 510 510 } 511 511 512 512 u64 (*hv_read_reference_counter)(void) = __hv_read_ref_counter;
+31 -1
include/asm-generic/hyperv-tlfs.h
··· 625 625 struct hv_device_interrupt_target int_target; 626 626 } __packed __aligned(8); 627 627 628 + /* 629 + * These Hyper-V registers provide information equivalent to the CPUID 630 + * instruction on x86/x64. 631 + */ 632 + #define HV_REGISTER_HYPERVISOR_VERSION 0x00000100 /*CPUID 0x40000002 */ 633 + #define HV_REGISTER_FEATURES 0x00000200 /*CPUID 0x40000003 */ 634 + #define HV_REGISTER_ENLIGHTENMENTS 0x00000201 /*CPUID 0x40000004 */ 635 + 636 + /* 637 + * Synthetic register definitions equivalent to MSRs on x86/x64 638 + */ 639 + #define HV_REGISTER_CRASH_P0 0x00000210 640 + #define HV_REGISTER_CRASH_P1 0x00000211 641 + #define HV_REGISTER_CRASH_P2 0x00000212 642 + #define HV_REGISTER_CRASH_P3 0x00000213 643 + #define HV_REGISTER_CRASH_P4 0x00000214 644 + #define HV_REGISTER_CRASH_CTL 0x00000215 645 + 646 + #define HV_REGISTER_GUEST_OSID 0x00090002 647 + #define HV_REGISTER_VP_INDEX 0x00090003 648 + #define HV_REGISTER_TIME_REF_COUNT 0x00090004 649 + #define HV_REGISTER_REFERENCE_TSC 0x00090017 650 + 651 + #define HV_REGISTER_SINT0 0x000A0000 652 + #define HV_REGISTER_SCONTROL 0x000A0010 653 + #define HV_REGISTER_SIEFP 0x000A0012 654 + #define HV_REGISTER_SIMP 0x000A0013 655 + #define HV_REGISTER_EOM 0x000A0014 656 + 657 + #define HV_REGISTER_STIMER0_CONFIG 0x000B0000 658 + #define HV_REGISTER_STIMER0_COUNT 0x000B0001 628 659 629 660 /* HvGetVpRegisters hypercall input with variable size reg name list*/ 630 661 struct hv_get_vp_registers_input { ··· 670 639 u32 name1; 671 640 } element[]; 672 641 } __packed; 673 - 674 642 675 643 /* HvGetVpRegisters returns an array of these output elements */ 676 644 struct hv_get_vp_registers_output {
+1 -1
include/asm-generic/mshyperv.h
··· 157 157 * possibly deliver another msg from the 158 158 * hypervisor 159 159 */ 160 - hv_set_register(HV_REGISTER_EOM, 0); 160 + hv_set_msr(HV_MSR_EOM, 0); 161 161 } 162 162 } 163 163