Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: x86: Replace growing set of *_in_guest bools with a u64

Store each "disabled exit" boolean in a single bit rather than a byte.

No functional change intended.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Jim Mattson <jmattson@google.com>
Link: https://lore.kernel.org/r/20250530185239.2335185-2-jmattson@google.com
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Link: https://lore.kernel.org/r/20250626001225.744268-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>

Authored by Jim Mattson and committed by Sean Christopherson.
6fbef861 e88cfd50

Total: +13 −18 (13 insertions, 18 deletions across 5 files)
+1 -4
arch/x86/include/asm/kvm_host.h
··· 1392 1392 1393 1393 gpa_t wall_clock; 1394 1394 1395 - bool mwait_in_guest; 1396 - bool hlt_in_guest; 1397 - bool pause_in_guest; 1398 - bool cstate_in_guest; 1395 + u64 disabled_exits; 1399 1396 1400 1397 unsigned long irq_sources_bitmap; 1401 1398 s64 kvmclock_offset;
+1 -1
arch/x86/kvm/svm/svm.c
··· 5012 5012 } 5013 5013 5014 5014 if (!pause_filter_count || !pause_filter_thresh) 5015 - kvm->arch.pause_in_guest = true; 5015 + kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE); 5016 5016 5017 5017 if (enable_apicv) { 5018 5018 int ret = avic_vm_init(kvm);
+1 -1
arch/x86/kvm/vmx/vmx.c
··· 7515 7515 int vmx_vm_init(struct kvm *kvm) 7516 7516 { 7517 7517 if (!ple_gap) 7518 - kvm->arch.pause_in_guest = true; 7518 + kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE); 7519 7519 7520 7520 if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) { 7521 7521 switch (l1tf_mitigation) {
+1 -8
arch/x86/kvm/x86.c
··· 6616 6616 (cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE)) 6617 6617 pr_warn_once(SMT_RSB_MSG); 6618 6618 6619 - if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) 6620 - kvm->arch.pause_in_guest = true; 6621 - if (cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) 6622 - kvm->arch.mwait_in_guest = true; 6623 - if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) 6624 - kvm->arch.hlt_in_guest = true; 6625 - if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) 6626 - kvm->arch.cstate_in_guest = true; 6619 + kvm_disable_exits(kvm, cap->args[0]); 6627 6620 r = 0; 6628 6621 disable_exits_unlock: 6629 6622 mutex_unlock(&kvm->lock);
+9 -4
arch/x86/kvm/x86.h
··· 499 499 __rem; \ 500 500 }) 501 501 502 + static inline void kvm_disable_exits(struct kvm *kvm, u64 mask) 503 + { 504 + kvm->arch.disabled_exits |= mask; 505 + } 506 + 502 507 static inline bool kvm_mwait_in_guest(struct kvm *kvm) 503 508 { 504 - return kvm->arch.mwait_in_guest; 509 + return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_MWAIT; 505 510 } 506 511 507 512 static inline bool kvm_hlt_in_guest(struct kvm *kvm) 508 513 { 509 - return kvm->arch.hlt_in_guest; 514 + return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_HLT; 510 515 } 511 516 512 517 static inline bool kvm_pause_in_guest(struct kvm *kvm) 513 518 { 514 - return kvm->arch.pause_in_guest; 519 + return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_PAUSE; 515 520 } 516 521 517 522 static inline bool kvm_cstate_in_guest(struct kvm *kvm) 518 523 { 519 - return kvm->arch.cstate_in_guest; 524 + return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_CSTATE; 520 525 } 521 526 522 527 static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)