Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

entry/kvm: KVM: Move KVM details related to signal/-EINTR into KVM proper

Move KVM's morphing of pending signals into userspace exits into KVM
proper, and drop the @vcpu param from xfer_to_guest_mode_handle_work().
How KVM responds to -EINTR is a detail that really belongs in KVM itself,
and invoking kvm_handle_signal_exit() from kernel code creates an inverted
module dependency. E.g. attempting to move kvm_handle_signal_exit() into
kvm_main.c would generate a linker error when building kvm.ko as a module.

Dropping KVM details will also allow converting the KVM "entry" code into a more
generic virtualization framework so that it can be used when running as a
Hyper-V root partition.

Lastly, eliminating usage of "struct kvm_vcpu" outside of KVM is also nice
to have for KVM x86 developers, as keeping the details of kvm_vcpu purely
within KVM allows changing the layout of the structure without having to
boot into a new kernel, e.g. allows rebuilding and reloading kvm.ko with a
modified kvm_vcpu structure as part of debug/development.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Wei Liu <wei.liu@kernel.org>

authored by

Sean Christopherson and committed by
Wei Liu
6d0386ea 0ebac01a

+24 -26
+1 -2
arch/arm64/kvm/arm.c
··· 6 6 7 7 #include <linux/bug.h> 8 8 #include <linux/cpu_pm.h> 9 - #include <linux/entry-kvm.h> 10 9 #include <linux/errno.h> 11 10 #include <linux/err.h> 12 11 #include <linux/kvm_host.h> ··· 1176 1177 /* 1177 1178 * Check conditions before entering the guest 1178 1179 */ 1179 - ret = xfer_to_guest_mode_handle_work(vcpu); 1180 + ret = kvm_xfer_to_guest_mode_handle_work(vcpu); 1180 1181 if (!ret) 1181 1182 ret = 1; 1182 1183
+1 -2
arch/loongarch/kvm/vcpu.c
··· 4 4 */ 5 5 6 6 #include <linux/kvm_host.h> 7 - #include <linux/entry-kvm.h> 8 7 #include <asm/fpu.h> 9 8 #include <asm/lbt.h> 10 9 #include <asm/loongarch.h> ··· 250 251 /* 251 252 * Check conditions before entering the guest 252 253 */ 253 - ret = xfer_to_guest_mode_handle_work(vcpu); 254 + ret = kvm_xfer_to_guest_mode_handle_work(vcpu); 254 255 if (ret < 0) 255 256 return ret; 256 257
+1 -2
arch/riscv/kvm/vcpu.c
··· 7 7 */ 8 8 9 9 #include <linux/bitops.h> 10 - #include <linux/entry-kvm.h> 11 10 #include <linux/errno.h> 12 11 #include <linux/err.h> 13 12 #include <linux/kdebug.h> ··· 909 910 run->exit_reason = KVM_EXIT_UNKNOWN; 910 911 while (ret > 0) { 911 912 /* Check conditions before entering the guest */ 912 - ret = xfer_to_guest_mode_handle_work(vcpu); 913 + ret = kvm_xfer_to_guest_mode_handle_work(vcpu); 913 914 if (ret) 914 915 continue; 915 916 ret = 1;
-1
arch/x86/kvm/vmx/vmx.c
··· 28 28 #include <linux/slab.h> 29 29 #include <linux/tboot.h> 30 30 #include <linux/trace_events.h> 31 - #include <linux/entry-kvm.h> 32 31 33 32 #include <asm/apic.h> 34 33 #include <asm/asm.h>
+1 -2
arch/x86/kvm/x86.c
··· 59 59 #include <linux/sched/stat.h> 60 60 #include <linux/sched/isolation.h> 61 61 #include <linux/mem_encrypt.h> 62 - #include <linux/entry-kvm.h> 63 62 #include <linux/suspend.h> 64 63 #include <linux/smp.h> 65 64 ··· 11240 11241 11241 11242 if (__xfer_to_guest_mode_work_pending()) { 11242 11243 kvm_vcpu_srcu_read_unlock(vcpu); 11243 - r = xfer_to_guest_mode_handle_work(vcpu); 11244 + r = kvm_xfer_to_guest_mode_handle_work(vcpu); 11244 11245 kvm_vcpu_srcu_read_lock(vcpu); 11245 11246 if (r) 11246 11247 return r;
+3 -8
include/linux/entry-kvm.h
··· 21 21 _TIF_NOTIFY_SIGNAL | _TIF_NOTIFY_RESUME | \ 22 22 ARCH_XFER_TO_GUEST_MODE_WORK) 23 23 24 - struct kvm_vcpu; 25 - 26 24 /** 27 25 * arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest 28 26 * mode work handling function. ··· 30 32 * Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be 31 33 * replaced by architecture specific code. 32 34 */ 33 - static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu, 34 - unsigned long ti_work); 35 + static inline int arch_xfer_to_guest_mode_handle_work(unsigned long ti_work); 35 36 36 37 #ifndef arch_xfer_to_guest_mode_work 37 - static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu, 38 - unsigned long ti_work) 38 + static inline int arch_xfer_to_guest_mode_handle_work(unsigned long ti_work) 39 39 { 40 40 return 0; 41 41 } ··· 42 46 /** 43 47 * xfer_to_guest_mode_handle_work - Check and handle pending work which needs 44 48 * to be handled before going to guest mode 45 - * @vcpu: Pointer to current's VCPU data 46 49 * 47 50 * Returns: 0 or an error code 48 51 */ 49 - int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu); 52 + int xfer_to_guest_mode_handle_work(void); 50 53 51 54 /** 52 55 * xfer_to_guest_mode_prepare - Perform last minute preparation work that
+12 -1
include/linux/kvm_host.h
··· 2 2 #ifndef __KVM_HOST_H 3 3 #define __KVM_HOST_H 4 4 5 - 5 + #include <linux/entry-kvm.h> 6 6 #include <linux/types.h> 7 7 #include <linux/hardirq.h> 8 8 #include <linux/list.h> ··· 2449 2449 { 2450 2450 vcpu->run->exit_reason = KVM_EXIT_INTR; 2451 2451 vcpu->stat.signal_exits++; 2452 + } 2453 + 2454 + static inline int kvm_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu) 2455 + { 2456 + int r = xfer_to_guest_mode_handle_work(); 2457 + 2458 + if (r) { 2459 + WARN_ON_ONCE(r != -EINTR); 2460 + kvm_handle_signal_exit(vcpu); 2461 + } 2462 + return r; 2452 2463 } 2453 2464 #endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */ 2454 2465
+5 -8
kernel/entry/kvm.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 3 3 #include <linux/entry-kvm.h> 4 - #include <linux/kvm_host.h> 5 4 6 - static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work) 5 + static int xfer_to_guest_mode_work(unsigned long ti_work) 7 6 { 8 7 do { 9 8 int ret; 10 9 11 - if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { 12 - kvm_handle_signal_exit(vcpu); 10 + if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) 13 11 return -EINTR; 14 - } 15 12 16 13 if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)) 17 14 schedule(); ··· 16 19 if (ti_work & _TIF_NOTIFY_RESUME) 17 20 resume_user_mode_work(NULL); 18 21 19 - ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work); 22 + ret = arch_xfer_to_guest_mode_handle_work(ti_work); 20 23 if (ret) 21 24 return ret; 22 25 ··· 25 28 return 0; 26 29 } 27 30 28 - int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu) 31 + int xfer_to_guest_mode_handle_work(void) 29 32 { 30 33 unsigned long ti_work; 31 34 ··· 41 44 if (!(ti_work & XFER_TO_GUEST_MODE_WORK)) 42 45 return 0; 43 46 44 - return xfer_to_guest_mode_work(vcpu, ti_work); 47 + return xfer_to_guest_mode_work(ti_work); 45 48 } 46 49 EXPORT_SYMBOL_GPL(xfer_to_guest_mode_handle_work);