Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Move guest enter/exit down into subarch-specific code

Instead of doing the kvm_guest_enter/exit() and local_irq_dis/enable()
calls in powerpc.c, this moves them down into the subarch-specific
book3s_pr.c and booke.c. This eliminates an extra local_irq_enable()
call in book3s_pr.c, and will be needed when we add SMT4 guest
support in the book3s hypervisor mode code.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>

Authored by Paul Mackerras; committed by Avi Kivity.
df6909e5 f9e0554d

+22 -12
+1
arch/powerpc/include/asm/kvm_ppc.h
@@ -42,6 +42,7 @@
 	EMULATE_AGAIN,    /* something went wrong. go again */
 };
 
+extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 extern char kvmppc_handlers_start[];
 extern unsigned long kvmppc_handler_len;
+1 -1
arch/powerpc/kvm/book3s_interrupts.S
@@ -85,7 +85,7 @@
  * r3: kvm_run pointer
  * r4: vcpu pointer
  */
-_GLOBAL(__kvmppc_vcpu_entry)
+_GLOBAL(__kvmppc_vcpu_run)
 
 kvm_start_entry:
 	/* Write correct stack frame */
+6 -6
arch/powerpc/kvm/book3s_pr.c
@@ -891,8 +891,7 @@
 	vfree(vcpu_book3s);
 }
 
-extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
 	double fpr[32][TS_FPRWIDTH];
@@ -943,14 +944,15 @@
 	/* Remember the MSR with disabled extensions */
 	ext_msr = current->thread.regs->msr;
 
-	/* XXX we get called with irq disabled - change that! */
-	local_irq_enable();
-
 	/* Preload FPU if it's enabled */
 	if (vcpu->arch.shared->msr & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
-	ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
+	kvm_guest_enter();
+
+	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+
+	kvm_guest_exit();
 
 	local_irq_disable();
 
+13
arch/powerpc/kvm/booke.c
@@ -312,6 +312,19 @@
 	vcpu->arch.shared->int_pending = 0;
 }
 
+int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+	int ret;
+
+	local_irq_disable();
+	kvm_guest_enter();
+	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+	kvm_guest_exit();
+	local_irq_enable();
+
+	return ret;
+}
+
 /**
  * kvmppc_handle_exit
  *
+1 -5
arch/powerpc/kvm/powerpc.c
@@ -500,11 +500,7 @@
 
 	kvmppc_core_deliver_interrupts(vcpu);
 
-	local_irq_disable();
-	kvm_guest_enter();
-	r = __kvmppc_vcpu_run(run, vcpu);
-	kvm_guest_exit();
-	local_irq_enable();
+	r = kvmppc_vcpu_run(run, vcpu);
 
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);