Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Book3S: MMIO emulation support for little endian guests

MMIO emulation reads the last instruction executed by the guest
and then emulates. If the guest is running in Little Endian order,
or, more generally, in an endian order different from that of the host, the
instruction needs to be byte-swapped before being emulated.

This patch adds a helper routine which tests the endian order of
the host and the guest in order to decide whether a byteswap is
needed or not. It is then used to byteswap the last instruction
of the guest in the endian order of the host before MMIO emulation
is performed.

Finally, kvmppc_handle_load() and kvmppc_handle_store() are modified
to reverse the endianness of the MMIO if required.

Signed-off-by: Cédric Le Goater <clg@fr.ibm.com>
[agraf: add booke handling]
Signed-off-by: Alexander Graf <agraf@suse.de>

authored by

Cédric Le Goater and committed by
Alexander Graf
73601775 7a8ff56b

+42 -10
+7 -1
arch/powerpc/include/asm/kvm_book3s.h
··· 264 264 return vcpu->arch.pc; 265 265 } 266 266 267 + static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) 268 + { 269 + return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE); 270 + } 271 + 267 272 static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc) 268 273 { 269 274 /* Load the instruction manually if it failed to do so in the ··· 276 271 if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) 277 272 kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false); 278 273 279 - return vcpu->arch.last_inst; 274 + return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) : 275 + vcpu->arch.last_inst; 280 276 } 281 277 282 278 static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
+6
arch/powerpc/include/asm/kvm_booke.h
··· 63 63 return vcpu->arch.xer; 64 64 } 65 65 66 + static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) 67 + { 68 + /* XXX Would need to check TLB entry */ 69 + return false; 70 + } 71 + 66 72 static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) 67 73 { 68 74 return vcpu->arch.last_inst;
+4 -3
arch/powerpc/include/asm/kvm_ppc.h
··· 54 54 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); 55 55 extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 56 56 unsigned int rt, unsigned int bytes, 57 - int is_bigendian); 57 + int is_default_endian); 58 58 extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 59 59 unsigned int rt, unsigned int bytes, 60 - int is_bigendian); 60 + int is_default_endian); 61 61 extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 62 - u64 val, unsigned int bytes, int is_bigendian); 62 + u64 val, unsigned int bytes, 63 + int is_default_endian); 63 64 64 65 extern int kvmppc_emulate_instruction(struct kvm_run *run, 65 66 struct kvm_vcpu *vcpu);
+1 -1
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 558 558 * we just return and retry the instruction. 559 559 */ 560 560 561 - if (instruction_is_store(vcpu->arch.last_inst) != !!is_store) 561 + if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store) 562 562 return RESUME_GUEST; 563 563 564 564 /*
-1
arch/powerpc/kvm/emulate.c
··· 219 219 * lmw 220 220 * stmw 221 221 * 222 - * XXX is_bigendian should depend on MMU mapping or MSR[LE] 223 222 */ 224 223 /* XXX Should probably auto-generate instruction decoding for a particular core 225 224 * from opcode tables in the future. */
+24 -4
arch/powerpc/kvm/powerpc.c
··· 673 673 } 674 674 675 675 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 676 - unsigned int rt, unsigned int bytes, int is_bigendian) 676 + unsigned int rt, unsigned int bytes, 677 + int is_default_endian) 677 678 { 678 679 int idx, ret; 680 + int is_bigendian; 681 + 682 + if (kvmppc_need_byteswap(vcpu)) { 683 + /* Default endianness is "little endian". */ 684 + is_bigendian = !is_default_endian; 685 + } else { 686 + /* Default endianness is "big endian". */ 687 + is_bigendian = is_default_endian; 688 + } 679 689 680 690 if (bytes > sizeof(run->mmio.data)) { 681 691 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, ··· 721 711 722 712 /* Same as above, but sign extends */ 723 713 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 724 - unsigned int rt, unsigned int bytes, int is_bigendian) 714 + unsigned int rt, unsigned int bytes, 715 + int is_default_endian) 725 716 { 726 717 int r; 727 718 728 719 vcpu->arch.mmio_sign_extend = 1; 729 - r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian); 720 + r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian); 730 721 731 722 return r; 732 723 } 733 724 734 725 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 735 - u64 val, unsigned int bytes, int is_bigendian) 726 + u64 val, unsigned int bytes, int is_default_endian) 736 727 { 737 728 void *data = run->mmio.data; 738 729 int idx, ret; 730 + int is_bigendian; 731 + 732 + if (kvmppc_need_byteswap(vcpu)) { 733 + /* Default endianness is "little endian". */ 734 + is_bigendian = !is_default_endian; 735 + } else { 736 + /* Default endianness is "big endian". */ 737 + is_bigendian = is_default_endian; 738 + } 739 739 740 740 if (bytes > sizeof(run->mmio.data)) { 741 741 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,