Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'fixes' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm bugfixes from Gleb Natapov:
"There is one more fix for MIPS KVM ABI here, MIPS and PPC build
breakage fixes and a couple of PPC bug fixes"

* 'fixes' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
kvm/ppc/booke64: Fix lazy ee handling in kvmppc_handle_exit()
kvm/ppc/booke: Hold srcu lock when calling gfn functions
kvm/ppc/booke64: Disable e6500 support
kvm/ppc/booke64: Fix AltiVec interrupt numbers and build breakage
mips/kvm: Use KVM_REG_MIPS and proper size indicators for *_ONE_REG
kvm: Add definition of KVM_REG_MIPS
KVM: add kvm_para_available to asm-generic/kvm_para.h

+137 -79
+39 -42
arch/mips/include/uapi/asm/kvm.h
··· 58 58 * bits[2..0] - Register 'sel' index. 59 59 * bits[7..3] - Register 'rd' index. 60 60 * bits[15..8] - Must be zero. 61 - * bits[63..16] - 1 -> CP0 registers. 61 + * bits[31..16] - 1 -> CP0 registers. 62 + * bits[51..32] - Must be zero. 63 + * bits[63..52] - As per linux/kvm.h 62 64 * 63 65 * Other sets registers may be added in the future. Each set would 64 - * have its own identifier in bits[63..16]. 65 - * 66 - * The addr field of struct kvm_one_reg must point to an aligned 67 - * 64-bit wide location. For registers that are narrower than 68 - * 64-bits, the value is stored in the low order bits of the location, 69 - * and sign extended to 64-bits. 66 + * have its own identifier in bits[31..16]. 70 67 * 71 68 * The registers defined in struct kvm_regs are also accessible, the 72 69 * id values for these are below. 73 70 */ 74 71 75 - #define KVM_REG_MIPS_R0 0 76 - #define KVM_REG_MIPS_R1 1 77 - #define KVM_REG_MIPS_R2 2 78 - #define KVM_REG_MIPS_R3 3 79 - #define KVM_REG_MIPS_R4 4 80 - #define KVM_REG_MIPS_R5 5 81 - #define KVM_REG_MIPS_R6 6 82 - #define KVM_REG_MIPS_R7 7 83 - #define KVM_REG_MIPS_R8 8 84 - #define KVM_REG_MIPS_R9 9 85 - #define KVM_REG_MIPS_R10 10 86 - #define KVM_REG_MIPS_R11 11 87 - #define KVM_REG_MIPS_R12 12 88 - #define KVM_REG_MIPS_R13 13 89 - #define KVM_REG_MIPS_R14 14 90 - #define KVM_REG_MIPS_R15 15 91 - #define KVM_REG_MIPS_R16 16 92 - #define KVM_REG_MIPS_R17 17 93 - #define KVM_REG_MIPS_R18 18 94 - #define KVM_REG_MIPS_R19 19 95 - #define KVM_REG_MIPS_R20 20 96 - #define KVM_REG_MIPS_R21 21 97 - #define KVM_REG_MIPS_R22 22 98 - #define KVM_REG_MIPS_R23 23 99 - #define KVM_REG_MIPS_R24 24 100 - #define KVM_REG_MIPS_R25 25 101 - #define KVM_REG_MIPS_R26 26 102 - #define KVM_REG_MIPS_R27 27 103 - #define KVM_REG_MIPS_R28 28 104 - #define KVM_REG_MIPS_R29 29 105 - #define KVM_REG_MIPS_R30 30 106 - #define KVM_REG_MIPS_R31 31 72 + #define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0) 73 + #define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1)
74 + #define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2) 75 + #define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3) 76 + #define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4) 77 + #define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5) 78 + #define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6) 79 + #define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7) 80 + #define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8) 81 + #define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9) 82 + #define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10) 83 + #define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11) 84 + #define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12) 85 + #define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13) 86 + #define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14) 87 + #define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15) 88 + #define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16) 89 + #define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17) 90 + #define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18) 91 + #define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19) 92 + #define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20) 93 + #define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21) 94 + #define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22) 95 + #define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23) 96 + #define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24) 97 + #define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25) 98 + #define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26) 99 + #define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27) 100 + #define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28) 101 + #define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29) 102 + #define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30)
103 + #define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31) 107 104 108 - #define KVM_REG_MIPS_HI 32 109 - #define KVM_REG_MIPS_LO 33 110 - #define KVM_REG_MIPS_PC 34 105 + #define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32) 106 + #define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33) 107 + #define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34) 111 108 112 109 /* 113 110 * KVM MIPS specific structures and definitions
+54 -29
arch/mips/kvm/kvm_mips.c
··· 485 485 return -ENOIOCTLCMD; 486 486 } 487 487 488 - #define KVM_REG_MIPS_CP0_INDEX (0x10000 + 8 * 0 + 0) 489 - #define KVM_REG_MIPS_CP0_ENTRYLO0 (0x10000 + 8 * 2 + 0) 490 - #define KVM_REG_MIPS_CP0_ENTRYLO1 (0x10000 + 8 * 3 + 0) 491 - #define KVM_REG_MIPS_CP0_CONTEXT (0x10000 + 8 * 4 + 0) 492 - #define KVM_REG_MIPS_CP0_USERLOCAL (0x10000 + 8 * 4 + 2) 493 - #define KVM_REG_MIPS_CP0_PAGEMASK (0x10000 + 8 * 5 + 0) 494 - #define KVM_REG_MIPS_CP0_PAGEGRAIN (0x10000 + 8 * 5 + 1) 495 - #define KVM_REG_MIPS_CP0_WIRED (0x10000 + 8 * 6 + 0) 496 - #define KVM_REG_MIPS_CP0_HWRENA (0x10000 + 8 * 7 + 0) 497 - #define KVM_REG_MIPS_CP0_BADVADDR (0x10000 + 8 * 8 + 0) 498 - #define KVM_REG_MIPS_CP0_COUNT (0x10000 + 8 * 9 + 0) 499 - #define KVM_REG_MIPS_CP0_ENTRYHI (0x10000 + 8 * 10 + 0) 500 - #define KVM_REG_MIPS_CP0_COMPARE (0x10000 + 8 * 11 + 0) 501 - #define KVM_REG_MIPS_CP0_STATUS (0x10000 + 8 * 12 + 0) 502 - #define KVM_REG_MIPS_CP0_CAUSE (0x10000 + 8 * 13 + 0) 503 - #define KVM_REG_MIPS_CP0_EBASE (0x10000 + 8 * 15 + 1) 504 - #define KVM_REG_MIPS_CP0_CONFIG (0x10000 + 8 * 16 + 0) 505 - #define KVM_REG_MIPS_CP0_CONFIG1 (0x10000 + 8 * 16 + 1) 506 - #define KVM_REG_MIPS_CP0_CONFIG2 (0x10000 + 8 * 16 + 2) 507 - #define KVM_REG_MIPS_CP0_CONFIG3 (0x10000 + 8 * 16 + 3) 508 - #define KVM_REG_MIPS_CP0_CONFIG7 (0x10000 + 8 * 16 + 7) 509 - #define KVM_REG_MIPS_CP0_XCONTEXT (0x10000 + 8 * 20 + 0) 510 - #define KVM_REG_MIPS_CP0_ERROREPC (0x10000 + 8 * 30 + 0) 488 + #define MIPS_CP0_32(_R, _S) \ 489 + (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S))) 490 + 491 + #define MIPS_CP0_64(_R, _S) \ 492 + (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S))) 493 + 494 + #define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0) 495 + #define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0) 496 + #define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0) 497 + #define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0) 498 + #define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2) 499 + #define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
500 + #define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1) 501 + #define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0) 502 + #define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0) 503 + #define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0) 504 + #define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0) 505 + #define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0) 506 + #define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0) 507 + #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) 508 + #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) 509 + #define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1) 510 + #define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0) 511 + #define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1) 512 + #define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2) 513 + #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3) 514 + #define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7) 515 + #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0) 516 + #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) 511 517 512 518 static u64 kvm_mips_get_one_regs[] = { 513 519 KVM_REG_MIPS_R0, ··· 573 567 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, 574 568 const struct kvm_one_reg *reg) 575 569 { 576 - u64 __user *uaddr = (u64 __user *)(long)reg->addr; 577 - 578 570 struct mips_coproc *cop0 = vcpu->arch.cop0; 579 571 s64 v; 580 572 ··· 635 631 default: 636 632 return -EINVAL; 637 633 } 638 - return put_user(v, uaddr); 634 + if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { 635 + u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; 636 + return put_user(v, uaddr64); 637 + } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { 638 + u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; 639 + u32 v32 = (u32)v; 640 + return put_user(v32, uaddr32); 641 + } else { 642 + return -EINVAL; 643 + } 639 644 } 640 645 641 646 static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, 642 647 const struct kvm_one_reg *reg) 643 648 { 644 - u64 __user *uaddr = (u64 __user *)(long)reg->addr;
645 649 struct mips_coproc *cop0 = vcpu->arch.cop0; 646 650 u64 v; 647 651 648 - if (get_user(v, uaddr) != 0) 649 - return -EFAULT; 652 + if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { 653 + u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; 654 + 655 + if (get_user(v, uaddr64) != 0) 656 + return -EFAULT; 657 + } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { 658 + u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; 659 + s32 v32; 660 + 661 + if (get_user(v32, uaddr32) != 0) 662 + return -EFAULT; 663 + v = (s64)v32; 664 + } else { 665 + return -EINVAL; 666 + } 650 667 651 668 switch (reg->id) { 652 669 case KVM_REG_MIPS_R0:
+10 -6
arch/powerpc/include/asm/kvm_asm.h
··· 54 54 #define BOOKE_INTERRUPT_DEBUG 15 55 55 56 56 /* E500 */ 57 - #define BOOKE_INTERRUPT_SPE_UNAVAIL 32 58 - #define BOOKE_INTERRUPT_SPE_FP_DATA 33 57 + #define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32 58 + #define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33 59 + /* 60 + * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines 61 + */ 62 + #define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 63 + #define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 64 + #define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 65 + #define BOOKE_INTERRUPT_ALTIVEC_ASSIST \ 66 + BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 59 67 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34 60 68 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35 61 69 #define BOOKE_INTERRUPT_DOORBELL 36 ··· 74 66 #define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39 75 67 #define BOOKE_INTERRUPT_HV_SYSCALL 40 76 68 #define BOOKE_INTERRUPT_HV_PRIV 41 77 - 78 - /* altivec */ 79 - #define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42 80 - #define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43 81 69 82 70 /* book3s */ 83 71
+5
arch/powerpc/kvm/44x_tlb.c
··· 441 441 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 442 442 struct kvmppc_44x_tlbe *tlbe; 443 443 unsigned int gtlb_index; 444 + int idx; 444 445 445 446 gtlb_index = kvmppc_get_gpr(vcpu, ra); 446 447 if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) { ··· 474 473 return EMULATE_FAIL; 475 474 } 476 475 476 + idx = srcu_read_lock(&vcpu->kvm->srcu); 477 + 477 478 if (tlbe_is_host_safe(vcpu, tlbe)) { 478 479 gva_t eaddr; 479 480 gpa_t gpaddr; ··· 491 488 492 489 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); 493 490 } 491 + 492 + srcu_read_unlock(&vcpu->kvm->srcu, idx); 494 493 495 494 trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1, 496 495 tlbe->word2);
+18
arch/powerpc/kvm/booke.c
··· 832 832 { 833 833 int r = RESUME_HOST; 834 834 int s; 835 + int idx; 836 + 837 + #ifdef CONFIG_PPC64 838 + WARN_ON(local_paca->irq_happened != 0); 839 + #endif 840 + 841 + /* 842 + * We enter with interrupts disabled in hardware, but 843 + * we need to call hard_irq_disable anyway to ensure that 844 + * the software state is kept in sync. 845 + */ 846 + hard_irq_disable(); 835 847 836 848 /* update before a new last_exit_type is rewritten */ 837 849 kvmppc_update_timing_stats(vcpu); ··· 1065 1053 break; 1066 1054 } 1067 1055 1056 + idx = srcu_read_lock(&vcpu->kvm->srcu); 1057 + 1068 1058 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); 1069 1059 gfn = gpaddr >> PAGE_SHIFT; 1070 1060 ··· 1089 1075 kvmppc_account_exit(vcpu, MMIO_EXITS); 1090 1076 } 1091 1077 1078 + srcu_read_unlock(&vcpu->kvm->srcu, idx); 1092 1079 break; 1093 1080 } 1094 1081 ··· 1113 1098 1114 1099 kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS); 1115 1100 1101 + idx = srcu_read_lock(&vcpu->kvm->srcu); 1102 + 1116 1103 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); 1117 1104 gfn = gpaddr >> PAGE_SHIFT; 1118 1105 ··· 1131 1114 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); 1132 1115 } 1133 1116 1117 + srcu_read_unlock(&vcpu->kvm->srcu, idx); 1134 1118 break; 1135 1119 } 1136 1120
+5
arch/powerpc/kvm/e500_mmu.c
··· 396 396 struct kvm_book3e_206_tlb_entry *gtlbe; 397 397 int tlbsel, esel; 398 398 int recal = 0; 399 + int idx; 399 400 400 401 tlbsel = get_tlb_tlbsel(vcpu); 401 402 esel = get_tlb_esel(vcpu, tlbsel); ··· 431 430 kvmppc_set_tlb1map_range(vcpu, gtlbe); 432 431 } 433 432 433 + idx = srcu_read_lock(&vcpu->kvm->srcu); 434 + 434 435 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ 435 436 if (tlbe_is_host_safe(vcpu, gtlbe)) { 436 437 u64 eaddr = get_tlb_eaddr(gtlbe); ··· 446 443 /* Premap the faulting page */ 447 444 kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel)); 448 445 } 446 + 447 + srcu_read_unlock(&vcpu->kvm->srcu, idx); 449 448 450 449 kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); 451 450 return EMULATE_DONE;
-2
arch/powerpc/kvm/e500mc.c
··· 177 177 r = 0; 178 178 else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0) 179 179 r = 0; 180 - else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0) 181 - r = 0; 182 180 else 183 181 r = -ENOTSUPP; 184 182
+5
include/asm-generic/kvm_para.h
··· 18 18 return 0; 19 19 } 20 20 21 + static inline bool kvm_para_available(void) 22 + { 23 + return false; 24 + } 25 + 21 26 #endif
+1
include/uapi/linux/kvm.h
··· 783 783 #define KVM_REG_IA64 0x3000000000000000ULL 784 784 #define KVM_REG_ARM 0x4000000000000000ULL 785 785 #define KVM_REG_S390 0x5000000000000000ULL 786 + #define KVM_REG_MIPS 0x7000000000000000ULL 786 787 787 788 #define KVM_REG_SIZE_SHIFT 52 788 789 #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL