
KVM: PPC: Convert MSR to shared page

One of the most obvious registers to share with the guest directly is the
MSR. The MSR contains the "interrupts enabled" flag which the guest has to
toggle in critical sections.

So in order to reduce the overhead of enabling and disabling interrupts, let's
put the MSR into the shared page. Keep in mind that even though the guest can
read its contents in full, writing to it doesn't always update all state: only
a few safe fields can be changed without hypervisor interaction. See the
documentation for the list of MSR bits that are safe to set from inside the
guest.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>

Authored by Alexander Graf, committed by Avi Kivity
commit 666e7252, parent 96bc451a
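
In code terms: the patch introduces struct kvm_vcpu_arch_shared (see the
kvm_para.h hunk below) and moves the MSR into it, so every host-side access
becomes vcpu->arch.shared->msr. A minimal C sketch of the resulting pattern;
the guest_irqs_enabled() helper is an illustration, not part of the patch:

    #include <linux/types.h>

    /* Layout of the page that will be shared with the guest, exactly as
     * introduced in the kvm_para.h hunk below. */
    struct kvm_vcpu_arch_shared {
        __u64 msr;
    };

    /* Hypothetical helper: the interrupt-delivery checks touched by this
     * patch all reduce to a test like this one. MSR_EE is the "external
     * interrupts enabled" bit from asm/reg.h. */
    static inline bool guest_irqs_enabled(struct kvm_vcpu_arch_shared *shared)
    {
        return (shared->msr & MSR_EE) != 0;
    }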

18 files changed, 93 insertions(+), 84 deletions(-)
arch/powerpc/include/asm/kvm_host.h (-1)
···
     u32 cr;
 #endif
 
-    ulong msr;
 #ifdef CONFIG_PPC_BOOK3S
     ulong shadow_msr;
     ulong hflags;
arch/powerpc/include/asm/kvm_para.h (+1)
···
 #include <linux/types.h>
 
 struct kvm_vcpu_arch_shared {
+    __u64 msr;
 };
 
 #ifdef __KERNEL__
arch/powerpc/kernel/asm-offsets.c (+1 -1)
···
     DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
     DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
     DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
-    DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
     DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
     DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
     DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
     DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
     DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
     DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
+    DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
 
     /* book3s */
 #ifdef CONFIG_PPC_BOOK3S
arch/powerpc/kvm/44x_tlb.c (+4 -4)
···
 
 int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-    unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+    unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 
     return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
 
 int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-    unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+    unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
 
     return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
···
 
     stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
     stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
-                                               vcpu->arch.msr & MSR_PR);
+                                               vcpu->arch.shared->msr & MSR_PR);
     stlbe.tid = !(asid & 0xff);
 
     /* Keep track of the reference so we can properly release it later. */
···
 
     /* Does it match current guest AS? */
     /* XXX what about IS != DS? */
-    if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+    if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
         return 0;
 
     gpa = get_tlb_raddr(tlbe);
arch/powerpc/kvm/book3s.c (+35 -30)
···
 
 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
-    vcpu->arch.shadow_msr = vcpu->arch.msr;
+    ulong smsr = vcpu->arch.shared->msr;
+
     /* Guest MSR values */
-    vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
-                             MSR_BE | MSR_DE;
+    smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
     /* Process MSR values */
-    vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
-                             MSR_EE;
+    smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
     /* External providers the guest reserved */
-    vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
+    smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
     /* 64-bit Process MSR values */
 #ifdef CONFIG_PPC_BOOK3S_64
-    vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
+    smsr |= MSR_ISF | MSR_HV;
 #endif
+    vcpu->arch.shadow_msr = smsr;
 }
 
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
-    ulong old_msr = vcpu->arch.msr;
+    ulong old_msr = vcpu->arch.shared->msr;
 
 #ifdef EXIT_DEBUG
     printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
 
     msr &= to_book3s(vcpu)->msr_mask;
-    vcpu->arch.msr = msr;
+    vcpu->arch.shared->msr = msr;
     kvmppc_recalc_shadow_msr(vcpu);
 
     if (msr & (MSR_WE|MSR_POW)) {
···
         }
     }
 
-    if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+    if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
         (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
         kvmppc_mmu_flush_segments(vcpu);
         kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
     }
 
     /* Preload FPU if it's enabled */
-    if (vcpu->arch.msr & MSR_FP)
+    if (vcpu->arch.shared->msr & MSR_FP)
         kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
     vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
-    vcpu->arch.srr1 = vcpu->arch.msr | flags;
+    vcpu->arch.srr1 = vcpu->arch.shared->msr | flags;
     kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
     vcpu->arch.mmu.reset_msr(vcpu);
 }
···
 
     switch (priority) {
     case BOOK3S_IRQPRIO_DECREMENTER:
-        deliver = vcpu->arch.msr & MSR_EE;
+        deliver = vcpu->arch.shared->msr & MSR_EE;
         vec = BOOK3S_INTERRUPT_DECREMENTER;
         break;
     case BOOK3S_IRQPRIO_EXTERNAL:
-        deliver = vcpu->arch.msr & MSR_EE;
+        deliver = vcpu->arch.shared->msr & MSR_EE;
         vec = BOOK3S_INTERRUPT_EXTERNAL;
         break;
     case BOOK3S_IRQPRIO_SYSTEM_RESET:
···
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
                         struct kvmppc_pte *pte)
 {
-    int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
+    int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
     int r;
 
     if (relocated) {
···
     int page_found = 0;
     struct kvmppc_pte pte;
     bool is_mmio = false;
-    bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
-    bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+    bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
+    bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
     u64 vsid;
 
     relocated = data ? dr : ir;
···
         pte.vpage = eaddr >> 12;
     }
 
-    switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+    switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
     case 0:
         pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
         break;
···
     case MSR_IR:
         vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
 
-        if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+        if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
             pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
         else
             pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
···
         /* Page not found in guest PTE entries */
         vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
         to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
-        vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+        vcpu->arch.shared->msr |=
+            (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
         kvmppc_book3s_queue_irqprio(vcpu, vec);
     } else if (page_found == -EPERM) {
         /* Storage protection */
         vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
         to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
         to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-        vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+        vcpu->arch.shared->msr |=
+            (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
         kvmppc_book3s_queue_irqprio(vcpu, vec);
     } else if (page_found == -EINVAL) {
         /* Page not found in guest SLB */
···
 
     ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
     if (ret == -ENOENT) {
-        vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
-        vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
-        vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+        ulong msr = vcpu->arch.shared->msr;
+
+        msr = kvmppc_set_field(msr, 33, 33, 1);
+        msr = kvmppc_set_field(msr, 34, 36, 0);
+        vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
         kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
         return EMULATE_AGAIN;
     }
···
     if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
         return RESUME_GUEST;
 
-    if (!(vcpu->arch.msr & msr)) {
+    if (!(vcpu->arch.shared->msr & msr)) {
         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
         return RESUME_GUEST;
     }
···
     if ((exit_nr != 0x900) && (exit_nr != 0x500))
         printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
             exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
-            vcpu->arch.msr);
+            vcpu->arch.shared->msr);
 #endif
     kvm_resched(vcpu);
     switch (exit_nr) {
···
             kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
             r = RESUME_GUEST;
         } else {
-            vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+            vcpu->arch.shared->msr |=
+                to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
             kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
             kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
             r = RESUME_GUEST;
···
 program_interrupt:
         flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
 
-        if (vcpu->arch.msr & MSR_PR) {
+        if (vcpu->arch.shared->msr & MSR_PR) {
 #ifdef EXIT_DEBUG
             printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 #endif
···
     regs->ctr = kvmppc_get_ctr(vcpu);
     regs->lr = kvmppc_get_lr(vcpu);
     regs->xer = kvmppc_get_xer(vcpu);
-    regs->msr = vcpu->arch.msr;
+    regs->msr = vcpu->arch.shared->msr;
     regs->srr0 = vcpu->arch.srr0;
     regs->srr1 = vcpu->arch.srr1;
     regs->pid = vcpu->arch.pid;
···
     local_irq_enable();
 
     /* Preload FPU if it's enabled */
-    if (vcpu->arch.msr & MSR_FP)
+    if (vcpu->arch.shared->msr & MSR_FP)
         kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
     ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
arch/powerpc/kvm/book3s_32_mmu.c (+6 -6)
···
         else
             bat = &vcpu_book3s->ibat[i];
 
-        if (vcpu->arch.msr & MSR_PR) {
+        if (vcpu->arch.shared->msr & MSR_PR) {
             if (!bat->vp)
                 continue;
         } else {
···
             pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF);
             pp = pteg[i+1] & 3;
 
-            if ((sre->Kp && (vcpu->arch.msr & MSR_PR)) ||
-                (sre->Ks && !(vcpu->arch.msr & MSR_PR)))
+            if ((sre->Kp && (vcpu->arch.shared->msr & MSR_PR)) ||
+                (sre->Ks && !(vcpu->arch.shared->msr & MSR_PR)))
                 pp |= 4;
 
             pte->may_write = false;
···
     struct kvmppc_sr *sr;
     u64 gvsid = esid;
 
-    if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+    if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
         sr = find_sr(to_book3s(vcpu), ea);
         if (sr->valid)
             gvsid = sr->vsid;
···
     /* In case we only have one of MSR_IR or MSR_DR set, let's put
        that in the real-mode context (and hope RM doesn't access
        high memory) */
-    switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+    switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
     case 0:
         *vsid = VSID_REAL | esid;
         break;
···
         BUG();
     }
 
-    if (vcpu->arch.msr & MSR_PR)
+    if (vcpu->arch.shared->msr & MSR_PR)
         *vsid |= VSID_PR;
 
     return 0;
arch/powerpc/kvm/book3s_32_mmu_host.c (+2 -2)
···
     struct kvmppc_sid_map *map;
     u16 sid_map_mask;
 
-    if (vcpu->arch.msr & MSR_PR)
+    if (vcpu->arch.shared->msr & MSR_PR)
         gvsid |= VSID_PR;
 
     sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
···
     u16 sid_map_mask;
     static int backwards_map = 0;
 
-    if (vcpu->arch.msr & MSR_PR)
+    if (vcpu->arch.shared->msr & MSR_PR)
         gvsid |= VSID_PR;
 
     /* We might get collisions that trap in preceding order, so let's
arch/powerpc/kvm/book3s_64_mmu.c (+6 -6)
···
         goto no_page_found;
     }
 
-    if ((vcpu->arch.msr & MSR_PR) && slbe->Kp)
+    if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
         key = 4;
-    else if (!(vcpu->arch.msr & MSR_PR) && slbe->Ks)
+    else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
         key = 4;
 
     for (i=0; i<16; i+=2) {
···
     for (i = 1; i < vcpu_book3s->slb_nr; i++)
         vcpu_book3s->slb[i].valid = false;
 
-    if (vcpu->arch.msr & MSR_IR) {
+    if (vcpu->arch.shared->msr & MSR_IR) {
         kvmppc_mmu_flush_segments(vcpu);
         kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
     }
···
     struct kvmppc_slb *slb;
     u64 gvsid = esid;
 
-    if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+    if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
         slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
         if (slb)
             gvsid = slb->vsid;
     }
 
-    switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+    switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
     case 0:
         *vsid = VSID_REAL | esid;
         break;
···
         break;
     }
 
-    if (vcpu->arch.msr & MSR_PR)
+    if (vcpu->arch.shared->msr & MSR_PR)
         *vsid |= VSID_PR;
 
     return 0;
arch/powerpc/kvm/book3s_64_mmu_host.c (+2 -2)
···
     struct kvmppc_sid_map *map;
     u16 sid_map_mask;
 
-    if (vcpu->arch.msr & MSR_PR)
+    if (vcpu->arch.shared->msr & MSR_PR)
         gvsid |= VSID_PR;
 
     sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
···
     u16 sid_map_mask;
     static int backwards_map = 0;
 
-    if (vcpu->arch.msr & MSR_PR)
+    if (vcpu->arch.shared->msr & MSR_PR)
         gvsid |= VSID_PR;
 
     /* We might get collisions that trap in preceding order, so let's
arch/powerpc/kvm/book3s_emulate.c (+5 -4)
···
     case 31:
         switch (get_xop(inst)) {
         case OP_31_XOP_MFMSR:
-            kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr);
+            kvmppc_set_gpr(vcpu, get_rt(inst),
+                           vcpu->arch.shared->msr);
             break;
         case OP_31_XOP_MTMSRD:
         {
             ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
             if (inst & 0x10000) {
-                vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
-                vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
+                vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
+                vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
             } else
                 kvmppc_set_msr(vcpu, rs);
             break;
···
                 ra = kvmppc_get_gpr(vcpu, get_ra(inst));
 
             addr = (ra + rb) & ~31ULL;
-            if (!(vcpu->arch.msr & MSR_SF))
+            if (!(vcpu->arch.shared->msr & MSR_SF))
                 addr &= 0xffffffff;
             vaddr = addr;
arch/powerpc/kvm/book3s_paired_singles.c (+4 -3)
···
 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
 {
     u64 dsisr;
+    struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
 
-    vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 36, 0);
-    vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+    shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0);
+    shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0);
     vcpu->arch.dear = eaddr;
     /* Page Fault */
     dsisr = kvmppc_set_field(0, 33, 33, 1);
···
     if (!kvmppc_inst_is_paired_single(vcpu, inst))
         return EMULATE_FAIL;
 
-    if (!(vcpu->arch.msr & MSR_FP)) {
+    if (!(vcpu->arch.shared->msr & MSR_FP)) {
         kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
         return EMULATE_AGAIN;
     }
arch/powerpc/kvm/booke.c (+10 -10)
···
 {
     int i;
 
-    printk("pc: %08lx msr: %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
+    printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
     printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
     printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);
···
         break;
     case BOOKE_IRQPRIO_CRITICAL:
     case BOOKE_IRQPRIO_WATCHDOG:
-        allowed = vcpu->arch.msr & MSR_CE;
+        allowed = vcpu->arch.shared->msr & MSR_CE;
         msr_mask = MSR_ME;
         break;
     case BOOKE_IRQPRIO_MACHINE_CHECK:
-        allowed = vcpu->arch.msr & MSR_ME;
+        allowed = vcpu->arch.shared->msr & MSR_ME;
         msr_mask = 0;
         break;
     case BOOKE_IRQPRIO_EXTERNAL:
     case BOOKE_IRQPRIO_DECREMENTER:
     case BOOKE_IRQPRIO_FIT:
-        allowed = vcpu->arch.msr & MSR_EE;
+        allowed = vcpu->arch.shared->msr & MSR_EE;
         msr_mask = MSR_CE|MSR_ME|MSR_DE;
         break;
     case BOOKE_IRQPRIO_DEBUG:
-        allowed = vcpu->arch.msr & MSR_DE;
+        allowed = vcpu->arch.shared->msr & MSR_DE;
         msr_mask = MSR_ME;
         break;
     }
 
     if (allowed) {
         vcpu->arch.srr0 = vcpu->arch.pc;
-        vcpu->arch.srr1 = vcpu->arch.msr;
+        vcpu->arch.srr1 = vcpu->arch.shared->msr;
         vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
         if (update_esr == true)
             vcpu->arch.esr = vcpu->arch.queued_esr;
         if (update_dear == true)
             vcpu->arch.dear = vcpu->arch.queued_dear;
-        kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);
+        kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
 
         clear_bit(priority, &vcpu->arch.pending_exceptions);
     }
···
         break;
 
     case BOOKE_INTERRUPT_PROGRAM:
-        if (vcpu->arch.msr & MSR_PR) {
+        if (vcpu->arch.shared->msr & MSR_PR) {
             /* Program traps generated by user-level software must be handled
              * by the guest kernel. */
             kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
···
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
     vcpu->arch.pc = 0;
-    vcpu->arch.msr = 0;
+    vcpu->arch.shared->msr = 0;
     kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
 
     vcpu->arch.shadow_pid = 1;
···
     regs->ctr = vcpu->arch.ctr;
     regs->lr = vcpu->arch.lr;
     regs->xer = kvmppc_get_xer(vcpu);
-    regs->msr = vcpu->arch.msr;
+    regs->msr = vcpu->arch.shared->msr;
     regs->srr0 = vcpu->arch.srr0;
     regs->srr1 = vcpu->arch.srr1;
     regs->pid = vcpu->arch.pid;
arch/powerpc/kvm/booke.h (+3 -3)
···
  * changing. */
 static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 {
-    if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
+    if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
         kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
 
-    vcpu->arch.msr = new_msr;
+    vcpu->arch.shared->msr = new_msr;
 
-    if (vcpu->arch.msr & MSR_WE) {
+    if (vcpu->arch.shared->msr & MSR_WE) {
         kvm_vcpu_block(vcpu);
         kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
     };
arch/powerpc/kvm/booke_emulate.c (+3 -3)
···
 
     case OP_31_XOP_MFMSR:
         rt = get_rt(inst);
-        kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr);
+        kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
         kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
         break;
···
 
     case OP_31_XOP_WRTEE:
         rs = get_rs(inst);
-        vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+        vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
                          | (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
         kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
         break;
 
     case OP_31_XOP_WRTEEI:
-        vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+        vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
                          | (inst & MSR_EE);
         kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
         break;
arch/powerpc/kvm/booke_interrupts.S (+2 -1)
···
     lwz r8, VCPU_GPR(r8)(r4)
     lwz r3, VCPU_PC(r4)
     mtsrr0 r3
-    lwz r3, VCPU_MSR(r4)
+    lwz r3, VCPU_SHARED(r4)
+    lwz r3, VCPU_SHARED_MSR(r3)
     oris r3, r3, KVMPPC_MSR_MASK@h
     ori r3, r3, KVMPPC_MSR_MASK@l
     mtsrr1 r3
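
The assembly change above is the cost of the conversion in this exit path:
two dependent loads (the shared-page pointer, then the msr field at
VCPU_SHARED_MSR within that page) instead of a single load of VCPU_MSR.
The C equivalent, shown for illustration only:

    ulong msr;

    msr = vcpu->arch.msr;          /* before: field inside the vcpu struct */
    msr = vcpu->arch.shared->msr;  /* after: one extra dereference */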
arch/powerpc/kvm/e500_tlb.c (+6 -6)
···
         | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
     stlbe->mas2 = (gvaddr & MAS2_EPN)
         | e500_shadow_mas2_attrib(gtlbe->mas2,
-                vcpu_e500->vcpu.arch.msr & MSR_PR);
+                vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
     stlbe->mas3 = (hpaddr & MAS3_RPN)
         | e500_shadow_mas3_attrib(gtlbe->mas3,
-                vcpu_e500->vcpu.arch.msr & MSR_PR);
+                vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
     stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;
 
     trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
···
 
 int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-    unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+    unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 
     return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
 }
 
 int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-    unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+    unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
 
     return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
 }
 
 void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
 {
-    unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+    unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 
     kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
 }
 
 void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
 {
-    unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+    unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
 
     kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
 }
arch/powerpc/kvm/e500_tlb.h (+1 -1)
···
 
     /* Does it match current guest AS? */
     /* XXX what about IS != DS? */
-    if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+    if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
         return 0;
 
     gpa = get_tlb_raddr(tlbe);
arch/powerpc/kvm/powerpc.c (+2 -1)
···
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-    return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
+    return !(v->arch.shared->msr & MSR_WE) ||
+           !!(v->arch.pending_exceptions);
 }