Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'kvm-ppc-next-5.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into HEAD

PPC KVM update for 5.8

- Updates and bug fixes for secure guest support.
- Other minor bug fixes and cleanups.

+276 -237
+7 -9
arch/powerpc/include/asm/kvm_book3s.h
··· 155 155 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr); 156 156 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size); 157 157 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu); 158 - extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run, 159 - struct kvm_vcpu *vcpu, unsigned long addr, 160 - unsigned long status); 158 + extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu, 159 + unsigned long addr, unsigned long status); 161 160 extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, 162 161 unsigned long slb_v, unsigned long valid); 163 - extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, 162 + extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu, 164 163 unsigned long gpa, gva_t ea, int is_store); 165 164 166 165 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte); ··· 173 174 extern int kvmppc_mmu_hv_init(void); 174 175 extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc); 175 176 176 - extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run, 177 - struct kvm_vcpu *vcpu, 177 + extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu, 178 178 unsigned long ea, unsigned long dsisr); 179 179 extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid, 180 180 gva_t eaddr, void *to, void *from, ··· 232 234 extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, 233 235 bool upper, u32 val); 234 236 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); 235 - extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); 237 + extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu); 236 238 extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, 237 239 bool writing, bool *writable); 238 240 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, ··· 298 300 void 
kvmhv_release_all_nested(struct kvm *kvm); 299 301 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu); 300 302 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu); 301 - int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu, 303 + int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, 302 304 u64 time_limit, unsigned long lpcr); 303 305 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr); 304 306 void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu, 305 307 struct hv_guest_state *hr); 306 - long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu); 308 + long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu); 307 309 308 310 void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); 309 311
-1
arch/powerpc/include/asm/kvm_host.h
··· 795 795 struct mmio_hpte_cache_entry *pgfault_cache; 796 796 797 797 struct task_struct *run_task; 798 - struct kvm_run *kvm_run; 799 798 800 799 spinlock_t vpa_update_lock; 801 800 struct kvmppc_vpa vpa;
+13 -14
arch/powerpc/include/asm/kvm_ppc.h
··· 58 58 XLATE_WRITE /* check for write permissions */ 59 59 }; 60 60 61 - extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 62 - extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); 61 + extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu); 62 + extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu); 63 63 extern void kvmppc_handler_highmem(void); 64 64 65 65 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); 66 - extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 66 + extern int kvmppc_handle_load(struct kvm_vcpu *vcpu, 67 67 unsigned int rt, unsigned int bytes, 68 68 int is_default_endian); 69 - extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 69 + extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu, 70 70 unsigned int rt, unsigned int bytes, 71 71 int is_default_endian); 72 - extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 72 + extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu, 73 73 unsigned int rt, unsigned int bytes, 74 74 int is_default_endian, int mmio_sign_extend); 75 - extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 75 + extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, 76 76 unsigned int rt, unsigned int bytes, int is_default_endian); 77 - extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 77 + extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, 78 78 unsigned int rs, unsigned int bytes, int is_default_endian); 79 - extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 79 + extern int kvmppc_handle_store(struct kvm_vcpu *vcpu, 80 80 u64 val, unsigned int bytes, 81 81 int is_default_endian); 82 - extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 82 + extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu, 83 83 int rs, unsigned int bytes, 84 84 int is_default_endian); 85 85 ··· 90 
90 bool data); 91 91 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, 92 92 bool data); 93 - extern int kvmppc_emulate_instruction(struct kvm_run *run, 94 - struct kvm_vcpu *vcpu); 93 + extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu); 95 94 extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu); 96 - extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); 95 + extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu); 97 96 extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); 98 97 extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb); 99 98 extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu); ··· 266 267 void (*vcpu_put)(struct kvm_vcpu *vcpu); 267 268 void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags); 268 269 void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr); 269 - int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu); 270 + int (*vcpu_run)(struct kvm_vcpu *vcpu); 270 271 int (*vcpu_create)(struct kvm_vcpu *vcpu); 271 272 void (*vcpu_free)(struct kvm_vcpu *vcpu); 272 273 int (*check_requests)(struct kvm_vcpu *vcpu); ··· 290 291 int (*init_vm)(struct kvm *kvm); 291 292 void (*destroy_vm)(struct kvm *kvm); 292 293 int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info); 293 - int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu, 294 + int (*emulate_op)(struct kvm_vcpu *vcpu, 294 295 unsigned int inst, int *advance); 295 296 int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val); 296 297 int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+2 -2
arch/powerpc/kvm/book3s.c
··· 755 755 } 756 756 EXPORT_SYMBOL_GPL(kvmppc_set_msr); 757 757 758 - int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 758 + int kvmppc_vcpu_run(struct kvm_vcpu *vcpu) 759 759 { 760 - return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu); 760 + return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu); 761 761 } 762 762 763 763 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+1 -1
arch/powerpc/kvm/book3s.h
··· 18 18 19 19 extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu); 20 20 extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu); 21 - extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, 21 + extern int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu, 22 22 unsigned int inst, int *advance); 23 23 extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, 24 24 int sprn, ulong spr_val);
+6 -6
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 413 413 return (instr & mask) != 0; 414 414 } 415 415 416 - int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, 416 + int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu, 417 417 unsigned long gpa, gva_t ea, int is_store) 418 418 { 419 419 u32 last_inst; ··· 473 473 474 474 vcpu->arch.paddr_accessed = gpa; 475 475 vcpu->arch.vaddr_accessed = ea; 476 - return kvmppc_emulate_mmio(run, vcpu); 476 + return kvmppc_emulate_mmio(vcpu); 477 477 } 478 478 479 - int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, 479 + int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu, 480 480 unsigned long ea, unsigned long dsisr) 481 481 { 482 482 struct kvm *kvm = vcpu->kvm; ··· 499 499 pte_t pte, *ptep; 500 500 501 501 if (kvm_is_radix(kvm)) 502 - return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr); 502 + return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr); 503 503 504 504 /* 505 505 * Real-mode code has already searched the HPT and found the ··· 519 519 gpa_base = r & HPTE_R_RPN & ~(psize - 1); 520 520 gfn_base = gpa_base >> PAGE_SHIFT; 521 521 gpa = gpa_base | (ea & (psize - 1)); 522 - return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, 522 + return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, 523 523 dsisr & DSISR_ISSTORE); 524 524 } 525 525 } ··· 555 555 556 556 /* No memslot means it's an emulated MMIO region */ 557 557 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) 558 - return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, 558 + return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, 559 559 dsisr & DSISR_ISSTORE); 560 560 561 561 /*
+28 -8
arch/powerpc/kvm/book3s_64_mmu_radix.c
··· 353 353 354 354 static pte_t *kvmppc_pte_alloc(void) 355 355 { 356 - return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL); 356 + pte_t *pte; 357 + 358 + pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL); 359 + /* pmd_populate() will only reference _pa(pte). */ 360 + kmemleak_ignore(pte); 361 + 362 + return pte; 357 363 } 358 364 359 365 static void kvmppc_pte_free(pte_t *ptep) ··· 369 363 370 364 static pmd_t *kvmppc_pmd_alloc(void) 371 365 { 372 - return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL); 366 + pmd_t *pmd; 367 + 368 + pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL); 369 + /* pud_populate() will only reference _pa(pmd). */ 370 + kmemleak_ignore(pmd); 371 + 372 + return pmd; 373 373 } 374 374 375 375 static void kvmppc_pmd_free(pmd_t *pmdp) ··· 429 417 * Callers are responsible for flushing the PWC. 430 418 * 431 419 * When page tables are being unmapped/freed as part of page fault path 432 - * (full == false), ptes are not expected. There is code to unmap them 433 - * and emit a warning if encountered, but there may already be data 434 - * corruption due to the unexpected mappings. 420 + * (full == false), valid ptes are generally not expected; however, there 421 + * is one situation where they arise, which is when dirty page logging is 422 + * turned off for a memslot while the VM is running. The new memslot 423 + * becomes visible to page faults before the memslot commit function 424 + * gets to flush the memslot, which can lead to a 2MB page mapping being 425 + * installed for a guest physical address where there are already 64kB 426 + * (or 4kB) mappings (of sub-pages of the same 2MB page). 
435 427 */ 436 428 static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full, 437 429 unsigned int lpid) ··· 449 433 for (it = 0; it < PTRS_PER_PTE; ++it, ++p) { 450 434 if (pte_val(*p) == 0) 451 435 continue; 452 - WARN_ON_ONCE(1); 453 436 kvmppc_unmap_pte(kvm, p, 454 437 pte_pfn(*p) << PAGE_SHIFT, 455 438 PAGE_SHIFT, NULL, lpid); ··· 902 887 return ret; 903 888 } 904 889 905 - int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, 890 + int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu, 906 891 unsigned long ea, unsigned long dsisr) 907 892 { 908 893 struct kvm *kvm = vcpu->kvm; ··· 948 933 kvmppc_core_queue_data_storage(vcpu, ea, dsisr); 949 934 return RESUME_GUEST; 950 935 } 951 - return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing); 936 + return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing); 952 937 } 953 938 954 939 if (memslot->flags & KVM_MEM_READONLY) { ··· 1130 1115 kvm->arch.lpid); 1131 1116 gpa += PAGE_SIZE; 1132 1117 } 1118 + /* 1119 + * Increase the mmu notifier sequence number to prevent any page 1120 + * fault that read the memslot earlier from writing a PTE. 1121 + */ 1122 + kvm->mmu_notifier_seq++; 1133 1123 spin_unlock(&kvm->mmu_lock); 1134 1124 } 1135 1125
+14 -4
arch/powerpc/kvm/book3s_64_vio.c
··· 73 73 struct kvmppc_spapr_tce_iommu_table *stit, *tmp; 74 74 struct iommu_table_group *table_group = NULL; 75 75 76 + rcu_read_lock(); 76 77 list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { 77 78 78 79 table_group = iommu_group_get_iommudata(grp); ··· 88 87 kref_put(&stit->kref, kvm_spapr_tce_liobn_put); 89 88 } 90 89 } 90 + cond_resched_rcu(); 91 91 } 92 + rcu_read_unlock(); 92 93 } 93 94 94 95 extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, ··· 108 105 if (!f.file) 109 106 return -EBADF; 110 107 108 + rcu_read_lock(); 111 109 list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { 112 110 if (stt == f.file->private_data) { 113 111 found = true; 114 112 break; 115 113 } 116 114 } 115 + rcu_read_unlock(); 117 116 118 117 fdput(f); 119 118 ··· 148 143 if (!tbl) 149 144 return -EINVAL; 150 145 146 + rcu_read_lock(); 151 147 list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { 152 148 if (tbl != stit->tbl) 153 149 continue; ··· 156 150 if (!kref_get_unless_zero(&stit->kref)) { 157 151 /* stit is being destroyed */ 158 152 iommu_tce_table_put(tbl); 153 + rcu_read_unlock(); 159 154 return -ENOTTY; 160 155 } 161 156 /* 162 157 * The table is already known to this KVM, we just increased 163 158 * its KVM reference counter and can return. 
164 159 */ 160 + rcu_read_unlock(); 165 161 return 0; 166 162 } 163 + rcu_read_unlock(); 167 164 168 165 stit = kzalloc(sizeof(*stit), GFP_KERNEL); 169 166 if (!stit) { ··· 374 365 if (kvmppc_tce_to_ua(stt->kvm, tce, &ua)) 375 366 return H_TOO_HARD; 376 367 368 + rcu_read_lock(); 377 369 list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { 378 370 unsigned long hpa = 0; 379 371 struct mm_iommu_table_group_mem_t *mem; 380 372 long shift = stit->tbl->it_page_shift; 381 373 382 374 mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift); 383 - if (!mem) 375 + if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) { 376 + rcu_read_unlock(); 384 377 return H_TOO_HARD; 385 - 386 - if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) 387 - return H_TOO_HARD; 378 + } 388 379 } 380 + rcu_read_unlock(); 389 381 390 382 return H_SUCCESS; 391 383 }
+5 -5
arch/powerpc/kvm/book3s_emulate.c
··· 235 235 236 236 #endif 237 237 238 - int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, 238 + int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu, 239 239 unsigned int inst, int *advance) 240 240 { 241 241 int emulated = EMULATE_DONE; ··· 371 371 if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) 372 372 break; 373 373 374 - run->papr_hcall.nr = cmd; 374 + vcpu->run->papr_hcall.nr = cmd; 375 375 for (i = 0; i < 9; ++i) { 376 376 ulong gpr = kvmppc_get_gpr(vcpu, 4 + i); 377 - run->papr_hcall.args[i] = gpr; 377 + vcpu->run->papr_hcall.args[i] = gpr; 378 378 } 379 379 380 - run->exit_reason = KVM_EXIT_PAPR_HCALL; 380 + vcpu->run->exit_reason = KVM_EXIT_PAPR_HCALL; 381 381 vcpu->arch.hcall_needed = 1; 382 382 emulated = EMULATE_EXIT_USER; 383 383 break; ··· 629 629 } 630 630 631 631 if (emulated == EMULATE_FAIL) 632 - emulated = kvmppc_emulate_paired_single(run, vcpu); 632 + emulated = kvmppc_emulate_paired_single(vcpu); 633 633 634 634 return emulated; 635 635 }
+40 -35
arch/powerpc/kvm/book3s_hv.c
··· 1097 1097 ret = kvmppc_h_svm_init_done(vcpu->kvm); 1098 1098 break; 1099 1099 case H_SVM_INIT_ABORT: 1100 - ret = H_UNSUPPORTED; 1101 - if (kvmppc_get_srr1(vcpu) & MSR_S) 1102 - ret = kvmppc_h_svm_init_abort(vcpu->kvm); 1100 + /* 1101 + * Even if that call is made by the Ultravisor, the SSR1 value 1102 + * is the guest context one, with the secure bit clear as it has 1103 + * not yet been secured. So we can't check it here. 1104 + * Instead the kvm->arch.secure_guest flag is checked inside 1105 + * kvmppc_h_svm_init_abort(). 1106 + */ 1107 + ret = kvmppc_h_svm_init_abort(vcpu->kvm); 1103 1108 break; 1104 1109 1105 1110 default: ··· 1159 1154 return kvmppc_hcall_impl_hv_realmode(cmd); 1160 1155 } 1161 1156 1162 - static int kvmppc_emulate_debug_inst(struct kvm_run *run, 1163 - struct kvm_vcpu *vcpu) 1157 + static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu) 1164 1158 { 1165 1159 u32 last_inst; 1166 1160 ··· 1173 1169 } 1174 1170 1175 1171 if (last_inst == KVMPPC_INST_SW_BREAKPOINT) { 1176 - run->exit_reason = KVM_EXIT_DEBUG; 1177 - run->debug.arch.address = kvmppc_get_pc(vcpu); 1172 + vcpu->run->exit_reason = KVM_EXIT_DEBUG; 1173 + vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); 1178 1174 return RESUME_HOST; 1179 1175 } else { 1180 1176 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); ··· 1275 1271 return RESUME_GUEST; 1276 1272 } 1277 1273 1278 - static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, 1274 + static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, 1279 1275 struct task_struct *tsk) 1280 1276 { 1277 + struct kvm_run *run = vcpu->run; 1281 1278 int r = RESUME_HOST; 1282 1279 1283 1280 vcpu->stat.sum_exits++; ··· 1413 1408 swab32(vcpu->arch.emul_inst) : 1414 1409 vcpu->arch.emul_inst; 1415 1410 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { 1416 - r = kvmppc_emulate_debug_inst(run, vcpu); 1411 + r = kvmppc_emulate_debug_inst(vcpu); 1417 1412 } else { 1418 1413 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); 1419 
1414 r = RESUME_GUEST; ··· 1465 1460 return r; 1466 1461 } 1467 1462 1468 - static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) 1463 + static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) 1469 1464 { 1470 1465 int r; 1471 1466 int srcu_idx; ··· 1523 1518 */ 1524 1519 case BOOK3S_INTERRUPT_H_DATA_STORAGE: 1525 1520 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 1526 - r = kvmhv_nested_page_fault(run, vcpu); 1521 + r = kvmhv_nested_page_fault(vcpu); 1527 1522 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); 1528 1523 break; 1529 1524 case BOOK3S_INTERRUPT_H_INST_STORAGE: ··· 1533 1528 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) 1534 1529 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; 1535 1530 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 1536 - r = kvmhv_nested_page_fault(run, vcpu); 1531 + r = kvmhv_nested_page_fault(vcpu); 1537 1532 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); 1538 1533 break; 1539 1534 ··· 2937 2932 2938 2933 ret = RESUME_GUEST; 2939 2934 if (vcpu->arch.trap) 2940 - ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, 2935 + ret = kvmppc_handle_exit_hv(vcpu, 2941 2936 vcpu->arch.run_task); 2942 2937 2943 2938 vcpu->arch.ret = ret; ··· 3902 3897 return r; 3903 3898 } 3904 3899 3905 - static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 3900 + static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu) 3906 3901 { 3902 + struct kvm_run *run = vcpu->run; 3907 3903 int n_ceded, i, r; 3908 3904 struct kvmppc_vcore *vc; 3909 3905 struct kvm_vcpu *v; 3910 3906 3911 3907 trace_kvmppc_run_vcpu_enter(vcpu); 3912 3908 3913 - kvm_run->exit_reason = 0; 3909 + run->exit_reason = 0; 3914 3910 vcpu->arch.ret = RESUME_GUEST; 3915 3911 vcpu->arch.trap = 0; 3916 3912 kvmppc_update_vpas(vcpu); ··· 3923 3917 spin_lock(&vc->lock); 3924 3918 vcpu->arch.ceded = 0; 3925 3919 vcpu->arch.run_task = current; 3926 - vcpu->arch.kvm_run = kvm_run; 3927 3920 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); 3928 3921 
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; 3929 3922 vcpu->arch.busy_preempt = TB_NIL; ··· 3955 3950 r = kvmhv_setup_mmu(vcpu); 3956 3951 spin_lock(&vc->lock); 3957 3952 if (r) { 3958 - kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; 3959 - kvm_run->fail_entry. 3953 + run->exit_reason = KVM_EXIT_FAIL_ENTRY; 3954 + run->fail_entry. 3960 3955 hardware_entry_failure_reason = 0; 3961 3956 vcpu->arch.ret = r; 3962 3957 break; ··· 3975 3970 if (signal_pending(v->arch.run_task)) { 3976 3971 kvmppc_remove_runnable(vc, v); 3977 3972 v->stat.signal_exits++; 3978 - v->arch.kvm_run->exit_reason = KVM_EXIT_INTR; 3973 + v->run->exit_reason = KVM_EXIT_INTR; 3979 3974 v->arch.ret = -EINTR; 3980 3975 wake_up(&v->arch.cpu_run); 3981 3976 } ··· 4016 4011 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { 4017 4012 kvmppc_remove_runnable(vc, vcpu); 4018 4013 vcpu->stat.signal_exits++; 4019 - kvm_run->exit_reason = KVM_EXIT_INTR; 4014 + run->exit_reason = KVM_EXIT_INTR; 4020 4015 vcpu->arch.ret = -EINTR; 4021 4016 } 4022 4017 ··· 4027 4022 wake_up(&v->arch.cpu_run); 4028 4023 } 4029 4024 4030 - trace_kvmppc_run_vcpu_exit(vcpu, kvm_run); 4025 + trace_kvmppc_run_vcpu_exit(vcpu); 4031 4026 spin_unlock(&vc->lock); 4032 4027 return vcpu->arch.ret; 4033 4028 } 4034 4029 4035 - int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, 4036 - struct kvm_vcpu *vcpu, u64 time_limit, 4030 + int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, 4037 4031 unsigned long lpcr) 4038 4032 { 4033 + struct kvm_run *run = vcpu->run; 4039 4034 int trap, r, pcpu; 4040 4035 int srcu_idx, lpid; 4041 4036 struct kvmppc_vcore *vc; ··· 4044 4039 4045 4040 trace_kvmppc_run_vcpu_enter(vcpu); 4046 4041 4047 - kvm_run->exit_reason = 0; 4042 + run->exit_reason = 0; 4048 4043 vcpu->arch.ret = RESUME_GUEST; 4049 4044 vcpu->arch.trap = 0; 4050 4045 4051 4046 vc = vcpu->arch.vcore; 4052 4047 vcpu->arch.ceded = 0; 4053 4048 vcpu->arch.run_task = current; 4054 - vcpu->arch.kvm_run = kvm_run; 4055 4049 vcpu->arch.stolen_logged = 
vcore_stolen_time(vc, mftb()); 4056 4050 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; 4057 4051 vcpu->arch.busy_preempt = TB_NIL; ··· 4168 4164 r = RESUME_GUEST; 4169 4165 if (trap) { 4170 4166 if (!nested) 4171 - r = kvmppc_handle_exit_hv(kvm_run, vcpu, current); 4167 + r = kvmppc_handle_exit_hv(vcpu, current); 4172 4168 else 4173 - r = kvmppc_handle_nested_exit(kvm_run, vcpu); 4169 + r = kvmppc_handle_nested_exit(vcpu); 4174 4170 } 4175 4171 vcpu->arch.ret = r; 4176 4172 ··· 4180 4176 while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) { 4181 4177 if (signal_pending(current)) { 4182 4178 vcpu->stat.signal_exits++; 4183 - kvm_run->exit_reason = KVM_EXIT_INTR; 4179 + run->exit_reason = KVM_EXIT_INTR; 4184 4180 vcpu->arch.ret = -EINTR; 4185 4181 break; 4186 4182 } ··· 4196 4192 4197 4193 done: 4198 4194 kvmppc_remove_runnable(vc, vcpu); 4199 - trace_kvmppc_run_vcpu_exit(vcpu, kvm_run); 4195 + trace_kvmppc_run_vcpu_exit(vcpu); 4200 4196 4201 4197 return vcpu->arch.ret; 4202 4198 4203 4199 sigpend: 4204 4200 vcpu->stat.signal_exits++; 4205 - kvm_run->exit_reason = KVM_EXIT_INTR; 4201 + run->exit_reason = KVM_EXIT_INTR; 4206 4202 vcpu->arch.ret = -EINTR; 4207 4203 out: 4208 4204 local_irq_enable(); ··· 4210 4206 goto done; 4211 4207 } 4212 4208 4213 - static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) 4209 + static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu) 4214 4210 { 4211 + struct kvm_run *run = vcpu->run; 4215 4212 int r; 4216 4213 int srcu_idx; 4217 4214 unsigned long ebb_regs[3] = {}; /* shut up GCC */ ··· 4296 4291 */ 4297 4292 if (kvm->arch.threads_indep && kvm_is_radix(kvm) && 4298 4293 !no_mixing_hpt_and_radix) 4299 - r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0, 4294 + r = kvmhv_run_single_vcpu(vcpu, ~(u64)0, 4300 4295 vcpu->arch.vcore->lpcr); 4301 4296 else 4302 - r = kvmppc_run_vcpu(run, vcpu); 4297 + r = kvmppc_run_vcpu(vcpu); 4303 4298 4304 4299 if (run->exit_reason == KVM_EXIT_PAPR_HCALL && 4305 4300 !(vcpu->arch.shregs.msr & 
MSR_PR)) { ··· 4309 4304 kvmppc_core_prepare_to_enter(vcpu); 4310 4305 } else if (r == RESUME_PAGE_FAULT) { 4311 4306 srcu_idx = srcu_read_lock(&kvm->srcu); 4312 - r = kvmppc_book3s_hv_page_fault(run, vcpu, 4307 + r = kvmppc_book3s_hv_page_fault(vcpu, 4313 4308 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); 4314 4309 srcu_read_unlock(&kvm->srcu, srcu_idx); 4315 4310 } else if (r == RESUME_PASSTHROUGH) { ··· 4983 4978 } 4984 4979 4985 4980 /* We don't need to emulate any privileged instructions or dcbz */ 4986 - static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, 4981 + static int kvmppc_core_emulate_op_hv(struct kvm_vcpu *vcpu, 4987 4982 unsigned int inst, int *advance) 4988 4983 { 4989 4984 return EMULATE_FAIL;
+6 -9
arch/powerpc/kvm/book3s_hv_nested.c
··· 290 290 r = RESUME_HOST; 291 291 break; 292 292 } 293 - r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp, 294 - lpcr); 293 + r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr); 295 294 } while (is_kvmppc_resume_guest(r)); 296 295 297 296 /* save L2 state for return */ ··· 1256 1257 } 1257 1258 1258 1259 /* called with gp->tlb_lock held */ 1259 - static long int __kvmhv_nested_page_fault(struct kvm_run *run, 1260 - struct kvm_vcpu *vcpu, 1260 + static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu, 1261 1261 struct kvm_nested_guest *gp) 1262 1262 { 1263 1263 struct kvm *kvm = vcpu->kvm; ··· 1339 1341 } 1340 1342 1341 1343 /* passthrough of emulated MMIO case */ 1342 - return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing); 1344 + return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing); 1343 1345 } 1344 1346 if (memslot->flags & KVM_MEM_READONLY) { 1345 1347 if (writing) { ··· 1414 1416 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; 1415 1417 ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level, 1416 1418 mmu_seq, gp->shadow_lpid, rmapp, &n_rmap); 1417 - if (n_rmap) 1418 - kfree(n_rmap); 1419 + kfree(n_rmap); 1419 1420 if (ret == -EAGAIN) 1420 1421 ret = RESUME_GUEST; /* Let the guest try again */ 1421 1422 ··· 1425 1428 return RESUME_GUEST; 1426 1429 } 1427 1430 1428 - long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu) 1431 + long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu) 1429 1432 { 1430 1433 struct kvm_nested_guest *gp = vcpu->arch.nested; 1431 1434 long int ret; 1432 1435 1433 1436 mutex_lock(&gp->tlb_lock); 1434 - ret = __kvmhv_nested_page_fault(run, vcpu, gp); 1437 + ret = __kvmhv_nested_page_fault(vcpu, gp); 1435 1438 mutex_unlock(&gp->tlb_lock); 1436 1439 return ret; 1437 1440 }
+14
arch/powerpc/kvm/book3s_hv_uvmem.c
··· 749 749 const __be32 *prop; 750 750 u64 size = 0; 751 751 752 + /* 753 + * First try the new ibm,secure-memory nodes which supersede the 754 + * secure-memory-ranges property. 755 + * If we found some, no need to read the deprecated ones. 756 + */ 757 + for_each_compatible_node(np, NULL, "ibm,secure-memory") { 758 + prop = of_get_property(np, "reg", &len); 759 + if (!prop) 760 + continue; 761 + size += of_read_number(prop + 2, 2); 762 + } 763 + if (size) 764 + return size; 765 + 752 766 np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware"); 753 767 if (!np) 754 768 goto out;
+36 -36
arch/powerpc/kvm/book3s_paired_singles.c
··· 169 169 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); 170 170 } 171 171 172 - static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 172 + static int kvmppc_emulate_fpr_load(struct kvm_vcpu *vcpu, 173 173 int rs, ulong addr, int ls_type) 174 174 { 175 175 int emulated = EMULATE_FAIL; ··· 188 188 kvmppc_inject_pf(vcpu, addr, false); 189 189 goto done_load; 190 190 } else if (r == EMULATE_DO_MMIO) { 191 - emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs, 191 + emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FPR | rs, 192 192 len, 1); 193 193 goto done_load; 194 194 } ··· 213 213 return emulated; 214 214 } 215 215 216 - static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 216 + static int kvmppc_emulate_fpr_store(struct kvm_vcpu *vcpu, 217 217 int rs, ulong addr, int ls_type) 218 218 { 219 219 int emulated = EMULATE_FAIL; ··· 248 248 if (r < 0) { 249 249 kvmppc_inject_pf(vcpu, addr, true); 250 250 } else if (r == EMULATE_DO_MMIO) { 251 - emulated = kvmppc_handle_store(run, vcpu, val, len, 1); 251 + emulated = kvmppc_handle_store(vcpu, val, len, 1); 252 252 } else { 253 253 emulated = EMULATE_DONE; 254 254 } ··· 259 259 return emulated; 260 260 } 261 261 262 - static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 262 + static int kvmppc_emulate_psq_load(struct kvm_vcpu *vcpu, 263 263 int rs, ulong addr, bool w, int i) 264 264 { 265 265 int emulated = EMULATE_FAIL; ··· 279 279 kvmppc_inject_pf(vcpu, addr, false); 280 280 goto done_load; 281 281 } else if ((r == EMULATE_DO_MMIO) && w) { 282 - emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs, 282 + emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FPR | rs, 283 283 4, 1); 284 284 vcpu->arch.qpr[rs] = tmp[1]; 285 285 goto done_load; 286 286 } else if (r == EMULATE_DO_MMIO) { 287 - emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs, 287 + emulated = kvmppc_handle_load(vcpu, 
KVM_MMIO_REG_FQPR | rs, 288 288 8, 1); 289 289 goto done_load; 290 290 } ··· 302 302 return emulated; 303 303 } 304 304 305 - static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 305 + static int kvmppc_emulate_psq_store(struct kvm_vcpu *vcpu, 306 306 int rs, ulong addr, bool w, int i) 307 307 { 308 308 int emulated = EMULATE_FAIL; ··· 318 318 if (r < 0) { 319 319 kvmppc_inject_pf(vcpu, addr, true); 320 320 } else if ((r == EMULATE_DO_MMIO) && w) { 321 - emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1); 321 + emulated = kvmppc_handle_store(vcpu, tmp[0], 4, 1); 322 322 } else if (r == EMULATE_DO_MMIO) { 323 323 u64 val = ((u64)tmp[0] << 32) | tmp[1]; 324 - emulated = kvmppc_handle_store(run, vcpu, val, 8, 1); 324 + emulated = kvmppc_handle_store(vcpu, val, 8, 1); 325 325 } else { 326 326 emulated = EMULATE_DONE; 327 327 } ··· 618 618 return EMULATE_DONE; 619 619 } 620 620 621 - int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) 621 + int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu) 622 622 { 623 623 u32 inst; 624 624 enum emulation_result emulated = EMULATE_DONE; ··· 680 680 int i = inst_get_field(inst, 17, 19); 681 681 682 682 addr += get_d_signext(inst); 683 - emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); 683 + emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i); 684 684 break; 685 685 } 686 686 case OP_PSQ_LU: ··· 690 690 int i = inst_get_field(inst, 17, 19); 691 691 692 692 addr += get_d_signext(inst); 693 - emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); 693 + emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i); 694 694 695 695 if (emulated == EMULATE_DONE) 696 696 kvmppc_set_gpr(vcpu, ax_ra, addr); ··· 703 703 int i = inst_get_field(inst, 17, 19); 704 704 705 705 addr += get_d_signext(inst); 706 - emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); 706 + emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i); 707 707 
break; 708 708 } 709 709 case OP_PSQ_STU: ··· 713 713 int i = inst_get_field(inst, 17, 19); 714 714 715 715 addr += get_d_signext(inst); 716 - emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); 716 + emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i); 717 717 718 718 if (emulated == EMULATE_DONE) 719 719 kvmppc_set_gpr(vcpu, ax_ra, addr); ··· 733 733 int i = inst_get_field(inst, 22, 24); 734 734 735 735 addr += kvmppc_get_gpr(vcpu, ax_rb); 736 - emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); 736 + emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i); 737 737 break; 738 738 } 739 739 case OP_4X_PS_CMPO0: ··· 747 747 int i = inst_get_field(inst, 22, 24); 748 748 749 749 addr += kvmppc_get_gpr(vcpu, ax_rb); 750 - emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); 750 + emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i); 751 751 752 752 if (emulated == EMULATE_DONE) 753 753 kvmppc_set_gpr(vcpu, ax_ra, addr); ··· 824 824 int i = inst_get_field(inst, 22, 24); 825 825 826 826 addr += kvmppc_get_gpr(vcpu, ax_rb); 827 - emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); 827 + emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i); 828 828 break; 829 829 } 830 830 case OP_4XW_PSQ_STUX: ··· 834 834 int i = inst_get_field(inst, 22, 24); 835 835 836 836 addr += kvmppc_get_gpr(vcpu, ax_rb); 837 - emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); 837 + emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i); 838 838 839 839 if (emulated == EMULATE_DONE) 840 840 kvmppc_set_gpr(vcpu, ax_ra, addr); ··· 922 922 { 923 923 ulong addr = (ax_ra ? 
kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; 924 924 925 - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, 925 + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, 926 926 FPU_LS_SINGLE); 927 927 break; 928 928 } ··· 930 930 { 931 931 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; 932 932 933 - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, 933 + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, 934 934 FPU_LS_SINGLE); 935 935 936 936 if (emulated == EMULATE_DONE) ··· 941 941 { 942 942 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; 943 943 944 - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, 944 + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, 945 945 FPU_LS_DOUBLE); 946 946 break; 947 947 } ··· 949 949 { 950 950 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; 951 951 952 - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, 952 + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr, 953 953 FPU_LS_DOUBLE); 954 954 955 955 if (emulated == EMULATE_DONE) ··· 960 960 { 961 961 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; 962 962 963 - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, 963 + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, 964 964 FPU_LS_SINGLE); 965 965 break; 966 966 } ··· 968 968 { 969 969 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; 970 970 971 - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, 971 + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, 972 972 FPU_LS_SINGLE); 973 973 974 974 if (emulated == EMULATE_DONE) ··· 979 979 { 980 980 ulong addr = (ax_ra ? 
kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; 981 981 982 - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, 982 + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, 983 983 FPU_LS_DOUBLE); 984 984 break; 985 985 } ··· 987 987 { 988 988 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; 989 989 990 - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, 990 + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr, 991 991 FPU_LS_DOUBLE); 992 992 993 993 if (emulated == EMULATE_DONE) ··· 1001 1001 ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; 1002 1002 1003 1003 addr += kvmppc_get_gpr(vcpu, ax_rb); 1004 - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, 1004 + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, 1005 1005 addr, FPU_LS_SINGLE); 1006 1006 break; 1007 1007 } ··· 1010 1010 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + 1011 1011 kvmppc_get_gpr(vcpu, ax_rb); 1012 1012 1013 - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, 1013 + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, 1014 1014 addr, FPU_LS_SINGLE); 1015 1015 1016 1016 if (emulated == EMULATE_DONE) ··· 1022 1022 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + 1023 1023 kvmppc_get_gpr(vcpu, ax_rb); 1024 1024 1025 - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, 1025 + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, 1026 1026 addr, FPU_LS_DOUBLE); 1027 1027 break; 1028 1028 } ··· 1031 1031 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + 1032 1032 kvmppc_get_gpr(vcpu, ax_rb); 1033 1033 1034 - emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, 1034 + emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, 1035 1035 addr, FPU_LS_DOUBLE); 1036 1036 1037 1037 if (emulated == EMULATE_DONE) ··· 1043 1043 ulong addr = (ax_ra ? 
kvmppc_get_gpr(vcpu, ax_ra) : 0) + 1044 1044 kvmppc_get_gpr(vcpu, ax_rb); 1045 1045 1046 - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, 1046 + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, 1047 1047 addr, FPU_LS_SINGLE); 1048 1048 break; 1049 1049 } ··· 1052 1052 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + 1053 1053 kvmppc_get_gpr(vcpu, ax_rb); 1054 1054 1055 - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, 1055 + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, 1056 1056 addr, FPU_LS_SINGLE); 1057 1057 1058 1058 if (emulated == EMULATE_DONE) ··· 1064 1064 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + 1065 1065 kvmppc_get_gpr(vcpu, ax_rb); 1066 1066 1067 - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, 1067 + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, 1068 1068 addr, FPU_LS_DOUBLE); 1069 1069 break; 1070 1070 } ··· 1073 1073 ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + 1074 1074 kvmppc_get_gpr(vcpu, ax_rb); 1075 1075 1076 - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, 1076 + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, 1077 1077 addr, FPU_LS_DOUBLE); 1078 1078 1079 1079 if (emulated == EMULATE_DONE) ··· 1085 1085 ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + 1086 1086 kvmppc_get_gpr(vcpu, ax_rb); 1087 1087 1088 - emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, 1088 + emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, 1089 1089 addr, 1090 1090 FPU_LS_SINGLE_LOW); 1091 1091 break;
+15 -15
arch/powerpc/kvm/book3s_pr.c
··· 700 700 return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT); 701 701 } 702 702 703 - int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, 703 + static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu, 704 704 ulong eaddr, int vec) 705 705 { 706 706 bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE); ··· 795 795 /* The guest's PTE is not mapped yet. Map on the host */ 796 796 if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) { 797 797 /* Exit KVM if mapping failed */ 798 - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 798 + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 799 799 return RESUME_HOST; 800 800 } 801 801 if (data) ··· 808 808 vcpu->stat.mmio_exits++; 809 809 vcpu->arch.paddr_accessed = pte.raddr; 810 810 vcpu->arch.vaddr_accessed = pte.eaddr; 811 - r = kvmppc_emulate_mmio(run, vcpu); 811 + r = kvmppc_emulate_mmio(vcpu); 812 812 if ( r == RESUME_HOST_NV ) 813 813 r = RESUME_HOST; 814 814 } ··· 992 992 enum emulation_result er = EMULATE_FAIL; 993 993 994 994 if (!(kvmppc_get_msr(vcpu) & MSR_PR)) 995 - er = kvmppc_emulate_instruction(vcpu->run, vcpu); 995 + er = kvmppc_emulate_instruction(vcpu); 996 996 997 997 if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) { 998 998 /* Couldn't emulate, trigger interrupt in guest */ ··· 1089 1089 } 1090 1090 } 1091 1091 1092 - static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu, 1093 - unsigned int exit_nr) 1092 + static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr) 1094 1093 { 1095 1094 enum emulation_result er; 1096 1095 ulong flags; ··· 1123 1124 } 1124 1125 1125 1126 vcpu->stat.emulated_inst_exits++; 1126 - er = kvmppc_emulate_instruction(run, vcpu); 1127 + er = kvmppc_emulate_instruction(vcpu); 1127 1128 switch (er) { 1128 1129 case EMULATE_DONE: 1129 1130 r = RESUME_GUEST_NV; ··· 1138 1139 r = RESUME_GUEST; 1139 1140 break; 1140 1141 case EMULATE_DO_MMIO: 1141 - run->exit_reason = KVM_EXIT_MMIO; 1142 + vcpu->run->exit_reason = 
KVM_EXIT_MMIO; 1142 1143 r = RESUME_HOST_NV; 1143 1144 break; 1144 1145 case EMULATE_EXIT_USER: ··· 1197 1198 /* only care about PTEG not found errors, but leave NX alone */ 1198 1199 if (shadow_srr1 & 0x40000000) { 1199 1200 int idx = srcu_read_lock(&vcpu->kvm->srcu); 1200 - r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); 1201 + r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr); 1201 1202 srcu_read_unlock(&vcpu->kvm->srcu, idx); 1202 1203 vcpu->stat.sp_instruc++; 1203 1204 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && ··· 1247 1248 */ 1248 1249 if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) { 1249 1250 int idx = srcu_read_lock(&vcpu->kvm->srcu); 1250 - r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); 1251 + r = kvmppc_handle_pagefault(vcpu, dar, exit_nr); 1251 1252 srcu_read_unlock(&vcpu->kvm->srcu, idx); 1252 1253 } else { 1253 1254 kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr); ··· 1291 1292 break; 1292 1293 case BOOK3S_INTERRUPT_PROGRAM: 1293 1294 case BOOK3S_INTERRUPT_H_EMUL_ASSIST: 1294 - r = kvmppc_exit_pr_progint(run, vcpu, exit_nr); 1295 + r = kvmppc_exit_pr_progint(vcpu, exit_nr); 1295 1296 break; 1296 1297 case BOOK3S_INTERRUPT_SYSCALL: 1297 1298 { ··· 1369 1370 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, 1370 1371 &last_inst); 1371 1372 if (emul == EMULATE_DONE) 1372 - r = kvmppc_exit_pr_progint(run, vcpu, exit_nr); 1373 + r = kvmppc_exit_pr_progint(vcpu, exit_nr); 1373 1374 else 1374 1375 r = RESUME_GUEST; 1375 1376 ··· 1824 1825 vfree(vcpu_book3s); 1825 1826 } 1826 1827 1827 - static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 1828 + static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu) 1828 1829 { 1830 + struct kvm_run *run = vcpu->run; 1829 1831 int ret; 1830 1832 #ifdef CONFIG_ALTIVEC 1831 1833 unsigned long uninitialized_var(vrsave); ··· 1834 1834 1835 1835 /* Check if we can run the vcpu at all */ 1836 1836 if (!vcpu->arch.sane) { 1837 - kvm_run->exit_reason = 
KVM_EXIT_INTERNAL_ERROR; 1837 + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1838 1838 ret = -EINVAL; 1839 1839 goto out; 1840 1840 } ··· 1861 1861 1862 1862 kvmppc_fix_ee_before_entry(); 1863 1863 1864 - ret = __kvmppc_vcpu_run(kvm_run, vcpu); 1864 + ret = __kvmppc_vcpu_run(run, vcpu); 1865 1865 1866 1866 kvmppc_clear_debug(vcpu); 1867 1867
+19 -17
arch/powerpc/kvm/booke.c
··· 729 729 return r; 730 730 } 731 731 732 - int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 732 + int kvmppc_vcpu_run(struct kvm_vcpu *vcpu) 733 733 { 734 + struct kvm_run *run = vcpu->run; 734 735 int ret, s; 735 736 struct debug_reg debug; 736 737 737 738 if (!vcpu->arch.sane) { 738 - kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 739 + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 739 740 return -EINVAL; 740 741 } 741 742 ··· 778 777 vcpu->arch.pgdir = vcpu->kvm->mm->pgd; 779 778 kvmppc_fix_ee_before_entry(); 780 779 781 - ret = __kvmppc_vcpu_run(kvm_run, vcpu); 780 + ret = __kvmppc_vcpu_run(run, vcpu); 782 781 783 782 /* No need for guest_exit. It's done in handle_exit. 784 783 We also get here with interrupts enabled. */ ··· 800 799 return ret; 801 800 } 802 801 803 - static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) 802 + static int emulation_exit(struct kvm_vcpu *vcpu) 804 803 { 805 804 enum emulation_result er; 806 805 807 - er = kvmppc_emulate_instruction(run, vcpu); 806 + er = kvmppc_emulate_instruction(vcpu); 808 807 switch (er) { 809 808 case EMULATE_DONE: 810 809 /* don't overwrite subtypes, just account kvm_stats */ ··· 821 820 __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst); 822 821 /* For debugging, encode the failing instruction and 823 822 * report it to userspace. 
*/ 824 - run->hw.hardware_exit_reason = ~0ULL << 32; 825 - run->hw.hardware_exit_reason |= vcpu->arch.last_inst; 823 + vcpu->run->hw.hardware_exit_reason = ~0ULL << 32; 824 + vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst; 826 825 kvmppc_core_queue_program(vcpu, ESR_PIL); 827 826 return RESUME_HOST; 828 827 ··· 834 833 } 835 834 } 836 835 837 - static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu) 836 + static int kvmppc_handle_debug(struct kvm_vcpu *vcpu) 838 837 { 838 + struct kvm_run *run = vcpu->run; 839 839 struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg); 840 840 u32 dbsr = vcpu->arch.dbsr; 841 841 ··· 955 953 } 956 954 } 957 955 958 - static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 956 + static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu, 959 957 enum emulation_result emulated, u32 last_inst) 960 958 { 961 959 switch (emulated) { ··· 967 965 __func__, vcpu->arch.regs.nip); 968 966 /* For debugging, encode the failing instruction and 969 967 * report it to userspace. */ 970 - run->hw.hardware_exit_reason = ~0ULL << 32; 971 - run->hw.hardware_exit_reason |= last_inst; 968 + vcpu->run->hw.hardware_exit_reason = ~0ULL << 32; 969 + vcpu->run->hw.hardware_exit_reason |= last_inst; 972 970 kvmppc_core_queue_program(vcpu, ESR_PIL); 973 971 return RESUME_HOST; 974 972 ··· 1025 1023 run->ready_for_interrupt_injection = 1; 1026 1024 1027 1025 if (emulated != EMULATE_DONE) { 1028 - r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst); 1026 + r = kvmppc_resume_inst_load(vcpu, emulated, last_inst); 1029 1027 goto out; 1030 1028 } 1031 1029 ··· 1085 1083 break; 1086 1084 1087 1085 case BOOKE_INTERRUPT_HV_PRIV: 1088 - r = emulation_exit(run, vcpu); 1086 + r = emulation_exit(vcpu); 1089 1087 break; 1090 1088 1091 1089 case BOOKE_INTERRUPT_PROGRAM: ··· 1095 1093 * We are here because of an SW breakpoint instr, 1096 1094 * so lets return to host to handle. 
1097 1095 */ 1098 - r = kvmppc_handle_debug(run, vcpu); 1096 + r = kvmppc_handle_debug(vcpu); 1099 1097 run->exit_reason = KVM_EXIT_DEBUG; 1100 1098 kvmppc_account_exit(vcpu, DEBUG_EXITS); 1101 1099 break; ··· 1116 1114 break; 1117 1115 } 1118 1116 1119 - r = emulation_exit(run, vcpu); 1117 + r = emulation_exit(vcpu); 1120 1118 break; 1121 1119 1122 1120 case BOOKE_INTERRUPT_FP_UNAVAIL: ··· 1283 1281 * actually RAM. */ 1284 1282 vcpu->arch.paddr_accessed = gpaddr; 1285 1283 vcpu->arch.vaddr_accessed = eaddr; 1286 - r = kvmppc_emulate_mmio(run, vcpu); 1284 + r = kvmppc_emulate_mmio(vcpu); 1287 1285 kvmppc_account_exit(vcpu, MMIO_EXITS); 1288 1286 } 1289 1287 ··· 1334 1332 } 1335 1333 1336 1334 case BOOKE_INTERRUPT_DEBUG: { 1337 - r = kvmppc_handle_debug(run, vcpu); 1335 + r = kvmppc_handle_debug(vcpu); 1338 1336 if (r == RESUME_HOST) 1339 1337 run->exit_reason = KVM_EXIT_DEBUG; 1340 1338 kvmppc_account_exit(vcpu, DEBUG_EXITS);
+2 -6
arch/powerpc/kvm/booke.h
··· 70 70 void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); 71 71 void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); 72 72 73 - int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 73 + int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu, 74 74 unsigned int inst, int *advance); 75 75 int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val); 76 76 int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val); ··· 94 94 95 95 void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); 96 96 97 - extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, 98 - struct kvm_vcpu *vcpu, 97 + extern int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu, 99 98 unsigned int inst, int *advance); 100 99 extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, 101 100 ulong spr_val); 102 101 extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, 103 102 ulong *spr_val); 104 - extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, 105 - struct kvm_vcpu *vcpu, 106 - unsigned int inst, int *advance); 107 103 extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, 108 104 ulong spr_val); 109 105 extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
+1 -1
arch/powerpc/kvm/booke_emulate.c
··· 39 39 kvmppc_set_msr(vcpu, vcpu->arch.csrr1); 40 40 } 41 41 42 - int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 42 + int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu, 43 43 unsigned int inst, int *advance) 44 44 { 45 45 int emulated = EMULATE_DONE;
+7 -8
arch/powerpc/kvm/e500_emulate.c
··· 83 83 } 84 84 #endif 85 85 86 - static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu, 86 + static int kvmppc_e500_emul_ehpriv(struct kvm_vcpu *vcpu, 87 87 unsigned int inst, int *advance) 88 88 { 89 89 int emulated = EMULATE_DONE; 90 90 91 91 switch (get_oc(inst)) { 92 92 case EHPRIV_OC_DEBUG: 93 - run->exit_reason = KVM_EXIT_DEBUG; 94 - run->debug.arch.address = vcpu->arch.regs.nip; 95 - run->debug.arch.status = 0; 93 + vcpu->run->exit_reason = KVM_EXIT_DEBUG; 94 + vcpu->run->debug.arch.address = vcpu->arch.regs.nip; 95 + vcpu->run->debug.arch.status = 0; 96 96 kvmppc_account_exit(vcpu, DEBUG_EXITS); 97 97 emulated = EMULATE_EXIT_USER; 98 98 *advance = 0; ··· 125 125 return EMULATE_FAIL; 126 126 } 127 127 128 - int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, 128 + int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu, 129 129 unsigned int inst, int *advance) 130 130 { 131 131 int emulated = EMULATE_DONE; ··· 182 182 break; 183 183 184 184 case XOP_EHPRIV: 185 - emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst, 186 - advance); 185 + emulated = kvmppc_e500_emul_ehpriv(vcpu, inst, advance); 187 186 break; 188 187 189 188 default: ··· 196 197 } 197 198 198 199 if (emulated == EMULATE_FAIL) 199 - emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance); 200 + emulated = kvmppc_booke_emulate_op(vcpu, inst, advance); 200 201 201 202 return emulated; 202 203 }
+5 -5
arch/powerpc/kvm/emulate.c
··· 191 191 192 192 /* XXX Should probably auto-generate instruction decoding for a particular core 193 193 * from opcode tables in the future. */ 194 - int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 194 + int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu) 195 195 { 196 196 u32 inst; 197 197 int rs, rt, sprn; ··· 270 270 * these are illegal instructions. 271 271 */ 272 272 if (inst == KVMPPC_INST_SW_BREAKPOINT) { 273 - run->exit_reason = KVM_EXIT_DEBUG; 274 - run->debug.arch.status = 0; 275 - run->debug.arch.address = kvmppc_get_pc(vcpu); 273 + vcpu->run->exit_reason = KVM_EXIT_DEBUG; 274 + vcpu->run->debug.arch.status = 0; 275 + vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); 276 276 emulated = EMULATE_EXIT_USER; 277 277 advance = 0; 278 278 } else ··· 285 285 } 286 286 287 287 if (emulated == EMULATE_FAIL) { 288 - emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst, 288 + emulated = vcpu->kvm->arch.kvm_ops->emulate_op(vcpu, inst, 289 289 &advance); 290 290 if (emulated == EMULATE_AGAIN) { 291 291 advance = 0;
+15 -17
arch/powerpc/kvm/emulate_loadstore.c
··· 71 71 */ 72 72 int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) 73 73 { 74 - struct kvm_run *run = vcpu->run; 75 74 u32 inst; 76 75 enum emulation_result emulated = EMULATE_FAIL; 77 76 int advance = 1; ··· 103 104 int instr_byte_swap = op.type & BYTEREV; 104 105 105 106 if (op.type & SIGNEXT) 106 - emulated = kvmppc_handle_loads(run, vcpu, 107 + emulated = kvmppc_handle_loads(vcpu, 107 108 op.reg, size, !instr_byte_swap); 108 109 else 109 - emulated = kvmppc_handle_load(run, vcpu, 110 + emulated = kvmppc_handle_load(vcpu, 110 111 op.reg, size, !instr_byte_swap); 111 112 112 113 if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) ··· 123 124 vcpu->arch.mmio_sp64_extend = 1; 124 125 125 126 if (op.type & SIGNEXT) 126 - emulated = kvmppc_handle_loads(run, vcpu, 127 + emulated = kvmppc_handle_loads(vcpu, 127 128 KVM_MMIO_REG_FPR|op.reg, size, 1); 128 129 else 129 - emulated = kvmppc_handle_load(run, vcpu, 130 + emulated = kvmppc_handle_load(vcpu, 130 131 KVM_MMIO_REG_FPR|op.reg, size, 1); 131 132 132 133 if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) ··· 163 164 164 165 if (size == 16) { 165 166 vcpu->arch.mmio_vmx_copy_nums = 2; 166 - emulated = kvmppc_handle_vmx_load(run, 167 - vcpu, KVM_MMIO_REG_VMX|op.reg, 167 + emulated = kvmppc_handle_vmx_load(vcpu, 168 + KVM_MMIO_REG_VMX|op.reg, 168 169 8, 1); 169 170 } else { 170 171 vcpu->arch.mmio_vmx_copy_nums = 1; 171 - emulated = kvmppc_handle_vmx_load(run, vcpu, 172 + emulated = kvmppc_handle_vmx_load(vcpu, 172 173 KVM_MMIO_REG_VMX|op.reg, 173 174 size, 1); 174 175 } ··· 216 217 io_size_each = op.element_size; 217 218 } 218 219 219 - emulated = kvmppc_handle_vsx_load(run, vcpu, 220 + emulated = kvmppc_handle_vsx_load(vcpu, 220 221 KVM_MMIO_REG_VSX|op.reg, io_size_each, 221 222 1, op.type & SIGNEXT); 222 223 break; ··· 226 227 /* if need byte reverse, op.val has been reversed by 227 228 * analyse_instr(). 
228 229 */ 229 - emulated = kvmppc_handle_store(run, vcpu, op.val, 230 - size, 1); 230 + emulated = kvmppc_handle_store(vcpu, op.val, size, 1); 231 231 232 232 if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) 233 233 kvmppc_set_gpr(vcpu, op.update_reg, op.ea); ··· 248 250 if (op.type & FPCONV) 249 251 vcpu->arch.mmio_sp64_extend = 1; 250 252 251 - emulated = kvmppc_handle_store(run, vcpu, 253 + emulated = kvmppc_handle_store(vcpu, 252 254 VCPU_FPR(vcpu, op.reg), size, 1); 253 255 254 256 if ((op.type & UPDATE) && (emulated != EMULATE_FAIL)) ··· 288 290 289 291 if (size == 16) { 290 292 vcpu->arch.mmio_vmx_copy_nums = 2; 291 - emulated = kvmppc_handle_vmx_store(run, 292 - vcpu, op.reg, 8, 1); 293 + emulated = kvmppc_handle_vmx_store(vcpu, 294 + op.reg, 8, 1); 293 295 } else { 294 296 vcpu->arch.mmio_vmx_copy_nums = 1; 295 - emulated = kvmppc_handle_vmx_store(run, 296 - vcpu, op.reg, size, 1); 297 + emulated = kvmppc_handle_vmx_store(vcpu, 298 + op.reg, size, 1); 297 299 } 298 300 299 301 break; ··· 336 338 io_size_each = op.element_size; 337 339 } 338 340 339 - emulated = kvmppc_handle_vsx_store(run, vcpu, 341 + emulated = kvmppc_handle_vsx_store(vcpu, 340 342 op.reg, io_size_each, 1); 341 343 break; 342 344 }
+37 -35
arch/powerpc/kvm/powerpc.c
··· 279 279 } 280 280 EXPORT_SYMBOL_GPL(kvmppc_sanity_check); 281 281 282 - int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) 282 + int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu) 283 283 { 284 284 enum emulation_result er; 285 285 int r; ··· 295 295 r = RESUME_GUEST; 296 296 break; 297 297 case EMULATE_DO_MMIO: 298 - run->exit_reason = KVM_EXIT_MMIO; 298 + vcpu->run->exit_reason = KVM_EXIT_MMIO; 299 299 /* We must reload nonvolatiles because "update" load/store 300 300 * instructions modify register state. */ 301 301 /* Future optimization: only reload non-volatiles if they were ··· 1107 1107 #define dp_to_sp(x) (x) 1108 1108 #endif /* CONFIG_PPC_FPU */ 1109 1109 1110 - static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, 1111 - struct kvm_run *run) 1110 + static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu) 1112 1111 { 1112 + struct kvm_run *run = vcpu->run; 1113 1113 u64 uninitialized_var(gpr); 1114 1114 1115 1115 if (run->mmio.len > sizeof(gpr)) { ··· 1219 1219 } 1220 1220 } 1221 1221 1222 - static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 1222 + static int __kvmppc_handle_load(struct kvm_vcpu *vcpu, 1223 1223 unsigned int rt, unsigned int bytes, 1224 1224 int is_default_endian, int sign_extend) 1225 1225 { 1226 + struct kvm_run *run = vcpu->run; 1226 1227 int idx, ret; 1227 1228 bool host_swabbed; 1228 1229 ··· 1257 1256 srcu_read_unlock(&vcpu->kvm->srcu, idx); 1258 1257 1259 1258 if (!ret) { 1260 - kvmppc_complete_mmio_load(vcpu, run); 1259 + kvmppc_complete_mmio_load(vcpu); 1261 1260 vcpu->mmio_needed = 0; 1262 1261 return EMULATE_DONE; 1263 1262 } ··· 1265 1264 return EMULATE_DO_MMIO; 1266 1265 } 1267 1266 1268 - int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 1267 + int kvmppc_handle_load(struct kvm_vcpu *vcpu, 1269 1268 unsigned int rt, unsigned int bytes, 1270 1269 int is_default_endian) 1271 1270 { 1272 - return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 
0); 1271 + return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0); 1273 1272 } 1274 1273 EXPORT_SYMBOL_GPL(kvmppc_handle_load); 1275 1274 1276 1275 /* Same as above, but sign extends */ 1277 - int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, 1276 + int kvmppc_handle_loads(struct kvm_vcpu *vcpu, 1278 1277 unsigned int rt, unsigned int bytes, 1279 1278 int is_default_endian) 1280 1279 { 1281 - return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1); 1280 + return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1); 1282 1281 } 1283 1282 1284 1283 #ifdef CONFIG_VSX 1285 - int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 1284 + int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu, 1286 1285 unsigned int rt, unsigned int bytes, 1287 1286 int is_default_endian, int mmio_sign_extend) 1288 1287 { ··· 1293 1292 return EMULATE_FAIL; 1294 1293 1295 1294 while (vcpu->arch.mmio_vsx_copy_nums) { 1296 - emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, 1295 + emulated = __kvmppc_handle_load(vcpu, rt, bytes, 1297 1296 is_default_endian, mmio_sign_extend); 1298 1297 1299 1298 if (emulated != EMULATE_DONE) 1300 1299 break; 1301 1300 1302 - vcpu->arch.paddr_accessed += run->mmio.len; 1301 + vcpu->arch.paddr_accessed += vcpu->run->mmio.len; 1303 1302 1304 1303 vcpu->arch.mmio_vsx_copy_nums--; 1305 1304 vcpu->arch.mmio_vsx_offset++; ··· 1308 1307 } 1309 1308 #endif /* CONFIG_VSX */ 1310 1309 1311 - int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 1310 + int kvmppc_handle_store(struct kvm_vcpu *vcpu, 1312 1311 u64 val, unsigned int bytes, int is_default_endian) 1313 1312 { 1313 + struct kvm_run *run = vcpu->run; 1314 1314 void *data = run->mmio.data; 1315 1315 int idx, ret; 1316 1316 bool host_swabbed; ··· 1425 1423 return result; 1426 1424 } 1427 1425 1428 - int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 1426 + int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu, 1429 
1427 int rs, unsigned int bytes, int is_default_endian) 1430 1428 { 1431 1429 u64 val; ··· 1441 1439 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) 1442 1440 return EMULATE_FAIL; 1443 1441 1444 - emulated = kvmppc_handle_store(run, vcpu, 1442 + emulated = kvmppc_handle_store(vcpu, 1445 1443 val, bytes, is_default_endian); 1446 1444 1447 1445 if (emulated != EMULATE_DONE) 1448 1446 break; 1449 1447 1450 - vcpu->arch.paddr_accessed += run->mmio.len; 1448 + vcpu->arch.paddr_accessed += vcpu->run->mmio.len; 1451 1449 1452 1450 vcpu->arch.mmio_vsx_copy_nums--; 1453 1451 vcpu->arch.mmio_vsx_offset++; ··· 1456 1454 return emulated; 1457 1455 } 1458 1456 1459 - static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, 1460 - struct kvm_run *run) 1457 + static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu) 1461 1458 { 1459 + struct kvm_run *run = vcpu->run; 1462 1460 enum emulation_result emulated = EMULATE_FAIL; 1463 1461 int r; 1464 1462 1465 1463 vcpu->arch.paddr_accessed += run->mmio.len; 1466 1464 1467 1465 if (!vcpu->mmio_is_write) { 1468 - emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr, 1466 + emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, 1469 1467 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); 1470 1468 } else { 1471 - emulated = kvmppc_handle_vsx_store(run, vcpu, 1469 + emulated = kvmppc_handle_vsx_store(vcpu, 1472 1470 vcpu->arch.io_gpr, run->mmio.len, 1); 1473 1471 } 1474 1472 ··· 1492 1490 #endif /* CONFIG_VSX */ 1493 1491 1494 1492 #ifdef CONFIG_ALTIVEC 1495 - int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, 1493 + int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, 1496 1494 unsigned int rt, unsigned int bytes, int is_default_endian) 1497 1495 { 1498 1496 enum emulation_result emulated = EMULATE_DONE; ··· 1501 1499 return EMULATE_FAIL; 1502 1500 1503 1501 while (vcpu->arch.mmio_vmx_copy_nums) { 1504 - emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, 1502 + emulated = 
__kvmppc_handle_load(vcpu, rt, bytes, 1505 1503 is_default_endian, 0); 1506 1504 1507 1505 if (emulated != EMULATE_DONE) 1508 1506 break; 1509 1507 1510 - vcpu->arch.paddr_accessed += run->mmio.len; 1508 + vcpu->arch.paddr_accessed += vcpu->run->mmio.len; 1511 1509 vcpu->arch.mmio_vmx_copy_nums--; 1512 1510 vcpu->arch.mmio_vmx_offset++; 1513 1511 } ··· 1587 1585 return result; 1588 1586 } 1589 1587 1590 - int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 1588 + int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, 1591 1589 unsigned int rs, unsigned int bytes, int is_default_endian) 1592 1590 { 1593 1591 u64 val = 0; ··· 1622 1620 return EMULATE_FAIL; 1623 1621 } 1624 1622 1625 - emulated = kvmppc_handle_store(run, vcpu, val, bytes, 1623 + emulated = kvmppc_handle_store(vcpu, val, bytes, 1626 1624 is_default_endian); 1627 1625 if (emulated != EMULATE_DONE) 1628 1626 break; 1629 1627 1630 - vcpu->arch.paddr_accessed += run->mmio.len; 1628 + vcpu->arch.paddr_accessed += vcpu->run->mmio.len; 1631 1629 vcpu->arch.mmio_vmx_copy_nums--; 1632 1630 vcpu->arch.mmio_vmx_offset++; 1633 1631 } ··· 1635 1633 return emulated; 1636 1634 } 1637 1635 1638 - static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu, 1639 - struct kvm_run *run) 1636 + static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu) 1640 1637 { 1638 + struct kvm_run *run = vcpu->run; 1641 1639 enum emulation_result emulated = EMULATE_FAIL; 1642 1640 int r; 1643 1641 1644 1642 vcpu->arch.paddr_accessed += run->mmio.len; 1645 1643 1646 1644 if (!vcpu->mmio_is_write) { 1647 - emulated = kvmppc_handle_vmx_load(run, vcpu, 1645 + emulated = kvmppc_handle_vmx_load(vcpu, 1648 1646 vcpu->arch.io_gpr, run->mmio.len, 1); 1649 1647 } else { 1650 - emulated = kvmppc_handle_vmx_store(run, vcpu, 1648 + emulated = kvmppc_handle_vmx_store(vcpu, 1651 1649 vcpu->arch.io_gpr, run->mmio.len, 1); 1652 1650 } 1653 1651 ··· 1777 1775 if (vcpu->mmio_needed) { 1778 1776 vcpu->mmio_needed = 0; 
1779 1777 if (!vcpu->mmio_is_write) 1780 - kvmppc_complete_mmio_load(vcpu, run); 1778 + kvmppc_complete_mmio_load(vcpu); 1781 1779 #ifdef CONFIG_VSX 1782 1780 if (vcpu->arch.mmio_vsx_copy_nums > 0) { 1783 1781 vcpu->arch.mmio_vsx_copy_nums--; ··· 1785 1783 } 1786 1784 1787 1785 if (vcpu->arch.mmio_vsx_copy_nums > 0) { 1788 - r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run); 1786 + r = kvmppc_emulate_mmio_vsx_loadstore(vcpu); 1789 1787 if (r == RESUME_HOST) { 1790 1788 vcpu->mmio_needed = 1; 1791 1789 goto out; ··· 1799 1797 } 1800 1798 1801 1799 if (vcpu->arch.mmio_vmx_copy_nums > 0) { 1802 - r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); 1800 + r = kvmppc_emulate_mmio_vmx_loadstore(vcpu); 1803 1801 if (r == RESUME_HOST) { 1804 1802 vcpu->mmio_needed = 1; 1805 1803 goto out; ··· 1832 1830 if (run->immediate_exit) 1833 1831 r = -EINTR; 1834 1832 else 1835 - r = kvmppc_vcpu_run(run, vcpu); 1833 + r = kvmppc_vcpu_run(vcpu); 1836 1834 1837 1835 kvm_sigset_deactivate(vcpu); 1838 1836
+3 -3
arch/powerpc/kvm/trace_hv.h
··· 472 472 ); 473 473 474 474 TRACE_EVENT(kvmppc_run_vcpu_exit, 475 - TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_run *run), 475 + TP_PROTO(struct kvm_vcpu *vcpu), 476 476 477 - TP_ARGS(vcpu, run), 477 + TP_ARGS(vcpu), 478 478 479 479 TP_STRUCT__entry( 480 480 __field(int, vcpu_id) ··· 484 484 485 485 TP_fast_assign( 486 486 __entry->vcpu_id = vcpu->vcpu_id; 487 - __entry->exit = run->exit_reason; 487 + __entry->exit = vcpu->run->exit_reason; 488 488 __entry->ret = vcpu->arch.ret; 489 489 ), 490 490