Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Make kvmppc_get_last_inst() produce a ppc_inst_t

This changes kvmppc_get_last_inst() so that the instruction it fetches
is returned in a ppc_inst_t variable rather than a u32. This will
allow us to return a 64-bit prefixed instruction on those 64-bit
machines that implement Power ISA v3.1 or later, such as POWER10.
On 32-bit platforms, ppc_inst_t is 32 bits wide, and is turned back
into a u32 by ppc_inst_val, which is an identity operation on those
platforms.

Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Tested-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/ZAgsiPlL9O7KnlZZ@cleo

Authored by Paul Mackerras; committed by Michael Ellerman.
acf17878 6cd5c1db

+47 -31
+3 -2
arch/powerpc/include/asm/kvm_ppc.h
··· 28 28 #include <asm/xive.h> 29 29 #include <asm/cpu_has_feature.h> 30 30 #endif 31 + #include <asm/inst.h> 31 32 32 33 /* 33 34 * KVMPPC_INST_SW_BREAKPOINT is debug Instruction ··· 325 324 extern struct kvmppc_ops *kvmppc_pr_ops; 326 325 327 326 static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu, 328 - enum instruction_fetch_type type, u32 *inst) 327 + enum instruction_fetch_type type, ppc_inst_t *inst) 329 328 { 330 329 int ret = EMULATE_DONE; 331 330 u32 fetched_inst; ··· 343 342 else 344 343 fetched_inst = vcpu->arch.last_inst; 345 344 346 - *inst = fetched_inst; 345 + *inst = ppc_inst(fetched_inst); 347 346 return ret; 348 347 } 349 348
+8 -4
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 415 415 * embodied here.) If the instruction isn't a load or store, then 416 416 * this doesn't return anything useful. 417 417 */ 418 - static int instruction_is_store(unsigned int instr) 418 + static int instruction_is_store(ppc_inst_t instr) 419 419 { 420 420 unsigned int mask; 421 + unsigned int suffix; 421 422 422 423 mask = 0x10000000; 423 - if ((instr & 0xfc000000) == 0x7c000000) 424 + suffix = ppc_inst_val(instr); 425 + if (ppc_inst_prefixed(instr)) 426 + suffix = ppc_inst_suffix(instr); 427 + else if ((suffix & 0xfc000000) == 0x7c000000) 424 428 mask = 0x100; /* major opcode 31 */ 425 - return (instr & mask) != 0; 429 + return (suffix & mask) != 0; 426 430 } 427 431 428 432 int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu, 429 433 unsigned long gpa, gva_t ea, int is_store) 430 434 { 431 - u32 last_inst; 435 + ppc_inst_t last_inst; 432 436 433 437 /* 434 438 * Fast path - check if the guest physical address corresponds to a
+8 -5
arch/powerpc/kvm/book3s_hv.c
··· 1412 1412 1413 1413 static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu) 1414 1414 { 1415 - u32 last_inst; 1415 + ppc_inst_t last_inst; 1416 1416 1417 1417 if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != 1418 1418 EMULATE_DONE) { ··· 1423 1423 return RESUME_GUEST; 1424 1424 } 1425 1425 1426 - if (last_inst == KVMPPC_INST_SW_BREAKPOINT) { 1426 + if (ppc_inst_val(last_inst) == KVMPPC_INST_SW_BREAKPOINT) { 1427 1427 vcpu->run->exit_reason = KVM_EXIT_DEBUG; 1428 1428 vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); 1429 1429 return RESUME_HOST; ··· 1477 1477 unsigned long arg; 1478 1478 struct kvm *kvm = vcpu->kvm; 1479 1479 struct kvm_vcpu *tvcpu; 1480 + ppc_inst_t pinst; 1480 1481 1481 - if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE) 1482 + if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst) != EMULATE_DONE) 1482 1483 return RESUME_GUEST; 1484 + inst = ppc_inst_val(pinst); 1483 1485 if (get_op(inst) != 31) 1484 1486 return EMULATE_FAIL; 1485 1487 rb = get_rb(inst); ··· 2005 2003 */ 2006 2004 if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) || 2007 2005 (vcpu->arch.nested_hfscr & (1UL << cause))) { 2006 + ppc_inst_t pinst; 2008 2007 vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST; 2009 2008 2010 2009 /* 2011 2010 * If the fetch failed, return to guest and 2012 2011 * try executing it again. 2013 2012 */ 2014 - r = kvmppc_get_last_inst(vcpu, INST_GENERIC, 2015 - &vcpu->arch.emul_inst); 2013 + r = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst); 2014 + vcpu->arch.emul_inst = ppc_inst_val(pinst); 2016 2015 if (r != EMULATE_DONE) 2017 2016 r = RESUME_GUEST; 2018 2017 else
+3 -1
arch/powerpc/kvm/book3s_paired_singles.c
··· 621 621 int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu) 622 622 { 623 623 u32 inst; 624 + ppc_inst_t pinst; 624 625 enum emulation_result emulated = EMULATE_DONE; 625 626 int ax_rd, ax_ra, ax_rb, ax_rc; 626 627 short full_d; ··· 633 632 int i; 634 633 #endif 635 634 636 - emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst); 635 + emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst); 636 + inst = ppc_inst_val(pinst); 637 637 if (emulated != EMULATE_DONE) 638 638 return emulated; 639 639
+10 -10
arch/powerpc/kvm/book3s_pr.c
··· 1079 1079 { 1080 1080 enum emulation_result er; 1081 1081 ulong flags; 1082 - u32 last_inst; 1082 + ppc_inst_t last_inst; 1083 1083 int emul, r; 1084 1084 1085 1085 /* ··· 1100 1100 if (kvmppc_get_msr(vcpu) & MSR_PR) { 1101 1101 #ifdef EXIT_DEBUG 1102 1102 pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n", 1103 - kvmppc_get_pc(vcpu), last_inst); 1103 + kvmppc_get_pc(vcpu), ppc_inst_val(last_inst)); 1104 1104 #endif 1105 - if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) { 1105 + if ((ppc_inst_val(last_inst) & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) { 1106 1106 kvmppc_core_queue_program(vcpu, flags); 1107 1107 return RESUME_GUEST; 1108 1108 } ··· 1119 1119 break; 1120 1120 case EMULATE_FAIL: 1121 1121 pr_crit("%s: emulation at %lx failed (%08x)\n", 1122 - __func__, kvmppc_get_pc(vcpu), last_inst); 1122 + __func__, kvmppc_get_pc(vcpu), ppc_inst_val(last_inst)); 1123 1123 kvmppc_core_queue_program(vcpu, flags); 1124 1124 r = RESUME_GUEST; 1125 1125 break; ··· 1281 1281 break; 1282 1282 case BOOK3S_INTERRUPT_SYSCALL: 1283 1283 { 1284 - u32 last_sc; 1284 + ppc_inst_t last_sc; 1285 1285 int emul; 1286 1286 1287 1287 /* Get last sc for papr */ ··· 1296 1296 } 1297 1297 1298 1298 if (vcpu->arch.papr_enabled && 1299 - (last_sc == 0x44000022) && 1299 + (ppc_inst_val(last_sc) == 0x44000022) && 1300 1300 !(kvmppc_get_msr(vcpu) & MSR_PR)) { 1301 1301 /* SC 1 papr hypercalls */ 1302 1302 ulong cmd = kvmppc_get_gpr(vcpu, 3); ··· 1348 1348 { 1349 1349 int ext_msr = 0; 1350 1350 int emul; 1351 - u32 last_inst; 1351 + ppc_inst_t last_inst; 1352 1352 1353 1353 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) { 1354 1354 /* Do paired single instruction emulation */ ··· 1382 1382 } 1383 1383 case BOOK3S_INTERRUPT_ALIGNMENT: 1384 1384 { 1385 - u32 last_inst; 1385 + ppc_inst_t last_inst; 1386 1386 int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); 1387 1387 1388 1388 if (emul == EMULATE_DONE) { 1389 1389 u32 dsisr; 1390 1390 u64 dar; 1391 1391 
1392 - dsisr = kvmppc_alignment_dsisr(vcpu, last_inst); 1393 - dar = kvmppc_alignment_dar(vcpu, last_inst); 1392 + dsisr = kvmppc_alignment_dsisr(vcpu, ppc_inst_val(last_inst)); 1393 + dar = kvmppc_alignment_dar(vcpu, ppc_inst_val(last_inst)); 1394 1394 1395 1395 kvmppc_set_dsisr(vcpu, dsisr); 1396 1396 kvmppc_set_dar(vcpu, dar);
+7 -3
arch/powerpc/kvm/booke.c
··· 1015 1015 int s; 1016 1016 int idx; 1017 1017 u32 last_inst = KVM_INST_FETCH_FAILED; 1018 + ppc_inst_t pinst; 1018 1019 enum emulation_result emulated = EMULATE_DONE; 1019 1020 1020 1021 /* Fix irq state (pairs with kvmppc_fix_ee_before_entry()) */ ··· 1035 1034 case BOOKE_INTERRUPT_DATA_STORAGE: 1036 1035 case BOOKE_INTERRUPT_DTLB_MISS: 1037 1036 case BOOKE_INTERRUPT_HV_PRIV: 1038 - emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); 1037 + emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst); 1038 + last_inst = ppc_inst_val(pinst); 1039 1039 break; 1040 1040 case BOOKE_INTERRUPT_PROGRAM: 1041 1041 /* SW breakpoints arrive as illegal instructions on HV */ 1042 - if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 1043 - emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); 1042 + if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { 1043 + emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst); 1044 + last_inst = ppc_inst_val(pinst); 1045 + } 1044 1046 break; 1045 1047 default: 1046 1048 break;
+3 -1
arch/powerpc/kvm/emulate.c
··· 194 194 int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu) 195 195 { 196 196 u32 inst; 197 + ppc_inst_t pinst; 197 198 int rs, rt, sprn; 198 199 enum emulation_result emulated; 199 200 int advance = 1; ··· 202 201 /* this default type might be overwritten by subcategories */ 203 202 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); 204 203 205 - emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst); 204 + emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst); 205 + inst = ppc_inst_val(pinst); 206 206 if (emulated != EMULATE_DONE) 207 207 return emulated; 208 208
+3 -3
arch/powerpc/kvm/emulate_loadstore.c
··· 71 71 */ 72 72 int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) 73 73 { 74 - u32 inst; 74 + ppc_inst_t inst; 75 75 enum emulation_result emulated = EMULATE_FAIL; 76 76 struct instruction_op op; 77 77 ··· 93 93 94 94 emulated = EMULATE_FAIL; 95 95 vcpu->arch.regs.msr = vcpu->arch.shared->msr; 96 - if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) { 96 + if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) { 97 97 int type = op.type & INSTR_TYPE_MASK; 98 98 int size = GETSIZE(op.type); 99 99 ··· 356 356 } 357 357 } 358 358 359 - trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated); 359 + trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated); 360 360 361 361 /* Advance past emulated instruction. */ 362 362 if (emulated != EMULATE_FAIL)
+2 -2
arch/powerpc/kvm/powerpc.c
··· 304 304 break; 305 305 case EMULATE_FAIL: 306 306 { 307 - u32 last_inst; 307 + ppc_inst_t last_inst; 308 308 309 309 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); 310 310 kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n", 311 - last_inst); 311 + ppc_inst_val(last_inst)); 312 312 313 313 /* 314 314 * Injecting a Data Storage here is a bit more