Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Book3S HV: Set SRR1[PREFIX] bit on injected interrupts

Pass the hypervisor (H)SRR1[PREFIX] indication through to synchronous
interrupts injected into the guest.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230330103224.3589928-3-npiggin@gmail.com

Authored by Nicholas Piggin and committed by Michael Ellerman.
6cd5c1db 460ba21d

+38 -20
+9 -4
arch/powerpc/kvm/book3s_64_mmu_radix.c
··· 954 954 if (dsisr & DSISR_BADACCESS) { 955 955 /* Reflect to the guest as DSI */ 956 956 pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr); 957 - kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr); 957 + kvmppc_core_queue_data_storage(vcpu, 958 + kvmppc_get_msr(vcpu) & SRR1_PREFIXED, 959 + ea, dsisr); 958 960 return RESUME_GUEST; 959 961 } 960 962 ··· 981 979 * Bad address in guest page table tree, or other 982 980 * unusual error - reflect it to the guest as DSI. 983 981 */ 984 - kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr); 982 + kvmppc_core_queue_data_storage(vcpu, 983 + kvmppc_get_msr(vcpu) & SRR1_PREFIXED, 984 + ea, dsisr); 985 985 return RESUME_GUEST; 986 986 } 987 987 return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing); ··· 992 988 if (memslot->flags & KVM_MEM_READONLY) { 993 989 if (writing) { 994 990 /* give the guest a DSI */ 995 - kvmppc_core_queue_data_storage(vcpu, 0, ea, 996 - DSISR_ISSTORE | DSISR_PROTFAULT); 991 + kvmppc_core_queue_data_storage(vcpu, 992 + kvmppc_get_msr(vcpu) & SRR1_PREFIXED, 993 + ea, DSISR_ISSTORE | DSISR_PROTFAULT); 997 994 return RESUME_GUEST; 998 995 } 999 996 kvm_ro = true;
+18 -9
arch/powerpc/kvm/book3s_hv.c
··· 1428 1428 vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); 1429 1429 return RESUME_HOST; 1430 1430 } else { 1431 - kvmppc_core_queue_program(vcpu, SRR1_PROGILL); 1431 + kvmppc_core_queue_program(vcpu, SRR1_PROGILL | 1432 + (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); 1432 1433 return RESUME_GUEST; 1433 1434 } 1434 1435 } ··· 1631 1630 * so that it knows that the machine check occurred. 1632 1631 */ 1633 1632 if (!vcpu->kvm->arch.fwnmi_enabled) { 1634 - ulong flags = vcpu->arch.shregs.msr & 0x083c0000; 1633 + ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) | 1634 + (kvmppc_get_msr(vcpu) & SRR1_PREFIXED); 1635 1635 kvmppc_core_queue_machine_check(vcpu, flags); 1636 1636 r = RESUME_GUEST; 1637 1637 break; ··· 1661 1659 * as a result of a hypervisor emulation interrupt 1662 1660 * (e40) getting turned into a 700 by BML RTAS. 1663 1661 */ 1664 - flags = vcpu->arch.shregs.msr & 0x1f0000ull; 1662 + flags = (vcpu->arch.shregs.msr & 0x1f0000ull) | 1663 + (kvmppc_get_msr(vcpu) & SRR1_PREFIXED); 1665 1664 kvmppc_core_queue_program(vcpu, flags); 1666 1665 r = RESUME_GUEST; 1667 1666 break; ··· 1742 1739 } 1743 1740 1744 1741 if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) { 1745 - kvmppc_core_queue_data_storage(vcpu, 0, 1742 + kvmppc_core_queue_data_storage(vcpu, 1743 + kvmppc_get_msr(vcpu) & SRR1_PREFIXED, 1746 1744 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); 1747 1745 r = RESUME_GUEST; 1748 1746 break; ··· 1761 1757 } else if (err == -1 || err == -2) { 1762 1758 r = RESUME_PAGE_FAULT; 1763 1759 } else { 1764 - kvmppc_core_queue_data_storage(vcpu, 0, 1760 + kvmppc_core_queue_data_storage(vcpu, 1761 + kvmppc_get_msr(vcpu) & SRR1_PREFIXED, 1765 1762 vcpu->arch.fault_dar, err); 1766 1763 r = RESUME_GUEST; 1767 1764 } ··· 1790 1785 1791 1786 if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) { 1792 1787 kvmppc_core_queue_inst_storage(vcpu, 1793 - vcpu->arch.fault_dsisr); 1788 + vcpu->arch.fault_dsisr | 1789 + (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); 1794 
1790 r = RESUME_GUEST; 1795 1791 break; 1796 1792 } ··· 1808 1802 } else if (err == -1) { 1809 1803 r = RESUME_PAGE_FAULT; 1810 1804 } else { 1811 - kvmppc_core_queue_inst_storage(vcpu, err); 1805 + kvmppc_core_queue_inst_storage(vcpu, 1806 + err | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); 1812 1807 r = RESUME_GUEST; 1813 1808 } 1814 1809 break; ··· 1830 1823 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { 1831 1824 r = kvmppc_emulate_debug_inst(vcpu); 1832 1825 } else { 1833 - kvmppc_core_queue_program(vcpu, SRR1_PROGILL); 1826 + kvmppc_core_queue_program(vcpu, SRR1_PROGILL | 1827 + (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); 1834 1828 r = RESUME_GUEST; 1835 1829 } 1836 1830 break; ··· 1872 1864 r = kvmppc_tm_unavailable(vcpu); 1873 1865 } 1874 1866 if (r == EMULATE_FAIL) { 1875 - kvmppc_core_queue_program(vcpu, SRR1_PROGILL); 1867 + kvmppc_core_queue_program(vcpu, SRR1_PROGILL | 1868 + (kvmppc_get_msr(vcpu) & SRR1_PREFIXED)); 1876 1869 r = RESUME_GUEST; 1877 1870 } 1878 1871 break;
+6 -3
arch/powerpc/kvm/book3s_hv_nested.c
··· 1560 1560 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { 1561 1561 if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) { 1562 1562 /* unusual error -> reflect to the guest as a DSI */ 1563 - kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr); 1563 + kvmppc_core_queue_data_storage(vcpu, 1564 + kvmppc_get_msr(vcpu) & SRR1_PREFIXED, 1565 + ea, dsisr); 1564 1566 return RESUME_GUEST; 1565 1567 } 1566 1568 ··· 1572 1570 if (memslot->flags & KVM_MEM_READONLY) { 1573 1571 if (writing) { 1574 1572 /* Give the guest a DSI */ 1575 - kvmppc_core_queue_data_storage(vcpu, 0, ea, 1576 - DSISR_ISSTORE | DSISR_PROTFAULT); 1573 + kvmppc_core_queue_data_storage(vcpu, 1574 + kvmppc_get_msr(vcpu) & SRR1_PREFIXED, 1575 + ea, DSISR_ISSTORE | DSISR_PROTFAULT); 1577 1576 return RESUME_GUEST; 1578 1577 } 1579 1578 kvm_ro = true;
+3 -3
arch/powerpc/kvm/emulate_loadstore.c
··· 28 28 static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu) 29 29 { 30 30 if (!(kvmppc_get_msr(vcpu) & MSR_FP)) { 31 - kvmppc_core_queue_fpunavail(vcpu, 0); 31 + kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED); 32 32 return true; 33 33 } 34 34 ··· 40 40 static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu) 41 41 { 42 42 if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) { 43 - kvmppc_core_queue_vsx_unavail(vcpu, 0); 43 + kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED); 44 44 return true; 45 45 } 46 46 ··· 52 52 static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu) 53 53 { 54 54 if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) { 55 - kvmppc_core_queue_vec_unavail(vcpu, 0); 55 + kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED); 56 56 return true; 57 57 } 58 58
+2 -1
arch/powerpc/kvm/powerpc.c
··· 321 321 if (vcpu->mmio_is_write) 322 322 dsisr |= DSISR_ISSTORE; 323 323 324 - kvmppc_core_queue_data_storage(vcpu, 0, 324 + kvmppc_core_queue_data_storage(vcpu, 325 + kvmppc_get_msr(vcpu) & SRR1_PREFIXED, 325 326 vcpu->arch.vaddr_accessed, dsisr); 326 327 } else { 327 328 /*