Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: nv: Forward FEAT_XNX permissions to the shadow stage-2

Add support for FEAT_XNX to shadow stage-2 MMUs, being careful to only
evaluate XN[0] when the feature is actually exposed to the VM.
Restructure the layering of permissions in the fault handler to assume
pX and uX, then restrict based on the guest's stage-2 afterwards.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Tested-by: Marc Zyngier <maz@kernel.org>
Link: https://msgid.link/20251124190158.177318-4-oupton@kernel.org
Signed-off-by: Oliver Upton <oupton@kernel.org>

+57 -8
+35 -2
arch/arm64/include/asm/kvm_nested.h
··· 120 120 return trans->writable; 121 121 } 122 122 123 - static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans) 123 + static inline bool kvm_has_xnx(struct kvm *kvm) 124 124 { 125 - return !(trans->desc & BIT(54)); 125 + return cpus_have_final_cap(ARM64_HAS_XNX) && 126 + kvm_has_feat(kvm, ID_AA64MMFR1_EL1, XNX, IMP); 127 + } 128 + 129 + static inline bool kvm_s2_trans_exec_el0(struct kvm *kvm, struct kvm_s2_trans *trans) 130 + { 131 + u8 xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, trans->desc); 132 + 133 + if (!kvm_has_xnx(kvm)) 134 + xn &= FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, 0b10); 135 + 136 + switch (xn) { 137 + case 0b00: 138 + case 0b01: 139 + return true; 140 + default: 141 + return false; 142 + } 143 + } 144 + 145 + static inline bool kvm_s2_trans_exec_el1(struct kvm *kvm, struct kvm_s2_trans *trans) 146 + { 147 + u8 xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, trans->desc); 148 + 149 + if (!kvm_has_xnx(kvm)) 150 + xn &= FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, 0b10); 151 + 152 + switch (xn) { 153 + case 0b00: 154 + case 0b11: 155 + return true; 156 + default: 157 + return false; 158 + } 126 159 } 127 160 128 161 extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
+18 -5
arch/arm64/kvm/mmu.c
··· 1521 1521 *prot |= kvm_encode_nested_level(nested); 1522 1522 } 1523 1523 1524 + static void adjust_nested_exec_perms(struct kvm *kvm, 1525 + struct kvm_s2_trans *nested, 1526 + enum kvm_pgtable_prot *prot) 1527 + { 1528 + if (!kvm_s2_trans_exec_el0(kvm, nested)) 1529 + *prot &= ~KVM_PGTABLE_PROT_UX; 1530 + if (!kvm_s2_trans_exec_el1(kvm, nested)) 1531 + *prot &= ~KVM_PGTABLE_PROT_PX; 1532 + } 1533 + 1524 1534 #define KVM_PGTABLE_WALK_MEMABORT_FLAGS (KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED) 1525 1535 1526 1536 static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ··· 1582 1572 if (writable) 1583 1573 prot |= KVM_PGTABLE_PROT_W; 1584 1574 1585 - if (exec_fault || 1586 - (cpus_have_final_cap(ARM64_HAS_CACHE_DIC) && 1587 - (!nested || kvm_s2_trans_executable(nested)))) 1575 + if (exec_fault || cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) 1588 1576 prot |= KVM_PGTABLE_PROT_X; 1577 + 1578 + if (nested) 1579 + adjust_nested_exec_perms(kvm, nested, &prot); 1589 1580 1590 1581 kvm_fault_lock(kvm); 1591 1582 if (mmu_invalidate_retry(kvm, mmu_seq)) { ··· 1862 1851 prot |= KVM_PGTABLE_PROT_NORMAL_NC; 1863 1852 else 1864 1853 prot |= KVM_PGTABLE_PROT_DEVICE; 1865 - } else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC) && 1866 - (!nested || kvm_s2_trans_executable(nested))) { 1854 + } else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) { 1867 1855 prot |= KVM_PGTABLE_PROT_X; 1868 1856 } 1857 + 1858 + if (nested) 1859 + adjust_nested_exec_perms(kvm, nested, &prot); 1869 1860 1870 1861 /* 1871 1862 * Under the premise of getting a FSC_PERM fault, we just need to relax
+4 -1
arch/arm64/kvm/nested.c
··· 788 788 return 0; 789 789 790 790 if (kvm_vcpu_trap_is_iabt(vcpu)) { 791 - forward_fault = !kvm_s2_trans_executable(trans); 791 + if (vcpu_mode_priv(vcpu)) 792 + forward_fault = !kvm_s2_trans_exec_el1(vcpu->kvm, trans); 793 + else 794 + forward_fault = !kvm_s2_trans_exec_el0(vcpu->kvm, trans); 792 795 } else { 793 796 bool write_fault = kvm_is_write_fault(vcpu); 794 797