Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'kvmarm-fixes-6.1-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

* Fix the pKVM stage-1 walker erroneously using the stage-2 accessor

* Correctly convert vcpu->kvm to a hyp pointer when generating
an exception in a nVHE+MTE configuration

* Check that KVM_CAP_DIRTY_LOG_* are valid before enabling them

* Fix SMPRI_EL1/TPIDR2_EL0 trapping on VHE

* Document the boot requirements for FGT when entering the kernel
at EL1

+34 -36
+8
Documentation/arm64/booting.rst
··· 340 340 - SMCR_EL2.LEN must be initialised to the same value for all CPUs the 341 341 kernel will execute on. 342 342 343 + - HFGRTR_EL2.nTPIDR2_EL0 (bit 55) must be initialised to 0b01. 344 + 345 + - HFGWTR_EL2.nTPIDR2_EL0 (bit 55) must be initialised to 0b01. 346 + 347 + - HFGRTR_EL2.nSMPRI_EL1 (bit 54) must be initialised to 0b01. 348 + 349 + - HFGWTR_EL2.nSMPRI_EL1 (bit 54) must be initialised to 0b01. 350 + 343 351 For CPUs with the Scalable Matrix Extension FA64 feature (FEAT_SME_FA64) 344 352 345 353 - If EL3 is present:
+2 -1
arch/arm64/kvm/hyp/exception.c
··· 13 13 #include <hyp/adjust_pc.h> 14 14 #include <linux/kvm_host.h> 15 15 #include <asm/kvm_emulate.h> 16 + #include <asm/kvm_mmu.h> 16 17 17 18 #if !defined (__KVM_NVHE_HYPERVISOR__) && !defined (__KVM_VHE_HYPERVISOR__) 18 19 #error Hypervisor code only! ··· 116 115 new |= (old & PSR_C_BIT); 117 116 new |= (old & PSR_V_BIT); 118 117 119 - if (kvm_has_mte(vcpu->kvm)) 118 + if (kvm_has_mte(kern_hyp_va(vcpu->kvm))) 120 119 new |= PSR_TCO_BIT; 121 120 122 121 new |= (old & PSR_DIT_BIT);
+20
arch/arm64/kvm/hyp/include/hyp/switch.h
··· 87 87 88 88 vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2); 89 89 write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); 90 + 91 + if (cpus_have_final_cap(ARM64_SME)) { 92 + sysreg_clear_set_s(SYS_HFGRTR_EL2, 93 + HFGxTR_EL2_nSMPRI_EL1_MASK | 94 + HFGxTR_EL2_nTPIDR2_EL0_MASK, 95 + 0); 96 + sysreg_clear_set_s(SYS_HFGWTR_EL2, 97 + HFGxTR_EL2_nSMPRI_EL1_MASK | 98 + HFGxTR_EL2_nTPIDR2_EL0_MASK, 99 + 0); 100 + } 90 101 } 91 102 92 103 static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) ··· 107 96 write_sysreg(0, hstr_el2); 108 97 if (kvm_arm_support_pmu_v3()) 109 98 write_sysreg(0, pmuserenr_el0); 99 + 100 + if (cpus_have_final_cap(ARM64_SME)) { 101 + sysreg_clear_set_s(SYS_HFGRTR_EL2, 0, 102 + HFGxTR_EL2_nSMPRI_EL1_MASK | 103 + HFGxTR_EL2_nTPIDR2_EL0_MASK); 104 + sysreg_clear_set_s(SYS_HFGWTR_EL2, 0, 105 + HFGxTR_EL2_nSMPRI_EL1_MASK | 106 + HFGxTR_EL2_nTPIDR2_EL0_MASK); 107 + } 110 108 } 111 109 112 110 static inline void ___activate_traps(struct kvm_vcpu *vcpu)
+1 -1
arch/arm64/kvm/hyp/nvhe/mem_protect.c
··· 516 516 if (!kvm_pte_valid(pte)) 517 517 return PKVM_NOPAGE; 518 518 519 - return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte)); 519 + return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte)); 520 520 } 521 521 522 522 static int __hyp_check_page_state_range(u64 addr, u64 size,
-26
arch/arm64/kvm/hyp/nvhe/switch.c
··· 55 55 write_sysreg(val, cptr_el2); 56 56 write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2); 57 57 58 - if (cpus_have_final_cap(ARM64_SME)) { 59 - val = read_sysreg_s(SYS_HFGRTR_EL2); 60 - val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK | 61 - HFGxTR_EL2_nSMPRI_EL1_MASK); 62 - write_sysreg_s(val, SYS_HFGRTR_EL2); 63 - 64 - val = read_sysreg_s(SYS_HFGWTR_EL2); 65 - val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK | 66 - HFGxTR_EL2_nSMPRI_EL1_MASK); 67 - write_sysreg_s(val, SYS_HFGWTR_EL2); 68 - } 69 - 70 58 if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { 71 59 struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; 72 60 ··· 97 109 __deactivate_traps_common(vcpu); 98 110 99 111 write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2); 100 - 101 - if (cpus_have_final_cap(ARM64_SME)) { 102 - u64 val; 103 - 104 - val = read_sysreg_s(SYS_HFGRTR_EL2); 105 - val |= HFGxTR_EL2_nTPIDR2_EL0_MASK | 106 - HFGxTR_EL2_nSMPRI_EL1_MASK; 107 - write_sysreg_s(val, SYS_HFGRTR_EL2); 108 - 109 - val = read_sysreg_s(SYS_HFGWTR_EL2); 110 - val |= HFGxTR_EL2_nTPIDR2_EL0_MASK | 111 - HFGxTR_EL2_nSMPRI_EL1_MASK; 112 - write_sysreg_s(val, SYS_HFGWTR_EL2); 113 - } 114 112 115 113 cptr = CPTR_EL2_DEFAULT; 116 114 if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
-8
arch/arm64/kvm/hyp/vhe/switch.c
··· 63 63 __activate_traps_fpsimd32(vcpu); 64 64 } 65 65 66 - if (cpus_have_final_cap(ARM64_SME)) 67 - write_sysreg(read_sysreg(sctlr_el2) & ~SCTLR_ELx_ENTP2, 68 - sctlr_el2); 69 - 70 66 write_sysreg(val, cpacr_el1); 71 67 72 68 write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1); ··· 83 87 * the host. 84 88 */ 85 89 asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT)); 86 - 87 - if (cpus_have_final_cap(ARM64_SME)) 88 - write_sysreg(read_sysreg(sctlr_el2) | SCTLR_ELx_ENTP2, 89 - sctlr_el2); 90 90 91 91 write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); 92 92
+3
virt/kvm/kvm_main.c
··· 4585 4585 } 4586 4586 case KVM_CAP_DIRTY_LOG_RING: 4587 4587 case KVM_CAP_DIRTY_LOG_RING_ACQ_REL: 4588 + if (!kvm_vm_ioctl_check_extension_generic(kvm, cap->cap)) 4589 + return -EINVAL; 4590 + 4588 4591 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); 4589 4592 default: 4590 4593 return kvm_vm_ioctl_enable_cap(kvm, cap);