Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: Get rid of ARM64_FEATURE_MASK()

The ARM64_FEATURE_MASK() macro was a hack introduced while the
automatic generation of sysreg encodings was being introduced, and was
too unreliable to be entirely trusted.

We are in a better place now, and we could really do without this
macro. Get rid of it altogether.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250817202158.395078-7-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>

authored by

Marc Zyngier and committed by
Oliver Upton
0843e0ce 7a765aa8

+43 -49
-3
arch/arm64/include/asm/sysreg.h
··· 1146 1146 1147 1147 #define ARM64_FEATURE_FIELD_BITS 4 1148 1148 1149 - /* Defined for compatibility only, do not add new users. */ 1150 - #define ARM64_FEATURE_MASK(x) (x##_MASK) 1151 - 1152 1149 #ifdef __ASSEMBLY__ 1153 1150 1154 1151 .macro mrs_s, rt, sreg
+4 -4
arch/arm64/kvm/arm.c
··· 2404 2404 */ 2405 2405 u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); 2406 2406 2407 - val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | 2408 - ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3)); 2407 + val &= ~(ID_AA64PFR0_EL1_CSV2 | 2408 + ID_AA64PFR0_EL1_CSV3); 2409 2409 2410 - val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), 2410 + val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV2, 2411 2411 arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED); 2412 - val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), 2412 + val |= FIELD_PREP(ID_AA64PFR0_EL1_CSV3, 2413 2413 arm64_get_meltdown_state() == SPECTRE_UNAFFECTED); 2414 2414 2415 2415 return val;
+19 -19
arch/arm64/kvm/sys_regs.c
··· 1615 1615 break; 1616 1616 case SYS_ID_AA64ISAR1_EL1: 1617 1617 if (!vcpu_has_ptrauth(vcpu)) 1618 - val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) | 1619 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) | 1620 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) | 1621 - ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI)); 1618 + val &= ~(ID_AA64ISAR1_EL1_APA | 1619 + ID_AA64ISAR1_EL1_API | 1620 + ID_AA64ISAR1_EL1_GPA | 1621 + ID_AA64ISAR1_EL1_GPI); 1622 1622 break; 1623 1623 case SYS_ID_AA64ISAR2_EL1: 1624 1624 if (!vcpu_has_ptrauth(vcpu)) 1625 - val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) | 1626 - ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3)); 1625 + val &= ~(ID_AA64ISAR2_EL1_APA3 | 1626 + ID_AA64ISAR2_EL1_GPA3); 1627 1627 if (!cpus_have_final_cap(ARM64_HAS_WFXT) || 1628 1628 has_broken_cntvoff()) 1629 - val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT); 1629 + val &= ~ID_AA64ISAR2_EL1_WFxT; 1630 1630 break; 1631 1631 case SYS_ID_AA64ISAR3_EL1: 1632 1632 val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_FAMINMAX; ··· 1642 1642 ID_AA64MMFR3_EL1_S1PIE; 1643 1643 break; 1644 1644 case SYS_ID_MMFR4_EL1: 1645 - val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX); 1645 + val &= ~ID_MMFR4_EL1_CCIDX; 1646 1646 break; 1647 1647 } 1648 1648 ··· 1828 1828 u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); 1829 1829 1830 1830 if (!kvm_has_mte(vcpu->kvm)) { 1831 - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE); 1832 - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac); 1831 + val &= ~ID_AA64PFR1_EL1_MTE; 1832 + val &= ~ID_AA64PFR1_EL1_MTE_frac; 1833 1833 } 1834 1834 1835 1835 if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) && 1836 1836 SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP)) 1837 - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RAS_frac); 1837 + val &= ~ID_AA64PFR1_EL1_RAS_frac; 1838 1838 1839 - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME); 1840 - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap); 1841 - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI); 
1842 - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS); 1843 - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE); 1844 - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX); 1845 - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR); 1846 - val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac); 1839 + val &= ~ID_AA64PFR1_EL1_SME; 1840 + val &= ~ID_AA64PFR1_EL1_RNDR_trap; 1841 + val &= ~ID_AA64PFR1_EL1_NMI; 1842 + val &= ~ID_AA64PFR1_EL1_GCS; 1843 + val &= ~ID_AA64PFR1_EL1_THE; 1844 + val &= ~ID_AA64PFR1_EL1_MTEX; 1845 + val &= ~ID_AA64PFR1_EL1_PFAR; 1846 + val &= ~ID_AA64PFR1_EL1_MPAM_frac; 1847 1847 1848 1848 return val; 1849 1849 }
-3
tools/arch/arm64/include/asm/sysreg.h
··· 1080 1080 1081 1081 #define ARM64_FEATURE_FIELD_BITS 4 1082 1082 1083 - /* Defined for compatibility only, do not add new users. */ 1084 - #define ARM64_FEATURE_MASK(x) (x##_MASK) 1085 - 1086 1083 #ifdef __ASSEMBLY__ 1087 1084 1088 1085 .macro mrs_s, rt, sreg
+1 -1
tools/testing/selftests/kvm/arm64/aarch32_id_regs.c
··· 146 146 147 147 val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); 148 148 149 - el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val); 149 + el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val); 150 150 return el0 == ID_AA64PFR0_EL1_EL0_IMP; 151 151 } 152 152
+6 -6
tools/testing/selftests/kvm/arm64/debug-exceptions.c
··· 116 116 117 117 /* Reset all bcr/bvr/wcr/wvr registers */ 118 118 dfr0 = read_sysreg(id_aa64dfr0_el1); 119 - brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), dfr0); 119 + brps = FIELD_GET(ID_AA64DFR0_EL1_BRPs, dfr0); 120 120 for (i = 0; i <= brps; i++) { 121 121 write_dbgbcr(i, 0); 122 122 write_dbgbvr(i, 0); 123 123 } 124 - wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), dfr0); 124 + wrps = FIELD_GET(ID_AA64DFR0_EL1_WRPs, dfr0); 125 125 for (i = 0; i <= wrps; i++) { 126 126 write_dbgwcr(i, 0); 127 127 write_dbgwvr(i, 0); ··· 418 418 419 419 static int debug_version(uint64_t id_aa64dfr0) 420 420 { 421 - return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), id_aa64dfr0); 421 + return FIELD_GET(ID_AA64DFR0_EL1_DebugVer, id_aa64dfr0); 422 422 } 423 423 424 424 static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn) ··· 539 539 int b, w, c; 540 540 541 541 /* Number of breakpoints */ 542 - brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), aa64dfr0) + 1; 542 + brp_num = FIELD_GET(ID_AA64DFR0_EL1_BRPs, aa64dfr0) + 1; 543 543 __TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required"); 544 544 545 545 /* Number of watchpoints */ 546 - wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), aa64dfr0) + 1; 546 + wrp_num = FIELD_GET(ID_AA64DFR0_EL1_WRPs, aa64dfr0) + 1; 547 547 548 548 /* Number of context aware breakpoints */ 549 - ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_CTX_CMPs), aa64dfr0) + 1; 549 + ctx_brp_num = FIELD_GET(ID_AA64DFR0_EL1_CTX_CMPs, aa64dfr0) + 1; 550 550 551 551 pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__, 552 552 brp_num, wrp_num, ctx_brp_num);
+2 -2
tools/testing/selftests/kvm/arm64/no-vgic-v3.c
··· 54 54 * Check that we advertise that ID_AA64PFR0_EL1.GIC == 0, having 55 55 * hidden the feature at runtime without any other userspace action. 56 56 */ 57 - __GUEST_ASSERT(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 57 + __GUEST_ASSERT(FIELD_GET(ID_AA64PFR0_EL1_GIC, 58 58 read_sysreg(id_aa64pfr0_el1)) == 0, 59 59 "GICv3 wrongly advertised"); 60 60 ··· 165 165 166 166 vm = vm_create_with_one_vcpu(&vcpu, NULL); 167 167 pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); 168 - __TEST_REQUIRE(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), pfr0), 168 + __TEST_REQUIRE(FIELD_GET(ID_AA64PFR0_EL1_GIC, pfr0), 169 169 "GICv3 not supported."); 170 170 kvm_vm_free(vm); 171 171
+3 -3
tools/testing/selftests/kvm/arm64/page_fault_test.c
··· 95 95 uint64_t isar0 = read_sysreg(id_aa64isar0_el1); 96 96 uint64_t atomic; 97 97 98 - atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC), isar0); 98 + atomic = FIELD_GET(ID_AA64ISAR0_EL1_ATOMIC, isar0); 99 99 return atomic >= 2; 100 100 } 101 101 102 102 static bool guest_check_dc_zva(void) 103 103 { 104 104 uint64_t dczid = read_sysreg(dczid_el0); 105 - uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_EL0_DZP), dczid); 105 + uint64_t dzp = FIELD_GET(DCZID_EL0_DZP, dczid); 106 106 107 107 return dzp == 0; 108 108 } ··· 195 195 uint64_t hadbs, tcr; 196 196 197 197 /* Skip if HA is not supported. */ 198 - hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS), mmfr1); 198 + hadbs = FIELD_GET(ID_AA64MMFR1_EL1_HAFDBS, mmfr1); 199 199 if (hadbs == 0) 200 200 return false; 201 201
+4 -4
tools/testing/selftests/kvm/arm64/set_id_regs.c
··· 594 594 */ 595 595 val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1)); 596 596 597 - mte = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), val); 598 - mte_frac = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac), val); 597 + mte = FIELD_GET(ID_AA64PFR1_EL1_MTE, val); 598 + mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val); 599 599 if (mte != ID_AA64PFR1_EL1_MTE_MTE2 || 600 600 mte_frac != ID_AA64PFR1_EL1_MTE_frac_NI) { 601 601 ksft_test_result_skip("MTE_ASYNC or MTE_ASYMM are supported, nothing to test\n"); ··· 612 612 } 613 613 614 614 val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1)); 615 - mte_frac = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac), val); 615 + mte_frac = FIELD_GET(ID_AA64PFR1_EL1_MTE_frac, val); 616 616 if (mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI) 617 617 ksft_test_result_pass("ID_AA64PFR1_EL1.MTE_frac=0 accepted and still 0xF\n"); 618 618 else ··· 774 774 775 775 /* Check for AARCH64 only system */ 776 776 val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); 777 - el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val); 777 + el0 = FIELD_GET(ID_AA64PFR0_EL1_EL0, val); 778 778 aarch64_only = (el0 == ID_AA64PFR0_EL1_EL0_IMP); 779 779 780 780 ksft_print_header();
+1 -1
tools/testing/selftests/kvm/arm64/vpmu_counter_access.c
··· 441 441 442 442 /* Make sure that PMUv3 support is indicated in the ID register */ 443 443 dfr0 = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1)); 444 - pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0); 444 + pmuver = FIELD_GET(ID_AA64DFR0_EL1_PMUVer, dfr0); 445 445 TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && 446 446 pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP, 447 447 "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
+3 -3
tools/testing/selftests/kvm/lib/arm64/processor.c
··· 573 573 err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg); 574 574 TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd)); 575 575 576 - gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val); 576 + gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN4, val); 577 577 *ipa4k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN4_NI, 578 578 ID_AA64MMFR0_EL1_TGRAN4_52_BIT); 579 579 580 - gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val); 580 + gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN64, val); 581 581 *ipa64k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN64_NI, 582 582 ID_AA64MMFR0_EL1_TGRAN64_IMP); 583 583 584 - gran = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val); 584 + gran = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN16, val); 585 585 *ipa16k = max_ipa_for_page_size(ipa, gran, ID_AA64MMFR0_EL1_TGRAN16_NI, 586 586 ID_AA64MMFR0_EL1_TGRAN16_52_BIT); 587 587