Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: selftests: Introduce and use hardware-definition macros

The kvm selftest library for arm64 currently configures the hardware
fields, such as shift and mask in the page-table entries and registers,
directly with numbers. While it adds comments in places, it's better to
rewrite them with appropriate macros to improve the readability and
reduce the risk of errors. Hence, introduce macros to define the
hardware fields and use them in the arm64 processor library.

Most of the definitions are primarily copied from Linux's header,
arch/arm64/include/asm/pgtable-hwdef.h.

No functional change intended.

Suggested-by: Oliver Upton <oupton@google.com>
Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
Link: https://lore.kernel.org/r/20250405001042.1470552-2-rananta@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>

authored by

Raghavendra Rao Ananta and committed by
Oliver Upton
d8d78398 26fbdf36

+92 -33
+1 -1
tools/testing/selftests/kvm/arm64/page_fault_test.c
··· 199 199 if (hadbs == 0) 200 200 return false; 201 201 202 - tcr = read_sysreg(tcr_el1) | TCR_EL1_HA; 202 + tcr = read_sysreg(tcr_el1) | TCR_HA; 203 203 write_sysreg(tcr, tcr_el1); 204 204 isb(); 205 205
+60 -6
tools/testing/selftests/kvm/include/arm64/processor.h
··· 62 62 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \ 63 63 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT)) 64 64 65 + /* TCR_EL1 specific flags */ 66 + #define TCR_T0SZ_OFFSET 0 67 + #define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_T0SZ_OFFSET) 68 + 69 + #define TCR_IRGN0_SHIFT 8 70 + #define TCR_IRGN0_MASK (UL(3) << TCR_IRGN0_SHIFT) 71 + #define TCR_IRGN0_NC (UL(0) << TCR_IRGN0_SHIFT) 72 + #define TCR_IRGN0_WBWA (UL(1) << TCR_IRGN0_SHIFT) 73 + #define TCR_IRGN0_WT (UL(2) << TCR_IRGN0_SHIFT) 74 + #define TCR_IRGN0_WBnWA (UL(3) << TCR_IRGN0_SHIFT) 75 + 76 + #define TCR_ORGN0_SHIFT 10 77 + #define TCR_ORGN0_MASK (UL(3) << TCR_ORGN0_SHIFT) 78 + #define TCR_ORGN0_NC (UL(0) << TCR_ORGN0_SHIFT) 79 + #define TCR_ORGN0_WBWA (UL(1) << TCR_ORGN0_SHIFT) 80 + #define TCR_ORGN0_WT (UL(2) << TCR_ORGN0_SHIFT) 81 + #define TCR_ORGN0_WBnWA (UL(3) << TCR_ORGN0_SHIFT) 82 + 83 + #define TCR_SH0_SHIFT 12 84 + #define TCR_SH0_MASK (UL(3) << TCR_SH0_SHIFT) 85 + #define TCR_SH0_INNER (UL(3) << TCR_SH0_SHIFT) 86 + 87 + #define TCR_TG0_SHIFT 14 88 + #define TCR_TG0_MASK (UL(3) << TCR_TG0_SHIFT) 89 + #define TCR_TG0_4K (UL(0) << TCR_TG0_SHIFT) 90 + #define TCR_TG0_64K (UL(1) << TCR_TG0_SHIFT) 91 + #define TCR_TG0_16K (UL(2) << TCR_TG0_SHIFT) 92 + 93 + #define TCR_IPS_SHIFT 32 94 + #define TCR_IPS_MASK (UL(7) << TCR_IPS_SHIFT) 95 + #define TCR_IPS_52_BITS (UL(6) << TCR_IPS_SHIFT) 96 + #define TCR_IPS_48_BITS (UL(5) << TCR_IPS_SHIFT) 97 + #define TCR_IPS_40_BITS (UL(2) << TCR_IPS_SHIFT) 98 + #define TCR_IPS_36_BITS (UL(1) << TCR_IPS_SHIFT) 99 + 100 + #define TCR_HA (UL(1) << 39) 101 + #define TCR_DS (UL(1) << 59) 102 + 103 + /* 104 + * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). 
105 + */ 106 + #define PTE_ATTRINDX(t) ((t) << 2) 107 + #define PTE_ATTRINDX_MASK GENMASK(4, 2) 108 + #define PTE_ATTRINDX_SHIFT 2 109 + 110 + #define PTE_VALID BIT(0) 111 + #define PGD_TYPE_TABLE BIT(1) 112 + #define PUD_TYPE_TABLE BIT(1) 113 + #define PMD_TYPE_TABLE BIT(1) 114 + #define PTE_TYPE_PAGE BIT(1) 115 + 116 + #define PTE_AF BIT(10) 117 + 118 + #define PTE_ADDR_MASK(page_shift) GENMASK(47, (page_shift)) 119 + #define PTE_ADDR_51_48 GENMASK(15, 12) 120 + #define PTE_ADDR_51_48_SHIFT 12 121 + #define PTE_ADDR_MASK_LPA2(page_shift) GENMASK(49, (page_shift)) 122 + #define PTE_ADDR_51_50_LPA2 GENMASK(9, 8) 123 + #define PTE_ADDR_51_50_LPA2_SHIFT 8 124 + 65 125 void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init); 66 126 struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, 67 127 struct kvm_vcpu_init *init, void *guest_code); ··· 161 101 (v) == VECTOR_SYNC_CURRENT || \ 162 102 (v) == VECTOR_SYNC_LOWER_64 || \ 163 103 (v) == VECTOR_SYNC_LOWER_32) 164 - 165 - /* Access flag */ 166 - #define PTE_AF (1ULL << 10) 167 - 168 - /* Access flag update enable/disable */ 169 - #define TCR_EL1_HA (1ULL << 39) 170 104 171 105 void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k, 172 106 uint32_t *ipa16k, uint32_t *ipa64k);
+31 -26
tools/testing/selftests/kvm/lib/arm64/processor.c
··· 72 72 uint64_t pte; 73 73 74 74 if (use_lpa2_pte_format(vm)) { 75 - pte = pa & GENMASK(49, vm->page_shift); 76 - pte |= FIELD_GET(GENMASK(51, 50), pa) << 8; 77 - attrs &= ~GENMASK(9, 8); 75 + pte = pa & PTE_ADDR_MASK_LPA2(vm->page_shift); 76 + pte |= FIELD_GET(GENMASK(51, 50), pa) << PTE_ADDR_51_50_LPA2_SHIFT; 77 + attrs &= ~PTE_ADDR_51_50_LPA2; 78 78 } else { 79 - pte = pa & GENMASK(47, vm->page_shift); 79 + pte = pa & PTE_ADDR_MASK(vm->page_shift); 80 80 if (vm->page_shift == 16) 81 - pte |= FIELD_GET(GENMASK(51, 48), pa) << 12; 81 + pte |= FIELD_GET(GENMASK(51, 48), pa) << PTE_ADDR_51_48_SHIFT; 82 82 } 83 83 pte |= attrs; 84 84 ··· 90 90 uint64_t pa; 91 91 92 92 if (use_lpa2_pte_format(vm)) { 93 - pa = pte & GENMASK(49, vm->page_shift); 94 - pa |= FIELD_GET(GENMASK(9, 8), pte) << 50; 93 + pa = pte & PTE_ADDR_MASK_LPA2(vm->page_shift); 94 + pa |= FIELD_GET(PTE_ADDR_51_50_LPA2, pte) << 50; 95 95 } else { 96 - pa = pte & GENMASK(47, vm->page_shift); 96 + pa = pte & PTE_ADDR_MASK(vm->page_shift); 97 97 if (vm->page_shift == 16) 98 - pa |= FIELD_GET(GENMASK(15, 12), pte) << 48; 98 + pa |= FIELD_GET(PTE_ADDR_51_48, pte) << 48; 99 99 } 100 100 101 101 return pa; ··· 128 128 static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, 129 129 uint64_t flags) 130 130 { 131 - uint8_t attr_idx = flags & 7; 131 + uint8_t attr_idx = flags & (PTE_ATTRINDX_MASK >> PTE_ATTRINDX_SHIFT); 132 + uint64_t pg_attr; 132 133 uint64_t *ptep; 133 134 134 135 TEST_ASSERT((vaddr % vm->page_size) == 0, ··· 148 147 149 148 ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8; 150 149 if (!*ptep) 151 - *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3); 150 + *ptep = addr_pte(vm, vm_alloc_page_table(vm), 151 + PGD_TYPE_TABLE | PTE_VALID); 152 152 153 153 switch (vm->pgtable_levels) { 154 154 case 4: 155 155 ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8; 156 156 if (!*ptep) 157 - *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3); 157 + *ptep = 
addr_pte(vm, vm_alloc_page_table(vm), 158 + PUD_TYPE_TABLE | PTE_VALID); 158 159 /* fall through */ 159 160 case 3: 160 161 ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8; 161 162 if (!*ptep) 162 - *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3); 163 + *ptep = addr_pte(vm, vm_alloc_page_table(vm), 164 + PMD_TYPE_TABLE | PTE_VALID); 163 165 /* fall through */ 164 166 case 2: 165 167 ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8; ··· 171 167 TEST_FAIL("Page table levels must be 2, 3, or 4"); 172 168 } 173 169 174 - *ptep = addr_pte(vm, paddr, (attr_idx << 2) | (1 << 10) | 3); /* AF */ 170 + pg_attr = PTE_AF | PTE_ATTRINDX(attr_idx) | PTE_TYPE_PAGE | PTE_VALID; 171 + *ptep = addr_pte(vm, paddr, pg_attr); 175 172 } 176 173 177 174 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) ··· 298 293 case VM_MODE_P48V48_64K: 299 294 case VM_MODE_P40V48_64K: 300 295 case VM_MODE_P36V48_64K: 301 - tcr_el1 |= 1ul << 14; /* TG0 = 64KB */ 296 + tcr_el1 |= TCR_TG0_64K; 302 297 break; 303 298 case VM_MODE_P52V48_16K: 304 299 case VM_MODE_P48V48_16K: 305 300 case VM_MODE_P40V48_16K: 306 301 case VM_MODE_P36V48_16K: 307 302 case VM_MODE_P36V47_16K: 308 - tcr_el1 |= 2ul << 14; /* TG0 = 16KB */ 303 + tcr_el1 |= TCR_TG0_16K; 309 304 break; 310 305 case VM_MODE_P52V48_4K: 311 306 case VM_MODE_P48V48_4K: 312 307 case VM_MODE_P40V48_4K: 313 308 case VM_MODE_P36V48_4K: 314 - tcr_el1 |= 0ul << 14; /* TG0 = 4KB */ 309 + tcr_el1 |= TCR_TG0_4K; 315 310 break; 316 311 default: 317 312 TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode); ··· 324 319 case VM_MODE_P52V48_4K: 325 320 case VM_MODE_P52V48_16K: 326 321 case VM_MODE_P52V48_64K: 327 - tcr_el1 |= 6ul << 32; /* IPS = 52 bits */ 322 + tcr_el1 |= TCR_IPS_52_BITS; 328 323 ttbr0_el1 |= FIELD_GET(GENMASK(51, 48), vm->pgd) << 2; 329 324 break; 330 325 case VM_MODE_P48V48_4K: 331 326 case VM_MODE_P48V48_16K: 332 327 case VM_MODE_P48V48_64K: 333 - tcr_el1 |= 5ul << 32; /* 
IPS = 48 bits */ 328 + tcr_el1 |= TCR_IPS_48_BITS; 334 329 break; 335 330 case VM_MODE_P40V48_4K: 336 331 case VM_MODE_P40V48_16K: 337 332 case VM_MODE_P40V48_64K: 338 - tcr_el1 |= 2ul << 32; /* IPS = 40 bits */ 333 + tcr_el1 |= TCR_IPS_40_BITS; 339 334 break; 340 335 case VM_MODE_P36V48_4K: 341 336 case VM_MODE_P36V48_16K: 342 337 case VM_MODE_P36V48_64K: 343 338 case VM_MODE_P36V47_16K: 344 - tcr_el1 |= 1ul << 32; /* IPS = 36 bits */ 339 + tcr_el1 |= TCR_IPS_36_BITS; 345 340 break; 346 341 default: 347 342 TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode); 348 343 } 349 344 350 - sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */; 351 - /* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */; 352 - tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12); 353 - tcr_el1 |= (64 - vm->va_bits) /* T0SZ */; 345 + sctlr_el1 |= SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_I; 346 + 347 + tcr_el1 |= TCR_IRGN0_WBWA | TCR_ORGN0_WBWA | TCR_SH0_INNER; 348 + tcr_el1 |= TCR_T0SZ(vm->va_bits); 354 349 if (use_lpa2_pte_format(vm)) 355 - tcr_el1 |= (1ul << 59) /* DS */; 350 + tcr_el1 |= TCR_DS; 356 351 357 352 vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1); 358 353 vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);