Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: mm: fix booting with 52-bit address space

Joey reports that booting 52-bit VA capable builds on 52-bit VA capable
CPUs is broken since commit 0d9b1ffefabe ("arm64: mm: make vabits_actual
a build time constant if possible"). This is due to the fact that the
primary CPU reads the vabits_actual variable before it has been
assigned.

The reason for deferring the assignment of vabits_actual was that we try
to perform as few stores to memory as we can with the MMU and caches
off, due to the cache coherency issues it creates.

Since __cpu_setup() (which is where the read of vabits_actual occurs) is
also called on the secondary boot path, we cannot just read the CPU ID
registers directly, given that the size of the VA space is decided by
the capabilities of the primary CPU. So let's read vabits_actual only on
the secondary boot path, and read the CPU ID registers directly on the
primary boot path, by making the VA size a function parameter of
__cpu_setup().

To ensure that all users of vabits_actual (including kasan_early_init())
observe the correct value, move the assignment of vabits_actual back
into asm code, but still defer it to after the MMU and caches have been
enabled.

Cc: Will Deacon <will@kernel.org>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Fixes: 0d9b1ffefabe ("arm64: mm: make vabits_actual a build time constant if possible")
Reported-by: Joey Gouly <joey.gouly@arm.com>
Co-developed-by: Joey Gouly <joey.gouly@arm.com>
Signed-off-by: Joey Gouly <joey.gouly@arm.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20220701111045.2944309-1-ardb@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>

authored by

Ard Biesheuvel and committed by
Will Deacon
0aaa6853 bdbcd22d

+22 -16
+18
arch/arm64/kernel/head.S
··· 82 82 * x22 create_idmap() .. start_kernel() ID map VA of the DT blob 83 83 * x23 primary_entry() .. start_kernel() physical misalignment/KASLR offset 84 84 * x24 __primary_switch() linear map KASLR seed 85 + * x25 primary_entry() .. start_kernel() supported VA size 85 86 * x28 create_idmap() callee preserved temp register 86 87 */ 87 88 SYM_CODE_START(primary_entry) ··· 97 96 * On return, the CPU will be ready for the MMU to be turned on and 98 97 * the TCR will have been set. 99 98 */ 99 + #if VA_BITS > 48 100 + mrs_s x0, SYS_ID_AA64MMFR2_EL1 101 + tst x0, #0xf << ID_AA64MMFR2_LVA_SHIFT 102 + mov x0, #VA_BITS 103 + mov x25, #VA_BITS_MIN 104 + csel x25, x25, x0, eq 105 + mov x0, x25 106 + #endif 100 107 bl __cpu_setup // initialise processor 101 108 b __primary_switch 102 109 SYM_CODE_END(primary_entry) ··· 443 434 bl __pi_memset 444 435 dsb ishst // Make zero page visible to PTW 445 436 437 + #if VA_BITS > 48 438 + adr_l x8, vabits_actual // Set this early so KASAN early init 439 + str x25, [x8] // ... observes the correct value 440 + dc civac, x8 // Make visible to booting secondaries 441 + #endif 442 + 446 443 #ifdef CONFIG_RANDOMIZE_BASE 447 444 adrp x5, memstart_offset_seed // Save KASLR linear map seed 448 445 strh w24, [x5, :lo12:memstart_offset_seed] ··· 594 579 mov x20, x0 // preserve boot mode 595 580 bl switch_to_vhe 596 581 bl __cpu_secondary_check52bitva 582 + #if VA_BITS > 48 583 + ldr_l x0, vabits_actual 584 + #endif 597 585 bl __cpu_setup // initialise processor 598 586 adrp x1, swapper_pg_dir 599 587 adrp x2, idmap_pg_dir
+1 -14
arch/arm64/mm/init.c
··· 265 265 266 266 void __init arm64_memblock_init(void) 267 267 { 268 - s64 linear_region_size; 269 - 270 - #if VA_BITS > 48 271 - if (cpuid_feature_extract_unsigned_field( 272 - read_sysreg_s(SYS_ID_AA64MMFR2_EL1), 273 - ID_AA64MMFR2_LVA_SHIFT)) 274 - vabits_actual = VA_BITS; 275 - 276 - /* make the variable visible to secondaries with the MMU off */ 277 - dcache_clean_inval_poc((u64)&vabits_actual, 278 - (u64)&vabits_actual + sizeof(vabits_actual)); 279 - #endif 280 - 281 - linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual); 268 + s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual); 282 269 283 270 /* 284 271 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
+3 -2
arch/arm64/mm/proc.S
··· 397 397 * 398 398 * Initialise the processor for turning the MMU on. 399 399 * 400 + * Input: 401 + * x0 - actual number of VA bits (ignored unless VA_BITS > 48) 400 402 * Output: 401 403 * Return in x0 the value of the SCTLR_EL1 register. 402 404 */ ··· 468 466 tcr_clear_errata_bits tcr, x9, x5 469 467 470 468 #ifdef CONFIG_ARM64_VA_BITS_52 471 - ldr_l x9, vabits_actual 472 - sub x9, xzr, x9 469 + sub x9, xzr, x0 473 470 add x9, x9, #64 474 471 tcr_set_t1sz tcr, x9 475 472 #else