Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: mm: extend linear region for 52-bit VA configurations

For historical reasons, the arm64 kernel VA space is configured as two
equally sized halves, i.e., on a 48-bit VA build, the VA space is split
into a 47-bit vmalloc region and a 47-bit linear region.

When support for 52-bit virtual addressing was added, this equal split
was kept, resulting in a substantial waste of virtual address space in
the linear region:

48-bit VA 52-bit VA
0xffff_ffff_ffff_ffff +-------------+ +-------------+
| vmalloc | | vmalloc |
0xffff_8000_0000_0000 +-------------+ _PAGE_END(48) +-------------+
| linear | : :
0xffff_0000_0000_0000 +-------------+ : :
: : : :
: : : :
: : : :
: : : currently :
: unusable : : :
: : : unused :
: by : : :
: : : :
: hardware : : :
: : : :
0xfff8_0000_0000_0000 : : _PAGE_END(52) +-------------+
: : | |
: : | |
: : | |
: : | |
: : | |
: unusable : | |
: : | linear |
: by : | |
: : | region |
: hardware : | |
: : | |
: : | |
: : | |
: : | |
: : | |
: : | |
0xfff0_0000_0000_0000 +-------------+ PAGE_OFFSET +-------------+

As illustrated above, the 52-bit VA kernel uses 47 bits for the vmalloc
space (as before), to ensure that a single 64k granule kernel image can
support any 64k granule capable system, regardless of whether it supports
the 52-bit virtual addressing extension. However, due to the fact that
the VA space is still split in equal halves, the linear region is only
2^51 bytes in size, wasting almost half of the 52-bit VA space.

Let's fix this by abandoning the equal split and simply assigning all
VA space outside of the vmalloc region to the linear region.

The KASAN shadow region is reconfigured so that it ends at the start of
the vmalloc region, and grows downwards. That way, the arrangement of
the vmalloc space (which contains kernel mappings, modules, BPF region,
the vmemmap array etc) is identical between non-KASAN and KASAN builds,
which aids debugging.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Steve Capper <steve.capper@arm.com>
Link: https://lore.kernel.org/r/20201008153602.9467-3-ardb@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

authored by

Ard Biesheuvel and committed by
Catalin Marinas
f4693c27 f8394f23

+26 -30
+1 -2
Documentation/arm64/kasan-offsets.sh
@@ -1,12 +1,11 @@
 #!/bin/sh
 
 # Print out the KASAN_SHADOW_OFFSETS required to place the KASAN SHADOW
-# start address at the mid-point of the kernel VA space
+# start address at the top of the linear region
 
 print_kasan_offset () {
 	printf "%02d\t" $1
 	printf "0x%08x00000000\n" $(( (0xffffffff & (-1 << ($1 - 1 - 32))) \
-			+ (1 << ($1 - 32 - $2)) \
 			- (1 << (64 - 32 - $2)) ))
 }
 
+9 -10
Documentation/arm64/memory.rst
@@ -32,10 +32,10 @@
 -----------------------------------------------------------------------
   0000000000000000	0000ffffffffffff	 256TB		user
   ffff000000000000	ffff7fffffffffff	 128TB		kernel logical memory map
-  ffff800000000000	ffff9fffffffffff	  32TB		kasan shadow region
-  ffffa00000000000	ffffa00007ffffff	 128MB		bpf jit region
-  ffffa00008000000	ffffa0000fffffff	 128MB		modules
-  ffffa00010000000	fffffdffbffeffff	 ~93TB		vmalloc
+[ ffff600000000000	ffff7fffffffffff ]	  32TB	[ kasan shadow region ]
+  ffff800000000000	ffff800007ffffff	 128MB		bpf jit region
+  ffff800008000000	ffff80000fffffff	 128MB		modules
+  ffff800010000000	fffffdffbffeffff	 125TB		vmalloc
   fffffdffbfff0000	fffffdfffe5f8fff	~998MB		[guard region]
   fffffdfffe5f9000	fffffdfffe9fffff	4124KB		fixed mappings
   fffffdfffea00000	fffffdfffebfffff	   2MB		[guard region]
@@ -50,12 +50,11 @@
   Start			End			Size		Use
 -----------------------------------------------------------------------
   0000000000000000	000fffffffffffff	   4PB		user
-  fff0000000000000	fff7ffffffffffff	   2PB		kernel logical memory map
-  fff8000000000000	fffd9fffffffffff	1440TB		[gap]
-  fffda00000000000	ffff9fffffffffff	 512TB		kasan shadow region
-  ffffa00000000000	ffffa00007ffffff	 128MB		bpf jit region
-  ffffa00008000000	ffffa0000fffffff	 128MB		modules
-  ffffa00010000000	fffff81ffffeffff	 ~88TB		vmalloc
+  fff0000000000000	ffff7fffffffffff	  ~4PB		kernel logical memory map
+[ fffd800000000000	ffff7fffffffffff ]	 512TB	[ kasan shadow region ]
+  ffff800000000000	ffff800007ffffff	 128MB		bpf jit region
+  ffff800008000000	ffff80000fffffff	 128MB		modules
+  ffff800010000000	fffff81ffffeffff	 120TB		vmalloc
   fffff81fffff0000	fffffc1ffe58ffff	  ~3TB		[guard region]
   fffffc1ffe590000	fffffc1ffe9fffff	4544KB		fixed mappings
   fffffc1ffea00000	fffffc1ffebfffff	   2MB		[guard region]
+10 -10
arch/arm64/Kconfig
@@ -331,16 +331,16 @@
 config KASAN_SHADOW_OFFSET
 	hex
 	depends on KASAN
-	default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
-	default 0xdfffd00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
-	default 0xdffffe8000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
-	default 0xdfffffd000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
-	default 0xdffffffa00000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
-	default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS
-	default 0xefffc80000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
-	default 0xeffffe4000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
-	default 0xefffffc800000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
-	default 0xeffffff900000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
+	default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
+	default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
+	default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
+	default 0xdfffffc000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
+	default 0xdffffff800000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
+	default 0xefff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS
+	default 0xefffc00000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
+	default 0xeffffe0000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
+	default 0xefffffc000000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
+	default 0xeffffff800000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
 	default 0xffffffffffffffff
 
 source "arch/arm64/Kconfig.platforms"
+5 -7
arch/arm64/include/asm/memory.h
@@ -44,7 +44,7 @@
 #define _PAGE_OFFSET(va)	(-(UL(1) << (va)))
 #define PAGE_OFFSET		(_PAGE_OFFSET(VA_BITS))
 #define KIMAGE_VADDR		(MODULES_END)
-#define BPF_JIT_REGION_START	(KASAN_SHADOW_END)
+#define BPF_JIT_REGION_START	(_PAGE_END(VA_BITS_MIN))
 #define BPF_JIT_REGION_SIZE	(SZ_128M)
 #define BPF_JIT_REGION_END	(BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
 #define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
@@ -76,9 +76,10 @@
 #define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
 #define KASAN_SHADOW_END	((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
					+ KASAN_SHADOW_OFFSET)
+#define PAGE_END		(KASAN_SHADOW_END - (1UL << (vabits_actual - KASAN_SHADOW_SCALE_SHIFT)))
 #define KASAN_THREAD_SHIFT	1
 #else
 #define KASAN_THREAD_SHIFT	0
-#define KASAN_SHADOW_END	(_PAGE_END(VA_BITS_MIN))
+#define PAGE_END		(_PAGE_END(VA_BITS_MIN))
 #endif /* CONFIG_KASAN */
 
 #define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
@@ -168,7 +167,6 @@
 #include <asm/bug.h>
 
 extern u64			vabits_actual;
-#define PAGE_END		(_PAGE_END(vabits_actual))
 
 extern s64			memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
@@ -238,11 +238,9 @@
 
 /*
- * The linear kernel range starts at the bottom of the virtual address
- * space. Testing the top bit for the start of the region is a
- * sufficient check and avoids having to worry about the tag.
+ * The linear kernel range starts at the bottom of the virtual address space.
  */
-#define __is_lm_address(addr)	(!(((u64)addr) & BIT(vabits_actual - 1)))
+#define __is_lm_address(addr)	(((u64)(addr) & ~PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
 
 #define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
 #define __kimg_to_phys(addr)	((addr) - kimage_voffset)
+1 -1
arch/arm64/mm/init.c
@@ -269,7 +269,7 @@
 
 void __init arm64_memblock_init(void)
 {
-	const s64 linear_region_size = BIT(vabits_actual - 1);
+	const s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
 
 	/* Handle linux,usable-memory-range property */
 	fdt_enforce_memory_region();