Merge tag 'riscv-for-linus-5.15-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:
"These are pretty late, but they do fix concrete issues.

- Ensure the trap vector's base address is aligned.

- Avoid re-populating the KASAN shadow memory.

- Allow KASAN to build without warnings, which have recently become
  errors"

* tag 'riscv-for-linus-5.15-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
riscv: Fix asan-stack clang build
riscv: Do not re-populate shadow memory with kasan_populate_early_shadow
riscv: fix misaligned trap vector base address

+11 -13

arch/riscv/Kconfig | +6

@@ -163,6 +163,12 @@
 	default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
 	default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
 
+config KASAN_SHADOW_OFFSET
+	hex
+	depends on KASAN_GENERIC
+	default 0xdfffffc800000000 if 64BIT
+	default 0xffffffff if 32BIT
+
 config ARCH_FLATMEM_ENABLE
 	def_bool !NUMA
 
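
Generic KASAN maps an address to its shadow byte with a fixed scale and a
constant offset, so moving KASAN_SHADOW_OFFSET into Kconfig makes the value
visible to the shared KASAN build flags (which want a literal constant they
can hand to the compiler for clang's stack instrumentation) rather than only
to C code. A minimal sketch of that translation, paraphrasing the generic
kasan_mem_to_shadow() helper with the values assumed from the hunk above:

#define KASAN_SHADOW_SCALE_SHIFT 3                     /* 1 shadow byte per 8 bytes */
#define KASAN_SHADOW_OFFSET      0xdfffffc800000000UL  /* new 64BIT Kconfig default */

static inline void *kasan_mem_to_shadow(const void *addr)
{
	/* shadow = (addr >> scale) + offset */
	return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET);
}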

arch/riscv/include/asm/kasan.h | +1 -2

@@ -30,8 +30,7 @@
 #define KASAN_SHADOW_SIZE	(UL(1) << ((CONFIG_VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
 #define KASAN_SHADOW_START	KERN_VIRT_START
 #define KASAN_SHADOW_END	(KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
-#define KASAN_SHADOW_OFFSET	(KASAN_SHADOW_END - (1ULL << \
-					(64 - KASAN_SHADOW_SCALE_SHIFT)))
+#define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
 
 void kasan_init(void);
 asmlinkage void kasan_early_init(void);

arch/riscv/kernel/head.S | +1

@@ -193,6 +193,7 @@
 	csrw CSR_SCRATCH, zero
 	ret
 
+.align 2
 .Lsecondary_park:
 	/* We lack SMP support or have too many harts, so park this hart */
 	wfi
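
The new ".align 2" matters because .Lsecondary_park is also installed as a
trap vector base for parked harts, and the RISC-V tvec CSRs reserve their low
two bits for the trap mode, so the base itself must be 4-byte aligned. A small
illustration of that encoding (the make_stvec() helper is hypothetical, not
part of the patch):

#define TVEC_MODE_MASK   0x3UL
#define TVEC_MODE_DIRECT 0x0UL   /* all traps jump straight to the base */

/* Compose an stvec value from a 4-byte-aligned handler address. */
static inline unsigned long make_stvec(unsigned long handler_base)
{
	return (handler_base & ~TVEC_MODE_MASK) | TVEC_MODE_DIRECT;
}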

arch/riscv/mm/kasan_init.c | +3 -11

@@ -17,6 +17,9 @@
 	uintptr_t i;
 	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);
 
+	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
+		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
+
 	for (i = 0; i < PTRS_PER_PTE; ++i)
 		set_pte(kasan_early_shadow_pte + i,
 			mk_pte(virt_to_page(kasan_early_shadow_page),
@@ -175,19 +172,8 @@
 	phys_addr_t p_start, p_end;
 	u64 i;
 
-	/*
-	 * Populate all kernel virtual address space with kasan_early_shadow_page
-	 * except for the linear mapping and the modules/kernel/BPF mapping.
-	 */
-	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
-				    (void *)kasan_mem_to_shadow((void *)
-								VMEMMAP_END));
 	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
 		kasan_shallow_populate(
-			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
-			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
-	else
-		kasan_populate_early_shadow(
 			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
 			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
 
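
The BUILD_BUG_ON() ties the new Kconfig constant back to the expression it
replaced. The same arithmetic can be checked stand-alone; the sketch below
assumes the sv39 layout implied by the hunks above (CONFIG_VA_BITS = 39,
KERN_VIRT_START = 0xffffffc000000000, KASAN_SHADOW_SCALE_SHIFT = 3):

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Assumed sv39 values; KASAN_SHADOW_START == KERN_VIRT_START here. */
	const uint64_t scale_shift  = 3;
	const uint64_t va_bits      = 39;
	const uint64_t shadow_start = 0xffffffc000000000ULL;
	const uint64_t shadow_size  = UINT64_C(1) << ((va_bits - 1) - scale_shift);
	const uint64_t shadow_end   = shadow_start + shadow_size;
	const uint64_t offset       = shadow_end - (UINT64_C(1) << (64 - scale_shift));

	/* Must match the new 64BIT default of CONFIG_KASAN_SHADOW_OFFSET. */
	assert(offset == UINT64_C(0xdfffffc800000000));
	printf("KASAN_SHADOW_OFFSET = 0x%" PRIx64 "\n", offset);
	return 0;
}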