Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'riscv-for-linus-6.5-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:

- Fixes for a pair of kexec_file_load() failures

- A fix to ensure the direct mapping is PMD-aligned

- A fix for CPU feature detection on SMP=n

- The MMIO ordering fences have been strengthened to ensure ordering
with respect to delay()

- Fixes for a pair of -Wmissing-variable-declarations warnings

- A fix to avoid PUD mappings in vmap on sv39

- flush_cache_vmap() now flushes the TLB to avoid issues on systems
that cache invalid mappings

* tag 'riscv-for-linus-6.5-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
riscv: Implement flush_cache_vmap()
riscv: Do not allow vmap pud mappings for 3-level page table
riscv: mm: fix 2 instances of -Wmissing-variable-declarations
riscv,mmio: Fix readX()-to-delay() ordering
riscv: Fix CPU feature detection with SMP disabled
riscv: Start of DRAM should at least be aligned on PMD size for the direct mapping
riscv/kexec: load initrd high in available memory
riscv/kexec: handle R_RISCV_CALL_PLT relocation type

+35 -21
+4
arch/riscv/include/asm/cacheflush.h
··· 37 37 #define flush_icache_user_page(vma, pg, addr, len) \ 38 38 flush_icache_mm(vma->vm_mm, 0) 39 39 40 + #ifdef CONFIG_64BIT 41 + #define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end) 42 + #endif 43 + 40 44 #ifndef CONFIG_SMP 41 45 42 46 #define flush_icache_all() local_flush_icache_all()
+8 -8
arch/riscv/include/asm/mmio.h
··· 101 101 * Relaxed I/O memory access primitives. These follow the Device memory 102 102 * ordering rules but do not guarantee any ordering relative to Normal memory 103 103 * accesses. These are defined to order the indicated access (either a read or 104 - * write) with all other I/O memory accesses. Since the platform specification 105 - * defines that all I/O regions are strongly ordered on channel 2, no explicit 106 - * fences are required to enforce this ordering. 104 + * write) with all other I/O memory accesses to the same peripheral. Since the 105 + * platform specification defines that all I/O regions are strongly ordered on 106 + * channel 0, no explicit fences are required to enforce this ordering. 107 107 */ 108 108 /* FIXME: These are now the same as asm-generic */ 109 109 #define __io_rbr() do {} while (0) ··· 125 125 #endif 126 126 127 127 /* 128 - * I/O memory access primitives. Reads are ordered relative to any 129 - * following Normal memory access. Writes are ordered relative to any prior 130 - * Normal memory access. The memory barriers here are necessary as RISC-V 128 + * I/O memory access primitives. Reads are ordered relative to any following 129 + * Normal memory read and delay() loop. Writes are ordered relative to any 130 + * prior Normal memory write. The memory barriers here are necessary as RISC-V 131 131 * doesn't define any ordering between the memory space and the I/O space. 132 132 */ 133 133 #define __io_br() do {} while (0) 134 - #define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory") 135 - #define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory") 134 + #define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); }) 135 + #define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); }) 136 136 #define __io_aw() mmiowb_set_pending() 137 137 138 138 #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
+2
arch/riscv/include/asm/pgtable.h
··· 188 188 #define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP) 189 189 190 190 extern pgd_t swapper_pg_dir[]; 191 + extern pgd_t trampoline_pg_dir[]; 192 + extern pgd_t early_pg_dir[]; 191 193 192 194 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 193 195 static inline int pmd_present(pmd_t pmd)
+3 -1
arch/riscv/include/asm/vmalloc.h
··· 3 3 4 4 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP 5 5 6 + extern bool pgtable_l4_enabled, pgtable_l5_enabled; 7 + 6 8 #define IOREMAP_MAX_ORDER (PUD_SHIFT) 7 9 8 10 #define arch_vmap_pud_supported arch_vmap_pud_supported 9 11 static inline bool arch_vmap_pud_supported(pgprot_t prot) 10 12 { 11 - return true; 13 + return pgtable_l4_enabled || pgtable_l5_enabled; 12 14 } 13 15 14 16 #define arch_vmap_pmd_supported arch_vmap_pmd_supported
+5
arch/riscv/kernel/cpu.c
··· 17 17 #include <asm/smp.h> 18 18 #include <asm/pgtable.h> 19 19 20 + bool arch_match_cpu_phys_id(int cpu, u64 phys_id) 21 + { 22 + return phys_id == cpuid_to_hartid_map(cpu); 23 + } 24 + 20 25 /* 21 26 * Returns the hart ID of the given device tree node, or -ENODEV if the node 22 27 * isn't an enabled and valid RISC-V hart node.
+2 -1
arch/riscv/kernel/elf_kexec.c
··· 281 281 kbuf.buffer = initrd; 282 282 kbuf.bufsz = kbuf.memsz = initrd_len; 283 283 kbuf.buf_align = PAGE_SIZE; 284 - kbuf.top_down = false; 284 + kbuf.top_down = true; 285 285 kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; 286 286 ret = kexec_add_buffer(&kbuf); 287 287 if (ret) ··· 425 425 * sym, instead of searching the whole relsec. 426 426 */ 427 427 case R_RISCV_PCREL_HI20: 428 + case R_RISCV_CALL_PLT: 428 429 case R_RISCV_CALL: 429 430 *(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) | 430 431 ENCODE_UJTYPE_IMM(val - addr);
-5
arch/riscv/kernel/smp.c
··· 61 61 return -ENOENT; 62 62 } 63 63 64 - bool arch_match_cpu_phys_id(int cpu, u64 phys_id) 65 - { 66 - return phys_id == cpuid_to_hartid_map(cpu); 67 - } 68 - 69 64 static void ipi_stop(void) 70 65 { 71 66 set_cpu_online(smp_processor_id(), false);
+11 -5
arch/riscv/mm/init.c
··· 26 26 #include <linux/kfence.h> 27 27 28 28 #include <asm/fixmap.h> 29 - #include <asm/tlbflush.h> 29 + #include <asm/io.h> 30 + #include <asm/numa.h> 31 + #include <asm/pgtable.h> 32 + #include <asm/ptdump.h> 30 33 #include <asm/sections.h> 31 34 #include <asm/soc.h> 32 - #include <asm/io.h> 33 - #include <asm/ptdump.h> 34 - #include <asm/numa.h> 35 + #include <asm/tlbflush.h> 35 36 36 37 #include "../kernel/head.h" 37 38 ··· 215 214 memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); 216 215 217 216 phys_ram_end = memblock_end_of_DRAM(); 217 + 218 + /* 219 + * Make sure we align the start of the memory on a PMD boundary so that 220 + * at worst, we map the linear mapping with PMD mappings. 221 + */ 218 222 if (!IS_ENABLED(CONFIG_XIP_KERNEL)) 219 - phys_ram_base = memblock_start_of_DRAM(); 223 + phys_ram_base = memblock_start_of_DRAM() & PMD_MASK; 220 224 221 225 /* 222 226 * In 64-bit, any use of __va/__pa before this point is wrong as we
-1
arch/riscv/mm/kasan_init.c
··· 22 22 * region is not and then we have to go down to the PUD level. 23 23 */ 24 24 25 - extern pgd_t early_pg_dir[PTRS_PER_PGD]; 26 25 pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss; 27 26 p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss; 28 27 pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;