Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'next/nommu' into for-next

Conflicts:
arch/riscv/boot/Makefile
arch/riscv/include/asm/sbi.h

+947 -375
+27 -9
arch/riscv/Kconfig
··· 26 26 select GENERIC_IRQ_SHOW 27 27 select GENERIC_PCI_IOMAP 28 28 select GENERIC_SCHED_CLOCK 29 - select GENERIC_STRNCPY_FROM_USER 30 - select GENERIC_STRNLEN_USER 29 + select GENERIC_STRNCPY_FROM_USER if MMU 30 + select GENERIC_STRNLEN_USER if MMU 31 31 select GENERIC_SMP_IDLE_THREAD 32 32 select GENERIC_ATOMIC64 if !64BIT 33 33 select HAVE_ARCH_AUDITSYSCALL 34 34 select HAVE_ARCH_SECCOMP_FILTER 35 35 select HAVE_ASM_MODVERSIONS 36 36 select HAVE_MEMBLOCK_NODE_MAP 37 - select HAVE_DMA_CONTIGUOUS 37 + select HAVE_DMA_CONTIGUOUS if MMU 38 38 select HAVE_FUTEX_CMPXCHG if FUTEX 39 39 select HAVE_PERF_EVENTS 40 40 select HAVE_PERF_REGS ··· 51 51 select PCI_DOMAINS_GENERIC if PCI 52 52 select PCI_MSI if PCI 53 53 select RISCV_TIMER 54 + select UACCESS_MEMCPY if !MMU 54 55 select GENERIC_IRQ_MULTI_HANDLER 55 56 select GENERIC_ARCH_TOPOLOGY if SMP 56 57 select ARCH_HAS_PTE_SPECIAL ··· 62 61 select ARCH_WANT_HUGE_PMD_SHARE if 64BIT 63 62 select SPARSEMEM_STATIC if 32BIT 64 63 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU 65 - select HAVE_ARCH_MMAP_RND_BITS 64 + select HAVE_ARCH_MMAP_RND_BITS if MMU 66 65 67 66 config ARCH_MMAP_RND_BITS_MIN 68 67 default 18 if 64BIT ··· 74 73 default 24 if 64BIT # SV39 based 75 74 default 17 76 75 76 + # set if we run in machine mode, cleared if we run in supervisor mode 77 + config RISCV_M_MODE 78 + bool 79 + default !MMU 80 + 81 + # set if we are running in S-mode and can use SBI calls 82 + config RISCV_SBI 83 + bool 84 + depends on !RISCV_M_MODE 85 + default y 86 + 77 87 config MMU 78 - def_bool y 88 + bool "MMU-based Paged Memory Management Support" 89 + default y 90 + help 91 + Select if you want MMU-based virtualised addressing space 92 + support by paged memory management. If unsure, say 'Y'. 
79 93 80 94 config ZONE_DMA32 81 95 bool ··· 109 93 config PAGE_OFFSET 110 94 hex 111 95 default 0xC0000000 if 32BIT && MAXPHYSMEM_2GB 96 + default 0x80000000 if 64BIT && !MMU 112 97 default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB 113 98 default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB 114 99 ··· 153 136 def_bool y 154 137 155 138 config FIX_EARLYCON_MEM 156 - def_bool y 139 + def_bool MMU 157 140 158 141 config PGTABLE_LEVELS 159 142 int ··· 178 161 select GENERIC_LIB_ASHRDI3 179 162 select GENERIC_LIB_LSHRDI3 180 163 select GENERIC_LIB_UCMPDI2 164 + select MMU 181 165 182 166 config ARCH_RV64I 183 167 bool "RV64I" ··· 187 169 select HAVE_FUNCTION_TRACER 188 170 select HAVE_FUNCTION_GRAPH_TRACER 189 171 select HAVE_FTRACE_MCOUNT_RECORD 190 - select HAVE_DYNAMIC_FTRACE 191 - select HAVE_DYNAMIC_FTRACE_WITH_REGS 192 - select SWIOTLB 172 + select HAVE_DYNAMIC_FTRACE if MMU 173 + select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE 174 + select SWIOTLB if MMU 193 175 194 176 endchoice 195 177
+9 -4
arch/riscv/Makefile
··· 83 83 vdso_install: 84 84 $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@ 85 85 86 - all: Image.gz 86 + ifeq ($(CONFIG_RISCV_M_MODE),y) 87 + KBUILD_IMAGE := $(boot)/loader 88 + else 89 + KBUILD_IMAGE := $(boot)/Image.gz 90 + endif 91 + BOOT_TARGETS := Image Image.gz loader 87 92 88 - Image: vmlinux 89 - $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 93 + all: $(notdir $(KBUILD_IMAGE)) 90 94 91 - Image.%: Image 95 + $(BOOT_TARGETS): vmlinux 92 96 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 97 + @$(kecho) ' Kernel: $(boot)/$@ is ready' 93 98 94 99 zinstall install: 95 100 $(Q)$(MAKE) $(build)=$(boot) $@
+6 -1
arch/riscv/boot/Makefile
··· 16 16 17 17 OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S 18 18 19 - targets := Image 19 + targets := Image loader 20 20 21 21 $(obj)/Image: vmlinux FORCE 22 22 $(call if_changed,objcopy) 23 23 24 24 $(obj)/Image.gz: $(obj)/Image FORCE 25 25 $(call if_changed,gzip) 26 + 27 + loader.o: $(src)/loader.S $(obj)/Image 28 + 29 + $(obj)/loader: $(obj)/loader.o $(obj)/Image $(obj)/loader.lds FORCE 30 + $(Q)$(LD) -T $(obj)/loader.lds -o $@ $(obj)/loader.o 26 31 27 32 $(obj)/Image.bz2: $(obj)/Image FORCE 28 33 $(call if_changed,bzip2)
+8
arch/riscv/boot/loader.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + .align 4 4 + .section .payload, "ax", %progbits 5 + .globl _start 6 + _start: 7 + .incbin "arch/riscv/boot/Image" 8 +
+16
arch/riscv/boot/loader.lds.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #include <asm/page.h> 4 + 5 + OUTPUT_ARCH(riscv) 6 + ENTRY(_start) 7 + 8 + SECTIONS 9 + { 10 + . = PAGE_OFFSET; 11 + 12 + .payload : { 13 + *(.payload) 14 + . = ALIGN(8); 15 + } 16 + }
+78
arch/riscv/configs/nommu_virt_defconfig
··· 1 + # CONFIG_CPU_ISOLATION is not set 2 + CONFIG_LOG_BUF_SHIFT=16 3 + CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12 4 + CONFIG_BLK_DEV_INITRD=y 5 + # CONFIG_RD_BZIP2 is not set 6 + # CONFIG_RD_LZMA is not set 7 + # CONFIG_RD_XZ is not set 8 + # CONFIG_RD_LZO is not set 9 + # CONFIG_RD_LZ4 is not set 10 + CONFIG_CC_OPTIMIZE_FOR_SIZE=y 11 + CONFIG_EXPERT=y 12 + # CONFIG_SYSFS_SYSCALL is not set 13 + # CONFIG_FHANDLE is not set 14 + # CONFIG_BASE_FULL is not set 15 + # CONFIG_EPOLL is not set 16 + # CONFIG_SIGNALFD is not set 17 + # CONFIG_TIMERFD is not set 18 + # CONFIG_EVENTFD is not set 19 + # CONFIG_AIO is not set 20 + # CONFIG_IO_URING is not set 21 + # CONFIG_ADVISE_SYSCALLS is not set 22 + # CONFIG_MEMBARRIER is not set 23 + # CONFIG_KALLSYMS is not set 24 + # CONFIG_VM_EVENT_COUNTERS is not set 25 + # CONFIG_COMPAT_BRK is not set 26 + CONFIG_SLOB=y 27 + # CONFIG_SLAB_MERGE_DEFAULT is not set 28 + # CONFIG_MMU is not set 29 + CONFIG_MAXPHYSMEM_2GB=y 30 + CONFIG_SMP=y 31 + CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0" 32 + CONFIG_CMDLINE_FORCE=y 33 + # CONFIG_BLK_DEV_BSG is not set 34 + CONFIG_PARTITION_ADVANCED=y 35 + # CONFIG_MSDOS_PARTITION is not set 36 + # CONFIG_EFI_PARTITION is not set 37 + # CONFIG_MQ_IOSCHED_DEADLINE is not set 38 + # CONFIG_MQ_IOSCHED_KYBER is not set 39 + CONFIG_BINFMT_FLAT=y 40 + # CONFIG_COREDUMP is not set 41 + CONFIG_DEVTMPFS=y 42 + CONFIG_DEVTMPFS_MOUNT=y 43 + # CONFIG_FW_LOADER is not set 44 + # CONFIG_ALLOW_DEV_COREDUMP is not set 45 + CONFIG_VIRTIO_BLK=y 46 + # CONFIG_INPUT_KEYBOARD is not set 47 + # CONFIG_INPUT_MOUSE is not set 48 + # CONFIG_SERIO is not set 49 + # CONFIG_LEGACY_PTYS is not set 50 + # CONFIG_LDISC_AUTOLOAD is not set 51 + # CONFIG_DEVMEM is not set 52 + CONFIG_SERIAL_8250=y 53 + # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set 54 + CONFIG_SERIAL_8250_CONSOLE=y 55 + CONFIG_SERIAL_8250_NR_UARTS=1 56 + CONFIG_SERIAL_8250_RUNTIME_UARTS=1 57 + CONFIG_SERIAL_OF_PLATFORM=y 
58 + # CONFIG_HW_RANDOM is not set 59 + # CONFIG_HWMON is not set 60 + # CONFIG_LCD_CLASS_DEVICE is not set 61 + # CONFIG_BACKLIGHT_CLASS_DEVICE is not set 62 + # CONFIG_VGA_CONSOLE is not set 63 + # CONFIG_HID is not set 64 + # CONFIG_USB_SUPPORT is not set 65 + CONFIG_VIRTIO_MMIO=y 66 + CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y 67 + CONFIG_SIFIVE_PLIC=y 68 + # CONFIG_VALIDATE_FS_PARSER is not set 69 + CONFIG_EXT2_FS=y 70 + # CONFIG_DNOTIFY is not set 71 + # CONFIG_INOTIFY_USER is not set 72 + # CONFIG_MISC_FILESYSTEMS is not set 73 + CONFIG_LSM="[]" 74 + CONFIG_PRINTK_TIME=y 75 + # CONFIG_SCHED_DEBUG is not set 76 + # CONFIG_RCU_TRACE is not set 77 + # CONFIG_FTRACE is not set 78 + # CONFIG_RUNTIME_TESTING_MENU is not set
+8
arch/riscv/include/asm/cache.h
··· 11 11 12 12 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 13 13 14 + /* 15 + * RISC-V requires the stack pointer to be 16-byte aligned, so ensure that 16 + * the flat loader aligns it accordingly. 17 + */ 18 + #ifndef CONFIG_MMU 19 + #define ARCH_SLAB_MINALIGN 16 20 + #endif 21 + 14 22 #endif /* _ASM_RISCV_CACHE_H */
+39
arch/riscv/include/asm/clint.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _ASM_RISCV_CLINT_H 3 + #define _ASM_RISCV_CLINT_H 1 4 + 5 + #include <linux/io.h> 6 + #include <linux/smp.h> 7 + 8 + #ifdef CONFIG_RISCV_M_MODE 9 + extern u32 __iomem *clint_ipi_base; 10 + 11 + void clint_init_boot_cpu(void); 12 + 13 + static inline void clint_send_ipi_single(unsigned long hartid) 14 + { 15 + writel(1, clint_ipi_base + hartid); 16 + } 17 + 18 + static inline void clint_send_ipi_mask(const struct cpumask *hartid_mask) 19 + { 20 + int hartid; 21 + 22 + for_each_cpu(hartid, hartid_mask) 23 + clint_send_ipi_single(hartid); 24 + } 25 + 26 + static inline void clint_clear_ipi(unsigned long hartid) 27 + { 28 + writel(0, clint_ipi_base + hartid); 29 + } 30 + #else /* CONFIG_RISCV_M_MODE */ 31 + #define clint_init_boot_cpu() do { } while (0) 32 + 33 + /* stubs for code that is only reachable under IS_ENABLED(CONFIG_RISCV_M_MODE): */ 34 + void clint_send_ipi_single(unsigned long hartid); 35 + void clint_send_ipi_mask(const struct cpumask *hartid_mask); 36 + void clint_clear_ipi(unsigned long hartid); 37 + #endif /* CONFIG_RISCV_M_MODE */ 38 + 39 + #endif /* _ASM_RISCV_CLINT_H */
+64 -10
arch/riscv/include/asm/csr.h
··· 11 11 12 12 /* Status register flags */ 13 13 #define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */ 14 + #define SR_MIE _AC(0x00000008, UL) /* Machine Interrupt Enable */ 14 15 #define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */ 16 + #define SR_MPIE _AC(0x00000080, UL) /* Previous Machine IE */ 15 17 #define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */ 18 + #define SR_MPP _AC(0x00001800, UL) /* Previously Machine */ 16 19 #define SR_SUM _AC(0x00040000, UL) /* Supervisor User Memory Access */ 17 20 18 21 #define SR_FS _AC(0x00006000, UL) /* Floating-point Status */ ··· 47 44 #define SATP_MODE SATP_MODE_39 48 45 #endif 49 46 50 - /* SCAUSE */ 51 - #define SCAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1)) 47 + /* Exception cause high bit - is an interrupt if set */ 48 + #define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1)) 52 49 50 + /* Interrupt causes (minus the high bit) */ 53 51 #define IRQ_U_SOFT 0 54 52 #define IRQ_S_SOFT 1 55 53 #define IRQ_M_SOFT 3 ··· 61 57 #define IRQ_S_EXT 9 62 58 #define IRQ_M_EXT 11 63 59 60 + /* Exception causes */ 64 61 #define EXC_INST_MISALIGNED 0 65 62 #define EXC_INST_ACCESS 1 66 63 #define EXC_BREAKPOINT 3 ··· 72 67 #define EXC_LOAD_PAGE_FAULT 13 73 68 #define EXC_STORE_PAGE_FAULT 15 74 69 75 - /* SIE (Interrupt Enable) and SIP (Interrupt Pending) flags */ 76 - #define SIE_SSIE (_AC(0x1, UL) << IRQ_S_SOFT) 77 - #define SIE_STIE (_AC(0x1, UL) << IRQ_S_TIMER) 78 - #define SIE_SEIE (_AC(0x1, UL) << IRQ_S_EXT) 79 - 70 + /* symbolic CSR names: */ 80 71 #define CSR_CYCLE 0xc00 81 72 #define CSR_TIME 0xc01 82 73 #define CSR_INSTRET 0xc02 74 + #define CSR_CYCLEH 0xc80 75 + #define CSR_TIMEH 0xc81 76 + #define CSR_INSTRETH 0xc82 77 + 83 78 #define CSR_SSTATUS 0x100 84 79 #define CSR_SIE 0x104 85 80 #define CSR_STVEC 0x105 ··· 90 85 #define CSR_STVAL 0x143 91 86 #define CSR_SIP 0x144 92 87 #define CSR_SATP 0x180 93 - #define CSR_CYCLEH 0xc80 94 - #define CSR_TIMEH 0xc81 95 - #define CSR_INSTRETH 
0xc82 88 + 89 + #define CSR_MSTATUS 0x300 90 + #define CSR_MISA 0x301 91 + #define CSR_MIE 0x304 92 + #define CSR_MTVEC 0x305 93 + #define CSR_MSCRATCH 0x340 94 + #define CSR_MEPC 0x341 95 + #define CSR_MCAUSE 0x342 96 + #define CSR_MTVAL 0x343 97 + #define CSR_MIP 0x344 98 + #define CSR_MHARTID 0xf14 99 + 100 + #ifdef CONFIG_RISCV_M_MODE 101 + # define CSR_STATUS CSR_MSTATUS 102 + # define CSR_IE CSR_MIE 103 + # define CSR_TVEC CSR_MTVEC 104 + # define CSR_SCRATCH CSR_MSCRATCH 105 + # define CSR_EPC CSR_MEPC 106 + # define CSR_CAUSE CSR_MCAUSE 107 + # define CSR_TVAL CSR_MTVAL 108 + # define CSR_IP CSR_MIP 109 + 110 + # define SR_IE SR_MIE 111 + # define SR_PIE SR_MPIE 112 + # define SR_PP SR_MPP 113 + 114 + # define IRQ_SOFT IRQ_M_SOFT 115 + # define IRQ_TIMER IRQ_M_TIMER 116 + # define IRQ_EXT IRQ_M_EXT 117 + #else /* CONFIG_RISCV_M_MODE */ 118 + # define CSR_STATUS CSR_SSTATUS 119 + # define CSR_IE CSR_SIE 120 + # define CSR_TVEC CSR_STVEC 121 + # define CSR_SCRATCH CSR_SSCRATCH 122 + # define CSR_EPC CSR_SEPC 123 + # define CSR_CAUSE CSR_SCAUSE 124 + # define CSR_TVAL CSR_STVAL 125 + # define CSR_IP CSR_SIP 126 + 127 + # define SR_IE SR_SIE 128 + # define SR_PIE SR_SPIE 129 + # define SR_PP SR_SPP 130 + 131 + # define IRQ_SOFT IRQ_S_SOFT 132 + # define IRQ_TIMER IRQ_S_TIMER 133 + # define IRQ_EXT IRQ_S_EXT 134 + #endif /* CONFIG_RISCV_M_MODE */ 135 + 136 + /* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */ 137 + #define IE_SIE (_AC(0x1, UL) << IRQ_SOFT) 138 + #define IE_TIE (_AC(0x1, UL) << IRQ_TIMER) 139 + #define IE_EIE (_AC(0x1, UL) << IRQ_EXT) 96 140 97 141 #ifndef __ASSEMBLY__ 98 142
+2 -2
arch/riscv/include/asm/elf.h
··· 56 56 */ 57 57 #define ELF_PLATFORM (NULL) 58 58 59 + #ifdef CONFIG_MMU 59 60 #define ARCH_DLINFO \ 60 61 do { \ 61 62 NEW_AUX_ENT(AT_SYSINFO_EHDR, \ 62 63 (elf_addr_t)current->mm->context.vdso); \ 63 64 } while (0) 64 - 65 - 66 65 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 67 66 struct linux_binprm; 68 67 extern int arch_setup_additional_pages(struct linux_binprm *bprm, 69 68 int uses_interp); 69 + #endif /* CONFIG_MMU */ 70 70 71 71 #endif /* _ASM_RISCV_ELF_H */
+2
arch/riscv/include/asm/fixmap.h
··· 11 11 #include <asm/page.h> 12 12 #include <asm/pgtable.h> 13 13 14 + #ifdef CONFIG_MMU 14 15 /* 15 16 * Here we define all the compile-time 'special' virtual addresses. 16 17 * The point is to have a constant address at compile time, but to ··· 43 42 44 43 #include <asm-generic/fixmap.h> 45 44 45 + #endif /* CONFIG_MMU */ 46 46 #endif /* _ASM_RISCV_FIXMAP_H */
+6
arch/riscv/include/asm/futex.h
··· 12 12 #include <linux/errno.h> 13 13 #include <asm/asm.h> 14 14 15 + /* We don't even really need the extable code, but for now keep it simple */ 16 + #ifndef CONFIG_MMU 17 + #define __enable_user_access() do { } while (0) 18 + #define __disable_user_access() do { } while (0) 19 + #endif 20 + 15 21 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 16 22 { \ 17 23 uintptr_t tmp; \
+5 -144
arch/riscv/include/asm/io.h
··· 15 15 #include <asm/mmiowb.h> 16 16 #include <asm/pgtable.h> 17 17 18 - extern void __iomem *ioremap(phys_addr_t offset, unsigned long size); 19 - 20 18 /* 21 - * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't 22 - * change the properties of memory regions. This should be fixed by the 23 - * upcoming platform spec. 19 + * MMIO access functions are separated out to break dependency cycles 20 + * when using {read,write}* fns in low-level headers 24 21 */ 25 - #define ioremap_nocache(addr, size) ioremap((addr), (size)) 26 - #define ioremap_wc(addr, size) ioremap((addr), (size)) 27 - #define ioremap_wt(addr, size) ioremap((addr), (size)) 28 - 29 - extern void iounmap(volatile void __iomem *addr); 30 - 31 - /* Generic IO read/write. These perform native-endian accesses. */ 32 - #define __raw_writeb __raw_writeb 33 - static inline void __raw_writeb(u8 val, volatile void __iomem *addr) 34 - { 35 - asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr)); 36 - } 37 - 38 - #define __raw_writew __raw_writew 39 - static inline void __raw_writew(u16 val, volatile void __iomem *addr) 40 - { 41 - asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr)); 42 - } 43 - 44 - #define __raw_writel __raw_writel 45 - static inline void __raw_writel(u32 val, volatile void __iomem *addr) 46 - { 47 - asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr)); 48 - } 49 - 50 - #ifdef CONFIG_64BIT 51 - #define __raw_writeq __raw_writeq 52 - static inline void __raw_writeq(u64 val, volatile void __iomem *addr) 53 - { 54 - asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr)); 55 - } 56 - #endif 57 - 58 - #define __raw_readb __raw_readb 59 - static inline u8 __raw_readb(const volatile void __iomem *addr) 60 - { 61 - u8 val; 62 - 63 - asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr)); 64 - return val; 65 - } 66 - 67 - #define __raw_readw __raw_readw 68 - static inline u16 __raw_readw(const volatile void __iomem *addr) 69 - { 70 - u16 val; 71 - 72 - asm volatile("lh 
%0, 0(%1)" : "=r" (val) : "r" (addr)); 73 - return val; 74 - } 75 - 76 - #define __raw_readl __raw_readl 77 - static inline u32 __raw_readl(const volatile void __iomem *addr) 78 - { 79 - u32 val; 80 - 81 - asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr)); 82 - return val; 83 - } 84 - 85 - #ifdef CONFIG_64BIT 86 - #define __raw_readq __raw_readq 87 - static inline u64 __raw_readq(const volatile void __iomem *addr) 88 - { 89 - u64 val; 90 - 91 - asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr)); 92 - return val; 93 - } 94 - #endif 95 - 96 - /* 97 - * Unordered I/O memory access primitives. These are even more relaxed than 98 - * the relaxed versions, as they don't even order accesses between successive 99 - * operations to the I/O regions. 100 - */ 101 - #define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; }) 102 - #define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; }) 103 - #define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; }) 104 - 105 - #define writeb_cpu(v,c) ((void)__raw_writeb((v),(c))) 106 - #define writew_cpu(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c))) 107 - #define writel_cpu(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) 108 - 109 - #ifdef CONFIG_64BIT 110 - #define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; }) 111 - #define writeq_cpu(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c))) 112 - #endif 113 - 114 - /* 115 - * Relaxed I/O memory access primitives. These follow the Device memory 116 - * ordering rules but do not guarantee any ordering relative to Normal memory 117 - * accesses. These are defined to order the indicated access (either a read or 118 - * write) with all other I/O memory accesses. Since the platform specification 119 - * defines that all I/O regions are strongly ordered on channel 2, no explicit 120 - * fences are required to enforce this ordering. 
121 - */ 122 - /* FIXME: These are now the same as asm-generic */ 123 - #define __io_rbr() do {} while (0) 124 - #define __io_rar() do {} while (0) 125 - #define __io_rbw() do {} while (0) 126 - #define __io_raw() do {} while (0) 127 - 128 - #define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; }) 129 - #define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; }) 130 - #define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; }) 131 - 132 - #define writeb_relaxed(v,c) ({ __io_rbw(); writeb_cpu((v),(c)); __io_raw(); }) 133 - #define writew_relaxed(v,c) ({ __io_rbw(); writew_cpu((v),(c)); __io_raw(); }) 134 - #define writel_relaxed(v,c) ({ __io_rbw(); writel_cpu((v),(c)); __io_raw(); }) 135 - 136 - #ifdef CONFIG_64BIT 137 - #define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; }) 138 - #define writeq_relaxed(v,c) ({ __io_rbw(); writeq_cpu((v),(c)); __io_raw(); }) 139 - #endif 140 - 141 - /* 142 - * I/O memory access primitives. Reads are ordered relative to any 143 - * following Normal memory access. Writes are ordered relative to any prior 144 - * Normal memory access. The memory barriers here are necessary as RISC-V 145 - * doesn't define any ordering between the memory space and the I/O space. 
146 - */ 147 - #define __io_br() do {} while (0) 148 - #define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory"); 149 - #define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory"); 150 - #define __io_aw() mmiowb_set_pending() 151 - 152 - #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; }) 153 - #define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; }) 154 - #define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; }) 155 - 156 - #define writeb(v,c) ({ __io_bw(); writeb_cpu((v),(c)); __io_aw(); }) 157 - #define writew(v,c) ({ __io_bw(); writew_cpu((v),(c)); __io_aw(); }) 158 - #define writel(v,c) ({ __io_bw(); writel_cpu((v),(c)); __io_aw(); }) 159 - 160 - #ifdef CONFIG_64BIT 161 - #define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; }) 162 - #define writeq(v,c) ({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); }) 163 - #endif 22 + #include <asm/mmio.h> 164 23 165 24 /* 166 25 * I/O port access constants. 167 26 */ 27 + #ifdef CONFIG_MMU 168 28 #define IO_SPACE_LIMIT (PCI_IO_SIZE - 1) 169 29 #define PCI_IOBASE ((void __iomem *)PCI_IO_START) 30 + #endif /* CONFIG_MMU */ 170 31 171 32 /* 172 33 * Emulation routines for the port-mapped IO space used by some PCI drivers.
+6 -6
arch/riscv/include/asm/irqflags.h
··· 13 13 /* read interrupt enabled status */ 14 14 static inline unsigned long arch_local_save_flags(void) 15 15 { 16 - return csr_read(CSR_SSTATUS); 16 + return csr_read(CSR_STATUS); 17 17 } 18 18 19 19 /* unconditionally enable interrupts */ 20 20 static inline void arch_local_irq_enable(void) 21 21 { 22 - csr_set(CSR_SSTATUS, SR_SIE); 22 + csr_set(CSR_STATUS, SR_IE); 23 23 } 24 24 25 25 /* unconditionally disable interrupts */ 26 26 static inline void arch_local_irq_disable(void) 27 27 { 28 - csr_clear(CSR_SSTATUS, SR_SIE); 28 + csr_clear(CSR_STATUS, SR_IE); 29 29 } 30 30 31 31 /* get status and disable interrupts */ 32 32 static inline unsigned long arch_local_irq_save(void) 33 33 { 34 - return csr_read_clear(CSR_SSTATUS, SR_SIE); 34 + return csr_read_clear(CSR_STATUS, SR_IE); 35 35 } 36 36 37 37 /* test flags */ 38 38 static inline int arch_irqs_disabled_flags(unsigned long flags) 39 39 { 40 - return !(flags & SR_SIE); 40 + return !(flags & SR_IE); 41 41 } 42 42 43 43 /* test hardware interrupt enable bit */ ··· 49 49 /* set interrupt enabled status */ 50 50 static inline void arch_local_irq_restore(unsigned long flags) 51 51 { 52 - csr_set(CSR_SSTATUS, flags & SR_SIE); 52 + csr_set(CSR_STATUS, flags & SR_IE); 53 53 } 54 54 55 55 #endif /* _ASM_RISCV_IRQFLAGS_H */
+168
arch/riscv/include/asm/mmio.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h 4 + * which was based on arch/arm/include/io.h 5 + * 6 + * Copyright (C) 1996-2000 Russell King 7 + * Copyright (C) 2012 ARM Ltd. 8 + * Copyright (C) 2014 Regents of the University of California 9 + */ 10 + 11 + #ifndef _ASM_RISCV_MMIO_H 12 + #define _ASM_RISCV_MMIO_H 13 + 14 + #include <linux/types.h> 15 + #include <asm/mmiowb.h> 16 + 17 + #ifdef CONFIG_MMU 18 + void __iomem *ioremap(phys_addr_t offset, unsigned long size); 19 + 20 + /* 21 + * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't 22 + * change the properties of memory regions. This should be fixed by the 23 + * upcoming platform spec. 24 + */ 25 + #define ioremap_nocache(addr, size) ioremap((addr), (size)) 26 + #define ioremap_wc(addr, size) ioremap((addr), (size)) 27 + #define ioremap_wt(addr, size) ioremap((addr), (size)) 28 + 29 + void iounmap(volatile void __iomem *addr); 30 + #else 31 + #define pgprot_noncached(x) (x) 32 + #endif /* CONFIG_MMU */ 33 + 34 + /* Generic IO read/write. These perform native-endian accesses. 
*/ 35 + #define __raw_writeb __raw_writeb 36 + static inline void __raw_writeb(u8 val, volatile void __iomem *addr) 37 + { 38 + asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr)); 39 + } 40 + 41 + #define __raw_writew __raw_writew 42 + static inline void __raw_writew(u16 val, volatile void __iomem *addr) 43 + { 44 + asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr)); 45 + } 46 + 47 + #define __raw_writel __raw_writel 48 + static inline void __raw_writel(u32 val, volatile void __iomem *addr) 49 + { 50 + asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr)); 51 + } 52 + 53 + #ifdef CONFIG_64BIT 54 + #define __raw_writeq __raw_writeq 55 + static inline void __raw_writeq(u64 val, volatile void __iomem *addr) 56 + { 57 + asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr)); 58 + } 59 + #endif 60 + 61 + #define __raw_readb __raw_readb 62 + static inline u8 __raw_readb(const volatile void __iomem *addr) 63 + { 64 + u8 val; 65 + 66 + asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr)); 67 + return val; 68 + } 69 + 70 + #define __raw_readw __raw_readw 71 + static inline u16 __raw_readw(const volatile void __iomem *addr) 72 + { 73 + u16 val; 74 + 75 + asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr)); 76 + return val; 77 + } 78 + 79 + #define __raw_readl __raw_readl 80 + static inline u32 __raw_readl(const volatile void __iomem *addr) 81 + { 82 + u32 val; 83 + 84 + asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr)); 85 + return val; 86 + } 87 + 88 + #ifdef CONFIG_64BIT 89 + #define __raw_readq __raw_readq 90 + static inline u64 __raw_readq(const volatile void __iomem *addr) 91 + { 92 + u64 val; 93 + 94 + asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr)); 95 + return val; 96 + } 97 + #endif 98 + 99 + /* 100 + * Unordered I/O memory access primitives. These are even more relaxed than 101 + * the relaxed versions, as they don't even order accesses between successive 102 + * operations to the I/O regions. 
103 + */ 104 + #define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; }) 105 + #define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; }) 106 + #define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; }) 107 + 108 + #define writeb_cpu(v, c) ((void)__raw_writeb((v), (c))) 109 + #define writew_cpu(v, c) ((void)__raw_writew((__force u16)cpu_to_le16(v), (c))) 110 + #define writel_cpu(v, c) ((void)__raw_writel((__force u32)cpu_to_le32(v), (c))) 111 + 112 + #ifdef CONFIG_64BIT 113 + #define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; }) 114 + #define writeq_cpu(v, c) ((void)__raw_writeq((__force u64)cpu_to_le64(v), (c))) 115 + #endif 116 + 117 + /* 118 + * Relaxed I/O memory access primitives. These follow the Device memory 119 + * ordering rules but do not guarantee any ordering relative to Normal memory 120 + * accesses. These are defined to order the indicated access (either a read or 121 + * write) with all other I/O memory accesses. Since the platform specification 122 + * defines that all I/O regions are strongly ordered on channel 2, no explicit 123 + * fences are required to enforce this ordering. 
124 + */ 125 + /* FIXME: These are now the same as asm-generic */ 126 + #define __io_rbr() do {} while (0) 127 + #define __io_rar() do {} while (0) 128 + #define __io_rbw() do {} while (0) 129 + #define __io_raw() do {} while (0) 130 + 131 + #define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; }) 132 + #define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; }) 133 + #define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; }) 134 + 135 + #define writeb_relaxed(v, c) ({ __io_rbw(); writeb_cpu((v), (c)); __io_raw(); }) 136 + #define writew_relaxed(v, c) ({ __io_rbw(); writew_cpu((v), (c)); __io_raw(); }) 137 + #define writel_relaxed(v, c) ({ __io_rbw(); writel_cpu((v), (c)); __io_raw(); }) 138 + 139 + #ifdef CONFIG_64BIT 140 + #define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; }) 141 + #define writeq_relaxed(v, c) ({ __io_rbw(); writeq_cpu((v), (c)); __io_raw(); }) 142 + #endif 143 + 144 + /* 145 + * I/O memory access primitives. Reads are ordered relative to any 146 + * following Normal memory access. Writes are ordered relative to any prior 147 + * Normal memory access. The memory barriers here are necessary as RISC-V 148 + * doesn't define any ordering between the memory space and the I/O space. 
149 + */ 150 + #define __io_br() do {} while (0) 151 + #define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory") 152 + #define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory") 153 + #define __io_aw() mmiowb_set_pending() 154 + 155 + #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; }) 156 + #define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; }) 157 + #define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; }) 158 + 159 + #define writeb(v, c) ({ __io_bw(); writeb_cpu((v), (c)); __io_aw(); }) 160 + #define writew(v, c) ({ __io_bw(); writew_cpu((v), (c)); __io_aw(); }) 161 + #define writel(v, c) ({ __io_bw(); writel_cpu((v), (c)); __io_aw(); }) 162 + 163 + #ifdef CONFIG_64BIT 164 + #define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; }) 165 + #define writeq(v, c) ({ __io_bw(); writeq_cpu((v), (c)); __io_aw(); }) 166 + #endif 167 + 168 + #endif /* _ASM_RISCV_MMIO_H */
+3
arch/riscv/include/asm/mmu.h
··· 10 10 #ifndef __ASSEMBLY__ 11 11 12 12 typedef struct { 13 + #ifndef CONFIG_MMU 14 + unsigned long end_brk; 15 + #endif 13 16 void *vdso; 14 17 #ifdef CONFIG_SMP 15 18 /* A local icache flush is needed before user execution can resume. */
+7 -3
arch/riscv/include/asm/page.h
··· 88 88 #define PTE_FMT "%08lx" 89 89 #endif 90 90 91 + #ifdef CONFIG_MMU 91 92 extern unsigned long va_pa_offset; 92 93 extern unsigned long pfn_base; 94 + #define ARCH_PFN_OFFSET (pfn_base) 95 + #else 96 + #define va_pa_offset 0 97 + #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) 98 + #endif /* CONFIG_MMU */ 93 99 94 100 extern unsigned long max_low_pfn; 95 101 extern unsigned long min_low_pfn; ··· 118 112 119 113 #ifdef CONFIG_FLATMEM 120 114 #define pfn_valid(pfn) \ 121 - (((pfn) >= pfn_base) && (((pfn)-pfn_base) < max_mapnr)) 115 + (((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)) 122 116 #endif 123 - 124 - #define ARCH_PFN_OFFSET (pfn_base) 125 117 126 118 #endif /* __ASSEMBLY__ */ 127 119
+2
arch/riscv/include/asm/pgalloc.h
··· 10 10 #include <linux/mm.h> 11 11 #include <asm/tlb.h> 12 12 13 + #ifdef CONFIG_MMU 13 14 #include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */ 14 15 15 16 static inline void pmd_populate_kernel(struct mm_struct *mm, ··· 82 81 pgtable_pte_page_dtor(pte); \ 83 82 tlb_remove_page((tlb), pte); \ 84 83 } while (0) 84 + #endif /* CONFIG_MMU */ 85 85 86 86 #endif /* _ASM_RISCV_PGALLOC_H */
+53 -41
arch/riscv/include/asm/pgtable.h
··· 25 25 #include <asm/pgtable-32.h> 26 26 #endif /* CONFIG_64BIT */ 27 27 28 + #ifdef CONFIG_MMU 28 29 /* Number of entries in the page global directory */ 29 30 #define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t)) 30 31 /* Number of entries in the page table */ ··· 33 32 34 33 /* Number of PGD entries that a user-mode program can use */ 35 34 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) 36 - #define FIRST_USER_ADDRESS 0 37 35 38 36 /* Page protection bits */ 39 37 #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) ··· 83 83 #define __S101 PAGE_READ_EXEC 84 84 #define __S110 PAGE_SHARED_EXEC 85 85 #define __S111 PAGE_SHARED_EXEC 86 - 87 - #define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) 88 - #define VMALLOC_END (PAGE_OFFSET - 1) 89 - #define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) 90 - #define PCI_IO_SIZE SZ_16M 91 - 92 - /* 93 - * Roughly size the vmemmap space to be large enough to fit enough 94 - * struct pages to map half the virtual address space. Then 95 - * position vmemmap directly below the VMALLOC region. 96 - */ 97 - #define VMEMMAP_SHIFT \ 98 - (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT) 99 - #define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT) 100 - #define VMEMMAP_END (VMALLOC_START - 1) 101 - #define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE) 102 - 103 - #define vmemmap ((struct page *)VMEMMAP_START) 104 - 105 - #define PCI_IO_END VMEMMAP_START 106 - #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) 107 - #define FIXADDR_TOP PCI_IO_START 108 - 109 - #ifdef CONFIG_64BIT 110 - #define FIXADDR_SIZE PMD_SIZE 111 - #else 112 - #define FIXADDR_SIZE PGDIR_SIZE 113 - #endif 114 - #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 115 - 116 - /* 117 - * ZERO_PAGE is a global shared page that is always zero, 118 - * used for zero-mapped memory areas, etc. 
119 - */ 120 - extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; 121 - #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 122 86 123 87 static inline int pmd_present(pmd_t pmd) 124 88 { ··· 394 430 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 395 431 #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 396 432 397 - #define kern_addr_valid(addr) (1) /* FIXME */ 433 + #define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) 434 + #define VMALLOC_END (PAGE_OFFSET - 1) 435 + #define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) 398 436 399 - extern void *dtb_early_va; 400 - extern void setup_bootmem(void); 401 - extern void paging_init(void); 437 + /* 438 + * Roughly size the vmemmap space to be large enough to fit enough 439 + * struct pages to map half the virtual address space. Then 440 + * position vmemmap directly below the VMALLOC region. 441 + */ 442 + #define VMEMMAP_SHIFT \ 443 + (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT) 444 + #define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT) 445 + #define VMEMMAP_END (VMALLOC_START - 1) 446 + #define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE) 447 + 448 + #define vmemmap ((struct page *)VMEMMAP_START) 449 + 450 + #define PCI_IO_SIZE SZ_16M 451 + #define PCI_IO_END VMEMMAP_START 452 + #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) 453 + 454 + #define FIXADDR_TOP PCI_IO_START 455 + #ifdef CONFIG_64BIT 456 + #define FIXADDR_SIZE PMD_SIZE 457 + #else 458 + #define FIXADDR_SIZE PGDIR_SIZE 459 + #endif 460 + #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 402 461 403 462 /* 404 463 * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32. 
··· 432 445 #else 433 446 #define TASK_SIZE FIXADDR_START 434 447 #endif 448 + 449 + #else /* CONFIG_MMU */ 450 + 451 + #define PAGE_KERNEL __pgprot(0) 452 + #define swapper_pg_dir NULL 453 + #define VMALLOC_START 0 454 + 455 + #define TASK_SIZE 0xffffffffUL 456 + 457 + #endif /* !CONFIG_MMU */ 458 + 459 + #define kern_addr_valid(addr) (1) /* FIXME */ 460 + 461 + extern void *dtb_early_va; 462 + void setup_bootmem(void); 463 + void paging_init(void); 464 + 465 + #define FIRST_USER_ADDRESS 0 466 + 467 + /* 468 + * ZERO_PAGE is a global shared page that is always zero, 469 + * used for zero-mapped memory areas, etc. 470 + */ 471 + extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; 472 + #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 435 473 436 474 #include <asm-generic/pgtable.h> 437 475
+1 -1
arch/riscv/include/asm/processor.h
··· 42 42 ((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE \ 43 43 - ALIGN(sizeof(struct pt_regs), STACK_ALIGN))) 44 44 45 - #define KSTK_EIP(tsk) (task_pt_regs(tsk)->sepc) 45 + #define KSTK_EIP(tsk) (task_pt_regs(tsk)->epc) 46 46 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) 47 47 48 48
+8 -8
arch/riscv/include/asm/ptrace.h
··· 12 12 #ifndef __ASSEMBLY__ 13 13 14 14 struct pt_regs { 15 - unsigned long sepc; 15 + unsigned long epc; 16 16 unsigned long ra; 17 17 unsigned long sp; 18 18 unsigned long gp; ··· 44 44 unsigned long t4; 45 45 unsigned long t5; 46 46 unsigned long t6; 47 - /* Supervisor CSRs */ 48 - unsigned long sstatus; 49 - unsigned long sbadaddr; 50 - unsigned long scause; 47 + /* Supervisor/Machine CSRs */ 48 + unsigned long status; 49 + unsigned long badaddr; 50 + unsigned long cause; 51 51 /* a0 value before the syscall */ 52 52 unsigned long orig_a0; 53 53 }; ··· 58 58 #define REG_FMT "%08lx" 59 59 #endif 60 60 61 - #define user_mode(regs) (((regs)->sstatus & SR_SPP) == 0) 61 + #define user_mode(regs) (((regs)->status & SR_PP) == 0) 62 62 63 63 64 64 /* Helpers for working with the instruction pointer */ 65 65 static inline unsigned long instruction_pointer(struct pt_regs *regs) 66 66 { 67 - return regs->sepc; 67 + return regs->epc; 68 68 } 69 69 static inline void instruction_pointer_set(struct pt_regs *regs, 70 70 unsigned long val) 71 71 { 72 - regs->sepc = val; 72 + regs->epc = val; 73 73 } 74 74 75 75 #define profile_pc(regs) instruction_pointer(regs)
+8 -1
arch/riscv/include/asm/sbi.h
··· 8 8 9 9 #include <linux/types.h> 10 10 11 + #ifdef CONFIG_RISCV_SBI 11 12 #define SBI_SET_TIMER 0 12 13 #define SBI_CONSOLE_PUTCHAR 1 13 14 #define SBI_CONSOLE_GETCHAR 2 ··· 94 93 { 95 94 SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid); 96 95 } 97 - 96 + #else /* CONFIG_RISCV_SBI */ 97 + /* stubs for code that is only reachable under IS_ENABLED(CONFIG_RISCV_SBI): */ 98 + void sbi_set_timer(uint64_t stime_value); 99 + void sbi_clear_ipi(void); 100 + void sbi_send_ipi(const unsigned long *hart_mask); 101 + void sbi_remote_fence_i(const unsigned long *hart_mask); 102 + #endif /* CONFIG_RISCV_SBI */ 98 103 #endif /* _ASM_RISCV_SBI_H */
+5 -5
arch/riscv/include/asm/switch_to.h
··· 17 17 18 18 static inline void __fstate_clean(struct pt_regs *regs) 19 19 { 20 - regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN; 20 + regs->status = (regs->status & ~SR_FS) | SR_FS_CLEAN; 21 21 } 22 22 23 23 static inline void fstate_off(struct task_struct *task, 24 24 struct pt_regs *regs) 25 25 { 26 - regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF; 26 + regs->status = (regs->status & ~SR_FS) | SR_FS_OFF; 27 27 } 28 28 29 29 static inline void fstate_save(struct task_struct *task, 30 30 struct pt_regs *regs) 31 31 { 32 - if ((regs->sstatus & SR_FS) == SR_FS_DIRTY) { 32 + if ((regs->status & SR_FS) == SR_FS_DIRTY) { 33 33 __fstate_save(task); 34 34 __fstate_clean(regs); 35 35 } ··· 38 38 static inline void fstate_restore(struct task_struct *task, 39 39 struct pt_regs *regs) 40 40 { 41 - if ((regs->sstatus & SR_FS) != SR_FS_OFF) { 41 + if ((regs->status & SR_FS) != SR_FS_OFF) { 42 42 __fstate_restore(task); 43 43 __fstate_clean(regs); 44 44 } ··· 50 50 struct pt_regs *regs; 51 51 52 52 regs = task_pt_regs(prev); 53 - if (unlikely(regs->sstatus & SR_SD)) 53 + if (unlikely(regs->status & SR_SD)) 54 54 fstate_save(prev, regs); 55 55 fstate_restore(next, task_pt_regs(next)); 56 56 }
+17 -2
arch/riscv/include/asm/timex.h
··· 7 7 #define _ASM_RISCV_TIMEX_H 8 8 9 9 #include <asm/csr.h> 10 + #include <asm/mmio.h> 10 11 11 12 typedef unsigned long cycles_t; 12 13 14 + extern u64 __iomem *riscv_time_val; 15 + extern u64 __iomem *riscv_time_cmp; 16 + 17 + #ifdef CONFIG_64BIT 18 + #define mmio_get_cycles() readq_relaxed(riscv_time_val) 19 + #else 20 + #define mmio_get_cycles() readl_relaxed(riscv_time_val) 21 + #define mmio_get_cycles_hi() readl_relaxed(((u32 *)riscv_time_val) + 1) 22 + #endif 23 + 13 24 static inline cycles_t get_cycles(void) 14 25 { 15 - return csr_read(CSR_TIME); 26 + if (IS_ENABLED(CONFIG_RISCV_SBI)) 27 + return csr_read(CSR_TIME); 28 + return mmio_get_cycles(); 16 29 } 17 30 #define get_cycles get_cycles 18 31 ··· 37 24 #else /* CONFIG_64BIT */ 38 25 static inline u32 get_cycles_hi(void) 39 26 { 40 - return csr_read(CSR_TIMEH); 27 + if (IS_ENABLED(CONFIG_RISCV_SBI)) 28 + return csr_read(CSR_TIMEH); 29 + return mmio_get_cycles_hi(); 41 30 } 42 31 43 32 static inline u64 get_cycles64(void)
+9 -3
arch/riscv/include/asm/tlbflush.h
··· 10 10 #include <linux/mm_types.h> 11 11 #include <asm/smp.h> 12 12 13 + #ifdef CONFIG_MMU 13 14 static inline void local_flush_tlb_all(void) 14 15 { 15 16 __asm__ __volatile__ ("sfence.vma" : : : "memory"); ··· 21 20 { 22 21 __asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"); 23 22 } 23 + #else /* CONFIG_MMU */ 24 + #define local_flush_tlb_all() do { } while (0) 25 + #define local_flush_tlb_page(addr) do { } while (0) 26 + #endif /* CONFIG_MMU */ 24 27 25 - #ifdef CONFIG_SMP 28 + #if defined(CONFIG_SMP) && defined(CONFIG_MMU) 26 29 void flush_tlb_all(void); 27 30 void flush_tlb_mm(struct mm_struct *mm); 28 31 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); 29 32 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 30 33 unsigned long end); 31 - #else /* CONFIG_SMP */ 34 + #else /* CONFIG_SMP && CONFIG_MMU */ 35 + 32 36 #define flush_tlb_all() local_flush_tlb_all() 33 37 #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr) 34 38 ··· 44 38 } 45 39 46 40 #define flush_tlb_mm(mm) flush_tlb_all() 47 - #endif /* CONFIG_SMP */ 41 + #endif /* !CONFIG_SMP || !CONFIG_MMU */ 48 42 49 43 /* Flush a range of kernel pages */ 50 44 static inline void flush_tlb_kernel_range(unsigned long start,
+4
arch/riscv/include/asm/uaccess.h
··· 11 11 /* 12 12 * User space memory access functions 13 13 */ 14 + #ifdef CONFIG_MMU 14 15 #include <linux/errno.h> 15 16 #include <linux/compiler.h> 16 17 #include <linux/thread_info.h> ··· 476 475 __ret; \ 477 476 }) 478 477 478 + #else /* CONFIG_MMU */ 479 + #include <asm-generic/uaccess.h> 480 + #endif /* CONFIG_MMU */ 479 481 #endif /* _ASM_RISCV_UACCESS_H */
+3 -2
arch/riscv/kernel/Makefile
··· 25 25 obj-y += traps.o 26 26 obj-y += riscv_ksyms.o 27 27 obj-y += stacktrace.o 28 - obj-y += vdso.o 29 28 obj-y += cacheinfo.o 30 - obj-y += vdso/ 29 + obj-$(CONFIG_MMU) += vdso.o vdso/ 31 30 31 + obj-$(CONFIG_RISCV_M_MODE) += clint.o 32 32 obj-$(CONFIG_FPU) += fpu.o 33 33 obj-$(CONFIG_SMP) += smpboot.o 34 34 obj-$(CONFIG_SMP) += smp.o ··· 41 41 obj-$(CONFIG_PERF_EVENTS) += perf_event.o 42 42 obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o 43 43 obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o 44 + obj-$(CONFIG_RISCV_SBI) += sbi.o 44 45 45 46 clean:
+4 -4
arch/riscv/kernel/asm-offsets.c
··· 71 71 OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr); 72 72 73 73 DEFINE(PT_SIZE, sizeof(struct pt_regs)); 74 - OFFSET(PT_SEPC, pt_regs, sepc); 74 + OFFSET(PT_EPC, pt_regs, epc); 75 75 OFFSET(PT_RA, pt_regs, ra); 76 76 OFFSET(PT_FP, pt_regs, s0); 77 77 OFFSET(PT_S0, pt_regs, s0); ··· 105 105 OFFSET(PT_T6, pt_regs, t6); 106 106 OFFSET(PT_GP, pt_regs, gp); 107 107 OFFSET(PT_ORIG_A0, pt_regs, orig_a0); 108 - OFFSET(PT_SSTATUS, pt_regs, sstatus); 109 - OFFSET(PT_SBADADDR, pt_regs, sbadaddr); 110 - OFFSET(PT_SCAUSE, pt_regs, scause); 108 + OFFSET(PT_STATUS, pt_regs, status); 109 + OFFSET(PT_BADADDR, pt_regs, badaddr); 110 + OFFSET(PT_CAUSE, pt_regs, cause); 111 111 112 112 /* 113 113 * THREAD_{F,X}* might be larger than a S-type offset can handle, but
+44
arch/riscv/kernel/clint.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2019 Christoph Hellwig. 4 + */ 5 + 6 + #include <linux/io.h> 7 + #include <linux/of_address.h> 8 + #include <linux/types.h> 9 + #include <asm/clint.h> 10 + #include <asm/csr.h> 11 + #include <asm/timex.h> 12 + #include <asm/smp.h> 13 + 14 + /* 15 + * This is the layout used by the SiFive clint, which is also shared by the qemu 16 + * virt platform, and the Kendryte KD210 at least. 17 + */ 18 + #define CLINT_IPI_OFF 0 19 + #define CLINT_TIME_CMP_OFF 0x4000 20 + #define CLINT_TIME_VAL_OFF 0xbff8 21 + 22 + u32 __iomem *clint_ipi_base; 23 + 24 + void clint_init_boot_cpu(void) 25 + { 26 + struct device_node *np; 27 + void __iomem *base; 28 + 29 + np = of_find_compatible_node(NULL, NULL, "riscv,clint0"); 30 + if (!np) { 31 + panic("clint not found"); 32 + return; 33 + } 34 + 35 + base = of_iomap(np, 0); 36 + if (!base) 37 + panic("could not map CLINT"); 38 + 39 + clint_ipi_base = base + CLINT_IPI_OFF; 40 + riscv_time_cmp = base + CLINT_TIME_CMP_OFF; 41 + riscv_time_val = base + CLINT_TIME_VAL_OFF; 42 + 43 + clint_clear_ipi(boot_cpu_hartid); 44 + }
+54 -31
arch/riscv/kernel/entry.S
··· 26 26 27 27 /* 28 28 * If coming from userspace, preserve the user thread pointer and load 29 - * the kernel thread pointer. If we came from the kernel, sscratch 30 - * will contain 0, and we should continue on the current TP. 29 + * the kernel thread pointer. If we came from the kernel, the scratch 30 + * register will contain 0, and we should continue on the current TP. 31 31 */ 32 - csrrw tp, CSR_SSCRATCH, tp 32 + csrrw tp, CSR_SCRATCH, tp 33 33 bnez tp, _save_context 34 34 35 35 _restore_kernel_tpsp: 36 - csrr tp, CSR_SSCRATCH 36 + csrr tp, CSR_SCRATCH 37 37 REG_S sp, TASK_TI_KERNEL_SP(tp) 38 38 _save_context: 39 39 REG_S sp, TASK_TI_USER_SP(tp) ··· 79 79 li t0, SR_SUM | SR_FS 80 80 81 81 REG_L s0, TASK_TI_USER_SP(tp) 82 - csrrc s1, CSR_SSTATUS, t0 83 - csrr s2, CSR_SEPC 84 - csrr s3, CSR_STVAL 85 - csrr s4, CSR_SCAUSE 86 - csrr s5, CSR_SSCRATCH 82 + csrrc s1, CSR_STATUS, t0 83 + csrr s2, CSR_EPC 84 + csrr s3, CSR_TVAL 85 + csrr s4, CSR_CAUSE 86 + csrr s5, CSR_SCRATCH 87 87 REG_S s0, PT_SP(sp) 88 - REG_S s1, PT_SSTATUS(sp) 89 - REG_S s2, PT_SEPC(sp) 90 - REG_S s3, PT_SBADADDR(sp) 91 - REG_S s4, PT_SCAUSE(sp) 88 + REG_S s1, PT_STATUS(sp) 89 + REG_S s2, PT_EPC(sp) 90 + REG_S s3, PT_BADADDR(sp) 91 + REG_S s4, PT_CAUSE(sp) 92 92 REG_S s5, PT_TP(sp) 93 93 .endm 94 94 ··· 97 97 * registers from the stack. 98 98 */ 99 99 .macro RESTORE_ALL 100 - REG_L a0, PT_SSTATUS(sp) 100 + REG_L a0, PT_STATUS(sp) 101 101 /* 102 102 * The current load reservation is effectively part of the processor's 103 103 * state, in the sense that load reservations cannot be shared between ··· 115 115 * completes, implementations are allowed to expand reservations to be 116 116 * arbitrarily large. 
117 117 */ 118 - REG_L a2, PT_SEPC(sp) 119 - REG_SC x0, a2, PT_SEPC(sp) 118 + REG_L a2, PT_EPC(sp) 119 + REG_SC x0, a2, PT_EPC(sp) 120 120 121 - csrw CSR_SSTATUS, a0 122 - csrw CSR_SEPC, a2 121 + csrw CSR_STATUS, a0 122 + csrw CSR_EPC, a2 123 123 124 124 REG_L x1, PT_RA(sp) 125 125 REG_L x3, PT_GP(sp) ··· 163 163 SAVE_ALL 164 164 165 165 /* 166 - * Set sscratch register to 0, so that if a recursive exception 166 + * Set the scratch register to 0, so that if a recursive exception 167 167 * occurs, the exception vector knows it came from the kernel 168 168 */ 169 - csrw CSR_SSCRATCH, x0 169 + csrw CSR_SCRATCH, x0 170 170 171 171 /* Load the global pointer */ 172 172 .option push ··· 185 185 move a0, sp /* pt_regs */ 186 186 tail do_IRQ 187 187 1: 188 - /* Exceptions run with interrupts enabled or disabled 189 - depending on the state of sstatus.SR_SPIE */ 190 - andi t0, s1, SR_SPIE 188 + /* 189 + * Exceptions run with interrupts enabled or disabled depending on the 190 + * state of SR_PIE in m/sstatus. 191 + */ 192 + andi t0, s1, SR_PIE 191 193 beqz t0, 1f 192 - csrs CSR_SSTATUS, SR_SIE 194 + csrs CSR_STATUS, SR_IE 193 195 194 196 1: 195 197 /* Handle syscalls */ ··· 219 217 * scall instruction on sret 220 218 */ 221 219 addi s2, s2, 0x4 222 - REG_S s2, PT_SEPC(sp) 220 + REG_S s2, PT_EPC(sp) 223 221 /* Trace syscalls, but only if requested by the user. 
*/ 224 222 REG_L t0, TASK_TI_FLAGS(tp) 225 223 andi t0, t0, _TIF_SYSCALL_WORK ··· 269 267 bnez t0, handle_syscall_trace_exit 270 268 271 269 ret_from_exception: 272 - REG_L s0, PT_SSTATUS(sp) 273 - csrc CSR_SSTATUS, SR_SIE 270 + REG_L s0, PT_STATUS(sp) 271 + csrc CSR_STATUS, SR_IE 272 + #ifdef CONFIG_RISCV_M_MODE 273 + /* the MPP value is too large to be used as an immediate arg for addi */ 274 + li t0, SR_MPP 275 + and s0, s0, t0 276 + #else 274 277 andi s0, s0, SR_SPP 278 + #endif 275 279 bnez s0, resume_kernel 276 280 277 281 resume_userspace: ··· 291 283 REG_S s0, TASK_TI_KERNEL_SP(tp) 292 284 293 285 /* 294 - * Save TP into sscratch, so we can find the kernel data structures 295 - * again. 286 + * Save TP into the scratch register , so we can find the kernel data 287 + * structures again. 296 288 */ 297 - csrw CSR_SSCRATCH, tp 289 + csrw CSR_SCRATCH, tp 298 290 299 291 restore_all: 300 292 RESTORE_ALL 293 + #ifdef CONFIG_RISCV_M_MODE 294 + mret 295 + #else 301 296 sret 297 + #endif 302 298 303 299 #if IS_ENABLED(CONFIG_PREEMPT) 304 300 resume_kernel: ··· 322 310 bnez s1, work_resched 323 311 work_notifysig: 324 312 /* Handle pending signals and notify-resume requests */ 325 - csrs CSR_SSTATUS, SR_SIE /* Enable interrupts for do_notify_resume() */ 313 + csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */ 326 314 move a0, sp /* pt_regs */ 327 315 move a1, s0 /* current_thread_info->flags */ 328 316 tail do_notify_resume ··· 421 409 ret 422 410 ENDPROC(__switch_to) 423 411 412 + #ifndef CONFIG_MMU 413 + #define do_page_fault do_trap_unknown 414 + #endif 415 + 424 416 .section ".rodata" 425 417 /* Exception vector table */ 426 418 ENTRY(excp_vect_table) ··· 446 430 RISCV_PTR do_page_fault /* store page fault */ 447 431 excp_vect_table_end: 448 432 END(excp_vect_table) 433 + 434 + #ifndef CONFIG_MMU 435 + ENTRY(__user_rt_sigreturn) 436 + li a7, __NR_rt_sigreturn 437 + scall 438 + END(__user_rt_sigreturn) 439 + #endif
+4 -4
arch/riscv/kernel/fpu.S
··· 23 23 li a2, TASK_THREAD_F0 24 24 add a0, a0, a2 25 25 li t1, SR_FS 26 - csrs CSR_SSTATUS, t1 26 + csrs CSR_STATUS, t1 27 27 frcsr t0 28 28 fsd f0, TASK_THREAD_F0_F0(a0) 29 29 fsd f1, TASK_THREAD_F1_F0(a0) ··· 58 58 fsd f30, TASK_THREAD_F30_F0(a0) 59 59 fsd f31, TASK_THREAD_F31_F0(a0) 60 60 sw t0, TASK_THREAD_FCSR_F0(a0) 61 - csrc CSR_SSTATUS, t1 61 + csrc CSR_STATUS, t1 62 62 ret 63 63 ENDPROC(__fstate_save) 64 64 ··· 67 67 add a0, a0, a2 68 68 li t1, SR_FS 69 69 lw t0, TASK_THREAD_FCSR_F0(a0) 70 - csrs CSR_SSTATUS, t1 70 + csrs CSR_STATUS, t1 71 71 fld f0, TASK_THREAD_F0_F0(a0) 72 72 fld f1, TASK_THREAD_F1_F0(a0) 73 73 fld f2, TASK_THREAD_F2_F0(a0) ··· 101 101 fld f30, TASK_THREAD_F30_F0(a0) 102 102 fld f31, TASK_THREAD_F31_F0(a0) 103 103 fscsr t0 104 - csrc CSR_SSTATUS, t1 104 + csrc CSR_STATUS, t1 105 105 ret 106 106 ENDPROC(__fstate_restore)
+106 -6
arch/riscv/kernel/head.S
··· 11 11 #include <asm/thread_info.h> 12 12 #include <asm/page.h> 13 13 #include <asm/csr.h> 14 + #include <asm/hwcap.h> 14 15 #include <asm/image.h> 15 16 16 17 __INIT ··· 48 47 .global _start_kernel 49 48 _start_kernel: 50 49 /* Mask all interrupts */ 51 - csrw CSR_SIE, zero 52 - csrw CSR_SIP, zero 50 + csrw CSR_IE, zero 51 + csrw CSR_IP, zero 52 + 53 + #ifdef CONFIG_RISCV_M_MODE 54 + /* flush the instruction cache */ 55 + fence.i 56 + 57 + /* Reset all registers except ra, a0, a1 */ 58 + call reset_regs 59 + 60 + /* 61 + * The hartid in a0 is expected later on, and we have no firmware 62 + * to hand it to us. 63 + */ 64 + csrr a0, CSR_MHARTID 65 + #endif /* CONFIG_RISCV_M_MODE */ 53 66 54 67 /* Load the global pointer */ 55 68 .option push ··· 76 61 * floating point in kernel space 77 62 */ 78 63 li t0, SR_FS 79 - csrc CSR_SSTATUS, t0 64 + csrc CSR_STATUS, t0 80 65 81 66 #ifdef CONFIG_SMP 82 67 li t0, CONFIG_NR_CPUS ··· 109 94 la sp, init_thread_union + THREAD_SIZE 110 95 mv a0, s1 111 96 call setup_vm 97 + #ifdef CONFIG_MMU 112 98 la a0, early_pg_dir 113 99 call relocate 100 + #endif /* CONFIG_MMU */ 114 101 115 102 /* Restore C environment */ 116 103 la tp, init_task ··· 123 106 call parse_dtb 124 107 tail start_kernel 125 108 109 + #ifdef CONFIG_MMU 126 110 relocate: 127 111 /* Relocate return address */ 128 112 li a1, PAGE_OFFSET ··· 134 116 /* Point stvec to virtual address of intruction after satp write */ 135 117 la a2, 1f 136 118 add a2, a2, a1 137 - csrw CSR_STVEC, a2 119 + csrw CSR_TVEC, a2 138 120 139 121 /* Compute satp for kernel page tables, but don't load it yet */ 140 122 srl a2, a0, PAGE_SHIFT ··· 156 138 1: 157 139 /* Set trap vector to spin forever to help debug */ 158 140 la a0, .Lsecondary_park 159 - csrw CSR_STVEC, a0 141 + csrw CSR_TVEC, a0 160 142 161 143 /* Reload the global pointer */ 162 144 .option push ··· 174 156 sfence.vma 175 157 176 158 ret 159 + #endif /* CONFIG_MMU */ 177 160 178 161 .Lsecondary_start: 179 162 #ifdef 
CONFIG_SMP 180 163 /* Set trap vector to spin forever to help debug */ 181 164 la a3, .Lsecondary_park 182 - csrw CSR_STVEC, a3 165 + csrw CSR_TVEC, a3 183 166 184 167 slli a3, a0, LGREG 185 168 la a1, __cpu_up_stack_pointer ··· 200 181 beqz tp, .Lwait_for_cpu_up 201 182 fence 202 183 184 + #ifdef CONFIG_MMU 203 185 /* Enable virtual memory and relocate to virtual address */ 204 186 la a0, swapper_pg_dir 205 187 call relocate 188 + #endif 206 189 207 190 tail smp_callin 208 191 #endif ··· 215 194 wfi 216 195 j .Lsecondary_park 217 196 END(_start) 197 + 198 + #ifdef CONFIG_RISCV_M_MODE 199 + ENTRY(reset_regs) 200 + li sp, 0 201 + li gp, 0 202 + li tp, 0 203 + li t0, 0 204 + li t1, 0 205 + li t2, 0 206 + li s0, 0 207 + li s1, 0 208 + li a2, 0 209 + li a3, 0 210 + li a4, 0 211 + li a5, 0 212 + li a6, 0 213 + li a7, 0 214 + li s2, 0 215 + li s3, 0 216 + li s4, 0 217 + li s5, 0 218 + li s6, 0 219 + li s7, 0 220 + li s8, 0 221 + li s9, 0 222 + li s10, 0 223 + li s11, 0 224 + li t3, 0 225 + li t4, 0 226 + li t5, 0 227 + li t6, 0 228 + csrw sscratch, 0 229 + 230 + #ifdef CONFIG_FPU 231 + csrr t0, CSR_MISA 232 + andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D) 233 + bnez t0, .Lreset_regs_done 234 + 235 + li t1, SR_FS 236 + csrs CSR_STATUS, t1 237 + fmv.s.x f0, zero 238 + fmv.s.x f1, zero 239 + fmv.s.x f2, zero 240 + fmv.s.x f3, zero 241 + fmv.s.x f4, zero 242 + fmv.s.x f5, zero 243 + fmv.s.x f6, zero 244 + fmv.s.x f7, zero 245 + fmv.s.x f8, zero 246 + fmv.s.x f9, zero 247 + fmv.s.x f10, zero 248 + fmv.s.x f11, zero 249 + fmv.s.x f12, zero 250 + fmv.s.x f13, zero 251 + fmv.s.x f14, zero 252 + fmv.s.x f15, zero 253 + fmv.s.x f16, zero 254 + fmv.s.x f17, zero 255 + fmv.s.x f18, zero 256 + fmv.s.x f19, zero 257 + fmv.s.x f20, zero 258 + fmv.s.x f21, zero 259 + fmv.s.x f22, zero 260 + fmv.s.x f23, zero 261 + fmv.s.x f24, zero 262 + fmv.s.x f25, zero 263 + fmv.s.x f26, zero 264 + fmv.s.x f27, zero 265 + fmv.s.x f28, zero 266 + fmv.s.x f29, zero 267 + fmv.s.x f30, zero 268 + 
fmv.s.x f31, zero 269 + csrw fcsr, 0 270 + /* note that the caller must clear SR_FS */ 271 + #endif /* CONFIG_FPU */ 272 + .Lreset_regs_done: 273 + ret 274 + END(reset_regs) 275 + #endif /* CONFIG_RISCV_M_MODE */ 218 276 219 277 __PAGE_ALIGNED_BSS 220 278 /* Empty zero page */
+5 -12
arch/riscv/kernel/irq.c
··· 11 11 #include <linux/seq_file.h> 12 12 #include <asm/smp.h> 13 13 14 - /* 15 - * Possible interrupt causes: 16 - */ 17 - #define INTERRUPT_CAUSE_SOFTWARE IRQ_S_SOFT 18 - #define INTERRUPT_CAUSE_TIMER IRQ_S_TIMER 19 - #define INTERRUPT_CAUSE_EXTERNAL IRQ_S_EXT 20 - 21 14 int arch_show_interrupts(struct seq_file *p, int prec) 22 15 { 23 16 show_ipi_stats(p, prec); ··· 22 29 struct pt_regs *old_regs = set_irq_regs(regs); 23 30 24 31 irq_enter(); 25 - switch (regs->scause & ~SCAUSE_IRQ_FLAG) { 26 - case INTERRUPT_CAUSE_TIMER: 32 + switch (regs->cause & ~CAUSE_IRQ_FLAG) { 33 + case IRQ_TIMER: 27 34 riscv_timer_interrupt(); 28 35 break; 29 36 #ifdef CONFIG_SMP 30 - case INTERRUPT_CAUSE_SOFTWARE: 37 + case IRQ_SOFT: 31 38 /* 32 39 * We only use software interrupts to pass IPIs, so if a non-SMP 33 40 * system gets one, then we don't know what to do. ··· 35 42 riscv_software_interrupt(); 36 43 break; 37 44 #endif 38 - case INTERRUPT_CAUSE_EXTERNAL: 45 + case IRQ_EXT: 39 46 handle_arch_irq(regs); 40 47 break; 41 48 default: 42 - pr_alert("unexpected interrupt cause 0x%lx", regs->scause); 49 + pr_alert("unexpected interrupt cause 0x%lx", regs->cause); 43 50 BUG(); 44 51 } 45 52 irq_exit();
+1 -1
arch/riscv/kernel/perf_callchain.c
··· 67 67 return; 68 68 69 69 fp = regs->s0; 70 - perf_callchain_store(entry, regs->sepc); 70 + perf_callchain_store(entry, regs->epc); 71 71 72 72 fp = user_backtrace(entry, fp, regs->ra); 73 73 while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
+9 -8
arch/riscv/kernel/process.c
··· 35 35 { 36 36 show_regs_print_info(KERN_DEFAULT); 37 37 38 - pr_cont("sepc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n", 39 - regs->sepc, regs->ra, regs->sp); 38 + pr_cont("epc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n", 39 + regs->epc, regs->ra, regs->sp); 40 40 pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n", 41 41 regs->gp, regs->tp, regs->t0); 42 42 pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n", ··· 58 58 pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n", 59 59 regs->t5, regs->t6); 60 60 61 - pr_cont("sstatus: " REG_FMT " sbadaddr: " REG_FMT " scause: " REG_FMT "\n", 62 - regs->sstatus, regs->sbadaddr, regs->scause); 61 + pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n", 62 + regs->status, regs->badaddr, regs->cause); 63 63 } 64 64 65 65 void start_thread(struct pt_regs *regs, unsigned long pc, 66 66 unsigned long sp) 67 67 { 68 - regs->sstatus = SR_SPIE; 68 + regs->status = SR_PIE; 69 69 if (has_fpu) { 70 - regs->sstatus |= SR_FS_INITIAL; 70 + regs->status |= SR_FS_INITIAL; 71 71 /* 72 72 * Restore the initial value to the FP register 73 73 * before starting the user program. 74 74 */ 75 75 fstate_restore(current, regs); 76 76 } 77 - regs->sepc = pc; 77 + regs->epc = pc; 78 78 regs->sp = sp; 79 79 set_fs(USER_DS); 80 80 } ··· 110 110 const register unsigned long gp __asm__ ("gp"); 111 111 memset(childregs, 0, sizeof(struct pt_regs)); 112 112 childregs->gp = gp; 113 - childregs->sstatus = SR_SPP | SR_SPIE; /* Supervisor, irqs on */ 113 + /* Supervisor/Machine, irqs on: */ 114 + childregs->status = SR_PP | SR_PIE; 114 115 115 116 p->thread.ra = (unsigned long)ret_from_kernel_thread; 116 117 p->thread.s[0] = usp; /* fn */
+2 -3
arch/riscv/kernel/reset.c
··· 5 5 6 6 #include <linux/reboot.h> 7 7 #include <linux/pm.h> 8 - #include <asm/sbi.h> 9 8 10 9 static void default_power_off(void) 11 10 { 12 - sbi_shutdown(); 13 - while (1); 11 + while (1) 12 + wait_for_interrupt(); 14 13 } 15 14 16 15 void (*pm_power_off)(void) = default_power_off;
+17
arch/riscv/kernel/sbi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include <linux/init.h> 4 + #include <linux/pm.h> 5 + #include <asm/sbi.h> 6 + 7 + static void sbi_power_off(void) 8 + { 9 + sbi_shutdown(); 10 + } 11 + 12 + static int __init sbi_init(void) 13 + { 14 + pm_power_off = sbi_power_off; 15 + return 0; 16 + } 17 + early_initcall(sbi_init);
+2
arch/riscv/kernel/setup.c
··· 17 17 #include <linux/sched/task.h> 18 18 #include <linux/swiotlb.h> 19 19 20 + #include <asm/clint.h> 20 21 #include <asm/setup.h> 21 22 #include <asm/sections.h> 22 23 #include <asm/pgtable.h> ··· 68 67 setup_bootmem(); 69 68 paging_init(); 70 69 unflatten_device_tree(); 70 + clint_init_boot_cpu(); 71 71 72 72 #ifdef CONFIG_SWIOTLB 73 73 swiotlb_init(1);
+26 -12
arch/riscv/kernel/signal.c
··· 17 17 #include <asm/switch_to.h> 18 18 #include <asm/csr.h> 19 19 20 + extern u32 __user_rt_sigreturn[2]; 21 + 20 22 #define DEBUG_SIG 0 21 23 22 24 struct rt_sigframe { 23 25 struct siginfo info; 24 26 struct ucontext uc; 27 + #ifndef CONFIG_MMU 28 + u32 sigreturn_code[2]; 29 + #endif 25 30 }; 26 31 27 32 #ifdef CONFIG_FPU ··· 129 124 pr_info_ratelimited( 130 125 "%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n", 131 126 task->comm, task_pid_nr(task), __func__, 132 - frame, (void *)regs->sepc, (void *)regs->sp); 127 + frame, (void *)regs->epc, (void *)regs->sp); 133 128 } 134 129 force_sig(SIGSEGV); 135 130 return 0; ··· 171 166 return (void __user *)sp; 172 167 } 173 168 174 - 175 169 static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, 176 170 struct pt_regs *regs) 177 171 { ··· 193 189 return -EFAULT; 194 190 195 191 /* Set up to return from userspace. */ 192 + #ifdef CONFIG_MMU 196 193 regs->ra = (unsigned long)VDSO_SYMBOL( 197 194 current->mm->context.vdso, rt_sigreturn); 195 + #else 196 + /* 197 + * For the nommu case we don't have a VDSO. Instead we push two 198 + * instructions to call the rt_sigreturn syscall onto the user stack. 199 + */ 200 + if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn, 201 + sizeof(frame->sigreturn_code))) 202 + return -EFAULT; 203 + regs->ra = (unsigned long)&frame->sigreturn_code; 204 + #endif /* CONFIG_MMU */ 198 205 199 206 /* 200 207 * Set up registers for signal handler. ··· 214 199 * We always pass siginfo and mcontext, regardless of SA_SIGINFO, 215 200 * since some things rely on this (e.g. glibc's debug/segfault.c). 
216 201 */ 217 - regs->sepc = (unsigned long)ksig->ka.sa.sa_handler; 202 + regs->epc = (unsigned long)ksig->ka.sa.sa_handler; 218 203 regs->sp = (unsigned long)frame; 219 204 regs->a0 = ksig->sig; /* a0: signal number */ 220 205 regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */ ··· 223 208 #if DEBUG_SIG 224 209 pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n", 225 210 current->comm, task_pid_nr(current), ksig->sig, 226 - (void *)regs->sepc, (void *)regs->ra, frame); 211 + (void *)regs->epc, (void *)regs->ra, frame); 227 212 #endif 228 213 229 214 return 0; ··· 235 220 int ret; 236 221 237 222 /* Are we from a system call? */ 238 - if (regs->scause == EXC_SYSCALL) { 223 + if (regs->cause == EXC_SYSCALL) { 239 224 /* Avoid additional syscall restarting via ret_from_exception */ 240 - regs->scause = -1UL; 241 - 225 + regs->cause = -1UL; 242 226 /* If so, check system call restarting.. */ 243 227 switch (regs->a0) { 244 228 case -ERESTART_RESTARTBLOCK: ··· 253 239 /* fallthrough */ 254 240 case -ERESTARTNOINTR: 255 241 regs->a0 = regs->orig_a0; 256 - regs->sepc -= 0x4; 242 + regs->epc -= 0x4; 257 243 break; 258 244 } 259 245 } ··· 275 261 } 276 262 277 263 /* Did we come from a system call? */ 278 - if (regs->scause == EXC_SYSCALL) { 264 + if (regs->cause == EXC_SYSCALL) { 279 265 /* Avoid additional syscall restarting via ret_from_exception */ 280 - regs->scause = -1UL; 266 + regs->cause = -1UL; 281 267 282 268 /* Restart the system call - no handlers present */ 283 269 switch (regs->a0) { ··· 285 271 case -ERESTARTSYS: 286 272 case -ERESTARTNOINTR: 287 273 regs->a0 = regs->orig_a0; 288 - regs->sepc -= 0x4; 274 + regs->epc -= 0x4; 289 275 break; 290 276 case -ERESTART_RESTARTBLOCK: 291 277 regs->a0 = regs->orig_a0; 292 278 regs->a7 = __NR_restart_syscall; 293 - regs->sepc -= 0x4; 279 + regs->epc -= 0x4; 294 280 break; 295 281 } 296 282 }
+13 -3
arch/riscv/kernel/smp.c
··· 16 16 #include <linux/seq_file.h> 17 17 #include <linux/delay.h> 18 18 19 + #include <asm/clint.h> 19 20 #include <asm/sbi.h> 20 21 #include <asm/tlbflush.h> 21 22 #include <asm/cacheflush.h> ··· 93 92 smp_mb__after_atomic(); 94 93 95 94 riscv_cpuid_to_hartid_mask(mask, &hartid_mask); 96 - sbi_send_ipi(cpumask_bits(&hartid_mask)); 95 + if (IS_ENABLED(CONFIG_RISCV_SBI)) 96 + sbi_send_ipi(cpumask_bits(&hartid_mask)); 97 + else 98 + clint_send_ipi_mask(&hartid_mask); 97 99 } 98 100 99 101 static void send_ipi_single(int cpu, enum ipi_message_type op) ··· 107 103 set_bit(op, &ipi_data[cpu].bits); 108 104 smp_mb__after_atomic(); 109 105 110 - sbi_send_ipi(cpumask_bits(cpumask_of(hartid))); 106 + if (IS_ENABLED(CONFIG_RISCV_SBI)) 107 + sbi_send_ipi(cpumask_bits(cpumask_of(hartid))); 108 + else 109 + clint_send_ipi_single(hartid); 111 110 } 112 111 113 112 static inline void clear_ipi(void) 114 113 { 115 - csr_clear(CSR_SIP, SIE_SSIE); 114 + if (IS_ENABLED(CONFIG_RISCV_SBI)) 115 + csr_clear(CSR_IP, IE_SIE); 116 + else 117 + clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id())); 116 118 } 117 119 118 120 void riscv_software_interrupt(void)
+4
arch/riscv/kernel/smpboot.c
··· 24 24 #include <linux/of.h> 25 25 #include <linux/sched/task_stack.h> 26 26 #include <linux/sched/mm.h> 27 + #include <asm/clint.h> 27 28 #include <asm/irq.h> 28 29 #include <asm/mmu_context.h> 29 30 #include <asm/tlbflush.h> ··· 137 136 asmlinkage __visible void __init smp_callin(void) 138 137 { 139 138 struct mm_struct *mm = &init_mm; 139 + 140 + if (!IS_ENABLED(CONFIG_RISCV_SBI)) 141 + clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id())); 140 142 141 143 /* All kernel threads share the same mm context. */ 142 144 mmgrab(mm);
+8 -8
arch/riscv/kernel/traps.c
··· 41 41 print_modules(); 42 42 show_regs(regs); 43 43 44 - ret = notify_die(DIE_OOPS, str, regs, 0, regs->scause, SIGSEGV); 44 + ret = notify_die(DIE_OOPS, str, regs, 0, regs->cause, SIGSEGV); 45 45 46 46 bust_spinlocks(0); 47 47 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); ··· 86 86 #define DO_ERROR_INFO(name, signo, code, str) \ 87 87 asmlinkage __visible void name(struct pt_regs *regs) \ 88 88 { \ 89 - do_trap_error(regs, signo, code, regs->sepc, "Oops - " str); \ 89 + do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \ 90 90 } 91 91 92 92 DO_ERROR_INFO(do_trap_unknown, ··· 124 124 asmlinkage __visible void do_trap_break(struct pt_regs *regs) 125 125 { 126 126 if (user_mode(regs)) 127 - force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->sepc); 128 - else if (report_bug(regs->sepc, regs) == BUG_TRAP_TYPE_WARN) 129 - regs->sepc += get_break_insn_length(regs->sepc); 127 + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc); 128 + else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN) 129 + regs->epc += get_break_insn_length(regs->epc); 130 130 else 131 131 die(regs, "Kernel BUG"); 132 132 } ··· 153 153 * Set sup0 scratch register to 0, indicating to exception vector 154 154 * that we are presently executing in the kernel 155 155 */ 156 - csr_write(CSR_SSCRATCH, 0); 156 + csr_write(CSR_SCRATCH, 0); 157 157 /* Set the exception vector address */ 158 - csr_write(CSR_STVEC, &handle_exception); 158 + csr_write(CSR_TVEC, &handle_exception); 159 159 /* Enable all interrupts */ 160 - csr_write(CSR_SIE, -1); 160 + csr_write(CSR_IE, -1); 161 161 }
+5 -6
arch/riscv/lib/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 - lib-y += delay.o 3 - lib-y += memcpy.o 4 - lib-y += memset.o 5 - lib-y += uaccess.o 6 - 7 - lib-$(CONFIG_64BIT) += tishift.o 2 + lib-y += delay.o 3 + lib-y += memcpy.o 4 + lib-y += memset.o 5 + lib-$(CONFIG_MMU) += uaccess.o 6 + lib-$(CONFIG_64BIT) += tishift.o
+6 -6
arch/riscv/lib/uaccess.S
··· 18 18 19 19 /* Enable access to user memory */ 20 20 li t6, SR_SUM 21 - csrs CSR_SSTATUS, t6 21 + csrs CSR_STATUS, t6 22 22 23 23 add a3, a1, a2 24 24 /* Use word-oriented copy only if low-order bits match */ ··· 47 47 48 48 3: 49 49 /* Disable access to user memory */ 50 - csrc CSR_SSTATUS, t6 50 + csrc CSR_STATUS, t6 51 51 li a0, 0 52 52 ret 53 53 4: /* Edge case: unalignment */ ··· 72 72 73 73 /* Enable access to user memory */ 74 74 li t6, SR_SUM 75 - csrs CSR_SSTATUS, t6 75 + csrs CSR_STATUS, t6 76 76 77 77 add a3, a0, a1 78 78 addi t0, a0, SZREG-1 ··· 94 94 95 95 3: 96 96 /* Disable access to user memory */ 97 - csrc CSR_SSTATUS, t6 97 + csrc CSR_STATUS, t6 98 98 li a0, 0 99 99 ret 100 100 4: /* Edge case: unalignment */ ··· 114 114 /* Fixup code for __copy_user(10) and __clear_user(11) */ 115 115 10: 116 116 /* Disable access to user memory */ 117 - csrs CSR_SSTATUS, t6 117 + csrs CSR_STATUS, t6 118 118 mv a0, a2 119 119 ret 120 120 11: 121 - csrs CSR_SSTATUS, t6 121 + csrs CSR_STATUS, t6 122 122 mv a0, a1 123 123 ret 124 124 .previous
+1 -2
arch/riscv/mm/Makefile
··· 6 6 endif 7 7 8 8 obj-y += init.o 9 - obj-y += fault.o 10 9 obj-y += extable.o 11 - obj-y += ioremap.o 10 + obj-$(CONFIG_MMU) += fault.o ioremap.o 12 11 obj-y += cacheflush.o 13 12 obj-y += context.o 14 13 obj-y += sifive_l2_cache.o
+20 -6
arch/riscv/mm/cacheflush.c
··· 10 10 11 11 #include <asm/sbi.h> 12 12 13 + static void ipi_remote_fence_i(void *info) 14 + { 15 + return local_flush_icache_all(); 16 + } 17 + 13 18 void flush_icache_all(void) 14 19 { 15 - sbi_remote_fence_i(NULL); 20 + if (IS_ENABLED(CONFIG_RISCV_SBI)) 21 + sbi_remote_fence_i(NULL); 22 + else 23 + on_each_cpu(ipi_remote_fence_i, NULL, 1); 16 24 } 17 25 18 26 /* ··· 36 28 void flush_icache_mm(struct mm_struct *mm, bool local) 37 29 { 38 30 unsigned int cpu; 39 - cpumask_t others, hmask, *mask; 31 + cpumask_t others, *mask; 40 32 41 33 preempt_disable(); 42 34 ··· 54 46 */ 55 47 cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu)); 56 48 local |= cpumask_empty(&others); 57 - if (mm != current->active_mm || !local) { 58 - riscv_cpuid_to_hartid_mask(&others, &hmask); 59 - sbi_remote_fence_i(hmask.bits); 60 - } else { 49 + if (mm == current->active_mm && local) { 61 50 /* 62 51 * It's assumed that at least one strongly ordered operation is 63 52 * performed on this hart between setting a hart's cpumask bit ··· 64 59 * with flush_icache_deferred(). 65 60 */ 66 61 smp_mb(); 62 + } else if (IS_ENABLED(CONFIG_RISCV_SBI)) { 63 + cpumask_t hartid_mask; 64 + 65 + riscv_cpuid_to_hartid_mask(&others, &hartid_mask); 66 + sbi_remote_fence_i(cpumask_bits(&hartid_mask)); 67 + } else { 68 + on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1); 67 69 } 68 70 69 71 preempt_enable(); ··· 78 66 79 67 #endif /* CONFIG_SMP */ 80 68 69 + #ifdef CONFIG_MMU 81 70 void flush_icache_pte(pte_t pte) 82 71 { 83 72 struct page *page = pte_page(pte); ··· 86 73 if (!test_and_set_bit(PG_dcache_clean, &page->flags)) 87 74 flush_icache_all(); 88 75 } 76 + #endif /* CONFIG_MMU */
+2
arch/riscv/mm/context.c
··· 58 58 cpumask_clear_cpu(cpu, mm_cpumask(prev)); 59 59 cpumask_set_cpu(cpu, mm_cpumask(next)); 60 60 61 + #ifdef CONFIG_MMU 61 62 csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE); 62 63 local_flush_tlb_all(); 64 + #endif 63 65 64 66 flush_icache_deferred(next); 65 67 }
+2 -2
arch/riscv/mm/extable.c
··· 15 15 { 16 16 const struct exception_table_entry *fixup; 17 17 18 - fixup = search_exception_tables(regs->sepc); 18 + fixup = search_exception_tables(regs->epc); 19 19 if (fixup) { 20 - regs->sepc = fixup->fixup; 20 + regs->epc = fixup->fixup; 21 21 return 1; 22 22 } 23 23 return 0;
+3 -3
arch/riscv/mm/fault.c
··· 34 34 int code = SEGV_MAPERR; 35 35 vm_fault_t fault; 36 36 37 - cause = regs->scause; 38 - addr = regs->sbadaddr; 37 + cause = regs->cause; 38 + addr = regs->badaddr; 39 39 40 40 tsk = current; 41 41 mm = tsk->mm; ··· 53 53 goto vmalloc_fault; 54 54 55 55 /* Enable interrupts if they were enabled in the parent context. */ 56 - if (likely(regs->sstatus & SR_SPIE)) 56 + if (likely(regs->status & SR_PIE)) 57 57 local_irq_enable(); 58 58 59 59 /*
+13 -2
arch/riscv/mm/init.c
··· 26 26 EXPORT_SYMBOL(empty_zero_page); 27 27 28 28 extern char _start[]; 29 + void *dtb_early_va; 29 30 30 31 static void __init zone_sizes_init(void) 31 32 { ··· 41 40 free_area_init_nodes(max_zone_pfns); 42 41 } 43 42 44 - void setup_zero_page(void) 43 + static void setup_zero_page(void) 45 44 { 46 45 memset((void *)empty_zero_page, 0, PAGE_SIZE); 47 46 } ··· 143 142 } 144 143 } 145 144 145 + #ifdef CONFIG_MMU 146 146 unsigned long va_pa_offset; 147 147 EXPORT_SYMBOL(va_pa_offset); 148 148 unsigned long pfn_base; 149 149 EXPORT_SYMBOL(pfn_base); 150 150 151 - void *dtb_early_va; 152 151 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; 153 152 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss; 154 153 pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; ··· 445 444 csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE); 446 445 local_flush_tlb_all(); 447 446 } 447 + #else 448 + asmlinkage void __init setup_vm(uintptr_t dtb_pa) 449 + { 450 + dtb_early_va = (void *)dtb_pa; 451 + } 452 + 453 + static inline void setup_vm_final(void) 454 + { 455 + } 456 + #endif /* CONFIG_MMU */ 448 457 449 458 void __init paging_init(void) 450 459 {
+23 -8
drivers/clocksource/timer-riscv.c
··· 3 3 * Copyright (C) 2012 Regents of the University of California 4 4 * Copyright (C) 2017 SiFive 5 5 * 6 - * All RISC-V systems have a timer attached to every hart. These timers can be 7 - * read from the "time" and "timeh" CSRs, and can use the SBI to setup 8 - * events. 6 + * All RISC-V systems have a timer attached to every hart. These timers can 7 + * either be read from the "time" and "timeh" CSRs, and can use the SBI to 8 + * setup events, or directly accessed using MMIO registers. 9 9 */ 10 10 #include <linux/clocksource.h> 11 11 #include <linux/clockchips.h> ··· 13 13 #include <linux/delay.h> 14 14 #include <linux/irq.h> 15 15 #include <linux/sched_clock.h> 16 + #include <linux/io-64-nonatomic-lo-hi.h> 16 17 #include <asm/smp.h> 17 18 #include <asm/sbi.h> 19 + 20 + u64 __iomem *riscv_time_cmp; 21 + u64 __iomem *riscv_time_val; 22 + 23 + static inline void mmio_set_timer(u64 val) 24 + { 25 + void __iomem *r; 26 + 27 + r = riscv_time_cmp + cpuid_to_hartid_map(smp_processor_id()); 28 + writeq_relaxed(val, r); 29 + } 18 30 19 31 static int riscv_clock_next_event(unsigned long delta, 20 32 struct clock_event_device *ce) 21 33 { 22 - csr_set(sie, SIE_STIE); 23 - sbi_set_timer(get_cycles64() + delta); 34 + csr_set(CSR_IE, IE_TIE); 35 + if (IS_ENABLED(CONFIG_RISCV_SBI)) 36 + sbi_set_timer(get_cycles64() + delta); 37 + else 38 + mmio_set_timer(get_cycles64() + delta); 24 39 return 0; 25 40 } 26 41 ··· 76 61 ce->cpumask = cpumask_of(cpu); 77 62 clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff); 78 63 79 - csr_set(sie, SIE_STIE); 64 + csr_set(CSR_IE, IE_TIE); 80 65 return 0; 81 66 } 82 67 83 68 static int riscv_timer_dying_cpu(unsigned int cpu) 84 69 { 85 - csr_clear(sie, SIE_STIE); 70 + csr_clear(CSR_IE, IE_TIE); 86 71 return 0; 87 72 } 88 73 ··· 91 76 { 92 77 struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event); 93 78 94 - csr_clear(sie, SIE_STIE); 79 + csr_clear(CSR_IE, IE_TIE); 95 80 evdev->event_handler(evdev); 96 81 } 97 82
+7 -4
drivers/irqchip/irq-sifive-plic.c
··· 181 181 182 182 WARN_ON_ONCE(!handler->present); 183 183 184 - csr_clear(sie, SIE_SEIE); 184 + csr_clear(CSR_IE, IE_EIE); 185 185 while ((hwirq = readl(claim))) { 186 186 int irq = irq_find_mapping(plic_irqdomain, hwirq); 187 187 ··· 191 191 else 192 192 generic_handle_irq(irq); 193 193 } 194 - csr_set(sie, SIE_SEIE); 194 + csr_set(CSR_IE, IE_EIE); 195 195 } 196 196 197 197 /* ··· 252 252 continue; 253 253 } 254 254 255 - /* skip contexts other than supervisor external interrupt */ 256 - if (parent.args[0] != IRQ_S_EXT) 255 + /* 256 + * Skip contexts other than external interrupts for our 257 + * privilege level. 258 + */ 259 + if (parent.args[0] != IRQ_EXT) 257 260 continue; 258 261 259 262 hartid = plic_find_hart_id(parent.np);
+1 -1
drivers/tty/hvc/Kconfig
··· 89 89 90 90 config HVC_RISCV_SBI 91 91 bool "RISC-V SBI console support" 92 - depends on RISCV 92 + depends on RISCV_SBI 93 93 select HVC_DRIVER 94 94 help 95 95 This enables support for console output via RISC-V SBI calls, which
+1 -1
drivers/tty/serial/Kconfig
··· 88 88 89 89 config SERIAL_EARLYCON_RISCV_SBI 90 90 bool "Early console using RISC-V SBI" 91 - depends on RISCV 91 + depends on RISCV_SBI 92 92 select SERIAL_CORE 93 93 select SERIAL_CORE_CONSOLE 94 94 select SERIAL_EARLYCON