"Das U-Boot" Source Tree

riscv: make use of the barrier functions from Linux

Replace the barrier functions in arch/riscv/include/asm/io.h with those
defined in barrier.h, which is imported from Linux. This version is
modified to remove the include statement of asm-generic/barrier.h, which
is neither available in U-Boot nor required.

Signed-off-by: Lukas Auer <lukas.auer@aisec.fraunhofer.de>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
Reviewed-by: Rick Chen <rick@andestech.com>

authored by

Lukas Auer and committed by
Andes
fc8c76f4 b2c860c6

+71 -7
+67
arch/riscv/include/asm/barrier.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (C) 2012 ARM Ltd. 4 + * Copyright (C) 2013 Regents of the University of California 5 + * Copyright (C) 2017 SiFive 6 + * 7 + * Taken from Linux arch/riscv/include/asm/barrier.h, which is based on 8 + * arch/arm/include/asm/barrier.h 9 + */ 10 + 11 + #ifndef _ASM_RISCV_BARRIER_H 12 + #define _ASM_RISCV_BARRIER_H 13 + 14 + #ifndef __ASSEMBLY__ 15 + 16 + #define nop() __asm__ __volatile__ ("nop") 17 + 18 + #define RISCV_FENCE(p, s) \ 19 + __asm__ __volatile__ ("fence " #p "," #s : : : "memory") 20 + 21 + /* These barriers need to enforce ordering on both devices or memory. */ 22 + #define mb() RISCV_FENCE(iorw,iorw) 23 + #define rmb() RISCV_FENCE(ir,ir) 24 + #define wmb() RISCV_FENCE(ow,ow) 25 + 26 + /* These barriers do not need to enforce ordering on devices, just memory. */ 27 + #define __smp_mb() RISCV_FENCE(rw,rw) 28 + #define __smp_rmb() RISCV_FENCE(r,r) 29 + #define __smp_wmb() RISCV_FENCE(w,w) 30 + 31 + #define __smp_store_release(p, v) \ 32 + do { \ 33 + compiletime_assert_atomic_type(*p); \ 34 + RISCV_FENCE(rw,w); \ 35 + WRITE_ONCE(*p, v); \ 36 + } while (0) 37 + 38 + #define __smp_load_acquire(p) \ 39 + ({ \ 40 + typeof(*p) ___p1 = READ_ONCE(*p); \ 41 + compiletime_assert_atomic_type(*p); \ 42 + RISCV_FENCE(r,rw); \ 43 + ___p1; \ 44 + }) 45 + 46 + /* 47 + * This is a very specific barrier: it's currently only used in two places in 48 + * the kernel, both in the scheduler. See include/linux/spinlock.h for the two 49 + * orderings it guarantees, but the "critical section is RCsc" guarantee 50 + * mandates a barrier on RISC-V. 
The sequence looks like: 51 + * 52 + * lr.aq lock 53 + * sc lock <= LOCKED 54 + * smp_mb__after_spinlock() 55 + * // critical section 56 + * lr lock 57 + * sc.rl lock <= UNLOCKED 58 + * 59 + * The AQ/RL pair provides a RCpc critical section, but there's not really any 60 + * way we can take advantage of that here because the ordering is only enforced 61 + * on that one lock. Thus, we're just doing a full fence. 62 + */ 63 + #define smp_mb__after_spinlock() RISCV_FENCE(rw,rw) 64 + 65 + #endif /* __ASSEMBLY__ */ 66 + 67 + #endif /* _ASM_RISCV_BARRIER_H */
+4 -7
arch/riscv/include/asm/io.h
··· 10 10 #ifdef __KERNEL__ 11 11 12 12 #include <linux/types.h> 13 + #include <asm/barrier.h> 13 14 #include <asm/byteorder.h> 14 15 15 16 static inline void sync(void) ··· 91 92 #define __raw_readl(a) __arch_getl(a) 92 93 #define __raw_readq(a) __arch_getq(a) 93 94 94 - /* 95 - * TODO: The kernel offers some more advanced versions of barriers, it might 96 - * have some advantages to use them instead of the simple one here. 97 - */ 98 - #define dmb() __asm__ __volatile__ ("" : : : "memory") 99 - #define __iormb() dmb() 100 - #define __iowmb() dmb() 95 + #define dmb() mb() 96 + #define __iormb() rmb() 97 + #define __iowmb() wmb() 101 98 102 99 static inline void writeb(u8 val, volatile void __iomem *addr) 103 100 {