Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arch: Clean up asm/barrier.h implementations using asm-generic/barrier.h

We're going to be adding a few new barrier primitives, and in order to
avoid endless duplication make more aggressive use of
asm-generic/barrier.h.

Change the asm-generic/barrier.h such that it allows partial barrier
definitions and fills out the rest with defaults.

There are a few architectures (m32r, m68k) that could probably
do away with their barrier.h file entirely but are kept for now due to
their unconventional nop() implementation.

Suggested-by: Geert Uytterhoeven <geert@linux-m68k.org>
Reviewed-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Michael Ellerman <michael@ellerman.id.au>
Cc: Michael Neuling <mikey@neuling.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Victor Kaplansky <VICTORK@il.ibm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20131213150640.846368594@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Peter Zijlstra and committed by
Ingo Molnar
93ea02bb 1de7da37

+58 -414
+5 -20
arch/alpha/include/asm/barrier.h
··· 3 3 4 4 #include <asm/compiler.h> 5 5 6 - #define mb() \ 7 - __asm__ __volatile__("mb": : :"memory") 6 + #define mb() __asm__ __volatile__("mb": : :"memory") 7 + #define rmb() __asm__ __volatile__("mb": : :"memory") 8 + #define wmb() __asm__ __volatile__("wmb": : :"memory") 8 9 9 - #define rmb() \ 10 - __asm__ __volatile__("mb": : :"memory") 11 - 12 - #define wmb() \ 13 - __asm__ __volatile__("wmb": : :"memory") 14 - 15 - #define read_barrier_depends() \ 16 - __asm__ __volatile__("mb": : :"memory") 10 + #define read_barrier_depends() __asm__ __volatile__("mb": : :"memory") 17 11 18 12 #ifdef CONFIG_SMP 19 13 #define __ASM_SMP_MB "\tmb\n" 20 - #define smp_mb() mb() 21 - #define smp_rmb() rmb() 22 - #define smp_wmb() wmb() 23 - #define smp_read_barrier_depends() read_barrier_depends() 24 14 #else 25 15 #define __ASM_SMP_MB 26 - #define smp_mb() barrier() 27 - #define smp_rmb() barrier() 28 - #define smp_wmb() barrier() 29 - #define smp_read_barrier_depends() do { } while (0) 30 16 #endif 31 17 32 - #define set_mb(var, value) \ 33 - do { var = value; mb(); } while (0) 18 + #include <asm-generic/barrier.h> 34 19 35 20 #endif /* __BARRIER_H */
+1
arch/arc/include/asm/Kbuild
··· 47 47 generic-y += vga.h 48 48 generic-y += xor.h 49 49 generic-y += preempt.h 50 + generic-y += barrier.h
+5 -12
arch/avr32/include/asm/barrier.h
··· 8 8 #ifndef __ASM_AVR32_BARRIER_H 9 9 #define __ASM_AVR32_BARRIER_H 10 10 11 - #define nop() asm volatile("nop") 12 - 13 - #define mb() asm volatile("" : : : "memory") 14 - #define rmb() mb() 15 - #define wmb() asm volatile("sync 0" : : : "memory") 16 - #define read_barrier_depends() do { } while(0) 17 - #define set_mb(var, value) do { var = value; mb(); } while(0) 11 + /* 12 + * Weirdest thing ever.. no full barrier, but it has a write barrier! 13 + */ 14 + #define wmb() asm volatile("sync 0" : : : "memory") 18 15 19 16 #ifdef CONFIG_SMP 20 17 # error "The AVR32 port does not support SMP" 21 - #else 22 - # define smp_mb() barrier() 23 - # define smp_rmb() barrier() 24 - # define smp_wmb() barrier() 25 - # define smp_read_barrier_depends() do { } while(0) 26 18 #endif 27 19 20 + #include <asm-generic/barrier.h> 28 21 29 22 #endif /* __ASM_AVR32_BARRIER_H */
+1 -17
arch/blackfin/include/asm/barrier.h
··· 23 23 # define rmb() do { barrier(); smp_check_barrier(); } while (0) 24 24 # define wmb() do { barrier(); smp_mark_barrier(); } while (0) 25 25 # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) 26 - #else 27 - # define mb() barrier() 28 - # define rmb() barrier() 29 - # define wmb() barrier() 30 - # define read_barrier_depends() do { } while (0) 31 26 #endif 32 - 33 - #else /* !CONFIG_SMP */ 34 - 35 - #define mb() barrier() 36 - #define rmb() barrier() 37 - #define wmb() barrier() 38 - #define read_barrier_depends() do { } while (0) 39 27 40 28 #endif /* !CONFIG_SMP */ 41 29 42 - #define smp_mb() mb() 43 - #define smp_rmb() rmb() 44 - #define smp_wmb() wmb() 45 - #define set_mb(var, value) do { var = value; mb(); } while (0) 46 - #define smp_read_barrier_depends() read_barrier_depends() 30 + #include <asm-generic/barrier.h> 47 31 48 32 #endif /* _BLACKFIN_BARRIER_H */
+1
arch/cris/include/asm/Kbuild
··· 12 12 generic-y += vga.h 13 13 generic-y += xor.h 14 14 generic-y += preempt.h 15 + generic-y += barrier.h
-25
arch/cris/include/asm/barrier.h
··· 1 - #ifndef __ASM_CRIS_BARRIER_H 2 - #define __ASM_CRIS_BARRIER_H 3 - 4 - #define nop() __asm__ __volatile__ ("nop"); 5 - 6 - #define barrier() __asm__ __volatile__("": : :"memory") 7 - #define mb() barrier() 8 - #define rmb() mb() 9 - #define wmb() mb() 10 - #define read_barrier_depends() do { } while(0) 11 - #define set_mb(var, value) do { var = value; mb(); } while (0) 12 - 13 - #ifdef CONFIG_SMP 14 - #define smp_mb() mb() 15 - #define smp_rmb() rmb() 16 - #define smp_wmb() wmb() 17 - #define smp_read_barrier_depends() read_barrier_depends() 18 - #else 19 - #define smp_mb() barrier() 20 - #define smp_rmb() barrier() 21 - #define smp_wmb() barrier() 22 - #define smp_read_barrier_depends() do { } while(0) 23 - #endif 24 - 25 - #endif /* __ASM_CRIS_BARRIER_H */
+1 -7
arch/frv/include/asm/barrier.h
··· 17 17 #define mb() asm volatile ("membar" : : :"memory") 18 18 #define rmb() asm volatile ("membar" : : :"memory") 19 19 #define wmb() asm volatile ("membar" : : :"memory") 20 - #define read_barrier_depends() do { } while (0) 21 20 22 - #define smp_mb() barrier() 23 - #define smp_rmb() barrier() 24 - #define smp_wmb() barrier() 25 - #define smp_read_barrier_depends() do {} while(0) 26 - #define set_mb(var, value) \ 27 - do { var = (value); barrier(); } while (0) 21 + #include <asm-generic/barrier.h> 28 22 29 23 #endif /* _ASM_BARRIER_H */
+1
arch/hexagon/include/asm/Kbuild
··· 54 54 generic-y += unaligned.h 55 55 generic-y += xor.h 56 56 generic-y += preempt.h 57 + generic-y += barrier.h
+1 -79
arch/m32r/include/asm/barrier.h
··· 11 11 12 12 #define nop() __asm__ __volatile__ ("nop" : : ) 13 13 14 - /* 15 - * Memory barrier. 16 - * 17 - * mb() prevents loads and stores being reordered across this point. 18 - * rmb() prevents loads being reordered across this point. 19 - * wmb() prevents stores being reordered across this point. 20 - */ 21 - #define mb() barrier() 22 - #define rmb() mb() 23 - #define wmb() mb() 24 - 25 - /** 26 - * read_barrier_depends - Flush all pending reads that subsequents reads 27 - * depend on. 28 - * 29 - * No data-dependent reads from memory-like regions are ever reordered 30 - * over this barrier. All reads preceding this primitive are guaranteed 31 - * to access memory (but not necessarily other CPUs' caches) before any 32 - * reads following this primitive that depend on the data return by 33 - * any of the preceding reads. This primitive is much lighter weight than 34 - * rmb() on most CPUs, and is never heavier weight than is 35 - * rmb(). 36 - * 37 - * These ordering constraints are respected by both the local CPU 38 - * and the compiler. 39 - * 40 - * Ordering is not guaranteed by anything other than these primitives, 41 - * not even by data dependencies. See the documentation for 42 - * memory_barrier() for examples and URLs to more information. 43 - * 44 - * For example, the following code would force ordering (the initial 45 - * value of "a" is zero, "b" is one, and "p" is "&a"): 46 - * 47 - * <programlisting> 48 - * CPU 0 CPU 1 49 - * 50 - * b = 2; 51 - * memory_barrier(); 52 - * p = &b; q = p; 53 - * read_barrier_depends(); 54 - * d = *q; 55 - * </programlisting> 56 - * 57 - * 58 - * because the read of "*q" depends on the read of "p" and these 59 - * two reads are separated by a read_barrier_depends(). However, 60 - * the following code, with the same initial values for "a" and "b": 61 - * 62 - * <programlisting> 63 - * CPU 0 CPU 1 64 - * 65 - * a = 2; 66 - * memory_barrier(); 67 - * b = 3; y = b; 68 - * read_barrier_depends(); 69 - * x = a; 70 - * </programlisting> 71 - * 72 - * does not enforce ordering, since there is no data dependency between 73 - * the read of "a" and the read of "b". Therefore, on some CPUs, such 74 - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() 75 - * in cases like this where there are no data dependencies. 76 - **/ 77 - 78 - #define read_barrier_depends() do { } while (0) 79 - 80 - #ifdef CONFIG_SMP 81 - #define smp_mb() mb() 82 - #define smp_rmb() rmb() 83 - #define smp_wmb() wmb() 84 - #define smp_read_barrier_depends() read_barrier_depends() 85 - #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) 86 - #else 87 - #define smp_mb() barrier() 88 - #define smp_rmb() barrier() 89 - #define smp_wmb() barrier() 90 - #define smp_read_barrier_depends() do { } while (0) 91 - #define set_mb(var, value) do { var = value; barrier(); } while (0) 92 - #endif 14 + #include <asm-generic/barrier.h> 93 15 94 16 #endif /* _ASM_M32R_BARRIER_H */
+1 -13
arch/m68k/include/asm/barrier.h
··· 1 1 #ifndef _M68K_BARRIER_H 2 2 #define _M68K_BARRIER_H 3 3 4 - /* 5 - * Force strict CPU ordering. 6 - * Not really required on m68k... 7 - */ 8 4 #define nop() do { asm volatile ("nop"); barrier(); } while (0) 9 - #define mb() barrier() 10 - #define rmb() barrier() 11 - #define wmb() barrier() 12 - #define read_barrier_depends() ((void)0) 13 - #define set_mb(var, value) ({ (var) = (value); wmb(); }) 14 5 15 - #define smp_mb() barrier() 16 - #define smp_rmb() barrier() 17 - #define smp_wmb() barrier() 18 - #define smp_read_barrier_depends() ((void)0) 6 + #include <asm-generic/barrier.h> 19 7 20 8 #endif /* _M68K_BARRIER_H */
+1
arch/microblaze/include/asm/Kbuild
··· 4 4 generic-y += trace_clock.h 5 5 generic-y += syscalls.h 6 6 generic-y += preempt.h 7 + generic-y += barrier.h
-27
arch/microblaze/include/asm/barrier.h
··· 1 - /* 2 - * Copyright (C) 2006 Atmark Techno, Inc. 3 - * 4 - * This file is subject to the terms and conditions of the GNU General Public 5 - * License. See the file "COPYING" in the main directory of this archive 6 - * for more details. 7 - */ 8 - 9 - #ifndef _ASM_MICROBLAZE_BARRIER_H 10 - #define _ASM_MICROBLAZE_BARRIER_H 11 - 12 - #define nop() asm volatile ("nop") 13 - 14 - #define smp_read_barrier_depends() do {} while (0) 15 - #define read_barrier_depends() do {} while (0) 16 - 17 - #define mb() barrier() 18 - #define rmb() mb() 19 - #define wmb() mb() 20 - #define set_mb(var, value) do { var = value; mb(); } while (0) 21 - #define set_wmb(var, value) do { var = value; wmb(); } while (0) 22 - 23 - #define smp_mb() mb() 24 - #define smp_rmb() rmb() 25 - #define smp_wmb() wmb() 26 - 27 - #endif /* _ASM_MICROBLAZE_BARRIER_H */
+1
arch/mn10300/include/asm/Kbuild
··· 3 3 generic-y += exec.h 4 4 generic-y += trace_clock.h 5 5 generic-y += preempt.h 6 + generic-y += barrier.h
-37
arch/mn10300/include/asm/barrier.h
··· 1 - /* MN10300 memory barrier definitions 2 - * 3 - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 4 - * Written by David Howells (dhowells@redhat.com) 5 - * 6 - * This program is free software; you can redistribute it and/or 7 - * modify it under the terms of the GNU General Public Licence 8 - * as published by the Free Software Foundation; either version 9 - * 2 of the Licence, or (at your option) any later version. 10 - */ 11 - #ifndef _ASM_BARRIER_H 12 - #define _ASM_BARRIER_H 13 - 14 - #define nop() asm volatile ("nop") 15 - 16 - #define mb() asm volatile ("": : :"memory") 17 - #define rmb() mb() 18 - #define wmb() asm volatile ("": : :"memory") 19 - 20 - #ifdef CONFIG_SMP 21 - #define smp_mb() mb() 22 - #define smp_rmb() rmb() 23 - #define smp_wmb() wmb() 24 - #define set_mb(var, value) do { xchg(&var, value); } while (0) 25 - #else /* CONFIG_SMP */ 26 - #define smp_mb() barrier() 27 - #define smp_rmb() barrier() 28 - #define smp_wmb() barrier() 29 - #define set_mb(var, value) do { var = value; mb(); } while (0) 30 - #endif /* CONFIG_SMP */ 31 - 32 - #define set_wmb(var, value) do { var = value; wmb(); } while (0) 33 - 34 - #define read_barrier_depends() do {} while (0) 35 - #define smp_read_barrier_depends() do {} while (0) 36 - 37 - #endif /* _ASM_BARRIER_H */
+1
arch/parisc/include/asm/Kbuild
··· 5 5 poll.h xor.h clkdev.h exec.h 6 6 generic-y += trace_clock.h 7 7 generic-y += preempt.h 8 + generic-y += barrier.h
-35
arch/parisc/include/asm/barrier.h
··· 1 - #ifndef __PARISC_BARRIER_H 2 - #define __PARISC_BARRIER_H 3 - 4 - /* 5 - ** This is simply the barrier() macro from linux/kernel.h but when serial.c 6 - ** uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h 7 - ** hasn't yet been included yet so it fails, thus repeating the macro here. 8 - ** 9 - ** PA-RISC architecture allows for weakly ordered memory accesses although 10 - ** none of the processors use it. There is a strong ordered bit that is 11 - ** set in the O-bit of the page directory entry. Operating systems that 12 - ** can not tolerate out of order accesses should set this bit when mapping 13 - ** pages. The O-bit of the PSW should also be set to 1 (I don't believe any 14 - ** of the processor implemented the PSW O-bit). The PCX-W ERS states that 15 - ** the TLB O-bit is not implemented so the page directory does not need to 16 - ** have the O-bit set when mapping pages (section 3.1). This section also 17 - ** states that the PSW Y, Z, G, and O bits are not implemented. 18 - ** So it looks like nothing needs to be done for parisc-linux (yet). 19 - ** (thanks to chada for the above comment -ggg) 20 - ** 21 - ** The __asm__ op below simple prevents gcc/ld from reordering 22 - ** instructions across the mb() "call". 23 - */ 24 - #define mb() __asm__ __volatile__("":::"memory") /* barrier() */ 25 - #define rmb() mb() 26 - #define wmb() mb() 27 - #define smp_mb() mb() 28 - #define smp_rmb() mb() 29 - #define smp_wmb() mb() 30 - #define smp_read_barrier_depends() do { } while(0) 31 - #define read_barrier_depends() do { } while(0) 32 - 33 - #define set_mb(var, value) do { var = value; mb(); } while (0) 34 - 35 - #endif /* __PARISC_BARRIER_H */
+1
arch/score/include/asm/Kbuild
··· 5 5 generic-y += trace_clock.h 6 6 generic-y += xor.h 7 7 generic-y += preempt.h 8 + generic-y += barrier.h
-16
arch/score/include/asm/barrier.h
··· 1 - #ifndef _ASM_SCORE_BARRIER_H 2 - #define _ASM_SCORE_BARRIER_H 3 - 4 - #define mb() barrier() 5 - #define rmb() barrier() 6 - #define wmb() barrier() 7 - #define smp_mb() barrier() 8 - #define smp_rmb() barrier() 9 - #define smp_wmb() barrier() 10 - 11 - #define read_barrier_depends() do {} while (0) 12 - #define smp_read_barrier_depends() do {} while (0) 13 - 14 - #define set_mb(var, value) do {var = value; wmb(); } while (0) 15 - 16 - #endif /* _ASM_SCORE_BARRIER_H */
+3 -18
arch/sh/include/asm/barrier.h
··· 26 26 #if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) 27 27 #define mb() __asm__ __volatile__ ("synco": : :"memory") 28 28 #define rmb() mb() 29 - #define wmb() __asm__ __volatile__ ("synco": : :"memory") 29 + #define wmb() mb() 30 30 #define ctrl_barrier() __icbi(PAGE_OFFSET) 31 - #define read_barrier_depends() do { } while(0) 32 31 #else 33 - #define mb() __asm__ __volatile__ ("": : :"memory") 34 - #define rmb() mb() 35 - #define wmb() __asm__ __volatile__ ("": : :"memory") 36 32 #define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop") 37 - #define read_barrier_depends() do { } while(0) 38 - #endif 39 - 40 - #ifdef CONFIG_SMP 41 - #define smp_mb() mb() 42 - #define smp_rmb() rmb() 43 - #define smp_wmb() wmb() 44 - #define smp_read_barrier_depends() read_barrier_depends() 45 - #else 46 - #define smp_mb() barrier() 47 - #define smp_rmb() barrier() 48 - #define smp_wmb() barrier() 49 - #define smp_read_barrier_depends() do { } while(0) 50 33 #endif 51 34 52 35 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) 36 + 37 + #include <asm-generic/barrier.h> 53 38 54 39 #endif /* __ASM_SH_BARRIER_H */
+2 -10
arch/sparc/include/asm/barrier_32.h
··· 1 1 #ifndef __SPARC_BARRIER_H 2 2 #define __SPARC_BARRIER_H 3 3 4 - /* XXX Change this if we ever use a PSO mode kernel. */ 5 - #define mb() __asm__ __volatile__ ("" : : : "memory") 6 - #define rmb() mb() 7 - #define wmb() mb() 8 - #define read_barrier_depends() do { } while(0) 9 - #define set_mb(__var, __value) do { __var = __value; mb(); } while(0) 10 - #define smp_mb() __asm__ __volatile__("":::"memory") 11 - #define smp_rmb() __asm__ __volatile__("":::"memory") 12 - #define smp_wmb() __asm__ __volatile__("":::"memory") 13 - #define smp_read_barrier_depends() do { } while(0) 4 + #include <asm/processor.h> /* for nop() */ 5 + #include <asm-generic/barrier.h> 14 6 15 7 #endif /* !(__SPARC_BARRIER_H) */
+1 -67
arch/tile/include/asm/barrier.h
··· 22 22 #include <arch/spr_def.h> 23 23 #include <asm/timex.h> 24 24 25 - /* 26 - * read_barrier_depends - Flush all pending reads that subsequents reads 27 - * depend on. 28 - * 29 - * No data-dependent reads from memory-like regions are ever reordered 30 - * over this barrier. All reads preceding this primitive are guaranteed 31 - * to access memory (but not necessarily other CPUs' caches) before any 32 - * reads following this primitive that depend on the data return by 33 - * any of the preceding reads. This primitive is much lighter weight than 34 - * rmb() on most CPUs, and is never heavier weight than is 35 - * rmb(). 36 - * 37 - * These ordering constraints are respected by both the local CPU 38 - * and the compiler. 39 - * 40 - * Ordering is not guaranteed by anything other than these primitives, 41 - * not even by data dependencies. See the documentation for 42 - * memory_barrier() for examples and URLs to more information. 43 - * 44 - * For example, the following code would force ordering (the initial 45 - * value of "a" is zero, "b" is one, and "p" is "&a"): 46 - * 47 - * <programlisting> 48 - * CPU 0 CPU 1 49 - * 50 - * b = 2; 51 - * memory_barrier(); 52 - * p = &b; q = p; 53 - * read_barrier_depends(); 54 - * d = *q; 55 - * </programlisting> 56 - * 57 - * because the read of "*q" depends on the read of "p" and these 58 - * two reads are separated by a read_barrier_depends(). However, 59 - * the following code, with the same initial values for "a" and "b": 60 - * 61 - * <programlisting> 62 - * CPU 0 CPU 1 63 - * 64 - * a = 2; 65 - * memory_barrier(); 66 - * b = 3; y = b; 67 - * read_barrier_depends(); 68 - * x = a; 69 - * </programlisting> 70 - * 71 - * does not enforce ordering, since there is no data dependency between 72 - * the read of "a" and the read of "b". Therefore, on some CPUs, such 73 - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() 74 - * in cases like this where there are no data dependencies. 75 - */ 76 - #define read_barrier_depends() do { } while (0) 77 - 78 25 #define __sync() __insn_mf() 79 26 80 27 #include <hv/syscall_public.h> ··· 72 125 #define mb() fast_mb() 73 126 #define iob() fast_iob() 74 127 75 - #ifdef CONFIG_SMP 76 - #define smp_mb() mb() 77 - #define smp_rmb() rmb() 78 - #define smp_wmb() wmb() 79 - #define smp_read_barrier_depends() read_barrier_depends() 80 - #else 81 - #define smp_mb() barrier() 82 - #define smp_rmb() barrier() 83 - #define smp_wmb() barrier() 84 - #define smp_read_barrier_depends() do { } while (0) 85 - #endif 86 - 87 - #define set_mb(var, value) \ 88 - do { var = value; mb(); } while (0) 128 + #include <asm-generic/barrier.h> 89 129 90 130 #endif /* !__ASSEMBLY__ */ 91 131 #endif /* _ASM_TILE_BARRIER_H */
+1 -10
arch/unicore32/include/asm/barrier.h
··· 14 14 #define dsb() __asm__ __volatile__ ("" : : : "memory") 15 15 #define dmb() __asm__ __volatile__ ("" : : : "memory") 16 16 17 - #define mb() barrier() 18 - #define rmb() barrier() 19 - #define wmb() barrier() 20 - #define smp_mb() barrier() 21 - #define smp_rmb() barrier() 22 - #define smp_wmb() barrier() 23 - #define read_barrier_depends() do { } while (0) 24 - #define smp_read_barrier_depends() do { } while (0) 25 - 26 - #define set_mb(var, value) do { var = value; smp_mb(); } while (0) 17 + #include <asm-generic/barrier.h> 27 18 28 19 #endif /* __UNICORE_BARRIER_H__ */
+1 -8
arch/xtensa/include/asm/barrier.h
··· 9 9 #ifndef _XTENSA_SYSTEM_H 10 10 #define _XTENSA_SYSTEM_H 11 11 12 - #define smp_read_barrier_depends() do { } while(0) 13 - #define read_barrier_depends() do { } while(0) 14 - 15 12 #define mb() ({ __asm__ __volatile__("memw" : : : "memory"); }) 16 13 #define rmb() barrier() 17 14 #define wmb() mb() 18 15 19 16 #ifdef CONFIG_SMP 20 17 #error smp_* not defined 21 - #else 22 - #define smp_mb() barrier() 23 - #define smp_rmb() barrier() 24 - #define smp_wmb() barrier() 25 18 #endif 26 19 27 - #define set_mb(var, value) do { var = value; mb(); } while (0) 20 + #include <asm-generic/barrier.h> 28 21 29 22 #endif /* _XTENSA_SYSTEM_H */
+29 -13
include/asm-generic/barrier.h
··· 1 - /* Generic barrier definitions, based on MN10300 definitions. 1 + /* 2 + * Generic barrier definitions, originally based on MN10300 definitions. 2 3 * 3 4 * It should be possible to use these on really simple architectures, 4 5 * but it serves more as a starting point for new ports. ··· 17 16 18 17 #ifndef __ASSEMBLY__ 19 18 20 - #define nop() asm volatile ("nop") 19 + #include <linux/compiler.h> 20 + 21 + #ifndef nop 22 + #define nop() asm volatile ("nop") 23 + #endif 21 24 22 25 /* 23 - * Force strict CPU ordering. 24 - * And yes, this is required on UP too when we're talking 25 - * to devices. 26 + * Force strict CPU ordering. And yes, this is required on UP too when we're 27 + * talking to devices. 26 28 * 27 - * This implementation only contains a compiler barrier. 29 + * Fall back to compiler barriers if nothing better is provided. 28 30 */ 29 31 30 - #define mb() asm volatile ("": : :"memory") 32 + #ifndef mb 33 + #define mb() barrier() 34 + #endif 35 + 36 + #ifndef rmb 31 37 #define rmb() mb() 32 - #define wmb() asm volatile ("": : :"memory") 38 + #endif 39 + 40 + #ifndef wmb 41 + #define wmb() mb() 42 + #endif 43 + 44 + #ifndef read_barrier_depends 45 + #define read_barrier_depends() do { } while (0) 46 + #endif 33 47 34 48 #ifdef CONFIG_SMP 35 49 #define smp_mb() mb() 36 50 #define smp_rmb() rmb() 37 51 #define smp_wmb() wmb() 52 + #define smp_read_barrier_depends() read_barrier_depends() 38 53 #else 39 54 #define smp_mb() barrier() 40 55 #define smp_rmb() barrier() 41 56 #define smp_wmb() barrier() 57 + #define smp_read_barrier_depends() do { } while (0) 42 58 #endif 43 59 44 - #define set_mb(var, value) do { var = value; mb(); } while (0) 45 - #define set_wmb(var, value) do { var = value; wmb(); } while (0) 46 - 47 - #define read_barrier_depends() do {} while (0) 48 - #define smp_read_barrier_depends() do {} while (0) 60 + #ifndef set_mb 61 + #define set_mb(var, value) do { (var) = (value); mb(); } while (0) 62 + #endif 49 63 50 64 #endif /* !__ASSEMBLY__ */ 51 65 #endif /* __ASM_GENERIC_BARRIER_H */