Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

arch: Cleanup read_barrier_depends() and comments

This patch cleans up the handling of read_barrier_depends() and
smp_read_barrier_depends(). In multiple spots in the kernel headers,
read_barrier_depends() is defined as "do { } while (0)"; yet the same
headers then enter the SMP vs. non-SMP sections, where the SMP version
of smp_read_barrier_depends() references read_barrier_depends() and the
non-SMP version defines it as yet another empty do/while.

With this commit I went through and cleaned out the duplicate
definitions, reducing each header to two: one for read_barrier_depends()
and one for smp_read_barrier_depends(). In addition, I moved the 50-line
comment for the macro out of the x86 and mips headers, which define it
as an empty do/while, and into the two headers that actually implement
it: alpha and blackfin.
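
Condensed, the change to a header that treats the barrier as a no-op
looks like the sketch below (distilled from the per-file diffs that
follow, not literal kernel code):

	/* Before: an empty read_barrier_depends(), plus two more
	 * definitions of smp_read_barrier_depends() split across the
	 * SMP and non-SMP branches.
	 */
	#define read_barrier_depends()		do { } while (0)

	#ifdef CONFIG_SMP
	#define smp_read_barrier_depends()	read_barrier_depends()
	#else
	#define smp_read_barrier_depends()	do { } while (0)
	#endif

	/* After: exactly two definitions, outside the SMP block. */
	#define read_barrier_depends()		do { } while (0)
	#define smp_read_barrier_depends()	do { } while (0)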

Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Alexander Duyck, committed by David S. Miller
8a449718 c11a9009

10 files changed: +129 -135
arch/alpha/include/asm/barrier.h (+51)
···
 #define rmb() __asm__ __volatile__("mb": : :"memory")
 #define wmb() __asm__ __volatile__("wmb": : :"memory")
 
+/**
+ * read_barrier_depends - Flush all pending reads that subsequents reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data return by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ */
 #define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
 
 #ifdef CONFIG_SMP
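
The publish/subscribe pattern described by the comment above looks
roughly like the following in practice. This is an illustrative sketch
only: writer() and reader() are made-up names, and wmb() stands in for
the comment's memory_barrier() on the producer side, the usual pairing
for a store-then-publish sequence.

	int a = 0, b = 1;
	int *p = &a;

	/* CPU 0: initialize b, then publish it through p. */
	void writer(void)
	{
		b = 2;
		wmb();			/* order the store to b before the store to p */
		p = &b;
	}

	/* CPU 1: data-dependent read of whatever p points to. */
	int reader(void)
	{
		int *q = p;
		read_barrier_depends();	/* "mb" on Alpha; an empty do/while elsewhere */
		return *q;		/* if q observed &b, this read is guaranteed to see 2 */
	}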
arch/blackfin/include/asm/barrier.h (+51)
···
 # define mb()  do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
 # define rmb() do { barrier(); smp_check_barrier(); } while (0)
 # define wmb() do { barrier(); smp_mark_barrier(); } while (0)
+/*
+ * read_barrier_depends - Flush all pending reads that subsequents reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data return by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	b = 2;
+ *	memory_barrier();
+ *	p = &b;				q = p;
+ *					read_barrier_depends();
+ *					d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ *	CPU 0				CPU 1
+ *
+ *	a = 2;
+ *	memory_barrier();
+ *	b = 3;				y = b;
+ *					read_barrier_depends();
+ *					x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ */
 # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
 #endif
 
arch/ia64/include/asm/barrier.h (+9 -13)
···
  * it's (presumably) much slower than mf and (b) mf.a is supported for
  * sequential memory pages only.
  */
-#define mb()	ia64_mf()
-#define rmb()	mb()
-#define wmb()	mb()
-#define read_barrier_depends()	do { } while(0)
+#define mb()		ia64_mf()
+#define rmb()		mb()
+#define wmb()		mb()
 
 #ifdef CONFIG_SMP
 # define smp_mb()	mb()
-# define smp_rmb()	rmb()
-# define smp_wmb()	wmb()
-# define smp_read_barrier_depends()	read_barrier_depends()
-
 #else
-
 # define smp_mb()	barrier()
-# define smp_rmb()	barrier()
-# define smp_wmb()	barrier()
-# define smp_read_barrier_depends()	do { } while(0)
-
 #endif
+
+#define smp_rmb()	smp_mb()
+#define smp_wmb()	smp_mb()
+
+#define read_barrier_depends()	do { } while (0)
+#define smp_read_barrier_depends()	do { } while (0)
 
 #define smp_mb__before_atomic()	barrier()
 #define smp_mb__after_atomic()	barrier()
arch/metag/include/asm/barrier.h (+4 -3)
···
 	wr_fence();
 }
 
-#define read_barrier_depends()	do { } while (0)
-
 #ifndef CONFIG_SMP
 #define fence()	do { } while (0)
 #define smp_mb()	barrier()
···
 #define smp_wmb()	barrier()
 #endif
 #endif
-#define smp_read_barrier_depends()	do { } while (0)
+
+#define read_barrier_depends()	do { } while (0)
+#define smp_read_barrier_depends()	do { } while (0)
+
 #define set_mb(var, value) do { var = value; smp_mb(); } while (0)
 
 #define smp_store_release(p, v) \
arch/mips/include/asm/barrier.h (-52)
···
 
 #include <asm/addrspace.h>
 
-/*
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- */
-
 #define read_barrier_depends()	do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
 
arch/powerpc/include/asm/barrier.h (+3 -3)
···
 #define mb()	__asm__ __volatile__ ("sync" : : : "memory")
 #define rmb()	__asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
-#define read_barrier_depends()	do { } while(0)
 
 #define set_mb(var, value)	do { var = value; mb(); } while (0)
 
···
 #define smp_mb()	mb()
 #define smp_rmb()	__lwsync()
 #define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
-#define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define __lwsync()	barrier()
 
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
 #endif /* CONFIG_SMP */
+
+#define read_barrier_depends()	do { } while (0)
+#define smp_read_barrier_depends()	do { } while (0)
 
 /*
  * This is a barrier which prevents following instructions from being
arch/s390/include/asm/barrier.h (+3 -2)
···
 
 #define rmb()	mb()
 #define wmb()	mb()
-#define read_barrier_depends()	do { } while(0)
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
 #define smp_wmb()	wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
+
+#define read_barrier_depends()	do { } while (0)
+#define smp_read_barrier_depends()	do { } while (0)
 
 #define smp_mb__before_atomic()	smp_mb()
 #define smp_mb__after_atomic()	smp_mb()
arch/sparc/include/asm/barrier_64.h (+2 -2)
···
 #define rmb()	__asm__ __volatile__("":::"memory")
 #define wmb()	__asm__ __volatile__("":::"memory")
 
-#define read_barrier_depends()	do { } while(0)
 #define set_mb(__var, __value) \
 	do { __var = __value; membar_safe("#StoreLoad"); } while(0)
 
···
 #define smp_wmb()	__asm__ __volatile__("":::"memory")
 #endif
 
-#define smp_read_barrier_depends()	do { } while(0)
+#define read_barrier_depends()	do { } while (0)
+#define smp_read_barrier_depends()	do { } while (0)
 
 #define smp_store_release(p, v) \
 do { \
arch/x86/include/asm/barrier.h (+3 -56)
···
 #define wmb()	asm volatile("sfence" ::: "memory")
 #endif
 
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier. All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies. See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends()	do { } while (0)
-
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #ifdef CONFIG_X86_PPRO_FENCE
···
 # define smp_rmb()	barrier()
 #endif
 #define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else /* !SMP */
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif /* SMP */
+
+#define read_barrier_depends()	do { } while (0)
+#define smp_read_barrier_depends()	do { } while (0)
 
 #if defined(CONFIG_X86_PPRO_FENCE)
 
arch/x86/um/asm/barrier.h (+3 -4)
···
 
 #endif /* CONFIG_X86_32 */
 
-#define read_barrier_depends()	do { } while (0)
-
 #ifdef CONFIG_SMP
 
 #define smp_mb()	mb()
···
 
 #define smp_wmb()	barrier()
 
-#define smp_read_barrier_depends()	read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
 #else /* CONFIG_SMP */
···
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 
 #endif /* CONFIG_SMP */
+
+#define read_barrier_depends()	do { } while (0)
+#define smp_read_barrier_depends()	do { } while (0)
 
 /*
  * Stop RDTSC speculation. This is needed when you need to use RDTSC
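
For context on why an otherwise empty macro is kept at all: the
canonical consumer of smp_read_barrier_depends() is RCU's pointer-fetch
path. A simplified sketch of that idiom follows; it is not the literal
rcu_dereference() definition, which layers lockdep and sparse checking
on top of this.

	/* Fetch an RCU-protected pointer; the dependency barrier orders
	 * the pointer load against later loads through that pointer.
	 * After this cleanup it still expands to a real instruction only
	 * on the architectures that implement it (alpha, and blackfin in
	 * this tree).
	 */
	#define rcu_dereference_sketch(p)		\
	({						\
		typeof(p) __p = ACCESS_ONCE(p);		\
		smp_read_barrier_depends();		\
		__p;					\
	})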