Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Blackfin: SMP: make all barriers handle cache issues

When suspending/resuming, the common task freezing code will run in
parallel and freeze processes on each core. This is because the code
uses the non-smp version of memory barriers (as well it should).

The Blackfin smp barrier logic at the moment contains the cache sync
logic, but the non-smp barriers do not. This is incorrect, as Rafael
summarized:
> ...
> The existing memory barriers are SMP barriers too, but they are more
> than _just_ SMP barriers. At least that's how it is _supposed_ to be
> (eg. rmb() is supposed to be stronger than smp_rmb()).
> ...
> However, looking at the blackfin's definitions of SMP barriers I see
> that it uses extra stuff that should _also_ be used in the definitions
> of the mandatory barriers.
> ...

URL: http://lkml.org/lkml/2011/4/13/11
LKML-Reference: <BANLkTi=F-C-vwX4PGGfbkdTBw3OWL-twfg@mail.gmail.com>
Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>

authored by

Graf Yang and committed by
Mike Frysinger
943aee0c 85f2e689

+18 -18
arch/blackfin/include/asm/system.h
@@ -19,8 +19,8 @@
  * Force strict CPU ordering.
  */
 #define nop() __asm__ __volatile__ ("nop;\n\t" : : )
-#define mb() __asm__ __volatile__ ("" : : : "memory")
-#define rmb() __asm__ __volatile__ ("" : : : "memory")
-#define wmb() __asm__ __volatile__ ("" : : : "memory")
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define read_barrier_depends() do { } while(0)
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_read_barrier_depends() read_barrier_depends()
 
 #ifdef CONFIG_SMP
 asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
@@ -37,14 +37,14 @@
 				unsigned long new, unsigned long old);
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-# define smp_mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
-# define smp_rmb() do { barrier(); smp_check_barrier(); } while (0)
-# define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0)
-#define smp_read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
-
+/* Force Core data cache coherence */
+# define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
+# define rmb() do { barrier(); smp_check_barrier(); } while (0)
+# define wmb() do { barrier(); smp_mark_barrier(); } while (0)
+# define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
 #else
-# define smp_mb() barrier()
-# define smp_rmb() barrier()
-# define smp_wmb() barrier()
-#define smp_read_barrier_depends() barrier()
+# define mb() barrier()
+# define rmb() barrier()
+# define wmb() barrier()
+# define read_barrier_depends() do { } while (0)
 #endif
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
@@ -99,10 +99,10 @@
 
 #else /* !CONFIG_SMP */
 
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define read_barrier_depends() do { } while (0)
 
 struct __xchg_dummy {
 	unsigned long a[100];