Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

sh: Split out atomic ops logically.

We have a few different ways to do the atomic operations, so split
them out into different headers rather than bloating atomic.h.
Kernelspace gUSA will take this up to a third implementation.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

3 files changed, 180 insertions(+), 151 deletions(-)
include/asm-sh/atomic-irq.h (new file, +71)
+#ifndef __ASM_SH_ATOMIC_IRQ_H
+#define __ASM_SH_ATOMIC_IRQ_H
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        *(long *)v += i;
+        local_irq_restore(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        *(long *)v -= i;
+        local_irq_restore(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+        unsigned long temp, flags;
+
+        local_irq_save(flags);
+        temp = *(long *)v;
+        temp += i;
+        *(long *)v = temp;
+        local_irq_restore(flags);
+
+        return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+        unsigned long temp, flags;
+
+        local_irq_save(flags);
+        temp = *(long *)v;
+        temp -= i;
+        *(long *)v = temp;
+        local_irq_restore(flags);
+
+        return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        *(long *)v &= ~mask;
+        local_irq_restore(flags);
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        *(long *)v |= mask;
+        local_irq_restore(flags);
+}
+
+#endif /* __ASM_SH_ATOMIC_IRQ_H */
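The IRQ flavor relies on there being a single CPU: with interrupts masked
by local_irq_save(), nothing can interleave with the plain C
read-modify-write, so no special instructions are needed. Callers see the
same atomic_t API either way; a small hypothetical caller, just to
illustrate the interface these headers implement (not part of the patch):

/* Hypothetical caller, for illustration only. */
#include <asm/atomic.h>

static atomic_t refcnt = ATOMIC_INIT(1);

static void get_ref(void)
{
        atomic_add(1, &refcnt);
}

/* atomic_sub_return() hands back the post-subtraction value,
 * so the caller can tell when the last reference went away. */
static int put_ref(void)
{
        return atomic_sub_return(1, &refcnt) == 0;
}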
include/asm-sh/atomic-llsc.h (new file, +107)
+#ifndef __ASM_SH_ATOMIC_LLSC_H
+#define __ASM_SH_ATOMIC_LLSC_H
+
+/*
+ * To get proper branch prediction for the main line, we must branch
+ * forward to code at the end of this object's .text section, then
+ * branch back to restart the operation.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0 ! atomic_add    \n"
+"       add     %1, %0                  \n"
+"       movco.l %0, @%2                 \n"
+"       bf      1b                      \n"
+        : "=&z" (tmp)
+        : "r" (i), "r" (&v->counter)
+        : "t");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0 ! atomic_sub    \n"
+"       sub     %1, %0                  \n"
+"       movco.l %0, @%2                 \n"
+"       bf      1b                      \n"
+        : "=&z" (tmp)
+        : "r" (i), "r" (&v->counter)
+        : "t");
+}
+
+/*
+ * SH-4A note:
+ *
+ * We basically get atomic_xxx_return() for free compared with
+ * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
+ * encoding, so the retval is automatically set without having to
+ * do any special work.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+        unsigned long temp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0 ! atomic_add_return     \n"
+"       add     %1, %0                  \n"
+"       movco.l %0, @%2                 \n"
+"       bf      1b                      \n"
+"       synco                           \n"
+        : "=&z" (temp)
+        : "r" (i), "r" (&v->counter)
+        : "t");
+
+        return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+        unsigned long temp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0 ! atomic_sub_return     \n"
+"       sub     %1, %0                  \n"
+"       movco.l %0, @%2                 \n"
+"       bf      1b                      \n"
+"       synco                           \n"
+        : "=&z" (temp)
+        : "r" (i), "r" (&v->counter)
+        : "t");
+
+        return temp;
+}
+
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0 ! atomic_clear_mask     \n"
+"       and     %1, %0                  \n"
+"       movco.l %0, @%2                 \n"
+"       bf      1b                      \n"
+        : "=&z" (tmp)
+        : "r" (~mask), "r" (&v->counter)
+        : "t");
+}
+
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+        unsigned long tmp;
+
+        __asm__ __volatile__ (
+"1:     movli.l @%2, %0 ! atomic_set_mask       \n"
+"       or      %1, %0                  \n"
+"       movco.l %0, @%2                 \n"
+"       bf      1b                      \n"
+        : "=&z" (tmp)
+        : "r" (mask), "r" (&v->counter)
+        : "t");
+}
+
+#endif /* __ASM_SH_ATOMIC_LLSC_H */
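movli.l/movco.l form a load-locked/store-conditional pair: movco.l only
sets the T bit if the reservation taken by movli.l is still intact, and
bf loops back to label 1 to retry when the store failed. (The synco in
the _return variants is the SH-4A synchronization barrier.) For readers
more familiar with compare-and-swap, here is a rough C11 analogue of the
same retry loop; it is a sketch for illustration only (the function name
is invented), not code from the patch:

#include <stdatomic.h>
#include <stdio.h>

/* Rough C11 analogue of the movli.l/movco.l loop above: reload,
 * recompute, and retry until the conditional store succeeds. */
static int atomic_add_return_c11(int i, _Atomic int *v)
{
        int old = atomic_load(v);

        /* A failed compare-exchange plays the role of movco.l
         * clearing T; it also refreshes 'old' for the next pass. */
        while (!atomic_compare_exchange_weak(v, &old, old + i))
                ;

        return old + i;
}

int main(void)
{
        _Atomic int counter = 40;
        printf("%d\n", atomic_add_return_c11(2, &counter));    /* 42 */
        return 0;
}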
include/asm-sh/atomic.h (+2, -151)
···
 #include <linux/compiler.h>
 #include <asm/system.h>
 
-/*
- * To get proper branch prediction for the main line, we must branch
- * forward to code at the end of this object's .text section, then
- * branch back to restart the operation.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
 #ifdef CONFIG_CPU_SH4A
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0 ! atomic_add    \n"
-"       add     %1, %0                  \n"
-"       movco.l %0, @%2                 \n"
-"       bf      1b                      \n"
-        : "=&z" (tmp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
+#include <asm/atomic-llsc.h>
 #else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *(long *)v += i;
-        local_irq_restore(flags);
+#include <asm/atomic-irq.h>
 #endif
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0 ! atomic_sub    \n"
-"       sub     %1, %0                  \n"
-"       movco.l %0, @%2                 \n"
-"       bf      1b                      \n"
-        : "=&z" (tmp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *(long *)v -= i;
-        local_irq_restore(flags);
-#endif
-}
-
-/*
- * SH-4A note:
- *
- * We basically get atomic_xxx_return() for free compared with
- * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
- * encoding, so the retval is automatically set without having to
- * do any special work.
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0 ! atomic_add_return     \n"
-"       add     %1, %0                  \n"
-"       movco.l %0, @%2                 \n"
-"       bf      1b                      \n"
-"       synco                           \n"
-        : "=&z" (temp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        temp = *(long *)v;
-        temp += i;
-        *(long *)v = temp;
-        local_irq_restore(flags);
-#endif
-
-        return temp;
-}
 
 #define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned long temp;
-
-#ifdef CONFIG_CPU_SH4A
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0 ! atomic_sub_return     \n"
-"       sub     %1, %0                  \n"
-"       movco.l %0, @%2                 \n"
-"       bf      1b                      \n"
-"       synco                           \n"
-        : "=&z" (temp)
-        : "r" (i), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        temp = *(long *)v;
-        temp -= i;
-        *(long *)v = temp;
-        local_irq_restore(flags);
-#endif
-
-        return temp;
-}
 
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))
···
         return ret != u;
 }
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0 ! atomic_clear_mask     \n"
-"       and     %1, %0                  \n"
-"       movco.l %0, @%2                 \n"
-"       bf      1b                      \n"
-        : "=&z" (tmp)
-        : "r" (~mask), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *(long *)v &= ~mask;
-        local_irq_restore(flags);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#ifdef CONFIG_CPU_SH4A
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0 ! atomic_set_mask       \n"
-"       or      %1, %0                  \n"
-"       movco.l %0, @%2                 \n"
-"       bf      1b                      \n"
-        : "=&z" (tmp)
-        : "r" (mask), "r" (&v->counter)
-        : "t");
-#else
-        unsigned long flags;
-
-        local_irq_save(flags);
-        *(long *)v |= mask;
-        local_irq_restore(flags);
-#endif
-}
 
 /* Atomic operations are already serializing on SH */
 #define smp_mb__before_atomic_dec()     barrier()
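The payoff of the split shows in how little atomic.h now carries, and in
how cheaply the third implementation mentioned in the changelog could
slot in later. A sketch of what that future dispatch might look like;
the gUSA config symbol and header name below are hypothetical, not part
of this patch:

/* Hypothetical future dispatch in atomic.h, for illustration only. */
#if defined(CONFIG_CPU_SH4A)
#include <asm/atomic-llsc.h>
#elif defined(CONFIG_CPU_SH3_GUSA)      /* hypothetical symbol */
#include <asm/atomic-gusa.h>            /* hypothetical gUSA header */
#else
#include <asm/atomic-irq.h>
#endif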