Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'locking/arch-atomic' into locking/core, because it's ready for upstream

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+846 -1020
+27 -15
arch/alpha/include/asm/atomic.h
··· 29 29 * branch back to restart the operation. 30 30 */ 31 31 32 - #define ATOMIC_OP(op) \ 32 + #define ATOMIC_OP(op, asm_op) \ 33 33 static __inline__ void atomic_##op(int i, atomic_t * v) \ 34 34 { \ 35 35 unsigned long temp; \ 36 36 __asm__ __volatile__( \ 37 37 "1: ldl_l %0,%1\n" \ 38 - " " #op "l %0,%2,%0\n" \ 38 + " " #asm_op " %0,%2,%0\n" \ 39 39 " stl_c %0,%1\n" \ 40 40 " beq %0,2f\n" \ 41 41 ".subsection 2\n" \ ··· 45 45 :"Ir" (i), "m" (v->counter)); \ 46 46 } \ 47 47 48 - #define ATOMIC_OP_RETURN(op) \ 48 + #define ATOMIC_OP_RETURN(op, asm_op) \ 49 49 static inline int atomic_##op##_return(int i, atomic_t *v) \ 50 50 { \ 51 51 long temp, result; \ 52 52 smp_mb(); \ 53 53 __asm__ __volatile__( \ 54 54 "1: ldl_l %0,%1\n" \ 55 - " " #op "l %0,%3,%2\n" \ 56 - " " #op "l %0,%3,%0\n" \ 55 + " " #asm_op " %0,%3,%2\n" \ 56 + " " #asm_op " %0,%3,%0\n" \ 57 57 " stl_c %0,%1\n" \ 58 58 " beq %0,2f\n" \ 59 59 ".subsection 2\n" \ ··· 65 65 return result; \ 66 66 } 67 67 68 - #define ATOMIC64_OP(op) \ 68 + #define ATOMIC64_OP(op, asm_op) \ 69 69 static __inline__ void atomic64_##op(long i, atomic64_t * v) \ 70 70 { \ 71 71 unsigned long temp; \ 72 72 __asm__ __volatile__( \ 73 73 "1: ldq_l %0,%1\n" \ 74 - " " #op "q %0,%2,%0\n" \ 74 + " " #asm_op " %0,%2,%0\n" \ 75 75 " stq_c %0,%1\n" \ 76 76 " beq %0,2f\n" \ 77 77 ".subsection 2\n" \ ··· 81 81 :"Ir" (i), "m" (v->counter)); \ 82 82 } \ 83 83 84 - #define ATOMIC64_OP_RETURN(op) \ 84 + #define ATOMIC64_OP_RETURN(op, asm_op) \ 85 85 static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ 86 86 { \ 87 87 long temp, result; \ 88 88 smp_mb(); \ 89 89 __asm__ __volatile__( \ 90 90 "1: ldq_l %0,%1\n" \ 91 - " " #op "q %0,%3,%2\n" \ 92 - " " #op "q %0,%3,%0\n" \ 91 + " " #asm_op " %0,%3,%2\n" \ 92 + " " #asm_op " %0,%3,%0\n" \ 93 93 " stq_c %0,%1\n" \ 94 94 " beq %0,2f\n" \ 95 95 ".subsection 2\n" \ ··· 101 101 return result; \ 102 102 } 103 103 104 - #define ATOMIC_OPS(opg) \ 105 - ATOMIC_OP(opg) \ 106 - 
ATOMIC_OP_RETURN(opg) \ 107 - ATOMIC64_OP(opg) \ 108 - ATOMIC64_OP_RETURN(opg) 104 + #define ATOMIC_OPS(op) \ 105 + ATOMIC_OP(op, op##l) \ 106 + ATOMIC_OP_RETURN(op, op##l) \ 107 + ATOMIC64_OP(op, op##q) \ 108 + ATOMIC64_OP_RETURN(op, op##q) 109 109 110 110 ATOMIC_OPS(add) 111 111 ATOMIC_OPS(sub) 112 + 113 + #define atomic_andnot atomic_andnot 114 + #define atomic64_andnot atomic64_andnot 115 + 116 + ATOMIC_OP(and, and) 117 + ATOMIC_OP(andnot, bic) 118 + ATOMIC_OP(or, bis) 119 + ATOMIC_OP(xor, xor) 120 + ATOMIC64_OP(and, and) 121 + ATOMIC64_OP(andnot, bic) 122 + ATOMIC64_OP(or, bis) 123 + ATOMIC64_OP(xor, xor) 112 124 113 125 #undef ATOMIC_OPS 114 126 #undef ATOMIC64_OP_RETURN
+6 -2
arch/arc/include/asm/atomic.h
··· 143 143 144 144 ATOMIC_OPS(add, +=, add) 145 145 ATOMIC_OPS(sub, -=, sub) 146 - ATOMIC_OP(and, &=, and) 147 146 148 - #define atomic_clear_mask(mask, v) atomic_and(~(mask), (v)) 147 + #define atomic_andnot atomic_andnot 148 + 149 + ATOMIC_OP(and, &=, and) 150 + ATOMIC_OP(andnot, &= ~, bic) 151 + ATOMIC_OP(or, |=, or) 152 + ATOMIC_OP(xor, ^=, xor) 149 153 150 154 #undef ATOMIC_OPS 151 155 #undef ATOMIC_OP_RETURN
+14
arch/arm/include/asm/atomic.h
··· 194 194 ATOMIC_OPS(add, +=, add) 195 195 ATOMIC_OPS(sub, -=, sub) 196 196 197 + #define atomic_andnot atomic_andnot 198 + 199 + ATOMIC_OP(and, &=, and) 200 + ATOMIC_OP(andnot, &= ~, bic) 201 + ATOMIC_OP(or, |=, orr) 202 + ATOMIC_OP(xor, ^=, eor) 203 + 197 204 #undef ATOMIC_OPS 198 205 #undef ATOMIC_OP_RETURN 199 206 #undef ATOMIC_OP ··· 327 320 328 321 ATOMIC64_OPS(add, adds, adc) 329 322 ATOMIC64_OPS(sub, subs, sbc) 323 + 324 + #define atomic64_andnot atomic64_andnot 325 + 326 + ATOMIC64_OP(and, and, and) 327 + ATOMIC64_OP(andnot, bic, bic) 328 + ATOMIC64_OP(or, orr, orr) 329 + ATOMIC64_OP(xor, eor, eor) 330 330 331 331 #undef ATOMIC64_OPS 332 332 #undef ATOMIC64_OP_RETURN
+14
arch/arm64/include/asm/atomic.h
··· 85 85 ATOMIC_OPS(add, add) 86 86 ATOMIC_OPS(sub, sub) 87 87 88 + #define atomic_andnot atomic_andnot 89 + 90 + ATOMIC_OP(and, and) 91 + ATOMIC_OP(andnot, bic) 92 + ATOMIC_OP(or, orr) 93 + ATOMIC_OP(xor, eor) 94 + 88 95 #undef ATOMIC_OPS 89 96 #undef ATOMIC_OP_RETURN 90 97 #undef ATOMIC_OP ··· 189 182 190 183 ATOMIC64_OPS(add, add) 191 184 ATOMIC64_OPS(sub, sub) 185 + 186 + #define atomic64_andnot atomic64_andnot 187 + 188 + ATOMIC64_OP(and, and) 189 + ATOMIC64_OP(andnot, bic) 190 + ATOMIC64_OP(or, orr) 191 + ATOMIC64_OP(xor, eor) 192 192 193 193 #undef ATOMIC64_OPS 194 194 #undef ATOMIC64_OP_RETURN
+12
arch/avr32/include/asm/atomic.h
··· 44 44 ATOMIC_OP_RETURN(sub, sub, rKs21) 45 45 ATOMIC_OP_RETURN(add, add, r) 46 46 47 + #define ATOMIC_OP(op, asm_op) \ 48 + ATOMIC_OP_RETURN(op, asm_op, r) \ 49 + static inline void atomic_##op(int i, atomic_t *v) \ 50 + { \ 51 + (void)__atomic_##op##_return(i, v); \ 52 + } 53 + 54 + ATOMIC_OP(and, and) 55 + ATOMIC_OP(or, or) 56 + ATOMIC_OP(xor, eor) 57 + 58 + #undef ATOMIC_OP 47 59 #undef ATOMIC_OP_RETURN 48 60 49 61 /*
+9 -7
arch/blackfin/include/asm/atomic.h
··· 16 16 #include <linux/types.h> 17 17 18 18 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr); 19 - asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value); 20 - asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value); 21 - asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value); 19 + asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value); 20 + 21 + asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value); 22 + asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value); 22 23 asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value); 23 24 asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value); 24 25 25 26 #define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter) 26 27 27 - #define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i) 28 - #define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i)) 28 + #define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i) 29 + #define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i)) 29 30 30 - #define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m) 31 - #define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m) 31 + #define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i) 32 + #define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i) 33 + #define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i) 32 34 33 35 #endif 34 36
+4 -3
arch/blackfin/kernel/bfin_ksyms.c
··· 83 83 EXPORT_SYMBOL(insl_16); 84 84 85 85 #ifdef CONFIG_SMP 86 - EXPORT_SYMBOL(__raw_atomic_update_asm); 87 - EXPORT_SYMBOL(__raw_atomic_clear_asm); 88 - EXPORT_SYMBOL(__raw_atomic_set_asm); 86 + EXPORT_SYMBOL(__raw_atomic_add_asm); 87 + EXPORT_SYMBOL(__raw_atomic_and_asm); 88 + EXPORT_SYMBOL(__raw_atomic_or_asm); 89 89 EXPORT_SYMBOL(__raw_atomic_xor_asm); 90 90 EXPORT_SYMBOL(__raw_atomic_test_asm); 91 + 91 92 EXPORT_SYMBOL(__raw_xchg_1_asm); 92 93 EXPORT_SYMBOL(__raw_xchg_2_asm); 93 94 EXPORT_SYMBOL(__raw_xchg_4_asm);
+15 -15
arch/blackfin/mach-bf561/atomic.S
··· 587 587 * r0 = ptr 588 588 * r1 = value 589 589 * 590 - * Add a signed value to a 32bit word and return the new value atomically. 590 + * ADD a signed value to a 32bit word and return the new value atomically. 591 591 * Clobbers: r3:0, p1:0 592 592 */ 593 - ENTRY(___raw_atomic_update_asm) 593 + ENTRY(___raw_atomic_add_asm) 594 594 p1 = r0; 595 595 r3 = r1; 596 596 [--sp] = rets; ··· 603 603 r0 = r3; 604 604 rets = [sp++]; 605 605 rts; 606 - ENDPROC(___raw_atomic_update_asm) 606 + ENDPROC(___raw_atomic_add_asm) 607 607 608 608 /* 609 609 * r0 = ptr 610 610 * r1 = mask 611 611 * 612 - * Clear the mask bits from a 32bit word and return the old 32bit value 612 + * AND the mask bits from a 32bit word and return the old 32bit value 613 613 * atomically. 614 614 * Clobbers: r3:0, p1:0 615 615 */ 616 - ENTRY(___raw_atomic_clear_asm) 616 + ENTRY(___raw_atomic_and_asm) 617 617 p1 = r0; 618 - r3 = ~r1; 618 + r3 = r1; 619 619 [--sp] = rets; 620 620 call _get_core_lock; 621 621 r2 = [p1]; ··· 627 627 r0 = r3; 628 628 rets = [sp++]; 629 629 rts; 630 - ENDPROC(___raw_atomic_clear_asm) 630 + ENDPROC(___raw_atomic_and_asm) 631 631 632 632 /* 633 633 * r0 = ptr 634 634 * r1 = mask 635 635 * 636 - * Set the mask bits into a 32bit word and return the old 32bit value 636 + * OR the mask bits into a 32bit word and return the old 32bit value 637 637 * atomically. 
638 638 * Clobbers: r3:0, p1:0 639 639 */ 640 - ENTRY(___raw_atomic_set_asm) 640 + ENTRY(___raw_atomic_or_asm) 641 641 p1 = r0; 642 642 r3 = r1; 643 643 [--sp] = rets; ··· 651 651 r0 = r3; 652 652 rets = [sp++]; 653 653 rts; 654 - ENDPROC(___raw_atomic_set_asm) 654 + ENDPROC(___raw_atomic_or_asm) 655 655 656 656 /* 657 657 * r0 = ptr ··· 787 787 r2 = r1; 788 788 r1 = 1; 789 789 r1 <<= r2; 790 - jump ___raw_atomic_set_asm 790 + jump ___raw_atomic_or_asm 791 791 ENDPROC(___raw_bit_set_asm) 792 792 793 793 /* ··· 798 798 * Clobbers: r3:0, p1:0 799 799 */ 800 800 ENTRY(___raw_bit_clear_asm) 801 - r2 = r1; 802 - r1 = 1; 803 - r1 <<= r2; 804 - jump ___raw_atomic_clear_asm 801 + r2 = 1; 802 + r2 <<= r1; 803 + r1 = ~r2; 804 + jump ___raw_atomic_and_asm 805 805 ENDPROC(___raw_bit_clear_asm) 806 806 807 807 /*
+1 -1
arch/blackfin/mach-common/smp.c
··· 195 195 local_irq_save(flags); 196 196 for_each_cpu(cpu, cpumask) { 197 197 bfin_ipi_data = &per_cpu(bfin_ipi, cpu); 198 - atomic_set_mask((1 << msg), &bfin_ipi_data->bits); 198 + atomic_or((1 << msg), &bfin_ipi_data->bits); 199 199 atomic_inc(&bfin_ipi_data->count); 200 200 } 201 201 local_irq_restore(flags);
+55 -54
arch/frv/include/asm/atomic.h
··· 15 15 #define _ASM_ATOMIC_H 16 16 17 17 #include <linux/types.h> 18 - #include <asm/spr-regs.h> 19 18 #include <asm/cmpxchg.h> 20 19 #include <asm/barrier.h> 21 20 22 21 #ifdef CONFIG_SMP 23 22 #error not SMP safe 24 23 #endif 24 + 25 + #include <asm/atomic_defs.h> 25 26 26 27 /* 27 28 * Atomic operations that C can't guarantee us. Useful for ··· 35 34 #define atomic_read(v) ACCESS_ONCE((v)->counter) 36 35 #define atomic_set(v, i) (((v)->counter) = (i)) 37 36 38 - #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 37 + static inline int atomic_inc_return(atomic_t *v) 38 + { 39 + return __atomic_add_return(1, &v->counter); 40 + } 41 + 42 + static inline int atomic_dec_return(atomic_t *v) 43 + { 44 + return __atomic_sub_return(1, &v->counter); 45 + } 46 + 39 47 static inline int atomic_add_return(int i, atomic_t *v) 40 48 { 41 - unsigned long val; 42 - 43 - asm("0: \n" 44 - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 45 - " ckeq icc3,cc7 \n" 46 - " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ 47 - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 48 - " add%I2 %1,%2,%1 \n" 49 - " cst.p %1,%M0 ,cc3,#1 \n" 50 - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ 51 - " beq icc3,#0,0b \n" 52 - : "+U"(v->counter), "=&r"(val) 53 - : "NPr"(i) 54 - : "memory", "cc7", "cc3", "icc3" 55 - ); 56 - 57 - return val; 49 + return __atomic_add_return(i, &v->counter); 58 50 } 59 51 60 52 static inline int atomic_sub_return(int i, atomic_t *v) 61 53 { 62 - unsigned long val; 63 - 64 - asm("0: \n" 65 - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 66 - " ckeq icc3,cc7 \n" 67 - " ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ 68 - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 69 - " sub%I2 %1,%2,%1 \n" 70 - " cst.p %1,%M0 ,cc3,#1 \n" 71 - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */ 72 - " beq icc3,#0,0b \n" 73 - : "+U"(v->counter), "=&r"(val) 74 - : "NPr"(i) 75 - : "memory", "cc7", "cc3", "icc3" 76 - ); 77 - 78 - return val; 54 + return 
__atomic_sub_return(i, &v->counter); 79 55 } 80 - 81 - #else 82 - 83 - extern int atomic_add_return(int i, atomic_t *v); 84 - extern int atomic_sub_return(int i, atomic_t *v); 85 - 86 - #endif 87 56 88 57 static inline int atomic_add_negative(int i, atomic_t *v) 89 58 { ··· 72 101 73 102 static inline void atomic_inc(atomic_t *v) 74 103 { 75 - atomic_add_return(1, v); 104 + atomic_inc_return(v); 76 105 } 77 106 78 107 static inline void atomic_dec(atomic_t *v) 79 108 { 80 - atomic_sub_return(1, v); 109 + atomic_dec_return(v); 81 110 } 82 - 83 - #define atomic_dec_return(v) atomic_sub_return(1, (v)) 84 - #define atomic_inc_return(v) atomic_add_return(1, (v)) 85 111 86 112 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) 87 113 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) ··· 88 120 * 64-bit atomic ops 89 121 */ 90 122 typedef struct { 91 - volatile long long counter; 123 + long long counter; 92 124 } atomic64_t; 93 125 94 126 #define ATOMIC64_INIT(i) { (i) } 95 127 96 - static inline long long atomic64_read(atomic64_t *v) 128 + static inline long long atomic64_read(const atomic64_t *v) 97 129 { 98 130 long long counter; 99 131 100 132 asm("ldd%I1 %M1,%0" 101 133 : "=e"(counter) 102 134 : "m"(v->counter)); 135 + 103 136 return counter; 104 137 } 105 138 ··· 111 142 : "e"(i)); 112 143 } 113 144 114 - extern long long atomic64_inc_return(atomic64_t *v); 115 - extern long long atomic64_dec_return(atomic64_t *v); 116 - extern long long atomic64_add_return(long long i, atomic64_t *v); 117 - extern long long atomic64_sub_return(long long i, atomic64_t *v); 145 + static inline long long atomic64_inc_return(atomic64_t *v) 146 + { 147 + return __atomic64_add_return(1, &v->counter); 148 + } 149 + 150 + static inline long long atomic64_dec_return(atomic64_t *v) 151 + { 152 + return __atomic64_sub_return(1, &v->counter); 153 + } 154 + 155 + static inline long long atomic64_add_return(long long i, atomic64_t *v) 156 + { 157 + return 
__atomic64_add_return(i, &v->counter); 158 + } 159 + 160 + static inline long long atomic64_sub_return(long long i, atomic64_t *v) 161 + { 162 + return __atomic64_sub_return(i, &v->counter); 163 + } 118 164 119 165 static inline long long atomic64_add_negative(long long i, atomic64_t *v) 120 166 { ··· 160 176 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) 161 177 #define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0) 162 178 179 + 163 180 #define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new)) 164 181 #define atomic_xchg(v, new) (xchg(&(v)->counter, new)) 165 182 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter)) ··· 181 196 return c; 182 197 } 183 198 199 + #define ATOMIC_OP(op) \ 200 + static inline void atomic_##op(int i, atomic_t *v) \ 201 + { \ 202 + (void)__atomic32_fetch_##op(i, &v->counter); \ 203 + } \ 204 + \ 205 + static inline void atomic64_##op(long long i, atomic64_t *v) \ 206 + { \ 207 + (void)__atomic64_fetch_##op(i, &v->counter); \ 208 + } 209 + 210 + ATOMIC_OP(or) 211 + ATOMIC_OP(and) 212 + ATOMIC_OP(xor) 213 + 214 + #undef ATOMIC_OP 184 215 185 216 #endif /* _ASM_ATOMIC_H */
+172
arch/frv/include/asm/atomic_defs.h
··· 1 + 2 + #include <asm/spr-regs.h> 3 + 4 + #ifdef __ATOMIC_LIB__ 5 + 6 + #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 7 + 8 + #define ATOMIC_QUALS 9 + #define ATOMIC_EXPORT(x) EXPORT_SYMBOL(x) 10 + 11 + #else /* !OUTOFLINE && LIB */ 12 + 13 + #define ATOMIC_OP_RETURN(op) 14 + #define ATOMIC_FETCH_OP(op) 15 + 16 + #endif /* OUTOFLINE */ 17 + 18 + #else /* !__ATOMIC_LIB__ */ 19 + 20 + #define ATOMIC_EXPORT(x) 21 + 22 + #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 23 + 24 + #define ATOMIC_OP_RETURN(op) \ 25 + extern int __atomic_##op##_return(int i, int *v); \ 26 + extern long long __atomic64_##op##_return(long long i, long long *v); 27 + 28 + #define ATOMIC_FETCH_OP(op) \ 29 + extern int __atomic32_fetch_##op(int i, int *v); \ 30 + extern long long __atomic64_fetch_##op(long long i, long long *v); 31 + 32 + #else /* !OUTOFLINE && !LIB */ 33 + 34 + #define ATOMIC_QUALS static inline 35 + 36 + #endif /* OUTOFLINE */ 37 + #endif /* __ATOMIC_LIB__ */ 38 + 39 + 40 + /* 41 + * Note on the 64 bit inline asm variants... 42 + * 43 + * CSTD is a conditional instruction and needs a constrained memory reference. 44 + * Normally 'U' provides the correct constraints for conditional instructions 45 + * and this is used for the 32 bit version, however 'U' does not appear to work 46 + * for 64 bit values (gcc-4.9) 47 + * 48 + * The exact constraint is that conditional instructions cannot deal with an 49 + * immediate displacement in the memory reference, so what we do is we read the 50 + * address through a volatile cast into a local variable in order to insure we 51 + * _have_ to compute the correct address without displacement. This allows us 52 + * to use the regular 'm' for the memory address. 53 + * 54 + * Furthermore, the %Ln operand, which prints the low word register (r+1), 55 + * really only works for registers, this means we cannot allow immediate values 56 + * for the 64 bit versions -- like we do for the 32 bit ones. 
57 + * 58 + */ 59 + 60 + #ifndef ATOMIC_OP_RETURN 61 + #define ATOMIC_OP_RETURN(op) \ 62 + ATOMIC_QUALS int __atomic_##op##_return(int i, int *v) \ 63 + { \ 64 + int val; \ 65 + \ 66 + asm volatile( \ 67 + "0: \n" \ 68 + " orcc gr0,gr0,gr0,icc3 \n" \ 69 + " ckeq icc3,cc7 \n" \ 70 + " ld.p %M0,%1 \n" \ 71 + " orcr cc7,cc7,cc3 \n" \ 72 + " "#op"%I2 %1,%2,%1 \n" \ 73 + " cst.p %1,%M0 ,cc3,#1 \n" \ 74 + " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ 75 + " beq icc3,#0,0b \n" \ 76 + : "+U"(*v), "=&r"(val) \ 77 + : "NPr"(i) \ 78 + : "memory", "cc7", "cc3", "icc3" \ 79 + ); \ 80 + \ 81 + return val; \ 82 + } \ 83 + ATOMIC_EXPORT(__atomic_##op##_return); \ 84 + \ 85 + ATOMIC_QUALS long long __atomic64_##op##_return(long long i, long long *v) \ 86 + { \ 87 + long long *__v = READ_ONCE(v); \ 88 + long long val; \ 89 + \ 90 + asm volatile( \ 91 + "0: \n" \ 92 + " orcc gr0,gr0,gr0,icc3 \n" \ 93 + " ckeq icc3,cc7 \n" \ 94 + " ldd.p %M0,%1 \n" \ 95 + " orcr cc7,cc7,cc3 \n" \ 96 + " "#op"cc %L1,%L2,%L1,icc0 \n" \ 97 + " "#op"x %1,%2,%1,icc0 \n" \ 98 + " cstd.p %1,%M0 ,cc3,#1 \n" \ 99 + " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ 100 + " beq icc3,#0,0b \n" \ 101 + : "+m"(*__v), "=&e"(val) \ 102 + : "e"(i) \ 103 + : "memory", "cc7", "cc3", "icc0", "icc3" \ 104 + ); \ 105 + \ 106 + return val; \ 107 + } \ 108 + ATOMIC_EXPORT(__atomic64_##op##_return); 109 + #endif 110 + 111 + #ifndef ATOMIC_FETCH_OP 112 + #define ATOMIC_FETCH_OP(op) \ 113 + ATOMIC_QUALS int __atomic32_fetch_##op(int i, int *v) \ 114 + { \ 115 + int old, tmp; \ 116 + \ 117 + asm volatile( \ 118 + "0: \n" \ 119 + " orcc gr0,gr0,gr0,icc3 \n" \ 120 + " ckeq icc3,cc7 \n" \ 121 + " ld.p %M0,%1 \n" \ 122 + " orcr cc7,cc7,cc3 \n" \ 123 + " "#op"%I3 %1,%3,%2 \n" \ 124 + " cst.p %2,%M0 ,cc3,#1 \n" \ 125 + " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ 126 + " beq icc3,#0,0b \n" \ 127 + : "+U"(*v), "=&r"(old), "=r"(tmp) \ 128 + : "NPr"(i) \ 129 + : "memory", "cc7", "cc3", "icc3" \ 130 + ); \ 131 + \ 132 + return old; \ 133 + } \ 134 + 
ATOMIC_EXPORT(__atomic32_fetch_##op); \ 135 + \ 136 + ATOMIC_QUALS long long __atomic64_fetch_##op(long long i, long long *v) \ 137 + { \ 138 + long long *__v = READ_ONCE(v); \ 139 + long long old, tmp; \ 140 + \ 141 + asm volatile( \ 142 + "0: \n" \ 143 + " orcc gr0,gr0,gr0,icc3 \n" \ 144 + " ckeq icc3,cc7 \n" \ 145 + " ldd.p %M0,%1 \n" \ 146 + " orcr cc7,cc7,cc3 \n" \ 147 + " "#op" %L1,%L3,%L2 \n" \ 148 + " "#op" %1,%3,%2 \n" \ 149 + " cstd.p %2,%M0 ,cc3,#1 \n" \ 150 + " corcc gr29,gr29,gr0 ,cc3,#1 \n" \ 151 + " beq icc3,#0,0b \n" \ 152 + : "+m"(*__v), "=&e"(old), "=e"(tmp) \ 153 + : "e"(i) \ 154 + : "memory", "cc7", "cc3", "icc3" \ 155 + ); \ 156 + \ 157 + return old; \ 158 + } \ 159 + ATOMIC_EXPORT(__atomic64_fetch_##op); 160 + #endif 161 + 162 + ATOMIC_FETCH_OP(or) 163 + ATOMIC_FETCH_OP(and) 164 + ATOMIC_FETCH_OP(xor) 165 + 166 + ATOMIC_OP_RETURN(add) 167 + ATOMIC_OP_RETURN(sub) 168 + 169 + #undef ATOMIC_FETCH_OP 170 + #undef ATOMIC_OP_RETURN 171 + #undef ATOMIC_QUALS 172 + #undef ATOMIC_EXPORT
+10 -89
arch/frv/include/asm/bitops.h
··· 25 25 26 26 #include <asm-generic/bitops/ffz.h> 27 27 28 - #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 29 - static inline 30 - unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v) 31 - { 32 - unsigned long old, tmp; 33 - 34 - asm volatile( 35 - "0: \n" 36 - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 37 - " ckeq icc3,cc7 \n" 38 - " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ 39 - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 40 - " and%I3 %1,%3,%2 \n" 41 - " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ 42 - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ 43 - " beq icc3,#0,0b \n" 44 - : "+U"(*v), "=&r"(old), "=r"(tmp) 45 - : "NPr"(~mask) 46 - : "memory", "cc7", "cc3", "icc3" 47 - ); 48 - 49 - return old; 50 - } 51 - 52 - static inline 53 - unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v) 54 - { 55 - unsigned long old, tmp; 56 - 57 - asm volatile( 58 - "0: \n" 59 - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 60 - " ckeq icc3,cc7 \n" 61 - " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ 62 - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 63 - " or%I3 %1,%3,%2 \n" 64 - " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ 65 - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */ 66 - " beq icc3,#0,0b \n" 67 - : "+U"(*v), "=&r"(old), "=r"(tmp) 68 - : "NPr"(mask) 69 - : "memory", "cc7", "cc3", "icc3" 70 - ); 71 - 72 - return old; 73 - } 74 - 75 - static inline 76 - unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v) 77 - { 78 - unsigned long old, tmp; 79 - 80 - asm volatile( 81 - "0: \n" 82 - " orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ 83 - " ckeq icc3,cc7 \n" 84 - " ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */ 85 - " orcr cc7,cc7,cc3 \n" /* set CC3 to true */ 86 - " xor%I3 %1,%3,%2 \n" 87 - " cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */ 88 - " corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... 
clear ICC3.Z */ 89 - " beq icc3,#0,0b \n" 90 - : "+U"(*v), "=&r"(old), "=r"(tmp) 91 - : "NPr"(mask) 92 - : "memory", "cc7", "cc3", "icc3" 93 - ); 94 - 95 - return old; 96 - } 97 - 98 - #else 99 - 100 - extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v); 101 - extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v); 102 - extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v); 103 - 104 - #endif 105 - 106 - #define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v)) 107 - #define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v)) 28 + #include <asm/atomic.h> 108 29 109 30 static inline int test_and_clear_bit(unsigned long nr, volatile void *addr) 110 31 { 111 - volatile unsigned long *ptr = addr; 112 - unsigned long mask = 1UL << (nr & 31); 32 + unsigned int *ptr = (void *)addr; 33 + unsigned int mask = 1UL << (nr & 31); 113 34 ptr += nr >> 5; 114 - return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0; 35 + return (__atomic32_fetch_and(~mask, ptr) & mask) != 0; 115 36 } 116 37 117 38 static inline int test_and_set_bit(unsigned long nr, volatile void *addr) 118 39 { 119 - volatile unsigned long *ptr = addr; 120 - unsigned long mask = 1UL << (nr & 31); 40 + unsigned int *ptr = (void *)addr; 41 + unsigned int mask = 1UL << (nr & 31); 121 42 ptr += nr >> 5; 122 - return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0; 43 + return (__atomic32_fetch_or(mask, ptr) & mask) != 0; 123 44 } 124 45 125 46 static inline int test_and_change_bit(unsigned long nr, volatile void *addr) 126 47 { 127 - volatile unsigned long *ptr = addr; 128 - unsigned long mask = 1UL << (nr & 31); 48 + unsigned int *ptr = (void *)addr; 49 + unsigned int mask = 1UL << (nr & 31); 129 50 ptr += nr >> 5; 130 - return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0; 51 + return (__atomic32_fetch_xor(mask, ptr) & mask) != 0; 131 52 } 132 53 133 54 
static inline void clear_bit(unsigned long nr, volatile void *addr)
+3 -3
arch/frv/kernel/dma.c
··· 109 109 110 110 static DEFINE_RWLOCK(frv_dma_channels_lock); 111 111 112 - unsigned long frv_dma_inprogress; 112 + unsigned int frv_dma_inprogress; 113 113 114 114 #define frv_clear_dma_inprogress(channel) \ 115 - atomic_clear_mask(1 << (channel), &frv_dma_inprogress); 115 + (void)__atomic32_fetch_and(~(1 << (channel)), &frv_dma_inprogress); 116 116 117 117 #define frv_set_dma_inprogress(channel) \ 118 - atomic_set_mask(1 << (channel), &frv_dma_inprogress); 118 + (void)__atomic32_fetch_or(1 << (channel), &frv_dma_inprogress); 119 119 120 120 /*****************************************************************************/ 121 121 /*
-5
arch/frv/kernel/frv_ksyms.c
··· 58 58 EXPORT_SYMBOL(__insl_ns); 59 59 60 60 #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS 61 - EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask); 62 - EXPORT_SYMBOL(atomic_test_and_OR_mask); 63 - EXPORT_SYMBOL(atomic_test_and_XOR_mask); 64 - EXPORT_SYMBOL(atomic_add_return); 65 - EXPORT_SYMBOL(atomic_sub_return); 66 61 EXPORT_SYMBOL(__xchg_32); 67 62 EXPORT_SYMBOL(__cmpxchg_32); 68 63 #endif
+1 -1
arch/frv/lib/Makefile
··· 5 5 lib-y := \ 6 6 __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \ 7 7 checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \ 8 - outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o 8 + outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o atomic-lib.o
+7
arch/frv/lib/atomic-lib.c
··· 1 + 2 + #include <linux/export.h> 3 + #include <asm/atomic.h> 4 + 5 + #define __ATOMIC_LIB__ 6 + 7 + #include <asm/atomic_defs.h>
-110
arch/frv/lib/atomic-ops.S
··· 19 19 20 20 ############################################################################### 21 21 # 22 - # unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v); 23 - # 24 - ############################################################################### 25 - .globl atomic_test_and_ANDNOT_mask 26 - .type atomic_test_and_ANDNOT_mask,@function 27 - atomic_test_and_ANDNOT_mask: 28 - not.p gr8,gr10 29 - 0: 30 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 31 - ckeq icc3,cc7 32 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ 33 - orcr cc7,cc7,cc3 /* set CC3 to true */ 34 - and gr8,gr10,gr11 35 - cst.p gr11,@(gr9,gr0) ,cc3,#1 36 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 37 - beq icc3,#0,0b 38 - bralr 39 - 40 - .size atomic_test_and_ANDNOT_mask, .-atomic_test_and_ANDNOT_mask 41 - 42 - ############################################################################### 43 - # 44 - # unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v); 45 - # 46 - ############################################################################### 47 - .globl atomic_test_and_OR_mask 48 - .type atomic_test_and_OR_mask,@function 49 - atomic_test_and_OR_mask: 50 - or.p gr8,gr8,gr10 51 - 0: 52 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 53 - ckeq icc3,cc7 54 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ 55 - orcr cc7,cc7,cc3 /* set CC3 to true */ 56 - or gr8,gr10,gr11 57 - cst.p gr11,@(gr9,gr0) ,cc3,#1 58 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 59 - beq icc3,#0,0b 60 - bralr 61 - 62 - .size atomic_test_and_OR_mask, .-atomic_test_and_OR_mask 63 - 64 - ############################################################################### 65 - # 66 - # unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v); 67 - # 68 - ############################################################################### 69 - .globl atomic_test_and_XOR_mask 70 - .type 
atomic_test_and_XOR_mask,@function 71 - atomic_test_and_XOR_mask: 72 - or.p gr8,gr8,gr10 73 - 0: 74 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 75 - ckeq icc3,cc7 76 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ 77 - orcr cc7,cc7,cc3 /* set CC3 to true */ 78 - xor gr8,gr10,gr11 79 - cst.p gr11,@(gr9,gr0) ,cc3,#1 80 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 81 - beq icc3,#0,0b 82 - bralr 83 - 84 - .size atomic_test_and_XOR_mask, .-atomic_test_and_XOR_mask 85 - 86 - ############################################################################### 87 - # 88 - # int atomic_add_return(int i, atomic_t *v) 89 - # 90 - ############################################################################### 91 - .globl atomic_add_return 92 - .type atomic_add_return,@function 93 - atomic_add_return: 94 - or.p gr8,gr8,gr10 95 - 0: 96 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 97 - ckeq icc3,cc7 98 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ 99 - orcr cc7,cc7,cc3 /* set CC3 to true */ 100 - add gr8,gr10,gr8 101 - cst.p gr8,@(gr9,gr0) ,cc3,#1 102 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 103 - beq icc3,#0,0b 104 - bralr 105 - 106 - .size atomic_add_return, .-atomic_add_return 107 - 108 - ############################################################################### 109 - # 110 - # int atomic_sub_return(int i, atomic_t *v) 111 - # 112 - ############################################################################### 113 - .globl atomic_sub_return 114 - .type atomic_sub_return,@function 115 - atomic_sub_return: 116 - or.p gr8,gr8,gr10 117 - 0: 118 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 119 - ckeq icc3,cc7 120 - ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */ 121 - orcr cc7,cc7,cc3 /* set CC3 to true */ 122 - sub gr8,gr10,gr8 123 - cst.p gr8,@(gr9,gr0) ,cc3,#1 124 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 125 - beq icc3,#0,0b 126 - bralr 127 - 128 - .size atomic_sub_return, .-atomic_sub_return 129 - 130 - 
############################################################################### 131 - # 132 22 # uint32_t __xchg_32(uint32_t i, uint32_t *v) 133 23 # 134 24 ###############################################################################
-94
arch/frv/lib/atomic64-ops.S
··· 20 20 21 21 ############################################################################### 22 22 # 23 - # long long atomic64_inc_return(atomic64_t *v) 24 - # 25 - ############################################################################### 26 - .globl atomic64_inc_return 27 - .type atomic64_inc_return,@function 28 - atomic64_inc_return: 29 - or.p gr8,gr8,gr10 30 - 0: 31 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 32 - ckeq icc3,cc7 33 - ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ 34 - orcr cc7,cc7,cc3 /* set CC3 to true */ 35 - addicc gr9,#1,gr9,icc0 36 - addxi gr8,#0,gr8,icc0 37 - cstd.p gr8,@(gr10,gr0) ,cc3,#1 38 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 39 - beq icc3,#0,0b 40 - bralr 41 - 42 - .size atomic64_inc_return, .-atomic64_inc_return 43 - 44 - ############################################################################### 45 - # 46 - # long long atomic64_dec_return(atomic64_t *v) 47 - # 48 - ############################################################################### 49 - .globl atomic64_dec_return 50 - .type atomic64_dec_return,@function 51 - atomic64_dec_return: 52 - or.p gr8,gr8,gr10 53 - 0: 54 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 55 - ckeq icc3,cc7 56 - ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ 57 - orcr cc7,cc7,cc3 /* set CC3 to true */ 58 - subicc gr9,#1,gr9,icc0 59 - subxi gr8,#0,gr8,icc0 60 - cstd.p gr8,@(gr10,gr0) ,cc3,#1 61 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 62 - beq icc3,#0,0b 63 - bralr 64 - 65 - .size atomic64_dec_return, .-atomic64_dec_return 66 - 67 - ############################################################################### 68 - # 69 - # long long atomic64_add_return(long long i, atomic64_t *v) 70 - # 71 - ############################################################################### 72 - .globl atomic64_add_return 73 - .type atomic64_add_return,@function 74 - atomic64_add_return: 75 - or.p gr8,gr8,gr4 76 - or gr9,gr9,gr5 77 - 0: 78 - orcc 
gr0,gr0,gr0,icc3 /* set ICC3.Z */ 79 - ckeq icc3,cc7 80 - ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ 81 - orcr cc7,cc7,cc3 /* set CC3 to true */ 82 - addcc gr9,gr5,gr9,icc0 83 - addx gr8,gr4,gr8,icc0 84 - cstd.p gr8,@(gr10,gr0) ,cc3,#1 85 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 86 - beq icc3,#0,0b 87 - bralr 88 - 89 - .size atomic64_add_return, .-atomic64_add_return 90 - 91 - ############################################################################### 92 - # 93 - # long long atomic64_sub_return(long long i, atomic64_t *v) 94 - # 95 - ############################################################################### 96 - .globl atomic64_sub_return 97 - .type atomic64_sub_return,@function 98 - atomic64_sub_return: 99 - or.p gr8,gr8,gr4 100 - or gr9,gr9,gr5 101 - 0: 102 - orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */ 103 - ckeq icc3,cc7 104 - ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */ 105 - orcr cc7,cc7,cc3 /* set CC3 to true */ 106 - subcc gr9,gr5,gr9,icc0 107 - subx gr8,gr4,gr8,icc0 108 - cstd.p gr8,@(gr10,gr0) ,cc3,#1 109 - corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */ 110 - beq icc3,#0,0b 111 - bralr 112 - 113 - .size atomic64_sub_return, .-atomic64_sub_return 114 - 115 - ############################################################################### 116 - # 117 23 # uint64_t __xchg_64(uint64_t i, uint64_t *v) 118 24 # 119 25 ###############################################################################
+39 -106
arch/h8300/include/asm/atomic.h
··· 16 16 17 17 #include <linux/kernel.h> 18 18 19 - static inline int atomic_add_return(int i, atomic_t *v) 20 - { 21 - h8300flags flags; 22 - int ret; 23 - 24 - flags = arch_local_irq_save(); 25 - ret = v->counter += i; 26 - arch_local_irq_restore(flags); 27 - return ret; 19 + #define ATOMIC_OP_RETURN(op, c_op) \ 20 + static inline int atomic_##op##_return(int i, atomic_t *v) \ 21 + { \ 22 + h8300flags flags; \ 23 + int ret; \ 24 + \ 25 + flags = arch_local_irq_save(); \ 26 + ret = v->counter c_op i; \ 27 + arch_local_irq_restore(flags); \ 28 + return ret; \ 28 29 } 29 30 30 - #define atomic_add(i, v) atomic_add_return(i, v) 31 + #define ATOMIC_OP(op, c_op) \ 32 + static inline void atomic_##op(int i, atomic_t *v) \ 33 + { \ 34 + h8300flags flags; \ 35 + \ 36 + flags = arch_local_irq_save(); \ 37 + v->counter c_op i; \ 38 + arch_local_irq_restore(flags); \ 39 + } 40 + 41 + ATOMIC_OP_RETURN(add, +=) 42 + ATOMIC_OP_RETURN(sub, -=) 43 + 44 + ATOMIC_OP(and, &=) 45 + ATOMIC_OP(or, |=) 46 + ATOMIC_OP(xor, ^=) 47 + 48 + #undef ATOMIC_OP_RETURN 49 + #undef ATOMIC_OP 50 + 51 + #define atomic_add(i, v) (void)atomic_add_return(i, v) 31 52 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) 32 53 33 - static inline int atomic_sub_return(int i, atomic_t *v) 34 - { 35 - h8300flags flags; 36 - int ret; 54 + #define atomic_sub(i, v) (void)atomic_sub_return(i, v) 55 + #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) 37 56 38 - flags = arch_local_irq_save(); 39 - ret = v->counter -= i; 40 - arch_local_irq_restore(flags); 41 - return ret; 42 - } 57 + #define atomic_inc_return(v) atomic_add_return(1, v) 58 + #define atomic_dec_return(v) atomic_sub_return(1, v) 43 59 44 - #define atomic_sub(i, v) atomic_sub_return(i, v) 45 - #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) 60 + #define atomic_inc(v) (void)atomic_inc_return(v) 61 + #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) 46 62 47 - static inline int 
atomic_inc_return(atomic_t *v) 48 - { 49 - h8300flags flags; 50 - int ret; 51 - 52 - flags = arch_local_irq_save(); 53 - v->counter++; 54 - ret = v->counter; 55 - arch_local_irq_restore(flags); 56 - return ret; 57 - } 58 - 59 - #define atomic_inc(v) atomic_inc_return(v) 60 - 61 - /* 62 - * atomic_inc_and_test - increment and test 63 - * @v: pointer of type atomic_t 64 - * 65 - * Atomically increments @v by 1 66 - * and returns true if the result is zero, or false for all 67 - * other cases. 68 - */ 69 - #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) 70 - 71 - static inline int atomic_dec_return(atomic_t *v) 72 - { 73 - h8300flags flags; 74 - int ret; 75 - 76 - flags = arch_local_irq_save(); 77 - --v->counter; 78 - ret = v->counter; 79 - arch_local_irq_restore(flags); 80 - return ret; 81 - } 82 - 83 - #define atomic_dec(v) atomic_dec_return(v) 84 - 85 - static inline int atomic_dec_and_test(atomic_t *v) 86 - { 87 - h8300flags flags; 88 - int ret; 89 - 90 - flags = arch_local_irq_save(); 91 - --v->counter; 92 - ret = v->counter; 93 - arch_local_irq_restore(flags); 94 - return ret == 0; 95 - } 63 + #define atomic_dec(v) (void)atomic_dec_return(v) 64 + #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) 96 65 97 66 static inline int atomic_cmpxchg(atomic_t *v, int old, int new) 98 67 { ··· 88 119 arch_local_irq_restore(flags); 89 120 return ret; 90 121 } 91 - 92 - static inline void atomic_clear_mask(unsigned long mask, unsigned long *v) 93 - { 94 - unsigned char ccr; 95 - unsigned long tmp; 96 - 97 - __asm__ __volatile__("stc ccr,%w3\n\t" 98 - "orc #0x80,ccr\n\t" 99 - "mov.l %0,%1\n\t" 100 - "and.l %2,%1\n\t" 101 - "mov.l %1,%0\n\t" 102 - "ldc %w3,ccr" 103 - : "=m"(*v), "=r"(tmp) 104 - : "g"(~(mask)), "r"(ccr)); 105 - } 106 - 107 - static inline void atomic_set_mask(unsigned long mask, unsigned long *v) 108 - { 109 - unsigned char ccr; 110 - unsigned long tmp; 111 - 112 - __asm__ __volatile__("stc ccr,%w3\n\t" 113 - "orc #0x80,ccr\n\t" 114 - "mov.l 
%0,%1\n\t" 115 - "or.l %2,%1\n\t" 116 - "mov.l %1,%0\n\t" 117 - "ldc %w3,ccr" 118 - : "=m"(*v), "=r"(tmp) 119 - : "g"(~(mask)), "r"(ccr)); 120 - } 121 - 122 - /* Atomic operations are already serializing */ 123 - #define smp_mb__before_atomic_dec() barrier() 124 - #define smp_mb__after_atomic_dec() barrier() 125 - #define smp_mb__before_atomic_inc() barrier() 126 - #define smp_mb__after_atomic_inc() barrier() 127 122 128 123 #endif /* __ARCH_H8300_ATOMIC __ */
+4
arch/hexagon/include/asm/atomic.h
··· 132 132 ATOMIC_OPS(add) 133 133 ATOMIC_OPS(sub) 134 134 135 + ATOMIC_OP(and) 136 + ATOMIC_OP(or) 137 + ATOMIC_OP(xor) 138 + 135 139 #undef ATOMIC_OPS 136 140 #undef ATOMIC_OP_RETURN 137 141 #undef ATOMIC_OP
+20 -4
arch/ia64/include/asm/atomic.h
··· 45 45 ATOMIC_OP(add, +) 46 46 ATOMIC_OP(sub, -) 47 47 48 - #undef ATOMIC_OP 49 - 50 48 #define atomic_add_return(i,v) \ 51 49 ({ \ 52 50 int __ia64_aar_i = (i); \ ··· 69 71 : ia64_atomic_sub(__ia64_asr_i, v); \ 70 72 }) 71 73 74 + ATOMIC_OP(and, &) 75 + ATOMIC_OP(or, |) 76 + ATOMIC_OP(xor, ^) 77 + 78 + #define atomic_and(i,v) (void)ia64_atomic_and(i,v) 79 + #define atomic_or(i,v) (void)ia64_atomic_or(i,v) 80 + #define atomic_xor(i,v) (void)ia64_atomic_xor(i,v) 81 + 82 + #undef ATOMIC_OP 83 + 72 84 #define ATOMIC64_OP(op, c_op) \ 73 85 static __inline__ long \ 74 86 ia64_atomic64_##op (__s64 i, atomic64_t *v) \ ··· 96 88 97 89 ATOMIC64_OP(add, +) 98 90 ATOMIC64_OP(sub, -) 99 - 100 - #undef ATOMIC64_OP 101 91 102 92 #define atomic64_add_return(i,v) \ 103 93 ({ \ ··· 120 114 ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \ 121 115 : ia64_atomic64_sub(__ia64_asr_i, v); \ 122 116 }) 117 + 118 + ATOMIC64_OP(and, &) 119 + ATOMIC64_OP(or, |) 120 + ATOMIC64_OP(xor, ^) 121 + 122 + #define atomic64_and(i,v) (void)ia64_atomic64_and(i,v) 123 + #define atomic64_or(i,v) (void)ia64_atomic64_or(i,v) 124 + #define atomic64_xor(i,v) (void)ia64_atomic64_xor(i,v) 125 + 126 + #undef ATOMIC64_OP 123 127 124 128 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) 125 129 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+4 -41
arch/m32r/include/asm/atomic.h
··· 94 94 ATOMIC_OPS(add) 95 95 ATOMIC_OPS(sub) 96 96 97 + ATOMIC_OP(and) 98 + ATOMIC_OP(or) 99 + ATOMIC_OP(xor) 100 + 97 101 #undef ATOMIC_OPS 98 102 #undef ATOMIC_OP_RETURN 99 103 #undef ATOMIC_OP ··· 241 237 c = old; 242 238 } 243 239 return c; 244 - } 245 - 246 - 247 - static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr) 248 - { 249 - unsigned long flags; 250 - unsigned long tmp; 251 - 252 - local_irq_save(flags); 253 - __asm__ __volatile__ ( 254 - "# atomic_clear_mask \n\t" 255 - DCACHE_CLEAR("%0", "r5", "%1") 256 - M32R_LOCK" %0, @%1; \n\t" 257 - "and %0, %2; \n\t" 258 - M32R_UNLOCK" %0, @%1; \n\t" 259 - : "=&r" (tmp) 260 - : "r" (addr), "r" (~mask) 261 - : "memory" 262 - __ATOMIC_CLOBBER 263 - ); 264 - local_irq_restore(flags); 265 - } 266 - 267 - static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr) 268 - { 269 - unsigned long flags; 270 - unsigned long tmp; 271 - 272 - local_irq_save(flags); 273 - __asm__ __volatile__ ( 274 - "# atomic_set_mask \n\t" 275 - DCACHE_CLEAR("%0", "r5", "%1") 276 - M32R_LOCK" %0, @%1; \n\t" 277 - "or %0, %2; \n\t" 278 - M32R_UNLOCK" %0, @%1; \n\t" 279 - : "=&r" (tmp) 280 - : "r" (addr), "r" (mask) 281 - : "memory" 282 - __ATOMIC_CLOBBER 283 - ); 284 - local_irq_restore(flags); 285 240 } 286 241 287 242 #endif /* _ASM_M32R_ATOMIC_H */
+2 -2
arch/m32r/kernel/smp.c
··· 156 156 cpumask_clear_cpu(smp_processor_id(), &cpumask); 157 157 spin_lock(&flushcache_lock); 158 158 mask=cpumask_bits(&cpumask); 159 - atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask); 159 + atomic_or(*mask, (atomic_t *)&flushcache_cpumask); 160 160 send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0); 161 161 _flush_cache_copyback_all(); 162 162 while (flushcache_cpumask) ··· 407 407 flush_vma = vma; 408 408 flush_va = va; 409 409 mask=cpumask_bits(&cpumask); 410 - atomic_set_mask(*mask, (atomic_t *)&flush_cpumask); 410 + atomic_or(*mask, (atomic_t *)&flush_cpumask); 411 411 412 412 /* 413 413 * We have to send the IPI only to
+4 -10
arch/m68k/include/asm/atomic.h
··· 77 77 ATOMIC_OPS(add, +=, add) 78 78 ATOMIC_OPS(sub, -=, sub) 79 79 80 + ATOMIC_OP(and, &=, and) 81 + ATOMIC_OP(or, |=, or) 82 + ATOMIC_OP(xor, ^=, eor) 83 + 80 84 #undef ATOMIC_OPS 81 85 #undef ATOMIC_OP_RETURN 82 86 #undef ATOMIC_OP ··· 172 168 : "=d" (c), "+m" (*v) 173 169 : ASM_DI (i)); 174 170 return c != 0; 175 - } 176 - 177 - static inline void atomic_clear_mask(unsigned long mask, unsigned long *v) 178 - { 179 - __asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask))); 180 - } 181 - 182 - static inline void atomic_set_mask(unsigned long mask, unsigned long *v) 183 - { 184 - __asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask)); 185 171 } 186 172 187 173 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+4 -34
arch/metag/include/asm/atomic_lnkget.h
··· 74 74 ATOMIC_OPS(add) 75 75 ATOMIC_OPS(sub) 76 76 77 + ATOMIC_OP(and) 78 + ATOMIC_OP(or) 79 + ATOMIC_OP(xor) 80 + 77 81 #undef ATOMIC_OPS 78 82 #undef ATOMIC_OP_RETURN 79 83 #undef ATOMIC_OP 80 - 81 - static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 82 - { 83 - int temp; 84 - 85 - asm volatile ( 86 - "1: LNKGETD %0, [%1]\n" 87 - " AND %0, %0, %2\n" 88 - " LNKSETD [%1] %0\n" 89 - " DEFR %0, TXSTAT\n" 90 - " ANDT %0, %0, #HI(0x3f000000)\n" 91 - " CMPT %0, #HI(0x02000000)\n" 92 - " BNZ 1b\n" 93 - : "=&d" (temp) 94 - : "da" (&v->counter), "bd" (~mask) 95 - : "cc"); 96 - } 97 - 98 - static inline void atomic_set_mask(unsigned int mask, atomic_t *v) 99 - { 100 - int temp; 101 - 102 - asm volatile ( 103 - "1: LNKGETD %0, [%1]\n" 104 - " OR %0, %0, %2\n" 105 - " LNKSETD [%1], %0\n" 106 - " DEFR %0, TXSTAT\n" 107 - " ANDT %0, %0, #HI(0x3f000000)\n" 108 - " CMPT %0, #HI(0x02000000)\n" 109 - " BNZ 1b\n" 110 - : "=&d" (temp) 111 - : "da" (&v->counter), "bd" (mask) 112 - : "cc"); 113 - } 114 84 115 85 static inline int atomic_cmpxchg(atomic_t *v, int old, int new) 116 86 {
+3 -20
arch/metag/include/asm/atomic_lock1.h
··· 68 68 69 69 ATOMIC_OPS(add, +=) 70 70 ATOMIC_OPS(sub, -=) 71 + ATOMIC_OP(and, &=) 72 + ATOMIC_OP(or, |=) 73 + ATOMIC_OP(xor, ^=) 71 74 72 75 #undef ATOMIC_OPS 73 76 #undef ATOMIC_OP_RETURN 74 77 #undef ATOMIC_OP 75 - 76 - static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 77 - { 78 - unsigned long flags; 79 - 80 - __global_lock1(flags); 81 - fence(); 82 - v->counter &= ~mask; 83 - __global_unlock1(flags); 84 - } 85 - 86 - static inline void atomic_set_mask(unsigned int mask, atomic_t *v) 87 - { 88 - unsigned long flags; 89 - 90 - __global_lock1(flags); 91 - fence(); 92 - v->counter |= mask; 93 - __global_unlock1(flags); 94 - } 95 78 96 79 static inline int atomic_cmpxchg(atomic_t *v, int old, int new) 97 80 {
+7
arch/mips/include/asm/atomic.h
··· 137 137 ATOMIC_OPS(add, +=, addu) 138 138 ATOMIC_OPS(sub, -=, subu) 139 139 140 + ATOMIC_OP(and, &=, and) 141 + ATOMIC_OP(or, |=, or) 142 + ATOMIC_OP(xor, ^=, xor) 143 + 140 144 #undef ATOMIC_OPS 141 145 #undef ATOMIC_OP_RETURN 142 146 #undef ATOMIC_OP ··· 420 416 421 417 ATOMIC64_OPS(add, +=, daddu) 422 418 ATOMIC64_OPS(sub, -=, dsubu) 419 + ATOMIC64_OP(and, &=, and) 420 + ATOMIC64_OP(or, |=, or) 421 + ATOMIC64_OP(xor, ^=, xor) 423 422 424 423 #undef ATOMIC64_OPS 425 424 #undef ATOMIC64_OP_RETURN
+4 -67
arch/mn10300/include/asm/atomic.h
··· 89 89 ATOMIC_OPS(add) 90 90 ATOMIC_OPS(sub) 91 91 92 + ATOMIC_OP(and) 93 + ATOMIC_OP(or) 94 + ATOMIC_OP(xor) 95 + 92 96 #undef ATOMIC_OPS 93 97 #undef ATOMIC_OP_RETURN 94 98 #undef ATOMIC_OP ··· 130 126 131 127 #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) 132 128 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) 133 - 134 - /** 135 - * atomic_clear_mask - Atomically clear bits in memory 136 - * @mask: Mask of the bits to be cleared 137 - * @v: pointer to word in memory 138 - * 139 - * Atomically clears the bits set in mask from the memory word specified. 140 - */ 141 - static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) 142 - { 143 - #ifdef CONFIG_SMP 144 - int status; 145 - 146 - asm volatile( 147 - "1: mov %3,(_AAR,%2) \n" 148 - " mov (_ADR,%2),%0 \n" 149 - " and %4,%0 \n" 150 - " mov %0,(_ADR,%2) \n" 151 - " mov (_ADR,%2),%0 \n" /* flush */ 152 - " mov (_ASR,%2),%0 \n" 153 - " or %0,%0 \n" 154 - " bne 1b \n" 155 - : "=&r"(status), "=m"(*addr) 156 - : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask) 157 - : "memory", "cc"); 158 - #else 159 - unsigned long flags; 160 - 161 - mask = ~mask; 162 - flags = arch_local_cli_save(); 163 - *addr &= mask; 164 - arch_local_irq_restore(flags); 165 - #endif 166 - } 167 - 168 - /** 169 - * atomic_set_mask - Atomically set bits in memory 170 - * @mask: Mask of the bits to be set 171 - * @v: pointer to word in memory 172 - * 173 - * Atomically sets the bits set in mask from the memory word specified. 
174 - */ 175 - static inline void atomic_set_mask(unsigned long mask, unsigned long *addr) 176 - { 177 - #ifdef CONFIG_SMP 178 - int status; 179 - 180 - asm volatile( 181 - "1: mov %3,(_AAR,%2) \n" 182 - " mov (_ADR,%2),%0 \n" 183 - " or %4,%0 \n" 184 - " mov %0,(_ADR,%2) \n" 185 - " mov (_ADR,%2),%0 \n" /* flush */ 186 - " mov (_ASR,%2),%0 \n" 187 - " or %0,%0 \n" 188 - " bne 1b \n" 189 - : "=&r"(status), "=m"(*addr) 190 - : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask) 191 - : "memory", "cc"); 192 - #else 193 - unsigned long flags; 194 - 195 - flags = arch_local_cli_save(); 196 - *addr |= mask; 197 - arch_local_irq_restore(flags); 198 - #endif 199 - } 200 129 201 130 #endif /* __KERNEL__ */ 202 131 #endif /* CONFIG_SMP */
+1 -1
arch/mn10300/mm/tlb-smp.c
··· 119 119 flush_mm = mm; 120 120 flush_va = va; 121 121 #if NR_CPUS <= BITS_PER_LONG 122 - atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]); 122 + atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]); 123 123 #else 124 124 #error Not supported. 125 125 #endif
+7
arch/parisc/include/asm/atomic.h
··· 126 126 ATOMIC_OPS(add, +=) 127 127 ATOMIC_OPS(sub, -=) 128 128 129 + ATOMIC_OP(and, &=) 130 + ATOMIC_OP(or, |=) 131 + ATOMIC_OP(xor, ^=) 132 + 129 133 #undef ATOMIC_OPS 130 134 #undef ATOMIC_OP_RETURN 131 135 #undef ATOMIC_OP ··· 189 185 190 186 ATOMIC64_OPS(add, +=) 191 187 ATOMIC64_OPS(sub, -=) 188 + ATOMIC64_OP(and, &=) 189 + ATOMIC64_OP(or, |=) 190 + ATOMIC64_OP(xor, ^=) 192 191 193 192 #undef ATOMIC64_OPS 194 193 #undef ATOMIC64_OP_RETURN
+7
arch/powerpc/include/asm/atomic.h
··· 67 67 ATOMIC_OPS(add, add) 68 68 ATOMIC_OPS(sub, subf) 69 69 70 + ATOMIC_OP(and, and) 71 + ATOMIC_OP(or, or) 72 + ATOMIC_OP(xor, xor) 73 + 70 74 #undef ATOMIC_OPS 71 75 #undef ATOMIC_OP_RETURN 72 76 #undef ATOMIC_OP ··· 308 304 309 305 ATOMIC64_OPS(add, add) 310 306 ATOMIC64_OPS(sub, subf) 307 + ATOMIC64_OP(and, and) 308 + ATOMIC64_OP(or, or) 309 + ATOMIC64_OP(xor, xor) 311 310 312 311 #undef ATOMIC64_OPS 313 312 #undef ATOMIC64_OP_RETURN
-19
arch/powerpc/kernel/misc_32.S
··· 596 596 b 2b 597 597 598 598 /* 599 - * void atomic_clear_mask(atomic_t mask, atomic_t *addr) 600 - * void atomic_set_mask(atomic_t mask, atomic_t *addr); 601 - */ 602 - _GLOBAL(atomic_clear_mask) 603 - 10: lwarx r5,0,r4 604 - andc r5,r5,r3 605 - PPC405_ERR77(0,r4) 606 - stwcx. r5,0,r4 607 - bne- 10b 608 - blr 609 - _GLOBAL(atomic_set_mask) 610 - 10: lwarx r5,0,r4 611 - or r5,r5,r3 612 - PPC405_ERR77(0,r4) 613 - stwcx. r5,0,r4 614 - bne- 10b 615 - blr 616 - 617 - /* 618 599 * Extended precision shifts. 619 600 * 620 601 * Updated to be valid for shift counts from 0 to 63 inclusive.
+24 -17
arch/s390/include/asm/atomic.h
··· 27 27 #define __ATOMIC_OR "lao" 28 28 #define __ATOMIC_AND "lan" 29 29 #define __ATOMIC_ADD "laa" 30 + #define __ATOMIC_XOR "lax" 30 31 #define __ATOMIC_BARRIER "bcr 14,0\n" 31 32 32 33 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \ ··· 50 49 #define __ATOMIC_OR "or" 51 50 #define __ATOMIC_AND "nr" 52 51 #define __ATOMIC_ADD "ar" 52 + #define __ATOMIC_XOR "xr" 53 53 #define __ATOMIC_BARRIER "\n" 54 54 55 55 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \ ··· 120 118 #define atomic_dec_return(_v) atomic_sub_return(1, _v) 121 119 #define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) 122 120 123 - static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 124 - { 125 - __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER); 121 + #define ATOMIC_OP(op, OP) \ 122 + static inline void atomic_##op(int i, atomic_t *v) \ 123 + { \ 124 + __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \ 126 125 } 127 126 128 - static inline void atomic_set_mask(unsigned int mask, atomic_t *v) 129 - { 130 - __ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER); 131 - } 127 + ATOMIC_OP(and, AND) 128 + ATOMIC_OP(or, OR) 129 + ATOMIC_OP(xor, XOR) 130 + 131 + #undef ATOMIC_OP 132 132 133 133 #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 134 134 ··· 171 167 #define __ATOMIC64_OR "laog" 172 168 #define __ATOMIC64_AND "lang" 173 169 #define __ATOMIC64_ADD "laag" 170 + #define __ATOMIC64_XOR "laxg" 174 171 #define __ATOMIC64_BARRIER "bcr 14,0\n" 175 172 176 173 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \ ··· 194 189 #define __ATOMIC64_OR "ogr" 195 190 #define __ATOMIC64_AND "ngr" 196 191 #define __ATOMIC64_ADD "agr" 192 + #define __ATOMIC64_XOR "xgr" 197 193 #define __ATOMIC64_BARRIER "\n" 198 194 199 195 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \ ··· 253 247 __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER); 254 248 } 255 249 256 - static inline void atomic64_clear_mask(unsigned 
long mask, atomic64_t *v) 257 - { 258 - __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER); 259 - } 260 - 261 - static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v) 262 - { 263 - __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER); 264 - } 265 - 266 250 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) 267 251 268 252 static inline long long atomic64_cmpxchg(atomic64_t *v, ··· 266 270 return old; 267 271 } 268 272 273 + #define ATOMIC64_OP(op, OP) \ 274 + static inline void atomic64_##op(long i, atomic64_t *v) \ 275 + { \ 276 + __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \ 277 + } 278 + 279 + ATOMIC64_OP(and, AND) 280 + ATOMIC64_OP(or, OR) 281 + ATOMIC64_OP(xor, XOR) 282 + 283 + #undef ATOMIC64_OP 269 284 #undef __ATOMIC64_LOOP 270 285 271 286 static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+2 -2
arch/s390/kernel/time.c
··· 381 381 * increase the "sequence" counter to avoid the race of an 382 382 * etr event and the complete recovery against get_sync_clock. 383 383 */ 384 - atomic_clear_mask(0x80000000, sw_ptr); 384 + atomic_andnot(0x80000000, sw_ptr); 385 385 atomic_inc(sw_ptr); 386 386 } 387 387 ··· 392 392 static void enable_sync_clock(void) 393 393 { 394 394 atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word); 395 - atomic_set_mask(0x80000000, sw_ptr); 395 + atomic_or(0x80000000, sw_ptr); 396 396 } 397 397 398 398 /*
+15 -15
arch/s390/kvm/interrupt.c
··· 170 170 171 171 static void __set_cpu_idle(struct kvm_vcpu *vcpu) 172 172 { 173 - atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 173 + atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 174 174 set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); 175 175 } 176 176 177 177 static void __unset_cpu_idle(struct kvm_vcpu *vcpu) 178 178 { 179 - atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 179 + atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); 180 180 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); 181 181 } 182 182 183 183 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) 184 184 { 185 - atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, 186 - &vcpu->arch.sie_block->cpuflags); 185 + atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, 186 + &vcpu->arch.sie_block->cpuflags); 187 187 vcpu->arch.sie_block->lctl = 0x0000; 188 188 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); 189 189 ··· 196 196 197 197 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) 198 198 { 199 - atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); 199 + atomic_or(flag, &vcpu->arch.sie_block->cpuflags); 200 200 } 201 201 202 202 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) ··· 919 919 spin_unlock(&li->lock); 920 920 921 921 /* clear pending external calls set by sigp interpretation facility */ 922 - atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags); 922 + atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags); 923 923 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0; 924 924 } 925 925 ··· 1020 1020 1021 1021 li->irq.ext = irq->u.ext; 1022 1022 set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); 1023 - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1023 + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1024 1024 return 0; 1025 1025 } 1026 1026 ··· 1035 1035 /* another external call is pending */ 1036 1036 return 
-EBUSY; 1037 1037 } 1038 - atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); 1038 + atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); 1039 1039 return 0; 1040 1040 } 1041 1041 ··· 1061 1061 if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs)) 1062 1062 return -EBUSY; 1063 1063 *extcall = irq->u.extcall; 1064 - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1064 + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1065 1065 return 0; 1066 1066 } 1067 1067 ··· 1133 1133 1134 1134 set_bit(irq->u.emerg.code, li->sigp_emerg_pending); 1135 1135 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); 1136 - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1136 + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1137 1137 return 0; 1138 1138 } 1139 1139 ··· 1177 1177 0, 0, 2); 1178 1178 1179 1179 set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); 1180 - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1180 + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1181 1181 return 0; 1182 1182 } 1183 1183 ··· 1190 1190 0, 0, 2); 1191 1191 1192 1192 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); 1193 - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1193 + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1194 1194 return 0; 1195 1195 } 1196 1196 ··· 1369 1369 spin_lock(&li->lock); 1370 1370 switch (type) { 1371 1371 case KVM_S390_MCHK: 1372 - atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); 1372 + atomic_or(CPUSTAT_STOP_INT, li->cpuflags); 1373 1373 break; 1374 1374 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: 1375 - atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags); 1375 + atomic_or(CPUSTAT_IO_INT, li->cpuflags); 1376 1376 break; 1377 1377 default: 1378 - atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); 1378 + atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1379 1379 break; 1380 1380 } 1381 1381 spin_unlock(&li->lock);
+16 -16
arch/s390/kvm/kvm-s390.c
··· 1215 1215 } 1216 1216 restore_access_regs(vcpu->run->s.regs.acrs); 1217 1217 gmap_enable(vcpu->arch.gmap); 1218 - atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 1218 + atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 1219 1219 } 1220 1220 1221 1221 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 1222 1222 { 1223 - atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 1223 + atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 1224 1224 gmap_disable(vcpu->arch.gmap); 1225 1225 if (test_kvm_facility(vcpu->kvm, 129)) { 1226 1226 save_fp_ctl(&vcpu->run->s.regs.fpc); ··· 1320 1320 CPUSTAT_STOPPED); 1321 1321 1322 1322 if (test_kvm_facility(vcpu->kvm, 78)) 1323 - atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags); 1323 + atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags); 1324 1324 else if (test_kvm_facility(vcpu->kvm, 8)) 1325 - atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags); 1325 + atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags); 1326 1326 1327 1327 kvm_s390_vcpu_setup_model(vcpu); 1328 1328 ··· 1422 1422 1423 1423 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu) 1424 1424 { 1425 - atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 1425 + atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 1426 1426 exit_sie(vcpu); 1427 1427 } 1428 1428 1429 1429 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu) 1430 1430 { 1431 - atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 1431 + atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 1432 1432 } 1433 1433 1434 1434 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu) 1435 1435 { 1436 - atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20); 1436 + atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); 1437 1437 exit_sie(vcpu); 1438 1438 } 1439 1439 1440 1440 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) 1441 1441 { 1442 - atomic_clear_mask(PROG_REQUEST, 
&vcpu->arch.sie_block->prog20); 1442 + atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); 1443 1443 } 1444 1444 1445 1445 /* ··· 1448 1448 * return immediately. */ 1449 1449 void exit_sie(struct kvm_vcpu *vcpu) 1450 1450 { 1451 - atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); 1451 + atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); 1452 1452 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) 1453 1453 cpu_relax(); 1454 1454 } ··· 1672 1672 if (dbg->control & KVM_GUESTDBG_ENABLE) { 1673 1673 vcpu->guest_debug = dbg->control; 1674 1674 /* enforce guest PER */ 1675 - atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 1675 + atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 1676 1676 1677 1677 if (dbg->control & KVM_GUESTDBG_USE_HW_BP) 1678 1678 rc = kvm_s390_import_bp_data(vcpu, dbg); 1679 1679 } else { 1680 - atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 1680 + atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 1681 1681 vcpu->arch.guestdbg.last_bp = 0; 1682 1682 } 1683 1683 1684 1684 if (rc) { 1685 1685 vcpu->guest_debug = 0; 1686 1686 kvm_s390_clear_bp_data(vcpu); 1687 - atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 1687 + atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 1688 1688 } 1689 1689 1690 1690 return rc; ··· 1771 1771 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { 1772 1772 if (!ibs_enabled(vcpu)) { 1773 1773 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); 1774 - atomic_set_mask(CPUSTAT_IBS, 1774 + atomic_or(CPUSTAT_IBS, 1775 1775 &vcpu->arch.sie_block->cpuflags); 1776 1776 } 1777 1777 goto retry; ··· 1780 1780 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { 1781 1781 if (ibs_enabled(vcpu)) { 1782 1782 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); 1783 - atomic_clear_mask(CPUSTAT_IBS, 1783 + atomic_andnot(CPUSTAT_IBS, 1784 1784 &vcpu->arch.sie_block->cpuflags); 1785 1785 } 1786 1786 goto retry; ··· 2280 2280 
__disable_ibs_on_all_vcpus(vcpu->kvm); 2281 2281 } 2282 2282 2283 - atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 2283 + atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 2284 2284 /* 2285 2285 * Another VCPU might have used IBS while we were offline. 2286 2286 * Let's play safe and flush the VCPU at startup. ··· 2306 2306 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ 2307 2307 kvm_s390_clear_stop_irq(vcpu); 2308 2308 2309 - atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 2309 + atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 2310 2310 __disable_ibs_on_vcpu(vcpu); 2311 2311 2312 2312 for (i = 0; i < online_vcpus; i++) {
+4 -39
arch/sh/include/asm/atomic-grb.h
··· 48 48 ATOMIC_OPS(add) 49 49 ATOMIC_OPS(sub) 50 50 51 + ATOMIC_OP(and) 52 + ATOMIC_OP(or) 53 + ATOMIC_OP(xor) 54 + 51 55 #undef ATOMIC_OPS 52 56 #undef ATOMIC_OP_RETURN 53 57 #undef ATOMIC_OP 54 - 55 - static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 56 - { 57 - int tmp; 58 - unsigned int _mask = ~mask; 59 - 60 - __asm__ __volatile__ ( 61 - " .align 2 \n\t" 62 - " mova 1f, r0 \n\t" /* r0 = end point */ 63 - " mov r15, r1 \n\t" /* r1 = saved sp */ 64 - " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ 65 - " mov.l @%1, %0 \n\t" /* load old value */ 66 - " and %2, %0 \n\t" /* add */ 67 - " mov.l %0, @%1 \n\t" /* store new value */ 68 - "1: mov r1, r15 \n\t" /* LOGOUT */ 69 - : "=&r" (tmp), 70 - "+r" (v) 71 - : "r" (_mask) 72 - : "memory" , "r0", "r1"); 73 - } 74 - 75 - static inline void atomic_set_mask(unsigned int mask, atomic_t *v) 76 - { 77 - int tmp; 78 - 79 - __asm__ __volatile__ ( 80 - " .align 2 \n\t" 81 - " mova 1f, r0 \n\t" /* r0 = end point */ 82 - " mov r15, r1 \n\t" /* r1 = saved sp */ 83 - " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ 84 - " mov.l @%1, %0 \n\t" /* load old value */ 85 - " or %2, %0 \n\t" /* or */ 86 - " mov.l %0, @%1 \n\t" /* store new value */ 87 - "1: mov r1, r15 \n\t" /* LOGOUT */ 88 - : "=&r" (tmp), 89 - "+r" (v) 90 - : "r" (mask) 91 - : "memory" , "r0", "r1"); 92 - } 93 58 94 59 #endif /* __ASM_SH_ATOMIC_GRB_H */
+3 -18
arch/sh/include/asm/atomic-irq.h
··· 37 37 38 38 ATOMIC_OPS(add, +=) 39 39 ATOMIC_OPS(sub, -=) 40 + ATOMIC_OP(and, &=) 41 + ATOMIC_OP(or, |=) 42 + ATOMIC_OP(xor, ^=) 40 43 41 44 #undef ATOMIC_OPS 42 45 #undef ATOMIC_OP_RETURN 43 46 #undef ATOMIC_OP 44 - 45 - static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 46 - { 47 - unsigned long flags; 48 - 49 - raw_local_irq_save(flags); 50 - v->counter &= ~mask; 51 - raw_local_irq_restore(flags); 52 - } 53 - 54 - static inline void atomic_set_mask(unsigned int mask, atomic_t *v) 55 - { 56 - unsigned long flags; 57 - 58 - raw_local_irq_save(flags); 59 - v->counter |= mask; 60 - raw_local_irq_restore(flags); 61 - } 62 47 63 48 #endif /* __ASM_SH_ATOMIC_IRQ_H */
+3 -28
arch/sh/include/asm/atomic-llsc.h
··· 52 52 53 53 ATOMIC_OPS(add) 54 54 ATOMIC_OPS(sub) 55 + ATOMIC_OP(and) 56 + ATOMIC_OP(or) 57 + ATOMIC_OP(xor) 55 58 56 59 #undef ATOMIC_OPS 57 60 #undef ATOMIC_OP_RETURN 58 61 #undef ATOMIC_OP 59 - 60 - static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 61 - { 62 - unsigned long tmp; 63 - 64 - __asm__ __volatile__ ( 65 - "1: movli.l @%2, %0 ! atomic_clear_mask \n" 66 - " and %1, %0 \n" 67 - " movco.l %0, @%2 \n" 68 - " bf 1b \n" 69 - : "=&z" (tmp) 70 - : "r" (~mask), "r" (&v->counter) 71 - : "t"); 72 - } 73 - 74 - static inline void atomic_set_mask(unsigned int mask, atomic_t *v) 75 - { 76 - unsigned long tmp; 77 - 78 - __asm__ __volatile__ ( 79 - "1: movli.l @%2, %0 ! atomic_set_mask \n" 80 - " or %1, %0 \n" 81 - " movco.l %0, @%2 \n" 82 - " bf 1b \n" 83 - : "=&z" (tmp) 84 - : "r" (mask), "r" (&v->counter) 85 - : "t"); 86 - } 87 62 88 63 #endif /* __ASM_SH_ATOMIC_LLSC_H */
+3 -1
arch/sparc/include/asm/atomic_32.h
··· 17 17 #include <asm/barrier.h> 18 18 #include <asm-generic/atomic64.h> 19 19 20 - 21 20 #define ATOMIC_INIT(i) { (i) } 22 21 23 22 int atomic_add_return(int, atomic_t *); 23 + void atomic_and(int, atomic_t *); 24 + void atomic_or(int, atomic_t *); 25 + void atomic_xor(int, atomic_t *); 24 26 int atomic_cmpxchg(atomic_t *, int, int); 25 27 int atomic_xchg(atomic_t *, int); 26 28 int __atomic_add_unless(atomic_t *, int, int);
+4
arch/sparc/include/asm/atomic_64.h
··· 33 33 ATOMIC_OPS(add) 34 34 ATOMIC_OPS(sub) 35 35 36 + ATOMIC_OP(and) 37 + ATOMIC_OP(or) 38 + ATOMIC_OP(xor) 39 + 36 40 #undef ATOMIC_OPS 37 41 #undef ATOMIC_OP_RETURN 38 42 #undef ATOMIC_OP
+19 -3
arch/sparc/lib/atomic32.c
··· 27 27 28 28 #endif /* SMP */ 29 29 30 - #define ATOMIC_OP(op, cop) \ 30 + #define ATOMIC_OP_RETURN(op, c_op) \ 31 31 int atomic_##op##_return(int i, atomic_t *v) \ 32 32 { \ 33 33 int ret; \ 34 34 unsigned long flags; \ 35 35 spin_lock_irqsave(ATOMIC_HASH(v), flags); \ 36 36 \ 37 - ret = (v->counter cop i); \ 37 + ret = (v->counter c_op i); \ 38 38 \ 39 39 spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ 40 40 return ret; \ 41 41 } \ 42 42 EXPORT_SYMBOL(atomic_##op##_return); 43 43 44 - ATOMIC_OP(add, +=) 44 + #define ATOMIC_OP(op, c_op) \ 45 + void atomic_##op(int i, atomic_t *v) \ 46 + { \ 47 + unsigned long flags; \ 48 + spin_lock_irqsave(ATOMIC_HASH(v), flags); \ 49 + \ 50 + v->counter c_op i; \ 51 + \ 52 + spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ 53 + } \ 54 + EXPORT_SYMBOL(atomic_##op); 45 55 56 + ATOMIC_OP_RETURN(add, +=) 57 + ATOMIC_OP(and, &=) 58 + ATOMIC_OP(or, |=) 59 + ATOMIC_OP(xor, ^=) 60 + 61 + #undef ATOMIC_OP_RETURN 46 62 #undef ATOMIC_OP 47 63 48 64 int atomic_xchg(atomic_t *v, int new)
+6
arch/sparc/lib/atomic_64.S
··· 47 47 48 48 ATOMIC_OPS(add) 49 49 ATOMIC_OPS(sub) 50 + ATOMIC_OP(and) 51 + ATOMIC_OP(or) 52 + ATOMIC_OP(xor) 50 53 51 54 #undef ATOMIC_OPS 52 55 #undef ATOMIC_OP_RETURN ··· 87 84 88 85 ATOMIC64_OPS(add) 89 86 ATOMIC64_OPS(sub) 87 + ATOMIC64_OP(and) 88 + ATOMIC64_OP(or) 89 + ATOMIC64_OP(xor) 90 90 91 91 #undef ATOMIC64_OPS 92 92 #undef ATOMIC64_OP_RETURN
+3
arch/sparc/lib/ksyms.c
··· 111 111 112 112 ATOMIC_OPS(add) 113 113 ATOMIC_OPS(sub) 114 + ATOMIC_OP(and) 115 + ATOMIC_OP(or) 116 + ATOMIC_OP(xor) 114 117 115 118 #undef ATOMIC_OPS 116 119 #undef ATOMIC_OP_RETURN
+28
arch/tile/include/asm/atomic_32.h
··· 34 34 _atomic_xchg_add(&v->counter, i); 35 35 } 36 36 37 + #define ATOMIC_OP(op) \ 38 + unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \ 39 + static inline void atomic_##op(int i, atomic_t *v) \ 40 + { \ 41 + _atomic_##op((unsigned long *)&v->counter, i); \ 42 + } 43 + 44 + ATOMIC_OP(and) 45 + ATOMIC_OP(or) 46 + ATOMIC_OP(xor) 47 + 48 + #undef ATOMIC_OP 49 + 37 50 /** 38 51 * atomic_add_return - add integer and return 39 52 * @v: pointer of type atomic_t ··· 125 112 { 126 113 _atomic64_xchg_add(&v->counter, i); 127 114 } 115 + 116 + #define ATOMIC64_OP(op) \ 117 + long long _atomic64_##op(long long *v, long long n); \ 118 + static inline void atomic64_##op(long long i, atomic64_t *v) \ 119 + { \ 120 + _atomic64_##op(&v->counter, i); \ 121 + } 122 + 123 + ATOMIC64_OP(and) 124 + ATOMIC64_OP(or) 125 + ATOMIC64_OP(xor) 128 126 129 127 /** 130 128 * atomic64_add_return - add integer and return ··· 249 225 extern struct __get_user __atomic_xchg_add_unless(volatile int *p, 250 226 int *lock, int o, int n); 251 227 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n); 228 + extern struct __get_user __atomic_and(volatile int *p, int *lock, int n); 252 229 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n); 253 230 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n); 254 231 extern long long __atomic64_cmpxchg(volatile long long *p, int *lock, ··· 259 234 long long n); 260 235 extern long long __atomic64_xchg_add_unless(volatile long long *p, 261 236 int *lock, long long o, long long n); 237 + extern long long __atomic64_and(volatile long long *p, int *lock, long long n); 238 + extern long long __atomic64_or(volatile long long *p, int *lock, long long n); 239 + extern long long __atomic64_xor(volatile long long *p, int *lock, long long n); 262 240 263 241 /* Return failure from the atomic wrappers. */ 264 242 struct __get_user __atomic_bad_address(int __user *addr);
+40
arch/tile/include/asm/atomic_64.h
··· 58 58 return oldval; 59 59 } 60 60 61 + static inline void atomic_and(int i, atomic_t *v) 62 + { 63 + __insn_fetchand4((void *)&v->counter, i); 64 + } 65 + 66 + static inline void atomic_or(int i, atomic_t *v) 67 + { 68 + __insn_fetchor4((void *)&v->counter, i); 69 + } 70 + 71 + static inline void atomic_xor(int i, atomic_t *v) 72 + { 73 + int guess, oldval = v->counter; 74 + do { 75 + guess = oldval; 76 + __insn_mtspr(SPR_CMPEXCH_VALUE, guess); 77 + oldval = __insn_cmpexch4(&v->counter, guess ^ i); 78 + } while (guess != oldval); 79 + } 80 + 61 81 /* Now the true 64-bit operations. */ 62 82 63 83 #define ATOMIC64_INIT(i) { (i) } ··· 109 89 oldval = cmpxchg(&v->counter, guess, guess + a); 110 90 } while (guess != oldval); 111 91 return oldval != u; 92 + } 93 + 94 + static inline void atomic64_and(long i, atomic64_t *v) 95 + { 96 + __insn_fetchand((void *)&v->counter, i); 97 + } 98 + 99 + static inline void atomic64_or(long i, atomic64_t *v) 100 + { 101 + __insn_fetchor((void *)&v->counter, i); 102 + } 103 + 104 + static inline void atomic64_xor(long i, atomic64_t *v) 105 + { 106 + long guess, oldval = v->counter; 107 + do { 108 + guess = oldval; 109 + __insn_mtspr(SPR_CMPEXCH_VALUE, guess); 110 + oldval = __insn_cmpexch(&v->counter, guess ^ i); 111 + } while (guess != oldval); 112 112 } 113 113 114 114 #define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
+23
arch/tile/lib/atomic_32.c
··· 94 94 } 95 95 EXPORT_SYMBOL(_atomic_or); 96 96 97 + unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask) 98 + { 99 + return __atomic_and((int *)p, __atomic_setup(p), mask).val; 100 + } 101 + EXPORT_SYMBOL(_atomic_and); 102 + 97 103 unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask) 98 104 { 99 105 return __atomic_andn((int *)p, __atomic_setup(p), mask).val; ··· 142 136 } 143 137 EXPORT_SYMBOL(_atomic64_cmpxchg); 144 138 139 + long long _atomic64_and(long long *v, long long n) 140 + { 141 + return __atomic64_and(v, __atomic_setup(v), n); 142 + } 143 + EXPORT_SYMBOL(_atomic64_and); 144 + 145 + long long _atomic64_or(long long *v, long long n) 146 + { 147 + return __atomic64_or(v, __atomic_setup(v), n); 148 + } 149 + EXPORT_SYMBOL(_atomic64_or); 150 + 151 + long long _atomic64_xor(long long *v, long long n) 152 + { 153 + return __atomic64_xor(v, __atomic_setup(v), n); 154 + } 155 + EXPORT_SYMBOL(_atomic64_xor); 145 156 146 157 /* 147 158 * If any of the atomic or futex routines hit a bad address (not in
+4
arch/tile/lib/atomic_asm_32.S
··· 178 178 atomic_op _xchg_add_unless, 32, \ 179 179 "sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }" 180 180 atomic_op _or, 32, "or r24, r22, r2" 181 + atomic_op _and, 32, "and r24, r22, r2" 181 182 atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2" 182 183 atomic_op _xor, 32, "xor r24, r22, r2" 183 184 ··· 192 191 { bbns r26, 3f; add r24, r22, r4 }; \ 193 192 { bbns r27, 3f; add r25, r23, r5 }; \ 194 193 slt_u r26, r24, r22; add r25, r25, r26" 194 + atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }" 195 + atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }" 196 + atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }" 195 197 196 198 jrp lr /* happy backtracer */ 197 199
+15 -10
arch/x86/include/asm/atomic.h
··· 182 182 return xchg(&v->counter, new); 183 183 } 184 184 185 + #define ATOMIC_OP(op) \ 186 + static inline void atomic_##op(int i, atomic_t *v) \ 187 + { \ 188 + asm volatile(LOCK_PREFIX #op"l %1,%0" \ 189 + : "+m" (v->counter) \ 190 + : "ir" (i) \ 191 + : "memory"); \ 192 + } 193 + 194 + ATOMIC_OP(and) 195 + ATOMIC_OP(or) 196 + ATOMIC_OP(xor) 197 + 198 + #undef ATOMIC_OP 199 + 185 200 /** 186 201 * __atomic_add_unless - add unless the number is already a given value 187 202 * @v: pointer of type atomic_t ··· 233 218 asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v)); 234 219 return *v; 235 220 } 236 - 237 - /* These are x86-specific, used by some header files */ 238 - #define atomic_clear_mask(mask, addr) \ 239 - asm volatile(LOCK_PREFIX "andl %0,%1" \ 240 - : : "r" (~(mask)), "m" (*(addr)) : "memory") 241 - 242 - #define atomic_set_mask(mask, addr) \ 243 - asm volatile(LOCK_PREFIX "orl %0,%1" \ 244 - : : "r" ((unsigned)(mask)), "m" (*(addr)) \ 245 - : "memory") 246 221 247 222 #ifdef CONFIG_X86_32 248 223 # include <asm/atomic64_32.h>
+14
arch/x86/include/asm/atomic64_32.h
··· 313 313 #undef alternative_atomic64 314 314 #undef __alternative_atomic64 315 315 316 + #define ATOMIC64_OP(op, c_op) \ 317 + static inline void atomic64_##op(long long i, atomic64_t *v) \ 318 + { \ 319 + long long old, c = 0; \ 320 + while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c) \ 321 + c = old; \ 322 + } 323 + 324 + ATOMIC64_OP(and, &) 325 + ATOMIC64_OP(or, |) 326 + ATOMIC64_OP(xor, ^) 327 + 328 + #undef ATOMIC64_OP 329 + 316 330 #endif /* _ASM_X86_ATOMIC64_32_H */
+15
arch/x86/include/asm/atomic64_64.h
··· 220 220 return dec; 221 221 } 222 222 223 + #define ATOMIC64_OP(op) \ 224 + static inline void atomic64_##op(long i, atomic64_t *v) \ 225 + { \ 226 + asm volatile(LOCK_PREFIX #op"q %1,%0" \ 227 + : "+m" (v->counter) \ 228 + : "er" (i) \ 229 + : "memory"); \ 230 + } 231 + 232 + ATOMIC64_OP(and) 233 + ATOMIC64_OP(or) 234 + ATOMIC64_OP(xor) 235 + 236 + #undef ATOMIC64_OP 237 + 223 238 #endif /* _ASM_X86_ATOMIC64_64_H */
+4 -69
arch/xtensa/include/asm/atomic.h
··· 145 145 ATOMIC_OPS(add) 146 146 ATOMIC_OPS(sub) 147 147 148 + ATOMIC_OP(and) 149 + ATOMIC_OP(or) 150 + ATOMIC_OP(xor) 151 + 148 152 #undef ATOMIC_OPS 149 153 #undef ATOMIC_OP_RETURN 150 154 #undef ATOMIC_OP ··· 252 248 c = old; 253 249 } 254 250 return c; 255 - } 256 - 257 - 258 - static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) 259 - { 260 - #if XCHAL_HAVE_S32C1I 261 - unsigned long tmp; 262 - int result; 263 - 264 - __asm__ __volatile__( 265 - "1: l32i %1, %3, 0\n" 266 - " wsr %1, scompare1\n" 267 - " and %0, %1, %2\n" 268 - " s32c1i %0, %3, 0\n" 269 - " bne %0, %1, 1b\n" 270 - : "=&a" (result), "=&a" (tmp) 271 - : "a" (~mask), "a" (v) 272 - : "memory" 273 - ); 274 - #else 275 - unsigned int all_f = -1; 276 - unsigned int vval; 277 - 278 - __asm__ __volatile__( 279 - " rsil a15,"__stringify(LOCKLEVEL)"\n" 280 - " l32i %0, %2, 0\n" 281 - " xor %1, %4, %3\n" 282 - " and %0, %0, %4\n" 283 - " s32i %0, %2, 0\n" 284 - " wsr a15, ps\n" 285 - " rsync\n" 286 - : "=&a" (vval), "=a" (mask) 287 - : "a" (v), "a" (all_f), "1" (mask) 288 - : "a15", "memory" 289 - ); 290 - #endif 291 - } 292 - 293 - static inline void atomic_set_mask(unsigned int mask, atomic_t *v) 294 - { 295 - #if XCHAL_HAVE_S32C1I 296 - unsigned long tmp; 297 - int result; 298 - 299 - __asm__ __volatile__( 300 - "1: l32i %1, %3, 0\n" 301 - " wsr %1, scompare1\n" 302 - " or %0, %1, %2\n" 303 - " s32c1i %0, %3, 0\n" 304 - " bne %0, %1, 1b\n" 305 - : "=&a" (result), "=&a" (tmp) 306 - : "a" (mask), "a" (v) 307 - : "memory" 308 - ); 309 - #else 310 - unsigned int vval; 311 - 312 - __asm__ __volatile__( 313 - " rsil a15,"__stringify(LOCKLEVEL)"\n" 314 - " l32i %0, %2, 0\n" 315 - " or %0, %0, %1\n" 316 - " s32i %0, %2, 0\n" 317 - " wsr a15, ps\n" 318 - " rsync\n" 319 - : "=&a" (vval) 320 - : "a" (mask), "a" (v) 321 - : "a15", "memory" 322 - ); 323 - #endif 324 251 } 325 252 326 253 #endif /* __KERNEL__ */
+1 -1
drivers/gpu/drm/i915/i915_drv.c
··· 748 748 mutex_lock(&dev->struct_mutex); 749 749 if (i915_gem_init_hw(dev)) { 750 750 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); 751 - atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); 751 + atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); 752 752 } 753 753 mutex_unlock(&dev->struct_mutex); 754 754
+1 -1
drivers/gpu/drm/i915/i915_gem.c
··· 5091 5091 * for all other failure, such as an allocation failure, bail. 5092 5092 */ 5093 5093 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); 5094 - atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); 5094 + atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); 5095 5095 ret = 0; 5096 5096 } 5097 5097
+2 -2
drivers/gpu/drm/i915/i915_irq.c
··· 2446 2446 kobject_uevent_env(&dev->primary->kdev->kobj, 2447 2447 KOBJ_CHANGE, reset_done_event); 2448 2448 } else { 2449 - atomic_set_mask(I915_WEDGED, &error->reset_counter); 2449 + atomic_or(I915_WEDGED, &error->reset_counter); 2450 2450 } 2451 2451 2452 2452 /* ··· 2574 2574 i915_report_and_clear_eir(dev); 2575 2575 2576 2576 if (wedged) { 2577 - atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2577 + atomic_or(I915_RESET_IN_PROGRESS_FLAG, 2578 2578 &dev_priv->gpu_error.reset_counter); 2579 2579 2580 2580 /*
+1 -1
drivers/s390/scsi/zfcp_aux.c
··· 529 529 list_add_tail(&port->list, &adapter->port_list); 530 530 write_unlock_irq(&adapter->port_list_lock); 531 531 532 - atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status); 532 + atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status); 533 533 534 534 return port; 535 535
+31 -31
drivers/s390/scsi/zfcp_erp.c
··· 190 190 if (!(act_status & ZFCP_STATUS_ERP_NO_REF)) 191 191 if (scsi_device_get(sdev)) 192 192 return NULL; 193 - atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 193 + atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, 194 194 &zfcp_sdev->status); 195 195 erp_action = &zfcp_sdev->erp_action; 196 196 memset(erp_action, 0, sizeof(struct zfcp_erp_action)); ··· 206 206 if (!get_device(&port->dev)) 207 207 return NULL; 208 208 zfcp_erp_action_dismiss_port(port); 209 - atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); 209 + atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); 210 210 erp_action = &port->erp_action; 211 211 memset(erp_action, 0, sizeof(struct zfcp_erp_action)); 212 212 erp_action->port = port; ··· 217 217 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 218 218 kref_get(&adapter->ref); 219 219 zfcp_erp_action_dismiss_adapter(adapter); 220 - atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); 220 + atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); 221 221 erp_action = &adapter->erp_action; 222 222 memset(erp_action, 0, sizeof(struct zfcp_erp_action)); 223 223 if (!(atomic_read(&adapter->status) & ··· 254 254 act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev); 255 255 if (!act) 256 256 goto out; 257 - atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); 257 + atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); 258 258 ++adapter->erp_total_count; 259 259 list_add_tail(&act->list, &adapter->erp_ready_head); 260 260 wake_up(&adapter->erp_ready_wq); ··· 486 486 { 487 487 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) 488 488 zfcp_dbf_rec_run("eraubl1", &adapter->erp_action); 489 - atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); 489 + atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); 490 490 } 491 491 492 492 static void zfcp_erp_port_unblock(struct zfcp_port *port) 493 493 { 494 494 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) 495 495 
zfcp_dbf_rec_run("erpubl1", &port->erp_action); 496 - atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); 496 + atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); 497 497 } 498 498 499 499 static void zfcp_erp_lun_unblock(struct scsi_device *sdev) ··· 502 502 503 503 if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status)) 504 504 zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action); 505 - atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status); 505 + atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status); 506 506 } 507 507 508 508 static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) ··· 642 642 read_lock_irqsave(&adapter->erp_lock, flags); 643 643 if (list_empty(&adapter->erp_ready_head) && 644 644 list_empty(&adapter->erp_running_head)) { 645 - atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, 645 + atomic_andnot(ZFCP_STATUS_ADAPTER_ERP_PENDING, 646 646 &adapter->status); 647 647 wake_up(&adapter->erp_done_wqh); 648 648 } ··· 665 665 int sleep = 1; 666 666 struct zfcp_adapter *adapter = erp_action->adapter; 667 667 668 - atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status); 668 + atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status); 669 669 670 670 for (retries = 7; retries; retries--) { 671 - atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 671 + atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 672 672 &adapter->status); 673 673 write_lock_irq(&adapter->erp_lock); 674 674 zfcp_erp_action_to_running(erp_action); 675 675 write_unlock_irq(&adapter->erp_lock); 676 676 if (zfcp_fsf_exchange_config_data(erp_action)) { 677 - atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 677 + atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 678 678 &adapter->status); 679 679 return ZFCP_ERP_FAILED; 680 680 } ··· 692 692 sleep *= 2; 693 693 } 694 694 695 - atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 695 + atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 696 696 
&adapter->status); 697 697 698 698 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK)) ··· 764 764 /* all ports and LUNs are closed */ 765 765 zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN); 766 766 767 - atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 767 + atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 768 768 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 769 769 } 770 770 ··· 773 773 struct zfcp_adapter *adapter = act->adapter; 774 774 775 775 if (zfcp_qdio_open(adapter->qdio)) { 776 - atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 776 + atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 777 777 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 778 778 &adapter->status); 779 779 return ZFCP_ERP_FAILED; ··· 784 784 return ZFCP_ERP_FAILED; 785 785 } 786 786 787 - atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status); 787 + atomic_or(ZFCP_STATUS_COMMON_OPEN, &adapter->status); 788 788 789 789 return ZFCP_ERP_SUCCEEDED; 790 790 } ··· 948 948 { 949 949 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 950 950 951 - atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, 951 + atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED, 952 952 &zfcp_sdev->status); 953 953 } 954 954 ··· 1187 1187 switch (erp_action->action) { 1188 1188 case ZFCP_ERP_ACTION_REOPEN_LUN: 1189 1189 zfcp_sdev = sdev_to_zfcp(erp_action->sdev); 1190 - atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 1190 + atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE, 1191 1191 &zfcp_sdev->status); 1192 1192 break; 1193 1193 1194 1194 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1195 1195 case ZFCP_ERP_ACTION_REOPEN_PORT: 1196 - atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 1196 + atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE, 1197 1197 &erp_action->port->status); 1198 1198 break; 1199 1199 1200 1200 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1201 - atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, 1201 + atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE, 1202 1202 &erp_action->adapter->status); 1203 1203 
break; 1204 1204 } ··· 1422 1422 unsigned long flags; 1423 1423 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1424 1424 1425 - atomic_set_mask(mask, &adapter->status); 1425 + atomic_or(mask, &adapter->status); 1426 1426 1427 1427 if (!common_mask) 1428 1428 return; 1429 1429 1430 1430 read_lock_irqsave(&adapter->port_list_lock, flags); 1431 1431 list_for_each_entry(port, &adapter->port_list, list) 1432 - atomic_set_mask(common_mask, &port->status); 1432 + atomic_or(common_mask, &port->status); 1433 1433 read_unlock_irqrestore(&adapter->port_list_lock, flags); 1434 1434 1435 1435 spin_lock_irqsave(adapter->scsi_host->host_lock, flags); 1436 1436 __shost_for_each_device(sdev, adapter->scsi_host) 1437 - atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); 1437 + atomic_or(common_mask, &sdev_to_zfcp(sdev)->status); 1438 1438 spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); 1439 1439 } 1440 1440 ··· 1453 1453 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1454 1454 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; 1455 1455 1456 - atomic_clear_mask(mask, &adapter->status); 1456 + atomic_andnot(mask, &adapter->status); 1457 1457 1458 1458 if (!common_mask) 1459 1459 return; ··· 1463 1463 1464 1464 read_lock_irqsave(&adapter->port_list_lock, flags); 1465 1465 list_for_each_entry(port, &adapter->port_list, list) { 1466 - atomic_clear_mask(common_mask, &port->status); 1466 + atomic_andnot(common_mask, &port->status); 1467 1467 if (clear_counter) 1468 1468 atomic_set(&port->erp_counter, 0); 1469 1469 } ··· 1471 1471 1472 1472 spin_lock_irqsave(adapter->scsi_host->host_lock, flags); 1473 1473 __shost_for_each_device(sdev, adapter->scsi_host) { 1474 - atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); 1474 + atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status); 1475 1475 if (clear_counter) 1476 1476 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); 1477 1477 } ··· 1491 1491 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1492 1492 unsigned long 
flags; 1493 1493 1494 - atomic_set_mask(mask, &port->status); 1494 + atomic_or(mask, &port->status); 1495 1495 1496 1496 if (!common_mask) 1497 1497 return; ··· 1499 1499 spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); 1500 1500 __shost_for_each_device(sdev, port->adapter->scsi_host) 1501 1501 if (sdev_to_zfcp(sdev)->port == port) 1502 - atomic_set_mask(common_mask, 1502 + atomic_or(common_mask, 1503 1503 &sdev_to_zfcp(sdev)->status); 1504 1504 spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); 1505 1505 } ··· 1518 1518 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; 1519 1519 unsigned long flags; 1520 1520 1521 - atomic_clear_mask(mask, &port->status); 1521 + atomic_andnot(mask, &port->status); 1522 1522 1523 1523 if (!common_mask) 1524 1524 return; ··· 1529 1529 spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); 1530 1530 __shost_for_each_device(sdev, port->adapter->scsi_host) 1531 1531 if (sdev_to_zfcp(sdev)->port == port) { 1532 - atomic_clear_mask(common_mask, 1532 + atomic_andnot(common_mask, 1533 1533 &sdev_to_zfcp(sdev)->status); 1534 1534 if (clear_counter) 1535 1535 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); ··· 1546 1546 { 1547 1547 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 1548 1548 1549 - atomic_set_mask(mask, &zfcp_sdev->status); 1549 + atomic_or(mask, &zfcp_sdev->status); 1550 1550 } 1551 1551 1552 1552 /** ··· 1558 1558 { 1559 1559 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 1560 1560 1561 - atomic_clear_mask(mask, &zfcp_sdev->status); 1561 + atomic_andnot(mask, &zfcp_sdev->status); 1562 1562 1563 1563 if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) 1564 1564 atomic_set(&zfcp_sdev->erp_counter, 0);
+4 -4
drivers/s390/scsi/zfcp_fc.c
··· 508 508 /* port is good, unblock rport without going through erp */ 509 509 zfcp_scsi_schedule_rport_register(port); 510 510 out: 511 - atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); 511 + atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status); 512 512 put_device(&port->dev); 513 513 kmem_cache_free(zfcp_fc_req_cache, fc_req); 514 514 } ··· 564 564 if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST) 565 565 goto out; 566 566 567 - atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); 567 + atomic_or(ZFCP_STATUS_PORT_LINK_TEST, &port->status); 568 568 569 569 retval = zfcp_fc_adisc(port); 570 570 if (retval == 0) 571 571 return; 572 572 573 573 /* send of ADISC was not possible */ 574 - atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); 574 + atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status); 575 575 zfcp_erp_port_forced_reopen(port, 0, "fcltwk1"); 576 576 577 577 out: ··· 640 640 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)) 641 641 return; 642 642 643 - atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status); 643 + atomic_andnot(ZFCP_STATUS_COMMON_NOESC, &port->status); 644 644 645 645 if ((port->supported_classes != 0) || 646 646 !list_empty(&port->unit_list))
+13 -13
drivers/s390/scsi/zfcp_fsf.c
··· 114 114 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED) 115 115 return; 116 116 117 - atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 117 + atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 118 118 119 119 zfcp_scsi_schedule_rports_block(adapter); 120 120 ··· 345 345 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3"); 346 346 break; 347 347 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 348 - atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 348 + atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 349 349 &adapter->status); 350 350 break; 351 351 case FSF_PROT_DUPLICATE_REQUEST_ID: ··· 554 554 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1"); 555 555 return; 556 556 } 557 - atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 557 + atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 558 558 &adapter->status); 559 559 break; 560 560 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: ··· 567 567 568 568 /* avoids adapter shutdown to be able to recognize 569 569 * events such as LINK UP */ 570 - atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 570 + atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 571 571 &adapter->status); 572 572 zfcp_fsf_link_down_info_eval(req, 573 573 &qtcb->header.fsf_status_qual.link_down_info); ··· 1394 1394 break; 1395 1395 case FSF_GOOD: 1396 1396 port->handle = header->port_handle; 1397 - atomic_set_mask(ZFCP_STATUS_COMMON_OPEN | 1397 + atomic_or(ZFCP_STATUS_COMMON_OPEN | 1398 1398 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1399 - atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED, 1399 + atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED, 1400 1400 &port->status); 1401 1401 /* check whether D_ID has changed during open */ 1402 1402 /* ··· 1677 1677 case FSF_PORT_BOXED: 1678 1678 /* can't use generic zfcp_erp_modify_port_status because 1679 1679 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ 1680 - atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1680 + atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, 
&port->status); 1681 1681 shost_for_each_device(sdev, port->adapter->scsi_host) 1682 1682 if (sdev_to_zfcp(sdev)->port == port) 1683 - atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1683 + atomic_andnot(ZFCP_STATUS_COMMON_OPEN, 1684 1684 &sdev_to_zfcp(sdev)->status); 1685 1685 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED); 1686 1686 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, ··· 1700 1700 /* can't use generic zfcp_erp_modify_port_status because 1701 1701 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port 1702 1702 */ 1703 - atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1703 + atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1704 1704 shost_for_each_device(sdev, port->adapter->scsi_host) 1705 1705 if (sdev_to_zfcp(sdev)->port == port) 1706 - atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1706 + atomic_andnot(ZFCP_STATUS_COMMON_OPEN, 1707 1707 &sdev_to_zfcp(sdev)->status); 1708 1708 break; 1709 1709 } ··· 1766 1766 1767 1767 zfcp_sdev = sdev_to_zfcp(sdev); 1768 1768 1769 - atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1769 + atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1770 1770 ZFCP_STATUS_COMMON_ACCESS_BOXED, 1771 1771 &zfcp_sdev->status); 1772 1772 ··· 1822 1822 1823 1823 case FSF_GOOD: 1824 1824 zfcp_sdev->lun_handle = header->lun_handle; 1825 - atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); 1825 + atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); 1826 1826 break; 1827 1827 } 1828 1828 } ··· 1913 1913 } 1914 1914 break; 1915 1915 case FSF_GOOD: 1916 - atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); 1916 + atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); 1917 1917 break; 1918 1918 } 1919 1919 }
+7 -7
drivers/s390/scsi/zfcp_qdio.c
··· 349 349 350 350 /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ 351 351 spin_lock_irq(&qdio->req_q_lock); 352 - atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 352 + atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 353 353 spin_unlock_irq(&qdio->req_q_lock); 354 354 355 355 wake_up(&qdio->req_q_wq); ··· 384 384 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) 385 385 return -EIO; 386 386 387 - atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, 387 + atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, 388 388 &qdio->adapter->status); 389 389 390 390 zfcp_qdio_setup_init_data(&init_data, qdio); ··· 396 396 goto failed_qdio; 397 397 398 398 if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED) 399 - atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED, 399 + atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED, 400 400 &qdio->adapter->status); 401 401 402 402 if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) { 403 - atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status); 403 + atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status); 404 404 qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER; 405 405 } else { 406 - atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status); 406 + atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status); 407 407 qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1; 408 408 } 409 409 ··· 427 427 /* set index of first available SBALS / number of available SBALS */ 428 428 qdio->req_q_idx = 0; 429 429 atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q); 430 - atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); 430 + atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); 431 431 432 432 if (adapter->scsi_host) { 433 433 adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req; ··· 499 499 500 500 rc = ccw_device_siosl(adapter->ccw_device); 501 501 if (!rc) 502 - atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, 502 + 
atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, 503 503 &adapter->status); 504 504 }
+6 -5
include/asm-generic/atomic.h
··· 98 98 ATOMIC_OP_RETURN(sub, -) 99 99 #endif 100 100 101 - #ifndef atomic_clear_mask 101 + #ifndef atomic_and 102 102 ATOMIC_OP(and, &) 103 - #define atomic_clear_mask(i, v) atomic_and(~(i), (v)) 104 103 #endif 105 104 106 - #ifndef atomic_set_mask 107 - #define CONFIG_ARCH_HAS_ATOMIC_OR 105 + #ifndef atomic_or 108 106 ATOMIC_OP(or, |) 109 - #define atomic_set_mask(i, v) atomic_or((i), (v)) 107 + #endif 108 + 109 + #ifndef atomic_xor 110 + ATOMIC_OP(xor, ^) 110 111 #endif 111 112 112 113 #undef ATOMIC_OP_RETURN
+4
include/asm-generic/atomic64.h
··· 32 32 ATOMIC64_OPS(add) 33 33 ATOMIC64_OPS(sub) 34 34 35 + ATOMIC64_OP(and) 36 + ATOMIC64_OP(or) 37 + ATOMIC64_OP(xor) 38 + 35 39 #undef ATOMIC64_OPS 36 40 #undef ATOMIC64_OP_RETURN 37 41 #undef ATOMIC64_OP
+25 -13
include/linux/atomic.h
··· 28 28 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 29 29 #endif 30 30 31 + #ifndef atomic_andnot 32 + static inline void atomic_andnot(int i, atomic_t *v) 33 + { 34 + atomic_and(~i, v); 35 + } 36 + #endif 37 + 38 + static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) 39 + { 40 + atomic_andnot(mask, v); 41 + } 42 + 43 + static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) 44 + { 45 + atomic_or(mask, v); 46 + } 47 + 31 48 /** 32 49 * atomic_inc_not_zero_hint - increment if not null 33 50 * @v: pointer of type atomic_t ··· 128 111 } 129 112 #endif 130 113 131 - #ifndef CONFIG_ARCH_HAS_ATOMIC_OR 132 - static inline void atomic_or(int i, atomic_t *v) 133 - { 134 - int old; 135 - int new; 136 - 137 - do { 138 - old = atomic_read(v); 139 - new = old | i; 140 - } while (atomic_cmpxchg(v, old, new) != old); 141 - } 142 - #endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */ 143 - 144 114 #include <asm-generic/atomic-long.h> 145 115 #ifdef CONFIG_GENERIC_ATOMIC64 146 116 #include <asm-generic/atomic64.h> 147 117 #endif 118 + 119 + #ifndef atomic64_andnot 120 + static inline void atomic64_andnot(long long i, atomic64_t *v) 121 + { 122 + atomic64_and(~i, v); 123 + } 124 + #endif 125 + 148 126 #endif /* _LINUX_ATOMIC_H */
+3
lib/atomic64.c
··· 102 102 103 103 ATOMIC64_OPS(add, +=) 104 104 ATOMIC64_OPS(sub, -=) 105 + ATOMIC64_OP(and, &=) 106 + ATOMIC64_OP(or, |=) 107 + ATOMIC64_OP(xor, ^=) 105 108 106 109 #undef ATOMIC64_OPS 107 110 #undef ATOMIC64_OP_RETURN
+47 -21
lib/atomic64_test.c
··· 16 16 #include <linux/kernel.h> 17 17 #include <linux/atomic.h> 18 18 19 + #define TEST(bit, op, c_op, val) \ 20 + do { \ 21 + atomic##bit##_set(&v, v0); \ 22 + r = v0; \ 23 + atomic##bit##_##op(val, &v); \ 24 + r c_op val; \ 25 + WARN(atomic##bit##_read(&v) != r, "%Lx != %Lx\n", \ 26 + (unsigned long long)atomic##bit##_read(&v), \ 27 + (unsigned long long)r); \ 28 + } while (0) 29 + 30 + static __init void test_atomic(void) 31 + { 32 + int v0 = 0xaaa31337; 33 + int v1 = 0xdeadbeef; 34 + int onestwos = 0x11112222; 35 + int one = 1; 36 + 37 + atomic_t v; 38 + int r; 39 + 40 + TEST(, add, +=, onestwos); 41 + TEST(, add, +=, -one); 42 + TEST(, sub, -=, onestwos); 43 + TEST(, sub, -=, -one); 44 + TEST(, or, |=, v1); 45 + TEST(, and, &=, v1); 46 + TEST(, xor, ^=, v1); 47 + TEST(, andnot, &= ~, v1); 48 + } 49 + 19 50 #define INIT(c) do { atomic64_set(&v, c); r = c; } while (0) 20 - static __init int test_atomic64(void) 51 + static __init void test_atomic64(void) 21 52 { 22 53 long long v0 = 0xaaa31337c001d00dLL; 23 54 long long v1 = 0xdeadbeefdeafcafeLL; ··· 65 34 BUG_ON(v.counter != r); 66 35 BUG_ON(atomic64_read(&v) != r); 67 36 68 - INIT(v0); 69 - atomic64_add(onestwos, &v); 70 - r += onestwos; 71 - BUG_ON(v.counter != r); 72 - 73 - INIT(v0); 74 - atomic64_add(-one, &v); 75 - r += -one; 76 - BUG_ON(v.counter != r); 37 + TEST(64, add, +=, onestwos); 38 + TEST(64, add, +=, -one); 39 + TEST(64, sub, -=, onestwos); 40 + TEST(64, sub, -=, -one); 41 + TEST(64, or, |=, v1); 42 + TEST(64, and, &=, v1); 43 + TEST(64, xor, ^=, v1); 44 + TEST(64, andnot, &= ~, v1); 77 45 78 46 INIT(v0); 79 47 r += onestwos; ··· 82 52 INIT(v0); 83 53 r += -one; 84 54 BUG_ON(atomic64_add_return(-one, &v) != r); 85 - BUG_ON(v.counter != r); 86 - 87 - INIT(v0); 88 - atomic64_sub(onestwos, &v); 89 - r -= onestwos; 90 - BUG_ON(v.counter != r); 91 - 92 - INIT(v0); 93 - atomic64_sub(-one, &v); 94 - r -= -one; 95 55 BUG_ON(v.counter != r); 96 56 97 57 INIT(v0); ··· 167 147 
BUG_ON(!atomic64_inc_not_zero(&v)); 168 148 r += one; 169 149 BUG_ON(v.counter != r); 150 + } 151 + 152 + static __init int test_atomics(void) 153 + { 154 + test_atomic(); 155 + test_atomic64(); 170 156 171 157 #ifdef CONFIG_X86 172 158 pr_info("passed for %s platform %s CX8 and %s SSE\n", ··· 192 166 return 0; 193 167 } 194 168 195 - core_initcall(test_atomic64); 169 + core_initcall(test_atomics);