···
 #include <linux/types.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 
 #define atomic_read(v)	__raw_uncached_fetch_asm(&(v)->counter)
 
-#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
-#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
+#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
 
-#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
-#define atomic_set_mask(m, v)   __raw_atomic_set_asm(&(v)->counter, m)
+#define atomic_or(i, v)  (void)__raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
 
 #endif
 
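These wrappers map the new kernel-wide atomic_{or,and,xor}() operations straight onto the SMP assembly helpers and discard the returned old value. Purely as an illustration of the expansion (the example_state variable and example_expansion() function below are made up, not part of the patch):

	/* Hypothetical caller, showing what the new macros expand to. */
	static atomic_t example_state = ATOMIC_INIT(0);

	static void example_expansion(void)
	{
		atomic_or(0x1, &example_state);
		/* -> (void)__raw_atomic_or_asm(&example_state.counter, 0x1); */
		atomic_and(~0x1, &example_state);
		/* -> (void)__raw_atomic_and_asm(&example_state.counter, ~0x1); */
	}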
···
 * r0 = ptr
 * r1 = value
 *
- * Add a signed value to a 32bit word and return the new value atomically.
+ * ADD a signed value to a 32bit word and return the new value atomically.
 * Clobbers: r3:0, p1:0
 */
-ENTRY(___raw_atomic_update_asm)
+ENTRY(___raw_atomic_add_asm)
	p1 = r0;
	r3 = r1;
	[--sp] = rets;
···
	r0 = r3;
	rets = [sp++];
	rts;
-ENDPROC(___raw_atomic_update_asm)
+ENDPROC(___raw_atomic_add_asm)

/*
 * r0 = ptr
 * r1 = mask
 *
- * Clear the mask bits from a 32bit word and return the old 32bit value
+ * AND the mask bits from a 32bit word and return the old 32bit value
 * atomically.
 * Clobbers: r3:0, p1:0
 */
-ENTRY(___raw_atomic_clear_asm)
+ENTRY(___raw_atomic_and_asm)
	p1 = r0;
-	r3 = ~r1;
+	r3 = r1;
	[--sp] = rets;
	call _get_core_lock;
	r2 = [p1];
···
	r0 = r3;
	rets = [sp++];
	rts;
-ENDPROC(___raw_atomic_clear_asm)
+ENDPROC(___raw_atomic_and_asm)

/*
 * r0 = ptr
 * r1 = mask
 *
- * Set the mask bits into a 32bit word and return the old 32bit value
+ * OR the mask bits into a 32bit word and return the old 32bit value
 * atomically.
 * Clobbers: r3:0, p1:0
 */
-ENTRY(___raw_atomic_set_asm)
+ENTRY(___raw_atomic_or_asm)
	p1 = r0;
	r3 = r1;
	[--sp] = rets;
···
	r0 = r3;
	rets = [sp++];
	rts;
-ENDPROC(___raw_atomic_set_asm)
+ENDPROC(___raw_atomic_or_asm)

/*
 * r0 = ptr
···
	r2 = r1;
	r1 = 1;
	r1 <<= r2;
-	jump ___raw_atomic_set_asm
+	jump ___raw_atomic_or_asm
ENDPROC(___raw_bit_set_asm)

/*
···
 * Clobbers: r3:0, p1:0
 */
ENTRY(___raw_bit_clear_asm)
-	r2 = r1;
-	r1 = 1;
-	r1 <<= r2;
-	jump ___raw_atomic_clear_asm
+	r2 = 1;
+	r2 <<= r1;
+	r1 = ~r2;
+	jump ___raw_atomic_and_asm
ENDPROC(___raw_bit_clear_asm)

/*
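Note the semantic shift here: the old ___raw_atomic_clear_asm complemented the mask internally (r3 = ~r1), whereas the new ___raw_atomic_and_asm ANDs in the mask exactly as passed, which is why ___raw_bit_clear_asm now builds the inverted mask itself before the tail jump. A rough C rendering of the two bit helpers after this change (illustrative only; the real implementations are the assembly entries above, and the return value simply passes through the tail call):

	/* Rough C equivalents of the reworked bit helpers (illustration). */
	static int raw_bit_set_equiv(volatile int *ptr, int nr)
	{
		return __raw_atomic_or_asm(ptr, 1 << nr);
	}

	static int raw_bit_clear_equiv(volatile int *ptr, int nr)
	{
		/* the caller now inverts the mask; the helper no longer does */
		return __raw_atomic_and_asm(ptr, ~(1 << nr));
	}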
···
	_atomic_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC_OP(op)							\
+unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	_atomic_##op((unsigned long *)&v->counter, i);			\
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * atomic_add_return - add integer and return
  * @v: pointer of type atomic_t
···
 {
	_atomic64_xchg_add(&v->counter, i);
 }
+
+#define ATOMIC64_OP(op)						\
+long long _atomic64_##op(long long *v, long long n);		\
+static inline void atomic64_##op(long long i, atomic64_t *v)	\
+{								\
+	_atomic64_##op(&v->counter, i);				\
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
 
 /**
  * atomic64_add_return - add integer and return
···
 extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
						int *lock, int o, int n);
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
 extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
···
					long long n);
 extern long long __atomic64_xchg_add_unless(volatile long long *p,
					int *lock, long long o, long long n);
+extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
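Since the whole 32-bit implementation funnels through this one macro, here is what a single instance expands to, written out by hand purely for readability (this is just ATOMIC_OP(or) from the hunk above, not additional code):

	/* Manual expansion of ATOMIC_OP(or) (illustration only). */
	unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
	static inline void atomic_or(int i, atomic_t *v)
	{
		_atomic_or((unsigned long *)&v->counter, i);
	}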
+40
arch/tile/include/asm/atomic_64.h
···
	return oldval;
 }
 
+static inline void atomic_and(int i, atomic_t *v)
+{
+	__insn_fetchand4((void *)&v->counter, i);
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+	__insn_fetchor4((void *)&v->counter, i);
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+	int guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 /* Now the true 64-bit operations. */
 
 #define ATOMIC64_INIT(i)	{ (i) }
···
		oldval = cmpxchg(&v->counter, guess, guess + a);
	} while (guess != oldval);
	return oldval != u;
+}
+
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+	__insn_fetchand((void *)&v->counter, i);
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+	__insn_fetchor((void *)&v->counter, i);
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+	long guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch(&v->counter, guess ^ i);
+	} while (guess != oldval);
 }
 
 #define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
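The and/or cases can use the single fetchand/fetchor intrinsics, but there is no matching fetch-style intrinsic for xor here, so atomic_xor()/atomic64_xor() fall back to a compare-exchange retry loop. The same loop written against the kernel's generic cmpxchg(), which the surrounding context already uses, looks roughly like this; it is a sketch for orientation, not code from the patch:

	/* Generic rendering of the xor retry loop using cmpxchg() (sketch). */
	static inline void atomic64_xor_sketch(long i, atomic64_t *v)
	{
		long guess, oldval = v->counter;

		do {
			guess = oldval;
			oldval = cmpxchg(&v->counter, guess, guess ^ i);
		} while (guess != oldval);
	}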
+23
arch/tile/lib/atomic_32.c
···
 }
 EXPORT_SYMBOL(_atomic_or);
 
+unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
+{
+	return __atomic_and((int *)p, __atomic_setup(p), mask).val;
+}
+EXPORT_SYMBOL(_atomic_and);
+
 unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
 {
	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
···
 }
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
+long long _atomic64_and(long long *v, long long n)
+{
+	return __atomic64_and(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_and);
+
+long long _atomic64_or(long long *v, long long n)
+{
+	return __atomic64_or(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_or);
+
+long long _atomic64_xor(long long *v, long long n)
+{
+	return __atomic64_xor(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_xor);
 
 /*
  * If any of the atomic or futex routines hit a bad address (not in
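These wrappers only pair the target word with the lock word chosen by __atomic_setup() and defer to the low-level __atomic_and()/__atomic64_*() primitives declared in the header hunk above. The resulting call path for one operation, traced through the hunks in this patch (the layering description is an inference from the code shown, not new code):

	/*
	 * atomic_and(i, v)                              inline wrapper in the header
	 *   -> _atomic_and((unsigned long *)&v->counter, i)   exported C wrapper above
	 *     -> __atomic_and(p, lock, mask)                  low-level primitive
	 */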
···
	return xchg(&v->counter, new);
 }
 
+#define ATOMIC_OP(op)						\
+static inline void atomic_##op(int i, atomic_t *v)		\
+{								\
+	asm volatile(LOCK_PREFIX #op"l %1,%0"			\
+			: "+m" (v->counter)			\
+			: "ir" (i)				\
+			: "memory");				\
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * __atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
···
	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
	return *v;
 }
-
-/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr)				\
-	asm volatile(LOCK_PREFIX "andl %0,%1"			\
-		     : : "r" (~(mask)), "m" (*(addr)) : "memory")
-
-#define atomic_set_mask(mask, addr)				\
-	asm volatile(LOCK_PREFIX "orl %0,%1"			\
-		     : : "r" ((unsigned)(mask)), "m" (*(addr))	\
-		     : "memory")
 
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
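Because the #op stringification is easy to misread, here is the manual expansion of ATOMIC_OP(or) from the hunk above, shown only for clarity (the other two instances differ just in the mnemonic):

	/* Manual expansion of ATOMIC_OP(or): #op"l" pastes to "orl". */
	static inline void atomic_or(int i, atomic_t *v)
	{
		asm volatile(LOCK_PREFIX "orl %1,%0"
				: "+m" (v->counter)
				: "ir" (i)
				: "memory");
	}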
+14
arch/x86/include/asm/atomic64_32.h
···
 #undef alternative_atomic64
 #undef __alternative_atomic64
 
+#define ATOMIC64_OP(op, c_op)					\
+static inline void atomic64_##op(long long i, atomic64_t *v)	\
+{								\
+	long long old, c = 0;					\
+	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)	\
+		c = old;					\
+}
+
+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#undef ATOMIC64_OP
+
 #endif /* _ASM_X86_ATOMIC64_32_H */
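32-bit x86 has no single locked 64-bit and/or/xor instruction, so these operations loop on atomic64_cmpxchg() until the update lands. Expanded by hand for one instance, purely for readability (this is just ATOMIC64_OP(and, &) from the hunk above):

	/* Manual expansion of ATOMIC64_OP(and, &) (illustration only). */
	static inline void atomic64_and(long long i, atomic64_t *v)
	{
		long long old, c = 0;

		while ((old = atomic64_cmpxchg(v, c, c & i)) != c)
			c = old;
	}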
···
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
···
		c = old;
	}
	return c;
-}
-
-
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-			"1: l32i %1, %3, 0\n"
-			" wsr %1, scompare1\n"
-			" and %0, %1, %2\n"
-			" s32c1i %0, %3, 0\n"
-			" bne %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (~mask), "a" (v)
-			: "memory"
-			);
-#else
-	unsigned int all_f = -1;
-	unsigned int vval;
-
-	__asm__ __volatile__(
-			" rsil a15,"__stringify(LOCKLEVEL)"\n"
-			" l32i %0, %2, 0\n"
-			" xor %1, %4, %3\n"
-			" and %0, %0, %4\n"
-			" s32i %0, %2, 0\n"
-			" wsr a15, ps\n"
-			" rsync\n"
-			: "=&a" (vval), "=a" (mask)
-			: "a" (v), "a" (all_f), "1" (mask)
-			: "a15", "memory"
-			);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-			"1: l32i %1, %3, 0\n"
-			" wsr %1, scompare1\n"
-			" or %0, %1, %2\n"
-			" s32c1i %0, %3, 0\n"
-			" bne %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (mask), "a" (v)
-			: "memory"
-			);
-#else
-	unsigned int vval;
-
-	__asm__ __volatile__(
-			" rsil a15,"__stringify(LOCKLEVEL)"\n"
-			" l32i %0, %2, 0\n"
-			" or %0, %0, %1\n"
-			" s32i %0, %2, 0\n"
-			" wsr a15, ps\n"
-			" rsync\n"
-			: "=&a" (vval)
-			: "a" (mask), "a" (v)
-			: "a15", "memory"
-			);
-#endif
 }
 
 #endif /* __KERNEL__ */
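With the open-coded mask helpers gone, call sites convert mechanically to the new operations, as the i915 hunks below do. A hedged illustration with a made-up flags word (example_flags and example_update() are not from the patch):

	/* Hypothetical conversion of a caller of the removed helpers. */
	static atomic_t example_flags = ATOMIC_INIT(0);

	static void example_update(void)
	{
		/* was: atomic_set_mask(0x2, &example_flags);   */
		atomic_or(0x2, &example_flags);
		/* was: atomic_clear_mask(0x2, &example_flags); */
		atomic_and(~0x2, &example_flags);
	}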
+1-1
drivers/gpu/drm/i915/i915_drv.c
···
	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);
 
+1-1
drivers/gpu/drm/i915/i915_gem.c
···
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		ret = 0;
	}
 