
locking/atomic, arch/blackfin: Implement atomic_fetch_{add,sub,and,or,xor}()

Implement FETCH-OP atomic primitives. These are very similar to the
existing OP-RETURN primitives we already have, except that they return
the value of the atomic variable _before_ modification.

This is especially useful for irreversible operations, such as bitops,
where the state prior to modification cannot be reconstructed from the
result.
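
To illustrate the bitops case, here is a minimal sketch (not part of this
patch; the helper name is made up for illustration) of a test-and-set style
operation built on the new atomic_fetch_or(). With only atomic_or() or an
OP-RETURN form, the pre-OR value of the bit would already be lost:

#include <linux/atomic.h>

/* Hypothetical helper, illustration only: the FETCH-OP form lets us see
 * whether the bit was already set before the (irreversible) OR.
 */
static inline int example_test_and_set_bit(int nr, atomic_t *v)
{
	int old = atomic_fetch_or(1 << nr, v);	/* value _before_ the OR */

	return (old >> nr) & 1;			/* was the bit already set? */
}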

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Miao <realmz6@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: adi-buildroot-devel@lists.sourceforge.net
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Peter Zijlstra, committed by Ingo Molnar
e87fc0ec 1a6eafac

+40 -12
arch/blackfin/include/asm/atomic.h  +8

···
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
 asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_xadd_asm(volatile int *ptr, int value);
 
 asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
···
 #define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
 #define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
 
+#define atomic_fetch_add(i, v) __raw_atomic_xadd_asm(&(v)->counter, i)
+#define atomic_fetch_sub(i, v) __raw_atomic_xadd_asm(&(v)->counter, -(i))
+
 #define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i)
 #define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
 #define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
+
+#define atomic_fetch_or(i, v) __raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_fetch_and(i, v) __raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_fetch_xor(i, v) __raw_atomic_xor_asm(&(v)->counter, i)
 
 #endif
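
A small usage sketch (illustrative only, not from the patch) contrasting the
existing OP-RETURN macro with the new FETCH-OP macro defined above:

#include <linux/atomic.h>

/* Hypothetical example: atomic_add_return() yields the value after the
 * addition, while the new atomic_fetch_add() yields the value before it.
 */
static int example_contrast(void)
{
	atomic_t v = ATOMIC_INIT(5);
	int after  = atomic_add_return(3, &v);	/* v becomes 8, returns 8 (value after)  */
	int before = atomic_fetch_add(3, &v);	/* v becomes 11, returns 8 (value before) */

	return after == before;			/* true here: both observed the value 8 */
}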
arch/blackfin/kernel/bfin_ksyms.c  +1

···
 
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_xadd_asm);
 EXPORT_SYMBOL(__raw_atomic_and_asm);
 EXPORT_SYMBOL(__raw_atomic_or_asm);
 EXPORT_SYMBOL(__raw_atomic_xor_asm);
arch/blackfin/mach-bf561/atomic.S  +31 -12

···
 
 /*
  * r0 = ptr
+ * r1 = value
+ *
+ * ADD a signed value to a 32bit word and return the old value atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_atomic_xadd_asm)
+	p1 = r0;
+	r3 = r1;
+	[--sp] = rets;
+	call _get_core_lock;
+	r3 = [p1];
+	r2 = r3 + r2;
+	[p1] = r2;
+	r1 = p1;
+	call _put_core_lock;
+	r0 = r3;
+	rets = [sp++];
+	rts;
+ENDPROC(___raw_atomic_add_asm)
+
+/*
+ * r0 = ptr
  * r1 = mask
  *
  * AND the mask bits from a 32bit word and return the old 32bit value
···
 	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
-	r2 = [p1];
-	r3 = r2 & r3;
-	[p1] = r3;
-	r3 = r2;
+	r3 = [p1];
+	r2 = r2 & r3;
+	[p1] = r2;
 	r1 = p1;
 	call _put_core_lock;
 	r0 = r3;
···
 	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
-	r2 = [p1];
-	r3 = r2 | r3;
-	[p1] = r3;
-	r3 = r2;
+	r3 = [p1];
+	r2 = r2 | r3;
+	[p1] = r2;
 	r1 = p1;
 	call _put_core_lock;
 	r0 = r3;
···
 	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
-	r2 = [p1];
-	r3 = r2 ^ r3;
-	[p1] = r3;
-	r3 = r2;
+	r3 = [p1];
+	r2 = r2 ^ r3;
+	[p1] = r2;
 	r1 = p1;
 	call _put_core_lock;
 	r0 = r3;
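
For readers who do not follow Blackfin assembly, the new ___raw_atomic_xadd_asm
routine follows the same core-lock pattern as the surrounding routines: take the
cross-core lock, read the old value, store old + value, release the lock, and
return the old value. A rough C sketch of that intent (illustrative only;
get_core_lock()/put_core_lock() are stand-ins for the assembly helpers, not real
kernel functions):

/* Stand-ins for the bf561 core-lock assembly helpers; hypothetical. */
extern void get_core_lock(void);
extern void put_core_lock(void);

/* Sketch of the FETCH-ADD semantics the assembly above implements. */
static int raw_atomic_xadd_sketch(volatile int *ptr, int value)
{
	int old;

	get_core_lock();	/* stands in for "call _get_core_lock" */
	old = *ptr;		/* fetch the value before modification */
	*ptr = old + value;	/* apply the ADD while the lock is held */
	put_core_lock();	/* stands in for "call _put_core_lock" */

	return old;		/* FETCH-OP: report the pre-modification value */
}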