bitops.c at v4.10
/*
 * bitops.c: atomic operations which got too long to be inlined all over
 *	the place.
 *
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Grant Grundler (grundler@cup.hp.com)
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/*
 * On SMP, atomic operations are serialized through a fixed-size hash
 * of spinlocks: _atomic_spin_lock_irqsave() maps the target address
 * to one entry of this table.
 */
#ifdef CONFIG_SMP
arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
};
#endif

/* Atomically store x into *ptr and return the previous 64-bit value. */
#ifdef CONFIG_64BIT
unsigned long __xchg64(unsigned long x, unsigned long *ptr)
{
	unsigned long temp, flags;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = *ptr;
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return temp;
}
#endif

/* Atomically store x into *ptr and return the previous 32-bit value. */
unsigned long __xchg32(int x, int *ptr)
{
	unsigned long flags;
	long temp;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = (long) *ptr;	/* XXX - sign extension wanted? */
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)temp;
}


/* Atomically store x into *ptr and return the previous byte. */
unsigned long __xchg8(char x, char *ptr)
{
	unsigned long flags;
	long temp;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = (long) *ptr;	/* XXX - sign extension wanted? */
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)temp;
}


/* If *ptr == old, store new; either way return the value *ptr held. */
u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
{
	unsigned long flags;
	u64 prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return prev;
}

/* 32-bit variant of __cmpxchg_u64(). */
unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)prev;
}
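/*
 * Below is a minimal userspace sketch of the technique the file uses:
 * emulating cmpxchg with a hash of locks keyed by the target address,
 * for a port (PA-RISC, judging by the authors and helper names) whose
 * hardware offers no native compare-and-swap. The sketch substitutes
 * pthreads for arch_spinlock_t; the names HASH_SIZE, L1_SHIFT,
 * lock_for() and demo_cmpxchg_u32() are illustrative assumptions, not
 * taken from the kernel source. Note that the kernel's
 * _atomic_spin_lock_irqsave() also disables local interrupts, which a
 * userspace mutex has no need to do.
 */

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE 16	/* number of locks; must be a power of two */
#define L1_SHIFT  6	/* assume 64-byte cache lines */

static pthread_mutex_t atomic_hash_demo[HASH_SIZE] = {
	[0 ... HASH_SIZE - 1] = PTHREAD_MUTEX_INITIALIZER
};

/* Map an address to a lock; words on the same cache line share one. */
static pthread_mutex_t *lock_for(volatile void *ptr)
{
	uintptr_t a = (uintptr_t)ptr >> L1_SHIFT;

	return &atomic_hash_demo[a & (HASH_SIZE - 1)];
}

/* Compare-and-exchange under the per-address lock, as in __cmpxchg_u32(). */
static unsigned int demo_cmpxchg_u32(volatile unsigned int *ptr,
				     unsigned int old, unsigned int new)
{
	pthread_mutex_t *m = lock_for(ptr);
	unsigned int prev;

	pthread_mutex_lock(m);
	if ((prev = *ptr) == old)
		*ptr = new;
	pthread_mutex_unlock(m);
	return prev;
}

int main(void)
{
	volatile unsigned int v = 1;
	unsigned int prev;

	prev = demo_cmpxchg_u32(&v, 1, 2);	/* succeeds: 1 -> 2 */
	printf("prev=%u now=%u\n", prev, v);
	prev = demo_cmpxchg_u32(&v, 1, 3);	/* fails: v is 2, not 1 */
	printf("prev=%u now=%u\n", prev, v);
	return 0;
}

/*
 * Design note: hashing the address, rather than taking one global lock,
 * lets atomics on unrelated words proceed in parallel, while every
 * operation on a given word hashes to the same lock and so serializes
 * correctly. Shifting out the low L1_SHIFT bits first keeps each cache
 * line under a single lock. In the kernel, disabling interrupts in
 * _atomic_spin_lock_irqsave() prevents an interrupt handler from
 * deadlocking against a lock already held by its own CPU.
 */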