···11+/*22+ * Spinlock support for the Hexagon architecture33+ *44+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.55+ *66+ *77+ * This program is free software; you can redistribute it and/or modify88+ * it under the terms of the GNU General Public License version 2 and99+ * only version 2 as published by the Free Software Foundation.1010+ *1111+ * This program is distributed in the hope that it will be useful,1212+ * but WITHOUT ANY WARRANTY; without even the implied warranty of1313+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1414+ * GNU General Public License for more details.1515+ *1616+ * You should have received a copy of the GNU General Public License1717+ * along with this program; if not, write to the Free Software1818+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA1919+ * 02110-1301, USA.2020+ */2121+2222+#ifndef _ASM_SPINLOCK_H2323+#define _ASM_SPINLOCK_H2424+2525+#include <asm/irqflags.h>2626+2727+/*2828+ * This file is pulled in for SMP builds.2929+ * Really need to check all the barrier stuff for "true" SMP3030+ */3131+3232+/*3333+ * Read locks:3434+ * - load the lock value3535+ * - increment it3636+ * - if the lock value is still negative, go back and try again.3737+ * - unsuccessful store is unsuccessful. Go back and try again. 
Loser.3838+ * - successful store new lock value if positive -> lock acquired3939+ */4040+static inline void arch_read_lock(arch_rwlock_t *lock)4141+{4242+ __asm__ __volatile__(4343+ "1: R6 = memw_locked(%0);\n"4444+ " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"4545+ " { if !P3 jump 1b; }\n"4646+ " memw_locked(%0,P3) = R6;\n"4747+ " { if !P3 jump 1b; }\n"4848+ :4949+ : "r" (&lock->lock)5050+ : "memory", "r6", "p3"5151+ );5252+5353+}5454+5555+static inline void arch_read_unlock(arch_rwlock_t *lock)5656+{5757+ __asm__ __volatile__(5858+ "1: R6 = memw_locked(%0);\n"5959+ " R6 = add(R6,#-1);\n"6060+ " memw_locked(%0,P3) = R6\n"6161+ " if !P3 jump 1b;\n"6262+ :6363+ : "r" (&lock->lock)6464+ : "memory", "r6", "p3"6565+ );6666+6767+}6868+6969+/* I think this returns 0 on fail, 1 on success. */7070+static inline int arch_read_trylock(arch_rwlock_t *lock)7171+{7272+ int temp;7373+ __asm__ __volatile__(7474+ " R6 = memw_locked(%1);\n"7575+ " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"7676+ " { if !P3 jump 1f; }\n"7777+ " memw_locked(%1,P3) = R6;\n"7878+ " { %0 = P3 }\n"7979+ "1:\n"8080+ : "=&r" (temp)8181+ : "r" (&lock->lock)8282+ : "memory", "r6", "p3"8383+ );8484+ return temp;8585+}8686+8787+static inline int arch_read_can_lock(arch_rwlock_t *rwlock)8888+{8989+ return rwlock->lock == 0;9090+}9191+9292+static inline int arch_write_can_lock(arch_rwlock_t *rwlock)9393+{9494+ return rwlock->lock == 0;9595+}9696+9797+/* Stuffs a -1 in the lock value? 
*/9898+static inline void arch_write_lock(arch_rwlock_t *lock)9999+{100100+ __asm__ __volatile__(101101+ "1: R6 = memw_locked(%0)\n"102102+ " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n"103103+ " { if !P3 jump 1b; }\n"104104+ " memw_locked(%0,P3) = R6;\n"105105+ " { if !P3 jump 1b; }\n"106106+ :107107+ : "r" (&lock->lock)108108+ : "memory", "r6", "p3"109109+ );110110+}111111+112112+113113+static inline int arch_write_trylock(arch_rwlock_t *lock)114114+{115115+ int temp;116116+ __asm__ __volatile__(117117+ " R6 = memw_locked(%1)\n"118118+ " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"119119+ " { if !P3 jump 1f; }\n"120120+ " memw_locked(%1,P3) = R6;\n"121121+ " %0 = P3;\n"122122+ "1:\n"123123+ : "=&r" (temp)124124+ : "r" (&lock->lock)125125+ : "memory", "r6", "p3"126126+ );127127+ return temp;128128+129129+}130130+131131+static inline void arch_write_unlock(arch_rwlock_t *lock)132132+{133133+ smp_mb();134134+ lock->lock = 0;135135+}136136+137137+static inline void arch_spin_lock(arch_spinlock_t *lock)138138+{139139+ __asm__ __volatile__(140140+ "1: R6 = memw_locked(%0);\n"141141+ " P3 = cmp.eq(R6,#0);\n"142142+ " { if !P3 jump 1b; R6 = #1; }\n"143143+ " memw_locked(%0,P3) = R6;\n"144144+ " { if !P3 jump 1b; }\n"145145+ :146146+ : "r" (&lock->lock)147147+ : "memory", "r6", "p3"148148+ );149149+150150+}151151+152152+static inline void arch_spin_unlock(arch_spinlock_t *lock)153153+{154154+ smp_mb();155155+ lock->lock = 0;156156+}157157+158158+static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)159159+{160160+ int temp;161161+ __asm__ __volatile__(162162+ " R6 = memw_locked(%1);\n"163163+ " P3 = cmp.eq(R6,#0);\n"164164+ " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"165165+ " memw_locked(%1,P3) = R6;\n"166166+ " %0 = P3;\n"167167+ "1:\n"168168+ : "=&r" (temp)169169+ : "r" (&lock->lock)170170+ : "memory", "r6", "p3"171171+ );172172+ return temp;173173+}174174+175175+/*176176+ * SMP spinlocks are intended to allow only a single CPU at the lock177177+ */178178+#define 
arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)179179+#define arch_spin_unlock_wait(lock) \180180+ do {while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)181181+#define arch_spin_is_locked(x) ((x)->lock != 0)182182+183183+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)184184+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)185185+186186+#endif
/* ==== diffstat: +42 lines ==== */
/* ==== File: arch/hexagon/include/asm/spinlock_types.h (new file) ==== */
···11+/*22+ * Spinlock support for the Hexagon architecture33+ *44+ * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.55+ *66+ * This program is free software; you can redistribute it and/or modify77+ * it under the terms of the GNU General Public License version 2 and88+ * only version 2 as published by the Free Software Foundation.99+ *1010+ * This program is distributed in the hope that it will be useful,1111+ * but WITHOUT ANY WARRANTY; without even the implied warranty of1212+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the1313+ * GNU General Public License for more details.1414+ *1515+ * You should have received a copy of the GNU General Public License1616+ * along with this program; if not, write to the Free Software1717+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA1818+ * 02110-1301, USA.1919+ */2020+2121+#ifndef _ASM_SPINLOCK_TYPES_H2222+#define _ASM_SPINLOCK_TYPES_H2323+2424+#include <linux/version.h>2525+2626+#ifndef __LINUX_SPINLOCK_TYPES_H2727+# error "please don't include this file directly"2828+#endif2929+3030+typedef struct {3131+ volatile unsigned int lock;3232+} arch_spinlock_t;3333+3434+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }3535+3636+typedef struct {3737+ volatile unsigned int lock;3838+} arch_rwlock_t;3939+4040+#define __ARCH_RW_LOCK_UNLOCKED { 0 }4141+4242+#endif
/* ==== diffstat: +132 lines ==== */
/* ==== File: include/asm-generic/rwsem.h (new file) ==== */
···11+#ifndef _ASM_POWERPC_RWSEM_H22+#define _ASM_POWERPC_RWSEM_H33+44+#ifndef _LINUX_RWSEM_H55+#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."66+#endif77+88+#ifdef __KERNEL__99+1010+/*1111+ * R/W semaphores for PPC using the stuff in lib/rwsem.c.1212+ * Adapted largely from include/asm-i386/rwsem.h1313+ * by Paul Mackerras <paulus@samba.org>.1414+ */1515+1616+/*1717+ * the semaphore definition1818+ */1919+#ifdef CONFIG_PPC642020+# define RWSEM_ACTIVE_MASK 0xffffffffL2121+#else2222+# define RWSEM_ACTIVE_MASK 0x0000ffffL2323+#endif2424+2525+#define RWSEM_UNLOCKED_VALUE 0x00000000L2626+#define RWSEM_ACTIVE_BIAS 0x00000001L2727+#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)2828+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS2929+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)3030+3131+/*3232+ * lock for reading3333+ */3434+static inline void __down_read(struct rw_semaphore *sem)3535+{3636+ if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))3737+ rwsem_down_read_failed(sem);3838+}3939+4040+static inline int __down_read_trylock(struct rw_semaphore *sem)4141+{4242+ long tmp;4343+4444+ while ((tmp = sem->count) >= 0) {4545+ if (tmp == cmpxchg(&sem->count, tmp,4646+ tmp + RWSEM_ACTIVE_READ_BIAS)) {4747+ return 1;4848+ }4949+ }5050+ return 0;5151+}5252+5353+/*5454+ * lock for writing5555+ */5656+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)5757+{5858+ long tmp;5959+6060+ tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,6161+ (atomic_long_t *)&sem->count);6262+ if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))6363+ rwsem_down_write_failed(sem);6464+}6565+6666+static inline void __down_write(struct rw_semaphore *sem)6767+{6868+ __down_write_nested(sem, 0);6969+}7070+7171+static inline int __down_write_trylock(struct rw_semaphore *sem)7272+{7373+ long tmp;7474+7575+ tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,7676+ RWSEM_ACTIVE_WRITE_BIAS);7777+ return tmp 
== RWSEM_UNLOCKED_VALUE;7878+}7979+8080+/*8181+ * unlock after reading8282+ */8383+static inline void __up_read(struct rw_semaphore *sem)8484+{8585+ long tmp;8686+8787+ tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);8888+ if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))8989+ rwsem_wake(sem);9090+}9191+9292+/*9393+ * unlock after writing9494+ */9595+static inline void __up_write(struct rw_semaphore *sem)9696+{9797+ if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,9898+ (atomic_long_t *)&sem->count) < 0))9999+ rwsem_wake(sem);100100+}101101+102102+/*103103+ * implement atomic add functionality104104+ */105105+static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)106106+{107107+ atomic_long_add(delta, (atomic_long_t *)&sem->count);108108+}109109+110110+/*111111+ * downgrade write lock to read lock112112+ */113113+static inline void __downgrade_write(struct rw_semaphore *sem)114114+{115115+ long tmp;116116+117117+ tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,118118+ (atomic_long_t *)&sem->count);119119+ if (tmp < 0)120120+ rwsem_downgrade_wake(sem);121121+}122122+123123+/*124124+ * implement exchange and add functionality125125+ */126126+static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)127127+{128128+ return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);129129+}130130+131131+#endif /* __KERNEL__ */132132+#endif /* _ASM_POWERPC_RWSEM_H */