Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Hexagon: Add locking types and functions

Signed-off-by: Richard Kuo <rkuo@codeaurora.org>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Richard Kuo, committed by Linus Torvalds
dd472da3 43afdf50

3 files changed, 360 insertions(+)

arch/hexagon/include/asm/spinlock.h (new file, 186 lines)
/*
 * Spinlock support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/irqflags.h>

/*
 * This file is pulled in for SMP builds.
 * Really need to check all the barrier stuff for "true" SMP
 */

/*
 * Read locks:
 * - load the lock value
 * - increment it
 * - if the lock value is still negative, go back and try again.
 * - unsuccessful store is unsuccessful.  Go back and try again.  Loser.
 * - successful store new lock value if positive -> lock acquired
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0);\n"
                "       { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
                "       { if !P3 jump 1b; }\n"
                "       memw_locked(%0,P3) = R6;\n"
                "       { if !P3 jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );

}

static inline void arch_read_unlock(arch_rwlock_t *lock)
{
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0);\n"
                "       R6 = add(R6,#-1);\n"
                "       memw_locked(%0,P3) = R6\n"
                "       if !P3 jump 1b;\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );

}

/*  I think this returns 0 on fail, 1 on success.  */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
        int temp;
        __asm__ __volatile__(
                "       R6 = memw_locked(%1);\n"
                "       { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
                "       { if !P3 jump 1f; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       { %0 = P3 }\n"
                "1:\n"
                : "=&r" (temp)
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
        return temp;
}

static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
        return rwlock->lock == 0;
}

static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
        return rwlock->lock == 0;
}

/*  Stuffs a -1 in the lock value?  */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0)\n"
                "       { P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
                "       { if !P3 jump 1b; }\n"
                "       memw_locked(%0,P3) = R6;\n"
                "       { if !P3 jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
}


static inline int arch_write_trylock(arch_rwlock_t *lock)
{
        int temp;
        __asm__ __volatile__(
                "       R6 = memw_locked(%1)\n"
                "       { %0 = #0; P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
                "       { if !P3 jump 1f; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       %0 = P3;\n"
                "1:\n"
                : "=&r" (temp)
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
        return temp;

}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
        smp_mb();
        lock->lock = 0;
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        __asm__ __volatile__(
                "1:     R6 = memw_locked(%0);\n"
                "       P3 = cmp.eq(R6,#0);\n"
                "       { if !P3 jump 1b; R6 = #1; }\n"
                "       memw_locked(%0,P3) = R6;\n"
                "       { if !P3 jump 1b; }\n"
                :
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );

}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        smp_mb();
        lock->lock = 0;
}

static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
        int temp;
        __asm__ __volatile__(
                "       R6 = memw_locked(%1);\n"
                "       P3 = cmp.eq(R6,#0);\n"
                "       { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
                "       memw_locked(%1,P3) = R6;\n"
                "       %0 = P3;\n"
                "1:\n"
                : "=&r" (temp)
                : "r" (&lock->lock)
                : "memory", "r6", "p3"
        );
        return temp;
}

/*
 * SMP spinlocks are intended to allow only a single CPU at the lock
 */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(lock) \
        do {while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#define arch_spin_is_locked(x) ((x)->lock != 0)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif
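All of the asm bodies above follow the same load-locked/store-conditional pattern: memw_locked(addr) loads the word and takes a reservation, and the predicated store memw_locked(addr,P3) = R6 only succeeds (P3 true) if nothing else touched the word in between; on failure the loop retries. For readers more familiar with compare-and-swap, a minimal user-space C11 sketch of the same acquire loop follows. The demo_* names are invented for the illustration; this is not the kernel's code, only an equivalent expressed with standard atomics.

/* Illustrative only: CAS retry loop equivalent to the Hexagon LL/SC spinlock. */
#include <stdatomic.h>

typedef struct { atomic_uint lock; } demo_spinlock_t;

static inline void demo_spin_lock(demo_spinlock_t *l)
{
        unsigned int expected;

        do {
                expected = 0;   /* lock is free when the word is 0 */
                /* try to move 0 -> 1; on failure, reload and retry */
        } while (!atomic_compare_exchange_weak_explicit(&l->lock, &expected, 1,
                                                        memory_order_acquire,
                                                        memory_order_relaxed));
}

static inline int demo_spin_trylock(demo_spinlock_t *l)
{
        unsigned int expected = 0;

        /* single attempt: returns 1 on success, 0 if already held */
        return atomic_compare_exchange_strong_explicit(&l->lock, &expected, 1,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
}

static inline void demo_spin_unlock(demo_spinlock_t *l)
{
        /* release store pairs with the acquire in demo_spin_lock() */
        atomic_store_explicit(&l->lock, 0, memory_order_release);
}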
arch/hexagon/include/asm/spinlock_types.h (new file, 42 lines)
/*
 * Spinlock support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_SPINLOCK_TYPES_H
#define _ASM_SPINLOCK_TYPES_H

#include <linux/version.h>

#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif

typedef struct {
        volatile unsigned int lock;
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED       { 0 }

typedef struct {
        volatile unsigned int lock;
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED         { 0 }

#endif
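These two structures carry only the raw lock word; the generic spinlock layer wraps them. A simplified sketch of that wrapping is below (the real raw_spinlock in include/linux/spinlock_types.h also carries debug and lockdep fields when those options are enabled, and demo_lock is a made-up name for the example), showing how a statically initialized lock is built from the arch initializer.

/* Simplified sketch of the generic wrapper; debug/lockdep fields omitted. */
typedef struct raw_spinlock {
        arch_spinlock_t raw_lock;       /* the Hexagon lock word defined above */
} raw_spinlock_t;

/* A lock that starts out unlocked, built from the arch initializer. */
static raw_spinlock_t demo_lock = {
        .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, /* i.e. { 0 } */
};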
include/asm-generic/rwsem.h (new file, 132 lines)
#ifndef _ASM_POWERPC_RWSEM_H
#define _ASM_POWERPC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

/*
 * the semaphore definition
 */
#ifdef CONFIG_PPC64
# define RWSEM_ACTIVE_MASK              0xffffffffL
#else
# define RWSEM_ACTIVE_MASK              0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE            0x00000000L
#define RWSEM_ACTIVE_BIAS               0x00000001L
#define RWSEM_WAITING_BIAS              (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        if (unlikely(atomic_long_inc_return((atomic_long_t *)&sem->count) <= 0))
                rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        long tmp;

        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        return 1;
                }
        }
        return 0;
}

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        long tmp;

        tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
                                     (atomic_long_t *)&sem->count);
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        long tmp;

        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        long tmp;

        tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
        if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        if (unlikely(atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
                                            (atomic_long_t *)&sem->count) < 0))
                rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
        atomic_long_add(delta, (atomic_long_t *)&sem->count);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        long tmp;

        tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
                                     (atomic_long_t *)&sem->count);
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
        return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}

#endif  /* __KERNEL__ */
#endif  /* _ASM_POWERPC_RWSEM_H */
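The bias constants above pack readers and writers into one signed count: each reader adds RWSEM_ACTIVE_READ_BIAS (1), a writer adds RWSEM_ACTIVE_WRITE_BIAS, and a count that goes non-positive on the fast path sends the caller into the lib/rwsem.c slow path. A small stand-alone program, assuming the 32-bit 0x0000ffff active mask, just walks through that arithmetic to show the values involved; it is purely illustrative and not the kernel implementation.

/* Illustrative only: print the rwsem count values produced by the bias constants. */
#include <stdio.h>

#define RWSEM_ACTIVE_MASK       0x0000ffffL
#define RWSEM_UNLOCKED_VALUE    0x00000000L
#define RWSEM_ACTIVE_BIAS       0x00000001L
#define RWSEM_WAITING_BIAS      (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
        long count = RWSEM_UNLOCKED_VALUE;

        count += RWSEM_ACTIVE_READ_BIAS;        /* first reader takes the lock */
        printf("one reader:          %ld (> 0, fast path)\n", count);

        count += RWSEM_ACTIVE_READ_BIAS;        /* further readers just add 1 */
        printf("two readers:         %ld\n", count);

        count = RWSEM_UNLOCKED_VALUE + RWSEM_ACTIVE_WRITE_BIAS; /* uncontended writer */
        printf("one writer:          %ld (== RWSEM_ACTIVE_WRITE_BIAS)\n", count);

        count += RWSEM_ACTIVE_READ_BIAS;        /* a reader arrives while the writer holds it */
        printf("writer + one waiter: %ld (still negative => reader takes the slow path)\n", count);

        return 0;
}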