/*
 * Spinlock support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/irqflags.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * This file is pulled in for SMP builds.
 * The barrier usage still needs to be audited for "true" SMP.
 */

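/*
 * The rwlock word encodes the lock state, as used by the routines below:
 *    0  -> unlocked
 *   >0  -> number of readers currently holding the lock
 *   -1  -> held by a writer
 */
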
/*
 * Read locks:
 * - load the lock value
 * - increment it
 * - if the lock value is still negative, a writer holds the lock;
 *   go back and try again
 * - if the conditional store fails, go back and try again
 * - if the conditional store of the (positive) new value succeeds,
 *   the lock is acquired
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	{ P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
		"	{ if !P3 jump 1b; }\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);

}

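/*
 * Illustrative sketch only (never compiled): roughly what the
 * memw_locked (load-locked/store-conditional) loop above does, restated
 * with GCC's generic __atomic builtins on a plain int lock word.  The
 * helper name and the memory-order arguments are assumptions for the
 * sketch, not part of the kernel API.
 */
#if 0
static inline void read_lock_sketch(int *lock_word)
{
	int old;

	for (;;) {
		old = __atomic_load_n(lock_word, __ATOMIC_RELAXED);
		if (old < 0)
			continue;	/* writer holds the lock: spin */
		/*
		 * Try to publish old+1 (one more reader).  The exchange
		 * fails if the word changed meanwhile, which models the
		 * conditional store in the assembly above.
		 */
		if (__atomic_compare_exchange_n(lock_word, &old, old + 1,
						false, __ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			break;		/* reader count bumped: lock held */
	}
}
#endif
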
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	R6 = add(R6,#-1);\n"
		"	memw_locked(%0,P3) = R6\n"
		"	if !P3 jump 1b;\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);

}

/* Returns 0 on failure, non-zero on success. */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int temp;
	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	{ %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
		"	{ if !P3 jump 1f; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	{ %0 = P3 }\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}

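/*
 * Hypothetical caller sketch (never compiled): how the zero/non-zero
 * return of the trylock above is consumed.  Real kernel code normally
 * goes through the generic read_trylock() wrapper rather than calling
 * the arch_ routine directly; the function and variable names here are
 * made up for illustration.
 */
#if 0
static int peek_shared_state(arch_rwlock_t *rw, const int *shared, int *out)
{
	if (!arch_read_trylock(rw))
		return 0;		/* contended: caller backs off */
	*out = *shared;			/* read under the reader lock */
	arch_read_unlock(rw);
	return 1;
}
#endif
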
static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
	return rwlock->lock == 0;
}

static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
	return rwlock->lock == 0;
}

/* Write lock: wait for the lock word to read 0 (free), then store -1 to mark it write-held. */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0)\n"
		"	{ P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
		"	{ if !P3 jump 1b; }\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}


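/*
 * Illustrative sketch only (never compiled): the writer acquire above,
 * restated as a compare-and-swap on a plain int lock word.  A writer may
 * proceed only while the word is 0 (no readers, no writer) and then marks
 * the lock with -1.  The helper name and memory orders are assumptions
 * made for the sketch.
 */
#if 0
static inline void write_lock_sketch(int *lock_word)
{
	int expected;

	for (;;) {
		expected = 0;		/* free: no readers, no writer */
		if (__atomic_compare_exchange_n(lock_word, &expected, -1,
						false, __ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			break;		/* lock word is now -1: write-held */
	}
}
#endif
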
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int temp;
	__asm__ __volatile__(
		"	R6 = memw_locked(%1)\n"
		"	{ %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
		"	{ if !P3 jump 1f; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	%0 = P3;\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;

}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_mb();
	lock->lock = 0;
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	P3 = cmp.eq(R6,#0);\n"
		"	{ if !P3 jump 1b; R6 = #1; }\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);

}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->lock = 0;
}

static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int temp;
	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	P3 = cmp.eq(R6,#0);\n"
		"	{ if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	%0 = P3;\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}

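/*
 * Illustrative sketch only (never compiled): the spinlock protocol used
 * by arch_spin_lock/arch_spin_trylock/arch_spin_unlock above, on a plain
 * int lock word (0 = free, 1 = held).  Names and memory orders are
 * assumptions for the sketch; the real routines rely on Hexagon's
 * memw_locked and on smp_mb() for ordering.
 */
#if 0
static inline int spin_trylock_sketch(int *lock_word)
{
	int expected = 0;

	/* single attempt: succeed only if the word was 0 and is now 1 */
	return __atomic_compare_exchange_n(lock_word, &expected, 1,
					   false, __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED);
}

static inline void spin_lock_sketch(int *lock_word)
{
	while (!spin_trylock_sketch(lock_word))
		;	/* spin until the holder writes 0 back */
}

static inline void spin_unlock_sketch(int *lock_word)
{
	/* release: order prior accesses before the store of 0 */
	__atomic_store_n(lock_word, 0, __ATOMIC_RELEASE);
}
#endif
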
/*
 * SMP spinlocks allow only a single CPU to hold the lock at a time.
 */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

#define arch_spin_is_locked(x) ((x)->lock != 0)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif