/* MN10300 spinlock support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/rwlock.h>
#include <asm/page.h>

/*
 * Simple spin lock operations. There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions. They have a cost.
 */

#define arch_spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) != 0)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->slock, !VAL);
}
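
/*
 * smp_cond_load_acquire() spins until the loaded value satisfies the
 * condition - here, until slock reads as zero - and provides ACQUIRE
 * ordering against everything that follows the wait. A rough sketch of
 * the same loop in portable kernel C (illustrative, not from this file):
 *
 *	while (READ_ONCE(lock->slock))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();
 */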

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(
		"	bclr	1,(0,%0)	\n"
		:
		: "a"(&lock->slock)
		: "memory", "cc");
}
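
/*
 * The bclr above atomically clears bit 0 of the lock byte; the "memory"
 * clobber keeps the compiler from sinking critical-section accesses past
 * the release point. A rough portable sketch (illustrative only - bclr's
 * read-modify-write atomicity is stronger than a plain store):
 *
 *	smp_mb();
 *	WRITE_ONCE(lock->slock, 0);
 */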

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int ret;

	asm volatile(
		"	mov	1,%0		\n"
		"	bset	%0,(%1)		\n"
		"	bne	1f		\n"
		"	clr	%0		\n"
		"1:	xor	1,%0		\n"
		: "=d"(ret)
		: "a"(&lock->slock)
		: "memory", "cc");

	return ret;
}
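
/*
 * bset tests the old memory value against the mask before OR-ing the
 * mask in, so the "bne" is taken when the lock byte was already held.
 * The clr/xor pair then maps "was free" to a return of 1 and "was held"
 * to 0. A hypothetical one-line sketch of the same semantics:
 *
 *	return xchg(&lock->slock, 1) == 0;
 */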

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	asm volatile(
		"1:	bset	1,(0,%0)	\n"
		"	bne	1b		\n"
		:
		: "a"(&lock->slock)
		: "memory", "cc");
}
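
/*
 * Putting the primitives together - an illustrative usage sketch only;
 * real callers go through the spin_lock()/spin_unlock() wrappers in
 * <linux/spinlock.h>:
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);
 *	... critical section ...
 *	arch_spin_unlock(&lock);
 *
 *	if (arch_spin_trylock(&lock)) {
 *		... lock acquired without spinning ...
 *		arch_spin_unlock(&lock);
 *	}
 */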

static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
					unsigned long flags)
{
	int temp;

	asm volatile(
		"1:	bset	1,(0,%2)	\n"
		"	beq	3f		\n"
		"	mov	%1,epsw		\n"
		"2:	mov	(0,%2),%0	\n"
		"	or	%0,%0		\n"
		"	bne	2b		\n"
		"	mov	%3,%0		\n"
		"	mov	%0,epsw		\n"
		"	nop			\n"
		"	nop			\n"
		"	bra	1b		\n"
		"3:				\n"
		: "=&d" (temp)
		: "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
		: "memory", "cc");
}
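
/*
 * This is the classic "spin with interrupts re-enabled" pattern: if the
 * test-and-set fails, the caller's saved EPSW (flags) is restored,
 * typically re-enabling interrupts, while the lock byte is polled; once
 * it reads zero, interrupts are masked again via EPSW_IE |
 * MN10300_CLI_LEVEL and the test-and-set is retried. A rough sketch in
 * terms of generic helpers (illustrative, not from this file):
 *
 *	while (!arch_spin_trylock(lock)) {
 *		arch_local_irq_restore(flags);
 *		while (arch_spin_is_locked(lock))
 *			cpu_relax();
 *		arch_local_irq_disable();
 *	}
 */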

#ifdef __KERNEL__

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
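
/*
 * Concretely (illustrative sketch, using the generic rwlock wrappers and
 * a hypothetical my_rwlock): a writer that can race with
 * interrupt-context readers must be IRQ-safe, otherwise a reader taken
 * in an interrupt on the same CPU would spin forever on the write-held
 * lock:
 *
 *	write_lock_irqsave(&my_rwlock, flags);	// writer side: IRQ-safe
 *	...
 *	write_unlock_irqrestore(&my_rwlock, flags);
 *
 *	// in the interrupt handler:
 *	read_lock(&my_rwlock);			// plain read lock is fine
 *	...
 *	read_unlock(&my_rwlock);
 */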

/**
 * arch_read_can_lock - would arch_read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(lock)	((int)(lock)->lock > 0)

/**
 * arch_write_can_lock - would arch_write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(lock)	((lock)->lock == RW_LOCK_BIAS)

/*
 * On mn10300, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
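
/*
 * With RW_LOCK_BIAS as the unlocked value, the counter decodes roughly
 * as follows (illustrative, assuming the conventional 0x01000000 bias):
 *
 *	RW_LOCK_BIAS		unlocked
 *	RW_LOCK_BIAS - N	held by N readers
 *	0			held by one writer
 *	negative (sign set)	contended
 */
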
static inline void arch_read_lock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_read_lock(rw, "__read_lock_failed");
#else
	{
		atomic_t *count = (atomic_t *)rw;
		while (atomic_dec_return(count) < 0)
			atomic_inc(count);
	}
#endif
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_write_lock(rw, "__write_lock_failed");
#else
	{
		atomic_t *count = (atomic_t *)rw;
		while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
			atomic_add(RW_LOCK_BIAS, count);
	}
#endif
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_read_unlock(rw);
#else
	{
		atomic_t *count = (atomic_t *)rw;
		atomic_inc(count);
	}
#endif
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_write_unlock(rw);
#else
	{
		atomic_t *count = (atomic_t *)rw;
		atomic_add(RW_LOCK_BIAS, count);
	}
#endif
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	/* atomic_dec_return() closes the race that a separate
	 * atomic_dec() + atomic_read() pair would leave between the
	 * decrement and the test */
	if (atomic_dec_return(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
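
/*
 * Illustrative trylock usage (sketch only; real code goes through the
 * read_trylock()/write_trylock() wrappers):
 *
 *	if (arch_write_trylock(&rw)) {
 *		... exclusive access ...
 *		arch_write_unlock(&rw);
 *	} else if (arch_read_trylock(&rw)) {
 *		... shared access ...
 *		arch_read_unlock(&rw);
 *	}
 */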

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __KERNEL__ */
#endif /* _ASM_SPINLOCK_H */