/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = 1000;

/**
 * spin_retry_setup - parse the "spin_retry=" kernel command line parameter
 * @str: string holding the numeric value for spin_retry
 *
 * Overrides the default retry count of 1000.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
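/*
 * Usage sketch (values are illustrative only): booting with
 * "spin_retry=2000" on the kernel command line doubles the default
 * busy-wait budget, while "spin_retry=0" should make the slow paths
 * below give up spinning and fall through to yielding almost
 * immediately.
 */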
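/*
 * Out-of-line slow path for arch_spin_lock(). The lock word stores the
 * bitwise complement of the owner's CPU number, so 0 means "unlocked"
 * even when CPU 0 holds the lock. While the lock is free or the owner's
 * virtual CPU is scheduled, spin up to spin_retry times on a 0 -> ~cpu
 * compare-and-swap; if the owner has been preempted by the hypervisor
 * (and we are not running on LPAR), hand our timeslice to the owner via
 * smp_yield_cpu() instead of spinning uselessly.
 */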
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;

	while (1) {
		owner = lp->owner_cpu;
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
			}
			if (MACHINE_IS_LPAR)
				continue;
		}
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

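/*
 * Variant of arch_spin_lock_wait() used by spin_lock_irqsave(): the
 * caller arrives with interrupts disabled and the saved state in
 * @flags. Interrupts are re-enabled (as far as @flags allows) while
 * waiting and disabled again around each acquisition attempt, so the
 * lock is always taken with interrupts off.
 */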
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;

	local_irq_restore(flags);
	while (1) {
		owner = lp->owner_cpu;
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				local_irq_disable();
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
				local_irq_restore(flags);
			}
			if (MACHINE_IS_LPAR)
				continue;
		}
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		local_irq_disable();
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

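/*
 * Bounded trylock: up to spin_retry acquisition attempts with no
 * yielding at all. Returns 1 if the lock was taken, 0 otherwise.
 */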
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = ~smp_processor_id();
	int count;

	for (count = spin_retry; count > 0; count--) {
		if (arch_spin_is_locked(lp))
			continue;
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

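/*
 * Backoff helper for contended locks: when running as a z/VM or KVM
 * guest, or whenever the owner's virtual CPU is not scheduled, donate
 * the current timeslice to the lock owner rather than busy-waiting.
 */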
void arch_spin_relax(arch_spinlock_t *lock)
{
	unsigned int cpu = lock->owner_cpu;
	if (cpu != 0) {
		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
		    !smp_vcpu_scheduled(~cpu))
			smp_yield_cpu(~cpu);
	}
}
EXPORT_SYMBOL(arch_spin_relax);

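/*
 * rwlock slow paths. Judging by the constants below, bit 31
 * (0x80000000) of the lock word marks an active writer and the lower
 * 31 bits count readers. A reader waits until no writer is active,
 * then bumps the reader count with a compare-and-swap, issuing an
 * undirected smp_yield() every spin_retry iterations.
 */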
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

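/*
 * Irqsave variant of _raw_read_lock_wait(). Note that, unlike
 * arch_spin_lock_wait_flags(), interrupts are not re-enabled after a
 * failed compare-and-swap, so a reader that loses the race keeps
 * waiting with interrupts disabled.
 */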
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);

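/*
 * Bounded read trylock: up to spin_retry attempts to increment the
 * reader count. Returns 1 on success, 0 if the lock was not taken.
 */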
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		if (!arch_read_can_lock(rw))
			continue;
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

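/*
 * A writer must move the whole lock word from 0 (no readers, no
 * writer) to 0x80000000 in one compare-and-swap; as in the read path,
 * an undirected smp_yield() is issued every spin_retry iterations.
 */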
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

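/*
 * Irqsave variant of _raw_write_lock_wait(); like the read side, it
 * does not re-enable interrupts after a failed compare-and-swap.
 */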
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);

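/*
 * Bounded write trylock: up to spin_retry attempts at the
 * 0 -> 0x80000000 transition. Returns 1 on success, 0 otherwise.
 */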
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (count-- > 0) {
		if (!arch_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);