// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

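/*
 * Extent buffer locking overview (summary of the helpers below): each
 * extent_buffer carries a rwlock (eb->lock) plus atomic counters so a
 * lock can be held either "spinning" (rwlock held, counted in
 * spinning_readers/spinning_writers) or "blocking" (rwlock dropped,
 * counted in blocking_readers/blocking_writers, with waiters sleeping
 * on read_lock_wq/write_lock_wq).  The set/clear helpers convert a held
 * lock between the two modes.  lock_owner and lock_nested let the
 * thread that already holds the write lock take an additional (nested)
 * read lock on the same buffer.
 */
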
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

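/*
 * Convert a held spinning read lock on @eb into a blocking read lock:
 * bump blocking_readers and drop the rwlock so other threads can make
 * progress while this reader may sleep.
 */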
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	read_unlock(&eb->lock);
}

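/*
 * Convert a held spinning write lock on @eb into a blocking write lock:
 * bump blocking_writers and drop the rwlock.  A no-op for a nested lock
 * or if the lock has already been made blocking.
 */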
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (atomic_read(&eb->blocking_writers) == 0) {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		btrfs_assert_tree_locked(eb);
		atomic_inc(&eb->blocking_writers);
		write_unlock(&eb->lock);
	}
}

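/*
 * Convert a blocking read lock on @eb back into a spinning read lock:
 * retake the rwlock, drop blocking_readers and wake anyone waiting for
 * the blocking readers to drain.
 */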
void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_readers) == 0);
	read_lock(&eb->lock);
	atomic_inc(&eb->spinning_readers);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
}

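/*
 * Convert a blocking write lock on @eb back into a spinning write lock:
 * retake the rwlock, drop blocking_writers and wake anyone waiting for
 * the blocking writer to go away.
 */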
void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
	write_lock(&eb->lock);
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_writers))
		cond_wake_up_nomb(&eb->write_lock_wq);
}

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) ||
	    atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

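/*
 * Typical caller pattern (illustrative sketch only, not taken from this
 * file): take the spinning write lock, switch it to blocking across any
 * work that may sleep, switch back, then unlock:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_write(eb);
 *	... work that may sleep ...
 *	btrfs_clear_lock_blocking_write(eb);
 *	btrfs_tree_unlock(eb);
 *
 * btrfs_tree_unlock() below also copes with the lock still being in
 * blocking mode when it is called.
 */
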
/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}

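/*
 * Sanity check: BUG if @eb is not currently write locked.
 */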
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

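/*
 * Sanity check: BUG if @eb is not currently read locked.
 */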
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}