/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include "btree_iter.h"
#include "six.h"

void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags, gfp_t gfp);

void bch2_trans_unlock_write(struct btree_trans *);

static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
}

static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
{
	return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
		? &trans->c->btree_transaction_stats[trans->fn_idx]
		: NULL;
}

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
	BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,
};

static inline int btree_node_locked_type(struct btree_path *path,
					 unsigned level)
{
	return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
}
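
/*
 * Worked example of the nodes_locked encoding (values illustrative): each
 * level gets two bits holding lock type + 1, so a path with a read lock at
 * level 0 and an intent lock at level 1 has
 *
 *	nodes_locked = (SIX_LOCK_read + 1) << 0 | (SIX_LOCK_intent + 1) << 2
 *		     = 0x1 | 0x8 = 0x9
 *
 * and btree_node_locked_type(path, 1) recovers
 * BTREE_NODE_UNLOCKED + ((0x9 >> 2) & 3) = -1 + 2 = BTREE_NODE_INTENT_LOCKED.
 */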

static inline int btree_node_locked_type_nowrite(struct btree_path *path,
						 unsigned level)
{
	int have = btree_node_locked_type(path, level);

	return have == BTREE_NODE_WRITE_LOCKED
		? BTREE_NODE_INTENT_LOCKED
		: have;
}

static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
}

static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
	return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
}

static inline void mark_btree_node_locked_noreset(struct btree_path *path,
						  unsigned level,
						  enum btree_node_locked_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	path->nodes_locked &= ~(3U << (level << 1));
	path->nodes_locked |= (type + 1) << (level << 1);
}
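
/*
 * Why "type + 1" needs no branch: BTREE_NODE_UNLOCKED is -1 and the six lock
 * types start at 0 (the BUILD_BUG_ONs above pin the first two), so adding 1
 * maps every lock state onto the 0..3 range of the two-bit per-level field,
 * with unlocked stored as 0 - the inverse of the single subtraction in
 * btree_node_locked_type().
 */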

static inline void mark_btree_node_locked(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned level,
					  enum btree_node_locked_type type)
{
	mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	path->l[level].lock_taken_time = local_clock();
#endif
}

static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
	return level < path->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
	if (level < path->level)
		return BTREE_NODE_UNLOCKED;
	if (level < path->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == path->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}
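
/*
 * Example (values illustrative, assuming path->level == 0): a read-only
 * lookup with locks_want == 0 wants just a read lock on the leaf, while an
 * update path with locks_want == 2 wants intent locks at levels 0 and 1.
 * Levels below path->level, and levels at or above locks_want other than
 * path->level itself, are left unlocked.
 */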

static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
					      struct btree_path *path, unsigned level)
{
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	__bch2_time_stats_update(&btree_trans_stats(trans)->lock_hold_times,
				 path->l[level].lock_taken_time,
				 local_clock());
#endif
}

/* unlock: */

void bch2_btree_node_unlock_write(struct btree_trans *,
				  struct btree_path *, struct btree *);

static inline void btree_node_unlock(struct btree_trans *trans,
				     struct btree_path *path, unsigned level)
{
	int lock_type = btree_node_locked_type(path, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED) {
		if (unlikely(lock_type == BTREE_NODE_WRITE_LOCKED)) {
			bch2_btree_node_unlock_write(trans, path, path->l[level].b);
			lock_type = BTREE_NODE_INTENT_LOCKED;
		}
		six_unlock_type(&path->l[level].b->c.lock, lock_type);
		btree_trans_lock_hold_time_update(trans, path, level);
		mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
	}
}

static inline int btree_path_lowest_level_locked(struct btree_path *path)
{
	return __ffs(path->nodes_locked) >> 1;
}

static inline int btree_path_highest_level_locked(struct btree_path *path)
{
	return __fls(path->nodes_locked) >> 1;
}
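
/*
 * Since nodes_locked uses two bits per level, the bit index of the lowest or
 * highest set bit shifted right by one is the level: e.g. (values
 * illustrative) nodes_locked == 0x9 gives __ffs() == 0 -> level 0 and
 * __fls() == 3 -> level 1.
 */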

static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
					    struct btree_path *path)
{
	btree_path_set_dirty(trans, path, BTREE_ITER_NEED_RELOCK);

	while (path->nodes_locked)
		btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
static inline void
__bch2_btree_node_unlock_write(struct btree_trans *trans, struct btree *b)
{
	if (!b->c.lock.write_lock_recurse) {
		struct btree_path *linked;
		unsigned i;

		trans_for_each_path_with_node(trans, b, linked, i)
			linked->l[b->c.level].lock_seq++;
	}

	six_unlock_write(&b->c.lock);
}
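
/*
 * Releasing the write lock advances the six lock's sequence number, so the
 * cached lock_seq of every path pointing at this node is bumped first to keep
 * it matching six_lock_seq() - that is what lets bch2_btree_node_relock()
 * succeed on those paths afterwards, while paths in other transactions see a
 * mismatch and must re-traverse. While recursive write locks are still held
 * (write_lock_recurse), the sequence number isn't advancing yet, so the
 * cached copies are left alone.
 */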

static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
				     struct btree *b)
{
	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);

	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
	__bch2_btree_node_unlock_write(trans, b);
}

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);

/* lock: */

static inline void trans_set_locked(struct btree_trans *trans, bool try)
{
	if (!trans->locked) {
		lock_acquire_exclusive(&trans->dep_map, 0, try, NULL, _THIS_IP_);
		trans->locked = true;
		trans->last_unlock_ip = 0;

		trans->pf_memalloc_nofs = (current->flags & PF_MEMALLOC_NOFS) != 0;
		current->flags |= PF_MEMALLOC_NOFS;
	}
}

static inline void trans_set_unlocked(struct btree_trans *trans)
{
	if (trans->locked) {
		lock_release(&trans->dep_map, _THIS_IP_);
		trans->locked = false;
		trans->last_unlock_ip = _RET_IP_;

		if (!trans->pf_memalloc_nofs)
			current->flags &= ~PF_MEMALLOC_NOFS;
	}
}
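
/*
 * While a transaction holds btree locks, a memory allocation that recursed
 * into filesystem reclaim could try to take those same locks and deadlock:
 * setting PF_MEMALLOC_NOFS makes every allocation in the locked section
 * implicitly GFP_NOFS. The previous value of the flag is saved and restored
 * rather than cleared unconditionally, in case the caller was already running
 * with PF_MEMALLOC_NOFS set.
 */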

static inline int __btree_node_lock_nopath(struct btree_trans *trans,
					   struct btree_bkey_cached_common *b,
					   enum six_lock_type type,
					   bool lock_may_not_fail,
					   unsigned long ip)
{
	trans->lock_may_not_fail = lock_may_not_fail;
	trans->lock_must_abort	= false;
	trans->locking		= b;

	int ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
				     bch2_six_check_for_deadlock, trans, ip);
	WRITE_ONCE(trans->locking, NULL);
	WRITE_ONCE(trans->locking_wait.start_time, 0);

	if (!ret)
		trace_btree_path_lock(trans, _THIS_IP_, b);
	return ret;
}

static inline int __must_check
btree_node_lock_nopath(struct btree_trans *trans,
		       struct btree_bkey_cached_common *b,
		       enum six_lock_type type,
		       unsigned long ip)
{
	return __btree_node_lock_nopath(trans, b, type, false, ip);
}

static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
						 struct btree_bkey_cached_common *b,
						 enum six_lock_type type)
{
	int ret = __btree_node_lock_nopath(trans, b, type, true, _THIS_IP_);

	BUG_ON(ret);
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree_bkey_cached_common *b,
					     unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		if (&path->l[level].b->c == b &&
		    btree_node_locked_type(path, level) >= want) {
			six_lock_increment(&b->lock, (enum six_lock_type) want);
			return true;
		}

	return false;
}
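
/*
 * Six locks may be held recursively: if any path in this transaction already
 * holds the node with at least the wanted lock type, another reference is
 * taken with six_lock_increment() instead of going through the (potentially
 * blocking) lock path. The ">= want" comparison relies on the lock types
 * being ordered read < intent < write.
 */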

static inline int btree_node_lock(struct btree_trans *trans,
				  struct btree_path *path,
				  struct btree_bkey_cached_common *b,
				  unsigned level,
				  enum six_lock_type type,
				  unsigned long ip)
{
	int ret = 0;

	EBUG_ON(level >= BTREE_MAX_DEPTH);
	bch2_trans_verify_not_unlocked_or_in_restart(trans);

	if (likely(six_trylock_type(&b->lock, type)) ||
	    btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
	    !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		path->l[b->level].lock_taken_time = local_clock();
#endif
	}

	return ret;
}
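
/*
 * Lock acquisition above is a three-step cascade: an uncontended trylock,
 * then the recursive-hold check, and only then the slow path, which registers
 * trans->locking_wait so bch2_six_check_for_deadlock() can walk the waiting
 * transactions and abort one if taking this lock would close a cycle. A
 * nonzero return is thus an error from the cycle detector (a transaction
 * restart), not a plain trylock failure.
 */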

int __bch2_btree_node_lock_write(struct btree_trans *, struct btree_path *,
				 struct btree_bkey_cached_common *b, bool);

static inline int __btree_node_lock_write(struct btree_trans *trans,
					  struct btree_path *path,
					  struct btree_bkey_cached_common *b,
					  bool lock_may_not_fail)
{
	EBUG_ON(&path->l[b->level].b->c != b);
	EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock));
	EBUG_ON(!btree_node_intent_locked(path, b->level));

	/*
	 * six locks are unfair, and read locks block while a thread wants a
	 * write lock: thus, we need to tell the cycle detector we have a write
	 * lock _before_ taking the lock:
	 */
	mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);

	return likely(six_trylock_write(&b->lock))
		? 0
		: __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail);
}

static inline int __must_check
bch2_btree_node_lock_write(struct btree_trans *trans,
			   struct btree_path *path,
			   struct btree_bkey_cached_common *b)
{
	return __btree_node_lock_write(trans, path, b, false);
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *,
				       struct btree_path *,
				       struct btree_bkey_cached_common *);

/* relock: */

bool bch2_btree_path_relock_norestart(struct btree_trans *, struct btree_path *);
int __bch2_btree_path_relock(struct btree_trans *,
			     struct btree_path *, unsigned long);

static inline int bch2_btree_path_relock(struct btree_trans *trans,
					 struct btree_path *path, unsigned long trace_ip)
{
	return btree_node_locked(path, path->level)
		? 0
		: __bch2_btree_path_relock(trans, path, trace_ip);
}

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);

static inline bool bch2_btree_node_relock(struct btree_trans *trans,
					  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, true));
}
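
/*
 * A relock only succeeds if the path's saved lock_seq still matches the
 * lock's current sequence number, i.e. nobody has write locked the node since
 * this path last held it; otherwise the caller has to re-traverse.
 */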

static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
						  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		btree_node_locked_type_nowrite(path, level) !=
		__btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, false));
}

/* upgrade */

bool __bch2_btree_path_upgrade_norestart(struct btree_trans *, struct btree_path *, unsigned);

static inline bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
						     struct btree_path *path,
						     unsigned new_locks_want)
{
	return new_locks_want > path->locks_want
		? __bch2_btree_path_upgrade_norestart(trans, path, new_locks_want)
		: true;
}

int __bch2_btree_path_upgrade(struct btree_trans *,
			      struct btree_path *, unsigned);

static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned new_locks_want)
{
	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	return likely(path->locks_want >= new_locks_want && path->nodes_locked)
		? 0
		: __bch2_btree_path_upgrade(trans, path, new_locks_want);
}
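
/*
 * Typical usage (sketch, assuming a path traversed for reading that now needs
 * intent locks for an update):
 *
 *	int ret = bch2_btree_path_upgrade(trans, path, 1);
 *	if (ret)
 *		return ret;	// transaction restart
 *
 * Raising locks_want may require trading read locks for intent locks, which
 * can fail and restart the transaction; the fast path above is only taken
 * when locks_want already covers the request and the path still holds locks.
 */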

/* misc: */

static inline void btree_path_set_should_be_locked(struct btree_trans *trans, struct btree_path *path)
{
	EBUG_ON(!btree_node_locked(path, path->level));
	EBUG_ON(path->uptodate);

	if (!path->should_be_locked) {
		path->should_be_locked = true;
		trace_btree_path_should_be_locked(trans, path);
	}
}

static inline void __btree_path_set_level_up(struct btree_trans *trans,
					     struct btree_path *path,
					     unsigned l)
{
	btree_node_unlock(trans, path, l);
	path->l[l].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
}
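
/*
 * The ERR_PTR sentinel stored above makes is_btree_node() false for this
 * level while recording why the node pointer is gone; a relock at this level
 * will now fail and force a traversal.
 */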

static inline void btree_path_set_level_up(struct btree_trans *trans,
					   struct btree_path *path)
{
	__btree_path_set_level_up(trans, path, path->level++);
	btree_path_set_dirty(trans, path, BTREE_ITER_NEED_TRAVERSE);
}

/* debug */

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
						  struct btree_path *,
						  struct btree_bkey_cached_common *b,
						  unsigned);

int bch2_check_for_deadlock(struct btree_trans *, struct printbuf *);

void __bch2_btree_path_verify_locks(struct btree_trans *, struct btree_path *);
void __bch2_trans_verify_locks(struct btree_trans *);

static inline void bch2_btree_path_verify_locks(struct btree_trans *trans,
						struct btree_path *path)
{
	if (static_branch_unlikely(&bch2_debug_check_btree_locking))
		__bch2_btree_path_verify_locks(trans, path);
}

static inline void bch2_trans_verify_locks(struct btree_trans *trans)
{
	if (static_branch_unlikely(&bch2_debug_check_btree_locking))
		__bch2_trans_verify_locks(trans);
}

#endif /* _BCACHEFS_BTREE_LOCKING_H */