Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

jbd2: Make state lock a spinlock

Bit-spinlocks are problematic on PREEMPT_RT if functions which might sleep
on RT, e.g. spin_lock(), alloc/free(), are invoked inside the lock held
region because bit spinlocks disable preemption even on RT.

A first attempt was to replace state lock with a spinlock placed in struct
buffer_head and make the locking conditional on PREEMPT_RT and
DEBUG_BIT_SPINLOCKS.

Jan pointed out that there is a 4 byte hole in struct journal_head where a
regular spinlock fits in and he would not object to convert the state lock
to a spinlock unconditionally.

Aside from solving the RT problem, this also gains lockdep coverage for the
journal head state lock (bit-spinlocks are not covered by lockdep as it's
hard to fit a lockdep map into a single bit).

The trivial change would have been to convert the jbd_*lock_bh_state()
inlines, but that comes with the downside that these functions take a
buffer head pointer which needs to be converted to a journal head pointer
which adds another level of indirection.

As almost all functions which use this lock have a journal head pointer
readily available, it makes more sense to remove the lock helper inlines
and write out spin_*lock() at all call sites.

Fix up all locking comments as well.

Suggested-by: Jan Kara <jack@suse.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Mark Fasheh <mark@fasheh.com>
Cc: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Jan Kara <jack@suse.com>
Cc: linux-ext4@vger.kernel.org
Link: https://lore.kernel.org/r/20190809124233.13277-7-jack@suse.cz
Signed-off-by: Theodore Ts'o <tytso@mit.edu>

Authored by Thomas Gleixner; committed by Theodore Ts'o.
46417064 2e710ff0

+84 -94
+4 -4
fs/jbd2/commit.c
··· 482 482 if (jh->b_committed_data) { 483 483 struct buffer_head *bh = jh2bh(jh); 484 484 485 - jbd_lock_bh_state(bh); 485 + spin_lock(&jh->b_state_lock); 486 486 jbd2_free(jh->b_committed_data, bh->b_size); 487 487 jh->b_committed_data = NULL; 488 - jbd_unlock_bh_state(bh); 488 + spin_unlock(&jh->b_state_lock); 489 489 } 490 490 jbd2_journal_refile_buffer(journal, jh); 491 491 } ··· 928 928 * done with it. 929 929 */ 930 930 get_bh(bh); 931 - jbd_lock_bh_state(bh); 931 + spin_lock(&jh->b_state_lock); 932 932 J_ASSERT_JH(jh, jh->b_transaction == commit_transaction); 933 933 934 934 /* ··· 1024 1024 } 1025 1025 JBUFFER_TRACE(jh, "refile or unfile buffer"); 1026 1026 drop_ref = __jbd2_journal_refile_buffer(jh); 1027 - jbd_unlock_bh_state(bh); 1027 + spin_unlock(&jh->b_state_lock); 1028 1028 if (drop_ref) 1029 1029 jbd2_journal_put_journal_head(jh); 1030 1030 if (try_to_free)
+6 -4
fs/jbd2/journal.c
··· 363 363 /* keep subsequent assertions sane */ 364 364 atomic_set(&new_bh->b_count, 1); 365 365 366 - jbd_lock_bh_state(bh_in); 366 + spin_lock(&jh_in->b_state_lock); 367 367 repeat: 368 368 /* 369 369 * If a new transaction has already done a buffer copy-out, then ··· 405 405 if (need_copy_out && !done_copy_out) { 406 406 char *tmp; 407 407 408 - jbd_unlock_bh_state(bh_in); 408 + spin_unlock(&jh_in->b_state_lock); 409 409 tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS); 410 410 if (!tmp) { 411 411 brelse(new_bh); 412 412 return -ENOMEM; 413 413 } 414 - jbd_lock_bh_state(bh_in); 414 + spin_lock(&jh_in->b_state_lock); 415 415 if (jh_in->b_frozen_data) { 416 416 jbd2_free(tmp, bh_in->b_size); 417 417 goto repeat; ··· 464 464 __jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow); 465 465 spin_unlock(&journal->j_list_lock); 466 466 set_buffer_shadow(bh_in); 467 - jbd_unlock_bh_state(bh_in); 467 + spin_unlock(&jh_in->b_state_lock); 468 468 469 469 return do_escape | (done_copy_out << 1); 470 470 } ··· 2410 2410 ret = kmem_cache_zalloc(jbd2_journal_head_cache, 2411 2411 GFP_NOFS | __GFP_NOFAIL); 2412 2412 } 2413 + if (ret) 2414 + spin_lock_init(&ret->b_state_lock); 2413 2415 return ret; 2414 2416 } 2415 2417
+47 -53
fs/jbd2/transaction.c
··· 879 879 880 880 start_lock = jiffies; 881 881 lock_buffer(bh); 882 - jbd_lock_bh_state(bh); 882 + spin_lock(&jh->b_state_lock); 883 883 884 884 /* If it takes too long to lock the buffer, trace it */ 885 885 time_lock = jbd2_time_diff(start_lock, jiffies); ··· 929 929 930 930 error = -EROFS; 931 931 if (is_handle_aborted(handle)) { 932 - jbd_unlock_bh_state(bh); 932 + spin_unlock(&jh->b_state_lock); 933 933 goto out; 934 934 } 935 935 error = 0; ··· 993 993 */ 994 994 if (buffer_shadow(bh)) { 995 995 JBUFFER_TRACE(jh, "on shadow: sleep"); 996 - jbd_unlock_bh_state(bh); 996 + spin_unlock(&jh->b_state_lock); 997 997 wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE); 998 998 goto repeat; 999 999 } ··· 1014 1014 JBUFFER_TRACE(jh, "generate frozen data"); 1015 1015 if (!frozen_buffer) { 1016 1016 JBUFFER_TRACE(jh, "allocate memory for buffer"); 1017 - jbd_unlock_bh_state(bh); 1017 + spin_unlock(&jh->b_state_lock); 1018 1018 frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size, 1019 1019 GFP_NOFS | __GFP_NOFAIL); 1020 1020 goto repeat; ··· 1033 1033 jh->b_next_transaction = transaction; 1034 1034 1035 1035 done: 1036 - jbd_unlock_bh_state(bh); 1036 + spin_unlock(&jh->b_state_lock); 1037 1037 1038 1038 /* 1039 1039 * If we are about to journal a buffer, then any revoke pending on it is ··· 1172 1172 * that case: the transaction must have deleted the buffer for it to be 1173 1173 * reused here. 1174 1174 */ 1175 - jbd_lock_bh_state(bh); 1175 + spin_lock(&jh->b_state_lock); 1176 1176 J_ASSERT_JH(jh, (jh->b_transaction == transaction || 1177 1177 jh->b_transaction == NULL || 1178 1178 (jh->b_transaction == journal->j_committing_transaction && ··· 1207 1207 jh->b_next_transaction = transaction; 1208 1208 spin_unlock(&journal->j_list_lock); 1209 1209 } 1210 - jbd_unlock_bh_state(bh); 1210 + spin_unlock(&jh->b_state_lock); 1211 1211 1212 1212 /* 1213 1213 * akpm: I added this. 
ext3_alloc_branch can pick up new indirect ··· 1275 1275 committed_data = jbd2_alloc(jh2bh(jh)->b_size, 1276 1276 GFP_NOFS|__GFP_NOFAIL); 1277 1277 1278 - jbd_lock_bh_state(bh); 1278 + spin_lock(&jh->b_state_lock); 1279 1279 if (!jh->b_committed_data) { 1280 1280 /* Copy out the current buffer contents into the 1281 1281 * preserved, committed copy. */ 1282 1282 JBUFFER_TRACE(jh, "generate b_committed data"); 1283 1283 if (!committed_data) { 1284 - jbd_unlock_bh_state(bh); 1284 + spin_unlock(&jh->b_state_lock); 1285 1285 goto repeat; 1286 1286 } 1287 1287 ··· 1289 1289 committed_data = NULL; 1290 1290 memcpy(jh->b_committed_data, bh->b_data, bh->b_size); 1291 1291 } 1292 - jbd_unlock_bh_state(bh); 1292 + spin_unlock(&jh->b_state_lock); 1293 1293 out: 1294 1294 jbd2_journal_put_journal_head(jh); 1295 1295 if (unlikely(committed_data)) ··· 1390 1390 */ 1391 1391 if (jh->b_transaction != transaction && 1392 1392 jh->b_next_transaction != transaction) { 1393 - jbd_lock_bh_state(bh); 1393 + spin_lock(&jh->b_state_lock); 1394 1394 J_ASSERT_JH(jh, jh->b_transaction == transaction || 1395 1395 jh->b_next_transaction == transaction); 1396 - jbd_unlock_bh_state(bh); 1396 + spin_unlock(&jh->b_state_lock); 1397 1397 } 1398 1398 if (jh->b_modified == 1) { 1399 1399 /* If it's in our transaction it must be in BJ_Metadata list. 
*/ 1400 1400 if (jh->b_transaction == transaction && 1401 1401 jh->b_jlist != BJ_Metadata) { 1402 - jbd_lock_bh_state(bh); 1402 + spin_lock(&jh->b_state_lock); 1403 1403 if (jh->b_transaction == transaction && 1404 1404 jh->b_jlist != BJ_Metadata) 1405 1405 pr_err("JBD2: assertion failure: h_type=%u " ··· 1409 1409 jh->b_jlist); 1410 1410 J_ASSERT_JH(jh, jh->b_transaction != transaction || 1411 1411 jh->b_jlist == BJ_Metadata); 1412 - jbd_unlock_bh_state(bh); 1412 + spin_unlock(&jh->b_state_lock); 1413 1413 } 1414 1414 goto out; 1415 1415 } 1416 1416 1417 1417 journal = transaction->t_journal; 1418 - jbd_lock_bh_state(bh); 1418 + spin_lock(&jh->b_state_lock); 1419 1419 1420 1420 if (jh->b_modified == 0) { 1421 1421 /* ··· 1501 1501 __jbd2_journal_file_buffer(jh, transaction, BJ_Metadata); 1502 1502 spin_unlock(&journal->j_list_lock); 1503 1503 out_unlock_bh: 1504 - jbd_unlock_bh_state(bh); 1504 + spin_unlock(&jh->b_state_lock); 1505 1505 out: 1506 1506 JBUFFER_TRACE(jh, "exit"); 1507 1507 return ret; ··· 1539 1539 1540 1540 BUFFER_TRACE(bh, "entry"); 1541 1541 1542 - jbd_lock_bh_state(bh); 1542 + jh = jbd2_journal_grab_journal_head(bh); 1543 + if (!jh) { 1544 + __bforget(bh); 1545 + return 0; 1546 + } 1543 1547 1544 - if (!buffer_jbd(bh)) 1545 - goto not_jbd; 1546 - jh = bh2jh(bh); 1548 + spin_lock(&jh->b_state_lock); 1547 1549 1548 1550 /* Critical error: attempting to delete a bitmap buffer, maybe? 1549 1551 * Don't do any jbd operations, and return an error. 
*/ ··· 1666 1664 spin_unlock(&journal->j_list_lock); 1667 1665 } 1668 1666 drop: 1669 - jbd_unlock_bh_state(bh); 1670 1667 __brelse(bh); 1668 + spin_unlock(&jh->b_state_lock); 1669 + jbd2_journal_put_journal_head(jh); 1671 1670 if (drop_reserve) { 1672 1671 /* no need to reserve log space for this block -bzzz */ 1673 1672 handle->h_buffer_credits++; 1674 1673 } 1675 1674 return err; 1676 - 1677 - not_jbd: 1678 - jbd_unlock_bh_state(bh); 1679 - __bforget(bh); 1680 - goto drop; 1681 1675 } 1682 1676 1683 1677 /** ··· 1872 1874 * 1873 1875 * j_list_lock is held. 1874 1876 * 1875 - * jbd_lock_bh_state(jh2bh(jh)) is held. 1877 + * jh->b_state_lock is held. 1876 1878 */ 1877 1879 1878 1880 static inline void ··· 1896 1898 * 1897 1899 * Called with j_list_lock held, and the journal may not be locked. 1898 1900 * 1899 - * jbd_lock_bh_state(jh2bh(jh)) is held. 1901 + * jh->b_state_lock is held. 1900 1902 */ 1901 1903 1902 1904 static inline void ··· 1928 1930 transaction_t *transaction; 1929 1931 struct buffer_head *bh = jh2bh(jh); 1930 1932 1931 - J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); 1933 + lockdep_assert_held(&jh->b_state_lock); 1932 1934 transaction = jh->b_transaction; 1933 1935 if (transaction) 1934 1936 assert_spin_locked(&transaction->t_journal->j_list_lock); ··· 1982 1984 1983 1985 /* Get reference so that buffer cannot be freed before we unlock it */ 1984 1986 get_bh(bh); 1985 - jbd_lock_bh_state(bh); 1987 + spin_lock(&jh->b_state_lock); 1986 1988 spin_lock(&journal->j_list_lock); 1987 1989 __jbd2_journal_unfile_buffer(jh); 1988 1990 spin_unlock(&journal->j_list_lock); 1989 - jbd_unlock_bh_state(bh); 1991 + spin_unlock(&jh->b_state_lock); 1990 1992 jbd2_journal_put_journal_head(jh); 1991 1993 __brelse(bh); 1992 1994 } ··· 1994 1996 /* 1995 1997 * Called from jbd2_journal_try_to_free_buffers(). 
1996 1998 * 1997 - * Called under jbd_lock_bh_state(bh) 1999 + * Called under jh->b_state_lock 1998 2000 */ 1999 2001 static void 2000 2002 __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) ··· 2081 2083 if (!jh) 2082 2084 continue; 2083 2085 2084 - jbd_lock_bh_state(bh); 2086 + spin_lock(&jh->b_state_lock); 2085 2087 __journal_try_to_free_buffer(journal, bh); 2088 + spin_unlock(&jh->b_state_lock); 2086 2089 jbd2_journal_put_journal_head(jh); 2087 - jbd_unlock_bh_state(bh); 2088 2090 if (buffer_jbd(bh)) 2089 2091 goto busy; 2090 2092 } while ((bh = bh->b_this_page) != head); ··· 2105 2107 * 2106 2108 * Called under j_list_lock. 2107 2109 * 2108 - * Called under jbd_lock_bh_state(bh). 2110 + * Called under jh->b_state_lock. 2109 2111 */ 2110 2112 static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) 2111 2113 { ··· 2199 2201 2200 2202 /* OK, we have data buffer in journaled mode */ 2201 2203 write_lock(&journal->j_state_lock); 2202 - jbd_lock_bh_state(bh); 2204 + spin_lock(&jh->b_state_lock); 2203 2205 spin_lock(&journal->j_list_lock); 2204 2206 2205 2207 /* ··· 2280 2282 * for commit and try again. 
2281 2283 */ 2282 2284 if (partial_page) { 2283 - jbd2_journal_put_journal_head(jh); 2284 2285 spin_unlock(&journal->j_list_lock); 2285 - jbd_unlock_bh_state(bh); 2286 + spin_unlock(&jh->b_state_lock); 2286 2287 write_unlock(&journal->j_state_lock); 2288 + jbd2_journal_put_journal_head(jh); 2287 2289 return -EBUSY; 2288 2290 } 2289 2291 /* ··· 2295 2297 set_buffer_freed(bh); 2296 2298 if (journal->j_running_transaction && buffer_jbddirty(bh)) 2297 2299 jh->b_next_transaction = journal->j_running_transaction; 2298 - jbd2_journal_put_journal_head(jh); 2299 2300 spin_unlock(&journal->j_list_lock); 2300 - jbd_unlock_bh_state(bh); 2301 + spin_unlock(&jh->b_state_lock); 2301 2302 write_unlock(&journal->j_state_lock); 2303 + jbd2_journal_put_journal_head(jh); 2302 2304 return 0; 2303 2305 } else { 2304 2306 /* Good, the buffer belongs to the running transaction. ··· 2322 2324 * here. 2323 2325 */ 2324 2326 jh->b_modified = 0; 2325 - jbd2_journal_put_journal_head(jh); 2326 2327 spin_unlock(&journal->j_list_lock); 2327 - jbd_unlock_bh_state(bh); 2328 + spin_unlock(&jh->b_state_lock); 2328 2329 write_unlock(&journal->j_state_lock); 2330 + jbd2_journal_put_journal_head(jh); 2329 2331 zap_buffer_unlocked: 2330 2332 clear_buffer_dirty(bh); 2331 2333 J_ASSERT_BH(bh, !buffer_jbddirty(bh)); ··· 2412 2414 int was_dirty = 0; 2413 2415 struct buffer_head *bh = jh2bh(jh); 2414 2416 2415 - J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); 2417 + lockdep_assert_held(&jh->b_state_lock); 2416 2418 assert_spin_locked(&transaction->t_journal->j_list_lock); 2417 2419 2418 2420 J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); ··· 2474 2476 void jbd2_journal_file_buffer(struct journal_head *jh, 2475 2477 transaction_t *transaction, int jlist) 2476 2478 { 2477 - jbd_lock_bh_state(jh2bh(jh)); 2479 + spin_lock(&jh->b_state_lock); 2478 2480 spin_lock(&transaction->t_journal->j_list_lock); 2479 2481 __jbd2_journal_file_buffer(jh, transaction, jlist); 2480 2482 spin_unlock(&transaction->t_journal->j_list_lock); 
2481 - jbd_unlock_bh_state(jh2bh(jh)); 2483 + spin_unlock(&jh->b_state_lock); 2482 2484 } 2483 2485 2484 2486 /* ··· 2488 2490 * buffer on that transaction's metadata list. 2489 2491 * 2490 2492 * Called under j_list_lock 2491 - * Called under jbd_lock_bh_state(jh2bh(jh)) 2493 + * Called under jh->b_state_lock 2492 2494 * 2493 2495 * When this function returns true, there's no next transaction to refile to 2494 2496 * and the caller has to drop jh reference through ··· 2499 2501 int was_dirty, jlist; 2500 2502 struct buffer_head *bh = jh2bh(jh); 2501 2503 2502 - J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); 2504 + lockdep_assert_held(&jh->b_state_lock); 2503 2505 if (jh->b_transaction) 2504 2506 assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock); 2505 2507 ··· 2545 2547 */ 2546 2548 void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh) 2547 2549 { 2548 - struct buffer_head *bh = jh2bh(jh); 2549 2550 bool drop; 2550 2551 2551 - /* Get reference so that buffer cannot be freed before we unlock it */ 2552 - get_bh(bh); 2553 - jbd_lock_bh_state(bh); 2552 + spin_lock(&jh->b_state_lock); 2554 2553 spin_lock(&journal->j_list_lock); 2555 2554 drop = __jbd2_journal_refile_buffer(jh); 2556 - jbd_unlock_bh_state(bh); 2555 + spin_unlock(&jh->b_state_lock); 2557 2556 spin_unlock(&journal->j_list_lock); 2558 - __brelse(bh); 2559 2557 if (drop) 2560 2558 jbd2_journal_put_journal_head(jh); 2561 2559 }
+11 -8
fs/ocfs2/suballoc.c
··· 1252 1252 int nr) 1253 1253 { 1254 1254 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; 1255 + struct journal_head *jh; 1255 1256 int ret; 1256 1257 1257 1258 if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap)) ··· 1261 1260 if (!buffer_jbd(bg_bh)) 1262 1261 return 1; 1263 1262 1264 - jbd_lock_bh_state(bg_bh); 1265 - bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data; 1263 + jh = bh2jh(bg_bh); 1264 + spin_lock(&jh->b_state_lock); 1265 + bg = (struct ocfs2_group_desc *) jh->b_committed_data; 1266 1266 if (bg) 1267 1267 ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap); 1268 1268 else 1269 1269 ret = 1; 1270 - jbd_unlock_bh_state(bg_bh); 1270 + spin_unlock(&jh->b_state_lock); 1271 1271 1272 1272 return ret; 1273 1273 } ··· 2389 2387 int status; 2390 2388 unsigned int tmp; 2391 2389 struct ocfs2_group_desc *undo_bg = NULL; 2390 + struct journal_head *jh; 2392 2391 2393 2392 /* The caller got this descriptor from 2394 2393 * ocfs2_read_group_descriptor(). Any corruption is a code bug. */ ··· 2408 2405 goto bail; 2409 2406 } 2410 2407 2408 + jh = bh2jh(group_bh); 2411 2409 if (undo_fn) { 2412 - jbd_lock_bh_state(group_bh); 2413 - undo_bg = (struct ocfs2_group_desc *) 2414 - bh2jh(group_bh)->b_committed_data; 2410 + spin_lock(&jh->b_state_lock); 2411 + undo_bg = (struct ocfs2_group_desc *) jh->b_committed_data; 2415 2412 BUG_ON(!undo_bg); 2416 2413 } 2417 2414 ··· 2426 2423 le16_add_cpu(&bg->bg_free_bits_count, num_bits); 2427 2424 if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) { 2428 2425 if (undo_fn) 2429 - jbd_unlock_bh_state(group_bh); 2426 + spin_unlock(&jh->b_state_lock); 2430 2427 return ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit count %u but claims %u are freed. 
num_bits %d\n", 2431 2428 (unsigned long long)le64_to_cpu(bg->bg_blkno), 2432 2429 le16_to_cpu(bg->bg_bits), ··· 2435 2432 } 2436 2433 2437 2434 if (undo_fn) 2438 - jbd_unlock_bh_state(group_bh); 2435 + spin_unlock(&jh->b_state_lock); 2439 2436 2440 2437 ocfs2_journal_dirty(handle, group_bh); 2441 2438 bail:
+2 -18
include/linux/jbd2.h
··· 313 313 BH_Revoked, /* Has been revoked from the log */ 314 314 BH_RevokeValid, /* Revoked flag is valid */ 315 315 BH_JBDDirty, /* Is dirty but journaled */ 316 - BH_State, /* Pins most journal_head state */ 317 316 BH_JournalHead, /* Pins bh->b_private and jh->b_bh */ 318 317 BH_Shadow, /* IO on shadow buffer is running */ 319 318 BH_Verified, /* Metadata block has been verified ok */ ··· 339 340 static inline struct journal_head *bh2jh(struct buffer_head *bh) 340 341 { 341 342 return bh->b_private; 342 - } 343 - 344 - static inline void jbd_lock_bh_state(struct buffer_head *bh) 345 - { 346 - bit_spin_lock(BH_State, &bh->b_state); 347 - } 348 - 349 - static inline int jbd_is_locked_bh_state(struct buffer_head *bh) 350 - { 351 - return bit_spin_is_locked(BH_State, &bh->b_state); 352 - } 353 - 354 - static inline void jbd_unlock_bh_state(struct buffer_head *bh) 355 - { 356 - bit_spin_unlock(BH_State, &bh->b_state); 357 343 } 358 344 359 345 static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) ··· 535 551 * ->jbd_lock_bh_journal_head() (This is "innermost") 536 552 * 537 553 * j_state_lock 538 - * ->jbd_lock_bh_state() 554 + * ->b_state_lock 539 555 * 540 - * jbd_lock_bh_state() 556 + * b_state_lock 541 557 * ->j_list_lock 542 558 * 543 559 * j_state_lock
+14 -7
include/linux/journal-head.h
··· 11 11 #ifndef JOURNAL_HEAD_H_INCLUDED 12 12 #define JOURNAL_HEAD_H_INCLUDED 13 13 14 + #include <linux/spinlock.h> 15 + 14 16 typedef unsigned int tid_t; /* Unique transaction ID */ 15 17 typedef struct transaction_s transaction_t; /* Compound transaction type */ 16 18 ··· 26 24 struct buffer_head *b_bh; 27 25 28 26 /* 27 + * Protect the buffer head state 28 + */ 29 + spinlock_t b_state_lock; 30 + 31 + /* 29 32 * Reference count - see description in journal.c 30 33 * [jbd_lock_bh_journal_head()] 31 34 */ 32 35 int b_jcount; 33 36 34 37 /* 35 - * Journalling list for this buffer [jbd_lock_bh_state()] 38 + * Journalling list for this buffer [b_state_lock] 36 39 * NOTE: We *cannot* combine this with b_modified into a bitfield 37 40 * as gcc would then (which the C standard allows but which is 38 41 * very unuseful) make 64-bit accesses to the bitfield and clobber ··· 48 41 /* 49 42 * This flag signals the buffer has been modified by 50 43 * the currently running transaction 51 - * [jbd_lock_bh_state()] 44 + * [b_state_lock] 52 45 */ 53 46 unsigned b_modified; 54 47 55 48 /* 56 49 * Copy of the buffer data frozen for writing to the log. 57 - * [jbd_lock_bh_state()] 50 + * [b_state_lock] 58 51 */ 59 52 char *b_frozen_data; 60 53 61 54 /* 62 55 * Pointer to a saved copy of the buffer containing no uncommitted 63 56 * deallocation references, so that allocations can avoid overwriting 64 - * uncommitted deletes. [jbd_lock_bh_state()] 57 + * uncommitted deletes. [b_state_lock] 65 58 */ 66 59 char *b_committed_data; 67 60 ··· 70 63 * metadata: either the running transaction or the committing 71 64 * transaction (if there is one). Only applies to buffers on a 72 65 * transaction's data or metadata journaling list. 73 - * [j_list_lock] [jbd_lock_bh_state()] 66 + * [j_list_lock] [b_state_lock] 74 67 * Either of these locks is enough for reading, both are needed for 75 68 * changes. 
76 69 */ ··· 80 73 * Pointer to the running compound transaction which is currently 81 74 * modifying the buffer's metadata, if there was already a transaction 82 75 * committing it when the new transaction touched it. 83 - * [t_list_lock] [jbd_lock_bh_state()] 76 + * [t_list_lock] [b_state_lock] 84 77 */ 85 78 transaction_t *b_next_transaction; 86 79 87 80 /* 88 81 * Doubly-linked list of buffers on a transaction's data, metadata or 89 - * forget queue. [t_list_lock] [jbd_lock_bh_state()] 82 + * forget queue. [t_list_lock] [b_state_lock] 90 83 */ 91 84 struct journal_head *b_tnext, *b_tprev; 92 85