Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fs: rename buffer trylock

Like the page lock change, this also requires a name change, so convert the
raw test_and_set bitop to a trylock.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Nick Piggin and committed by Linus Torvalds
ca5de404 529ae9aa

+17 -13
+2 -2
fs/buffer.c
··· 1720 1720 */ 1721 1721 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { 1722 1722 lock_buffer(bh); 1723 - } else if (test_set_buffer_locked(bh)) { 1723 + } else if (!trylock_buffer(bh)) { 1724 1724 redirty_page_for_writepage(wbc, page); 1725 1725 continue; 1726 1726 } ··· 3000 3000 3001 3001 if (rw == SWRITE || rw == SWRITE_SYNC) 3002 3002 lock_buffer(bh); 3003 - else if (test_set_buffer_locked(bh)) 3003 + else if (!trylock_buffer(bh)) 3004 3004 continue; 3005 3005 3006 3006 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
+1 -1
fs/jbd/commit.c
··· 221 221 * blocking lock_buffer(). 222 222 */ 223 223 if (buffer_dirty(bh)) { 224 - if (test_set_buffer_locked(bh)) { 224 + if (!trylock_buffer(bh)) { 225 225 BUFFER_TRACE(bh, "needs blocking lock"); 226 226 spin_unlock(&journal->j_list_lock); 227 227 /* Write out all data to prevent deadlocks */
+1 -1
fs/ntfs/aops.c
··· 1194 1194 tbh = bhs[i]; 1195 1195 if (!tbh) 1196 1196 continue; 1197 - if (unlikely(test_set_buffer_locked(tbh))) 1197 + if (!trylock_buffer(tbh)) 1198 1198 BUG(); 1199 1199 /* The buffer dirty state is now irrelevant, just clean it. */ 1200 1200 clear_buffer_dirty(tbh);
+1 -1
fs/ntfs/compress.c
··· 665 665 for (i = 0; i < nr_bhs; i++) { 666 666 struct buffer_head *tbh = bhs[i]; 667 667 668 - if (unlikely(test_set_buffer_locked(tbh))) 668 + if (!trylock_buffer(tbh)) 669 669 continue; 670 670 if (unlikely(buffer_uptodate(tbh))) { 671 671 unlock_buffer(tbh);
+2 -2
fs/ntfs/mft.c
··· 586 586 for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) { 587 587 struct buffer_head *tbh = bhs[i_bhs]; 588 588 589 - if (unlikely(test_set_buffer_locked(tbh))) 589 + if (!trylock_buffer(tbh)) 590 590 BUG(); 591 591 BUG_ON(!buffer_uptodate(tbh)); 592 592 clear_buffer_dirty(tbh); ··· 779 779 for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) { 780 780 struct buffer_head *tbh = bhs[i_bhs]; 781 781 782 - if (unlikely(test_set_buffer_locked(tbh))) 782 + if (!trylock_buffer(tbh)) 783 783 BUG(); 784 784 BUG_ON(!buffer_uptodate(tbh)); 785 785 clear_buffer_dirty(tbh);
+1 -1
fs/reiserfs/inode.c
··· 2435 2435 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { 2436 2436 lock_buffer(bh); 2437 2437 } else { 2438 - if (test_set_buffer_locked(bh)) { 2438 + if (!trylock_buffer(bh)) { 2439 2439 redirty_page_for_writepage(wbc, page); 2440 2440 continue; 2441 2441 }
+2 -2
fs/reiserfs/journal.c
··· 855 855 jh = JH_ENTRY(list->next); 856 856 bh = jh->bh; 857 857 get_bh(bh); 858 - if (test_set_buffer_locked(bh)) { 858 + if (!trylock_buffer(bh)) { 859 859 if (!buffer_dirty(bh)) { 860 860 list_move(&jh->list, &tmp); 861 861 goto loop_next; ··· 3871 3871 { 3872 3872 PROC_INFO_INC(p_s_sb, journal.prepare); 3873 3873 3874 - if (test_set_buffer_locked(bh)) { 3874 + if (!trylock_buffer(bh)) { 3875 3875 if (!wait) 3876 3876 return 0; 3877 3877 lock_buffer(bh);
+1 -1
fs/xfs/linux-2.6/xfs_aops.c
··· 1104 1104 * that we are writing into for the first time. 1105 1105 */ 1106 1106 type = IOMAP_NEW; 1107 - if (!test_and_set_bit(BH_Lock, &bh->b_state)) { 1107 + if (trylock_buffer(bh)) { 1108 1108 ASSERT(buffer_mapped(bh)); 1109 1109 if (iomap_valid) 1110 1110 all_bh = 1;
+6 -2
include/linux/buffer_head.h
··· 115 115 BUFFER_FNS(Dirty, dirty) 116 116 TAS_BUFFER_FNS(Dirty, dirty) 117 117 BUFFER_FNS(Lock, locked) 118 - TAS_BUFFER_FNS(Lock, locked) 119 118 BUFFER_FNS(Req, req) 120 119 TAS_BUFFER_FNS(Req, req) 121 120 BUFFER_FNS(Mapped, mapped) ··· 320 321 __wait_on_buffer(bh); 321 322 } 322 323 324 + static inline int trylock_buffer(struct buffer_head *bh) 325 + { 326 + return likely(!test_and_set_bit(BH_Lock, &bh->b_state)); 327 + } 328 + 323 329 static inline void lock_buffer(struct buffer_head *bh) 324 330 { 325 331 might_sleep(); 326 - if (test_set_buffer_locked(bh)) 332 + if (!trylock_buffer(bh)) 327 333 __lock_buffer(bh); 328 334 } 329 335