Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

fs/buffer: Combine two submit_bh() and ll_rw_block() arguments

Both submit_bh() and ll_rw_block() accept a request operation type and
request flags as their first two arguments. Micro-optimize these two
functions by combining these first two arguments into a single argument.
This patch does not change the behavior of any of the modified code.

Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Jan Kara <jack@suse.cz>
Acked-by: Song Liu <song@kernel.org> (for the md changes)
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-48-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
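
The conversion pattern is mechanical: each caller that used to pass the operation and the flags as two arguments now ORs them together into a single blk_opf_t, and callees that still need the bare operation mask it back out with REQ_OP_MASK. Below is a minimal, self-contained userspace sketch of that encoding; submit_bh_sketch() and the bit positions are illustrative stand-ins, not the real values from include/linux/blk_types.h:

#include <stdio.h>

/* Stand-ins for the kernel types/constants (illustrative values only):
 * the low bits of a blk_opf_t hold the req_op, higher bits hold flags. */
typedef unsigned int blk_opf_t;
enum req_op { REQ_OP_READ = 0, REQ_OP_WRITE = 1 };
#define REQ_OP_MASK ((blk_opf_t)((1u << 8) - 1))
#define REQ_SYNC    ((blk_opf_t)(1u << 11))

/* New-style submit_bh(): one argument carries both op and flags. */
static void submit_bh_sketch(blk_opf_t opf)
{
        enum req_op op = opf & REQ_OP_MASK;    /* recover the operation */
        blk_opf_t flags = opf & ~REQ_OP_MASK;  /* recover the flags */

        printf("op=%d flags=0x%x\n", op, flags);
}

int main(void)
{
        /* Old call:  submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
         * New call:  submit_bh(REQ_OP_WRITE | REQ_SYNC, bh); */
        submit_bh_sketch(REQ_OP_WRITE | REQ_SYNC);
        return 0;
}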

Authored by Bart Van Assche and committed by Jens Axboe
1420c4a5 3ae72869

+88 -90
+2 -2
drivers/md/md-bitmap.c
@@ -302,7 +302,7 @@
                atomic_inc(&bitmap->pending_writes);
                set_buffer_locked(bh);
                set_buffer_mapped(bh);
-               submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+               submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
                bh = bh->b_this_page;
        }
 
@@ -394,7 +394,7 @@
                        atomic_inc(&bitmap->pending_writes);
                        set_buffer_locked(bh);
                        set_buffer_mapped(bh);
-                       submit_bh(REQ_OP_READ, 0, bh);
+                       submit_bh(REQ_OP_READ, bh);
                }
                blk_cur++;
                bh = bh->b_this_page;
+27 -26
fs/buffer.c
@@ -52,8 +52,8 @@
 #include "internal.h"
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static int submit_bh_wbc(enum req_op op, blk_opf_t op_flags,
-                         struct buffer_head *bh, struct writeback_control *wbc);
+static int submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
+                         struct writeback_control *wbc);
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
@@ -562,7 +562,7 @@
        struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
        if (bh) {
                if (buffer_dirty(bh))
-                       ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
+                       ll_rw_block(REQ_OP_WRITE, 1, &bh);
                put_bh(bh);
        }
 }
@@ -1174,7 +1174,7 @@
        } else {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
-               submit_bh(REQ_OP_READ, 0, bh);
+               submit_bh(REQ_OP_READ, bh);
                wait_on_buffer(bh);
                if (buffer_uptodate(bh))
                        return bh;
@@ -1342,7 +1342,7 @@
 {
        struct buffer_head *bh = __getblk(bdev, block, size);
        if (likely(bh)) {
-               ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
+               ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
                brelse(bh);
        }
 }
@@ -1353,7 +1353,7 @@
 {
        struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
        if (likely(bh)) {
-               ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
+               ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
                brelse(bh);
        }
 }
@@ -1804,7 +1804,7 @@
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
-                       submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
+                       submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
                        nr_underway++;
                }
                bh = next;
@@ -1858,7 +1858,7 @@
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        clear_buffer_dirty(bh);
-                       submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
+                       submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
                        nr_underway++;
                }
                bh = next;
@@ -2033,7 +2033,7 @@
                if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
                    !buffer_unwritten(bh) &&
                    (block_start < from || block_end > to)) {
-                       ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+                       ll_rw_block(REQ_OP_READ, 1, &bh);
                        *wait_bh++=bh;
                }
        }
@@ -2334,7 +2334,7 @@
                if (buffer_uptodate(bh))
                        end_buffer_async_read(bh, 1);
                else
-                       submit_bh(REQ_OP_READ, 0, bh);
+                       submit_bh(REQ_OP_READ, bh);
        }
        return 0;
 }
@@ -2665,7 +2665,7 @@
                if (block_start < from || block_end > to) {
                        lock_buffer(bh);
                        bh->b_end_io = end_buffer_read_nobh;
-                       submit_bh(REQ_OP_READ, 0, bh);
+                       submit_bh(REQ_OP_READ, bh);
                        nr_reads++;
                }
        }
@@ -2915,7 +2915,7 @@
 
        if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
                err = -EIO;
-               ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+               ll_rw_block(REQ_OP_READ, 1, &bh);
                wait_on_buffer(bh);
                /* Uhhuh. Read error. Complain and punt. */
                if (!buffer_uptodate(bh))
@@ -2994,9 +2994,10 @@
        bio_put(bio);
 }
 
-static int submit_bh_wbc(enum req_op op, blk_opf_t op_flags,
-                         struct buffer_head *bh, struct writeback_control *wbc)
+static int submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
+                         struct writeback_control *wbc)
 {
+       const enum req_op op = opf & REQ_OP_MASK;
        struct bio *bio;
 
        BUG_ON(!buffer_locked(bh));
@@ -3013,11 +3012,11 @@
                clear_buffer_write_io_error(bh);
 
        if (buffer_meta(bh))
-               op_flags |= REQ_META;
+               opf |= REQ_META;
        if (buffer_prio(bh))
-               op_flags |= REQ_PRIO;
+               opf |= REQ_PRIO;
 
-       bio = bio_alloc(bh->b_bdev, 1, op | op_flags, GFP_NOIO);
+       bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
 
        fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
 
@@ -3041,9 +3040,9 @@
        return 0;
 }
 
-int submit_bh(enum req_op op, blk_opf_t op_flags, struct buffer_head *bh)
+int submit_bh(blk_opf_t opf, struct buffer_head *bh)
 {
-       return submit_bh_wbc(op, op_flags, bh, NULL);
+       return submit_bh_wbc(opf, bh, NULL);
 }
 EXPORT_SYMBOL(submit_bh);
 
@@ -3073,9 +3072,9 @@
  * All of the buffers must be for the same device, and must also be a
  * multiple of the current approved size for the device.
  */
-void ll_rw_block(enum req_op op, blk_opf_t op_flags, int nr,
-                struct buffer_head *bhs[])
+void ll_rw_block(const blk_opf_t opf, int nr, struct buffer_head *bhs[])
 {
+       const enum req_op op = opf & REQ_OP_MASK;
        int i;
 
        for (i = 0; i < nr; i++) {
@@ -3087,14 +3086,14 @@
                        if (test_clear_buffer_dirty(bh)) {
                                bh->b_end_io = end_buffer_write_sync;
                                get_bh(bh);
-                               submit_bh(op, op_flags, bh);
+                               submit_bh(opf, bh);
                                continue;
                        }
                } else {
                        if (!buffer_uptodate(bh)) {
                                bh->b_end_io = end_buffer_read_sync;
                                get_bh(bh);
-                               submit_bh(op, op_flags, bh);
+                               submit_bh(opf, bh);
                                continue;
                        }
                }
@@ -3112,7 +3111,7 @@
        }
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
-       submit_bh(REQ_OP_WRITE, op_flags, bh);
+       submit_bh(REQ_OP_WRITE | op_flags, bh);
 }
 EXPORT_SYMBOL(write_dirty_buffer);
 
@@ -3139,7 +3138,7 @@
 
        get_bh(bh);
        bh->b_end_io = end_buffer_write_sync;
-       ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
+       ret = submit_bh(REQ_OP_WRITE | op_flags, bh);
        wait_on_buffer(bh);
        if (!ret && !buffer_uptodate(bh))
                ret = -EIO;
@@ -3367,7 +3366,7 @@
 
        get_bh(bh);
        bh->b_end_io = end_buffer_read_sync;
-       submit_bh(REQ_OP_READ, 0, bh);
+       submit_bh(REQ_OP_READ, bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return 0;
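
One detail worth noting in the fs/buffer.c hunks above: submit_bh_wbc() now ORs REQ_META and REQ_PRIO into opf after the combined value has been formed, and both submit_bh_wbc() and ll_rw_block() recover the operation with "opf & REQ_OP_MASK". That works because the operation and the flags occupy disjoint bit ranges. A small self-contained check of the property, again with illustrative stand-in values rather than the real blk_types.h constants:

#include <assert.h>
#include <stdio.h>

typedef unsigned int blk_opf_t;
enum req_op { REQ_OP_READ = 0, REQ_OP_WRITE = 1 };
#define REQ_OP_MASK ((blk_opf_t)((1u << 8) - 1))
#define REQ_META    ((blk_opf_t)(1u << 12))  /* illustrative bit positions */
#define REQ_PRIO    ((blk_opf_t)(1u << 13))

int main(void)
{
        blk_opf_t opf = REQ_OP_WRITE;

        /* Mirrors submit_bh_wbc(): flags ORed in after the combined
         * argument was formed. */
        opf |= REQ_META;
        opf |= REQ_PRIO;

        /* The operation is still recoverable, as the patch does with
         * "const enum req_op op = opf & REQ_OP_MASK;". */
        assert((opf & REQ_OP_MASK) == REQ_OP_WRITE);
        printf("opf=0x%x op=%u\n", opf, opf & REQ_OP_MASK);
        return 0;
}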
+1 -1
fs/ext4/fast_commit.c
@@ -668,7 +668,7 @@
        set_buffer_dirty(bh);
        set_buffer_uptodate(bh);
        bh->b_end_io = ext4_end_buffer_io_sync;
-       submit_bh(REQ_OP_WRITE, write_flags, bh);
+       submit_bh(REQ_OP_WRITE | write_flags, bh);
        EXT4_SB(sb)->s_fc_bh = NULL;
 }
 
+1 -1
fs/ext4/mmp.c
@@ -52,7 +52,7 @@
        lock_buffer(bh);
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
-       submit_bh(REQ_OP_WRITE, REQ_SYNC | REQ_META | REQ_PRIO, bh);
+       submit_bh(REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO, bh);
        wait_on_buffer(bh);
        sb_end_write(sb);
        if (unlikely(!buffer_uptodate(bh)))
+3 -3
fs/ext4/super.c
@@ -171,7 +171,7 @@
 
        bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
        get_bh(bh);
-       submit_bh(REQ_OP_READ, op_flags, bh);
+       submit_bh(REQ_OP_READ | op_flags, bh);
 }
 
 void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
@@ -5939,8 +5939,8 @@
        /* Clear potential dirty bit if it was journalled update */
        clear_buffer_dirty(sbh);
        sbh->b_end_io = end_buffer_write_sync;
-       submit_bh(REQ_OP_WRITE,
-                 REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
+       submit_bh(REQ_OP_WRITE | REQ_SYNC |
+                 (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
        wait_on_buffer(sbh);
        if (buffer_write_io_error(sbh)) {
                ext4_msg(sb, KERN_ERR, "I/O error while writing "
+2 -3
fs/gfs2/bmap.c
@@ -310,9 +310,8 @@
                if (trylock_buffer(rabh)) {
                        if (!buffer_uptodate(rabh)) {
                                rabh->b_end_io = end_buffer_read_sync;
-                               submit_bh(REQ_OP_READ,
-                                         REQ_RAHEAD | REQ_META | REQ_PRIO,
-                                         rabh);
+                               submit_bh(REQ_OP_READ | REQ_RAHEAD | REQ_META |
+                                         REQ_PRIO, rabh);
                                continue;
                        }
                        unlock_buffer(rabh);
+2 -3
fs/gfs2/dir.c
@@ -1508,9 +1508,8 @@
                        continue;
                }
                bh->b_end_io = end_buffer_read_sync;
-               submit_bh(REQ_OP_READ,
-                         REQ_RAHEAD | REQ_META | REQ_PRIO,
-                         bh);
+               submit_bh(REQ_OP_READ | REQ_RAHEAD | REQ_META |
+                         REQ_PRIO, bh);
                continue;
        }
        brelse(bh);
+4 -5
fs/gfs2/meta_io.c
@@ -75,7 +75,7 @@
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
-                       submit_bh(REQ_OP_WRITE, write_flags, bh);
+                       submit_bh(REQ_OP_WRITE | write_flags, bh);
                        nr_underway++;
                }
                bh = next;
@@ -527,7 +527,7 @@
        if (buffer_uptodate(first_bh))
                goto out;
        if (!buffer_locked(first_bh))
-               ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &first_bh);
+               ll_rw_block(REQ_OP_READ | REQ_META | REQ_PRIO, 1, &first_bh);
 
        dblock++;
        extlen--;
@@ -536,9 +536,8 @@
                bh = gfs2_getbuf(gl, dblock, CREATE);
 
                if (!buffer_uptodate(bh) && !buffer_locked(bh))
-                       ll_rw_block(REQ_OP_READ,
-                                   REQ_RAHEAD | REQ_META | REQ_PRIO,
-                                   1, &bh);
+                       ll_rw_block(REQ_OP_READ | REQ_RAHEAD | REQ_META |
+                                   REQ_PRIO, 1, &bh);
                brelse(bh);
                dblock++;
                extlen--;
+1 -1
fs/gfs2/quota.c
@@ -746,7 +746,7 @@
        if (PageUptodate(page))
                set_buffer_uptodate(bh);
        if (!buffer_uptodate(bh)) {
-               ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
+               ll_rw_block(REQ_OP_READ | REQ_META | REQ_PRIO, 1, &bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        goto unlock_out;
+1 -1
fs/isofs/compress.c
@@ -82,7 +82,7 @@
                return 0;
        }
        haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
-       ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
+       ll_rw_block(REQ_OP_READ, haveblocks, bhs);
 
        curbh = 0;
        curpage = 0;
+4 -4
fs/jbd2/commit.c
@@ -155,10 +155,10 @@
 
        if (journal->j_flags & JBD2_BARRIER &&
            !jbd2_has_feature_async_commit(journal))
-               ret = submit_bh(REQ_OP_WRITE,
-                               REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
+               ret = submit_bh(REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH |
+                               REQ_FUA, bh);
        else
-               ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+               ret = submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
 
        *cbh = bh;
        return ret;
@@ -763,7 +763,7 @@
                clear_buffer_dirty(bh);
                set_buffer_uptodate(bh);
                bh->b_end_io = journal_end_buffer_io_sync;
-               submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+               submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
        }
        cond_resched();
 
+2 -2
fs/jbd2/journal.c
@@ -1638,7 +1638,7 @@
        sb->s_checksum = jbd2_superblock_csum(journal, sb);
        get_bh(bh);
        bh->b_end_io = end_buffer_write_sync;
-       ret = submit_bh(REQ_OP_WRITE, write_flags, bh);
+       ret = submit_bh(REQ_OP_WRITE | write_flags, bh);
        wait_on_buffer(bh);
        if (buffer_write_io_error(bh)) {
                clear_buffer_write_io_error(bh);
@@ -1900,7 +1900,7 @@
 
        J_ASSERT(bh != NULL);
        if (!buffer_uptodate(bh)) {
-               ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+               ll_rw_block(REQ_OP_READ, 1, &bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        printk(KERN_ERR
+2 -2
fs/jbd2/recovery.c
@@ -100,7 +100,7 @@
                if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
                        bufs[nbufs++] = bh;
                        if (nbufs == MAXBUF) {
-                               ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
+                               ll_rw_block(REQ_OP_READ, nbufs, bufs);
                                journal_brelse_array(bufs, nbufs);
                                nbufs = 0;
                        }
@@ -109,7 +109,7 @@
        }
 
        if (nbufs)
-               ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
+               ll_rw_block(REQ_OP_READ, nbufs, bufs);
        err = 0;
 
 failed:
+1 -1
fs/nilfs2/btnode.c
@@ -122,7 +122,7 @@
        bh->b_blocknr = pblocknr; /* set block address for read */
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
-       submit_bh(mode, mode_flags, bh);
+       submit_bh(mode | mode_flags, bh);
        bh->b_blocknr = blocknr; /* set back to the given block address */
        *submit_ptr = pblocknr;
        err = 0;
+1 -1
fs/nilfs2/gcinode.c
@@ -92,7 +92,7 @@
        bh->b_blocknr = pbn;
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
-       submit_bh(REQ_OP_READ, 0, bh);
+       submit_bh(REQ_OP_READ, bh);
        if (vbn)
                bh->b_blocknr = vbn;
 out:
+1 -1
fs/nilfs2/mdt.c
@@ -148,7 +148,7 @@
 
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
-       submit_bh(mode, mode_flags, bh);
+       submit_bh(mode | mode_flags, bh);
        ret = 0;
 
        trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff, mode);
+3 -3
fs/ntfs/aops.c
@@ -342,7 +342,7 @@
        for (i = 0; i < nr; i++) {
                tbh = arr[i];
                if (likely(!buffer_uptodate(tbh)))
-                       submit_bh(REQ_OP_READ, 0, tbh);
+                       submit_bh(REQ_OP_READ, tbh);
                else
                        ntfs_end_buffer_async_read(tbh, 1);
        }
@@ -859,7 +859,7 @@
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
-                       submit_bh(REQ_OP_WRITE, 0, bh);
+                       submit_bh(REQ_OP_WRITE, bh);
                        need_end_writeback = false;
                }
                bh = next;
@@ -1187,7 +1187,7 @@
                BUG_ON(!buffer_mapped(tbh));
                get_bh(tbh);
                tbh->b_end_io = end_buffer_write_sync;
-               submit_bh(REQ_OP_WRITE, 0, tbh);
+               submit_bh(REQ_OP_WRITE, tbh);
        }
        /* Synchronize the mft mirror now if not @sync. */
        if (is_mft && !sync)
+1 -1
fs/ntfs/compress.c
@@ -658,7 +658,7 @@
                }
                get_bh(tbh);
                tbh->b_end_io = end_buffer_read_sync;
-               submit_bh(REQ_OP_READ, 0, tbh);
+               submit_bh(REQ_OP_READ, tbh);
        }
 
        /* Wait for io completion on all buffer heads. */
+1 -1
fs/ntfs/file.c
@@ -537,7 +537,7 @@
        lock_buffer(bh);
        get_bh(bh);
        bh->b_end_io = end_buffer_read_sync;
-       return submit_bh(REQ_OP_READ, 0, bh);
+       return submit_bh(REQ_OP_READ, bh);
 }
 
 /**
+1 -1
fs/ntfs/logfile.c
@@ -807,7 +807,7 @@
                 * completed ignore errors afterwards as we can assume
                 * that if one buffer worked all of them will work.
                 */
-               submit_bh(REQ_OP_WRITE, 0, bh);
+               submit_bh(REQ_OP_WRITE, bh);
                if (should_wait) {
                        should_wait = false;
                        wait_on_buffer(bh);
+2 -2
fs/ntfs/mft.c
@@ -583,7 +583,7 @@
                clear_buffer_dirty(tbh);
                get_bh(tbh);
                tbh->b_end_io = end_buffer_write_sync;
-               submit_bh(REQ_OP_WRITE, 0, tbh);
+               submit_bh(REQ_OP_WRITE, tbh);
        }
        /* Wait on i/o completion of buffers. */
        for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
@@ -780,7 +780,7 @@
                clear_buffer_dirty(tbh);
                get_bh(tbh);
                tbh->b_end_io = end_buffer_write_sync;
-               submit_bh(REQ_OP_WRITE, 0, tbh);
+               submit_bh(REQ_OP_WRITE, tbh);
        }
        /* Synchronize the mft mirror now if not @sync. */
        if (!sync && ni->mft_no < vol->mftmirr_size)
+1 -1
fs/ntfs3/file.c
@@ -242,7 +242,7 @@
                        lock_buffer(bh);
                        bh->b_end_io = end_buffer_read_sync;
                        get_bh(bh);
-                       submit_bh(REQ_OP_READ, 0, bh);
+                       submit_bh(REQ_OP_READ, bh);
 
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh)) {
+1 -1
fs/ntfs3/inode.c
@@ -629,7 +629,7 @@
                        bh->b_size = block_size;
                        off = vbo & (PAGE_SIZE - 1);
                        set_bh_page(bh, page, off);
-                       ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+                       ll_rw_block(REQ_OP_READ, 1, &bh);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh)) {
                                err = -EIO;
+1 -1
fs/ocfs2/aops.c
@@ -638,7 +638,7 @@
                    !buffer_new(bh) &&
                    ocfs2_should_read_blk(inode, page, block_start) &&
                    (block_start < from || block_end > to)) {
-                       ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+                       ll_rw_block(REQ_OP_READ, 1, &bh);
                        *wait_bh++=bh;
                }
 
+4 -4
fs/ocfs2/buffer_head_io.c
@@ -64,7 +64,7 @@
 
        get_bh(bh); /* for end_buffer_write_sync() */
        bh->b_end_io = end_buffer_write_sync;
-       submit_bh(REQ_OP_WRITE, 0, bh);
+       submit_bh(REQ_OP_WRITE, bh);
 
        wait_on_buffer(bh);
 
@@ -147,7 +147,7 @@
 
                get_bh(bh); /* for end_buffer_read_sync() */
                bh->b_end_io = end_buffer_read_sync;
-               submit_bh(REQ_OP_READ, 0, bh);
+               submit_bh(REQ_OP_READ, bh);
        }
 
 read_failure:
@@ -328,7 +328,7 @@
                        if (validate)
                                set_buffer_needs_validate(bh);
                        bh->b_end_io = end_buffer_read_sync;
-                       submit_bh(REQ_OP_READ, 0, bh);
+                       submit_bh(REQ_OP_READ, bh);
                        continue;
                }
        }
@@ -449,7 +449,7 @@
        get_bh(bh); /* for end_buffer_write_sync() */
        bh->b_end_io = end_buffer_write_sync;
        ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
-       submit_bh(REQ_OP_WRITE, 0, bh);
+       submit_bh(REQ_OP_WRITE, bh);
 
        wait_on_buffer(bh);
 
+1 -1
fs/ocfs2/super.c
@@ -1785,7 +1785,7 @@
        if (!buffer_dirty(*bh))
                clear_buffer_uptodate(*bh);
        unlock_buffer(*bh);
-       ll_rw_block(REQ_OP_READ, 0, 1, bh);
+       ll_rw_block(REQ_OP_READ, 1, bh);
        wait_on_buffer(*bh);
        if (!buffer_uptodate(*bh)) {
                mlog_errno(-EIO);
+2 -2
fs/reiserfs/inode.c
@@ -2664,7 +2664,7 @@
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
-                       submit_bh(REQ_OP_WRITE, 0, bh);
+                       submit_bh(REQ_OP_WRITE, bh);
                        nr++;
                }
                put_bh(bh);
@@ -2724,7 +2724,7 @@
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        clear_buffer_dirty(bh);
-                       submit_bh(REQ_OP_WRITE, 0, bh);
+                       submit_bh(REQ_OP_WRITE, bh);
                        nr++;
                }
                put_bh(bh);
+6 -6
fs/reiserfs/journal.c
@@ -650,7 +650,7 @@
                BUG();
        if (!buffer_uptodate(bh))
                BUG();
-       submit_bh(REQ_OP_WRITE, 0, bh);
+       submit_bh(REQ_OP_WRITE, bh);
 }
 
 static void submit_ordered_buffer(struct buffer_head *bh)
@@ -660,7 +660,7 @@
        clear_buffer_dirty(bh);
        if (!buffer_uptodate(bh))
                BUG();
-       submit_bh(REQ_OP_WRITE, 0, bh);
+       submit_bh(REQ_OP_WRITE, bh);
 }
 
 #define CHUNK_SIZE 32
@@ -868,7 +868,7 @@
                 */
                if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
                        spin_unlock(lock);
-                       ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
+                       ll_rw_block(REQ_OP_WRITE, 1, &bh);
                        spin_lock(lock);
                }
                put_bh(bh);
@@ -1054,7 +1054,7 @@
                if (tbh) {
                        if (buffer_dirty(tbh)) {
                                depth = reiserfs_write_unlock_nested(s);
-                               ll_rw_block(REQ_OP_WRITE, 0, 1, &tbh);
+                               ll_rw_block(REQ_OP_WRITE, 1, &tbh);
                                reiserfs_write_lock_nested(s, depth);
                        }
                        put_bh(tbh) ;
@@ -2240,7 +2240,7 @@
                }
        }
        /* read in the log blocks, memcpy to the corresponding real block */
-       ll_rw_block(REQ_OP_READ, 0, get_desc_trans_len(desc), log_blocks);
+       ll_rw_block(REQ_OP_READ, get_desc_trans_len(desc), log_blocks);
        for (i = 0; i < get_desc_trans_len(desc); i++) {
 
                wait_on_buffer(log_blocks[i]);
@@ -2342,7 +2342,7 @@
                } else
                        bhlist[j++] = bh;
        }
-       ll_rw_block(REQ_OP_READ, 0, j, bhlist);
+       ll_rw_block(REQ_OP_READ, j, bhlist);
        for (i = 1; i < j; i++)
                brelse(bhlist[i]);
        bh = bhlist[0];
+2 -2
fs/reiserfs/stree.c
@@ -579,7 +579,7 @@
                        if (!buffer_uptodate(bh[j])) {
                                if (depth == -1)
                                        depth = reiserfs_write_unlock_nested(s);
-                               ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, bh + j);
+                               ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, bh + j);
                        }
                        brelse(bh[j]);
                }
@@ -685,7 +685,7 @@
        if (!buffer_uptodate(bh) && depth == -1)
                depth = reiserfs_write_unlock_nested(sb);
 
-       ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+       ll_rw_block(REQ_OP_READ, 1, &bh);
        wait_on_buffer(bh);
 
        if (depth != -1)
+1 -1
fs/reiserfs/super.c
@@ -1702,7 +1702,7 @@
 /* after journal replay, reread all bitmap and super blocks */
 static int reread_meta_blocks(struct super_block *s)
 {
-       ll_rw_block(REQ_OP_READ, 0, 1, &SB_BUFFER_WITH_SB(s));
+       ll_rw_block(REQ_OP_READ, 1, &SB_BUFFER_WITH_SB(s));
        wait_on_buffer(SB_BUFFER_WITH_SB(s));
        if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
                reiserfs_warning(s, "reiserfs-2504", "error reading the super");
+1 -1
fs/udf/dir.c
@@ -130,7 +130,7 @@
                        brelse(tmp);
                }
                if (num) {
-                       ll_rw_block(REQ_OP_READ, REQ_RAHEAD, num, bha);
+                       ll_rw_block(REQ_OP_READ | REQ_RAHEAD, num, bha);
                        for (i = 0; i < num; i++)
                                brelse(bha[i]);
                }
+1 -1
fs/udf/directory.c
@@ -89,7 +89,7 @@
                        brelse(tmp);
                }
                if (num) {
-                       ll_rw_block(REQ_OP_READ, REQ_RAHEAD, num, bha);
+                       ll_rw_block(REQ_OP_READ | REQ_RAHEAD, num, bha);
                        for (i = 0; i < num; i++)
                                brelse(bha[i]);
                }
+1 -1
fs/udf/inode.c
@@ -1214,7 +1214,7 @@
        if (buffer_uptodate(bh))
                return bh;
 
-       ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+       ll_rw_block(REQ_OP_READ, 1, &bh);
 
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
+1 -1
fs/ufs/balloc.c
@@ -296,7 +296,7 @@
                if (!buffer_mapped(bh))
                        map_bh(bh, inode->i_sb, oldb + pos);
                if (!buffer_uptodate(bh)) {
-                       ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+                       ll_rw_block(REQ_OP_READ, 1, &bh);
                        wait_on_buffer(bh);
                        if (!buffer_uptodate(bh)) {
                                ufs_error(inode->i_sb, __func__,
+2 -2
include/linux/buffer_head.h
@@ -202,11 +202,11 @@
 void free_buffer_head(struct buffer_head * bh);
 void unlock_buffer(struct buffer_head *bh);
 void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(enum req_op, blk_opf_t, int, struct buffer_head * bh[]);
+void ll_rw_block(blk_opf_t, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
-int submit_bh(enum req_op, blk_opf_t, struct buffer_head *);
+int submit_bh(blk_opf_t, struct buffer_head *);
 void write_boundary_block(struct block_device *bdev,
                sector_t bblock, unsigned blocksize);
 int bh_uptodate_or_lock(struct buffer_head *bh);