
fs: have submit_bh users pass in op and flags separately

This patch has submit_bh users pass in the operation and flags separately,
so that submit_bh_wbc can set up the bio op and bi_rw flags on the bio that
is submitted.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
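
As an illustration (not part of the patch), here is how a metadata read changes at a call site; the helper name below is hypothetical, but the submit_bh() call mirrors the ext4 hunks further down. Note also that, since the operation is no longer a flag bit, bitmask tests such as (rw & WRITE) become equality checks like (op == REQ_OP_WRITE), as in the btrfs hunks.

    #include <linux/buffer_head.h>

    /*
     * Hypothetical helper (not in the patch): read one metadata buffer
     * synchronously with the new submit_bh() signature. REQ_OP_READ is
     * the operation; REQ_META | REQ_PRIO are the modifier flags.
     */
    static int example_read_metadata(struct buffer_head *bh)
    {
        lock_buffer(bh);                      /* submit_bh() expects a locked bh */
        get_bh(bh);
        bh->b_end_io = end_buffer_read_sync;  /* unlocks bh and drops the ref */
        /* was: submit_bh(READ | REQ_META | REQ_PRIO, bh); */
        submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
    }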

Authored by Mike Christie, committed by Jens Axboe
2a222ca9 f2150821

+102 -96
+2 -2
drivers/md/bitmap.c
@@ -297,7 +297,7 @@
         atomic_inc(&bitmap->pending_writes);
         set_buffer_locked(bh);
         set_buffer_mapped(bh);
-        submit_bh(WRITE | REQ_SYNC, bh);
+        submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
         bh = bh->b_this_page;
     }

@@ -392,7 +392,7 @@
         atomic_inc(&bitmap->pending_writes);
         set_buffer_locked(bh);
         set_buffer_mapped(bh);
-        submit_bh(READ, bh);
+        submit_bh(REQ_OP_READ, 0, bh);
     }
     block++;
     bh = bh->b_this_page;
+12 -12
fs/btrfs/check-integrity.c
@@ -2856,12 +2856,12 @@
     return ds;
 }

-int btrfsic_submit_bh(int rw, struct buffer_head *bh)
+int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh)
 {
     struct btrfsic_dev_state *dev_state;

     if (!btrfsic_is_initialized)
-        return submit_bh(rw, bh);
+        return submit_bh(op, op_flags, bh);

     mutex_lock(&btrfsic_mutex);
     /* since btrfsic_submit_bh() might also be called before
@@ -2870,26 +2870,26 @@

     /* Only called to write the superblock (incl. FLUSH/FUA) */
     if (NULL != dev_state &&
-        (rw & WRITE) && bh->b_size > 0) {
+        (op == REQ_OP_WRITE) && bh->b_size > 0) {
         u64 dev_bytenr;

         dev_bytenr = 4096 * bh->b_blocknr;
         if (dev_state->state->print_mask &
             BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
             printk(KERN_INFO
-                "submit_bh(rw=0x%x, blocknr=%llu (bytenr %llu),"
-                " size=%zu, data=%p, bdev=%p)\n",
-                rw, (unsigned long long)bh->b_blocknr,
+                "submit_bh(op=0x%x,0x%x, blocknr=%llu "
+                "(bytenr %llu), size=%zu, data=%p, bdev=%p)\n",
+                op, op_flags, (unsigned long long)bh->b_blocknr,
                 dev_bytenr, bh->b_size, bh->b_data, bh->b_bdev);
         btrfsic_process_written_block(dev_state, dev_bytenr,
                           &bh->b_data, 1, NULL,
-                          NULL, bh, rw);
-    } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
+                          NULL, bh, op_flags);
+    } else if (NULL != dev_state && (op_flags & REQ_FLUSH)) {
         if (dev_state->state->print_mask &
             BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
             printk(KERN_INFO
-                "submit_bh(rw=0x%x FLUSH, bdev=%p)\n",
-                rw, bh->b_bdev);
+                "submit_bh(op=0x%x,0x%x FLUSH, bdev=%p)\n",
+                op, op_flags, bh->b_bdev);
         if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
             if ((dev_state->state->print_mask &
                 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
@@ -2907,7 +2907,7 @@
             block->never_written = 0;
             block->iodone_w_error = 0;
             block->flush_gen = dev_state->last_flush_gen + 1;
-            block->submit_bio_bh_rw = rw;
+            block->submit_bio_bh_rw = op_flags;
             block->orig_bio_bh_private = bh->b_private;
             block->orig_bio_bh_end_io.bh = bh->b_end_io;
             block->next_in_same_bio = NULL;
@@ -2916,7 +2916,7 @@
         }
     }
     mutex_unlock(&btrfsic_mutex);
-    return submit_bh(rw, bh);
+    return submit_bh(op, op_flags, bh);
 }

 static void __btrfsic_submit_bio(struct bio *bio)
+1 -1
fs/btrfs/check-integrity.h
@@ -20,7 +20,7 @@
 #define __BTRFS_CHECK_INTEGRITY__

 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
-int btrfsic_submit_bh(int rw, struct buffer_head *bh);
+int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh);
 void btrfsic_submit_bio(struct bio *bio);
 int btrfsic_submit_bio_wait(struct bio *bio);
 #else
+2 -2
fs/btrfs/disk-io.c
@@ -3420,9 +3420,9 @@
          * to go down lazy.
          */
         if (i == 0)
-            ret = btrfsic_submit_bh(WRITE_FUA, bh);
+            ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_FUA, bh);
         else
-            ret = btrfsic_submit_bh(WRITE_SYNC, bh);
+            ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
         if (ret)
             errors++;
     }
+27 -26
fs/buffer.c
@@ -45,7 +45,7 @@
 #include <trace/events/block.h>

 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static int submit_bh_wbc(int rw, struct buffer_head *bh,
+static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
              unsigned long bio_flags,
              struct writeback_control *wbc);

@@ -1225,7 +1225,7 @@
     } else {
         get_bh(bh);
         bh->b_end_io = end_buffer_read_sync;
-        submit_bh(READ, bh);
+        submit_bh(REQ_OP_READ, 0, bh);
         wait_on_buffer(bh);
         if (buffer_uptodate(bh))
             return bh;
@@ -1697,7 +1697,7 @@
     struct buffer_head *bh, *head;
     unsigned int blocksize, bbits;
     int nr_underway = 0;
-    int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
+    int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);

     head = create_page_buffers(page, inode,
                    (1 << BH_Dirty)|(1 << BH_Uptodate));
@@ -1786,7 +1786,7 @@
     do {
         struct buffer_head *next = bh->b_this_page;
         if (buffer_async_write(bh)) {
-            submit_bh_wbc(write_op, bh, 0, wbc);
+            submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc);
             nr_underway++;
         }
         bh = next;
@@ -1840,7 +1840,7 @@
         struct buffer_head *next = bh->b_this_page;
         if (buffer_async_write(bh)) {
             clear_buffer_dirty(bh);
-            submit_bh_wbc(write_op, bh, 0, wbc);
+            submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc);
             nr_underway++;
         }
         bh = next;
@@ -2248,7 +2248,7 @@
         if (buffer_uptodate(bh))
             end_buffer_async_read(bh, 1);
         else
-            submit_bh(READ, bh);
+            submit_bh(REQ_OP_READ, 0, bh);
     }
     return 0;
 }
@@ -2582,7 +2582,7 @@
         if (block_start < from || block_end > to) {
             lock_buffer(bh);
             bh->b_end_io = end_buffer_read_nobh;
-            submit_bh(READ, bh);
+            submit_bh(REQ_OP_READ, 0, bh);
             nr_reads++;
         }
     }
@@ -2949,7 +2949,7 @@
  * errors, this only handles the "we need to be able to
  * do IO at the final sector" case.
  */
-void guard_bio_eod(int rw, struct bio *bio)
+void guard_bio_eod(int op, struct bio *bio)
 {
     sector_t maxsector;
     struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
@@ -2979,13 +2979,13 @@
     bvec->bv_len -= truncated_bytes;

     /* ..and clear the end of the buffer for reads */
-    if ((rw & RW_MASK) == READ) {
+    if (op == REQ_OP_READ) {
         zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len,
                 truncated_bytes);
     }
 }

-static int submit_bh_wbc(int rw, struct buffer_head *bh,
+static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
              unsigned long bio_flags, struct writeback_control *wbc)
 {
     struct bio *bio;
@@ -2999,7 +2999,7 @@
     /*
      * Only clear out a write error when rewriting
      */
-    if (test_set_buffer_req(bh) && (rw & WRITE))
+    if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
         clear_buffer_write_io_error(bh);

     /*
@@ -3024,27 +3024,28 @@
     bio->bi_flags |= bio_flags;

     /* Take care of bh's that straddle the end of the device */
-    guard_bio_eod(rw, bio);
+    guard_bio_eod(op, bio);

     if (buffer_meta(bh))
-        rw |= REQ_META;
+        op_flags |= REQ_META;
     if (buffer_prio(bh))
-        rw |= REQ_PRIO;
-    bio->bi_rw = rw;
+        op_flags |= REQ_PRIO;
+    bio_set_op_attrs(bio, op, op_flags);

     submit_bio(bio);
     return 0;
 }

-int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
+int _submit_bh(int op, int op_flags, struct buffer_head *bh,
+           unsigned long bio_flags)
 {
-    return submit_bh_wbc(rw, bh, bio_flags, NULL);
+    return submit_bh_wbc(op, op_flags, bh, bio_flags, NULL);
 }
 EXPORT_SYMBOL_GPL(_submit_bh);

-int submit_bh(int rw, struct buffer_head *bh)
+int submit_bh(int op, int op_flags, struct buffer_head *bh)
 {
-    return submit_bh_wbc(rw, bh, 0, NULL);
+    return submit_bh_wbc(op, op_flags, bh, 0, NULL);
 }
 EXPORT_SYMBOL(submit_bh);

@@ -3087,14 +3086,14 @@
         if (test_clear_buffer_dirty(bh)) {
             bh->b_end_io = end_buffer_write_sync;
             get_bh(bh);
-            submit_bh(WRITE, bh);
+            submit_bh(rw, 0, bh);
             continue;
         }
     } else {
         if (!buffer_uptodate(bh)) {
             bh->b_end_io = end_buffer_read_sync;
             get_bh(bh);
-            submit_bh(rw, bh);
+            submit_bh(rw, 0, bh);
             continue;
         }
     }
@@ -3103,7 +3102,7 @@
     }
 }
 EXPORT_SYMBOL(ll_rw_block);
-void write_dirty_buffer(struct buffer_head *bh, int rw)
+void write_dirty_buffer(struct buffer_head *bh, int op_flags)
 {
     lock_buffer(bh);
     if (!test_clear_buffer_dirty(bh)) {
@@ -3112,7 +3111,7 @@
     }
     bh->b_end_io = end_buffer_write_sync;
     get_bh(bh);
-    submit_bh(rw, bh);
+    submit_bh(REQ_OP_WRITE, op_flags, bh);
 }
 EXPORT_SYMBOL(write_dirty_buffer);

@@ -3121,6 +3120,6 @@
  * and then start new I/O and then wait upon it. The caller must have a ref on
  * the buffer_head.
  */
-int __sync_dirty_buffer(struct buffer_head *bh, int rw)
+int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
 {
     int ret = 0;
@@ -3130,7 +3129,7 @@
     if (test_clear_buffer_dirty(bh)) {
         get_bh(bh);
         bh->b_end_io = end_buffer_write_sync;
-        ret = submit_bh(rw, bh);
+        ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
         wait_on_buffer(bh);
         if (!ret && !buffer_uptodate(bh))
             ret = -EIO;
@@ -3393,7 +3392,7 @@

     get_bh(bh);
     bh->b_end_io = end_buffer_read_sync;
-    submit_bh(READ, bh);
+    submit_bh(REQ_OP_READ, 0, bh);
     wait_on_buffer(bh);
     if (buffer_uptodate(bh))
         return 0;
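
The helpers built on submit_bh() change in kind: write_dirty_buffer() and __sync_dirty_buffer() now take only op_flags and supply REQ_OP_WRITE themselves, so callers that used to pass WRITE now pass 0 (see the fat, reiserfs and ufs hunks below). A minimal sketch, with a hypothetical helper name:

    #include <linux/buffer_head.h>

    /* Hypothetical caller: flush one dirty buffer and wait for it. */
    static int example_flush_buffer(struct buffer_head *bh)
    {
        /* was: write_dirty_buffer(bh, WRITE); 0 means a plain write,
         * WRITE_SYNC would mark it synchronous. */
        write_dirty_buffer(bh, 0);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
    }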
+1 -1
fs/ext4/balloc.c
@@ -470,7 +470,7 @@
     trace_ext4_read_block_bitmap_load(sb, block_group);
     bh->b_end_io = ext4_end_bitmap_read;
     get_bh(bh);
-    submit_bh(READ | REQ_META | REQ_PRIO, bh);
+    submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
     return bh;
 verify:
     err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
+1 -1
fs/ext4/ialloc.c
@@ -214,7 +214,7 @@
     trace_ext4_load_inode_bitmap(sb, block_group);
     bh->b_end_io = ext4_end_bitmap_read;
     get_bh(bh);
-    submit_bh(READ | REQ_META | REQ_PRIO, bh);
+    submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
     wait_on_buffer(bh);
     if (!buffer_uptodate(bh)) {
         put_bh(bh);
+1 -1
fs/ext4/inode.c
@@ -4281,7 +4281,7 @@
         trace_ext4_load_inode(inode);
         get_bh(bh);
         bh->b_end_io = end_buffer_read_sync;
-        submit_bh(READ | REQ_META | REQ_PRIO, bh);
+        submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
         wait_on_buffer(bh);
         if (!buffer_uptodate(bh)) {
             EXT4_ERROR_INODE_BLOCK(inode, block,
+2 -2
fs/ext4/mmp.c
@@ -52,7 +52,7 @@
     lock_buffer(bh);
     bh->b_end_io = end_buffer_write_sync;
     get_bh(bh);
-    submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
+    submit_bh(REQ_OP_WRITE, WRITE_SYNC | REQ_META | REQ_PRIO, bh);
     wait_on_buffer(bh);
     sb_end_write(sb);
     if (unlikely(!buffer_uptodate(bh)))
@@ -88,7 +88,7 @@
     get_bh(*bh);
     lock_buffer(*bh);
     (*bh)->b_end_io = end_buffer_read_sync;
-    submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh);
+    submit_bh(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, *bh);
     wait_on_buffer(*bh);
     if (!buffer_uptodate(*bh)) {
         ret = -EIO;
+1 -1
fs/fat/misc.c
@@ -267,7 +267,7 @@
     int i, err = 0;

     for (i = 0; i < nr_bhs; i++)
-        write_dirty_buffer(bhs[i], WRITE);
+        write_dirty_buffer(bhs[i], 0);

     for (i = 0; i < nr_bhs; i++) {
         wait_on_buffer(bhs[i]);
+1 -1
fs/gfs2/bmap.c
@@ -285,7 +285,7 @@
         if (trylock_buffer(rabh)) {
             if (!buffer_uptodate(rabh)) {
                 rabh->b_end_io = end_buffer_read_sync;
-                submit_bh(READA | REQ_META, rabh);
+                submit_bh(REQ_OP_READ, READA | REQ_META, rabh);
                 continue;
             }
             unlock_buffer(rabh);
+1 -1
fs/gfs2/dir.c
@@ -1513,7 +1513,7 @@
                 continue;
             }
             bh->b_end_io = end_buffer_read_sync;
-            submit_bh(READA | REQ_META, bh);
+            submit_bh(REQ_OP_READ, READA | REQ_META, bh);
             continue;
         }
         brelse(bh);
+3 -3
fs/gfs2/meta_io.c
@@ -37,8 +37,8 @@
 {
     struct buffer_head *bh, *head;
     int nr_underway = 0;
-    int write_op = REQ_META | REQ_PRIO |
-        (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
+    int write_flags = REQ_META | REQ_PRIO |
+        (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);

     BUG_ON(!PageLocked(page));
     BUG_ON(!page_has_buffers(page));
@@ -79,7 +79,7 @@
     do {
         struct buffer_head *next = bh->b_this_page;
         if (buffer_async_write(bh)) {
-            submit_bh(write_op, bh);
+            submit_bh(REQ_OP_WRITE, write_flags, bh);
             nr_underway++;
         }
         bh = next;
+3 -3
fs/jbd2/commit.c
@@ -155,9 +155,9 @@

     if (journal->j_flags & JBD2_BARRIER &&
         !jbd2_has_feature_async_commit(journal))
-        ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
+        ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC | WRITE_FLUSH_FUA, bh);
     else
-        ret = submit_bh(WRITE_SYNC, bh);
+        ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);

     *cbh = bh;
     return ret;
@@ -718,7 +718,7 @@
             clear_buffer_dirty(bh);
             set_buffer_uptodate(bh);
             bh->b_end_io = journal_end_buffer_io_sync;
-            submit_bh(WRITE_SYNC, bh);
+            submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
         }
         cond_resched();
         stats.run.rs_blocks_logged += bufs;
+4 -4
fs/jbd2/journal.c
@@ -1346,15 +1346,15 @@
     return jbd2_journal_start_thread(journal);
 }

-static int jbd2_write_superblock(journal_t *journal, int write_op)
+static int jbd2_write_superblock(journal_t *journal, int write_flags)
 {
     struct buffer_head *bh = journal->j_sb_buffer;
     journal_superblock_t *sb = journal->j_superblock;
     int ret;

-    trace_jbd2_write_superblock(journal, write_op);
+    trace_jbd2_write_superblock(journal, write_flags);
     if (!(journal->j_flags & JBD2_BARRIER))
-        write_op &= ~(REQ_FUA | REQ_FLUSH);
+        write_flags &= ~(REQ_FUA | REQ_FLUSH);
     lock_buffer(bh);
     if (buffer_write_io_error(bh)) {
         /*
@@ -1374,7 +1374,7 @@
     jbd2_superblock_csum_set(journal, sb);
     get_bh(bh);
     bh->b_end_io = end_buffer_write_sync;
-    ret = submit_bh(write_op, bh);
+    ret = submit_bh(REQ_OP_WRITE, write_flags, bh);
     wait_on_buffer(bh);
     if (buffer_write_io_error(bh)) {
         clear_buffer_write_io_error(bh);
+3 -3
fs/nilfs2/btnode.c
@@ -62,7 +62,7 @@
 }

 int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
-                  sector_t pblocknr, int mode,
+                  sector_t pblocknr, int mode, int mode_flags,
                   struct buffer_head **pbh, sector_t *submit_ptr)
 {
     struct buffer_head *bh;
@@ -95,7 +95,7 @@
         }
     }

-    if (mode == READA) {
+    if (mode_flags & REQ_RAHEAD) {
         if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) {
             err = -EBUSY; /* internal code */
             brelse(bh);
@@ -114,7 +114,7 @@
     bh->b_blocknr = pblocknr; /* set block address for read */
     bh->b_end_io = end_buffer_read_sync;
     get_bh(bh);
-    submit_bh(mode, bh);
+    submit_bh(mode, mode_flags, bh);
     bh->b_blocknr = blocknr; /* set back to the given block address */
     *submit_ptr = pblocknr;
     err = 0;
+1 -1
fs/nilfs2/btnode.h
@@ -43,7 +43,7 @@
 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
                           __u64 blocknr);
 int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t, int,
-                  struct buffer_head **, sector_t *);
+                  int, struct buffer_head **, sector_t *);
 void nilfs_btnode_delete(struct buffer_head *);
 int nilfs_btnode_prepare_change_key(struct address_space *,
                     struct nilfs_btnode_chkey_ctxt *);
+4 -2
fs/nilfs2/btree.c
@@ -476,7 +476,8 @@
     sector_t submit_ptr = 0;
     int ret;

-    ret = nilfs_btnode_submit_block(btnc, ptr, 0, READ, &bh, &submit_ptr);
+    ret = nilfs_btnode_submit_block(btnc, ptr, 0, REQ_OP_READ, 0, &bh,
+                    &submit_ptr);
     if (ret) {
         if (ret != -EEXIST)
             return ret;
@@ -493,7 +492,8 @@
          n > 0 && i < ra->ncmax; n--, i++) {
         ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax);

-        ret = nilfs_btnode_submit_block(btnc, ptr2, 0, READA,
+        ret = nilfs_btnode_submit_block(btnc, ptr2, 0,
+                        REQ_OP_READ, REQ_RAHEAD,
                         &ra_bh, &submit_ptr);
         if (likely(!ret || ret == -EEXIST))
             brelse(ra_bh);
+3 -2
fs/nilfs2/gcinode.c
@@ -101,7 +101,7 @@
     bh->b_blocknr = pbn;
     bh->b_end_io = end_buffer_read_sync;
     get_bh(bh);
-    submit_bh(READ, bh);
+    submit_bh(REQ_OP_READ, 0, bh);
     if (vbn)
         bh->b_blocknr = vbn;
 out:
@@ -138,7 +138,8 @@
     int ret;

     ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
-                    vbn ? : pbn, pbn, READ, out_bh, &pbn);
+                    vbn ? : pbn, pbn, REQ_OP_READ, 0,
+                    out_bh, &pbn);
     if (ret == -EEXIST) /* internal code (cache hit) */
         ret = 0;
     return ret;
+6 -5
fs/nilfs2/mdt.c
@@ -121,7 +121,7 @@

 static int
 nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
-               int mode, struct buffer_head **out_bh)
+               int mode, int mode_flags, struct buffer_head **out_bh)
 {
     struct buffer_head *bh;
     __u64 blknum = 0;
@@ -135,7 +135,7 @@
     if (buffer_uptodate(bh))
         goto out;

-    if (mode == READA) {
+    if (mode_flags & REQ_RAHEAD) {
         if (!trylock_buffer(bh)) {
             ret = -EBUSY;
             goto failed_bh;
@@ -157,7 +157,7 @@

     bh->b_end_io = end_buffer_read_sync;
     get_bh(bh);
-    submit_bh(mode, bh);
+    submit_bh(mode, mode_flags, bh);
     ret = 0;

     trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff, mode);
@@ -181,7 +181,7 @@
     int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
     int err;

-    err = nilfs_mdt_submit_block(inode, block, READ, &first_bh);
+    err = nilfs_mdt_submit_block(inode, block, REQ_OP_READ, 0, &first_bh);
     if (err == -EEXIST) /* internal code */
         goto out;

@@ -191,7 +191,8 @@
     if (readahead) {
         blkoff = block + 1;
         for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
-            err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
+            err = nilfs_mdt_submit_block(inode, blkoff, REQ_OP_READ,
+                             REQ_RAHEAD, &bh);
             if (likely(!err || err == -EEXIST))
                 brelse(bh);
             else if (err != -EBUSY)
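
The nilfs2 changes also show how readahead is expressed after the split: the READA op value goes away, and readahead becomes REQ_OP_READ plus the REQ_RAHEAD flag. A sketch, assuming the post-patch mdt interface (nilfs_mdt_submit_block() is file-local, so this is illustrative only):

    /* Hypothetical fragment: issue a readahead for one metadata block. */
    struct buffer_head *bh;
    int err;

    /* was: err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh); */
    err = nilfs_mdt_submit_block(inode, blkoff, REQ_OP_READ, REQ_RAHEAD, &bh);
    if (!err || err == -EEXIST)
        brelse(bh);    /* -EEXIST is nilfs' internal "already cached" code */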
+3 -3
fs/ntfs/aops.c
@@ -362,7 +362,7 @@
     for (i = 0; i < nr; i++) {
         tbh = arr[i];
         if (likely(!buffer_uptodate(tbh)))
-            submit_bh(READ, tbh);
+            submit_bh(REQ_OP_READ, 0, tbh);
         else
             ntfs_end_buffer_async_read(tbh, 1);
     }
@@ -877,7 +877,7 @@
     do {
         struct buffer_head *next = bh->b_this_page;
         if (buffer_async_write(bh)) {
-            submit_bh(WRITE, bh);
+            submit_bh(REQ_OP_WRITE, 0, bh);
             need_end_writeback = false;
         }
         bh = next;
@@ -1202,7 +1202,7 @@
         BUG_ON(!buffer_mapped(tbh));
         get_bh(tbh);
         tbh->b_end_io = end_buffer_write_sync;
-        submit_bh(WRITE, tbh);
+        submit_bh(REQ_OP_WRITE, 0, tbh);
     }
     /* Synchronize the mft mirror now if not @sync. */
     if (is_mft && !sync)
+1 -1
fs/ntfs/compress.c
@@ -670,7 +670,7 @@
         }
         get_bh(tbh);
         tbh->b_end_io = end_buffer_read_sync;
-        submit_bh(READ, tbh);
+        submit_bh(REQ_OP_READ, 0, tbh);
     }

     /* Wait for io completion on all buffer heads. */
+1 -1
fs/ntfs/file.c
@@ -553,7 +553,7 @@
     lock_buffer(bh);
     get_bh(bh);
     bh->b_end_io = end_buffer_read_sync;
-    return submit_bh(READ, bh);
+    return submit_bh(REQ_OP_READ, 0, bh);
 }

 /**
+1 -1
fs/ntfs/logfile.c
@@ -821,7 +821,7 @@
          * completed ignore errors afterwards as we can assume
          * that if one buffer worked all of them will work.
          */
-        submit_bh(WRITE, bh);
+        submit_bh(REQ_OP_WRITE, 0, bh);
         if (should_wait) {
             should_wait = false;
             wait_on_buffer(bh);
+2 -2
fs/ntfs/mft.c
@@ -592,7 +592,7 @@
         clear_buffer_dirty(tbh);
         get_bh(tbh);
         tbh->b_end_io = end_buffer_write_sync;
-        submit_bh(WRITE, tbh);
+        submit_bh(REQ_OP_WRITE, 0, tbh);
     }
     /* Wait on i/o completion of buffers. */
     for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
@@ -785,7 +785,7 @@
         clear_buffer_dirty(tbh);
         get_bh(tbh);
         tbh->b_end_io = end_buffer_write_sync;
-        submit_bh(WRITE, tbh);
+        submit_bh(REQ_OP_WRITE, 0, tbh);
     }
     /* Synchronize the mft mirror now if not @sync. */
     if (!sync && ni->mft_no < vol->mftmirr_size)
+4 -4
fs/ocfs2/buffer_head_io.c
@@ -79,7 +79,7 @@

     get_bh(bh); /* for end_buffer_write_sync() */
     bh->b_end_io = end_buffer_write_sync;
-    submit_bh(WRITE, bh);
+    submit_bh(REQ_OP_WRITE, 0, bh);

     wait_on_buffer(bh);

@@ -149,7 +149,7 @@
         clear_buffer_uptodate(bh);
         get_bh(bh); /* for end_buffer_read_sync() */
         bh->b_end_io = end_buffer_read_sync;
-        submit_bh(READ, bh);
+        submit_bh(REQ_OP_READ, 0, bh);
     }

     for (i = nr; i > 0; i--) {
@@ -305,7 +305,7 @@
             if (validate)
                 set_buffer_needs_validate(bh);
             bh->b_end_io = end_buffer_read_sync;
-            submit_bh(READ, bh);
+            submit_bh(REQ_OP_READ, 0, bh);
             continue;
         }
     }
@@ -419,7 +419,7 @@
     get_bh(bh); /* for end_buffer_write_sync() */
     bh->b_end_io = end_buffer_write_sync;
     ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
-    submit_bh(WRITE, bh);
+    submit_bh(REQ_OP_WRITE, 0, bh);

     wait_on_buffer(bh);
+2 -2
fs/reiserfs/inode.c
@@ -2668,7 +2668,7 @@
     do {
         struct buffer_head *next = bh->b_this_page;
         if (buffer_async_write(bh)) {
-            submit_bh(WRITE, bh);
+            submit_bh(REQ_OP_WRITE, 0, bh);
             nr++;
         }
         put_bh(bh);
@@ -2728,7 +2728,7 @@
         struct buffer_head *next = bh->b_this_page;
         if (buffer_async_write(bh)) {
             clear_buffer_dirty(bh);
-            submit_bh(WRITE, bh);
+            submit_bh(REQ_OP_WRITE, 0, bh);
             nr++;
         }
         put_bh(bh);
+3 -3
fs/reiserfs/journal.c
@@ -652,7 +652,7 @@
         BUG();
     if (!buffer_uptodate(bh))
         BUG();
-    submit_bh(WRITE, bh);
+    submit_bh(REQ_OP_WRITE, 0, bh);
 }

 static void submit_ordered_buffer(struct buffer_head *bh)
@@ -662,7 +662,7 @@
     clear_buffer_dirty(bh);
     if (!buffer_uptodate(bh))
         BUG();
-    submit_bh(WRITE, bh);
+    submit_bh(REQ_OP_WRITE, 0, bh);
 }

 #define CHUNK_SIZE 32
@@ -2269,7 +2269,7 @@
     /* flush out the real blocks */
     for (i = 0; i < get_desc_trans_len(desc); i++) {
         set_buffer_dirty(real_blocks[i]);
-        write_dirty_buffer(real_blocks[i], WRITE);
+        write_dirty_buffer(real_blocks[i], 0);
     }
     for (i = 0; i < get_desc_trans_len(desc); i++) {
         wait_on_buffer(real_blocks[i]);
+1 -1
fs/ufs/util.c
@@ -118,7 +118,7 @@
     unsigned i;

     for (i = 0; i < ubh->count; i++)
-        write_dirty_buffer(ubh->bh[i], WRITE);
+        write_dirty_buffer(ubh->bh[i], 0);

     for (i = 0; i < ubh->count; i++)
         wait_on_buffer(ubh->bh[i]);
+5 -4
include/linux/buffer_head.h
@@ -189,10 +189,11 @@
 void __lock_buffer(struct buffer_head *bh);
 void ll_rw_block(int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
-int __sync_dirty_buffer(struct buffer_head *bh, int rw);
-void write_dirty_buffer(struct buffer_head *bh, int rw);
-int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags);
-int submit_bh(int, struct buffer_head *);
+int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
+void write_dirty_buffer(struct buffer_head *bh, int op_flags);
+int _submit_bh(int op, int op_flags, struct buffer_head *bh,
+           unsigned long bio_flags);
+int submit_bh(int, int, struct buffer_head *);
 void write_boundary_block(struct block_device *bdev,
             sector_t bblock, unsigned blocksize);
 int bh_uptodate_or_lock(struct buffer_head *bh);