Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

fs: have ll_rw_block users pass in op and flags separately

This has ll_rw_block users pass in the operation and flags separately,
so ll_rw_block can set up the bio op and bi_rw flags on the bio that
is submitted.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>

Authored by Mike Christie, committed by Jens Axboe (dfec8a14, parent 2a222ca9).
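The caller-side conversion is mechanical: the operation moves into the new first argument as a REQ_OP_* value, and any rq_flag_bits (REQ_META, REQ_PRIO, READ_SYNC, READA) move into the new second argument, with 0 passed when there are no flags. A minimal before/after sketch, using a hypothetical struct buffer_head *bh (the call shapes are taken from the hunks below):

	/* Before: op and flags mixed into one "rw" argument. */
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);

	/* After: operation and flags passed separately. */
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);

	/* Plain reads pass 0 for the flags; READA becomes a flag on REQ_OP_READ. */
	ll_rw_block(REQ_OP_READ, 0, 1, &bh);
	ll_rw_block(REQ_OP_READ, READA, 1, &bh);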

21 files changed, +40 -38
fs/buffer.c (+10 -9)
···
 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
 	if (bh) {
 		if (buffer_dirty(bh))
-			ll_rw_block(WRITE, 1, &bh);
+			ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
 		put_bh(bh);
 	}
 }
···
 {
 	struct buffer_head *bh = __getblk(bdev, block, size);
 	if (likely(bh)) {
-		ll_rw_block(READA, 1, &bh);
+		ll_rw_block(REQ_OP_READ, READA, 1, &bh);
 		brelse(bh);
 	}
 }
···
 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
 		    !buffer_unwritten(bh) &&
 		    (block_start < from || block_end > to)) {
-			ll_rw_block(READ, 1, &bh);
+			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 			*wait_bh++=bh;
 		}
 	}
···
 
 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
 		err = -EIO;
-		ll_rw_block(READ, 1, &bh);
+		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 		wait_on_buffer(bh);
 		/* Uhhuh. Read error. Complain and punt. */
 		if (!buffer_uptodate(bh))
···
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @op: whether to %READ or %WRITE
+ * @op_flags: rq_flag_bits or %READA (readahead)
  * @nr: number of &struct buffer_heads in the array
  * @bhs: array of pointers to &struct buffer_head
  *
···
  * All of the buffers must be for the same device, and must also be a
  * multiple of the current approved size for the device.
  */
-void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
+void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[])
 {
 	int i;
 
···
 
 		if (!trylock_buffer(bh))
 			continue;
-		if (rw == WRITE) {
+		if (op == WRITE) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
-				submit_bh(rw, 0, bh);
+				submit_bh(op, op_flags, bh);
 				continue;
 			}
 		} else {
 			if (!buffer_uptodate(bh)) {
 				bh->b_end_io = end_buffer_read_sync;
 				get_bh(bh);
-				submit_bh(rw, 0, bh);
+				submit_bh(op, op_flags, bh);
 				continue;
 			}
 		}
fs/ext4/inode.c (+3 -3)
···
 		return bh;
 	if (!bh || buffer_uptodate(bh))
 		return bh;
-	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
+	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
 	wait_on_buffer(bh);
 	if (buffer_uptodate(bh))
 		return bh;
···
 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
 		    !buffer_unwritten(bh) &&
 		    (block_start < from || block_end > to)) {
-			ll_rw_block(READ, 1, &bh);
+			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 			*wait_bh++ = bh;
 			decrypt = ext4_encrypted_inode(inode) &&
 				S_ISREG(inode->i_mode);
···
 
 		if (!buffer_uptodate(bh)) {
 			err = -EIO;
-			ll_rw_block(READ, 1, &bh);
+			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 			wait_on_buffer(bh);
 			/* Uhhuh. Read error. Complain and punt. */
 			if (!buffer_uptodate(bh))
fs/ext4/namei.c (+2 -1)
···
 		}
 		bh_use[ra_max] = bh;
 		if (bh)
-			ll_rw_block(READ | REQ_META | REQ_PRIO,
+			ll_rw_block(REQ_OP_READ,
+				    REQ_META | REQ_PRIO,
 				    1, &bh);
 	}
 }
fs/ext4/super.c (+1 -1)
···
 		goto out_bdev;
 	}
 	journal->j_private = sb;
-	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
+	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
 	wait_on_buffer(journal->j_sb_buffer);
 	if (!buffer_uptodate(journal->j_sb_buffer)) {
 		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
fs/gfs2/bmap.c (+1 -1)
···
 
 	if (!buffer_uptodate(bh)) {
 		err = -EIO;
-		ll_rw_block(READ, 1, &bh);
+		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 		wait_on_buffer(bh);
 		/* Uhhuh. Read error. Complain and punt. */
 		if (!buffer_uptodate(bh))
fs/gfs2/meta_io.c (+2 -2)
···
 	if (buffer_uptodate(first_bh))
 		goto out;
 	if (!buffer_locked(first_bh))
-		ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
+		ll_rw_block(REQ_OP_READ, READ_SYNC | REQ_META, 1, &first_bh);
 
 	dblock++;
 	extlen--;
···
 		bh = gfs2_getbuf(gl, dblock, CREATE);
 
 		if (!buffer_uptodate(bh) && !buffer_locked(bh))
-			ll_rw_block(READA | REQ_META, 1, &bh);
+			ll_rw_block(REQ_OP_READ, READA | REQ_META, 1, &bh);
 		brelse(bh);
 		dblock++;
 		extlen--;
fs/gfs2/quota.c (+1 -1)
···
 	if (PageUptodate(page))
 		set_buffer_uptodate(bh);
 	if (!buffer_uptodate(bh)) {
-		ll_rw_block(READ | REQ_META, 1, &bh);
+		ll_rw_block(REQ_OP_READ, REQ_META, 1, &bh);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh))
 			goto unlock_out;
fs/isofs/compress.c (+1 -1)
···
 	blocknum = block_start >> bufshift;
 	memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
 	haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
-	ll_rw_block(READ, haveblocks, bhs);
+	ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
 
 	curbh = 0;
 	curpage = 0;
fs/jbd2/journal.c (+1 -1)
···
 
 	J_ASSERT(bh != NULL);
 	if (!buffer_uptodate(bh)) {
-		ll_rw_block(READ, 1, &bh);
+		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
 			printk(KERN_ERR
fs/jbd2/recovery.c (+2 -2)
···
 		if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
 			bufs[nbufs++] = bh;
 			if (nbufs == MAXBUF) {
-				ll_rw_block(READ, nbufs, bufs);
+				ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
 				journal_brelse_array(bufs, nbufs);
 				nbufs = 0;
 			}
···
 	}
 
 	if (nbufs)
-		ll_rw_block(READ, nbufs, bufs);
+		ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
 	err = 0;
 
 failed:
fs/ocfs2/aops.c (+1 -1)
···
 			   !buffer_new(bh) &&
 			   ocfs2_should_read_blk(inode, page, block_start) &&
 			   (block_start < from || block_end > to)) {
-			ll_rw_block(READ, 1, &bh);
+			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 			*wait_bh++=bh;
 		}
 
fs/ocfs2/super.c (+1 -1)
···
 		if (!buffer_dirty(*bh))
 			clear_buffer_uptodate(*bh);
 		unlock_buffer(*bh);
-		ll_rw_block(READ, 1, bh);
+		ll_rw_block(REQ_OP_READ, 0, 1, bh);
 		wait_on_buffer(*bh);
 		if (!buffer_uptodate(*bh)) {
 			mlog_errno(-EIO);
fs/reiserfs/journal.c (+4 -4)
···
 	 */
 	if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
 		spin_unlock(lock);
-		ll_rw_block(WRITE, 1, &bh);
+		ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
 		spin_lock(lock);
 	}
 	put_bh(bh);
···
 		if (tbh) {
 			if (buffer_dirty(tbh)) {
 				depth = reiserfs_write_unlock_nested(s);
-				ll_rw_block(WRITE, 1, &tbh);
+				ll_rw_block(REQ_OP_WRITE, 0, 1, &tbh);
 				reiserfs_write_lock_nested(s, depth);
 			}
 			put_bh(tbh) ;
···
 		}
 	}
 	/* read in the log blocks, memcpy to the corresponding real block */
-	ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
+	ll_rw_block(REQ_OP_READ, 0, get_desc_trans_len(desc), log_blocks);
 	for (i = 0; i < get_desc_trans_len(desc); i++) {
 
 		wait_on_buffer(log_blocks[i]);
···
 		} else
 			bhlist[j++] = bh;
 	}
-	ll_rw_block(READ, j, bhlist);
+	ll_rw_block(REQ_OP_READ, 0, j, bhlist);
 	for (i = 1; i < j; i++)
 		brelse(bhlist[i]);
 	bh = bhlist[0];
fs/reiserfs/stree.c (+2 -2)
···
 		if (!buffer_uptodate(bh[j])) {
 			if (depth == -1)
 				depth = reiserfs_write_unlock_nested(s);
-			ll_rw_block(READA, 1, bh + j);
+			ll_rw_block(REQ_OP_READ, READA, 1, bh + j);
 		}
 		brelse(bh[j]);
 	}
···
 		if (!buffer_uptodate(bh) && depth == -1)
 			depth = reiserfs_write_unlock_nested(sb);
 
-		ll_rw_block(READ, 1, &bh);
+		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 		wait_on_buffer(bh);
 
 		if (depth != -1)
fs/reiserfs/super.c (+1 -1)
···
 /* after journal replay, reread all bitmap and super blocks */
 static int reread_meta_blocks(struct super_block *s)
 {
-	ll_rw_block(READ, 1, &SB_BUFFER_WITH_SB(s));
+	ll_rw_block(REQ_OP_READ, 0, 1, &SB_BUFFER_WITH_SB(s));
 	wait_on_buffer(SB_BUFFER_WITH_SB(s));
 	if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
 		reiserfs_warning(s, "reiserfs-2504", "error reading the super");
fs/squashfs/block.c (+2 -2)
···
 				goto block_release;
 			bytes += msblk->devblksize;
 		}
-		ll_rw_block(READ, b, bh);
+		ll_rw_block(REQ_OP_READ, 0, b, bh);
 	} else {
 		/*
 		 * Metadata block.
···
 				goto block_release;
 			bytes += msblk->devblksize;
 		}
-		ll_rw_block(READ, b - 1, bh + 1);
+		ll_rw_block(REQ_OP_READ, 0, b - 1, bh + 1);
 	}
 
 	for (i = 0; i < b; i++) {
fs/udf/dir.c (+1 -1)
···
 			brelse(tmp);
 		}
 		if (num) {
-			ll_rw_block(READA, num, bha);
+			ll_rw_block(REQ_OP_READ, READA, num, bha);
 			for (i = 0; i < num; i++)
 				brelse(bha[i]);
 		}
fs/udf/directory.c (+1 -1)
···
 			brelse(tmp);
 		}
 		if (num) {
-			ll_rw_block(READA, num, bha);
+			ll_rw_block(REQ_OP_READ, READA, num, bha);
 			for (i = 0; i < num; i++)
 				brelse(bha[i]);
 		}
fs/udf/inode.c (+1 -1)
···
 	if (buffer_uptodate(bh))
 		return bh;
 
-	ll_rw_block(READ, 1, &bh);
+	ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 
 	wait_on_buffer(bh);
 	if (buffer_uptodate(bh))
fs/ufs/balloc.c (+1 -1)
···
 		if (!buffer_mapped(bh))
 			map_bh(bh, inode->i_sb, oldb + pos);
 		if (!buffer_uptodate(bh)) {
-			ll_rw_block(READ, 1, &bh);
+			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 			wait_on_buffer(bh);
 			if (!buffer_uptodate(bh)) {
 				ufs_error(inode->i_sb, __func__,
include/linux/buffer_head.h (+1 -1)
···
 void free_buffer_head(struct buffer_head * bh);
 void unlock_buffer(struct buffer_head *bh);
 void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(int, int, struct buffer_head * bh[]);
+void ll_rw_block(int, int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
 int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
 void write_dirty_buffer(struct buffer_head *bh, int op_flags);
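For reference, the synchronous read pattern that recurs throughout this patch (issue, wait, check uptodate) looks like this against the new prototype. This is a minimal sketch rather than code from the patch; bdev, block, and size are assumed inputs:

	struct buffer_head *bh = __getblk(bdev, block, size);

	if (bh && !buffer_uptodate(bh)) {
		/* Issue the read with no extra rq_flag_bits, then wait. */
		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			/* Read error: release the buffer and report failure. */
			brelse(bh);
			bh = NULL;
		}
	}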