Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

f2fs: use bio op accessors

Separate the op from the rq_flag_bits and have f2fs
set/get the bio using bio_set_op_attrs/bio_op.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>

Authored by Mike Christie and committed by Jens Axboe
04d328de 81a75f67

+81 -54
+6 -4
fs/f2fs/checkpoint.c
··· 63 63 struct f2fs_io_info fio = { 64 64 .sbi = sbi, 65 65 .type = META, 66 - .rw = READ_SYNC | REQ_META | REQ_PRIO, 66 + .op = REQ_OP_READ, 67 + .op_flags = READ_SYNC | REQ_META | REQ_PRIO, 67 68 .old_blkaddr = index, 68 69 .new_blkaddr = index, 69 70 .encrypted_page = NULL, 70 71 }; 71 72 72 73 if (unlikely(!is_meta)) 73 - fio.rw &= ~REQ_META; 74 + fio.op_flags &= ~REQ_META; 74 75 repeat: 75 76 page = f2fs_grab_cache_page(mapping, index, false); 76 77 if (!page) { ··· 158 157 struct f2fs_io_info fio = { 159 158 .sbi = sbi, 160 159 .type = META, 161 - .rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA, 160 + .op = REQ_OP_READ, 161 + .op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA, 162 162 .encrypted_page = NULL, 163 163 }; 164 164 struct blk_plug plug; 165 165 166 166 if (unlikely(type == META_POR)) 167 - fio.rw &= ~REQ_META; 167 + fio.op_flags &= ~REQ_META; 168 168 169 169 blk_start_plug(&plug); 170 170 for (; nrpages-- > 0; blkno++) {
+27 -20
fs/f2fs/data.c
··· 97 97 return bio; 98 98 } 99 99 100 - static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw, 101 - struct bio *bio) 100 + static inline void __submit_bio(struct f2fs_sb_info *sbi, struct bio *bio) 102 101 { 103 - if (!is_read_io(rw)) 102 + if (!is_read_io(bio_op(bio))) 104 103 atomic_inc(&sbi->nr_wb_bios); 105 - bio->bi_rw = rw; 106 104 submit_bio(bio); 107 105 } 108 106 ··· 111 113 if (!io->bio) 112 114 return; 113 115 114 - if (is_read_io(fio->rw)) 116 + if (is_read_io(fio->op)) 115 117 trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio); 116 118 else 117 119 trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio); 118 120 119 - __submit_bio(io->sbi, fio->rw, io->bio); 121 + bio_set_op_attrs(io->bio, fio->op, fio->op_flags); 122 + 123 + __submit_bio(io->sbi, io->bio); 120 124 io->bio = NULL; 121 125 } 122 126 ··· 184 184 /* change META to META_FLUSH in the checkpoint procedure */ 185 185 if (type >= META_FLUSH) { 186 186 io->fio.type = META_FLUSH; 187 + io->fio.op = REQ_OP_WRITE; 187 188 if (test_opt(sbi, NOBARRIER)) 188 - io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO; 189 + io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO; 189 190 else 190 - io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO; 191 + io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META | 192 + REQ_PRIO; 191 193 } 192 194 __submit_merged_bio(io); 193 195 out: ··· 231 229 f2fs_trace_ios(fio, 0); 232 230 233 231 /* Allocate a new bio */ 234 - bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw)); 232 + bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op)); 235 233 236 234 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { 237 235 bio_put(bio); 238 236 return -EFAULT; 239 237 } 238 + bio->bi_rw = fio->op_flags; 239 + bio_set_op_attrs(bio, fio->op, fio->op_flags); 240 240 241 - __submit_bio(fio->sbi, fio->rw, bio); 241 + __submit_bio(fio->sbi, bio); 242 242 return 0; 243 243 } 244 244 ··· 249 245 struct f2fs_sb_info *sbi = fio->sbi; 250 246 enum 
page_type btype = PAGE_TYPE_OF_BIO(fio->type); 251 247 struct f2fs_bio_info *io; 252 - bool is_read = is_read_io(fio->rw); 248 + bool is_read = is_read_io(fio->op); 253 249 struct page *bio_page; 254 250 255 251 io = is_read ? &sbi->read_io : &sbi->write_io[btype]; ··· 261 257 down_write(&io->io_rwsem); 262 258 263 259 if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 || 264 - io->fio.rw != fio->rw)) 260 + (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags))) 265 261 __submit_merged_bio(io); 266 262 alloc_new: 267 263 if (io->bio == NULL) { ··· 395 391 } 396 392 397 393 struct page *get_read_data_page(struct inode *inode, pgoff_t index, 398 - int rw, bool for_write) 394 + int op_flags, bool for_write) 399 395 { 400 396 struct address_space *mapping = inode->i_mapping; 401 397 struct dnode_of_data dn; ··· 405 401 struct f2fs_io_info fio = { 406 402 .sbi = F2FS_I_SB(inode), 407 403 .type = DATA, 408 - .rw = rw, 404 + .op = REQ_OP_READ, 405 + .op_flags = op_flags, 409 406 .encrypted_page = NULL, 410 407 }; 411 408 ··· 1057 1052 */ 1058 1053 if (bio && (last_block_in_bio != block_nr - 1)) { 1059 1054 submit_and_realloc: 1060 - __submit_bio(F2FS_I_SB(inode), READ, bio); 1055 + __submit_bio(F2FS_I_SB(inode), bio); 1061 1056 bio = NULL; 1062 1057 } 1063 1058 if (bio == NULL) { ··· 1086 1081 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr); 1087 1082 bio->bi_end_io = f2fs_read_end_io; 1088 1083 bio->bi_private = ctx; 1089 - bio->bi_rw = READ; 1084 + bio_set_op_attrs(bio, REQ_OP_READ, 0); 1090 1085 } 1091 1086 1092 1087 if (bio_add_page(bio, page, blocksize, 0) < blocksize) ··· 1101 1096 goto next_page; 1102 1097 confused: 1103 1098 if (bio) { 1104 - __submit_bio(F2FS_I_SB(inode), READ, bio); 1099 + __submit_bio(F2FS_I_SB(inode), bio); 1105 1100 bio = NULL; 1106 1101 } 1107 1102 unlock_page(page); ··· 1111 1106 } 1112 1107 BUG_ON(pages && !list_empty(pages)); 1113 1108 if (bio) 1114 - __submit_bio(F2FS_I_SB(inode), READ, bio); 1109 + 
__submit_bio(F2FS_I_SB(inode), bio); 1115 1110 return 0; 1116 1111 } 1117 1112 ··· 1228 1223 struct f2fs_io_info fio = { 1229 1224 .sbi = sbi, 1230 1225 .type = DATA, 1231 - .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE, 1226 + .op = REQ_OP_WRITE, 1227 + .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0, 1232 1228 .page = page, 1233 1229 .encrypted_page = NULL, 1234 1230 }; ··· 1670 1664 struct f2fs_io_info fio = { 1671 1665 .sbi = sbi, 1672 1666 .type = DATA, 1673 - .rw = READ_SYNC, 1667 + .op = REQ_OP_READ, 1668 + .op_flags = READ_SYNC, 1674 1669 .old_blkaddr = blkaddr, 1675 1670 .new_blkaddr = blkaddr, 1676 1671 .page = page,
+3 -2
fs/f2fs/f2fs.h
··· 686 686 struct f2fs_io_info { 687 687 struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */ 688 688 enum page_type type; /* contains DATA/NODE/META/META_FLUSH */ 689 - int rw; /* contains R/RS/W/WS with REQ_META/REQ_PRIO */ 689 + int op; /* contains REQ_OP_ */ 690 + int op_flags; /* rq_flag_bits */ 690 691 block_t new_blkaddr; /* new block address to be written */ 691 692 block_t old_blkaddr; /* old block address before Cow */ 692 693 struct page *page; /* page to be written */ 693 694 struct page *encrypted_page; /* encrypted page */ 694 695 }; 695 696 696 - #define is_read_io(rw) (((rw) & 1) == READ) 697 + #define is_read_io(rw) (rw == READ) 697 698 struct f2fs_bio_info { 698 699 struct f2fs_sb_info *sbi; /* f2fs superblock */ 699 700 struct bio *bio; /* bios to merge */
+6 -3
fs/f2fs/gc.c
··· 538 538 struct f2fs_io_info fio = { 539 539 .sbi = F2FS_I_SB(inode), 540 540 .type = DATA, 541 - .rw = READ_SYNC, 541 + .op = REQ_OP_READ, 542 + .op_flags = READ_SYNC, 542 543 .encrypted_page = NULL, 543 544 }; 544 545 struct dnode_of_data dn; ··· 613 612 /* allocate block address */ 614 613 f2fs_wait_on_page_writeback(dn.node_page, NODE, true); 615 614 616 - fio.rw = WRITE_SYNC; 615 + fio.op = REQ_OP_WRITE; 616 + fio.op_flags = WRITE_SYNC; 617 617 fio.new_blkaddr = newaddr; 618 618 f2fs_submit_page_mbio(&fio); 619 619 ··· 651 649 struct f2fs_io_info fio = { 652 650 .sbi = F2FS_I_SB(inode), 653 651 .type = DATA, 654 - .rw = WRITE_SYNC, 652 + .op = REQ_OP_WRITE, 653 + .op_flags = WRITE_SYNC, 655 654 .page = page, 656 655 .encrypted_page = NULL, 657 656 };
+2 -1
fs/f2fs/inline.c
··· 108 108 struct f2fs_io_info fio = { 109 109 .sbi = F2FS_I_SB(dn->inode), 110 110 .type = DATA, 111 - .rw = WRITE_SYNC | REQ_PRIO, 111 + .op = REQ_OP_WRITE, 112 + .op_flags = WRITE_SYNC | REQ_PRIO, 112 113 .page = page, 113 114 .encrypted_page = NULL, 114 115 };
+5 -3
fs/f2fs/node.c
··· 1070 1070 * 0: f2fs_put_page(page, 0) 1071 1071 * LOCKED_PAGE or error: f2fs_put_page(page, 1) 1072 1072 */ 1073 - static int read_node_page(struct page *page, int rw) 1073 + static int read_node_page(struct page *page, int op_flags) 1074 1074 { 1075 1075 struct f2fs_sb_info *sbi = F2FS_P_SB(page); 1076 1076 struct node_info ni; 1077 1077 struct f2fs_io_info fio = { 1078 1078 .sbi = sbi, 1079 1079 .type = NODE, 1080 - .rw = rw, 1080 + .op = REQ_OP_READ, 1081 + .op_flags = op_flags, 1081 1082 .page = page, 1082 1083 .encrypted_page = NULL, 1083 1084 }; ··· 1569 1568 struct f2fs_io_info fio = { 1570 1569 .sbi = sbi, 1571 1570 .type = NODE, 1572 - .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE, 1571 + .op = REQ_OP_WRITE, 1572 + .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0, 1573 1573 .page = page, 1574 1574 .encrypted_page = NULL, 1575 1575 };
+7 -5
fs/f2fs/segment.c
··· 257 257 struct f2fs_io_info fio = { 258 258 .sbi = sbi, 259 259 .type = DATA, 260 - .rw = WRITE_SYNC | REQ_PRIO, 260 + .op = REQ_OP_WRITE, 261 + .op_flags = WRITE_SYNC | REQ_PRIO, 261 262 .encrypted_page = NULL, 262 263 }; 263 264 bool submit_bio = false; ··· 407 406 fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list); 408 407 409 408 bio->bi_bdev = sbi->sb->s_bdev; 410 - bio->bi_rw = WRITE_FLUSH; 409 + bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); 411 410 ret = submit_bio_wait(bio); 412 411 413 412 llist_for_each_entry_safe(cmd, next, ··· 440 439 int ret; 441 440 442 441 bio->bi_bdev = sbi->sb->s_bdev; 443 - bio->bi_rw = WRITE_FLUSH; 442 + bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); 444 443 ret = submit_bio_wait(bio); 445 444 bio_put(bio); 446 445 return ret; ··· 1404 1403 struct f2fs_io_info fio = { 1405 1404 .sbi = sbi, 1406 1405 .type = META, 1407 - .rw = WRITE_SYNC | REQ_META | REQ_PRIO, 1406 + .op = REQ_OP_WRITE, 1407 + .op_flags = WRITE_SYNC | REQ_META | REQ_PRIO, 1408 1408 .old_blkaddr = page->index, 1409 1409 .new_blkaddr = page->index, 1410 1410 .page = page, ··· 1413 1411 }; 1414 1412 1415 1413 if (unlikely(page->index >= MAIN_BLKADDR(sbi))) 1416 - fio.rw &= ~REQ_META; 1414 + fio.op_flags &= ~REQ_META; 1417 1415 1418 1416 set_page_writeback(page); 1419 1417 f2fs_submit_page_mbio(&fio);
+4 -3
fs/f2fs/trace.c
··· 25 25 if (!last_io.len) 26 26 return; 27 27 28 - trace_printk("%3x:%3x %4x %-16s %2x %5x %12x %4x\n", 28 + trace_printk("%3x:%3x %4x %-16s %2x %5x %5x %12x %4x\n", 29 29 last_io.major, last_io.minor, 30 30 last_io.pid, "----------------", 31 31 last_io.type, 32 - last_io.fio.rw, 32 + last_io.fio.op, last_io.fio.op_flags, 33 33 last_io.fio.new_blkaddr, 34 34 last_io.len); 35 35 memset(&last_io, 0, sizeof(last_io)); ··· 101 101 if (last_io.major == major && last_io.minor == minor && 102 102 last_io.pid == pid && 103 103 last_io.type == __file_type(inode, pid) && 104 - last_io.fio.rw == fio->rw && 104 + last_io.fio.op == fio->op && 105 + last_io.fio.op_flags == fio->op_flags && 105 106 last_io.fio.new_blkaddr + last_io.len == 106 107 fio->new_blkaddr) { 107 108 last_io.len++;
+21 -13
include/trace/events/f2fs.h
··· 56 56 { IPU, "IN-PLACE" }, \ 57 57 { OPU, "OUT-OF-PLACE" }) 58 58 59 - #define F2FS_BIO_MASK(t) (t & (READA | WRITE_FLUSH_FUA)) 59 + #define F2FS_BIO_FLAG_MASK(t) (t & (READA | WRITE_FLUSH_FUA)) 60 60 #define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO)) 61 61 62 - #define show_bio_type(type) show_bio_base(type), show_bio_extra(type) 62 + #define show_bio_type(op, op_flags) show_bio_op(op), \ 63 + show_bio_op_flags(op_flags), show_bio_extra(op_flags) 63 64 64 - #define show_bio_base(type) \ 65 - __print_symbolic(F2FS_BIO_MASK(type), \ 65 + #define show_bio_op(op) \ 66 + __print_symbolic(op, \ 66 67 { READ, "READ" }, \ 68 + { WRITE, "WRITE" }) 69 + 70 + #define show_bio_op_flags(flags) \ 71 + __print_symbolic(F2FS_BIO_FLAG_MASK(flags), \ 67 72 { READA, "READAHEAD" }, \ 68 73 { READ_SYNC, "READ_SYNC" }, \ 69 - { WRITE, "WRITE" }, \ 70 74 { WRITE_SYNC, "WRITE_SYNC" }, \ 71 75 { WRITE_FLUSH, "WRITE_FLUSH" }, \ 72 76 { WRITE_FUA, "WRITE_FUA" }, \ ··· 738 734 __field(pgoff_t, index) 739 735 __field(block_t, old_blkaddr) 740 736 __field(block_t, new_blkaddr) 741 - __field(int, rw) 737 + __field(int, op) 738 + __field(int, op_flags) 742 739 __field(int, type) 743 740 ), 744 741 ··· 749 744 __entry->index = page->index; 750 745 __entry->old_blkaddr = fio->old_blkaddr; 751 746 __entry->new_blkaddr = fio->new_blkaddr; 752 - __entry->rw = fio->rw; 747 + __entry->op = fio->op; 748 + __entry->op_flags = fio->op_flags; 753 749 __entry->type = fio->type; 754 750 ), 755 751 756 752 TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, " 757 - "oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%s, type = %s", 753 + "oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%si%s, type = %s", 758 754 show_dev_ino(__entry), 759 755 (unsigned long)__entry->index, 760 756 (unsigned long long)__entry->old_blkaddr, 761 757 (unsigned long long)__entry->new_blkaddr, 762 - show_bio_type(__entry->rw), 758 + show_bio_type(__entry->op, __entry->op_flags), 763 759 show_block_type(__entry->type)) 764 760 ); 
765 761 ··· 791 785 792 786 TP_STRUCT__entry( 793 787 __field(dev_t, dev) 794 - __field(int, rw) 788 + __field(int, op) 789 + __field(int, op_flags) 795 790 __field(int, type) 796 791 __field(sector_t, sector) 797 792 __field(unsigned int, size) ··· 800 793 801 794 TP_fast_assign( 802 795 __entry->dev = sb->s_dev; 803 - __entry->rw = fio->rw; 796 + __entry->op = fio->op; 797 + __entry->op_flags = fio->op_flags; 804 798 __entry->type = fio->type; 805 799 __entry->sector = bio->bi_iter.bi_sector; 806 800 __entry->size = bio->bi_iter.bi_size; 807 801 ), 808 802 809 - TP_printk("dev = (%d,%d), %s%s, %s, sector = %lld, size = %u", 803 + TP_printk("dev = (%d,%d), %s%s%s, %s, sector = %lld, size = %u", 810 804 show_dev(__entry), 811 - show_bio_type(__entry->rw), 805 + show_bio_type(__entry->op, __entry->op_flags), 812 806 show_block_type(__entry->type), 813 807 (unsigned long long)__entry->sector, 814 808 __entry->size)