Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block,fs: use REQ_* flags directly

Remove the WRITE_* and READ_SYNC wrappers, and just use the flags
directly. Where applicable this also drops usage of the
bio_set_op_attrs wrapper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>

Authored by Christoph Hellwig and committed by Jens Axboe
70fd7614 a2b80967

+133 -182
+2 -2
block/blk-flush.c
··· 330 330 } 331 331 332 332 flush_rq->cmd_type = REQ_TYPE_FS; 333 - flush_rq->cmd_flags = REQ_OP_FLUSH | WRITE_FLUSH; 333 + flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH; 334 334 flush_rq->rq_flags |= RQF_FLUSH_SEQ; 335 335 flush_rq->rq_disk = first_rq->rq_disk; 336 336 flush_rq->end_io = flush_end_io; ··· 486 486 487 487 bio = bio_alloc(gfp_mask, 0); 488 488 bio->bi_bdev = bdev; 489 - bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); 489 + bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 490 490 491 491 ret = submit_bio_wait(bio); 492 492
+1 -1
drivers/block/drbd/drbd_receiver.c
··· 1266 1266 bio->bi_bdev = device->ldev->backing_bdev; 1267 1267 bio->bi_private = octx; 1268 1268 bio->bi_end_io = one_flush_endio; 1269 - bio_set_op_attrs(bio, REQ_OP_FLUSH, WRITE_FLUSH); 1269 + bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH; 1270 1270 1271 1271 device->flush_jif = jiffies; 1272 1272 set_bit(FLUSH_PENDING, &device->flags);
+5 -5
drivers/block/xen-blkback/blkback.c
··· 1253 1253 case BLKIF_OP_WRITE: 1254 1254 ring->st_wr_req++; 1255 1255 operation = REQ_OP_WRITE; 1256 - operation_flags = WRITE_ODIRECT; 1256 + operation_flags = REQ_SYNC | REQ_IDLE; 1257 1257 break; 1258 1258 case BLKIF_OP_WRITE_BARRIER: 1259 1259 drain = true; 1260 1260 case BLKIF_OP_FLUSH_DISKCACHE: 1261 1261 ring->st_f_req++; 1262 1262 operation = REQ_OP_WRITE; 1263 - operation_flags = WRITE_FLUSH; 1263 + operation_flags = REQ_PREFLUSH; 1264 1264 break; 1265 1265 default: 1266 1266 operation = 0; /* make gcc happy */ ··· 1272 1272 nseg = req->operation == BLKIF_OP_INDIRECT ? 1273 1273 req->u.indirect.nr_segments : req->u.rw.nr_segments; 1274 1274 1275 - if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) || 1275 + if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) || 1276 1276 unlikely((req->operation != BLKIF_OP_INDIRECT) && 1277 1277 (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) || 1278 1278 unlikely((req->operation == BLKIF_OP_INDIRECT) && ··· 1334 1334 } 1335 1335 1336 1336 /* Wait on all outstanding I/O's and once that has been completed 1337 - * issue the WRITE_FLUSH. 1337 + * issue the flush. 1338 1338 */ 1339 1339 if (drain) 1340 1340 xen_blk_drain_io(pending_req->ring); ··· 1380 1380 1381 1381 /* This will be hit if the operation was a flush or discard. */ 1382 1382 if (!bio) { 1383 - BUG_ON(operation_flags != WRITE_FLUSH); 1383 + BUG_ON(operation_flags != REQ_PREFLUSH); 1384 1384 1385 1385 bio = bio_alloc(GFP_KERNEL, 0); 1386 1386 if (unlikely(bio == NULL))
+2 -2
drivers/md/bcache/btree.c
··· 297 297 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; 298 298 bio->bi_end_io = btree_node_read_endio; 299 299 bio->bi_private = &cl; 300 - bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC); 300 + bio->bi_opf = REQ_OP_READ | REQ_META; 301 301 302 302 bch_bio_map(bio, b->keys.set[0].data); 303 303 ··· 393 393 b->bio->bi_end_io = btree_node_write_endio; 394 394 b->bio->bi_private = cl; 395 395 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); 396 - bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA); 396 + b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA; 397 397 bch_bio_map(b->bio, i); 398 398 399 399 /*
+2 -2
drivers/md/bcache/debug.c
··· 52 52 bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev; 53 53 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); 54 54 bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; 55 - bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC); 55 + bio->bi_opf = REQ_OP_READ | REQ_META; 56 56 bch_bio_map(bio, sorted); 57 57 58 58 submit_bio_wait(bio); ··· 113 113 check = bio_clone(bio, GFP_NOIO); 114 114 if (!check) 115 115 return; 116 - bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC); 116 + check->bi_opf = REQ_OP_READ; 117 117 118 118 if (bio_alloc_pages(check, GFP_NOIO)) 119 119 goto out_put;
+1 -1
drivers/md/bcache/request.c
··· 923 923 flush->bi_bdev = bio->bi_bdev; 924 924 flush->bi_end_io = request_endio; 925 925 flush->bi_private = cl; 926 - bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH); 926 + flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 927 927 928 928 closure_bio_submit(flush, cl); 929 929 }
+2 -2
drivers/md/bcache/super.c
··· 381 381 return "bad uuid pointer"; 382 382 383 383 bkey_copy(&c->uuid_bucket, k); 384 - uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl); 384 + uuid_io(c, REQ_OP_READ, 0, k, cl); 385 385 386 386 if (j->version < BCACHE_JSET_VERSION_UUIDv1) { 387 387 struct uuid_entry_v0 *u0 = (void *) c->uuids; ··· 600 600 ca->prio_last_buckets[bucket_nr] = bucket; 601 601 bucket_nr++; 602 602 603 - prio_io(ca, bucket, REQ_OP_READ, READ_SYNC); 603 + prio_io(ca, bucket, REQ_OP_READ, 0); 604 604 605 605 if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8)) 606 606 pr_warn("bad csum reading priorities");
+1 -1
drivers/md/dm-bufio.c
··· 1316 1316 { 1317 1317 struct dm_io_request io_req = { 1318 1318 .bi_op = REQ_OP_WRITE, 1319 - .bi_op_flags = WRITE_FLUSH, 1319 + .bi_op_flags = REQ_PREFLUSH, 1320 1320 .mem.type = DM_IO_KMEM, 1321 1321 .mem.ptr.addr = NULL, 1322 1322 .client = c->dm_io,
+1 -1
drivers/md/dm-log.c
··· 308 308 }; 309 309 310 310 lc->io_req.bi_op = REQ_OP_WRITE; 311 - lc->io_req.bi_op_flags = WRITE_FLUSH; 311 + lc->io_req.bi_op_flags = REQ_PREFLUSH; 312 312 313 313 return dm_io(&lc->io_req, 1, &null_location, NULL); 314 314 }
+2 -2
drivers/md/dm-raid1.c
··· 261 261 struct mirror *m; 262 262 struct dm_io_request io_req = { 263 263 .bi_op = REQ_OP_WRITE, 264 - .bi_op_flags = WRITE_FLUSH, 264 + .bi_op_flags = REQ_PREFLUSH, 265 265 .mem.type = DM_IO_KMEM, 266 266 .mem.ptr.addr = NULL, 267 267 .client = ms->io_client, ··· 657 657 struct mirror *m; 658 658 struct dm_io_request io_req = { 659 659 .bi_op = REQ_OP_WRITE, 660 - .bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA, 660 + .bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH), 661 661 .mem.type = DM_IO_BIO, 662 662 .mem.ptr.bio = bio, 663 663 .notify.fn = write_callback,
+2 -2
drivers/md/dm-snap-persistent.c
··· 741 741 /* 742 742 * Commit exceptions to disk. 743 743 */ 744 - if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA)) 744 + if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA)) 745 745 ps->valid = 0; 746 746 747 747 /* ··· 818 818 for (i = 0; i < nr_merged; i++) 819 819 clear_exception(ps, ps->current_committed - 1 - i); 820 820 821 - r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA); 821 + r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA); 822 822 if (r < 0) 823 823 return r; 824 824
+1 -1
drivers/md/dm.c
··· 1527 1527 1528 1528 bio_init(&md->flush_bio); 1529 1529 md->flush_bio.bi_bdev = md->bdev; 1530 - bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH); 1530 + md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 1531 1531 1532 1532 dm_stats_init(&md->stats); 1533 1533
+2 -2
drivers/md/md.c
··· 394 394 bi->bi_end_io = md_end_flush; 395 395 bi->bi_private = rdev; 396 396 bi->bi_bdev = rdev->bdev; 397 - bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH); 397 + bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 398 398 atomic_inc(&mddev->flush_pending); 399 399 submit_bio(bi); 400 400 rcu_read_lock(); ··· 743 743 bio_add_page(bio, page, size, 0); 744 744 bio->bi_private = rdev; 745 745 bio->bi_end_io = super_written; 746 - bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA); 746 + bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA; 747 747 748 748 atomic_inc(&mddev->pending_writes); 749 749 submit_bio(bio);
+2 -2
drivers/md/raid5-cache.c
··· 685 685 bio_reset(&log->flush_bio); 686 686 log->flush_bio.bi_bdev = log->rdev->bdev; 687 687 log->flush_bio.bi_end_io = r5l_log_flush_endio; 688 - bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH); 688 + log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 689 689 submit_bio(&log->flush_bio); 690 690 } 691 691 ··· 1053 1053 mb->checksum = cpu_to_le32(crc); 1054 1054 1055 1055 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, 1056 - WRITE_FUA, false)) { 1056 + REQ_FUA, false)) { 1057 1057 __free_page(page); 1058 1058 return -EIO; 1059 1059 }
+1 -1
drivers/md/raid5.c
··· 913 913 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { 914 914 op = REQ_OP_WRITE; 915 915 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) 916 - op_flags = WRITE_FUA; 916 + op_flags = REQ_FUA; 917 917 if (test_bit(R5_Discard, &sh->dev[i].flags)) 918 918 op = REQ_OP_DISCARD; 919 919 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
+2 -2
drivers/nvme/target/io-cmd.c
··· 58 58 59 59 if (req->cmd->rw.opcode == nvme_cmd_write) { 60 60 op = REQ_OP_WRITE; 61 - op_flags = WRITE_ODIRECT; 61 + op_flags = REQ_SYNC | REQ_IDLE; 62 62 if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) 63 63 op_flags |= REQ_FUA; 64 64 } else { ··· 109 109 bio->bi_bdev = req->ns->bdev; 110 110 bio->bi_private = req; 111 111 bio->bi_end_io = nvmet_bio_done; 112 - bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); 112 + bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 113 113 114 114 submit_bio(bio); 115 115 }
+4 -4
drivers/target/target_core_iblock.c
··· 388 388 bio = bio_alloc(GFP_KERNEL, 0); 389 389 bio->bi_end_io = iblock_end_io_flush; 390 390 bio->bi_bdev = ib_dev->ibd_bd; 391 - bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); 391 + bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 392 392 if (!immed) 393 393 bio->bi_private = cmd; 394 394 submit_bio(bio); ··· 686 686 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 687 687 struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd); 688 688 /* 689 - * Force writethrough using WRITE_FUA if a volatile write cache 689 + * Force writethrough using REQ_FUA if a volatile write cache 690 690 * is not enabled, or if initiator set the Force Unit Access bit. 691 691 */ 692 692 op = REQ_OP_WRITE; 693 693 if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) { 694 694 if (cmd->se_cmd_flags & SCF_FUA) 695 - op_flags = WRITE_FUA; 695 + op_flags = REQ_FUA; 696 696 else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) 697 - op_flags = WRITE_FUA; 697 + op_flags = REQ_FUA; 698 698 } 699 699 } else { 700 700 op = REQ_OP_READ;
+3 -3
fs/btrfs/disk-io.c
··· 3485 3485 * to go down lazy. 3486 3486 */ 3487 3487 if (i == 0) 3488 - ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_FUA, bh); 3488 + ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh); 3489 3489 else 3490 - ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh); 3490 + ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh); 3491 3491 if (ret) 3492 3492 errors++; 3493 3493 } ··· 3551 3551 3552 3552 bio->bi_end_io = btrfs_end_empty_barrier; 3553 3553 bio->bi_bdev = device->bdev; 3554 - bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); 3554 + bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 3555 3555 init_completion(&device->flush_wait); 3556 3556 bio->bi_private = &device->flush_wait; 3557 3557 device->flush_bio = bio;
+7 -9
fs/btrfs/extent_io.c
··· 127 127 */ 128 128 unsigned int extent_locked:1; 129 129 130 - /* tells the submit_bio code to use a WRITE_SYNC */ 130 + /* tells the submit_bio code to use REQ_SYNC */ 131 131 unsigned int sync_io:1; 132 132 }; 133 133 ··· 2047 2047 return -EIO; 2048 2048 } 2049 2049 bio->bi_bdev = dev->bdev; 2050 - bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC); 2050 + bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; 2051 2051 bio_add_page(bio, page, length, pg_offset); 2052 2052 2053 2053 if (btrfsic_submit_bio_wait(bio)) { ··· 2388 2388 struct inode *inode = page->mapping->host; 2389 2389 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 2390 2390 struct bio *bio; 2391 - int read_mode; 2391 + int read_mode = 0; 2392 2392 int ret; 2393 2393 2394 2394 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); ··· 2404 2404 } 2405 2405 2406 2406 if (failed_bio->bi_vcnt > 1) 2407 - read_mode = READ_SYNC | REQ_FAILFAST_DEV; 2408 - else 2409 - read_mode = READ_SYNC; 2407 + read_mode |= REQ_FAILFAST_DEV; 2410 2408 2411 2409 phy_offset >>= inode->i_sb->s_blocksize_bits; 2412 2410 bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, ··· 3482 3484 unsigned long nr_written = 0; 3483 3485 3484 3486 if (wbc->sync_mode == WB_SYNC_ALL) 3485 - write_flags = WRITE_SYNC; 3487 + write_flags = REQ_SYNC; 3486 3488 3487 3489 trace___extent_writepage(page, inode, wbc); 3488 3490 ··· 3727 3729 unsigned long i, num_pages; 3728 3730 unsigned long bio_flags = 0; 3729 3731 unsigned long start, end; 3730 - int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META; 3732 + int write_flags = (epd->sync_io ? REQ_SYNC : 0) | REQ_META; 3731 3733 int ret = 0; 3732 3734 3733 3735 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); ··· 4074 4076 int ret; 4075 4077 4076 4078 bio_set_op_attrs(epd->bio, REQ_OP_WRITE, 4077 - epd->sync_io ? WRITE_SYNC : 0); 4079 + epd->sync_io ? REQ_SYNC : 0); 4078 4080 4079 4081 ret = submit_one_bio(epd->bio, 0, epd->bio_flags); 4080 4082 BUG_ON(ret < 0); /* -ENOMEM */
+2 -4
fs/btrfs/inode.c
··· 7917 7917 struct io_failure_record *failrec; 7918 7918 struct bio *bio; 7919 7919 int isector; 7920 - int read_mode; 7920 + int read_mode = 0; 7921 7921 int ret; 7922 7922 7923 7923 BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE); ··· 7936 7936 if ((failed_bio->bi_vcnt > 1) 7937 7937 || (failed_bio->bi_io_vec->bv_len 7938 7938 > BTRFS_I(inode)->root->sectorsize)) 7939 - read_mode = READ_SYNC | REQ_FAILFAST_DEV; 7940 - else 7941 - read_mode = READ_SYNC; 7939 + read_mode |= REQ_FAILFAST_DEV; 7942 7940 7943 7941 isector = start - btrfs_io_bio(failed_bio)->logical; 7944 7942 isector >>= inode->i_sb->s_blocksize_bits;
+1 -1
fs/btrfs/scrub.c
··· 4440 4440 bio->bi_iter.bi_size = 0; 4441 4441 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; 4442 4442 bio->bi_bdev = dev->bdev; 4443 - bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC); 4443 + bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; 4444 4444 ret = bio_add_page(bio, page, PAGE_SIZE, 0); 4445 4445 if (ret != PAGE_SIZE) { 4446 4446 leave_with_eio:
+1 -1
fs/btrfs/volumes.c
··· 6023 6023 else 6024 6024 btrfs_dev_stat_inc(dev, 6025 6025 BTRFS_DEV_STAT_READ_ERRS); 6026 - if ((bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH) 6026 + if (bio->bi_opf & REQ_PREFLUSH) 6027 6027 btrfs_dev_stat_inc(dev, 6028 6028 BTRFS_DEV_STAT_FLUSH_ERRS); 6029 6029 btrfs_dev_stat_print_on_error(dev);
+1 -1
fs/btrfs/volumes.h
··· 62 62 int running_pending; 63 63 /* regular prio bios */ 64 64 struct btrfs_pending_bios pending_bios; 65 - /* WRITE_SYNC bios */ 65 + /* sync bios */ 66 66 struct btrfs_pending_bios pending_sync_bios; 67 67 68 68 struct block_device *bdev;
+4 -4
fs/buffer.c
··· 753 753 * still in flight on potentially older 754 754 * contents. 755 755 */ 756 - write_dirty_buffer(bh, WRITE_SYNC); 756 + write_dirty_buffer(bh, REQ_SYNC); 757 757 758 758 /* 759 759 * Kick off IO for the previous mapping. Note ··· 1684 1684 * prevents this contention from occurring. 1685 1685 * 1686 1686 * If block_write_full_page() is called with wbc->sync_mode == 1687 - * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this 1687 + * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this 1688 1688 * causes the writes to be flagged as synchronous writes. 1689 1689 */ 1690 1690 int __block_write_full_page(struct inode *inode, struct page *page, ··· 1697 1697 struct buffer_head *bh, *head; 1698 1698 unsigned int blocksize, bbits; 1699 1699 int nr_underway = 0; 1700 - int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0); 1700 + int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0); 1701 1701 1702 1702 head = create_page_buffers(page, inode, 1703 1703 (1 << BH_Dirty)|(1 << BH_Uptodate)); ··· 3210 3210 3211 3211 int sync_dirty_buffer(struct buffer_head *bh) 3212 3212 { 3213 - return __sync_dirty_buffer(bh, WRITE_SYNC); 3213 + return __sync_dirty_buffer(bh, REQ_SYNC); 3214 3214 } 3215 3215 EXPORT_SYMBOL(sync_dirty_buffer); 3216 3216
+1 -1
fs/direct-io.c
··· 1209 1209 dio->inode = inode; 1210 1210 if (iov_iter_rw(iter) == WRITE) { 1211 1211 dio->op = REQ_OP_WRITE; 1212 - dio->op_flags = WRITE_ODIRECT; 1212 + dio->op_flags = REQ_SYNC | REQ_IDLE; 1213 1213 } else { 1214 1214 dio->op = REQ_OP_READ; 1215 1215 }
+3 -3
fs/ext4/mmp.c
··· 35 35 } 36 36 37 37 /* 38 - * Write the MMP block using WRITE_SYNC to try to get the block on-disk 38 + * Write the MMP block using REQ_SYNC to try to get the block on-disk 39 39 * faster. 40 40 */ 41 41 static int write_mmp_block(struct super_block *sb, struct buffer_head *bh) ··· 52 52 lock_buffer(bh); 53 53 bh->b_end_io = end_buffer_write_sync; 54 54 get_bh(bh); 55 - submit_bh(REQ_OP_WRITE, WRITE_SYNC | REQ_META | REQ_PRIO, bh); 55 + submit_bh(REQ_OP_WRITE, REQ_SYNC | REQ_META | REQ_PRIO, bh); 56 56 wait_on_buffer(bh); 57 57 sb_end_write(sb); 58 58 if (unlikely(!buffer_uptodate(bh))) ··· 88 88 get_bh(*bh); 89 89 lock_buffer(*bh); 90 90 (*bh)->b_end_io = end_buffer_read_sync; 91 - submit_bh(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, *bh); 91 + submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, *bh); 92 92 wait_on_buffer(*bh); 93 93 if (!buffer_uptodate(*bh)) { 94 94 ret = -EIO;
+1 -1
fs/ext4/page-io.c
··· 340 340 341 341 if (bio) { 342 342 int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ? 343 - WRITE_SYNC : 0; 343 + REQ_SYNC : 0; 344 344 bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags); 345 345 submit_bio(io->io_bio); 346 346 }
+1 -1
fs/ext4/super.c
··· 4553 4553 unlock_buffer(sbh); 4554 4554 if (sync) { 4555 4555 error = __sync_dirty_buffer(sbh, 4556 - test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC); 4556 + test_opt(sb, BARRIER) ? REQ_FUA : REQ_SYNC); 4557 4557 if (error) 4558 4558 return error; 4559 4559
+2 -2
fs/f2fs/checkpoint.c
··· 65 65 .sbi = sbi, 66 66 .type = META, 67 67 .op = REQ_OP_READ, 68 - .op_flags = READ_SYNC | REQ_META | REQ_PRIO, 68 + .op_flags = REQ_META | REQ_PRIO, 69 69 .old_blkaddr = index, 70 70 .new_blkaddr = index, 71 71 .encrypted_page = NULL, ··· 160 160 .sbi = sbi, 161 161 .type = META, 162 162 .op = REQ_OP_READ, 163 - .op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : REQ_RAHEAD, 163 + .op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD, 164 164 .encrypted_page = NULL, 165 165 }; 166 166 struct blk_plug plug;
+7 -9
fs/f2fs/data.c
··· 198 198 if (type >= META_FLUSH) { 199 199 io->fio.type = META_FLUSH; 200 200 io->fio.op = REQ_OP_WRITE; 201 - if (test_opt(sbi, NOBARRIER)) 202 - io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO; 203 - else 204 - io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META | 205 - REQ_PRIO; 201 + io->fio.op_flags = REQ_PREFLUSH | REQ_META | REQ_PRIO; 202 + if (!test_opt(sbi, NOBARRIER)) 203 + io->fio.op_flags |= REQ_FUA; 206 204 } 207 205 __submit_merged_bio(io); 208 206 out: ··· 481 483 return page; 482 484 f2fs_put_page(page, 0); 483 485 484 - page = get_read_data_page(inode, index, READ_SYNC, false); 486 + page = get_read_data_page(inode, index, 0, false); 485 487 if (IS_ERR(page)) 486 488 return page; 487 489 ··· 507 509 struct address_space *mapping = inode->i_mapping; 508 510 struct page *page; 509 511 repeat: 510 - page = get_read_data_page(inode, index, READ_SYNC, for_write); 512 + page = get_read_data_page(inode, index, 0, for_write); 511 513 if (IS_ERR(page)) 512 514 return page; 513 515 ··· 1249 1251 .sbi = sbi, 1250 1252 .type = DATA, 1251 1253 .op = REQ_OP_WRITE, 1252 - .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0, 1254 + .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? REQ_SYNC : 0, 1253 1255 .page = page, 1254 1256 .encrypted_page = NULL, 1255 1257 }; ··· 1661 1663 err = PTR_ERR(bio); 1662 1664 goto fail; 1663 1665 } 1664 - bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC); 1666 + bio->bi_opf = REQ_OP_READ; 1665 1667 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { 1666 1668 bio_put(bio); 1667 1669 err = -EFAULT;
+3 -3
fs/f2fs/gc.c
··· 550 550 .sbi = F2FS_I_SB(inode), 551 551 .type = DATA, 552 552 .op = REQ_OP_READ, 553 - .op_flags = READ_SYNC, 553 + .op_flags = 0, 554 554 .encrypted_page = NULL, 555 555 }; 556 556 struct dnode_of_data dn; ··· 625 625 f2fs_wait_on_page_writeback(dn.node_page, NODE, true); 626 626 627 627 fio.op = REQ_OP_WRITE; 628 - fio.op_flags = WRITE_SYNC; 628 + fio.op_flags = REQ_SYNC; 629 629 fio.new_blkaddr = newaddr; 630 630 f2fs_submit_page_mbio(&fio); 631 631 ··· 663 663 .sbi = F2FS_I_SB(inode), 664 664 .type = DATA, 665 665 .op = REQ_OP_WRITE, 666 - .op_flags = WRITE_SYNC, 666 + .op_flags = REQ_SYNC, 667 667 .page = page, 668 668 .encrypted_page = NULL, 669 669 };
+1 -1
fs/f2fs/inline.c
··· 111 111 .sbi = F2FS_I_SB(dn->inode), 112 112 .type = DATA, 113 113 .op = REQ_OP_WRITE, 114 - .op_flags = WRITE_SYNC | REQ_PRIO, 114 + .op_flags = REQ_SYNC | REQ_PRIO, 115 115 .page = page, 116 116 .encrypted_page = NULL, 117 117 };
+2 -2
fs/f2fs/node.c
··· 1134 1134 if (!page) 1135 1135 return ERR_PTR(-ENOMEM); 1136 1136 1137 - err = read_node_page(page, READ_SYNC); 1137 + err = read_node_page(page, 0); 1138 1138 if (err < 0) { 1139 1139 f2fs_put_page(page, 1); 1140 1140 return ERR_PTR(err); ··· 1570 1570 .sbi = sbi, 1571 1571 .type = NODE, 1572 1572 .op = REQ_OP_WRITE, 1573 - .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0, 1573 + .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? REQ_SYNC : 0, 1574 1574 .page = page, 1575 1575 .encrypted_page = NULL, 1576 1576 };
+4 -4
fs/f2fs/segment.c
··· 259 259 .sbi = sbi, 260 260 .type = DATA, 261 261 .op = REQ_OP_WRITE, 262 - .op_flags = WRITE_SYNC | REQ_PRIO, 262 + .op_flags = REQ_SYNC | REQ_PRIO, 263 263 .encrypted_page = NULL, 264 264 }; 265 265 bool submit_bio = false; ··· 420 420 fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list); 421 421 422 422 bio->bi_bdev = sbi->sb->s_bdev; 423 - bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); 423 + bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 424 424 ret = submit_bio_wait(bio); 425 425 426 426 llist_for_each_entry_safe(cmd, next, ··· 454 454 455 455 atomic_inc(&fcc->submit_flush); 456 456 bio->bi_bdev = sbi->sb->s_bdev; 457 - bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH); 457 + bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 458 458 ret = submit_bio_wait(bio); 459 459 atomic_dec(&fcc->submit_flush); 460 460 bio_put(bio); ··· 1515 1515 .sbi = sbi, 1516 1516 .type = META, 1517 1517 .op = REQ_OP_WRITE, 1518 - .op_flags = WRITE_SYNC | REQ_META | REQ_PRIO, 1518 + .op_flags = REQ_SYNC | REQ_META | REQ_PRIO, 1519 1519 .old_blkaddr = page->index, 1520 1520 .new_blkaddr = page->index, 1521 1521 .page = page,
+1 -1
fs/f2fs/super.c
··· 1238 1238 unlock_buffer(bh); 1239 1239 1240 1240 /* it's rare case, we can do fua all the time */ 1241 - return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA); 1241 + return __sync_dirty_buffer(bh, REQ_PREFLUSH | REQ_FUA); 1242 1242 } 1243 1243 1244 1244 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
+2 -2
fs/gfs2/log.c
··· 657 657 struct gfs2_log_header *lh; 658 658 unsigned int tail; 659 659 u32 hash; 660 - int op_flags = WRITE_FLUSH_FUA | REQ_META; 660 + int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META; 661 661 struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); 662 662 enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); 663 663 lh = page_address(page); ··· 682 682 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) { 683 683 gfs2_ordered_wait(sdp); 684 684 log_flush_wait(sdp); 685 - op_flags = WRITE_SYNC | REQ_META | REQ_PRIO; 685 + op_flags = REQ_SYNC | REQ_META | REQ_PRIO; 686 686 } 687 687 688 688 sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
+3 -3
fs/gfs2/meta_io.c
··· 38 38 struct buffer_head *bh, *head; 39 39 int nr_underway = 0; 40 40 int write_flags = REQ_META | REQ_PRIO | 41 - (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0); 41 + (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0); 42 42 43 43 BUG_ON(!PageLocked(page)); 44 44 BUG_ON(!page_has_buffers(page)); ··· 285 285 } 286 286 } 287 287 288 - gfs2_submit_bhs(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, bhs, num); 288 + gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num); 289 289 if (!(flags & DIO_WAIT)) 290 290 return 0; 291 291 ··· 453 453 if (buffer_uptodate(first_bh)) 454 454 goto out; 455 455 if (!buffer_locked(first_bh)) 456 - ll_rw_block(REQ_OP_READ, READ_SYNC | REQ_META, 1, &first_bh); 456 + ll_rw_block(REQ_OP_READ, REQ_META, 1, &first_bh); 457 457 458 458 dblock++; 459 459 extlen--;
+1 -1
fs/gfs2/ops_fstype.c
··· 246 246 247 247 bio->bi_end_io = end_bio_io_page; 248 248 bio->bi_private = page; 249 - bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC | REQ_META); 249 + bio_set_op_attrs(bio, REQ_OP_READ, REQ_META); 250 250 submit_bio(bio); 251 251 wait_on_page_locked(page); 252 252 bio_put(bio);
+2 -2
fs/hfsplus/super.c
··· 221 221 error2 = hfsplus_submit_bio(sb, 222 222 sbi->part_start + HFSPLUS_VOLHEAD_SECTOR, 223 223 sbi->s_vhdr_buf, NULL, REQ_OP_WRITE, 224 - WRITE_SYNC); 224 + REQ_SYNC); 225 225 if (!error) 226 226 error = error2; 227 227 if (!write_backup) ··· 230 230 error2 = hfsplus_submit_bio(sb, 231 231 sbi->part_start + sbi->sect_count - 2, 232 232 sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE, 233 - WRITE_SYNC); 233 + REQ_SYNC); 234 234 if (!error) 235 235 error2 = error; 236 236 out:
+1 -1
fs/jbd2/checkpoint.c
··· 186 186 187 187 blk_start_plug(&plug); 188 188 for (i = 0; i < *batch_count; i++) 189 - write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE_SYNC); 189 + write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC); 190 190 blk_finish_plug(&plug); 191 191 192 192 for (i = 0; i < *batch_count; i++) {
+5 -4
fs/jbd2/commit.c
··· 155 155 156 156 if (journal->j_flags & JBD2_BARRIER && 157 157 !jbd2_has_feature_async_commit(journal)) 158 - ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC | WRITE_FLUSH_FUA, bh); 158 + ret = submit_bh(REQ_OP_WRITE, 159 + REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh); 159 160 else 160 - ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh); 161 + ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh); 161 162 162 163 *cbh = bh; 163 164 return ret; ··· 403 402 jbd2_journal_update_sb_log_tail(journal, 404 403 journal->j_tail_sequence, 405 404 journal->j_tail, 406 - WRITE_SYNC); 405 + REQ_SYNC); 407 406 mutex_unlock(&journal->j_checkpoint_mutex); 408 407 } else { 409 408 jbd_debug(3, "superblock not updated\n"); ··· 718 717 clear_buffer_dirty(bh); 719 718 set_buffer_uptodate(bh); 720 719 bh->b_end_io = journal_end_buffer_io_sync; 721 - submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh); 720 + submit_bh(REQ_OP_WRITE, REQ_SYNC, bh); 722 721 } 723 722 cond_resched(); 724 723 stats.run.rs_blocks_logged += bufs;
+8 -7
fs/jbd2/journal.c
··· 913 913 * space and if we lose sb update during power failure we'd replay 914 914 * old transaction with possibly newly overwritten data. 915 915 */ 916 - ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA); 916 + ret = jbd2_journal_update_sb_log_tail(journal, tid, block, REQ_FUA); 917 917 if (ret) 918 918 goto out; 919 919 ··· 1306 1306 /* Lock here to make assertions happy... */ 1307 1307 mutex_lock(&journal->j_checkpoint_mutex); 1308 1308 /* 1309 - * Update log tail information. We use WRITE_FUA since new 1309 + * Update log tail information. We use REQ_FUA since new 1310 1310 * transaction will start reusing journal space and so we 1311 1311 * must make sure information about current log tail is on 1312 1312 * disk before that. ··· 1314 1314 jbd2_journal_update_sb_log_tail(journal, 1315 1315 journal->j_tail_sequence, 1316 1316 journal->j_tail, 1317 - WRITE_FUA); 1317 + REQ_FUA); 1318 1318 mutex_unlock(&journal->j_checkpoint_mutex); 1319 1319 } 1320 1320 return jbd2_journal_start_thread(journal); ··· 1454 1454 sb->s_errno = cpu_to_be32(journal->j_errno); 1455 1455 read_unlock(&journal->j_state_lock); 1456 1456 1457 - jbd2_write_superblock(journal, WRITE_FUA); 1457 + jbd2_write_superblock(journal, REQ_FUA); 1458 1458 } 1459 1459 EXPORT_SYMBOL(jbd2_journal_update_sb_errno); 1460 1460 ··· 1720 1720 ++journal->j_transaction_sequence; 1721 1721 write_unlock(&journal->j_state_lock); 1722 1722 1723 - jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA); 1723 + jbd2_mark_journal_empty(journal, 1724 + REQ_PREFLUSH | REQ_FUA); 1724 1725 mutex_unlock(&journal->j_checkpoint_mutex); 1725 1726 } else 1726 1727 err = -EIO; ··· 1980 1979 * the magic code for a fully-recovered superblock. Any future 1981 1980 * commits of data to the journal will restore the current 1982 1981 * s_start value. 
*/ 1983 - jbd2_mark_journal_empty(journal, WRITE_FUA); 1982 + jbd2_mark_journal_empty(journal, REQ_FUA); 1984 1983 mutex_unlock(&journal->j_checkpoint_mutex); 1985 1984 write_lock(&journal->j_state_lock); 1986 1985 J_ASSERT(!journal->j_running_transaction); ··· 2026 2025 if (write) { 2027 2026 /* Lock to make assertions happy... */ 2028 2027 mutex_lock(&journal->j_checkpoint_mutex); 2029 - jbd2_mark_journal_empty(journal, WRITE_FUA); 2028 + jbd2_mark_journal_empty(journal, REQ_FUA); 2030 2029 mutex_unlock(&journal->j_checkpoint_mutex); 2031 2030 } 2032 2031
+1 -1
fs/jbd2/revoke.c
··· 648 648 set_buffer_jwrite(descriptor); 649 649 BUFFER_TRACE(descriptor, "write"); 650 650 set_buffer_dirty(descriptor); 651 - write_dirty_buffer(descriptor, WRITE_SYNC); 651 + write_dirty_buffer(descriptor, REQ_SYNC); 652 652 } 653 653 #endif 654 654
+2 -2
fs/jfs/jfs_logmgr.c
··· 2002 2002 2003 2003 bio->bi_end_io = lbmIODone; 2004 2004 bio->bi_private = bp; 2005 - bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC); 2005 + bio->bi_opf = REQ_OP_READ; 2006 2006 /*check if journaling to disk has been disabled*/ 2007 2007 if (log->no_integrity) { 2008 2008 bio->bi_iter.bi_size = 0; ··· 2146 2146 2147 2147 bio->bi_end_io = lbmIODone; 2148 2148 bio->bi_private = bp; 2149 - bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC); 2149 + bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; 2150 2150 2151 2151 /* check if journaling to disk has been disabled */ 2152 2152 if (log->no_integrity) {
+3 -3
fs/mpage.c
··· 489 489 struct buffer_head map_bh; 490 490 loff_t i_size = i_size_read(inode); 491 491 int ret = 0; 492 - int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0); 492 + int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0); 493 493 494 494 if (page_has_buffers(page)) { 495 495 struct buffer_head *head = page_buffers(page); ··· 705 705 ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd); 706 706 if (mpd.bio) { 707 707 int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? 708 - WRITE_SYNC : 0); 708 + REQ_SYNC : 0); 709 709 mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio); 710 710 } 711 711 } ··· 726 726 int ret = __mpage_writepage(page, wbc, &mpd); 727 727 if (mpd.bio) { 728 728 int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? 729 - WRITE_SYNC : 0); 729 + REQ_SYNC : 0); 730 730 mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio); 731 731 } 732 732 return ret;
+1 -1
fs/nilfs2/super.c
··· 189 189 set_buffer_dirty(nilfs->ns_sbh[0]); 190 190 if (nilfs_test_opt(nilfs, BARRIER)) { 191 191 err = __sync_dirty_buffer(nilfs->ns_sbh[0], 192 - WRITE_SYNC | WRITE_FLUSH_FUA); 192 + REQ_SYNC | REQ_PREFLUSH | REQ_FUA); 193 193 } else { 194 194 err = sync_dirty_buffer(nilfs->ns_sbh[0]); 195 195 }
+1 -1
fs/ocfs2/cluster/heartbeat.c
··· 627 627 slot = o2nm_this_node(); 628 628 629 629 bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, REQ_OP_WRITE, 630 - WRITE_SYNC); 630 + REQ_SYNC); 631 631 if (IS_ERR(bio)) { 632 632 status = PTR_ERR(bio); 633 633 mlog_errno(status);
+4 -2
fs/reiserfs/journal.c
··· 1111 1111 mark_buffer_dirty(jl->j_commit_bh) ; 1112 1112 depth = reiserfs_write_unlock_nested(s); 1113 1113 if (reiserfs_barrier_flush(s)) 1114 - __sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA); 1114 + __sync_dirty_buffer(jl->j_commit_bh, 1115 + REQ_PREFLUSH | REQ_FUA); 1115 1116 else 1116 1117 sync_dirty_buffer(jl->j_commit_bh); 1117 1118 reiserfs_write_lock_nested(s, depth); ··· 1270 1269 depth = reiserfs_write_unlock_nested(sb); 1271 1270 1272 1271 if (reiserfs_barrier_flush(sb)) 1273 - __sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA); 1272 + __sync_dirty_buffer(journal->j_header_bh, 1273 + REQ_PREFLUSH | REQ_FUA); 1274 1274 else 1275 1275 sync_dirty_buffer(journal->j_header_bh); 1276 1276
+7 -4
fs/xfs/xfs_aops.c
··· 495 495 496 496 ioend->io_bio->bi_private = ioend; 497 497 ioend->io_bio->bi_end_io = xfs_end_bio; 498 - bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE, 499 - (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0); 498 + ioend->io_bio->bi_opf = REQ_OP_WRITE; 499 + if (wbc->sync_mode == WB_SYNC_ALL) 500 + ioend->io_bio->bi_opf |= REQ_SYNC; 501 + 500 502 /* 501 503 * If we are failing the IO now, just mark the ioend with an 502 504 * error and finish it. This will run IO completion immediately ··· 569 567 570 568 bio_chain(ioend->io_bio, new); 571 569 bio_get(ioend->io_bio); /* for xfs_destroy_ioend */ 572 - bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE, 573 - (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0); 570 + ioend->io_bio->bi_opf = REQ_OP_WRITE; 571 + if (wbc->sync_mode == WB_SYNC_ALL) 572 + ioend->io_bio->bi_opf |= REQ_SYNC; 574 573 submit_bio(ioend->io_bio); 575 574 ioend->io_bio = new; 576 575 }
+1 -1
fs/xfs/xfs_buf.c
··· 1304 1304 if (bp->b_flags & XBF_WRITE) { 1305 1305 op = REQ_OP_WRITE; 1306 1306 if (bp->b_flags & XBF_SYNCIO) 1307 - op_flags = WRITE_SYNC; 1307 + op_flags = REQ_SYNC; 1308 1308 if (bp->b_flags & XBF_FUA) 1309 1309 op_flags |= REQ_FUA; 1310 1310 if (bp->b_flags & XBF_FLUSH)
-47
include/linux/fs.h
··· 151 151 */ 152 152 #define CHECK_IOVEC_ONLY -1 153 153 154 - /* 155 - * The below are the various read and write flags that we support. Some of 156 - * them include behavioral modifiers that send information down to the 157 - * block layer and IO scheduler. They should be used along with a req_op. 158 - * Terminology: 159 - * 160 - * The block layer uses device plugging to defer IO a little bit, in 161 - * the hope that we will see more IO very shortly. This increases 162 - * coalescing of adjacent IO and thus reduces the number of IOs we 163 - * have to send to the device. It also allows for better queuing, 164 - * if the IO isn't mergeable. If the caller is going to be waiting 165 - * for the IO, then he must ensure that the device is unplugged so 166 - * that the IO is dispatched to the driver. 167 - * 168 - * All IO is handled async in Linux. This is fine for background 169 - * writes, but for reads or writes that someone waits for completion 170 - * on, we want to notify the block layer and IO scheduler so that they 171 - * know about it. That allows them to make better scheduling 172 - * decisions. So when the below references 'sync' and 'async', it 173 - * is referencing this priority hint. 174 - * 175 - * With that in mind, the available types are: 176 - * 177 - * READ A normal read operation. Device will be plugged. 178 - * READ_SYNC A synchronous read. Device is not plugged, caller can 179 - * immediately wait on this read without caring about 180 - * unplugging. 181 - * WRITE A normal async write. Device will be plugged. 182 - * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down 183 - * the hint that someone will be waiting on this IO 184 - * shortly. The write equivalent of READ_SYNC. 185 - * WRITE_ODIRECT Special case write for O_DIRECT only. 186 - * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush. 187 - * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on 188 - * non-volatile media on completion. 
189 - * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded 190 - * by a cache flush and data is guaranteed to be on 191 - * non-volatile media on completion. 192 - * 193 - */ 194 154 #define RW_MASK REQ_OP_WRITE 195 155 196 156 #define READ REQ_OP_READ 197 157 #define WRITE REQ_OP_WRITE 198 - 199 - #define READ_SYNC 0 200 - #define WRITE_SYNC REQ_SYNC 201 - #define WRITE_ODIRECT (REQ_SYNC | REQ_IDLE) 202 - #define WRITE_FLUSH REQ_PREFLUSH 203 - #define WRITE_FUA REQ_FUA 204 - #define WRITE_FLUSH_FUA (REQ_PREFLUSH | REQ_FUA) 205 158 206 159 /* 207 160 * Attribute flags. These should be or-ed together to figure out what
+4 -6
include/trace/events/f2fs.h
··· 55 55 { IPU, "IN-PLACE" }, \ 56 56 { OPU, "OUT-OF-PLACE" }) 57 57 58 - #define F2FS_BIO_FLAG_MASK(t) (t & (REQ_RAHEAD | WRITE_FLUSH_FUA)) 58 + #define F2FS_BIO_FLAG_MASK(t) (t & (REQ_RAHEAD | REQ_PREFLUSH | REQ_FUA)) 59 59 #define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO)) 60 60 61 61 #define show_bio_type(op_flags) show_bio_op_flags(op_flags), \ ··· 65 65 __print_symbolic(F2FS_BIO_FLAG_MASK(flags), \ 66 66 { 0, "WRITE" }, \ 67 67 { REQ_RAHEAD, "READAHEAD" }, \ 68 - { READ_SYNC, "READ_SYNC" }, \ 69 - { WRITE_SYNC, "WRITE_SYNC" }, \ 70 - { WRITE_FLUSH, "WRITE_FLUSH" }, \ 71 - { WRITE_FUA, "WRITE_FUA" }, \ 72 - { WRITE_FLUSH_FUA, "WRITE_FLUSH_FUA" }) 68 + { REQ_SYNC, "REQ_SYNC" }, \ 69 + { REQ_PREFLUSH, "REQ_PREFLUSH" }, \ 70 + { REQ_FUA, "REQ_FUA" }) 73 71 74 72 #define show_bio_extra(type) \ 75 73 __print_symbolic(F2FS_BIO_EXTRA_MASK(type), \
+9 -10
kernel/power/swap.c
··· 307 307 { 308 308 int error; 309 309 310 - hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block, 310 + hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block, 311 311 swsusp_header, NULL); 312 312 if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || 313 313 !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { ··· 317 317 swsusp_header->flags = flags; 318 318 if (flags & SF_CRC32_MODE) 319 319 swsusp_header->crc32 = handle->crc32; 320 - error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, 320 + error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC, 321 321 swsusp_resume_block, swsusp_header, NULL); 322 322 } else { 323 323 printk(KERN_ERR "PM: Swap header not found!\n"); ··· 397 397 } else { 398 398 src = buf; 399 399 } 400 - return hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, offset, src, hb); 400 + return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb); 401 401 } 402 402 403 403 static void release_swap_writer(struct swap_map_handle *handle) ··· 1000 1000 return -ENOMEM; 1001 1001 } 1002 1002 1003 - error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset, 1004 - tmp->map, NULL); 1003 + error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL); 1005 1004 if (error) { 1006 1005 release_swap_reader(handle); 1007 1006 return error; ··· 1024 1025 offset = handle->cur->entries[handle->k]; 1025 1026 if (!offset) 1026 1027 return -EFAULT; 1027 - error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset, buf, hb); 1028 + error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb); 1028 1029 if (error) 1029 1030 return error; 1030 1031 if (++handle->k >= MAP_PAGE_ENTRIES) { ··· 1533 1534 if (!IS_ERR(hib_resume_bdev)) { 1534 1535 set_blocksize(hib_resume_bdev, PAGE_SIZE); 1535 1536 clear_page(swsusp_header); 1536 - error = hib_submit_io(REQ_OP_READ, READ_SYNC, 1537 + error = hib_submit_io(REQ_OP_READ, 0, 1537 1538 swsusp_resume_block, 1538 1539 swsusp_header, NULL); 1539 1540 if (error) ··· 1542 1543 if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) { 1543 1544 memcpy(swsusp_header->sig, 
swsusp_header->orig_sig, 10); 1544 1545 /* Reset swap signature now */ 1545 - error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, 1546 + error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC, 1546 1547 swsusp_resume_block, 1547 1548 swsusp_header, NULL); 1548 1549 } else { ··· 1587 1588 { 1588 1589 int error; 1589 1590 1590 - hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block, 1591 + hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block, 1591 1592 swsusp_header, NULL); 1592 1593 if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) { 1593 1594 memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10); 1594 - error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, 1595 + error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC, 1595 1596 swsusp_resume_block, 1596 1597 swsusp_header, NULL); 1597 1598 } else {