Documentation/block/biodoc.txt (+2 -2)

@@ -269,7 +269,7 @@
 requests which haven't aged too much on the queue. Potentially this priority
 could even be exposed to applications in some manner, providing higher level
 tunability. Time based aging avoids starvation of lower priority
-requests. Some bits in the bi_rw flags field in the bio structure are
+requests. Some bits in the bi_opf flags field in the bio structure are
 intended to be used for this priority information.


@@ -432,7 +432,7 @@
 struct bio *bi_next;	/* request queue link */
 struct block_device *bi_bdev;	/* target device */
 unsigned long bi_flags;	/* status, command, etc */
-unsigned long bi_rw;	/* low bits: r/w, high: priority */
+unsigned long bi_opf;	/* low bits: r/w, high: priority */

 unsigned int bi_vcnt;	/* how may bio_vec's */
 struct bvec_iter bi_iter;	/* current index into bio_vec array */
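The low-bits/high-bits split described above is normally consumed through accessors rather than by open-coding masks. A minimal, hypothetical sketch of a driver-side check, using only helpers and REQ_* flag bits that appear elsewhere in this patch (bio_op(), op_is_write(), REQ_SYNC, REQ_RAHEAD); it is not part of the patch itself:

    #include <linux/bio.h>
    #include <linux/printk.h>

    /* Sketch only: classify an incoming bio by operation and bi_opf flag bits. */
    static void classify_bio(struct bio *bio)
    {
            if (op_is_write(bio_op(bio)) && (bio->bi_opf & REQ_SYNC))
                    pr_debug("synchronous write\n");
            else if (!op_is_write(bio_op(bio)) && (bio->bi_opf & REQ_RAHEAD))
                    pr_debug("readahead read\n");
            else
                    pr_debug("op %u, bi_opf 0x%x\n", bio_op(bio), bio->bi_opf);
    }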
Documentation/device-mapper/dm-flakey.txt (+1 -1)

@@ -42,7 +42,7 @@
 <direction>: Either 'r' to corrupt reads or 'w' to corrupt writes.
 	     'w' is incompatible with drop_writes.
 <value>: The value (from 0-255) to write.
-<flags>: Perform the replacement only if bio->bi_rw has all the
+<flags>: Perform the replacement only if bio->bi_opf has all the
 	 selected flags set.

 Examples:
block/bio-integrity.c (+1 -1)
block/bio.c (+3 -3)
···
580
580
*/
581
581
bio->bi_bdev = bio_src->bi_bdev;
582
582
bio_set_flag(bio, BIO_CLONED);
583
-
bio->bi_rw = bio_src->bi_rw;
583
+
bio->bi_opf = bio_src->bi_opf;
584
584
bio->bi_iter = bio_src->bi_iter;
585
585
bio->bi_io_vec = bio_src->bi_io_vec;
586
586
···
663
663
if (!bio)
664
664
return NULL;
665
665
bio->bi_bdev = bio_src->bi_bdev;
666
-
bio->bi_rw = bio_src->bi_rw;
666
+
bio->bi_opf = bio_src->bi_opf;
667
667
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
668
668
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
669
669
···
873
873
init_completion(&ret.event);
874
874
bio->bi_private = &ret;
875
875
bio->bi_end_io = submit_bio_wait_endio;
876
-
bio->bi_rw |= REQ_SYNC;
876
+
bio->bi_opf |= REQ_SYNC;
877
877
submit_bio(bio);
878
878
wait_for_completion_io(&ret.event);
879
879
block/blk-core.c (+13 -13)
···
1029
1029
* Flush requests do not use the elevator so skip initialization.
1030
1030
* This allows a request to share the flush and elevator data.
1031
1031
*/
1032
-
if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
1032
+
if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
1033
1033
return false;
1034
1034
1035
1035
return true;
···
1504
1504
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1505
1505
struct bio *bio)
1506
1506
{
1507
-
const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1507
+
const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
1508
1508
1509
1509
if (!ll_back_merge_fn(q, req, bio))
1510
1510
return false;
···
1526
1526
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1527
1527
struct bio *bio)
1528
1528
{
1529
-
const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1529
+
const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
1530
1530
1531
1531
if (!ll_front_merge_fn(q, req, bio))
1532
1532
return false;
···
1648
1648
{
1649
1649
req->cmd_type = REQ_TYPE_FS;
1650
1650
1651
-
req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
1652
-
if (bio->bi_rw & REQ_RAHEAD)
1651
+
req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
1652
+
if (bio->bi_opf & REQ_RAHEAD)
1653
1653
req->cmd_flags |= REQ_FAILFAST_MASK;
1654
1654
1655
1655
req->errors = 0;
···
1660
1660
1661
1661
static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
1662
1662
{
1663
-
const bool sync = !!(bio->bi_rw & REQ_SYNC);
1663
+
const bool sync = !!(bio->bi_opf & REQ_SYNC);
1664
1664
struct blk_plug *plug;
1665
1665
int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
1666
1666
struct request *req;
···
1681
1681
return BLK_QC_T_NONE;
1682
1682
}
1683
1683
1684
-
if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
1684
+
if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
1685
1685
spin_lock_irq(q->queue_lock);
1686
1686
where = ELEVATOR_INSERT_FLUSH;
1687
1687
goto get_rq;
···
1728
1728
/*
1729
1729
* Add in META/PRIO flags, if set, before we get to the IO scheduler
1730
1730
*/
1731
-
rw_flags |= (bio->bi_rw & (REQ_META | REQ_PRIO));
1731
+
rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));
1732
1732
1733
1733
/*
1734
1734
* Grab a free request. This is might sleep but can not fail.
···
1805
1805
printk(KERN_INFO "attempt to access beyond end of device\n");
1806
1806
printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
1807
1807
bdevname(bio->bi_bdev, b),
1808
-
bio->bi_rw,
1808
+
bio->bi_opf,
1809
1809
(unsigned long long)bio_end_sector(bio),
1810
1810
(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1811
1811
}
···
1918
1918
* drivers without flush support don't have to worry
1919
1919
* about them.
1920
1920
*/
1921
-
if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
1921
+
if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
1922
1922
!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
1923
-
bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
1923
+
bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
1924
1924
if (!nr_sectors) {
1925
1925
err = 0;
1926
1926
goto end_io;
···
2219
2219
* one.
2220
2220
*/
2221
2221
for (bio = rq->bio; bio; bio = bio->bi_next) {
2222
-
if ((bio->bi_rw & ff) != ff)
2222
+
if ((bio->bi_opf & ff) != ff)
2223
2223
break;
2224
2224
bytes += bio->bi_iter.bi_size;
2225
2225
}
···
2630
2630
/* mixed attributes always follow the first bio */
2631
2631
if (req->cmd_flags & REQ_MIXED_MERGE) {
2632
2632
req->cmd_flags &= ~REQ_FAILFAST_MASK;
2633
-
req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
2633
+
req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
2634
2634
}
2635
2635
2636
2636
/*
block/blk-merge.c (+4 -4)
···
186
186
187
187
if (split) {
188
188
/* there isn't chance to merge the splitted bio */
189
-
split->bi_rw |= REQ_NOMERGE;
189
+
split->bi_opf |= REQ_NOMERGE;
190
190
191
191
bio_chain(split, *bio);
192
192
trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
···
616
616
* Distributes the attributs to each bio.
617
617
*/
618
618
for (bio = rq->bio; bio; bio = bio->bi_next) {
619
-
WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
620
-
(bio->bi_rw & REQ_FAILFAST_MASK) != ff);
621
-
bio->bi_rw |= ff;
619
+
WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
620
+
(bio->bi_opf & REQ_FAILFAST_MASK) != ff);
621
+
bio->bi_opf |= ff;
622
622
}
623
623
rq->cmd_flags |= REQ_MIXED_MERGE;
624
624
}
block/blk-mq.c (+5 -5)
···
1234
1234
ctx = blk_mq_get_ctx(q);
1235
1235
hctx = q->mq_ops->map_queue(q, ctx->cpu);
1236
1236
1237
-
if (rw_is_sync(bio_op(bio), bio->bi_rw))
1237
+
if (rw_is_sync(bio_op(bio), bio->bi_opf))
1238
1238
op_flags |= REQ_SYNC;
1239
1239
1240
1240
trace_block_getrq(q, bio, op);
···
1302
1302
*/
1303
1303
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1304
1304
{
1305
-
const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
1306
-
const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
1305
+
const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
1306
+
const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
1307
1307
struct blk_map_ctx data;
1308
1308
struct request *rq;
1309
1309
unsigned int request_count = 0;
···
1396
1396
*/
1397
1397
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
1398
1398
{
1399
-
const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
1400
-
const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
1399
+
const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
1400
+
const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
1401
1401
struct blk_plug *plug;
1402
1402
unsigned int request_count = 0;
1403
1403
struct blk_map_ctx data;
block/blk-throttle.c (+4 -4)
···
821
821
* second time when it eventually gets issued. Set it when a bio
822
822
* is being charged to a tg.
823
823
*/
824
-
if (!(bio->bi_rw & REQ_THROTTLED))
825
-
bio->bi_rw |= REQ_THROTTLED;
824
+
if (!(bio->bi_opf & REQ_THROTTLED))
825
+
bio->bi_opf |= REQ_THROTTLED;
826
826
}
827
827
828
828
/**
···
1399
1399
WARN_ON_ONCE(!rcu_read_lock_held());
1400
1400
1401
1401
/* see throtl_charge_bio() */
1402
-
if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
1402
+
if ((bio->bi_opf & REQ_THROTTLED) || !tg->has_rules[rw])
1403
1403
goto out;
1404
1404
1405
1405
spin_lock_irq(q->queue_lock);
···
1478
1478
* being issued.
1479
1479
*/
1480
1480
if (!throttled)
1481
-
bio->bi_rw &= ~REQ_THROTTLED;
1481
+
bio->bi_opf &= ~REQ_THROTTLED;
1482
1482
return throttled;
1483
1483
}
1484
1484
block/cfq-iosched.c (+2 -2)
···
918
918
*/
919
919
static inline bool cfq_bio_sync(struct bio *bio)
920
920
{
921
-
return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
921
+
return bio_data_dir(bio) == READ || (bio->bi_opf & REQ_SYNC);
922
922
}
923
923
924
924
/*
···
2565
2565
static void cfq_bio_merged(struct request_queue *q, struct request *req,
2566
2566
struct bio *bio)
2567
2567
{
2568
-
cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_rw);
2568
+
cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf);
2569
2569
}
2570
2570
2571
2571
static void
drivers/block/drbd/drbd_main.c (+4 -4)
···
1663
1663
struct bio *bio)
1664
1664
{
1665
1665
if (connection->agreed_pro_version >= 95)
1666
-
return (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
1667
-
(bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
1668
-
(bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) |
1666
+
return (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
1667
+
(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
1668
+
(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
1669
1669
(bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
1670
1670
(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
1671
1671
else
1672
-
return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
1672
+
return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
1673
1673
}
1674
1674
1675
1675
/* Used to send write or TRIM aka REQ_DISCARD requests
drivers/block/drbd/drbd_receiver.c (+1 -1)
···
1564
1564
* drbd_submit_peer_request()
1565
1565
* @device: DRBD device.
1566
1566
* @peer_req: peer request
1567
-
* @rw: flag field, see bio->bi_rw
1567
+
* @rw: flag field, see bio->bi_opf
1568
1568
*
1569
1569
* May spread the pages to multiple bios,
1570
1570
* depending on bio_add_page restrictions.
drivers/block/drbd/drbd_req.c (+3 -3)
···
288
288
*/
289
289
if (!ok &&
290
290
bio_op(req->master_bio) == REQ_OP_READ &&
291
-
!(req->master_bio->bi_rw & REQ_RAHEAD) &&
291
+
!(req->master_bio->bi_opf & REQ_RAHEAD) &&
292
292
!list_empty(&req->tl_requests))
293
293
req->rq_state |= RQ_POSTPONED;
294
294
···
1137
1137
* replicating, in which case there is no point. */
1138
1138
if (unlikely(req->i.size == 0)) {
1139
1139
/* The only size==0 bios we expect are empty flushes. */
1140
-
D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH);
1140
+
D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
1141
1141
if (remote)
1142
1142
_req_mod(req, QUEUE_AS_DRBD_BARRIER);
1143
1143
return remote;
···
1176
1176
1177
1177
if (bio_op(bio) != REQ_OP_READ)
1178
1178
type = DRBD_FAULT_DT_WR;
1179
-
else if (bio->bi_rw & REQ_RAHEAD)
1179
+
else if (bio->bi_opf & REQ_RAHEAD)
1180
1180
type = DRBD_FAULT_DT_RA;
1181
1181
else
1182
1182
type = DRBD_FAULT_DT_RD;
drivers/block/drbd/drbd_worker.c (+1 -1)
drivers/block/pktcdvd.c (+1 -1)
···
1157
1157
1158
1158
bio_reset(pkt->bio);
1159
1159
pkt->bio->bi_bdev = pd->bdev;
1160
-
pkt->bio->bi_rw = REQ_WRITE;
1160
+
bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
1161
1161
pkt->bio->bi_iter.bi_sector = new_sector;
1162
1162
pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
1163
1163
pkt->bio->bi_vcnt = pkt->frames;
drivers/block/umem.c (+1 -1)
drivers/md/bcache/request.c (+6 -6)
···
208
208
* Journal writes are marked REQ_PREFLUSH; if the original write was a
209
209
* flush, it'll wait on the journal write.
210
210
*/
211
-
bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA);
211
+
bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
212
212
213
213
do {
214
214
unsigned i;
···
405
405
if (!congested &&
406
406
mode == CACHE_MODE_WRITEBACK &&
407
407
op_is_write(bio_op(bio)) &&
408
-
(bio->bi_rw & REQ_SYNC))
408
+
(bio->bi_opf & REQ_SYNC))
409
409
goto rescale;
410
410
411
411
spin_lock(&dc->io_lock);
···
668
668
s->iop.write_prio = 0;
669
669
s->iop.error = 0;
670
670
s->iop.flags = 0;
671
-
s->iop.flush_journal = (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0;
671
+
s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
672
672
s->iop.wq = bcache_wq;
673
673
674
674
return s;
···
796
796
goto out_submit;
797
797
}
798
798
799
-
if (!(bio->bi_rw & REQ_RAHEAD) &&
800
-
!(bio->bi_rw & REQ_META) &&
799
+
if (!(bio->bi_opf & REQ_RAHEAD) &&
800
+
!(bio->bi_opf & REQ_META) &&
801
801
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
802
802
reada = min_t(sector_t, dc->readahead >> 9,
803
803
bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
···
920
920
bch_writeback_add(dc);
921
921
s->iop.bio = bio;
922
922
923
-
if (bio->bi_rw & REQ_PREFLUSH) {
923
+
if (bio->bi_opf & REQ_PREFLUSH) {
924
924
/* Also need to send a flush to the backing device */
925
925
struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
926
926
dc->disk.bio_split);
drivers/md/bcache/super.c (+1 -1)
drivers/md/bcache/writeback.h (+1 -1)
drivers/md/dm-cache-target.c (+4 -4)
···
788
788
789
789
spin_lock_irqsave(&cache->lock, flags);
790
790
if (cache->need_tick_bio &&
791
-
!(bio->bi_rw & (REQ_FUA | REQ_PREFLUSH)) &&
791
+
!(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
792
792
bio_op(bio) != REQ_OP_DISCARD) {
793
793
pb->tick = true;
794
794
cache->need_tick_bio = false;
···
830
830
831
831
static int bio_triggers_commit(struct cache *cache, struct bio *bio)
832
832
{
833
-
return bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
833
+
return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
834
834
}
835
835
836
836
/*
···
1069
1069
static bool discard_or_flush(struct bio *bio)
1070
1070
{
1071
1071
return bio_op(bio) == REQ_OP_DISCARD ||
1072
-
bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
1072
+
bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
1073
1073
}
1074
1074
1075
1075
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
···
1980
1980
1981
1981
bio = bio_list_pop(&bios);
1982
1982
1983
-
if (bio->bi_rw & REQ_PREFLUSH)
1983
+
if (bio->bi_opf & REQ_PREFLUSH)
1984
1984
process_flush_bio(cache, bio);
1985
1985
else if (bio_op(bio) == REQ_OP_DISCARD)
1986
1986
process_discard_bio(cache, &structs, bio);
drivers/md/dm-crypt.c (+2 -2)
···
1136
1136
clone->bi_private = io;
1137
1137
clone->bi_end_io = crypt_endio;
1138
1138
clone->bi_bdev = cc->dev->bdev;
1139
-
bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_rw);
1139
+
bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_opf);
1140
1140
}
1141
1141
1142
1142
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
···
1915
1915
* - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
1916
1916
* - for REQ_OP_DISCARD caller must use flush if IO ordering matters
1917
1917
*/
1918
-
if (unlikely(bio->bi_rw & REQ_PREFLUSH ||
1918
+
if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
1919
1919
bio_op(bio) == REQ_OP_DISCARD)) {
1920
1920
bio->bi_bdev = cc->dev->bdev;
1921
1921
if (bio_sectors(bio))
drivers/md/dm-era-target.c (+1 -1)
···
1542
1542
/*
1543
1543
* REQ_PREFLUSH bios carry no data, so we're not interested in them.
1544
1544
*/
1545
-
if (!(bio->bi_rw & REQ_PREFLUSH) &&
1545
+
if (!(bio->bi_opf & REQ_PREFLUSH) &&
1546
1546
(bio_data_dir(bio) == WRITE) &&
1547
1547
!metadata_current_marked(era->md, block)) {
1548
1548
defer_bio(era, bio);
drivers/md/dm-flakey.c (+3 -3)
···
16
16
#define DM_MSG_PREFIX "flakey"
17
17
18
18
#define all_corrupt_bio_flags_match(bio, fc) \
19
-
(((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
19
+
(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
20
20
21
21
/*
22
22
* Flakey: Used for testing only, simulates intermittent,
···
266
266
data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;
267
267
268
268
DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
269
-
"(rw=%c bi_rw=%u bi_sector=%llu cur_bytes=%u)\n",
269
+
"(rw=%c bi_opf=%u bi_sector=%llu cur_bytes=%u)\n",
270
270
bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
271
-
(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
271
+
(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
272
272
(unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
273
273
}
274
274
}
drivers/md/dm-io.c (+3 -3)

@@ -505,9 +505,9 @@
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
-* the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
-* If you fail to do one of these, the IO will be submitted to the disk after
-* q->unplug_delay, which defaults to 3ms in blk-settings.c.
+* the queue with blk_unplug() some time later or set REQ_SYNC in
+* io_req->bi_opf. If you fail to do one of these, the IO will be submitted to
+* the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
 	  struct dm_io_region *where, unsigned long *sync_error_bits)
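The REQ_SYNC point in the comment above is the same pattern this patch touches in drivers/md/dm-raid1.c, where a struct dm_io_request is filled in before calling dm_io() (the flag field is spelled bi_op_flags there). A hypothetical, minimal synchronous-write sketch along those lines; the field names are taken from the dm-raid1.c hunk further down, and the dm_io_client and region are assumed to be set up by the caller:

    #include <linux/bio.h>
    #include <linux/dm-io.h>

    /* Sketch only: a synchronous write via dm_io(); a NULL notify.fn makes dm_io() wait. */
    static int example_sync_write(struct dm_io_client *client,
                                  struct dm_io_region *region, struct bio *bio)
    {
            struct dm_io_request io_req = {
                    .bi_op = REQ_OP_WRITE,
                    .bi_op_flags = REQ_SYNC,
                    .mem.type = DM_IO_BIO,
                    .mem.ptr.bio = bio,
                    .notify.fn = NULL,
                    .client = client,
            };

            return dm_io(&io_req, 1, region, NULL);
    }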
drivers/md/dm-log-writes.c (+2 -2)
···
555
555
struct bio_vec bv;
556
556
size_t alloc_size;
557
557
int i = 0;
558
-
bool flush_bio = (bio->bi_rw & REQ_PREFLUSH);
559
-
bool fua_bio = (bio->bi_rw & REQ_FUA);
558
+
bool flush_bio = (bio->bi_opf & REQ_PREFLUSH);
559
+
bool fua_bio = (bio->bi_opf & REQ_FUA);
560
560
bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);
561
561
562
562
pb->block = NULL;
drivers/md/dm-mpath.c (+1 -1)
drivers/md/dm-raid1.c (+5 -5)
···
657
657
struct mirror *m;
658
658
struct dm_io_request io_req = {
659
659
.bi_op = REQ_OP_WRITE,
660
-
.bi_op_flags = bio->bi_rw & WRITE_FLUSH_FUA,
660
+
.bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
661
661
.mem.type = DM_IO_BIO,
662
662
.mem.ptr.bio = bio,
663
663
.notify.fn = write_callback,
···
704
704
bio_list_init(&requeue);
705
705
706
706
while ((bio = bio_list_pop(writes))) {
707
-
if ((bio->bi_rw & REQ_PREFLUSH) ||
707
+
if ((bio->bi_opf & REQ_PREFLUSH) ||
708
708
(bio_op(bio) == REQ_OP_DISCARD)) {
709
709
bio_list_add(&sync, bio);
710
710
continue;
···
1217
1217
* If region is not in-sync queue the bio.
1218
1218
*/
1219
1219
if (!r || (r == -EWOULDBLOCK)) {
1220
-
if (bio->bi_rw & REQ_RAHEAD)
1220
+
if (bio->bi_opf & REQ_RAHEAD)
1221
1221
return -EWOULDBLOCK;
1222
1222
1223
1223
queue_bio(ms, bio, rw);
···
1253
1253
* We need to dec pending if this was a write.
1254
1254
*/
1255
1255
if (rw == WRITE) {
1256
-
if (!(bio->bi_rw & REQ_PREFLUSH) &&
1256
+
if (!(bio->bi_opf & REQ_PREFLUSH) &&
1257
1257
bio_op(bio) != REQ_OP_DISCARD)
1258
1258
dm_rh_dec(ms->rh, bio_record->write_region);
1259
1259
return error;
···
1262
1262
if (error == -EOPNOTSUPP)
1263
1263
goto out;
1264
1264
1265
-
if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
1265
+
if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
1266
1266
goto out;
1267
1267
1268
1268
if (unlikely(error)) {
drivers/md/dm-region-hash.c (+2 -2)
···
398
398
region_t region = dm_rh_bio_to_region(rh, bio);
399
399
int recovering = 0;
400
400
401
-
if (bio->bi_rw & REQ_PREFLUSH) {
401
+
if (bio->bi_opf & REQ_PREFLUSH) {
402
402
rh->flush_failure = 1;
403
403
return;
404
404
}
···
526
526
struct bio *bio;
527
527
528
528
for (bio = bios->head; bio; bio = bio->bi_next) {
529
-
if (bio->bi_rw & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
529
+
if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
530
530
continue;
531
531
rh_inc(rh, dm_rh_bio_to_region(rh, bio));
532
532
}
drivers/md/dm-snap.c (+3 -3)
···
1680
1680
1681
1681
init_tracked_chunk(bio);
1682
1682
1683
-
if (bio->bi_rw & REQ_PREFLUSH) {
1683
+
if (bio->bi_opf & REQ_PREFLUSH) {
1684
1684
bio->bi_bdev = s->cow->bdev;
1685
1685
return DM_MAPIO_REMAPPED;
1686
1686
}
···
1800
1800
1801
1801
init_tracked_chunk(bio);
1802
1802
1803
-
if (bio->bi_rw & REQ_PREFLUSH) {
1803
+
if (bio->bi_opf & REQ_PREFLUSH) {
1804
1804
if (!dm_bio_get_target_bio_nr(bio))
1805
1805
bio->bi_bdev = s->origin->bdev;
1806
1806
else
···
2286
2286
2287
2287
bio->bi_bdev = o->dev->bdev;
2288
2288
2289
-
if (unlikely(bio->bi_rw & REQ_PREFLUSH))
2289
+
if (unlikely(bio->bi_opf & REQ_PREFLUSH))
2290
2290
return DM_MAPIO_REMAPPED;
2291
2291
2292
2292
if (bio_data_dir(bio) != WRITE)
drivers/md/dm-stripe.c (+2 -2)
···
286
286
uint32_t stripe;
287
287
unsigned target_bio_nr;
288
288
289
-
if (bio->bi_rw & REQ_PREFLUSH) {
289
+
if (bio->bi_opf & REQ_PREFLUSH) {
290
290
target_bio_nr = dm_bio_get_target_bio_nr(bio);
291
291
BUG_ON(target_bio_nr >= sc->stripes);
292
292
bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
···
383
383
if (!error)
384
384
return 0; /* I/O complete */
385
385
386
-
if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
386
+
if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
387
387
return error;
388
388
389
389
if (error == -EOPNOTSUPP)
drivers/md/dm-thin.c (+4 -4)
···
699
699
700
700
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
701
701
{
702
-
return (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
702
+
return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
703
703
dm_thin_changed_this_transaction(tc->td);
704
704
}
705
705
···
870
870
struct bio *bio;
871
871
872
872
while ((bio = bio_list_pop(&cell->bios))) {
873
-
if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
873
+
if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
874
874
bio_op(bio) == REQ_OP_DISCARD)
875
875
bio_list_add(&info->defer_bios, bio);
876
876
else {
···
1717
1717
1718
1718
while ((bio = bio_list_pop(&cell->bios))) {
1719
1719
if ((bio_data_dir(bio) == WRITE) ||
1720
-
(bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
1720
+
(bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
1721
1721
bio_op(bio) == REQ_OP_DISCARD))
1722
1722
bio_list_add(&info->defer_bios, bio);
1723
1723
else {
···
2635
2635
return DM_MAPIO_SUBMITTED;
2636
2636
}
2637
2637
2638
-
if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
2638
+
if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
2639
2639
bio_op(bio) == REQ_OP_DISCARD) {
2640
2640
thin_defer_bio_with_throttle(tc, bio);
2641
2641
return DM_MAPIO_SUBMITTED;
drivers/md/dm-zero.c (+1 -1)
drivers/md/dm.c (+5 -5)
···
798
798
if (io_error == DM_ENDIO_REQUEUE)
799
799
return;
800
800
801
-
if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
801
+
if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
802
802
/*
803
803
* Preflush done for flush with data, reissue
804
804
* without REQ_PREFLUSH.
805
805
*/
806
-
bio->bi_rw &= ~REQ_PREFLUSH;
806
+
bio->bi_opf &= ~REQ_PREFLUSH;
807
807
queue_io(md, bio);
808
808
} else {
809
809
/* done with normal IO or empty flush */
···
964
964
{
965
965
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
966
966
unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
967
-
BUG_ON(bio->bi_rw & REQ_PREFLUSH);
967
+
BUG_ON(bio->bi_opf & REQ_PREFLUSH);
968
968
BUG_ON(bi_size > *tio->len_ptr);
969
969
BUG_ON(n_sectors > bi_size);
970
970
*tio->len_ptr -= bi_size - n_sectors;
···
1252
1252
1253
1253
start_io_acct(ci.io);
1254
1254
1255
-
if (bio->bi_rw & REQ_PREFLUSH) {
1255
+
if (bio->bi_opf & REQ_PREFLUSH) {
1256
1256
ci.bio = &ci.md->flush_bio;
1257
1257
ci.sector_count = 0;
1258
1258
error = __send_empty_flush(&ci);
···
1290
1290
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1291
1291
dm_put_live_table(md, srcu_idx);
1292
1292
1293
-
if (!(bio->bi_rw & REQ_RAHEAD))
1293
+
if (!(bio->bi_opf & REQ_RAHEAD))
1294
1294
queue_io(md, bio);
1295
1295
else
1296
1296
bio_io_error(bio);
drivers/md/linear.c (+1 -1)
drivers/md/md.c (+2 -2)
···
285
285
*/
286
286
sectors = bio_sectors(bio);
287
287
/* bio could be mergeable after passing to underlayer */
288
-
bio->bi_rw &= ~REQ_NOMERGE;
288
+
bio->bi_opf &= ~REQ_NOMERGE;
289
289
mddev->pers->make_request(mddev, bio);
290
290
291
291
cpu = part_stat_lock();
···
414
414
/* an empty barrier - all done */
415
415
bio_endio(bio);
416
416
else {
417
-
bio->bi_rw &= ~REQ_PREFLUSH;
417
+
bio->bi_opf &= ~REQ_PREFLUSH;
418
418
mddev->pers->make_request(mddev, bio);
419
419
}
420
420
drivers/md/multipath.c (+4 -4)
···
91
91
92
92
if (!bio->bi_error)
93
93
multipath_end_bh_io(mp_bh, 0);
94
-
else if (!(bio->bi_rw & REQ_RAHEAD)) {
94
+
else if (!(bio->bi_opf & REQ_RAHEAD)) {
95
95
/*
96
96
* oops, IO error:
97
97
*/
···
112
112
struct multipath_bh * mp_bh;
113
113
struct multipath_info *multipath;
114
114
115
-
if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
115
+
if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
116
116
md_flush_request(mddev, bio);
117
117
return;
118
118
}
···
135
135
136
136
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
137
137
mp_bh->bio.bi_bdev = multipath->rdev->bdev;
138
-
mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
138
+
mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
139
139
mp_bh->bio.bi_end_io = multipath_end_request;
140
140
mp_bh->bio.bi_private = mp_bh;
141
141
generic_make_request(&mp_bh->bio);
···
360
360
bio->bi_iter.bi_sector +=
361
361
conf->multipaths[mp_bh->path].rdev->data_offset;
362
362
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
363
-
bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
363
+
bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
364
364
bio->bi_end_io = multipath_end_request;
365
365
bio->bi_private = mp_bh;
366
366
generic_make_request(bio);
drivers/md/raid0.c (+1 -1)
drivers/md/raid1.c (+3 -3)
···
1043
1043
unsigned long flags;
1044
1044
const int op = bio_op(bio);
1045
1045
const int rw = bio_data_dir(bio);
1046
-
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1047
-
const unsigned long do_flush_fua = (bio->bi_rw &
1046
+
const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1047
+
const unsigned long do_flush_fua = (bio->bi_opf &
1048
1048
(REQ_PREFLUSH | REQ_FUA));
1049
1049
struct md_rdev *blocked_rdev;
1050
1050
struct blk_plug_cb *cb;
···
2318
2318
raid_end_bio_io(r1_bio);
2319
2319
} else {
2320
2320
const unsigned long do_sync
2321
-
= r1_bio->master_bio->bi_rw & REQ_SYNC;
2321
+
= r1_bio->master_bio->bi_opf & REQ_SYNC;
2322
2322
if (bio) {
2323
2323
r1_bio->bios[r1_bio->read_disk] =
2324
2324
mddev->ro ? IO_BLOCKED : NULL;
drivers/md/raid10.c (+4 -4)
···
1054
1054
int i;
1055
1055
const int op = bio_op(bio);
1056
1056
const int rw = bio_data_dir(bio);
1057
-
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1058
-
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
1057
+
const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1058
+
const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
1059
1059
unsigned long flags;
1060
1060
struct md_rdev *blocked_rdev;
1061
1061
struct blk_plug_cb *cb;
···
1440
1440
1441
1441
struct bio *split;
1442
1442
1443
-
if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
1443
+
if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1444
1444
md_flush_request(mddev, bio);
1445
1445
return;
1446
1446
}
···
2533
2533
return;
2534
2534
}
2535
2535
2536
-
do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
2536
+
do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
2537
2537
slot = r10_bio->read_slot;
2538
2538
printk_ratelimited(
2539
2539
KERN_ERR
drivers/md/raid5-cache.c (+1 -1)
drivers/md/raid5.c (+10 -10)
···
806
806
dd_idx = 0;
807
807
while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
808
808
dd_idx++;
809
-
if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw ||
809
+
if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf ||
810
810
bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
811
811
goto unlock_out;
812
812
···
1003
1003
1004
1004
pr_debug("%s: for %llu schedule op %d on disc %d\n",
1005
1005
__func__, (unsigned long long)sh->sector,
1006
-
bi->bi_rw, i);
1006
+
bi->bi_opf, i);
1007
1007
atomic_inc(&sh->count);
1008
1008
if (sh != head_sh)
1009
1009
atomic_inc(&head_sh->count);
···
1014
1014
bi->bi_iter.bi_sector = (sh->sector
1015
1015
+ rdev->data_offset);
1016
1016
if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
1017
-
bi->bi_rw |= REQ_NOMERGE;
1017
+
bi->bi_opf |= REQ_NOMERGE;
1018
1018
1019
1019
if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
1020
1020
WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
···
1055
1055
pr_debug("%s: for %llu schedule op %d on "
1056
1056
"replacement disc %d\n",
1057
1057
__func__, (unsigned long long)sh->sector,
1058
-
rbi->bi_rw, i);
1058
+
rbi->bi_opf, i);
1059
1059
atomic_inc(&sh->count);
1060
1060
if (sh != head_sh)
1061
1061
atomic_inc(&head_sh->count);
···
1088
1088
if (op_is_write(op))
1089
1089
set_bit(STRIPE_DEGRADED, &sh->state);
1090
1090
pr_debug("skip op %d on disc %d for sector %llu\n",
1091
-
bi->bi_rw, i, (unsigned long long)sh->sector);
1091
+
bi->bi_opf, i, (unsigned long long)sh->sector);
1092
1092
clear_bit(R5_LOCKED, &sh->dev[i].flags);
1093
1093
set_bit(STRIPE_HANDLE, &sh->state);
1094
1094
}
···
1619
1619
1620
1620
while (wbi && wbi->bi_iter.bi_sector <
1621
1621
dev->sector + STRIPE_SECTORS) {
1622
-
if (wbi->bi_rw & REQ_FUA)
1622
+
if (wbi->bi_opf & REQ_FUA)
1623
1623
set_bit(R5_WantFUA, &dev->flags);
1624
-
if (wbi->bi_rw & REQ_SYNC)
1624
+
if (wbi->bi_opf & REQ_SYNC)
1625
1625
set_bit(R5_SyncIO, &dev->flags);
1626
1626
if (bio_op(wbi) == REQ_OP_DISCARD)
1627
1627
set_bit(R5_Discard, &dev->flags);
···
5154
5154
DEFINE_WAIT(w);
5155
5155
bool do_prepare;
5156
5156
5157
-
if (unlikely(bi->bi_rw & REQ_PREFLUSH)) {
5157
+
if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
5158
5158
int ret = r5l_handle_flush_request(conf->log, bi);
5159
5159
5160
5160
if (ret == 0)
···
5237
5237
(unsigned long long)logical_sector);
5238
5238
5239
5239
sh = raid5_get_active_stripe(conf, new_sector, previous,
5240
-
(bi->bi_rw & REQ_RAHEAD), 0);
5240
+
(bi->bi_opf & REQ_RAHEAD), 0);
5241
5241
if (sh) {
5242
5242
if (unlikely(previous)) {
5243
5243
/* expansion might have moved on while waiting for a
···
5305
5305
set_bit(STRIPE_HANDLE, &sh->state);
5306
5306
clear_bit(STRIPE_DELAYED, &sh->state);
5307
5307
if ((!sh->batch_head || sh == sh->batch_head) &&
5308
-
(bi->bi_rw & REQ_SYNC) &&
5308
+
(bi->bi_opf & REQ_SYNC) &&
5309
5309
!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
5310
5310
atomic_inc(&conf->preread_active_stripes);
5311
5311
release_stripe_plug(mddev, sh);
drivers/nvdimm/pmem.c (+2 -2)
···
128
128
struct pmem_device *pmem = q->queuedata;
129
129
struct nd_region *nd_region = to_region(pmem);
130
130
131
-
if (bio->bi_rw & REQ_FLUSH)
131
+
if (bio->bi_opf & REQ_FLUSH)
132
132
nvdimm_flush(nd_region);
133
133
134
134
do_acct = nd_iostat_start(bio, &start);
···
144
144
if (do_acct)
145
145
nd_iostat_end(bio, start);
146
146
147
-
if (bio->bi_rw & REQ_FUA)
147
+
if (bio->bi_opf & REQ_FUA)
148
148
nvdimm_flush(nd_region);
149
149
150
150
bio_endio(bio);
fs/btrfs/check-integrity.c (+5 -5)
···
2945
2945
printk(KERN_INFO
2946
2946
"submit_bio(rw=%d,0x%x, bi_vcnt=%u,"
2947
2947
" bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
2948
-
bio_op(bio), bio->bi_rw, bio->bi_vcnt,
2948
+
bio_op(bio), bio->bi_opf, bio->bi_vcnt,
2949
2949
(unsigned long long)bio->bi_iter.bi_sector,
2950
2950
dev_bytenr, bio->bi_bdev);
2951
2951
···
2976
2976
btrfsic_process_written_block(dev_state, dev_bytenr,
2977
2977
mapped_datav, bio->bi_vcnt,
2978
2978
bio, &bio_is_patched,
2979
-
NULL, bio->bi_rw);
2979
+
NULL, bio->bi_opf);
2980
2980
while (i > 0) {
2981
2981
i--;
2982
2982
kunmap(bio->bi_io_vec[i].bv_page);
2983
2983
}
2984
2984
kfree(mapped_datav);
2985
-
} else if (NULL != dev_state && (bio->bi_rw & REQ_PREFLUSH)) {
2985
+
} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
2986
2986
if (dev_state->state->print_mask &
2987
2987
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
2988
2988
printk(KERN_INFO
2989
2989
"submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
2990
-
bio_op(bio), bio->bi_rw, bio->bi_bdev);
2990
+
bio_op(bio), bio->bi_opf, bio->bi_bdev);
2991
2991
if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
2992
2992
if ((dev_state->state->print_mask &
2993
2993
(BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
···
3005
3005
block->never_written = 0;
3006
3006
block->iodone_w_error = 0;
3007
3007
block->flush_gen = dev_state->last_flush_gen + 1;
3008
-
block->submit_bio_bh_rw = bio->bi_rw;
3008
+
block->submit_bio_bh_rw = bio->bi_opf;
3009
3009
block->orig_bio_bh_private = bio->bi_private;
3010
3010
block->orig_bio_bh_end_io.bio = bio->bi_end_io;
3011
3011
block->next_in_same_bio = NULL;
fs/btrfs/disk-io.c (+1 -1)
fs/btrfs/inode.c (+3 -3)
···
8209
8209
if (err)
8210
8210
btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
8211
8211
"direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
8212
-
btrfs_ino(dip->inode), bio_op(bio), bio->bi_rw,
8212
+
btrfs_ino(dip->inode), bio_op(bio), bio->bi_opf,
8213
8213
(unsigned long long)bio->bi_iter.bi_sector,
8214
8214
bio->bi_iter.bi_size, err);
8215
8215
···
8373
8373
if (!bio)
8374
8374
return -ENOMEM;
8375
8375
8376
-
bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw);
8376
+
bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_opf);
8377
8377
bio->bi_private = dip;
8378
8378
bio->bi_end_io = btrfs_end_dio_bio;
8379
8379
btrfs_io_bio(bio)->logical = file_offset;
···
8411
8411
start_sector, GFP_NOFS);
8412
8412
if (!bio)
8413
8413
goto out_err;
8414
-
bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw);
8414
+
bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_opf);
8415
8415
bio->bi_private = dip;
8416
8416
bio->bi_end_io = btrfs_end_dio_bio;
8417
8417
btrfs_io_bio(bio)->logical = file_offset;
fs/btrfs/volumes.c (+3 -3)
···
6012
6012
else
6013
6013
btrfs_dev_stat_inc(dev,
6014
6014
BTRFS_DEV_STAT_READ_ERRS);
6015
-
if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
6015
+
if ((bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH)
6016
6016
btrfs_dev_stat_inc(dev,
6017
6017
BTRFS_DEV_STAT_FLUSH_ERRS);
6018
6018
btrfs_dev_stat_print_on_error(dev);
···
6089
6089
bio->bi_next = NULL;
6090
6090
6091
6091
spin_lock(&device->io_lock);
6092
-
if (bio->bi_rw & REQ_SYNC)
6092
+
if (bio->bi_opf & REQ_SYNC)
6093
6093
pending_bios = &device->pending_sync_bios;
6094
6094
else
6095
6095
pending_bios = &device->pending_bios;
···
6127
6127
rcu_read_lock();
6128
6128
name = rcu_dereference(dev->name);
6129
6129
pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu "
6130
-
"(%s id %llu), size=%u\n", bio_op(bio), bio->bi_rw,
6130
+
"(%s id %llu), size=%u\n", bio_op(bio), bio->bi_opf,
6131
6131
(u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
6132
6132
name->str, dev->devid, bio->bi_iter.bi_size);
6133
6133
rcu_read_unlock();
include/linux/bio.h (+2 -2)
···
95
95
96
96
static inline bool bio_mergeable(struct bio *bio)
97
97
{
98
-
if (bio->bi_rw & REQ_NOMERGE_FLAGS)
98
+
if (bio->bi_opf & REQ_NOMERGE_FLAGS)
99
99
return false;
100
100
101
101
return true;
···
318
318
319
319
static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
320
320
{
321
-
if (bio->bi_rw & REQ_INTEGRITY)
321
+
if (bio->bi_opf & REQ_INTEGRITY)
322
322
return bio->bi_integrity;
323
323
324
324
return NULL;
include/linux/blk-cgroup.h (+2 -2)
···
714
714
715
715
if (!throtl) {
716
716
blkg = blkg ?: q->root_blkg;
717
-
blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_rw,
717
+
blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf,
718
718
bio->bi_iter.bi_size);
719
-
blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_rw, 1);
719
+
blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1);
720
720
}
721
721
722
722
rcu_read_unlock();
include/linux/blk_types.h (+8 -7)

@@ -27,8 +27,9 @@
 struct bio *bi_next;	/* request queue link */
 struct block_device *bi_bdev;
 int bi_error;
-unsigned int bi_rw;	/* bottom bits req flags,
-			 * top bits REQ_OP
+unsigned int bi_opf;	/* bottom bits req flags,
+			 * top bits REQ_OP. Use
+			 * accessors.
 			 */
 unsigned short bi_flags;	/* status, command, etc */
 unsigned short bi_ioprio;

@@ -90,13 +89,13 @@
 };

 #define BIO_OP_SHIFT	(8 * sizeof(unsigned int) - REQ_OP_BITS)
-#define bio_op(bio)	((bio)->bi_rw >> BIO_OP_SHIFT)
+#define bio_op(bio)	((bio)->bi_opf >> BIO_OP_SHIFT)

 #define bio_set_op_attrs(bio, op, op_flags) do { \
 	WARN_ON(op >= (1 << REQ_OP_BITS)); \
-	(bio)->bi_rw &= ((1 << BIO_OP_SHIFT) - 1); \
-	(bio)->bi_rw |= ((unsigned int) (op) << BIO_OP_SHIFT); \
-	(bio)->bi_rw |= op_flags; \
+	(bio)->bi_opf &= ((1 << BIO_OP_SHIFT) - 1); \
+	(bio)->bi_opf |= ((unsigned int) (op) << BIO_OP_SHIFT); \
+	(bio)->bi_opf |= op_flags; \
 } while (0)

 #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)

@@ -139,7 +138,7 @@

 /*
 * Request flags. For use in the cmd_flags field of struct request, and in
-* bi_rw of struct bio. Note that some flags are only valid in either one.
+* bi_opf of struct bio. Note that some flags are only valid in either one.
 */
 enum rq_flag_bits {
 /* common flags */
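Taken together, the accessors above are how the renamed field is meant to be used: the op lives in the top bits and is set and queried via bio_set_op_attrs()/bio_op(), while modifier flags can still be OR'd into the low bits directly, as block/bio.c does with REQ_SYNC in submit_bio_wait(). A small, hypothetical sketch, not part of the patch:

    #include <linux/bio.h>

    /* Sketch only: set up a write bio and read the op back through the accessors. */
    static void example_prepare_write(struct bio *bio, bool sync)
    {
            bio_set_op_attrs(bio, REQ_OP_WRITE, 0);   /* op goes into the top bits */

            if (sync)
                    bio->bi_opf |= REQ_SYNC;          /* flag bits stay in the low bits */

            WARN_ON(bio_op(bio) != REQ_OP_WRITE);     /* op recovered via BIO_OP_SHIFT */
    }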
include/trace/events/bcache.h (+4 -4)
···
27
27
__entry->sector = bio->bi_iter.bi_sector;
28
28
__entry->orig_sector = bio->bi_iter.bi_sector - 16;
29
29
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
30
-
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
30
+
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
31
31
bio->bi_iter.bi_size);
32
32
),
33
33
···
102
102
__entry->dev = bio->bi_bdev->bd_dev;
103
103
__entry->sector = bio->bi_iter.bi_sector;
104
104
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
105
-
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
105
+
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
106
106
bio->bi_iter.bi_size);
107
107
),
108
108
···
138
138
__entry->dev = bio->bi_bdev->bd_dev;
139
139
__entry->sector = bio->bi_iter.bi_sector;
140
140
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
141
-
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
141
+
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
142
142
bio->bi_iter.bi_size);
143
143
__entry->cache_hit = hit;
144
144
__entry->bypass = bypass;
···
170
170
__entry->inode = inode;
171
171
__entry->sector = bio->bi_iter.bi_sector;
172
172
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
173
-
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
173
+
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
174
174
bio->bi_iter.bi_size);
175
175
__entry->writeback = writeback;
176
176
__entry->bypass = bypass;
include/trace/events/block.h (+7 -7)
···
274
274
bio->bi_bdev->bd_dev : 0;
275
275
__entry->sector = bio->bi_iter.bi_sector;
276
276
__entry->nr_sector = bio_sectors(bio);
277
-
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
277
+
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
278
278
bio->bi_iter.bi_size);
279
279
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
280
280
),
···
313
313
__entry->sector = bio->bi_iter.bi_sector;
314
314
__entry->nr_sector = bio_sectors(bio);
315
315
__entry->error = error;
316
-
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
316
+
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
317
317
bio->bi_iter.bi_size);
318
318
),
319
319
···
341
341
__entry->dev = bio->bi_bdev->bd_dev;
342
342
__entry->sector = bio->bi_iter.bi_sector;
343
343
__entry->nr_sector = bio_sectors(bio);
344
-
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
344
+
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
345
345
bio->bi_iter.bi_size);
346
346
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
347
347
),
···
409
409
__entry->dev = bio->bi_bdev->bd_dev;
410
410
__entry->sector = bio->bi_iter.bi_sector;
411
411
__entry->nr_sector = bio_sectors(bio);
412
-
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
412
+
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
413
413
bio->bi_iter.bi_size);
414
414
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
415
415
),
···
439
439
__entry->sector = bio ? bio->bi_iter.bi_sector : 0;
440
440
__entry->nr_sector = bio ? bio_sectors(bio) : 0;
441
441
blk_fill_rwbs(__entry->rwbs, bio ? bio_op(bio) : 0,
442
-
bio ? bio->bi_rw : 0, __entry->nr_sector);
442
+
bio ? bio->bi_opf : 0, __entry->nr_sector);
443
443
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
444
444
),
445
445
···
573
573
__entry->dev = bio->bi_bdev->bd_dev;
574
574
__entry->sector = bio->bi_iter.bi_sector;
575
575
__entry->new_sector = new_sector;
576
-
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
576
+
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
577
577
bio->bi_iter.bi_size);
578
578
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
579
579
),
···
617
617
__entry->nr_sector = bio_sectors(bio);
618
618
__entry->old_dev = dev;
619
619
__entry->old_sector = from;
620
-
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
620
+
blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
621
621
bio->bi_iter.bi_size);
622
622
),
623
623
kernel/trace/blktrace.c (+3 -3)
···
776
776
return;
777
777
778
778
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
779
-
bio_op(bio), bio->bi_rw, what, error, 0, NULL);
779
+
bio_op(bio), bio->bi_opf, what, error, 0, NULL);
780
780
}
781
781
782
782
static void blk_add_trace_bio_bounce(void *ignore,
···
881
881
__be64 rpdu = cpu_to_be64(pdu);
882
882
883
883
__blk_add_trace(bt, bio->bi_iter.bi_sector,
884
-
bio->bi_iter.bi_size, bio_op(bio), bio->bi_rw,
884
+
bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
885
885
BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu),
886
886
&rpdu);
887
887
}
···
915
915
r.sector_from = cpu_to_be64(from);
916
916
917
917
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
918
-
bio_op(bio), bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
918
+
bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_error,
919
919
sizeof(r), &r);
920
920
}
921
921