Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: better op and flags encoding

Now that we don't need the common flags to overflow outside the range
of a 32-bit type we can encode them the same way for both the bio and
request fields. This in addition allows us to place the operation
first (and make some room for more ops while we're at it) and to
stop having to shift around the operation values.

In addition this allows passing around only one value in the block layer
instead of two (and eventually also in the file systems, but we can do
that later) and thus clean up a lot of code.

Last but not least this allows decreasing the size of the cmd_flags
field in struct request to 32-bits. Various functions passing this
value could also be updated, but I'd like to avoid the churn for now.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>

authored by

Christoph Hellwig and committed by
Jens Axboe
ef295ecf e8064021

+148 -221
+2 -2
Documentation/block/biodoc.txt
··· 553 553 struct request_list *rl; 554 554 } 555 555 556 - See the rq_flag_bits definitions for an explanation of the various flags 557 - available. Some bits are used by the block layer or i/o scheduler. 556 + See the req_ops and req_flag_bits definitions for an explanation of the various 557 + flags available. Some bits are used by the block layer or i/o scheduler. 558 558 559 559 The behaviour of the various sector counts are almost the same as before, 560 560 except that since we have multi-segment bios, current_nr_sectors refers
+20 -40
block/blk-core.c
··· 1056 1056 /** 1057 1057 * __get_request - get a free request 1058 1058 * @rl: request list to allocate from 1059 - * @op: REQ_OP_READ/REQ_OP_WRITE 1060 - * @op_flags: rq_flag_bits 1059 + * @op: operation and flags 1061 1060 * @bio: bio to allocate request for (can be %NULL) 1062 1061 * @gfp_mask: allocation mask 1063 1062 * ··· 1067 1068 * Returns ERR_PTR on failure, with @q->queue_lock held. 1068 1069 * Returns request pointer on success, with @q->queue_lock *not held*. 1069 1070 */ 1070 - static struct request *__get_request(struct request_list *rl, int op, 1071 - int op_flags, struct bio *bio, 1072 - gfp_t gfp_mask) 1071 + static struct request *__get_request(struct request_list *rl, unsigned int op, 1072 + struct bio *bio, gfp_t gfp_mask) 1073 1073 { 1074 1074 struct request_queue *q = rl->q; 1075 1075 struct request *rq; 1076 1076 struct elevator_type *et = q->elevator->type; 1077 1077 struct io_context *ioc = rq_ioc(bio); 1078 1078 struct io_cq *icq = NULL; 1079 - const bool is_sync = rw_is_sync(op, op_flags) != 0; 1079 + const bool is_sync = op_is_sync(op); 1080 1080 int may_queue; 1081 1081 req_flags_t rq_flags = RQF_ALLOCED; 1082 1082 1083 1083 if (unlikely(blk_queue_dying(q))) 1084 1084 return ERR_PTR(-ENODEV); 1085 1085 1086 - may_queue = elv_may_queue(q, op, op_flags); 1086 + may_queue = elv_may_queue(q, op); 1087 1087 if (may_queue == ELV_MQUEUE_NO) 1088 1088 goto rq_starved; 1089 1089 ··· 1152 1154 1153 1155 blk_rq_init(q, rq); 1154 1156 blk_rq_set_rl(rq, rl); 1155 - req_set_op_attrs(rq, op, op_flags); 1157 + rq->cmd_flags = op; 1156 1158 rq->rq_flags = rq_flags; 1157 1159 1158 1160 /* init elvpriv */ ··· 1230 1232 /** 1231 1233 * get_request - get a free request 1232 1234 * @q: request_queue to allocate request from 1233 - * @op: REQ_OP_READ/REQ_OP_WRITE 1234 - * @op_flags: rq_flag_bits 1235 + * @op: operation and flags 1235 1236 * @bio: bio to allocate request for (can be %NULL) 1236 1237 * @gfp_mask: allocation mask 1237 1238 * ··· 1241 1244 * 
Returns ERR_PTR on failure, with @q->queue_lock held. 1242 1245 * Returns request pointer on success, with @q->queue_lock *not held*. 1243 1246 */ 1244 - static struct request *get_request(struct request_queue *q, int op, 1245 - int op_flags, struct bio *bio, 1246 - gfp_t gfp_mask) 1247 + static struct request *get_request(struct request_queue *q, unsigned int op, 1248 + struct bio *bio, gfp_t gfp_mask) 1247 1249 { 1248 - const bool is_sync = rw_is_sync(op, op_flags) != 0; 1250 + const bool is_sync = op_is_sync(op); 1249 1251 DEFINE_WAIT(wait); 1250 1252 struct request_list *rl; 1251 1253 struct request *rq; 1252 1254 1253 1255 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ 1254 1256 retry: 1255 - rq = __get_request(rl, op, op_flags, bio, gfp_mask); 1257 + rq = __get_request(rl, op, bio, gfp_mask); 1256 1258 if (!IS_ERR(rq)) 1257 1259 return rq; 1258 1260 ··· 1293 1297 create_io_context(gfp_mask, q->node); 1294 1298 1295 1299 spin_lock_irq(q->queue_lock); 1296 - rq = get_request(q, rw, 0, NULL, gfp_mask); 1300 + rq = get_request(q, rw, NULL, gfp_mask); 1297 1301 if (IS_ERR(rq)) { 1298 1302 spin_unlock_irq(q->queue_lock); 1299 1303 return rq; ··· 1442 1446 */ 1443 1447 if (rq_flags & RQF_ALLOCED) { 1444 1448 struct request_list *rl = blk_rq_rl(req); 1445 - bool sync = rw_is_sync(req_op(req), req->cmd_flags); 1449 + bool sync = op_is_sync(req->cmd_flags); 1446 1450 1447 1451 BUG_ON(!list_empty(&req->queuelist)); 1448 1452 BUG_ON(ELV_ON_HASH(req)); ··· 1648 1652 void init_request_from_bio(struct request *req, struct bio *bio) 1649 1653 { 1650 1654 req->cmd_type = REQ_TYPE_FS; 1651 - 1652 - req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK; 1653 1655 if (bio->bi_opf & REQ_RAHEAD) 1654 1656 req->cmd_flags |= REQ_FAILFAST_MASK; 1655 1657 ··· 1659 1665 1660 1666 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio) 1661 1667 { 1662 - const bool sync = !!(bio->bi_opf & REQ_SYNC); 1663 1668 struct blk_plug *plug; 1664 - int el_ret, rw_flags = 
0, where = ELEVATOR_INSERT_SORT; 1669 + int el_ret, where = ELEVATOR_INSERT_SORT; 1665 1670 struct request *req; 1666 1671 unsigned int request_count = 0; 1667 1672 ··· 1716 1723 1717 1724 get_rq: 1718 1725 /* 1719 - * This sync check and mask will be re-done in init_request_from_bio(), 1720 - * but we need to set it earlier to expose the sync flag to the 1721 - * rq allocator and io schedulers. 1722 - */ 1723 - if (sync) 1724 - rw_flags |= REQ_SYNC; 1725 - 1726 - /* 1727 - * Add in META/PRIO flags, if set, before we get to the IO scheduler 1728 - */ 1729 - rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO)); 1730 - 1731 - /* 1732 1726 * Grab a free request. This is might sleep but can not fail. 1733 1727 * Returns with the queue unlocked. 1734 1728 */ 1735 - req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO); 1729 + req = get_request(q, bio->bi_opf, bio, GFP_NOIO); 1736 1730 if (IS_ERR(req)) { 1737 1731 bio->bi_error = PTR_ERR(req); 1738 1732 bio_endio(bio); ··· 2926 2946 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 2927 2947 struct bio *bio) 2928 2948 { 2929 - req_set_op(rq, bio_op(bio)); 2930 - 2931 2949 if (bio_has_data(bio)) 2932 2950 rq->nr_phys_segments = bio_phys_segments(q, bio); 2933 2951 ··· 3009 3031 static void __blk_rq_prep_clone(struct request *dst, struct request *src) 3010 3032 { 3011 3033 dst->cpu = src->cpu; 3012 - req_set_op_attrs(dst, req_op(src), 3013 - (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE); 3034 + dst->cmd_flags = src->cmd_flags | REQ_NOMERGE; 3014 3035 dst->cmd_type = src->cmd_type; 3015 3036 dst->__sector = blk_rq_pos(src); 3016 3037 dst->__data_len = blk_rq_bytes(src); ··· 3514 3537 3515 3538 int __init blk_dev_init(void) 3516 3539 { 3517 - BUILD_BUG_ON(__REQ_NR_BITS > 8 * 3540 + BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS)); 3541 + BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 * 3518 3542 FIELD_SIZEOF(struct request, cmd_flags)); 3543 + BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 * 3544 + 
FIELD_SIZEOF(struct bio, bi_opf)); 3519 3545 3520 3546 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 3521 3547 kblockd_workqueue = alloc_workqueue("kblockd",
+1 -1
block/blk-flush.c
··· 330 330 } 331 331 332 332 flush_rq->cmd_type = REQ_TYPE_FS; 333 - req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH); 333 + flush_rq->cmd_flags = REQ_OP_FLUSH | WRITE_FLUSH; 334 334 flush_rq->rq_flags |= RQF_FLUSH_SEQ; 335 335 flush_rq->rq_disk = first_rq->rq_disk; 336 336 flush_rq->end_io = flush_end_io;
+1 -1
block/blk-lib.c
··· 29 29 struct request_queue *q = bdev_get_queue(bdev); 30 30 struct bio *bio = *biop; 31 31 unsigned int granularity; 32 - enum req_op op; 32 + unsigned int op; 33 33 int alignment; 34 34 sector_t bs_mask; 35 35
+2
block/blk-map.c
··· 16 16 int blk_rq_append_bio(struct request *rq, struct bio *bio) 17 17 { 18 18 if (!rq->bio) { 19 + rq->cmd_flags &= REQ_OP_MASK; 20 + rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK); 19 21 blk_rq_bio_prep(rq->q, rq, bio); 20 22 } else { 21 23 if (!ll_back_merge_fn(rq->q, rq, bio))
+11 -17
block/blk-mq.c
··· 139 139 EXPORT_SYMBOL(blk_mq_can_queue); 140 140 141 141 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, 142 - struct request *rq, int op, 143 - unsigned int op_flags) 142 + struct request *rq, unsigned int op) 144 143 { 145 144 INIT_LIST_HEAD(&rq->queuelist); 146 145 /* csd/requeue_work/fifo_time is initialized before use */ 147 146 rq->q = q; 148 147 rq->mq_ctx = ctx; 149 - req_set_op_attrs(rq, op, op_flags); 148 + rq->cmd_flags = op; 150 149 if (blk_queue_io_stat(q)) 151 150 rq->rq_flags |= RQF_IO_STAT; 152 151 /* do not touch atomic flags, it needs atomic ops against the timer */ ··· 182 183 rq->end_io_data = NULL; 183 184 rq->next_rq = NULL; 184 185 185 - ctx->rq_dispatched[rw_is_sync(op, op_flags)]++; 186 + ctx->rq_dispatched[op_is_sync(op)]++; 186 187 } 187 188 188 189 static struct request * 189 - __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int op, int op_flags) 190 + __blk_mq_alloc_request(struct blk_mq_alloc_data *data, unsigned int op) 190 191 { 191 192 struct request *rq; 192 193 unsigned int tag; ··· 201 202 } 202 203 203 204 rq->tag = tag; 204 - blk_mq_rq_ctx_init(data->q, data->ctx, rq, op, op_flags); 205 + blk_mq_rq_ctx_init(data->q, data->ctx, rq, op); 205 206 return rq; 206 207 } 207 208 ··· 224 225 ctx = blk_mq_get_ctx(q); 225 226 hctx = blk_mq_map_queue(q, ctx->cpu); 226 227 blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx); 227 - rq = __blk_mq_alloc_request(&alloc_data, rw, 0); 228 + rq = __blk_mq_alloc_request(&alloc_data, rw); 228 229 blk_mq_put_ctx(ctx); 229 230 230 231 if (!rq) { ··· 276 277 ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask)); 277 278 278 279 blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx); 279 - rq = __blk_mq_alloc_request(&alloc_data, rw, 0); 280 + rq = __blk_mq_alloc_request(&alloc_data, rw); 280 281 if (!rq) { 281 282 ret = -EWOULDBLOCK; 282 283 goto out_queue_exit; ··· 1195 1196 struct blk_mq_hw_ctx *hctx; 1196 1197 struct blk_mq_ctx *ctx; 1197 1198 
struct request *rq; 1198 - int op = bio_data_dir(bio); 1199 - int op_flags = 0; 1200 1199 1201 1200 blk_queue_enter_live(q); 1202 1201 ctx = blk_mq_get_ctx(q); 1203 1202 hctx = blk_mq_map_queue(q, ctx->cpu); 1204 1203 1205 - if (rw_is_sync(bio_op(bio), bio->bi_opf)) 1206 - op_flags |= REQ_SYNC; 1207 - 1208 - trace_block_getrq(q, bio, op); 1204 + trace_block_getrq(q, bio, bio->bi_opf); 1209 1205 blk_mq_set_alloc_data(data, q, 0, ctx, hctx); 1210 - rq = __blk_mq_alloc_request(data, op, op_flags); 1206 + rq = __blk_mq_alloc_request(data, bio->bi_opf); 1211 1207 1212 1208 data->hctx->queued++; 1213 1209 return rq; ··· 1250 1256 */ 1251 1257 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) 1252 1258 { 1253 - const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf); 1259 + const int is_sync = op_is_sync(bio->bi_opf); 1254 1260 const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA); 1255 1261 struct blk_mq_alloc_data data; 1256 1262 struct request *rq; ··· 1344 1350 */ 1345 1351 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) 1346 1352 { 1347 - const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf); 1353 + const int is_sync = op_is_sync(bio->bi_opf); 1348 1354 const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA); 1349 1355 struct blk_plug *plug; 1350 1356 unsigned int request_count = 0;
+32 -34
block/cfq-iosched.c
··· 667 667 } while (0) 668 668 669 669 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg, 670 - struct cfq_group *curr_cfqg, int op, 671 - int op_flags) 670 + struct cfq_group *curr_cfqg, 671 + unsigned int op) 672 672 { 673 - blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, 1); 673 + blkg_rwstat_add(&cfqg->stats.queued, op, 1); 674 674 cfqg_stats_end_empty_time(&cfqg->stats); 675 675 cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg); 676 676 } ··· 684 684 #endif 685 685 } 686 686 687 - static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op, 688 - int op_flags) 687 + static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, 688 + unsigned int op) 689 689 { 690 - blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, -1); 690 + blkg_rwstat_add(&cfqg->stats.queued, op, -1); 691 691 } 692 692 693 - static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op, 694 - int op_flags) 693 + static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, 694 + unsigned int op) 695 695 { 696 - blkg_rwstat_add(&cfqg->stats.merged, op, op_flags, 1); 696 + blkg_rwstat_add(&cfqg->stats.merged, op, 1); 697 697 } 698 698 699 699 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, 700 - uint64_t start_time, uint64_t io_start_time, int op, 701 - int op_flags) 700 + uint64_t start_time, uint64_t io_start_time, 701 + unsigned int op) 702 702 { 703 703 struct cfqg_stats *stats = &cfqg->stats; 704 704 unsigned long long now = sched_clock(); 705 705 706 706 if (time_after64(now, io_start_time)) 707 - blkg_rwstat_add(&stats->service_time, op, op_flags, 708 - now - io_start_time); 707 + blkg_rwstat_add(&stats->service_time, op, now - io_start_time); 709 708 if (time_after64(io_start_time, start_time)) 710 - blkg_rwstat_add(&stats->wait_time, op, op_flags, 709 + blkg_rwstat_add(&stats->wait_time, op, 711 710 io_start_time - start_time); 712 711 } 713 712 ··· 785 786 #define cfq_log_cfqg(cfqd, 
cfqg, fmt, args...) do {} while (0) 786 787 787 788 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg, 788 - struct cfq_group *curr_cfqg, int op, int op_flags) { } 789 + struct cfq_group *curr_cfqg, unsigned int op) { } 789 790 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg, 790 791 uint64_t time, unsigned long unaccounted_time) { } 791 - static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op, 792 - int op_flags) { } 793 - static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op, 794 - int op_flags) { } 792 + static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, 793 + unsigned int op) { } 794 + static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, 795 + unsigned int op) { } 795 796 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, 796 - uint64_t start_time, uint64_t io_start_time, int op, 797 - int op_flags) { } 797 + uint64_t start_time, uint64_t io_start_time, 798 + unsigned int op) { } 798 799 799 800 #endif /* CONFIG_CFQ_GROUP_IOSCHED */ 800 801 ··· 2473 2474 { 2474 2475 elv_rb_del(&cfqq->sort_list, rq); 2475 2476 cfqq->queued[rq_is_sync(rq)]--; 2476 - cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags); 2477 + cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags); 2477 2478 cfq_add_rq_rb(rq); 2478 2479 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group, 2479 - req_op(rq), rq->cmd_flags); 2480 + rq->cmd_flags); 2480 2481 } 2481 2482 2482 2483 static struct request * ··· 2529 2530 cfq_del_rq_rb(rq); 2530 2531 2531 2532 cfqq->cfqd->rq_queued--; 2532 - cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags); 2533 + cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags); 2533 2534 if (rq->cmd_flags & REQ_PRIO) { 2534 2535 WARN_ON(!cfqq->prio_pending); 2535 2536 cfqq->prio_pending--; ··· 2564 2565 static void cfq_bio_merged(struct request_queue *q, struct request *req, 2565 2566 
struct bio *bio) 2566 2567 { 2567 - cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf); 2568 + cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_opf); 2568 2569 } 2569 2570 2570 2571 static void ··· 2587 2588 if (cfqq->next_rq == next) 2588 2589 cfqq->next_rq = rq; 2589 2590 cfq_remove_request(next); 2590 - cfqg_stats_update_io_merged(RQ_CFQG(rq), req_op(next), next->cmd_flags); 2591 + cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags); 2591 2592 2592 2593 cfqq = RQ_CFQQ(next); 2593 2594 /* ··· 4141 4142 rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)]; 4142 4143 list_add_tail(&rq->queuelist, &cfqq->fifo); 4143 4144 cfq_add_rq_rb(rq); 4144 - cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, req_op(rq), 4145 + cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, 4145 4146 rq->cmd_flags); 4146 4147 cfq_rq_enqueued(cfqd, cfqq, rq); 4147 4148 } ··· 4239 4240 cfqq->dispatched--; 4240 4241 (RQ_CFQG(rq))->dispatched--; 4241 4242 cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq), 4242 - rq_io_start_time_ns(rq), req_op(rq), 4243 - rq->cmd_flags); 4243 + rq_io_start_time_ns(rq), rq->cmd_flags); 4244 4244 4245 4245 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--; 4246 4246 ··· 4317 4319 cfq_schedule_dispatch(cfqd); 4318 4320 } 4319 4321 4320 - static void cfqq_boost_on_prio(struct cfq_queue *cfqq, int op_flags) 4322 + static void cfqq_boost_on_prio(struct cfq_queue *cfqq, unsigned int op) 4321 4323 { 4322 4324 /* 4323 4325 * If REQ_PRIO is set, boost class and prio level, if it's below 4324 4326 * BE/NORM. If prio is not set, restore the potentially boosted 4325 4327 * class/prio level. 
4326 4328 */ 4327 - if (!(op_flags & REQ_PRIO)) { 4329 + if (!(op & REQ_PRIO)) { 4328 4330 cfqq->ioprio_class = cfqq->org_ioprio_class; 4329 4331 cfqq->ioprio = cfqq->org_ioprio; 4330 4332 } else { ··· 4345 4347 return ELV_MQUEUE_MAY; 4346 4348 } 4347 4349 4348 - static int cfq_may_queue(struct request_queue *q, int op, int op_flags) 4350 + static int cfq_may_queue(struct request_queue *q, unsigned int op) 4349 4351 { 4350 4352 struct cfq_data *cfqd = q->elevator->elevator_data; 4351 4353 struct task_struct *tsk = current; ··· 4362 4364 if (!cic) 4363 4365 return ELV_MQUEUE_MAY; 4364 4366 4365 - cfqq = cic_to_cfqq(cic, rw_is_sync(op, op_flags)); 4367 + cfqq = cic_to_cfqq(cic, op_is_sync(op)); 4366 4368 if (cfqq) { 4367 4369 cfq_init_prio_data(cfqq, cic); 4368 - cfqq_boost_on_prio(cfqq, op_flags); 4370 + cfqq_boost_on_prio(cfqq, op); 4369 4371 4370 4372 return __cfq_may_queue(cfqq); 4371 4373 }
+2 -2
block/elevator.c
··· 714 714 e->type->ops.elevator_put_req_fn(rq); 715 715 } 716 716 717 - int elv_may_queue(struct request_queue *q, int op, int op_flags) 717 + int elv_may_queue(struct request_queue *q, unsigned int op) 718 718 { 719 719 struct elevator_queue *e = q->elevator; 720 720 721 721 if (e->type->ops.elevator_may_queue_fn) 722 - return e->type->ops.elevator_may_queue_fn(q, op, op_flags); 722 + return e->type->ops.elevator_may_queue_fn(q, op); 723 723 724 724 return ELV_MQUEUE_MAY; 725 725 }
+1 -1
drivers/md/dm-crypt.c
··· 1135 1135 clone->bi_private = io; 1136 1136 clone->bi_end_io = crypt_endio; 1137 1137 clone->bi_bdev = cc->dev->bdev; 1138 - bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio)); 1138 + clone->bi_opf = io->base_bio->bi_opf; 1139 1139 } 1140 1140 1141 1141 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
+1 -2
drivers/scsi/sd.c
··· 1031 1031 } else if (rq_data_dir(rq) == READ) { 1032 1032 SCpnt->cmnd[0] = READ_6; 1033 1033 } else { 1034 - scmd_printk(KERN_ERR, SCpnt, "Unknown command %llu,%llx\n", 1035 - req_op(rq), (unsigned long long) rq->cmd_flags); 1034 + scmd_printk(KERN_ERR, SCpnt, "Unknown command %d\n", req_op(rq)); 1036 1035 goto out; 1037 1036 } 1038 1037
+2 -3
fs/btrfs/inode.c
··· 8427 8427 if (!bio) 8428 8428 return -ENOMEM; 8429 8429 8430 - bio_set_op_attrs(bio, bio_op(orig_bio), bio_flags(orig_bio)); 8430 + bio->bi_opf = orig_bio->bi_opf; 8431 8431 bio->bi_private = dip; 8432 8432 bio->bi_end_io = btrfs_end_dio_bio; 8433 8433 btrfs_io_bio(bio)->logical = file_offset; ··· 8465 8465 start_sector, GFP_NOFS); 8466 8466 if (!bio) 8467 8467 goto out_err; 8468 - bio_set_op_attrs(bio, bio_op(orig_bio), 8469 - bio_flags(orig_bio)); 8468 + bio->bi_opf = orig_bio->bi_opf; 8470 8469 bio->bi_private = dip; 8471 8470 bio->bi_end_io = btrfs_end_dio_bio; 8472 8471 btrfs_io_bio(bio)->logical = file_offset;
+1 -1
fs/buffer.c
··· 3118 3118 /** 3119 3119 * ll_rw_block: low-level access to block devices (DEPRECATED) 3120 3120 * @op: whether to %READ or %WRITE 3121 - * @op_flags: rq_flag_bits 3121 + * @op_flags: req_flag_bits 3122 3122 * @nr: number of &struct buffer_heads in the array 3123 3123 * @bhs: array of pointers to &struct buffer_head 3124 3124 *
+1 -1
fs/f2fs/f2fs.h
··· 688 688 struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */ 689 689 enum page_type type; /* contains DATA/NODE/META/META_FLUSH */ 690 690 int op; /* contains REQ_OP_ */ 691 - int op_flags; /* rq_flag_bits */ 691 + int op_flags; /* req_flag_bits */ 692 692 block_t new_blkaddr; /* new block address to be written */ 693 693 block_t old_blkaddr; /* old block address before Cow */ 694 694 struct page *page; /* page to be written */
+1 -1
fs/gfs2/lops.c
··· 231 231 * gfs2_log_flush_bio - Submit any pending log bio 232 232 * @sdp: The superblock 233 233 * @op: REQ_OP 234 - * @op_flags: rq_flag_bits 234 + * @op_flags: req_flag_bits 235 235 * 236 236 * Submit any pending part-built or full bio to the block device. If 237 237 * there is no pending bio, then this is a no-op.
+5 -6
include/linux/blk-cgroup.h
··· 581 581 /** 582 582 * blkg_rwstat_add - add a value to a blkg_rwstat 583 583 * @rwstat: target blkg_rwstat 584 - * @op: REQ_OP 585 - * @op_flags: rq_flag_bits 584 + * @op: REQ_OP and flags 586 585 * @val: value to add 587 586 * 588 587 * Add @val to @rwstat. The counters are chosen according to @rw. The 589 588 * caller is responsible for synchronizing calls to this function. 590 589 */ 591 590 static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, 592 - int op, int op_flags, uint64_t val) 591 + unsigned int op, uint64_t val) 593 592 { 594 593 struct percpu_counter *cnt; 595 594 ··· 599 600 600 601 __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); 601 602 602 - if (op_flags & REQ_SYNC) 603 + if (op & REQ_SYNC) 603 604 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; 604 605 else 605 606 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; ··· 704 705 705 706 if (!throtl) { 706 707 blkg = blkg ?: q->root_blkg; 707 - blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf, 708 + blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf, 708 709 bio->bi_iter.bi_size); 709 - blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1); 710 + blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1); 710 711 } 711 712 712 713 rcu_read_unlock();
+37 -46
include/linux/blk_types.h
··· 88 88 struct bio_vec bi_inline_vecs[0]; 89 89 }; 90 90 91 - #define BIO_OP_SHIFT (8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS) 92 - #define bio_flags(bio) ((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1)) 93 - #define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT) 94 - 95 - #define bio_set_op_attrs(bio, op, op_flags) do { \ 96 - if (__builtin_constant_p(op)) \ 97 - BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS)); \ 98 - else \ 99 - WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS)); \ 100 - if (__builtin_constant_p(op_flags)) \ 101 - BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \ 102 - else \ 103 - WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \ 104 - (bio)->bi_opf = bio_flags(bio); \ 105 - (bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT); \ 106 - (bio)->bi_opf |= (op_flags); \ 107 - } while (0) 108 - 109 91 #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) 110 92 111 93 /* ··· 129 147 #endif /* CONFIG_BLOCK */ 130 148 131 149 /* 132 - * Request flags. For use in the cmd_flags field of struct request, and in 133 - * bi_opf of struct bio. Note that some flags are only valid in either one. 150 + * Operations and flags common to the bio and request structures. 151 + * We use 8 bits for encoding the operation, and the remaining 24 for flags. 
134 152 */ 135 - enum rq_flag_bits { 136 - /* common flags */ 137 - __REQ_FAILFAST_DEV, /* no driver retries of device errors */ 153 + #define REQ_OP_BITS 8 154 + #define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1) 155 + #define REQ_FLAG_BITS 24 156 + 157 + enum req_opf { 158 + REQ_OP_READ, 159 + REQ_OP_WRITE, 160 + REQ_OP_DISCARD, /* request to discard sectors */ 161 + REQ_OP_SECURE_ERASE, /* request to securely erase sectors */ 162 + REQ_OP_WRITE_SAME, /* write same block many times */ 163 + REQ_OP_FLUSH, /* request for cache flush */ 164 + REQ_OP_ZONE_REPORT, /* Get zone information */ 165 + REQ_OP_ZONE_RESET, /* Reset a zone write pointer */ 166 + 167 + REQ_OP_LAST, 168 + }; 169 + 170 + enum req_flag_bits { 171 + __REQ_FAILFAST_DEV = /* no driver retries of device errors */ 172 + REQ_OP_BITS, 138 173 __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ 139 174 __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ 140 - 141 175 __REQ_SYNC, /* request is sync (sync write or read) */ 142 176 __REQ_META, /* metadata io request */ 143 177 __REQ_PRIO, /* boost priority in cfq */ 144 - 145 178 __REQ_NOMERGE, /* don't touch this for merging */ 146 179 __REQ_NOIDLE, /* don't anticipate more IO after this one */ 147 180 __REQ_INTEGRITY, /* I/O includes block integrity payload */ 148 181 __REQ_FUA, /* forced unit access */ 149 182 __REQ_PREFLUSH, /* request for cache flush */ 150 183 __REQ_RAHEAD, /* read ahead, can fail anytime */ 151 - 152 184 __REQ_NR_BITS, /* stops here */ 153 185 }; 154 186 ··· 172 176 #define REQ_SYNC (1ULL << __REQ_SYNC) 173 177 #define REQ_META (1ULL << __REQ_META) 174 178 #define REQ_PRIO (1ULL << __REQ_PRIO) 179 + #define REQ_NOMERGE (1ULL << __REQ_NOMERGE) 175 180 #define REQ_NOIDLE (1ULL << __REQ_NOIDLE) 176 181 #define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) 182 + #define REQ_FUA (1ULL << __REQ_FUA) 183 + #define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) 184 + #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) 177 185 178 186 #define 
REQ_FAILFAST_MASK \ 179 187 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 180 - #define REQ_COMMON_MASK \ 181 - (REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \ 182 - REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE | REQ_RAHEAD) 183 - #define REQ_CLONE_MASK REQ_COMMON_MASK 184 188 185 - /* This mask is used for both bio and request merge checking */ 186 189 #define REQ_NOMERGE_FLAGS \ 187 190 (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA) 188 191 189 - #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) 190 - #define REQ_FUA (1ULL << __REQ_FUA) 191 - #define REQ_NOMERGE (1ULL << __REQ_NOMERGE) 192 - #define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) 192 + #define bio_op(bio) \ 193 + ((bio)->bi_opf & REQ_OP_MASK) 194 + #define req_op(req) \ 195 + ((req)->cmd_flags & REQ_OP_MASK) 193 196 194 - enum req_op { 195 - REQ_OP_READ, 196 - REQ_OP_WRITE, 197 - REQ_OP_DISCARD, /* request to discard sectors */ 198 - REQ_OP_SECURE_ERASE, /* request to securely erase sectors */ 199 - REQ_OP_WRITE_SAME, /* write same block many times */ 200 - REQ_OP_FLUSH, /* request for cache flush */ 201 - REQ_OP_ZONE_REPORT, /* Get zone information */ 202 - REQ_OP_ZONE_RESET, /* Reset a zone write pointer */ 203 - }; 197 + /* obsolete, don't use in new code */ 198 + #define bio_set_op_attrs(bio, op, op_flags) \ 199 + ((bio)->bi_opf |= (op | op_flags)) 204 200 205 - #define REQ_OP_BITS 3 201 + static inline bool op_is_sync(unsigned int op) 202 + { 203 + return (op & REQ_OP_MASK) == REQ_OP_READ || (op & REQ_SYNC); 204 + } 206 205 207 206 typedef unsigned int blk_qc_t; 208 207 #define BLK_QC_T_NONE -1U
+2 -24
include/linux/blkdev.h
··· 142 142 143 143 int cpu; 144 144 unsigned cmd_type; 145 - u64 cmd_flags; 145 + unsigned int cmd_flags; /* op and common flags */ 146 146 req_flags_t rq_flags; 147 147 unsigned long atomic_flags; 148 148 ··· 243 243 /* for bidi */ 244 244 struct request *next_rq; 245 245 }; 246 - 247 - #define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS) 248 - #define req_op(req) ((req)->cmd_flags >> REQ_OP_SHIFT) 249 - 250 - #define req_set_op(req, op) do { \ 251 - WARN_ON(op >= (1 << REQ_OP_BITS)); \ 252 - (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1); \ 253 - (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT); \ 254 - } while (0) 255 - 256 - #define req_set_op_attrs(req, op, flags) do { \ 257 - req_set_op(req, op); \ 258 - (req)->cmd_flags |= flags; \ 259 - } while (0) 260 246 261 247 static inline unsigned short req_get_ioprio(struct request *req) 262 248 { ··· 727 741 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; 728 742 } 729 743 730 - /* 731 - * We regard a request as sync, if either a read or a sync write 732 - */ 733 - static inline bool rw_is_sync(int op, unsigned int rw_flags) 734 - { 735 - return op == REQ_OP_READ || (rw_flags & REQ_SYNC); 736 - } 737 - 738 744 static inline bool rq_is_sync(struct request *rq) 739 745 { 740 - return rw_is_sync(req_op(rq), rq->cmd_flags); 746 + return op_is_sync(rq->cmd_flags); 741 747 } 742 748 743 749 static inline bool blk_rl_full(struct request_list *rl, bool sync)
+1 -1
include/linux/blktrace_api.h
··· 118 118 } 119 119 120 120 extern void blk_dump_cmd(char *buf, struct request *rq); 121 - extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes); 121 + extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes); 122 122 123 123 #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ 124 124
+1 -1
include/linux/dm-io.h
··· 58 58 struct dm_io_client; 59 59 struct dm_io_request { 60 60 int bi_op; /* REQ_OP */ 61 - int bi_op_flags; /* rq_flag_bits */ 61 + int bi_op_flags; /* req_flag_bits */ 62 62 struct dm_io_memory mem; /* Memory to use for io */ 63 63 struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ 64 64 struct dm_io_client *client; /* Client memory handler */
+2 -2
include/linux/elevator.h
··· 30 30 typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); 31 31 typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); 32 32 typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); 33 - typedef int (elevator_may_queue_fn) (struct request_queue *, int, int); 33 + typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int); 34 34 35 35 typedef void (elevator_init_icq_fn) (struct io_cq *); 36 36 typedef void (elevator_exit_icq_fn) (struct io_cq *); ··· 139 139 extern struct request *elv_latter_request(struct request_queue *, struct request *); 140 140 extern int elv_register_queue(struct request_queue *q); 141 141 extern void elv_unregister_queue(struct request_queue *q); 142 - extern int elv_may_queue(struct request_queue *, int, int); 142 + extern int elv_may_queue(struct request_queue *, unsigned int); 143 143 extern void elv_completed_request(struct request_queue *, struct request *); 144 144 extern int elv_set_request(struct request_queue *q, struct request *rq, 145 145 struct bio *bio, gfp_t gfp_mask);
+4 -8
include/trace/events/bcache.h
··· 27 27 __entry->sector = bio->bi_iter.bi_sector; 28 28 __entry->orig_sector = bio->bi_iter.bi_sector - 16; 29 29 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 30 - blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, 31 - bio->bi_iter.bi_size); 30 + blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); 32 31 ), 33 32 34 33 TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)", ··· 101 102 __entry->dev = bio->bi_bdev->bd_dev; 102 103 __entry->sector = bio->bi_iter.bi_sector; 103 104 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 104 - blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, 105 - bio->bi_iter.bi_size); 105 + blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); 106 106 ), 107 107 108 108 TP_printk("%d,%d %s %llu + %u", ··· 136 138 __entry->dev = bio->bi_bdev->bd_dev; 137 139 __entry->sector = bio->bi_iter.bi_sector; 138 140 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 139 - blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, 140 - bio->bi_iter.bi_size); 141 + blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); 141 142 __entry->cache_hit = hit; 142 143 __entry->bypass = bypass; 143 144 ), ··· 167 170 __entry->inode = inode; 168 171 __entry->sector = bio->bi_iter.bi_sector; 169 172 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 170 - blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, 171 - bio->bi_iter.bi_size); 173 + blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); 172 174 __entry->writeback = writeback; 173 175 __entry->bypass = bypass; 174 176 ),
+11 -20
include/trace/events/block.h
··· 84 84 0 : blk_rq_sectors(rq); 85 85 __entry->errors = rq->errors; 86 86 87 - blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, 88 - blk_rq_bytes(rq)); 87 + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); 89 88 blk_dump_cmd(__get_str(cmd), rq); 90 89 ), 91 90 ··· 162 163 __entry->nr_sector = nr_bytes >> 9; 163 164 __entry->errors = rq->errors; 164 165 165 - blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, nr_bytes); 166 + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes); 166 167 blk_dump_cmd(__get_str(cmd), rq); 167 168 ), 168 169 ··· 198 199 __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 199 200 blk_rq_bytes(rq) : 0; 200 201 201 - blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, 202 - blk_rq_bytes(rq)); 202 + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); 203 203 blk_dump_cmd(__get_str(cmd), rq); 204 204 memcpy(__entry->comm, current->comm, TASK_COMM_LEN); 205 205 ), ··· 272 274 bio->bi_bdev->bd_dev : 0; 273 275 __entry->sector = bio->bi_iter.bi_sector; 274 276 __entry->nr_sector = bio_sectors(bio); 275 - blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, 276 - bio->bi_iter.bi_size); 277 + blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); 277 278 memcpy(__entry->comm, current->comm, TASK_COMM_LEN); 278 279 ), 279 280 ··· 310 313 __entry->sector = bio->bi_iter.bi_sector; 311 314 __entry->nr_sector = bio_sectors(bio); 312 315 __entry->error = error; 313 - blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, 314 - bio->bi_iter.bi_size); 316 + blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); 315 317 ), 316 318 317 319 TP_printk("%d,%d %s %llu + %u [%d]", ··· 337 341 __entry->dev = bio->bi_bdev->bd_dev; 338 342 __entry->sector = bio->bi_iter.bi_sector; 339 343 __entry->nr_sector = bio_sectors(bio); 340 - blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, 341 - bio->bi_iter.bi_size); 344 + blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); 342 345 
memcpy(__entry->comm, current->comm, TASK_COMM_LEN); 343 346 ), 344 347 ··· 404 409 __entry->dev = bio->bi_bdev->bd_dev; 405 410 __entry->sector = bio->bi_iter.bi_sector; 406 411 __entry->nr_sector = bio_sectors(bio); 407 - blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, 408 - bio->bi_iter.bi_size); 412 + blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); 409 413 memcpy(__entry->comm, current->comm, TASK_COMM_LEN); 410 414 ), 411 415 ··· 432 438 __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; 433 439 __entry->sector = bio ? bio->bi_iter.bi_sector : 0; 434 440 __entry->nr_sector = bio ? bio_sectors(bio) : 0; 435 - blk_fill_rwbs(__entry->rwbs, bio ? bio_op(bio) : 0, 441 + blk_fill_rwbs(__entry->rwbs, 436 442 bio ? bio->bi_opf : 0, __entry->nr_sector); 437 443 memcpy(__entry->comm, current->comm, TASK_COMM_LEN); 438 444 ), ··· 567 573 __entry->dev = bio->bi_bdev->bd_dev; 568 574 __entry->sector = bio->bi_iter.bi_sector; 569 575 __entry->new_sector = new_sector; 570 - blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, 571 - bio->bi_iter.bi_size); 576 + blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); 572 577 memcpy(__entry->comm, current->comm, TASK_COMM_LEN); 573 578 ), 574 579 ··· 610 617 __entry->nr_sector = bio_sectors(bio); 611 618 __entry->old_dev = dev; 612 619 __entry->old_sector = from; 613 - blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf, 614 - bio->bi_iter.bi_size); 620 + blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); 615 621 ), 616 622 617 623 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", ··· 656 664 __entry->old_dev = dev; 657 665 __entry->old_sector = from; 658 666 __entry->nr_bios = blk_rq_count_bios(rq); 659 - blk_fill_rwbs(__entry->rwbs, req_op(rq), rq->cmd_flags, 660 - blk_rq_bytes(rq)); 667 + blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); 661 668 ), 662 669 663 670 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu %u",
+7 -7
kernel/trace/blktrace.c
··· 1777 1777 } 1778 1778 } 1779 1779 1780 - void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes) 1780 + void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes) 1781 1781 { 1782 1782 int i = 0; 1783 1783 1784 - if (rw & REQ_PREFLUSH) 1784 + if (op & REQ_PREFLUSH) 1785 1785 rwbs[i++] = 'F'; 1786 1786 1787 - switch (op) { 1787 + switch (op & REQ_OP_MASK) { 1788 1788 case REQ_OP_WRITE: 1789 1789 case REQ_OP_WRITE_SAME: 1790 1790 rwbs[i++] = 'W'; ··· 1806 1806 rwbs[i++] = 'N'; 1807 1807 } 1808 1808 1809 - if (rw & REQ_FUA) 1809 + if (op & REQ_FUA) 1810 1810 rwbs[i++] = 'F'; 1811 - if (rw & REQ_RAHEAD) 1811 + if (op & REQ_RAHEAD) 1812 1812 rwbs[i++] = 'A'; 1813 - if (rw & REQ_SYNC) 1813 + if (op & REQ_SYNC) 1814 1814 rwbs[i++] = 'S'; 1815 - if (rw & REQ_META) 1815 + if (op & REQ_META) 1816 1816 rwbs[i++] = 'M'; 1817 1817 1818 1818 rwbs[i] = '\0';