Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: split out request-only flags into a new namespace

A lot of the REQ_* flags are only used on struct requests, and only of
use to the block layer and a few drivers that dig into struct request
internals.

This patch adds a new req_flags_t rq_flags field to struct request for
them, and thus dramatically shrinks the number of common request flags. It
also removes the unfortunate situation where we have to fit the fields
from the same enum into 32 bits for struct bio and 64 bits for
struct request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Shaun Tancheff <shaun.tancheff@seagate.com>
Signed-off-by: Jens Axboe <axboe@fb.com>

authored by

Christoph Hellwig and committed by
Jens Axboe
e8064021 8d2bbd4c

+242 -218
+1 -1
Documentation/block/biodoc.txt
··· 348 348 block layer would invoke to pre-build device commands for a given request, 349 349 or perform other preparatory processing for the request. This is routine is 350 350 called by elv_next_request(), i.e. typically just before servicing a request. 351 - (The prepare function would not be called for requests that have REQ_DONTPREP 351 + (The prepare function would not be called for requests that have RQF_DONTPREP 352 352 enabled) 353 353 354 354 Aside:
+37 -34
block/blk-core.c
··· 145 145 if (error) 146 146 bio->bi_error = error; 147 147 148 - if (unlikely(rq->cmd_flags & REQ_QUIET)) 148 + if (unlikely(rq->rq_flags & RQF_QUIET)) 149 149 bio_set_flag(bio, BIO_QUIET); 150 150 151 151 bio_advance(bio, nbytes); 152 152 153 153 /* don't actually finish bio if it's part of flush sequence */ 154 - if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) 154 + if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) 155 155 bio_endio(bio); 156 156 } 157 157 ··· 899 899 900 900 static inline void blk_free_request(struct request_list *rl, struct request *rq) 901 901 { 902 - if (rq->cmd_flags & REQ_ELVPRIV) { 902 + if (rq->rq_flags & RQF_ELVPRIV) { 903 903 elv_put_request(rl->q, rq); 904 904 if (rq->elv.icq) 905 905 put_io_context(rq->elv.icq->ioc); ··· 961 961 * A request has just been released. Account for it, update the full and 962 962 * congestion status, wake up any waiters. Called under q->queue_lock. 963 963 */ 964 - static void freed_request(struct request_list *rl, int op, unsigned int flags) 964 + static void freed_request(struct request_list *rl, bool sync, 965 + req_flags_t rq_flags) 965 966 { 966 967 struct request_queue *q = rl->q; 967 - int sync = rw_is_sync(op, flags); 968 968 969 969 q->nr_rqs[sync]--; 970 970 rl->count[sync]--; 971 - if (flags & REQ_ELVPRIV) 971 + if (rq_flags & RQF_ELVPRIV) 972 972 q->nr_rqs_elvpriv--; 973 973 974 974 __freed_request(rl, sync); ··· 1079 1079 struct io_cq *icq = NULL; 1080 1080 const bool is_sync = rw_is_sync(op, op_flags) != 0; 1081 1081 int may_queue; 1082 + req_flags_t rq_flags = RQF_ALLOCED; 1082 1083 1083 1084 if (unlikely(blk_queue_dying(q))) 1084 1085 return ERR_PTR(-ENODEV); ··· 1128 1127 1129 1128 /* 1130 1129 * Decide whether the new request will be managed by elevator. If 1131 - * so, mark @op_flags and increment elvpriv. Non-zero elvpriv will 1130 + * so, mark @rq_flags and increment elvpriv. 
Non-zero elvpriv will 1132 1131 * prevent the current elevator from being destroyed until the new 1133 1132 * request is freed. This guarantees icq's won't be destroyed and 1134 1133 * makes creating new ones safe. ··· 1137 1136 * it will be created after releasing queue_lock. 1138 1137 */ 1139 1138 if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { 1140 - op_flags |= REQ_ELVPRIV; 1139 + rq_flags |= RQF_ELVPRIV; 1141 1140 q->nr_rqs_elvpriv++; 1142 1141 if (et->icq_cache && ioc) 1143 1142 icq = ioc_lookup_icq(ioc, q); 1144 1143 } 1145 1144 1146 1145 if (blk_queue_io_stat(q)) 1147 - op_flags |= REQ_IO_STAT; 1146 + rq_flags |= RQF_IO_STAT; 1148 1147 spin_unlock_irq(q->queue_lock); 1149 1148 1150 1149 /* allocate and init request */ ··· 1154 1153 1155 1154 blk_rq_init(q, rq); 1156 1155 blk_rq_set_rl(rq, rl); 1157 - req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED); 1156 + req_set_op_attrs(rq, op, op_flags); 1157 + rq->rq_flags = rq_flags; 1158 1158 1159 1159 /* init elvpriv */ 1160 - if (op_flags & REQ_ELVPRIV) { 1160 + if (rq_flags & RQF_ELVPRIV) { 1161 1161 if (unlikely(et->icq_cache && !icq)) { 1162 1162 if (ioc) 1163 1163 icq = ioc_create_icq(ioc, q, gfp_mask); ··· 1197 1195 printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n", 1198 1196 __func__, dev_name(q->backing_dev_info.dev)); 1199 1197 1200 - rq->cmd_flags &= ~REQ_ELVPRIV; 1198 + rq->rq_flags &= ~RQF_ELVPRIV; 1201 1199 rq->elv.icq = NULL; 1202 1200 1203 1201 spin_lock_irq(q->queue_lock); ··· 1214 1212 * queue, but this is pretty rare. 
1215 1213 */ 1216 1214 spin_lock_irq(q->queue_lock); 1217 - freed_request(rl, op, op_flags); 1215 + freed_request(rl, is_sync, rq_flags); 1218 1216 1219 1217 /* 1220 1218 * in the very unlikely event that allocation failed and no ··· 1349 1347 blk_clear_rq_complete(rq); 1350 1348 trace_block_rq_requeue(q, rq); 1351 1349 1352 - if (rq->cmd_flags & REQ_QUEUED) 1350 + if (rq->rq_flags & RQF_QUEUED) 1353 1351 blk_queue_end_tag(q, rq); 1354 1352 1355 1353 BUG_ON(blk_queued_rq(rq)); ··· 1411 1409 #ifdef CONFIG_PM 1412 1410 static void blk_pm_put_request(struct request *rq) 1413 1411 { 1414 - if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending) 1412 + if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending) 1415 1413 pm_runtime_mark_last_busy(rq->q->dev); 1416 1414 } 1417 1415 #else ··· 1423 1421 */ 1424 1422 void __blk_put_request(struct request_queue *q, struct request *req) 1425 1423 { 1424 + req_flags_t rq_flags = req->rq_flags; 1425 + 1426 1426 if (unlikely(!q)) 1427 1427 return; 1428 1428 ··· 1444 1440 * Request may not have originated from ll_rw_blk. if not, 1445 1441 * it didn't come out of our reserved rq pools 1446 1442 */ 1447 - if (req->cmd_flags & REQ_ALLOCED) { 1448 - unsigned int flags = req->cmd_flags; 1449 - int op = req_op(req); 1443 + if (rq_flags & RQF_ALLOCED) { 1450 1444 struct request_list *rl = blk_rq_rl(req); 1445 + bool sync = rw_is_sync(req_op(req), req->cmd_flags); 1451 1446 1452 1447 BUG_ON(!list_empty(&req->queuelist)); 1453 1448 BUG_ON(ELV_ON_HASH(req)); 1454 1449 1455 1450 blk_free_request(rl, req); 1456 - freed_request(rl, op, flags); 1451 + freed_request(rl, sync, rq_flags); 1457 1452 blk_put_rl(rl); 1458 1453 } 1459 1454 } ··· 2217 2214 unsigned int bytes = 0; 2218 2215 struct bio *bio; 2219 2216 2220 - if (!(rq->cmd_flags & REQ_MIXED_MERGE)) 2217 + if (!(rq->rq_flags & RQF_MIXED_MERGE)) 2221 2218 return blk_rq_bytes(rq); 2222 2219 2223 2220 /* ··· 2260 2257 * normal IO on queueing nor completion. 
Accounting the 2261 2258 * containing request is enough. 2262 2259 */ 2263 - if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) { 2260 + if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) { 2264 2261 unsigned long duration = jiffies - req->start_time; 2265 2262 const int rw = rq_data_dir(req); 2266 2263 struct hd_struct *part; ··· 2288 2285 struct request *rq) 2289 2286 { 2290 2287 if (q->dev && (q->rpm_status == RPM_SUSPENDED || 2291 - (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM)))) 2288 + (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM)))) 2292 2289 return NULL; 2293 2290 else 2294 2291 return rq; ··· 2364 2361 if (!rq) 2365 2362 break; 2366 2363 2367 - if (!(rq->cmd_flags & REQ_STARTED)) { 2364 + if (!(rq->rq_flags & RQF_STARTED)) { 2368 2365 /* 2369 2366 * This is the first time the device driver 2370 2367 * sees this request (possibly after 2371 2368 * requeueing). Notify IO scheduler. 2372 2369 */ 2373 - if (rq->cmd_flags & REQ_SORTED) 2370 + if (rq->rq_flags & RQF_SORTED) 2374 2371 elv_activate_rq(q, rq); 2375 2372 2376 2373 /* ··· 2378 2375 * it, a request that has been delayed should 2379 2376 * not be passed by new incoming requests 2380 2377 */ 2381 - rq->cmd_flags |= REQ_STARTED; 2378 + rq->rq_flags |= RQF_STARTED; 2382 2379 trace_block_rq_issue(q, rq); 2383 2380 } 2384 2381 ··· 2387 2384 q->boundary_rq = NULL; 2388 2385 } 2389 2386 2390 - if (rq->cmd_flags & REQ_DONTPREP) 2387 + if (rq->rq_flags & RQF_DONTPREP) 2391 2388 break; 2392 2389 2393 2390 if (q->dma_drain_size && blk_rq_bytes(rq)) { ··· 2410 2407 /* 2411 2408 * the request may have been (partially) prepped. 2412 2409 * we need to keep this request in the front to 2413 - * avoid resource deadlock. REQ_STARTED will 2410 + * avoid resource deadlock. RQF_STARTED will 2414 2411 * prevent other fs requests from passing this one. 
2415 2412 */ 2416 2413 if (q->dma_drain_size && blk_rq_bytes(rq) && 2417 - !(rq->cmd_flags & REQ_DONTPREP)) { 2414 + !(rq->rq_flags & RQF_DONTPREP)) { 2418 2415 /* 2419 2416 * remove the space for the drain we added 2420 2417 * so that we don't add it again ··· 2427 2424 } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) { 2428 2425 int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO; 2429 2426 2430 - rq->cmd_flags |= REQ_QUIET; 2427 + rq->rq_flags |= RQF_QUIET; 2431 2428 /* 2432 2429 * Mark this request as started so we don't trigger 2433 2430 * any debug logic in the end I/O path. ··· 2564 2561 req->errors = 0; 2565 2562 2566 2563 if (error && req->cmd_type == REQ_TYPE_FS && 2567 - !(req->cmd_flags & REQ_QUIET)) { 2564 + !(req->rq_flags & RQF_QUIET)) { 2568 2565 char *error_type; 2569 2566 2570 2567 switch (error) { ··· 2637 2634 req->__sector += total_bytes >> 9; 2638 2635 2639 2636 /* mixed attributes always follow the first bio */ 2640 - if (req->cmd_flags & REQ_MIXED_MERGE) { 2637 + if (req->rq_flags & RQF_MIXED_MERGE) { 2641 2638 req->cmd_flags &= ~REQ_FAILFAST_MASK; 2642 2639 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK; 2643 2640 } ··· 2690 2687 { 2691 2688 struct request_queue *q = req->q; 2692 2689 2693 - req->cmd_flags &= ~REQ_DONTPREP; 2690 + req->rq_flags &= ~RQF_DONTPREP; 2694 2691 if (q->unprep_rq_fn) 2695 2692 q->unprep_rq_fn(q, req); 2696 2693 } ··· 2701 2698 */ 2702 2699 void blk_finish_request(struct request *req, int error) 2703 2700 { 2704 - if (req->cmd_flags & REQ_QUEUED) 2701 + if (req->rq_flags & RQF_QUEUED) 2705 2702 blk_queue_end_tag(req->q, req); 2706 2703 2707 2704 BUG_ON(blk_queued_rq(req)); ··· 2711 2708 2712 2709 blk_delete_timer(req); 2713 2710 2714 - if (req->cmd_flags & REQ_DONTPREP) 2711 + if (req->rq_flags & RQF_DONTPREP) 2715 2712 blk_unprep_request(req); 2716 2713 2717 2714 blk_account_io_done(req);
+1 -1
block/blk-exec.c
··· 72 72 spin_lock_irq(q->queue_lock); 73 73 74 74 if (unlikely(blk_queue_dying(q))) { 75 - rq->cmd_flags |= REQ_QUIET; 75 + rq->rq_flags |= RQF_QUIET; 76 76 rq->errors = -ENXIO; 77 77 __blk_end_request_all(rq, rq->errors); 78 78 spin_unlock_irq(q->queue_lock);
+5 -4
block/blk-flush.c
··· 56 56 * Once while executing DATA and again after the whole sequence is 57 57 * complete. The first completion updates the contained bio but doesn't 58 58 * finish it so that the bio submitter is notified only after the whole 59 - * sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in 59 + * sequence is complete. This is implemented by testing RQF_FLUSH_SEQ in 60 60 * req_bio_endio(). 61 61 * 62 62 * The above peculiarity requires that each FLUSH/FUA request has only one ··· 127 127 rq->bio = rq->biotail; 128 128 129 129 /* make @rq a normal request */ 130 - rq->cmd_flags &= ~REQ_FLUSH_SEQ; 130 + rq->rq_flags &= ~RQF_FLUSH_SEQ; 131 131 rq->end_io = rq->flush.saved_end_io; 132 132 } 133 133 ··· 330 330 } 331 331 332 332 flush_rq->cmd_type = REQ_TYPE_FS; 333 - req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH | REQ_FLUSH_SEQ); 333 + req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH); 334 + flush_rq->rq_flags |= RQF_FLUSH_SEQ; 334 335 flush_rq->rq_disk = first_rq->rq_disk; 335 336 flush_rq->end_io = flush_end_io; 336 337 ··· 434 433 */ 435 434 memset(&rq->flush, 0, sizeof(rq->flush)); 436 435 INIT_LIST_HEAD(&rq->flush.list); 437 - rq->cmd_flags |= REQ_FLUSH_SEQ; 436 + rq->rq_flags |= RQF_FLUSH_SEQ; 438 437 rq->flush.saved_end_io = rq->end_io; /* Usually NULL */ 439 438 if (q->mq_ops) { 440 439 rq->end_io = mq_flush_data_end_io;
+2 -2
block/blk-map.c
··· 135 135 } while (iov_iter_count(&i)); 136 136 137 137 if (!bio_flagged(bio, BIO_USER_MAPPED)) 138 - rq->cmd_flags |= REQ_COPY_USER; 138 + rq->rq_flags |= RQF_COPY_USER; 139 139 return 0; 140 140 141 141 unmap_rq: ··· 232 232 bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 233 233 234 234 if (do_copy) 235 - rq->cmd_flags |= REQ_COPY_USER; 235 + rq->rq_flags |= RQF_COPY_USER; 236 236 237 237 ret = blk_rq_append_bio(rq, bio); 238 238 if (unlikely(ret)) {
+4 -4
block/blk-merge.c
··· 456 456 if (rq->bio) 457 457 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); 458 458 459 - if (unlikely(rq->cmd_flags & REQ_COPY_USER) && 459 + if (unlikely(rq->rq_flags & RQF_COPY_USER) && 460 460 (blk_rq_bytes(rq) & q->dma_pad_mask)) { 461 461 unsigned int pad_len = 462 462 (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; ··· 634 634 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; 635 635 struct bio *bio; 636 636 637 - if (rq->cmd_flags & REQ_MIXED_MERGE) 637 + if (rq->rq_flags & RQF_MIXED_MERGE) 638 638 return; 639 639 640 640 /* ··· 647 647 (bio->bi_opf & REQ_FAILFAST_MASK) != ff); 648 648 bio->bi_opf |= ff; 649 649 } 650 - rq->cmd_flags |= REQ_MIXED_MERGE; 650 + rq->rq_flags |= RQF_MIXED_MERGE; 651 651 } 652 652 653 653 static void blk_account_io_merge(struct request *req) ··· 709 709 * makes sure that all involved bios have mixable attributes 710 710 * set properly. 711 711 */ 712 - if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE || 712 + if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) || 713 713 (req->cmd_flags & REQ_FAILFAST_MASK) != 714 714 (next->cmd_flags & REQ_FAILFAST_MASK)) { 715 715 blk_rq_set_mixed_merge(req);
+9 -10
block/blk-mq.c
··· 142 142 struct request *rq, int op, 143 143 unsigned int op_flags) 144 144 { 145 - if (blk_queue_io_stat(q)) 146 - op_flags |= REQ_IO_STAT; 147 - 148 145 INIT_LIST_HEAD(&rq->queuelist); 149 146 /* csd/requeue_work/fifo_time is initialized before use */ 150 147 rq->q = q; 151 148 rq->mq_ctx = ctx; 152 149 req_set_op_attrs(rq, op, op_flags); 150 + if (blk_queue_io_stat(q)) 151 + rq->rq_flags |= RQF_IO_STAT; 153 152 /* do not touch atomic flags, it needs atomic ops against the timer */ 154 153 rq->cpu = -1; 155 154 INIT_HLIST_NODE(&rq->hash); ··· 197 198 rq = data->hctx->tags->rqs[tag]; 198 199 199 200 if (blk_mq_tag_busy(data->hctx)) { 200 - rq->cmd_flags = REQ_MQ_INFLIGHT; 201 + rq->rq_flags = RQF_MQ_INFLIGHT; 201 202 atomic_inc(&data->hctx->nr_active); 202 203 } 203 204 ··· 297 298 const int tag = rq->tag; 298 299 struct request_queue *q = rq->q; 299 300 300 - if (rq->cmd_flags & REQ_MQ_INFLIGHT) 301 + if (rq->rq_flags & RQF_MQ_INFLIGHT) 301 302 atomic_dec(&hctx->nr_active); 302 - rq->cmd_flags = 0; 303 + rq->rq_flags = 0; 303 304 304 305 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); 305 306 blk_mq_put_tag(hctx, ctx, tag); ··· 488 489 spin_unlock_irqrestore(&q->requeue_lock, flags); 489 490 490 491 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { 491 - if (!(rq->cmd_flags & REQ_SOFTBARRIER)) 492 + if (!(rq->rq_flags & RQF_SOFTBARRIER)) 492 493 continue; 493 494 494 - rq->cmd_flags &= ~REQ_SOFTBARRIER; 495 + rq->rq_flags &= ~RQF_SOFTBARRIER; 495 496 list_del_init(&rq->queuelist); 496 497 blk_mq_insert_request(rq, true, false, false); 497 498 } ··· 518 519 * We abuse this flag that is otherwise used by the I/O scheduler to 519 520 * request head insertation from the workqueue. 
520 521 */ 521 - BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER); 522 + BUG_ON(rq->rq_flags & RQF_SOFTBARRIER); 522 523 523 524 spin_lock_irqsave(&q->requeue_lock, flags); 524 525 if (at_head) { 525 - rq->cmd_flags |= REQ_SOFTBARRIER; 526 + rq->rq_flags |= RQF_SOFTBARRIER; 526 527 list_add(&rq->queuelist, &q->requeue_list); 527 528 } else { 528 529 list_add_tail(&rq->queuelist, &q->requeue_list);
+3 -3
block/blk-tag.c
··· 270 270 BUG_ON(tag >= bqt->real_max_depth); 271 271 272 272 list_del_init(&rq->queuelist); 273 - rq->cmd_flags &= ~REQ_QUEUED; 273 + rq->rq_flags &= ~RQF_QUEUED; 274 274 rq->tag = -1; 275 275 276 276 if (unlikely(bqt->tag_index[tag] == NULL)) ··· 316 316 unsigned max_depth; 317 317 int tag; 318 318 319 - if (unlikely((rq->cmd_flags & REQ_QUEUED))) { 319 + if (unlikely((rq->rq_flags & RQF_QUEUED))) { 320 320 printk(KERN_ERR 321 321 "%s: request %p for device [%s] already tagged %d", 322 322 __func__, rq, ··· 371 371 */ 372 372 373 373 bqt->next_tag = (tag + 1) % bqt->max_depth; 374 - rq->cmd_flags |= REQ_QUEUED; 374 + rq->rq_flags |= RQF_QUEUED; 375 375 rq->tag = tag; 376 376 bqt->tag_index[tag] = rq; 377 377 blk_start_request(rq);
+2 -2
block/blk.h
··· 130 130 /* 131 131 * Internal elevator interface 132 132 */ 133 - #define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED) 133 + #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED) 134 134 135 135 void blk_insert_flush(struct request *rq); 136 136 ··· 247 247 static inline int blk_do_io_stat(struct request *rq) 248 248 { 249 249 return rq->rq_disk && 250 - (rq->cmd_flags & REQ_IO_STAT) && 250 + (rq->rq_flags & RQF_IO_STAT) && 251 251 (rq->cmd_type == REQ_TYPE_FS); 252 252 } 253 253
+15 -17
block/elevator.c
··· 245 245 static inline void __elv_rqhash_del(struct request *rq) 246 246 { 247 247 hash_del(&rq->hash); 248 - rq->cmd_flags &= ~REQ_HASHED; 248 + rq->rq_flags &= ~RQF_HASHED; 249 249 } 250 250 251 251 static void elv_rqhash_del(struct request_queue *q, struct request *rq) ··· 260 260 261 261 BUG_ON(ELV_ON_HASH(rq)); 262 262 hash_add(e->hash, &rq->hash, rq_hash_key(rq)); 263 - rq->cmd_flags |= REQ_HASHED; 263 + rq->rq_flags |= RQF_HASHED; 264 264 } 265 265 266 266 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq) ··· 352 352 { 353 353 sector_t boundary; 354 354 struct list_head *entry; 355 - int stop_flags; 356 355 357 356 if (q->last_merge == rq) 358 357 q->last_merge = NULL; ··· 361 362 q->nr_sorted--; 362 363 363 364 boundary = q->end_sector; 364 - stop_flags = REQ_SOFTBARRIER | REQ_STARTED; 365 365 list_for_each_prev(entry, &q->queue_head) { 366 366 struct request *pos = list_entry_rq(entry); 367 367 ··· 368 370 break; 369 371 if (rq_data_dir(rq) != rq_data_dir(pos)) 370 372 break; 371 - if (pos->cmd_flags & stop_flags) 373 + if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER)) 372 374 break; 373 375 if (blk_rq_pos(rq) >= boundary) { 374 376 if (blk_rq_pos(pos) < boundary) ··· 508 510 struct request *next) 509 511 { 510 512 struct elevator_queue *e = q->elevator; 511 - const int next_sorted = next->cmd_flags & REQ_SORTED; 513 + const int next_sorted = next->rq_flags & RQF_SORTED; 512 514 513 515 if (next_sorted && e->type->ops.elevator_merge_req_fn) 514 516 e->type->ops.elevator_merge_req_fn(q, rq, next); ··· 535 537 #ifdef CONFIG_PM 536 538 static void blk_pm_requeue_request(struct request *rq) 537 539 { 538 - if (rq->q->dev && !(rq->cmd_flags & REQ_PM)) 540 + if (rq->q->dev && !(rq->rq_flags & RQF_PM)) 539 541 rq->q->nr_pending--; 540 542 } 541 543 542 544 static void blk_pm_add_request(struct request_queue *q, struct request *rq) 543 545 { 544 - if (q->dev && !(rq->cmd_flags & REQ_PM) && q->nr_pending++ == 0 && 546 + if (q->dev 
&& !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 && 545 547 (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING)) 546 548 pm_request_resume(q->dev); 547 549 } ··· 561 563 */ 562 564 if (blk_account_rq(rq)) { 563 565 q->in_flight[rq_is_sync(rq)]--; 564 - if (rq->cmd_flags & REQ_SORTED) 566 + if (rq->rq_flags & RQF_SORTED) 565 567 elv_deactivate_rq(q, rq); 566 568 } 567 569 568 - rq->cmd_flags &= ~REQ_STARTED; 570 + rq->rq_flags &= ~RQF_STARTED; 569 571 570 572 blk_pm_requeue_request(rq); 571 573 ··· 595 597 596 598 rq->q = q; 597 599 598 - if (rq->cmd_flags & REQ_SOFTBARRIER) { 600 + if (rq->rq_flags & RQF_SOFTBARRIER) { 599 601 /* barriers are scheduling boundary, update end_sector */ 600 602 if (rq->cmd_type == REQ_TYPE_FS) { 601 603 q->end_sector = rq_end_sector(rq); 602 604 q->boundary_rq = rq; 603 605 } 604 - } else if (!(rq->cmd_flags & REQ_ELVPRIV) && 606 + } else if (!(rq->rq_flags & RQF_ELVPRIV) && 605 607 (where == ELEVATOR_INSERT_SORT || 606 608 where == ELEVATOR_INSERT_SORT_MERGE)) 607 609 where = ELEVATOR_INSERT_BACK; ··· 609 611 switch (where) { 610 612 case ELEVATOR_INSERT_REQUEUE: 611 613 case ELEVATOR_INSERT_FRONT: 612 - rq->cmd_flags |= REQ_SOFTBARRIER; 614 + rq->rq_flags |= RQF_SOFTBARRIER; 613 615 list_add(&rq->queuelist, &q->queue_head); 614 616 break; 615 617 616 618 case ELEVATOR_INSERT_BACK: 617 - rq->cmd_flags |= REQ_SOFTBARRIER; 619 + rq->rq_flags |= RQF_SOFTBARRIER; 618 620 elv_drain_elevator(q); 619 621 list_add_tail(&rq->queuelist, &q->queue_head); 620 622 /* ··· 640 642 break; 641 643 case ELEVATOR_INSERT_SORT: 642 644 BUG_ON(rq->cmd_type != REQ_TYPE_FS); 643 - rq->cmd_flags |= REQ_SORTED; 645 + rq->rq_flags |= RQF_SORTED; 644 646 q->nr_sorted++; 645 647 if (rq_mergeable(rq)) { 646 648 elv_rqhash_add(q, rq); ··· 657 659 break; 658 660 659 661 case ELEVATOR_INSERT_FLUSH: 660 - rq->cmd_flags |= REQ_SOFTBARRIER; 662 + rq->rq_flags |= RQF_SOFTBARRIER; 661 663 blk_insert_flush(rq); 662 664 break; 663 665 default: ··· 733 
735 */ 734 736 if (blk_account_rq(rq)) { 735 737 q->in_flight[rq_is_sync(rq)]--; 736 - if ((rq->cmd_flags & REQ_SORTED) && 738 + if ((rq->rq_flags & RQF_SORTED) && 737 739 e->type->ops.elevator_completed_req_fn) 738 740 e->type->ops.elevator_completed_req_fn(q, rq); 739 741 }
+1 -1
drivers/block/pktcdvd.c
··· 721 721 722 722 rq->timeout = 60*HZ; 723 723 if (cgc->quiet) 724 - rq->cmd_flags |= REQ_QUIET; 724 + rq->rq_flags |= RQF_QUIET; 725 725 726 726 blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0); 727 727 if (rq->errors)
+3 -3
drivers/ide/ide-atapi.c
··· 211 211 sense_rq->cmd[0] = GPCMD_REQUEST_SENSE; 212 212 sense_rq->cmd[4] = cmd_len; 213 213 sense_rq->cmd_type = REQ_TYPE_ATA_SENSE; 214 - sense_rq->cmd_flags |= REQ_PREEMPT; 214 + sense_rq->rq_flags |= RQF_PREEMPT; 215 215 216 216 if (drive->media == ide_tape) 217 217 sense_rq->cmd[13] = REQ_IDETAPE_PC1; ··· 295 295 wait = ATAPI_WAIT_PC; 296 296 break; 297 297 default: 298 - if (!(rq->cmd_flags & REQ_QUIET)) 298 + if (!(rq->rq_flags & RQF_QUIET)) 299 299 printk(KERN_INFO PFX "cmd 0x%x timed out\n", 300 300 rq->cmd[0]); 301 301 wait = 0; ··· 375 375 } 376 376 377 377 if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC) 378 - rq->cmd_flags |= REQ_FAILED; 378 + rq->rq_flags |= RQF_FAILED; 379 379 380 380 return 1; 381 381 }
+23 -23
drivers/ide/ide-cd.c
··· 98 98 struct request_sense *sense = &drive->sense_data; 99 99 int log = 0; 100 100 101 - if (!sense || !rq || (rq->cmd_flags & REQ_QUIET)) 101 + if (!sense || !rq || (rq->rq_flags & RQF_QUIET)) 102 102 return 0; 103 103 104 104 ide_debug_log(IDE_DBG_SENSE, "sense_key: 0x%x", sense->sense_key); ··· 291 291 * (probably while trying to recover from a former error). 292 292 * Just give up. 293 293 */ 294 - rq->cmd_flags |= REQ_FAILED; 294 + rq->rq_flags |= RQF_FAILED; 295 295 return 2; 296 296 } 297 297 ··· 311 311 cdrom_saw_media_change(drive); 312 312 313 313 if (rq->cmd_type == REQ_TYPE_FS && 314 - !(rq->cmd_flags & REQ_QUIET)) 314 + !(rq->rq_flags & RQF_QUIET)) 315 315 printk(KERN_ERR PFX "%s: tray open\n", 316 316 drive->name); 317 317 } ··· 346 346 * No point in retrying after an illegal request or data 347 347 * protect error. 348 348 */ 349 - if (!(rq->cmd_flags & REQ_QUIET)) 349 + if (!(rq->rq_flags & RQF_QUIET)) 350 350 ide_dump_status(drive, "command error", stat); 351 351 do_end_request = 1; 352 352 break; ··· 355 355 * No point in re-trying a zillion times on a bad sector. 356 356 * If we got here the error is not correctable. 357 357 */ 358 - if (!(rq->cmd_flags & REQ_QUIET)) 358 + if (!(rq->rq_flags & RQF_QUIET)) 359 359 ide_dump_status(drive, "media error " 360 360 "(bad sector)", stat); 361 361 do_end_request = 1; 362 362 break; 363 363 case BLANK_CHECK: 364 364 /* disk appears blank? 
*/ 365 - if (!(rq->cmd_flags & REQ_QUIET)) 365 + if (!(rq->rq_flags & RQF_QUIET)) 366 366 ide_dump_status(drive, "media error (blank)", 367 367 stat); 368 368 do_end_request = 1; ··· 380 380 } 381 381 382 382 if (rq->cmd_type != REQ_TYPE_FS) { 383 - rq->cmd_flags |= REQ_FAILED; 383 + rq->rq_flags |= RQF_FAILED; 384 384 do_end_request = 1; 385 385 } 386 386 ··· 422 422 int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, 423 423 int write, void *buffer, unsigned *bufflen, 424 424 struct request_sense *sense, int timeout, 425 - unsigned int cmd_flags) 425 + req_flags_t rq_flags) 426 426 { 427 427 struct cdrom_info *info = drive->driver_data; 428 428 struct request_sense local_sense; 429 429 int retries = 10; 430 - unsigned int flags = 0; 430 + req_flags_t flags = 0; 431 431 432 432 if (!sense) 433 433 sense = &local_sense; 434 434 435 435 ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, " 436 - "cmd_flags: 0x%x", 437 - cmd[0], write, timeout, cmd_flags); 436 + "rq_flags: 0x%x", 437 + cmd[0], write, timeout, rq_flags); 438 438 439 439 /* start of retry loop */ 440 440 do { ··· 446 446 memcpy(rq->cmd, cmd, BLK_MAX_CDB); 447 447 rq->cmd_type = REQ_TYPE_ATA_PC; 448 448 rq->sense = sense; 449 - rq->cmd_flags |= cmd_flags; 449 + rq->rq_flags |= rq_flags; 450 450 rq->timeout = timeout; 451 451 if (buffer) { 452 452 error = blk_rq_map_kern(drive->queue, rq, buffer, ··· 462 462 if (buffer) 463 463 *bufflen = rq->resid_len; 464 464 465 - flags = rq->cmd_flags; 465 + flags = rq->rq_flags; 466 466 blk_put_request(rq); 467 467 468 468 /* 469 469 * FIXME: we should probably abort/retry or something in case of 470 470 * failure. 471 471 */ 472 - if (flags & REQ_FAILED) { 472 + if (flags & RQF_FAILED) { 473 473 /* 474 474 * The request failed. Retry if it was due to a unit 475 475 * attention status (usually means media was changed). 
··· 494 494 } 495 495 496 496 /* end of retry loop */ 497 - } while ((flags & REQ_FAILED) && retries >= 0); 497 + } while ((flags & RQF_FAILED) && retries >= 0); 498 498 499 499 /* return an error if the command failed */ 500 - return (flags & REQ_FAILED) ? -EIO : 0; 500 + return (flags & RQF_FAILED) ? -EIO : 0; 501 501 } 502 502 503 503 /* ··· 589 589 "(%u bytes)\n", drive->name, __func__, 590 590 cmd->nleft); 591 591 if (!write) 592 - rq->cmd_flags |= REQ_FAILED; 592 + rq->rq_flags |= RQF_FAILED; 593 593 uptodate = 0; 594 594 } 595 595 } else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { ··· 607 607 } 608 608 609 609 if (!uptodate) 610 - rq->cmd_flags |= REQ_FAILED; 610 + rq->rq_flags |= RQF_FAILED; 611 611 } 612 612 goto out_end; 613 613 } ··· 745 745 rq->cmd[0], rq->cmd_type); 746 746 747 747 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) 748 - rq->cmd_flags |= REQ_QUIET; 748 + rq->rq_flags |= RQF_QUIET; 749 749 else 750 - rq->cmd_flags &= ~REQ_FAILED; 750 + rq->rq_flags &= ~RQF_FAILED; 751 751 752 752 drive->dma = 0; 753 753 ··· 867 867 */ 868 868 cmd[7] = cdi->sanyo_slot % 3; 869 869 870 - return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, REQ_QUIET); 870 + return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, RQF_QUIET); 871 871 } 872 872 873 873 static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, ··· 890 890 cmd[0] = GPCMD_READ_CDVD_CAPACITY; 891 891 892 892 stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, sense, 0, 893 - REQ_QUIET); 893 + RQF_QUIET); 894 894 if (stat) 895 895 return stat; 896 896 ··· 943 943 if (msf_flag) 944 944 cmd[1] = 2; 945 945 946 - return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, REQ_QUIET); 946 + return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, RQF_QUIET); 947 947 } 948 948 949 949 /* Try to read the entire TOC for the disk into our internal buffer. */
+1 -1
drivers/ide/ide-cd.h
··· 101 101 102 102 /* ide-cd.c functions used by ide-cd_ioctl.c */ 103 103 int ide_cd_queue_pc(ide_drive_t *, const unsigned char *, int, void *, 104 - unsigned *, struct request_sense *, int, unsigned int); 104 + unsigned *, struct request_sense *, int, req_flags_t); 105 105 int ide_cd_read_toc(ide_drive_t *, struct request_sense *); 106 106 int ide_cdrom_get_capabilities(ide_drive_t *, u8 *); 107 107 void ide_cdrom_update_speed(ide_drive_t *, u8 *);
+3 -3
drivers/ide/ide-cd_ioctl.c
··· 305 305 306 306 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 307 307 rq->cmd_type = REQ_TYPE_DRV_PRIV; 308 - rq->cmd_flags = REQ_QUIET; 308 + rq->rq_flags = RQF_QUIET; 309 309 ret = blk_execute_rq(drive->queue, cd->disk, rq, 0); 310 310 blk_put_request(rq); 311 311 /* ··· 449 449 struct packet_command *cgc) 450 450 { 451 451 ide_drive_t *drive = cdi->handle; 452 - unsigned int flags = 0; 452 + req_flags_t flags = 0; 453 453 unsigned len = cgc->buflen; 454 454 455 455 if (cgc->timeout <= 0) ··· 463 463 memset(cgc->sense, 0, sizeof(struct request_sense)); 464 464 465 465 if (cgc->quiet) 466 - flags |= REQ_QUIET; 466 + flags |= RQF_QUIET; 467 467 468 468 cgc->stat = ide_cd_queue_pc(drive, cgc->cmd, 469 469 cgc->data_direction == CGC_DATA_WRITE,
+3 -3
drivers/ide/ide-io.c
··· 307 307 { 308 308 ide_startstop_t startstop; 309 309 310 - BUG_ON(!(rq->cmd_flags & REQ_STARTED)); 310 + BUG_ON(!(rq->rq_flags & RQF_STARTED)); 311 311 312 312 #ifdef DEBUG 313 313 printk("%s: start_request: current=0x%08lx\n", ··· 316 316 317 317 /* bail early if we've exceeded max_failures */ 318 318 if (drive->max_failures && (drive->failures > drive->max_failures)) { 319 - rq->cmd_flags |= REQ_FAILED; 319 + rq->rq_flags |= RQF_FAILED; 320 320 goto kill_rq; 321 321 } 322 322 ··· 539 539 */ 540 540 if ((drive->dev_flags & IDE_DFLAG_BLOCKED) && 541 541 ata_pm_request(rq) == 0 && 542 - (rq->cmd_flags & REQ_PREEMPT) == 0) { 542 + (rq->rq_flags & RQF_PREEMPT) == 0) { 543 543 /* there should be no pending command at this point */ 544 544 ide_unlock_port(hwif); 545 545 goto plug_device;
+2 -2
drivers/ide/ide-pm.c
··· 53 53 54 54 spin_lock_irq(q->queue_lock); 55 55 if (unlikely(blk_queue_dying(q))) { 56 - rq->cmd_flags |= REQ_QUIET; 56 + rq->rq_flags |= RQF_QUIET; 57 57 rq->errors = -ENXIO; 58 58 __blk_end_request_all(rq, rq->errors); 59 59 spin_unlock_irq(q->queue_lock); ··· 90 90 memset(&rqpm, 0, sizeof(rqpm)); 91 91 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 92 92 rq->cmd_type = REQ_TYPE_ATA_PM_RESUME; 93 - rq->cmd_flags |= REQ_PREEMPT; 93 + rq->rq_flags |= RQF_PREEMPT; 94 94 rq->special = &rqpm; 95 95 rqpm.pm_step = IDE_PM_START_RESUME; 96 96 rqpm.pm_state = PM_EVENT_ON;
+6 -6
drivers/md/dm-rq.c
··· 313 313 314 314 if (!rq->q->mq_ops) { 315 315 rq->special = NULL; 316 - rq->cmd_flags &= ~REQ_DONTPREP; 316 + rq->rq_flags &= ~RQF_DONTPREP; 317 317 } 318 318 319 319 if (clone) ··· 431 431 return; 432 432 } 433 433 434 - if (rq->cmd_flags & REQ_FAILED) 434 + if (rq->rq_flags & RQF_FAILED) 435 435 mapped = false; 436 436 437 437 dm_done(clone, tio->error, mapped); ··· 460 460 */ 461 461 static void dm_kill_unmapped_request(struct request *rq, int error) 462 462 { 463 - rq->cmd_flags |= REQ_FAILED; 463 + rq->rq_flags |= RQF_FAILED; 464 464 dm_complete_request(rq, error); 465 465 } 466 466 ··· 476 476 * For just cleaning up the information of the queue in which 477 477 * the clone was dispatched. 478 478 * The clone is *NOT* freed actually here because it is alloced 479 - * from dm own mempool (REQ_ALLOCED isn't set). 479 + * from dm own mempool (RQF_ALLOCED isn't set). 480 480 */ 481 481 __blk_put_request(clone->q, clone); 482 482 } ··· 497 497 int r; 498 498 499 499 if (blk_queue_io_stat(clone->q)) 500 - clone->cmd_flags |= REQ_IO_STAT; 500 + clone->rq_flags |= RQF_IO_STAT; 501 501 502 502 clone->start_time = jiffies; 503 503 r = blk_insert_cloned_request(clone->q, clone); ··· 633 633 return BLKPREP_DEFER; 634 634 635 635 rq->special = tio; 636 - rq->cmd_flags |= REQ_DONTPREP; 636 + rq->rq_flags |= RQF_DONTPREP; 637 637 638 638 return BLKPREP_OK; 639 639 }
+1 -1
drivers/memstick/core/ms_block.c
··· 2006 2006 blk_dump_rq_flags(req, "MS unsupported request"); 2007 2007 return BLKPREP_KILL; 2008 2008 } 2009 - req->cmd_flags |= REQ_DONTPREP; 2009 + req->rq_flags |= RQF_DONTPREP; 2010 2010 return BLKPREP_OK; 2011 2011 } 2012 2012
+1 -1
drivers/memstick/core/mspro_block.c
··· 834 834 return BLKPREP_KILL; 835 835 } 836 836 837 - req->cmd_flags |= REQ_DONTPREP; 837 + req->rq_flags |= RQF_DONTPREP; 838 838 839 839 return BLKPREP_OK; 840 840 }
+2 -2
drivers/mmc/card/block.c
··· 2117 2117 mmc_blk_abort_packed_req(mq_rq); 2118 2118 } else { 2119 2119 if (mmc_card_removed(card)) 2120 - req->cmd_flags |= REQ_QUIET; 2120 + req->rq_flags |= RQF_QUIET; 2121 2121 while (ret) 2122 2122 ret = blk_end_request(req, -EIO, 2123 2123 blk_rq_cur_bytes(req)); ··· 2126 2126 start_new_req: 2127 2127 if (rqc) { 2128 2128 if (mmc_card_removed(card)) { 2129 - rqc->cmd_flags |= REQ_QUIET; 2129 + rqc->rq_flags |= RQF_QUIET; 2130 2130 blk_end_request_all(rqc, -EIO); 2131 2131 } else { 2132 2132 /*
+2 -2
drivers/mmc/card/queue.c
··· 44 44 if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) 45 45 return BLKPREP_KILL; 46 46 47 - req->cmd_flags |= REQ_DONTPREP; 47 + req->rq_flags |= RQF_DONTPREP; 48 48 49 49 return BLKPREP_OK; 50 50 } ··· 120 120 121 121 if (!mq) { 122 122 while ((req = blk_fetch_request(q)) != NULL) { 123 - req->cmd_flags |= REQ_QUIET; 123 + req->rq_flags |= RQF_QUIET; 124 124 __blk_end_request_all(req, -EIO); 125 125 } 126 126 return;
+2 -2
drivers/nvme/host/pci.c
··· 323 323 iod->nents = 0; 324 324 iod->length = size; 325 325 326 - if (!(rq->cmd_flags & REQ_DONTPREP)) { 326 + if (!(rq->rq_flags & RQF_DONTPREP)) { 327 327 rq->retries = 0; 328 - rq->cmd_flags |= REQ_DONTPREP; 328 + rq->rq_flags |= RQF_DONTPREP; 329 329 } 330 330 return 0; 331 331 }
+5 -3
drivers/scsi/device_handler/scsi_dh_alua.c
··· 154 154 return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE, 155 155 buff, bufflen, sshdr, 156 156 ALUA_FAILOVER_TIMEOUT * HZ, 157 - ALUA_FAILOVER_RETRIES, NULL, req_flags); 157 + ALUA_FAILOVER_RETRIES, NULL, 158 + req_flags, 0); 158 159 } 159 160 160 161 /* ··· 188 187 return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE, 189 188 stpg_data, stpg_len, 190 189 sshdr, ALUA_FAILOVER_TIMEOUT * HZ, 191 - ALUA_FAILOVER_RETRIES, NULL, req_flags); 190 + ALUA_FAILOVER_RETRIES, NULL, 191 + req_flags, 0); 192 192 } 193 193 194 194 static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size, ··· 1065 1063 state != SCSI_ACCESS_STATE_ACTIVE && 1066 1064 state != SCSI_ACCESS_STATE_LBA) { 1067 1065 ret = BLKPREP_KILL; 1068 - req->cmd_flags |= REQ_QUIET; 1066 + req->rq_flags |= RQF_QUIET; 1069 1067 } 1070 1068 return ret; 1071 1069
+1 -1
drivers/scsi/device_handler/scsi_dh_emc.c
··· 452 452 453 453 if (h->lun_state != CLARIION_LUN_OWNED) { 454 454 ret = BLKPREP_KILL; 455 - req->cmd_flags |= REQ_QUIET; 455 + req->rq_flags |= RQF_QUIET; 456 456 } 457 457 return ret; 458 458
+1 -1
drivers/scsi/device_handler/scsi_dh_hp_sw.c
··· 266 266 267 267 if (h->path_state != HP_SW_PATH_ACTIVE) { 268 268 ret = BLKPREP_KILL; 269 - req->cmd_flags |= REQ_QUIET; 269 + req->rq_flags |= RQF_QUIET; 270 270 } 271 271 return ret; 272 272
+1 -1
drivers/scsi/device_handler/scsi_dh_rdac.c
··· 724 724 725 725 if (h->state != RDAC_STATE_ACTIVE) { 726 726 ret = BLKPREP_KILL; 727 - req->cmd_flags |= REQ_QUIET; 727 + req->rq_flags |= RQF_QUIET; 728 728 } 729 729 return ret; 730 730
+1 -1
drivers/scsi/osd/osd_initiator.c
··· 1595 1595 } 1596 1596 1597 1597 or->request = req; 1598 - req->cmd_flags |= REQ_QUIET; 1598 + req->rq_flags |= RQF_QUIET; 1599 1599 1600 1600 req->timeout = or->timeout; 1601 1601 req->retries = or->retries;
+1 -1
drivers/scsi/osst.c
··· 368 368 return DRIVER_ERROR << 24; 369 369 370 370 blk_rq_set_block_pc(req); 371 - req->cmd_flags |= REQ_QUIET; 371 + req->rq_flags |= RQF_QUIET; 372 372 373 373 SRpnt->bio = NULL; 374 374
+1 -1
drivers/scsi/scsi_error.c
··· 1988 1988 1989 1989 req->cmd_len = COMMAND_SIZE(req->cmd[0]); 1990 1990 1991 - req->cmd_flags |= REQ_QUIET; 1991 + req->rq_flags |= RQF_QUIET; 1992 1992 req->timeout = 10 * HZ; 1993 1993 req->retries = 5; 1994 1994
+43 -32
drivers/scsi/scsi_lib.c
··· 163 163 { 164 164 __scsi_queue_insert(cmd, reason, 1); 165 165 } 166 - /** 167 - * scsi_execute - insert request and wait for the result 168 - * @sdev: scsi device 169 - * @cmd: scsi command 170 - * @data_direction: data direction 171 - * @buffer: data buffer 172 - * @bufflen: len of buffer 173 - * @sense: optional sense buffer 174 - * @timeout: request timeout in seconds 175 - * @retries: number of times to retry request 176 - * @flags: or into request flags; 177 - * @resid: optional residual length 178 - * 179 - * returns the req->errors value which is the scsi_cmnd result 180 - * field. 181 - */ 182 - int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 166 + 167 + static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 183 168 int data_direction, void *buffer, unsigned bufflen, 184 169 unsigned char *sense, int timeout, int retries, u64 flags, 185 - int *resid) 170 + req_flags_t rq_flags, int *resid) 186 171 { 187 172 struct request *req; 188 173 int write = (data_direction == DMA_TO_DEVICE); ··· 188 203 req->sense_len = 0; 189 204 req->retries = retries; 190 205 req->timeout = timeout; 191 - req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT; 206 + req->cmd_flags |= flags; 207 + req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT; 192 208 193 209 /* 194 210 * head injection *required* here otherwise quiesce won't work ··· 213 227 214 228 return ret; 215 229 } 230 + 231 + /** 232 + * scsi_execute - insert request and wait for the result 233 + * @sdev: scsi device 234 + * @cmd: scsi command 235 + * @data_direction: data direction 236 + * @buffer: data buffer 237 + * @bufflen: len of buffer 238 + * @sense: optional sense buffer 239 + * @timeout: request timeout in seconds 240 + * @retries: number of times to retry request 241 + * @flags: or into request flags; 242 + * @resid: optional residual length 243 + * 244 + * returns the req->errors value which is the scsi_cmnd result 245 + * field. 
246 + */ 247 + int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, 248 + int data_direction, void *buffer, unsigned bufflen, 249 + unsigned char *sense, int timeout, int retries, u64 flags, 250 + int *resid) 251 + { 252 + return __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense, 253 + timeout, retries, flags, 0, resid); 254 + } 216 255 EXPORT_SYMBOL(scsi_execute); 217 256 218 257 int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, 219 258 int data_direction, void *buffer, unsigned bufflen, 220 259 struct scsi_sense_hdr *sshdr, int timeout, int retries, 221 - int *resid, u64 flags) 260 + int *resid, u64 flags, req_flags_t rq_flags) 222 261 { 223 262 char *sense = NULL; 224 263 int result; ··· 253 242 if (!sense) 254 243 return DRIVER_ERROR << 24; 255 244 } 256 - result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, 257 - sense, timeout, retries, flags, resid); 245 + result = __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, 246 + sense, timeout, retries, flags, rq_flags, resid); 258 247 if (sshdr) 259 248 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); 260 249 ··· 824 813 */ 825 814 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d)) 826 815 ; 827 - else if (!(req->cmd_flags & REQ_QUIET)) 816 + else if (!(req->rq_flags & RQF_QUIET)) 828 817 scsi_print_sense(cmd); 829 818 result = 0; 830 819 /* BLOCK_PC may have set error */ ··· 954 943 switch (action) { 955 944 case ACTION_FAIL: 956 945 /* Give up and fail the remainder of the request */ 957 - if (!(req->cmd_flags & REQ_QUIET)) { 946 + if (!(req->rq_flags & RQF_QUIET)) { 958 947 static DEFINE_RATELIMIT_STATE(_rs, 959 948 DEFAULT_RATELIMIT_INTERVAL, 960 949 DEFAULT_RATELIMIT_BURST); ··· 983 972 * A new command will be prepared and issued. 
984 973 */ 985 974 if (q->mq_ops) { 986 - cmd->request->cmd_flags &= ~REQ_DONTPREP; 975 + cmd->request->rq_flags &= ~RQF_DONTPREP; 987 976 scsi_mq_uninit_cmd(cmd); 988 977 scsi_mq_requeue_cmd(cmd); 989 978 } else { ··· 1245 1234 /* 1246 1235 * If the devices is blocked we defer normal commands. 1247 1236 */ 1248 - if (!(req->cmd_flags & REQ_PREEMPT)) 1237 + if (!(req->rq_flags & RQF_PREEMPT)) 1249 1238 ret = BLKPREP_DEFER; 1250 1239 break; 1251 1240 default: ··· 1254 1243 * special commands. In particular any user initiated 1255 1244 * command is not allowed. 1256 1245 */ 1257 - if (!(req->cmd_flags & REQ_PREEMPT)) 1246 + if (!(req->rq_flags & RQF_PREEMPT)) 1258 1247 ret = BLKPREP_KILL; 1259 1248 break; 1260 1249 } ··· 1290 1279 blk_delay_queue(q, SCSI_QUEUE_DELAY); 1291 1280 break; 1292 1281 default: 1293 - req->cmd_flags |= REQ_DONTPREP; 1282 + req->rq_flags |= RQF_DONTPREP; 1294 1283 } 1295 1284 1296 1285 return ret; ··· 1747 1736 * we add the dev to the starved list so it eventually gets 1748 1737 * a run when a tag is freed. 1749 1738 */ 1750 - if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) { 1739 + if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) { 1751 1740 spin_lock_irq(shost->host_lock); 1752 1741 if (list_empty(&sdev->starved_entry)) 1753 1742 list_add_tail(&sdev->starved_entry, ··· 1914 1903 goto out_dec_target_busy; 1915 1904 1916 1905 1917 - if (!(req->cmd_flags & REQ_DONTPREP)) { 1906 + if (!(req->rq_flags & RQF_DONTPREP)) { 1918 1907 ret = prep_to_mq(scsi_mq_prep_fn(req)); 1919 1908 if (ret) 1920 1909 goto out_dec_host_busy; 1921 - req->cmd_flags |= REQ_DONTPREP; 1910 + req->rq_flags |= RQF_DONTPREP; 1922 1911 } else { 1923 1912 blk_mq_start_request(req); 1924 1913 } ··· 1963 1952 * we hit an error, as we will never see this command 1964 1953 * again. 1965 1954 */ 1966 - if (req->cmd_flags & REQ_DONTPREP) 1955 + if (req->rq_flags & RQF_DONTPREP) 1967 1956 scsi_mq_uninit_cmd(cmd); 1968 1957 break; 1969 1958 default:
+3 -3
drivers/scsi/sd.c
··· 1520 1520 */ 1521 1521 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, 1522 1522 &sshdr, timeout, SD_MAX_RETRIES, 1523 - NULL, REQ_PM); 1523 + NULL, 0, RQF_PM); 1524 1524 if (res == 0) 1525 1525 break; 1526 1526 } ··· 1879 1879 1880 1880 good_bytes = 0; 1881 1881 req->__data_len = blk_rq_bytes(req); 1882 - req->cmd_flags |= REQ_QUIET; 1882 + req->rq_flags |= RQF_QUIET; 1883 1883 } 1884 1884 } 1885 1885 } ··· 3278 3278 return -ENODEV; 3279 3279 3280 3280 res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, 3281 - SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM); 3281 + SD_TIMEOUT, SD_MAX_RETRIES, NULL, 0, RQF_PM); 3282 3282 if (res) { 3283 3283 sd_print_result(sdkp, "Start/Stop Unit failed", res); 3284 3284 if (driver_byte(res) & DRIVER_SENSE)
+1 -1
drivers/scsi/sd_zbc.c
··· 348 348 * this case, so be quiet about the error. 349 349 */ 350 350 if (req_op(rq) == REQ_OP_ZONE_RESET) 351 - rq->cmd_flags |= REQ_QUIET; 351 + rq->rq_flags |= RQF_QUIET; 352 352 break; 353 353 case 0x21: 354 354 /*
+1 -1
drivers/scsi/st.c
··· 546 546 return DRIVER_ERROR << 24; 547 547 548 548 blk_rq_set_block_pc(req); 549 - req->cmd_flags |= REQ_QUIET; 549 + req->rq_flags |= RQF_QUIET; 550 550 551 551 mdata->null_mapped = 1; 552 552
+3 -3
drivers/scsi/ufs/ufshcd.c
··· 5590 5590 5591 5591 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer, 5592 5592 SCSI_SENSE_BUFFERSIZE, NULL, 5593 - msecs_to_jiffies(1000), 3, NULL, REQ_PM); 5593 + msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM); 5594 5594 if (ret) 5595 5595 pr_err("%s: failed with err %d\n", __func__, ret); 5596 5596 ··· 5652 5652 5653 5653 /* 5654 5654 * Current function would be generally called from the power management 5655 - * callbacks hence set the REQ_PM flag so that it doesn't resume the 5655 + * callbacks hence set the RQF_PM flag so that it doesn't resume the 5656 5656 * already suspended childs. 5657 5657 */ 5658 5658 ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, 5659 - START_STOP_TIMEOUT, 0, NULL, REQ_PM); 5659 + START_STOP_TIMEOUT, 0, NULL, 0, RQF_PM); 5660 5660 if (ret) { 5661 5661 sdev_printk(KERN_WARNING, sdp, 5662 5662 "START_STOP failed for power mode: %d, result %x\n",
+1 -38
include/linux/blk_types.h
··· 167 167 __REQ_PREFLUSH, /* request for cache flush */ 168 168 __REQ_RAHEAD, /* read ahead, can fail anytime */ 169 169 170 - /* request only flags */ 171 - __REQ_SORTED, /* elevator knows about this request */ 172 - __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ 173 - __REQ_STARTED, /* drive already may have started this one */ 174 - __REQ_DONTPREP, /* don't call prep for this one */ 175 - __REQ_QUEUED, /* uses queueing */ 176 - __REQ_ELVPRIV, /* elevator private data attached */ 177 - __REQ_FAILED, /* set if the request failed */ 178 - __REQ_QUIET, /* don't worry about errors */ 179 - __REQ_PREEMPT, /* set for "ide_preempt" requests and also 180 - for requests for which the SCSI "quiesce" 181 - state must be ignored. */ 182 - __REQ_ALLOCED, /* request came from our alloc pool */ 183 - __REQ_COPY_USER, /* contains copies of user pages */ 184 - __REQ_FLUSH_SEQ, /* request for flush sequence */ 185 - __REQ_IO_STAT, /* account I/O stat */ 186 - __REQ_MIXED_MERGE, /* merge of different types, fail separately */ 187 - __REQ_PM, /* runtime pm request */ 188 - __REQ_HASHED, /* on IO scheduler merge hash */ 189 - __REQ_MQ_INFLIGHT, /* track inflight for MQ */ 190 170 __REQ_NR_BITS, /* stops here */ 191 171 }; 192 172 ··· 188 208 189 209 /* This mask is used for both bio and request merge checking */ 190 210 #define REQ_NOMERGE_FLAGS \ 191 - (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ) 211 + (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA) 192 212 193 213 #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) 194 - #define REQ_SORTED (1ULL << __REQ_SORTED) 195 - #define REQ_SOFTBARRIER (1ULL << __REQ_SOFTBARRIER) 196 214 #define REQ_FUA (1ULL << __REQ_FUA) 197 215 #define REQ_NOMERGE (1ULL << __REQ_NOMERGE) 198 - #define REQ_STARTED (1ULL << __REQ_STARTED) 199 - #define REQ_DONTPREP (1ULL << __REQ_DONTPREP) 200 - #define REQ_QUEUED (1ULL << __REQ_QUEUED) 201 - #define REQ_ELVPRIV (1ULL << __REQ_ELVPRIV) 202 - #define REQ_FAILED (1ULL << 
__REQ_FAILED) 203 - #define REQ_QUIET (1ULL << __REQ_QUIET) 204 - #define REQ_PREEMPT (1ULL << __REQ_PREEMPT) 205 - #define REQ_ALLOCED (1ULL << __REQ_ALLOCED) 206 - #define REQ_COPY_USER (1ULL << __REQ_COPY_USER) 207 216 #define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) 208 - #define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ) 209 - #define REQ_IO_STAT (1ULL << __REQ_IO_STAT) 210 - #define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE) 211 - #define REQ_PM (1ULL << __REQ_PM) 212 - #define REQ_HASHED (1ULL << __REQ_HASHED) 213 - #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) 214 217 215 218 enum req_op { 216 219 REQ_OP_READ,
+48 -1
include/linux/blkdev.h
··· 78 78 REQ_TYPE_DRV_PRIV, /* driver defined types from here */ 79 79 }; 80 80 81 + /* 82 + * request flags */ 83 + typedef __u32 __bitwise req_flags_t; 84 + 85 + /* elevator knows about this request */ 86 + #define RQF_SORTED ((__force req_flags_t)(1 << 0)) 87 + /* drive already may have started this one */ 88 + #define RQF_STARTED ((__force req_flags_t)(1 << 1)) 89 + /* uses tagged queueing */ 90 + #define RQF_QUEUED ((__force req_flags_t)(1 << 2)) 91 + /* may not be passed by ioscheduler */ 92 + #define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) 93 + /* request for flush sequence */ 94 + #define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) 95 + /* merge of different types, fail separately */ 96 + #define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) 97 + /* track inflight for MQ */ 98 + #define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) 99 + /* don't call prep for this one */ 100 + #define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) 101 + /* set for "ide_preempt" requests and also for requests for which the SCSI 102 + "quiesce" state must be ignored. */ 103 + #define RQF_PREEMPT ((__force req_flags_t)(1 << 8)) 104 + /* contains copies of user pages */ 105 + #define RQF_COPY_USER ((__force req_flags_t)(1 << 9)) 106 + /* vaguely specified driver internal error. 
Ignored by the block layer */ 107 + #define RQF_FAILED ((__force req_flags_t)(1 << 10)) 108 + /* don't warn about errors */ 109 + #define RQF_QUIET ((__force req_flags_t)(1 << 11)) 110 + /* elevator private data attached */ 111 + #define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) 112 + /* account I/O stat */ 113 + #define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) 114 + /* request came from our alloc pool */ 115 + #define RQF_ALLOCED ((__force req_flags_t)(1 << 14)) 116 + /* runtime pm request */ 117 + #define RQF_PM ((__force req_flags_t)(1 << 15)) 118 + /* on IO scheduler merge hash */ 119 + #define RQF_HASHED ((__force req_flags_t)(1 << 16)) 120 + 121 + /* flags that prevent us from merging requests: */ 122 + #define RQF_NOMERGE_FLAGS \ 123 + (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ) 124 + 81 125 #define BLK_MAX_CDB 16 82 126 83 127 /* ··· 143 99 int cpu; 144 100 unsigned cmd_type; 145 101 u64 cmd_flags; 102 + req_flags_t rq_flags; 146 103 unsigned long atomic_flags; 147 104 148 105 /* the following two fields are internal, NEVER access directly */ ··· 693 648 REQ_FAILFAST_DRIVER)) 694 649 695 650 #define blk_account_rq(rq) \ 696 - (((rq)->cmd_flags & REQ_STARTED) && \ 651 + (((rq)->rq_flags & RQF_STARTED) && \ 697 652 ((rq)->cmd_type == REQ_TYPE_FS)) 698 653 699 654 #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) ··· 784 739 return false; 785 740 786 741 if (rq->cmd_flags & REQ_NOMERGE_FLAGS) 742 + return false; 743 + if (rq->rq_flags & RQF_NOMERGE_FLAGS) 787 744 return false; 788 745 789 746 return true;
+2 -2
include/scsi/scsi_device.h
··· 414 414 extern int scsi_execute_req_flags(struct scsi_device *sdev, 415 415 const unsigned char *cmd, int data_direction, void *buffer, 416 416 unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, 417 - int retries, int *resid, u64 flags); 417 + int retries, int *resid, u64 flags, req_flags_t rq_flags); 418 418 static inline int scsi_execute_req(struct scsi_device *sdev, 419 419 const unsigned char *cmd, int data_direction, void *buffer, 420 420 unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, 421 421 int retries, int *resid) 422 422 { 423 423 return scsi_execute_req_flags(sdev, cmd, data_direction, buffer, 424 - bufflen, sshdr, timeout, retries, resid, 0); 424 + bufflen, sshdr, timeout, retries, resid, 0, 0); 425 425 } 426 426 extern void sdev_disable_disk_events(struct scsi_device *sdev); 427 427 extern void sdev_enable_disk_events(struct scsi_device *sdev);