Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: fold cmd_type into the REQ_OP_ space

Instead of keeping two levels of indirection for requests types, fold it
all into the operations. The little caveat here is that previously
cmd_type only applied to struct request, while the request and bio op
fields were set to plain REQ_OP_READ/WRITE even for passthrough
operations.

Instead this patch adds new REQ_OP_* for SCSI passthrough and driver
private requests, although it has to add two for each so that we
can communicate the data in/out nature of the request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>

authored by

Christoph Hellwig and committed by
Jens Axboe
aebf526b 2f5a8e80

+338 -326
-10
block/bio.c
··· 1227 1227 if (!bio) 1228 1228 goto out_bmd; 1229 1229 1230 - if (iter->type & WRITE) 1231 - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 1232 - 1233 1230 ret = 0; 1234 1231 1235 1232 if (map_data) { ··· 1390 1393 } 1391 1394 1392 1395 kfree(pages); 1393 - 1394 - /* 1395 - * set data direction, and check if mapped pages need bouncing 1396 - */ 1397 - if (iter->type & WRITE) 1398 - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 1399 1396 1400 1397 bio_set_flag(bio, BIO_USER_MAPPED); 1401 1398 ··· 1581 1590 bio->bi_private = data; 1582 1591 } else { 1583 1592 bio->bi_end_io = bio_copy_kern_endio; 1584 - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 1585 1593 } 1586 1594 1587 1595 return bio;
+2 -4
block/blk-core.c
··· 158 158 159 159 void blk_dump_rq_flags(struct request *rq, char *msg) 160 160 { 161 - printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg, 162 - rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, 161 + printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg, 162 + rq->rq_disk ? rq->rq_disk->disk_name : "?", 163 163 (unsigned long long) rq->cmd_flags); 164 164 165 165 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", ··· 1593 1593 1594 1594 void init_request_from_bio(struct request *req, struct bio *bio) 1595 1595 { 1596 - req->cmd_type = REQ_TYPE_FS; 1597 1596 if (bio->bi_opf & REQ_RAHEAD) 1598 1597 req->cmd_flags |= REQ_FAILFAST_MASK; 1599 1598 ··· 2982 2983 static void __blk_rq_prep_clone(struct request *dst, struct request *src) 2983 2984 { 2984 2985 dst->cpu = src->cpu; 2985 - dst->cmd_type = src->cmd_type; 2986 2986 dst->__sector = blk_rq_pos(src); 2987 2987 dst->__data_len = blk_rq_bytes(src); 2988 2988 dst->nr_phys_segments = src->nr_phys_segments;
-1
block/blk-flush.c
··· 327 327 blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq); 328 328 } 329 329 330 - flush_rq->cmd_type = REQ_TYPE_FS; 331 330 flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH; 332 331 flush_rq->rq_flags |= RQF_FLUSH_SEQ; 333 332 flush_rq->rq_disk = first_rq->rq_disk;
+7 -6
block/blk-map.c
··· 16 16 int blk_rq_append_bio(struct request *rq, struct bio *bio) 17 17 { 18 18 if (!rq->bio) { 19 - rq->cmd_flags &= REQ_OP_MASK; 20 - rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK); 21 19 blk_rq_bio_prep(rq->q, rq, bio); 22 20 } else { 23 21 if (!ll_back_merge_fn(rq->q, rq, bio)) ··· 60 62 if (IS_ERR(bio)) 61 63 return PTR_ERR(bio); 62 64 65 + bio->bi_opf &= ~REQ_OP_MASK; 66 + bio->bi_opf |= req_op(rq); 67 + 63 68 if (map_data && map_data->null_mapped) 64 69 bio_set_flag(bio, BIO_NULL_MAPPED); 65 70 ··· 91 90 } 92 91 93 92 /** 94 - * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage 93 + * blk_rq_map_user_iov - map user data to a request, for passthrough requests 95 94 * @q: request queue where request should be inserted 96 95 * @rq: request to map data to 97 96 * @map_data: pointer to the rq_map_data holding pages (if necessary) ··· 200 199 EXPORT_SYMBOL(blk_rq_unmap_user); 201 200 202 201 /** 203 - * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage 202 + * blk_rq_map_kern - map kernel data to a request, for passthrough requests 204 203 * @q: request queue where request should be inserted 205 204 * @rq: request to fill 206 205 * @kbuf: the kernel buffer ··· 235 234 if (IS_ERR(bio)) 236 235 return PTR_ERR(bio); 237 236 238 - if (!reading) 239 - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); 237 + bio->bi_opf &= ~REQ_OP_MASK; 238 + bio->bi_opf |= req_op(rq); 240 239 241 240 if (do_copy) 242 241 rq->rq_flags |= RQF_COPY_USER;
+2 -2
block/blk-mq-debugfs.c
··· 88 88 { 89 89 struct request *rq = list_entry_rq(v); 90 90 91 - seq_printf(m, "%p {.cmd_type=%u, .cmd_flags=0x%x, .rq_flags=0x%x, .tag=%d, .internal_tag=%d}\n", 92 - rq, rq->cmd_type, rq->cmd_flags, (unsigned int)rq->rq_flags, 91 + seq_printf(m, "%p {.cmd_flags=0x%x, .rq_flags=0x%x, .tag=%d, .internal_tag=%d}\n", 92 + rq, rq->cmd_flags, (unsigned int)rq->rq_flags, 93 93 rq->tag, rq->internal_tag); 94 94 return 0; 95 95 }
+8 -9
block/bsg.c
··· 177 177 * Check if sg_io_v4 from user is allowed and valid 178 178 */ 179 179 static int 180 - bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw) 180 + bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *op) 181 181 { 182 182 int ret = 0; 183 183 ··· 198 198 ret = -EINVAL; 199 199 } 200 200 201 - *rw = hdr->dout_xfer_len ? WRITE : READ; 201 + *op = hdr->dout_xfer_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN; 202 202 return ret; 203 203 } 204 204 ··· 210 210 { 211 211 struct request_queue *q = bd->queue; 212 212 struct request *rq, *next_rq = NULL; 213 - int ret, rw; 214 - unsigned int dxfer_len; 213 + int ret; 214 + unsigned int op, dxfer_len; 215 215 void __user *dxferp = NULL; 216 216 struct bsg_class_device *bcd = &q->bsg_dev; 217 217 ··· 226 226 hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp, 227 227 hdr->din_xfer_len); 228 228 229 - ret = bsg_validate_sgv4_hdr(hdr, &rw); 229 + ret = bsg_validate_sgv4_hdr(hdr, &op); 230 230 if (ret) 231 231 return ERR_PTR(ret); 232 232 233 233 /* 234 234 * map scatter-gather elements separately and string them to request 235 235 */ 236 - rq = blk_get_request(q, rw, GFP_KERNEL); 236 + rq = blk_get_request(q, op, GFP_KERNEL); 237 237 if (IS_ERR(rq)) 238 238 return rq; 239 239 scsi_req_init(rq); ··· 242 242 if (ret) 243 243 goto out; 244 244 245 - if (rw == WRITE && hdr->din_xfer_len) { 245 + if (op == REQ_OP_SCSI_OUT && hdr->din_xfer_len) { 246 246 if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) { 247 247 ret = -EOPNOTSUPP; 248 248 goto out; 249 249 } 250 250 251 - next_rq = blk_get_request(q, READ, GFP_KERNEL); 251 + next_rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL); 252 252 if (IS_ERR(next_rq)) { 253 253 ret = PTR_ERR(next_rq); 254 254 next_rq = NULL; 255 255 goto out; 256 256 } 257 257 rq->next_rq = next_rq; 258 - next_rq->cmd_type = rq->cmd_type; 259 258 260 259 dxferp = (void __user *)(unsigned long)hdr->din_xferp; 261 260 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
+5 -4
block/scsi_ioctl.c
··· 321 321 at_head = 1; 322 322 323 323 ret = -ENOMEM; 324 - rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL); 324 + rq = blk_get_request(q, writing ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 325 + GFP_KERNEL); 325 326 if (IS_ERR(rq)) 326 327 return PTR_ERR(rq); 327 328 req = scsi_req(rq); ··· 449 448 450 449 } 451 450 452 - rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_RECLAIM); 451 + rq = blk_get_request(q, in_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 452 + __GFP_RECLAIM); 453 453 if (IS_ERR(rq)) { 454 454 err = PTR_ERR(rq); 455 455 goto error_free_buffer; ··· 539 537 struct request *rq; 540 538 int err; 541 539 542 - rq = blk_get_request(q, WRITE, __GFP_RECLAIM); 540 + rq = blk_get_request(q, REQ_OP_SCSI_OUT, __GFP_RECLAIM); 543 541 if (IS_ERR(rq)) 544 542 return PTR_ERR(rq); 545 543 scsi_req_init(rq); ··· 747 745 { 748 746 struct scsi_request *req = scsi_req(rq); 749 747 750 - rq->cmd_type = REQ_TYPE_BLOCK_PC; 751 748 memset(req->__cmd, 0, sizeof(req->__cmd)); 752 749 req->cmd = req->__cmd; 753 750 req->cmd_len = BLK_MAX_CDB;
+9 -4
drivers/block/cciss.c
··· 3394 3394 c->Header.SGList = h->max_cmd_sgentries; 3395 3395 set_performant_mode(h, c); 3396 3396 3397 - if (likely(creq->cmd_type == REQ_TYPE_FS)) { 3397 + switch (req_op(creq)) { 3398 + case REQ_OP_READ: 3399 + case REQ_OP_WRITE: 3398 3400 if(h->cciss_read == CCISS_READ_10) { 3399 3401 c->Request.CDB[1] = 0; 3400 3402 c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */ ··· 3426 3424 c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff; 3427 3425 c->Request.CDB[14] = c->Request.CDB[15] = 0; 3428 3426 } 3429 - } else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) { 3427 + break; 3428 + case REQ_OP_SCSI_IN: 3429 + case REQ_OP_SCSI_OUT: 3430 3430 c->Request.CDBLen = scsi_req(creq)->cmd_len; 3431 3431 memcpy(c->Request.CDB, scsi_req(creq)->cmd, BLK_MAX_CDB); 3432 3432 scsi_req(creq)->sense = c->err_info->SenseInfo; 3433 - } else { 3433 + break; 3434 + default: 3434 3435 dev_warn(&h->pdev->dev, "bad request type %d\n", 3435 - creq->cmd_type); 3436 + creq->cmd_flags); 3436 3437 BUG(); 3437 3438 } 3438 3439
+2 -2
drivers/block/floppy.c
··· 2900 2900 return; 2901 2901 2902 2902 if (WARN(atomic_read(&usage_count) == 0, 2903 - "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n", 2904 - current_req, (long)blk_rq_pos(current_req), current_req->cmd_type, 2903 + "warning: usage count=0, current_req=%p sect=%ld flags=%llx\n", 2904 + current_req, (long)blk_rq_pos(current_req), 2905 2905 (unsigned long long) current_req->cmd_flags)) 2906 2906 return; 2907 2907
+22 -23
drivers/block/hd.c
··· 626 626 req_data_dir(req) == READ ? "read" : "writ", 627 627 cyl, head, sec, nsect, bio_data(req->bio)); 628 628 #endif 629 - if (req->cmd_type == REQ_TYPE_FS) { 630 - switch (rq_data_dir(req)) { 631 - case READ: 632 - hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ, 633 - &read_intr); 634 - if (reset) 635 - goto repeat; 636 - break; 637 - case WRITE: 638 - hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE, 639 - &write_intr); 640 - if (reset) 641 - goto repeat; 642 - if (wait_DRQ()) { 643 - bad_rw_intr(); 644 - goto repeat; 645 - } 646 - outsw(HD_DATA, bio_data(req->bio), 256); 647 - break; 648 - default: 649 - printk("unknown hd-command\n"); 650 - hd_end_request_cur(-EIO); 651 - break; 629 + 630 + switch (req_op(req)) { 631 + case REQ_OP_READ: 632 + hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ, 633 + &read_intr); 634 + if (reset) 635 + goto repeat; 636 + break; 637 + case REQ_OP_WRITE: 638 + hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE, 639 + &write_intr); 640 + if (reset) 641 + goto repeat; 642 + if (wait_DRQ()) { 643 + bad_rw_intr(); 644 + goto repeat; 652 645 } 646 + outsw(HD_DATA, bio_data(req->bio), 256); 647 + break; 648 + default: 649 + printk("unknown hd-command\n"); 650 + hd_end_request_cur(-EIO); 651 + break; 653 652 } 654 653 } 655 654
+17 -14
drivers/block/mg_disk.c
··· 670 670 break; 671 671 } 672 672 673 - if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) { 674 - mg_end_request_cur(host, -EIO); 675 - continue; 676 - } 677 - 678 - if (rq_data_dir(host->req) == READ) 673 + switch (req_op(host->req)) { 674 + case REQ_OP_READ: 679 675 mg_read(host->req); 680 - else 676 + break; 677 + case REQ_OP_WRITE: 681 678 mg_write(host->req); 679 + break; 680 + default: 681 + mg_end_request_cur(host, -EIO); 682 + break; 683 + } 682 684 } 683 685 } 684 686 ··· 689 687 unsigned int sect_num, 690 688 unsigned int sect_cnt) 691 689 { 692 - if (rq_data_dir(req) == READ) { 690 + switch (req_op(host->req)) { 691 + case REQ_OP_READ: 693 692 if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr) 694 693 != MG_ERR_NONE) { 695 694 mg_bad_rw_intr(host); 696 695 return host->error; 697 696 } 698 - } else { 697 + break; 698 + case REQ_OP_WRITE: 699 699 /* TODO : handler */ 700 700 outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 701 701 if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr) ··· 716 712 mod_timer(&host->timer, jiffies + 3 * HZ); 717 713 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + 718 714 MG_REG_COMMAND); 715 + break; 716 + default: 717 + mg_end_request_cur(host, -EIO); 718 + break; 719 719 } 720 720 return MG_ERR_NONE; 721 721 } ··· 757 749 "%s: bad access: sector=%d, count=%d\n", 758 750 req->rq_disk->disk_name, 759 751 sect_num, sect_cnt); 760 - mg_end_request_cur(host, -EIO); 761 - continue; 762 - } 763 - 764 - if (unlikely(req->cmd_type != REQ_TYPE_FS)) { 765 752 mg_end_request_cur(host, -EIO); 766 753 continue; 767 754 }
+12 -7
drivers/block/nbd.c
··· 271 271 u32 type; 272 272 u32 tag = blk_mq_unique_tag(req); 273 273 274 - if (req->cmd_type != REQ_TYPE_FS) 275 - return -EIO; 276 - 277 - if (req_op(req) == REQ_OP_DISCARD) 274 + switch (req_op(req)) { 275 + case REQ_OP_DISCARD: 278 276 type = NBD_CMD_TRIM; 279 - else if (req_op(req) == REQ_OP_FLUSH) 277 + break; 278 + case REQ_OP_FLUSH: 280 279 type = NBD_CMD_FLUSH; 281 - else if (rq_data_dir(req) == WRITE) 280 + break; 281 + case REQ_OP_WRITE: 282 282 type = NBD_CMD_WRITE; 283 - else 283 + break; 284 + case REQ_OP_READ: 284 285 type = NBD_CMD_READ; 286 + break; 287 + default: 288 + return -EIO; 289 + } 285 290 286 291 if (rq_data_dir(req) == WRITE && 287 292 (nbd->flags & NBD_FLAG_READ_ONLY)) {
+2 -2
drivers/block/null_blk.c
··· 431 431 struct request *rq; 432 432 struct bio *bio = rqd->bio; 433 433 434 - rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0); 434 + rq = blk_mq_alloc_request(q, 435 + op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); 435 436 if (IS_ERR(rq)) 436 437 return -ENOMEM; 437 438 438 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 439 439 rq->__sector = bio->bi_iter.bi_sector; 440 440 rq->ioprio = bio_prio(bio); 441 441
-6
drivers/block/osdblk.c
··· 308 308 if (!rq) 309 309 break; 310 310 311 - /* filter out block requests we don't understand */ 312 - if (rq->cmd_type != REQ_TYPE_FS) { 313 - blk_end_request_all(rq, 0); 314 - continue; 315 - } 316 - 317 311 /* deduce our operation (read, write, flush) */ 318 312 /* I wish the block layer simplified cmd_type/cmd_flags/cmd[] 319 313 * into a clearly defined set of RPC commands:
+6 -9
drivers/block/paride/pd.c
··· 439 439 static int pd_block; /* address of next requested block */ 440 440 static int pd_count; /* number of blocks still to do */ 441 441 static int pd_run; /* sectors in current cluster */ 442 - static int pd_cmd; /* current command READ/WRITE */ 443 442 static char *pd_buf; /* buffer for request in progress */ 444 443 445 444 static enum action do_pd_io_start(void) 446 445 { 447 - if (pd_req->cmd_type == REQ_TYPE_DRV_PRIV) { 446 + switch (req_op(pd_req)) { 447 + case REQ_OP_DRV_IN: 448 448 phase = pd_special; 449 449 return pd_special(); 450 - } 451 - 452 - pd_cmd = rq_data_dir(pd_req); 453 - if (pd_cmd == READ || pd_cmd == WRITE) { 450 + case REQ_OP_READ: 451 + case REQ_OP_WRITE: 454 452 pd_block = blk_rq_pos(pd_req); 455 453 pd_count = blk_rq_cur_sectors(pd_req); 456 454 if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) ··· 456 458 pd_run = blk_rq_sectors(pd_req); 457 459 pd_buf = bio_data(pd_req->bio); 458 460 pd_retries = 0; 459 - if (pd_cmd == READ) 461 + if (req_op(pd_req) == REQ_OP_READ) 460 462 return do_pd_read_start(); 461 463 else 462 464 return do_pd_write_start(); ··· 721 723 struct request *rq; 722 724 int err = 0; 723 725 724 - rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM); 726 + rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); 725 727 if (IS_ERR(rq)) 726 728 return PTR_ERR(rq); 727 729 728 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 729 730 rq->special = func; 730 731 731 732 err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
+1 -1
drivers/block/pktcdvd.c
··· 704 704 int ret = 0; 705 705 706 706 rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? 707 - WRITE : READ, __GFP_RECLAIM); 707 + REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM); 708 708 if (IS_ERR(rq)) 709 709 return PTR_ERR(rq); 710 710 scsi_req_init(rq);
+9 -6
drivers/block/ps3disk.c
··· 196 196 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); 197 197 198 198 while ((req = blk_fetch_request(q))) { 199 - if (req_op(req) == REQ_OP_FLUSH) { 199 + switch (req_op(req)) { 200 + case REQ_OP_FLUSH: 200 201 if (ps3disk_submit_flush_request(dev, req)) 201 - break; 202 - } else if (req->cmd_type == REQ_TYPE_FS) { 202 + return; 203 + break; 204 + case REQ_OP_READ: 205 + case REQ_OP_WRITE: 203 206 if (ps3disk_submit_request_sg(dev, req)) 204 - break; 205 - } else { 207 + return; 208 + break; 209 + default: 206 210 blk_dump_rq_flags(req, DEVICE_NAME " bad request"); 207 211 __blk_end_request_all(req, -EIO); 208 - continue; 209 212 } 210 213 } 211 214 }
+12 -10
drivers/block/rbd.c
··· 4099 4099 bool must_be_locked; 4100 4100 int result; 4101 4101 4102 - if (rq->cmd_type != REQ_TYPE_FS) { 4103 - dout("%s: non-fs request type %d\n", __func__, 4104 - (int) rq->cmd_type); 4102 + switch (req_op(rq)) { 4103 + case REQ_OP_DISCARD: 4104 + op_type = OBJ_OP_DISCARD; 4105 + break; 4106 + case REQ_OP_WRITE: 4107 + op_type = OBJ_OP_WRITE; 4108 + break; 4109 + case REQ_OP_READ: 4110 + op_type = OBJ_OP_READ; 4111 + break; 4112 + default: 4113 + dout("%s: non-fs request type %d\n", __func__, req_op(rq)); 4105 4114 result = -EIO; 4106 4115 goto err; 4107 4116 } 4108 - 4109 - if (req_op(rq) == REQ_OP_DISCARD) 4110 - op_type = OBJ_OP_DISCARD; 4111 - else if (req_op(rq) == REQ_OP_WRITE) 4112 - op_type = OBJ_OP_WRITE; 4113 - else 4114 - op_type = OBJ_OP_READ; 4115 4117 4116 4118 /* Ignore/skip any zero-length requests */ 4117 4119
+1 -3
drivers/block/sx8.c
··· 567 567 if (!crq) 568 568 return NULL; 569 569 570 - rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL); 570 + rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, GFP_KERNEL); 571 571 if (IS_ERR(rq)) { 572 572 spin_lock_irqsave(&host->lock, flags); 573 573 carm_put_request(host, crq); ··· 620 620 spin_unlock_irq(&host->lock); 621 621 622 622 DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx); 623 - crq->rq->cmd_type = REQ_TYPE_DRV_PRIV; 624 623 crq->rq->special = crq; 625 624 blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); 626 625 ··· 660 661 crq->msg_bucket = (u32) rc; 661 662 662 663 DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx); 663 - crq->rq->cmd_type = REQ_TYPE_DRV_PRIV; 664 664 crq->rq->special = crq; 665 665 blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); 666 666
+30 -31
drivers/block/virtio_blk.c
··· 175 175 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); 176 176 int error = virtblk_result(vbr); 177 177 178 - switch (req->cmd_type) { 179 - case REQ_TYPE_BLOCK_PC: 178 + switch (req_op(req)) { 179 + case REQ_OP_SCSI_IN: 180 + case REQ_OP_SCSI_OUT: 180 181 virtblk_scsi_reques_done(req); 181 182 break; 182 - case REQ_TYPE_DRV_PRIV: 183 + case REQ_OP_DRV_IN: 183 184 req->errors = (error != 0); 184 185 break; 185 186 } ··· 227 226 int qid = hctx->queue_num; 228 227 int err; 229 228 bool notify = false; 229 + u32 type; 230 230 231 231 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); 232 232 233 - if (req_op(req) == REQ_OP_FLUSH) { 234 - vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH); 235 - vbr->out_hdr.sector = 0; 236 - vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req)); 237 - } else { 238 - switch (req->cmd_type) { 239 - case REQ_TYPE_FS: 240 - vbr->out_hdr.type = 0; 241 - vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(req)); 242 - vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req)); 243 - break; 244 - case REQ_TYPE_BLOCK_PC: 245 - vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD); 246 - vbr->out_hdr.sector = 0; 247 - vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req)); 248 - break; 249 - case REQ_TYPE_DRV_PRIV: 250 - vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID); 251 - vbr->out_hdr.sector = 0; 252 - vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req)); 253 - break; 254 - default: 255 - /* We don't put anything else in the queue. 
*/ 256 - BUG(); 257 - } 233 + switch (req_op(req)) { 234 + case REQ_OP_READ: 235 + case REQ_OP_WRITE: 236 + type = 0; 237 + break; 238 + case REQ_OP_FLUSH: 239 + type = VIRTIO_BLK_T_FLUSH; 240 + break; 241 + case REQ_OP_SCSI_IN: 242 + case REQ_OP_SCSI_OUT: 243 + type = VIRTIO_BLK_T_SCSI_CMD; 244 + break; 245 + case REQ_OP_DRV_IN: 246 + type = VIRTIO_BLK_T_GET_ID; 247 + break; 248 + default: 249 + WARN_ON_ONCE(1); 250 + return BLK_MQ_RQ_QUEUE_ERROR; 258 251 } 252 + 253 + vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type); 254 + vbr->out_hdr.sector = type ? 255 + 0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req)); 256 + vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req)); 259 257 260 258 blk_mq_start_request(req); 261 259 ··· 267 267 } 268 268 269 269 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); 270 - if (req->cmd_type == REQ_TYPE_BLOCK_PC) 270 + if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT) 271 271 err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num); 272 272 else 273 273 err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); ··· 300 300 struct request *req; 301 301 int err; 302 302 303 - req = blk_get_request(q, READ, GFP_KERNEL); 303 + req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL); 304 304 if (IS_ERR(req)) 305 305 return PTR_ERR(req); 306 - req->cmd_type = REQ_TYPE_DRV_PRIV; 307 306 308 307 err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); 309 308 if (err)
+1 -1
drivers/block/xen-blkfront.c
··· 865 865 static inline bool blkif_request_flush_invalid(struct request *req, 866 866 struct blkfront_info *info) 867 867 { 868 - return ((req->cmd_type != REQ_TYPE_FS) || 868 + return (blk_rq_is_passthrough(req) || 869 869 ((req_op(req) == REQ_OP_FLUSH) && 870 870 !info->feature_flush) || 871 871 ((req->cmd_flags & REQ_FUA) &&
+1 -1
drivers/block/xsysace.c
··· 468 468 struct request *req; 469 469 470 470 while ((req = blk_peek_request(q)) != NULL) { 471 - if (req->cmd_type == REQ_TYPE_FS) 471 + if (!blk_rq_is_passthrough(req)) 472 472 break; 473 473 blk_start_request(req); 474 474 __blk_end_request_all(req, -EIO);
+1 -1
drivers/cdrom/cdrom.c
··· 2191 2191 2192 2192 len = nr * CD_FRAMESIZE_RAW; 2193 2193 2194 - rq = blk_get_request(q, READ, GFP_KERNEL); 2194 + rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL); 2195 2195 if (IS_ERR(rq)) { 2196 2196 ret = PTR_ERR(rq); 2197 2197 break;
+15 -14
drivers/cdrom/gdrom.c
··· 659 659 struct request *req; 660 660 661 661 while ((req = blk_fetch_request(rq)) != NULL) { 662 - if (req->cmd_type != REQ_TYPE_FS) { 663 - printk(KERN_DEBUG "gdrom: Non-fs request ignored\n"); 664 - __blk_end_request_all(req, -EIO); 665 - continue; 666 - } 667 - if (rq_data_dir(req) != READ) { 662 + switch (req_op(req)) { 663 + case REQ_OP_READ: 664 + /* 665 + * Add to list of deferred work and then schedule 666 + * workqueue. 667 + */ 668 + list_add_tail(&req->queuelist, &gdrom_deferred); 669 + schedule_work(&work); 670 + break; 671 + case REQ_OP_WRITE: 668 672 pr_notice("Read only device - write request ignored\n"); 669 673 __blk_end_request_all(req, -EIO); 670 - continue; 674 + break; 675 + default: 676 + printk(KERN_DEBUG "gdrom: Non-fs request ignored\n"); 677 + __blk_end_request_all(req, -EIO); 678 + break; 671 679 } 672 - 673 - /* 674 - * Add to list of deferred work and then schedule 675 - * workqueue. 676 - */ 677 - list_add_tail(&req->queuelist, &gdrom_deferred); 678 - schedule_work(&work); 679 680 } 680 681 } 681 682
+11 -10
drivers/ide/ide-atapi.c
··· 92 92 struct request *rq; 93 93 int error; 94 94 95 - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 95 + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); 96 96 scsi_req_init(rq); 97 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 98 97 ide_req(rq)->type = ATA_PRIV_MISC; 99 98 rq->special = (char *)pc; 100 99 ··· 211 212 } 212 213 213 214 sense_rq->rq_disk = rq->rq_disk; 214 - sense_rq->cmd_type = REQ_TYPE_DRV_PRIV; 215 + sense_rq->cmd_flags = REQ_OP_DRV_IN; 215 216 ide_req(sense_rq)->type = ATA_PRIV_SENSE; 216 217 sense_rq->rq_flags |= RQF_PREEMPT; 217 218 ··· 311 312 312 313 int ide_cd_get_xferlen(struct request *rq) 313 314 { 314 - switch (rq->cmd_type) { 315 - case REQ_TYPE_FS: 315 + switch (req_op(rq)) { 316 + default: 316 317 return 32768; 317 - case REQ_TYPE_BLOCK_PC: 318 + case REQ_OP_SCSI_IN: 319 + case REQ_OP_SCSI_OUT: 318 320 return blk_rq_bytes(rq); 319 - case REQ_TYPE_DRV_PRIV: 321 + case REQ_OP_DRV_IN: 322 + case REQ_OP_DRV_OUT: 320 323 switch (ide_req(rq)->type) { 321 324 case ATA_PRIV_PC: 322 325 case ATA_PRIV_SENSE: 323 326 return blk_rq_bytes(rq); 327 + default: 328 + return 0; 324 329 } 325 - default: 326 - return 0; 327 330 } 328 331 } 329 332 EXPORT_SYMBOL_GPL(ide_cd_get_xferlen); ··· 492 491 error = 0; 493 492 } else { 494 493 495 - if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) { 494 + if (blk_rq_is_passthrough(rq) && uptodate <= 0) { 496 495 if (rq->errors == 0) 497 496 rq->errors = -EIO; 498 497 }
+45 -31
drivers/ide/ide-cd.c
··· 176 176 if (!sense->valid) 177 177 break; 178 178 if (failed_command == NULL || 179 - failed_command->cmd_type != REQ_TYPE_FS) 179 + blk_rq_is_passthrough(failed_command)) 180 180 break; 181 181 sector = (sense->information[0] << 24) | 182 182 (sense->information[1] << 16) | ··· 293 293 } 294 294 295 295 /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */ 296 - if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors) 296 + if (blk_rq_is_scsi(rq) && !rq->errors) 297 297 rq->errors = SAM_STAT_CHECK_CONDITION; 298 298 299 299 if (blk_noretry_request(rq)) ··· 301 301 302 302 switch (sense_key) { 303 303 case NOT_READY: 304 - if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) { 304 + if (req_op(rq) == REQ_OP_WRITE) { 305 305 if (ide_cd_breathe(drive, rq)) 306 306 return 1; 307 307 } else { 308 308 cdrom_saw_media_change(drive); 309 309 310 - if (rq->cmd_type == REQ_TYPE_FS && 310 + if (!blk_rq_is_passthrough(rq) && 311 311 !(rq->rq_flags & RQF_QUIET)) 312 312 printk(KERN_ERR PFX "%s: tray open\n", 313 313 drive->name); ··· 317 317 case UNIT_ATTENTION: 318 318 cdrom_saw_media_change(drive); 319 319 320 - if (rq->cmd_type != REQ_TYPE_FS) 320 + if (blk_rq_is_passthrough(rq)) 321 321 return 0; 322 322 323 323 /* ··· 365 365 do_end_request = 1; 366 366 break; 367 367 default: 368 - if (rq->cmd_type != REQ_TYPE_FS) 368 + if (blk_rq_is_passthrough(rq)) 369 369 break; 370 370 if (err & ~ATA_ABORTED) { 371 371 /* go to the default handler for other errors */ ··· 376 376 do_end_request = 1; 377 377 } 378 378 379 - if (rq->cmd_type != REQ_TYPE_FS) { 379 + if (blk_rq_is_passthrough(rq)) { 380 380 rq->rq_flags |= RQF_FAILED; 381 381 do_end_request = 1; 382 382 } ··· 435 435 int error; 436 436 bool delay = false; 437 437 438 - rq = blk_get_request(drive->queue, write, __GFP_RECLAIM); 438 + rq = blk_get_request(drive->queue, 439 + write ? 
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); 439 440 scsi_req_init(rq); 440 441 memcpy(scsi_req(rq)->cmd, cmd, BLK_MAX_CDB); 441 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 442 442 ide_req(rq)->type = ATA_PRIV_PC; 443 443 rq->rq_flags |= rq_flags; 444 444 rq->timeout = timeout; ··· 564 564 565 565 ide_read_bcount_and_ireason(drive, &len, &ireason); 566 566 567 - thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft; 567 + thislen = !blk_rq_is_passthrough(rq) ? len : cmd->nleft; 568 568 if (thislen > len) 569 569 thislen = len; 570 570 ··· 573 573 574 574 /* If DRQ is clear, the command has completed. */ 575 575 if ((stat & ATA_DRQ) == 0) { 576 - if (rq->cmd_type == REQ_TYPE_FS) { 576 + switch (req_op(rq)) { 577 + default: 577 578 /* 578 579 * If we're not done reading/writing, complain. 579 580 * Otherwise, complete the command normally. ··· 588 587 rq->rq_flags |= RQF_FAILED; 589 588 uptodate = 0; 590 589 } 591 - } else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { 590 + goto out_end; 591 + case REQ_OP_DRV_IN: 592 + case REQ_OP_DRV_OUT: 592 593 ide_cd_request_sense_fixup(drive, cmd); 593 594 594 595 uptodate = cmd->nleft ? 
0 : 1; ··· 606 603 607 604 if (!uptodate) 608 605 rq->rq_flags |= RQF_FAILED; 606 + goto out_end; 607 + case REQ_OP_SCSI_IN: 608 + case REQ_OP_SCSI_OUT: 609 + goto out_end; 609 610 } 610 - goto out_end; 611 611 } 612 612 613 613 rc = ide_check_ireason(drive, rq, len, ireason, write); ··· 642 636 643 637 /* pad, if necessary */ 644 638 if (len > 0) { 645 - if (rq->cmd_type != REQ_TYPE_FS || write == 0) 639 + if (blk_rq_is_passthrough(rq) || write == 0) 646 640 ide_pad_transfer(drive, write, len); 647 641 else { 648 642 printk(KERN_ERR PFX "%s: confused, missing data\n", ··· 651 645 } 652 646 } 653 647 654 - if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 648 + switch (req_op(rq)) { 649 + case REQ_OP_SCSI_IN: 650 + case REQ_OP_SCSI_OUT: 655 651 timeout = rq->timeout; 656 - } else { 652 + break; 653 + case REQ_OP_DRV_IN: 654 + case REQ_OP_DRV_OUT: 655 + expiry = ide_cd_expiry; 656 + /*FALLTHRU*/ 657 + default: 657 658 timeout = ATAPI_WAIT_PC; 658 - if (rq->cmd_type != REQ_TYPE_FS) 659 - expiry = ide_cd_expiry; 659 + break; 660 660 } 661 661 662 662 hwif->expiry = expiry; ··· 670 658 return ide_started; 671 659 672 660 out_end: 673 - if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) { 661 + if (blk_rq_is_scsi(rq) && rc == 0) { 674 662 scsi_req(rq)->resid_len = 0; 675 663 blk_end_request_all(rq, 0); 676 664 hwif->rq = NULL; ··· 678 666 if (sense && uptodate) 679 667 ide_cd_complete_failed_rq(drive, rq); 680 668 681 - if (rq->cmd_type == REQ_TYPE_FS) { 669 + if (!blk_rq_is_passthrough(rq)) { 682 670 if (cmd->nleft == 0) 683 671 uptodate = 1; 684 672 } else { ··· 691 679 return ide_stopped; 692 680 693 681 /* make sure it's fully ended */ 694 - if (rq->cmd_type != REQ_TYPE_FS) { 682 + if (blk_rq_is_passthrough(rq)) { 695 683 scsi_req(rq)->resid_len -= cmd->nbytes - cmd->nleft; 696 684 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) 697 685 scsi_req(rq)->resid_len += cmd->last_xfer_len; ··· 751 739 ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x", 752 740 
rq->cmd[0], rq->cmd_type); 753 741 754 - if (rq->cmd_type == REQ_TYPE_BLOCK_PC) 742 + if (blk_rq_is_scsi(rq)) 755 743 rq->rq_flags |= RQF_QUIET; 756 744 else 757 745 rq->rq_flags &= ~RQF_FAILED; ··· 793 781 if (drive->debug_mask & IDE_DBG_RQ) 794 782 blk_dump_rq_flags(rq, "ide_cd_do_request"); 795 783 796 - switch (rq->cmd_type) { 797 - case REQ_TYPE_FS: 784 + switch (req_op(rq)) { 785 + default: 798 786 if (cdrom_start_rw(drive, rq) == ide_stopped) 799 787 goto out_end; 800 788 break; 801 - case REQ_TYPE_BLOCK_PC: 789 + case REQ_OP_SCSI_IN: 790 + case REQ_OP_SCSI_OUT: 802 791 handle_pc: 803 792 if (!rq->timeout) 804 793 rq->timeout = ATAPI_WAIT_PC; 805 794 cdrom_do_block_pc(drive, rq); 806 795 break; 807 - case REQ_TYPE_DRV_PRIV: 796 + case REQ_OP_DRV_IN: 797 + case REQ_OP_DRV_OUT: 808 798 switch (ide_req(rq)->type) { 809 799 case ATA_PRIV_MISC: 810 800 /* right now this can only be a reset... */ ··· 815 801 case ATA_PRIV_SENSE: 816 802 case ATA_PRIV_PC: 817 803 goto handle_pc; 804 + default: 805 + BUG(); 818 806 } 819 - default: 820 - BUG(); 821 807 } 822 808 823 809 /* prepare sense request for this command */ ··· 830 816 831 817 cmd.rq = rq; 832 818 833 - if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { 819 + if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) { 834 820 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); 835 821 ide_map_sg(drive, &cmd); 836 822 } ··· 1387 1373 1388 1374 static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq) 1389 1375 { 1390 - if (rq->cmd_type == REQ_TYPE_FS) 1376 + if (!blk_rq_is_passthrough(rq)) 1391 1377 return ide_cdrom_prep_fs(q, rq); 1392 - else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) 1378 + else if (blk_rq_is_scsi(rq)) 1393 1379 return ide_cdrom_prep_pc(rq); 1394 1380 1395 1381 return 0;
+1 -2
drivers/ide/ide-cd_ioctl.c
··· 303 303 struct request *rq; 304 304 int ret; 305 305 306 - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 306 + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); 307 307 scsi_req_init(rq); 308 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 309 308 ide_req(rq)->type = ATA_PRIV_MISC; 310 309 rq->rq_flags = RQF_QUIET; 311 310 ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
+1 -2
drivers/ide/ide-devsets.c
··· 165 165 if (!(setting->flags & DS_SYNC)) 166 166 return setting->set(drive, arg); 167 167 168 - rq = blk_get_request(q, READ, __GFP_RECLAIM); 168 + rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM); 169 169 scsi_req_init(rq); 170 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 171 170 ide_req(rq)->type = ATA_PRIV_MISC; 172 171 scsi_req(rq)->cmd_len = 5; 173 172 scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
+4 -5
drivers/ide/ide-disk.c
··· 184 184 ide_hwif_t *hwif = drive->hwif; 185 185 186 186 BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED); 187 - BUG_ON(rq->cmd_type != REQ_TYPE_FS); 187 + BUG_ON(blk_rq_is_passthrough(rq)); 188 188 189 189 ledtrig_disk_activity(); 190 190 ··· 452 452 cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; 453 453 cmd->tf_flags = IDE_TFLAG_DYN; 454 454 cmd->protocol = ATA_PROT_NODATA; 455 - 456 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 455 + rq->cmd_flags &= ~REQ_OP_MASK; 456 + rq->cmd_flags |= REQ_OP_DRV_OUT; 457 457 ide_req(rq)->type = ATA_PRIV_TASKFILE; 458 458 rq->special = cmd; 459 459 cmd->rq = rq; ··· 478 478 if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) 479 479 return -EBUSY; 480 480 481 - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 481 + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); 482 482 scsi_req_init(rq); 483 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 484 483 ide_req(rq)->type = ATA_PRIV_TASKFILE; 485 484 486 485 drive->mult_req = arg;
+1 -1
drivers/ide/ide-eh.c
··· 123 123 return ide_stopped; 124 124 125 125 /* retry only "normal" I/O: */ 126 - if (rq->cmd_type != REQ_TYPE_FS) { 126 + if (blk_rq_is_passthrough(rq)) { 127 127 if (ata_taskfile_request(rq)) { 128 128 struct ide_cmd *cmd = rq->special; 129 129
+9 -10
drivers/ide/ide-floppy.c
··· 72 72 drive->failed_pc = NULL; 73 73 74 74 if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 || 75 - rq->cmd_type == REQ_TYPE_BLOCK_PC) 75 + (req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT)) 76 76 uptodate = 1; /* FIXME */ 77 77 else if (pc->c[0] == GPCMD_REQUEST_SENSE) { 78 78 ··· 254 254 goto out_end; 255 255 } 256 256 257 - switch (rq->cmd_type) { 258 - case REQ_TYPE_FS: 257 + switch (req_op(rq)) { 258 + default: 259 259 if (((long)blk_rq_pos(rq) % floppy->bs_factor) || 260 260 (blk_rq_sectors(rq) % floppy->bs_factor)) { 261 261 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n", ··· 265 265 pc = &floppy->queued_pc; 266 266 idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block); 267 267 break; 268 - case REQ_TYPE_BLOCK_PC: 268 + case REQ_OP_SCSI_IN: 269 + case REQ_OP_SCSI_OUT: 269 270 pc = &floppy->queued_pc; 270 271 idefloppy_blockpc_cmd(floppy, pc, rq); 271 272 break; 272 - case REQ_TYPE_DRV_PRIV: 273 + case REQ_OP_DRV_IN: 274 + case REQ_OP_DRV_OUT: 273 275 switch (ide_req(rq)->type) { 274 276 case ATA_PRIV_MISC: 275 277 case ATA_PRIV_SENSE: ··· 280 278 default: 281 279 BUG(); 282 280 } 283 - break; 284 - default: 285 - BUG(); 286 281 } 287 282 288 283 ide_prep_sense(drive, rq); ··· 291 292 292 293 cmd.rq = rq; 293 294 294 - if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { 295 + if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) { 295 296 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); 296 297 ide_map_sg(drive, &cmd); 297 298 } ··· 301 302 return ide_floppy_issue_pc(drive, &cmd, pc); 302 303 out_end: 303 304 drive->failed_pc = NULL; 304 - if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) 305 + if (blk_rq_is_passthrough(rq) && rq->errors == 0) 305 306 rq->errors = -EIO; 306 307 ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); 307 308 return ide_stopped;
+1 -1
drivers/ide/ide-io.c
··· 145 145 } else { 146 146 if (media == ide_tape) 147 147 rq->errors = IDE_DRV_ERROR_GENERAL; 148 - else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) 148 + else if (blk_rq_is_passthrough(rq) && rq->errors == 0) 149 149 rq->errors = -EIO; 150 150 } 151 151
+2 -4
drivers/ide/ide-ioctls.c
··· 125 125 if (NULL == (void *) arg) { 126 126 struct request *rq; 127 127 128 - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 128 + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); 129 129 scsi_req_init(rq); 130 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 131 130 ide_req(rq)->type = ATA_PRIV_TASKFILE; 132 131 err = blk_execute_rq(drive->queue, NULL, rq, 0); 133 132 blk_put_request(rq); ··· 222 223 struct request *rq; 223 224 int ret = 0; 224 225 225 - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 226 + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); 226 227 scsi_req_init(rq); 227 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 228 228 ide_req(rq)->type = ATA_PRIV_MISC; 229 229 scsi_req(rq)->cmd_len = 1; 230 230 scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
+2 -4
drivers/ide/ide-park.c
··· 31 31 } 32 32 spin_unlock_irq(&hwif->lock); 33 33 34 - rq = blk_get_request(q, READ, __GFP_RECLAIM); 34 + rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM); 35 35 scsi_req_init(rq); 36 36 scsi_req(rq)->cmd[0] = REQ_PARK_HEADS; 37 37 scsi_req(rq)->cmd_len = 1; 38 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 39 38 ide_req(rq)->type = ATA_PRIV_MISC; 40 39 rq->special = &timeout; 41 40 rc = blk_execute_rq(q, NULL, rq, 1); ··· 46 47 * Make sure that *some* command is sent to the drive after the 47 48 * timeout has expired, so power management will be reenabled. 48 49 */ 49 - rq = blk_get_request(q, READ, GFP_NOWAIT); 50 + rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_NOWAIT); 50 51 scsi_req_init(rq); 51 52 if (IS_ERR(rq)) 52 53 goto out; 53 54 54 55 scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS; 55 56 scsi_req(rq)->cmd_len = 1; 56 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 57 57 ide_req(rq)->type = ATA_PRIV_MISC; 58 58 elv_add_request(q, rq, ELEVATOR_INSERT_FRONT); 59 59
+4 -6
drivers/ide/ide-pm.c
··· 18 18 } 19 19 20 20 memset(&rqpm, 0, sizeof(rqpm)); 21 - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 21 + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); 22 22 scsi_req_init(rq); 23 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 24 23 ide_req(rq)->type = ATA_PRIV_PM_SUSPEND; 25 24 rq->special = &rqpm; 26 25 rqpm.pm_step = IDE_PM_START_SUSPEND; ··· 89 90 } 90 91 91 92 memset(&rqpm, 0, sizeof(rqpm)); 92 - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 93 + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); 93 94 scsi_req_init(rq); 94 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 95 95 ide_req(rq)->type = ATA_PRIV_PM_RESUME; 96 96 rq->rq_flags |= RQF_PREEMPT; 97 97 rq->special = &rqpm; ··· 242 244 { 243 245 struct ide_pm_state *pm = rq->special; 244 246 245 - if (rq->cmd_type == REQ_TYPE_DRV_PRIV && 247 + if (blk_rq_is_private(rq) && 246 248 ide_req(rq)->type == ATA_PRIV_PM_SUSPEND && 247 249 pm->pm_step == IDE_PM_START_SUSPEND) 248 250 /* Mark drive blocked when starting the suspend sequence. */ 249 251 drive->dev_flags |= IDE_DFLAG_BLOCKED; 250 - else if (rq->cmd_type == REQ_TYPE_DRV_PRIV && 252 + else if (blk_rq_is_private(rq) && 251 253 ide_req(rq)->type == ATA_PRIV_PM_RESUME && 252 254 pm->pm_step == IDE_PM_START_RESUME) { 253 255 /*
+2 -3
drivers/ide/ide-tape.c
··· 577 577 req->cmd[0], (unsigned long long)blk_rq_pos(rq), 578 578 blk_rq_sectors(rq)); 579 579 580 - BUG_ON(rq->cmd_type != REQ_TYPE_DRV_PRIV); 580 + BUG_ON(!blk_rq_is_private(rq)); 581 581 BUG_ON(ide_req(rq)->type != ATA_PRIV_MISC && 582 582 ide_req(rq)->type != ATA_PRIV_SENSE); 583 583 ··· 854 854 BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE); 855 855 BUG_ON(size < 0 || size % tape->blk_size); 856 856 857 - rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); 857 + rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM); 858 858 scsi_req_init(rq); 859 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 860 859 ide_req(rq)->type = ATA_PRIV_MISC; 861 860 scsi_req(rq)->cmd[13] = cmd; 862 861 rq->rq_disk = tape->disk;
+3 -3
drivers/ide/ide-taskfile.c
··· 428 428 { 429 429 struct request *rq; 430 430 int error; 431 - int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE; 432 431 433 - rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM); 432 + rq = blk_get_request(drive->queue, 433 + (cmd->tf_flags & IDE_TFLAG_WRITE) ? 434 + REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); 434 435 scsi_req_init(rq); 435 - rq->cmd_type = REQ_TYPE_DRV_PRIV; 436 436 ide_req(rq)->type = ATA_PRIV_TASKFILE; 437 437 438 438 /*
+6 -7
drivers/mtd/mtd_blkdevs.c
··· 84 84 nsect = blk_rq_cur_bytes(req) >> tr->blkshift; 85 85 buf = bio_data(req->bio); 86 86 87 - if (req->cmd_type != REQ_TYPE_FS) 88 - return -EIO; 89 - 90 87 if (req_op(req) == REQ_OP_FLUSH) 91 88 return tr->flush(dev); 92 89 ··· 91 94 get_capacity(req->rq_disk)) 92 95 return -EIO; 93 96 94 - if (req_op(req) == REQ_OP_DISCARD) 97 + switch (req_op(req)) { 98 + case REQ_OP_DISCARD: 95 99 return tr->discard(dev, block, nsect); 96 - 97 - if (rq_data_dir(req) == READ) { 100 + case REQ_OP_READ: 98 101 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 99 102 if (tr->readsect(dev, block, buf)) 100 103 return -EIO; 101 104 rq_flush_dcache_pages(req); 102 105 return 0; 103 - } else { 106 + case REQ_OP_WRITE: 104 107 if (!tr->writesect) 105 108 return -EIO; 106 109 ··· 109 112 if (tr->writesect(dev, block, buf)) 110 113 return -EIO; 111 114 return 0; 115 + default: 116 + return -EIO; 112 117 } 113 118 } 114 119
+7 -8
drivers/mtd/ubi/block.c
··· 323 323 struct ubiblock *dev = hctx->queue->queuedata; 324 324 struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req); 325 325 326 - if (req->cmd_type != REQ_TYPE_FS) 326 + switch (req_op(req)) { 327 + case REQ_OP_READ: 328 + ubi_sgl_init(&pdu->usgl); 329 + queue_work(dev->wq, &pdu->work); 330 + return BLK_MQ_RQ_QUEUE_OK; 331 + default: 327 332 return BLK_MQ_RQ_QUEUE_ERROR; 333 + } 328 334 329 - if (rq_data_dir(req) != READ) 330 - return BLK_MQ_RQ_QUEUE_ERROR; /* Write not implemented */ 331 - 332 - ubi_sgl_init(&pdu->usgl); 333 - queue_work(dev->wq, &pdu->work); 334 - 335 - return BLK_MQ_RQ_QUEUE_OK; 336 335 } 337 336 338 337 static int ubiblock_init_request(void *data, struct request *req,
+18 -8
drivers/nvme/host/core.c
··· 208 208 struct request *nvme_alloc_request(struct request_queue *q, 209 209 struct nvme_command *cmd, unsigned int flags, int qid) 210 210 { 211 + unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN; 211 212 struct request *req; 212 213 213 214 if (qid == NVME_QID_ANY) { 214 - req = blk_mq_alloc_request(q, nvme_is_write(cmd), flags); 215 + req = blk_mq_alloc_request(q, op, flags); 215 216 } else { 216 - req = blk_mq_alloc_request_hctx(q, nvme_is_write(cmd), flags, 217 + req = blk_mq_alloc_request_hctx(q, op, flags, 217 218 qid ? qid - 1 : 0); 218 219 } 219 220 if (IS_ERR(req)) 220 221 return req; 221 222 222 - req->cmd_type = REQ_TYPE_DRV_PRIV; 223 223 req->cmd_flags |= REQ_FAILFAST_DRIVER; 224 224 nvme_req(req)->cmd = cmd; 225 225 ··· 309 309 { 310 310 int ret = BLK_MQ_RQ_QUEUE_OK; 311 311 312 - if (req->cmd_type == REQ_TYPE_DRV_PRIV) 312 + switch (req_op(req)) { 313 + case REQ_OP_DRV_IN: 314 + case REQ_OP_DRV_OUT: 313 315 memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd)); 314 - else if (req_op(req) == REQ_OP_FLUSH) 316 + break; 317 + case REQ_OP_FLUSH: 315 318 nvme_setup_flush(ns, cmd); 316 - else if (req_op(req) == REQ_OP_DISCARD) 319 + break; 320 + case REQ_OP_DISCARD: 317 321 ret = nvme_setup_discard(ns, req, cmd); 318 - else 322 + break; 323 + case REQ_OP_READ: 324 + case REQ_OP_WRITE: 319 325 nvme_setup_rw(ns, req, cmd); 326 + break; 327 + default: 328 + WARN_ON_ONCE(1); 329 + return BLK_MQ_RQ_QUEUE_ERROR; 330 + } 320 331 321 332 cmd->common.command_id = req->tag; 322 - 323 333 return ret; 324 334 } 325 335 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
+1 -1
drivers/nvme/host/rdma.c
··· 1471 1471 ib_dma_sync_single_for_device(dev, sqe->dma, 1472 1472 sizeof(struct nvme_command), DMA_TO_DEVICE); 1473 1473 1474 - if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH) 1474 + if (req_op(rq) == REQ_OP_FLUSH) 1475 1475 flush = true; 1476 1476 ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, 1477 1477 req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
+2 -1
drivers/scsi/osd/osd_initiator.c
··· 1565 1565 struct bio *bio = oii->bio; 1566 1566 int ret; 1567 1567 1568 - req = blk_get_request(q, has_write ? WRITE : READ, flags); 1568 + req = blk_get_request(q, has_write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 1569 + flags); 1569 1570 if (IS_ERR(req)) 1570 1571 return req; 1571 1572 scsi_req_init(req);
+2 -1
drivers/scsi/osst.c
··· 367 367 int err = 0; 368 368 int write = (data_direction == DMA_TO_DEVICE); 369 369 370 - req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL); 370 + req = blk_get_request(SRpnt->stp->device->request_queue, 371 + write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL); 371 372 if (IS_ERR(req)) 372 373 return DRIVER_ERROR << 24; 373 374
+1 -1
drivers/scsi/scsi_error.c
··· 1974 1974 * blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a 1975 1975 * request becomes available 1976 1976 */ 1977 - req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL); 1977 + req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, GFP_KERNEL); 1978 1978 if (IS_ERR(req)) 1979 1979 return; 1980 1980 rq = scsi_req(req);
+13 -17
drivers/scsi/scsi_lib.c
··· 219 219 req_flags_t rq_flags, int *resid) 220 220 { 221 221 struct request *req; 222 - int write = (data_direction == DMA_TO_DEVICE); 223 222 struct scsi_request *rq; 224 223 int ret = DRIVER_ERROR << 24; 225 224 226 - req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM); 225 + req = blk_get_request(sdev->request_queue, 226 + data_direction == DMA_TO_DEVICE ? 227 + REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM); 227 228 if (IS_ERR(req)) 228 229 return ret; 229 230 rq = scsi_req(req); ··· 840 839 } 841 840 } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) { 842 841 /* 843 - * Certain non BLOCK_PC requests are commands that don't 844 - * actually transfer anything (FLUSH), so cannot use 842 + * Flush commands do not transfers any data, and thus cannot use 845 843 * good_bytes != blk_rq_bytes(req) as the signal for an error. 846 844 * This sets the error explicitly for the problem case. 847 845 */ ··· 859 859 blk_rq_sectors(req), good_bytes)); 860 860 861 861 /* 862 - * Recovered errors need reporting, but they're always treated 863 - * as success, so fiddle the result code here. For BLOCK_PC 862 + * Recovered errors need reporting, but they're always treated as 863 + * success, so fiddle the result code here. 
For passthrough requests 864 864 * we already took a copy of the original into rq->errors which 865 865 * is what gets returned to the user 866 866 */ ··· 874 874 else if (!(req->rq_flags & RQF_QUIET)) 875 875 scsi_print_sense(cmd); 876 876 result = 0; 877 - /* BLOCK_PC may have set error */ 877 + /* for passthrough error may be set */ 878 878 error = 0; 879 879 } 880 880 ··· 1179 1179 spin_unlock_irqrestore(&dev->list_lock, flags); 1180 1180 } 1181 1181 1182 - static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) 1182 + static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req) 1183 1183 { 1184 1184 struct scsi_cmnd *cmd = req->special; 1185 1185 1186 1186 /* 1187 - * BLOCK_PC requests may transfer data, in which case they must 1187 + * Passthrough requests may transfer data, in which case they must 1188 1188 * a bio attached to them. Or they might contain a SCSI command 1189 1189 * that does not transfer data, in which case they may optionally 1190 1190 * submit a request without an attached bio. ··· 1207 1207 } 1208 1208 1209 1209 /* 1210 - * Setup a REQ_TYPE_FS command. These are simple request from filesystems 1210 + * Setup a normal block command. These are simple request from filesystems 1211 1211 * that still need to be translated to SCSI CDBs from the ULD. 1212 1212 */ 1213 1213 static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) ··· 1236 1236 else 1237 1237 cmd->sc_data_direction = DMA_FROM_DEVICE; 1238 1238 1239 - switch (req->cmd_type) { 1240 - case REQ_TYPE_FS: 1239 + if (blk_rq_is_scsi(req)) 1240 + return scsi_setup_scsi_cmnd(sdev, req); 1241 + else 1241 1242 return scsi_setup_fs_cmnd(sdev, req); 1242 - case REQ_TYPE_BLOCK_PC: 1243 - return scsi_setup_blk_pc_cmnd(sdev, req); 1244 - default: 1245 - return BLKPREP_KILL; 1246 - } 1247 1243 } 1248 1244 1249 1245 static int
+2 -1
drivers/scsi/sg.c
··· 1698 1698 * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually 1699 1699 * does not sleep except under memory pressure. 1700 1700 */ 1701 - rq = blk_get_request(q, rw, GFP_KERNEL); 1701 + rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ? 1702 + REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL); 1702 1703 if (IS_ERR(rq)) { 1703 1704 kfree(long_cmdp); 1704 1705 return PTR_ERR(rq);
+6 -3
drivers/scsi/sr.c
··· 437 437 goto out; 438 438 } 439 439 440 - if (rq_data_dir(rq) == WRITE) { 440 + switch (req_op(rq)) { 441 + case REQ_OP_WRITE: 441 442 if (!cd->writeable) 442 443 goto out; 443 444 SCpnt->cmnd[0] = WRITE_10; 444 445 cd->cdi.media_written = 1; 445 - } else if (rq_data_dir(rq) == READ) { 446 + break; 447 + case REQ_OP_READ: 446 448 SCpnt->cmnd[0] = READ_10; 447 - } else { 449 + break; 450 + default: 448 451 blk_dump_rq_flags(rq, "Unknown sr command"); 449 452 goto out; 450 453 }
+3 -3
drivers/scsi/st.c
··· 541 541 struct scsi_request *rq; 542 542 struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data; 543 543 int err = 0; 544 - int write = (data_direction == DMA_TO_DEVICE); 545 544 struct scsi_tape *STp = SRpnt->stp; 546 545 547 - req = blk_get_request(SRpnt->stp->device->request_queue, write, 548 - GFP_KERNEL); 546 + req = blk_get_request(SRpnt->stp->device->request_queue, 547 + data_direction == DMA_TO_DEVICE ? 548 + REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, GFP_KERNEL); 549 549 if (IS_ERR(req)) 550 550 return DRIVER_ERROR << 24; 551 551 rq = scsi_req(req);
+2 -1
drivers/target/target_core_pscsi.c
··· 1005 1005 scsi_command_size(cmd->t_task_cdb)); 1006 1006 1007 1007 req = blk_get_request(pdv->pdv_sd->request_queue, 1008 - (cmd->data_direction == DMA_TO_DEVICE), 1008 + cmd->data_direction == DMA_TO_DEVICE ? 1009 + REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 1009 1010 GFP_KERNEL); 1010 1011 if (IS_ERR(req)) { 1011 1012 pr_err("PSCSI: blk_get_request() failed\n");
+1 -1
fs/nfsd/blocklayout.c
··· 223 223 if (!buf) 224 224 return -ENOMEM; 225 225 226 - rq = blk_get_request(q, READ, GFP_KERNEL); 226 + rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL); 227 227 if (IS_ERR(rq)) { 228 228 error = -ENOMEM; 229 229 goto out_free_buf;
+7
include/linux/blk_types.h
··· 162 162 /* write the zero filled sector many times */ 163 163 REQ_OP_WRITE_ZEROES = 8, 164 164 165 + /* SCSI passthrough using struct scsi_request */ 166 + REQ_OP_SCSI_IN = 32, 167 + REQ_OP_SCSI_OUT = 33, 168 + /* Driver private requests */ 169 + REQ_OP_DRV_IN = 34, 170 + REQ_OP_DRV_OUT = 35, 171 + 165 172 REQ_OP_LAST, 166 173 }; 167 174
+11 -11
include/linux/blkdev.h
··· 71 71 }; 72 72 73 73 /* 74 - * request command types 75 - */ 76 - enum rq_cmd_type_bits { 77 - REQ_TYPE_FS = 1, /* fs request */ 78 - REQ_TYPE_BLOCK_PC, /* scsi command */ 79 - REQ_TYPE_DRV_PRIV, /* driver defined types from here */ 80 - }; 81 - 82 - /* 83 74 * request flags */ 84 75 typedef __u32 __bitwise req_flags_t; 85 76 ··· 136 145 struct blk_mq_ctx *mq_ctx; 137 146 138 147 int cpu; 139 - unsigned cmd_type; 140 148 unsigned int cmd_flags; /* op and common flags */ 141 149 req_flags_t rq_flags; 142 150 unsigned long atomic_flags; ··· 232 242 struct request *next_rq; 233 243 }; 234 244 245 + static inline bool blk_rq_is_scsi(struct request *rq) 246 + { 247 + return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT; 248 + } 249 + 250 + static inline bool blk_rq_is_private(struct request *rq) 251 + { 252 + return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT; 253 + } 254 + 235 255 static inline bool blk_rq_is_passthrough(struct request *rq) 236 256 { 237 - return rq->cmd_type != REQ_TYPE_FS; 257 + return blk_rq_is_scsi(rq) || blk_rq_is_private(rq); 238 258 } 239 259 240 260 static inline unsigned short req_get_ioprio(struct request *req)
+5 -9
include/linux/ide.h
··· 63 63 64 64 static inline bool ata_misc_request(struct request *rq) 65 65 { 66 - return rq->cmd_type == REQ_TYPE_DRV_PRIV && 67 - ide_req(rq)->type == ATA_PRIV_MISC; 66 + return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC; 68 67 } 69 68 70 69 static inline bool ata_taskfile_request(struct request *rq) 71 70 { 72 - return rq->cmd_type == REQ_TYPE_DRV_PRIV && 73 - ide_req(rq)->type == ATA_PRIV_TASKFILE; 71 + return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE; 74 72 } 75 73 76 74 static inline bool ata_pc_request(struct request *rq) 77 75 { 78 - return rq->cmd_type == REQ_TYPE_DRV_PRIV && 79 - ide_req(rq)->type == ATA_PRIV_PC; 76 + return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC; 80 77 } 81 78 82 79 static inline bool ata_sense_request(struct request *rq) 83 80 { 84 - return rq->cmd_type == REQ_TYPE_DRV_PRIV && 85 - ide_req(rq)->type == ATA_PRIV_SENSE; 81 + return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE; 86 82 } 87 83 88 84 static inline bool ata_pm_request(struct request *rq) 89 85 { 90 - return rq->cmd_type == REQ_TYPE_DRV_PRIV && 86 + return blk_rq_is_private(rq) && 91 87 (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND || 92 88 ide_req(rq)->type == ATA_PRIV_PM_RESUME); 93 89 }