Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: remove wrappers for request type/flags

Remove all the trivial wrappers for the cmd_type and cmd_flags fields in
struct request. This allows much easier grepping for different request
types instead of unwinding through macros.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>

authored by

Christoph Hellwig and committed by
Jens Axboe
33659ebb 7e005f79

+234 -196
+4 -3
block/blk-barrier.c
··· 79 79 * 80 80 * http://thread.gmane.org/gmane.linux.kernel/537473 81 81 */ 82 - if (!blk_fs_request(rq)) 82 + if (rq->cmd_type != REQ_TYPE_FS) 83 83 return QUEUE_ORDSEQ_DRAIN; 84 84 85 85 if ((rq->cmd_flags & REQ_ORDERED_COLOR) == ··· 236 236 bool blk_do_ordered(struct request_queue *q, struct request **rqp) 237 237 { 238 238 struct request *rq = *rqp; 239 - const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); 239 + const int is_barrier = rq->cmd_type == REQ_TYPE_FS && 240 + (rq->cmd_flags & REQ_HARDBARRIER); 240 241 241 242 if (!q->ordseq) { 242 243 if (!is_barrier) ··· 262 261 */ 263 262 264 263 /* Special requests are not subject to ordering rules. */ 265 - if (!blk_fs_request(rq) && 264 + if (rq->cmd_type != REQ_TYPE_FS && 266 265 rq != &q->pre_flush_rq && rq != &q->post_flush_rq) 267 266 return true; 268 267
+7 -6
block/blk-core.c
··· 184 184 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", 185 185 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); 186 186 187 - if (blk_pc_request(rq)) { 187 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 188 188 printk(KERN_INFO " cdb: "); 189 189 for (bit = 0; bit < BLK_MAX_CDB; bit++) 190 190 printk("%02x ", rq->cmd[bit]); ··· 1796 1796 * sees this request (possibly after 1797 1797 * requeueing). Notify IO scheduler. 1798 1798 */ 1799 - if (blk_sorted_rq(rq)) 1799 + if (rq->cmd_flags & REQ_SORTED) 1800 1800 elv_activate_rq(q, rq); 1801 1801 1802 1802 /* ··· 1984 1984 * TODO: tj: This is too subtle. It would be better to let 1985 1985 * low level drivers do what they see fit. 1986 1986 */ 1987 - if (blk_fs_request(req)) 1987 + if (req->cmd_type == REQ_TYPE_FS) 1988 1988 req->errors = 0; 1989 1989 1990 - if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { 1990 + if (error && req->cmd_type == REQ_TYPE_FS && 1991 + !(req->cmd_flags & REQ_QUIET)) { 1991 1992 printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", 1992 1993 req->rq_disk ? req->rq_disk->disk_name : "?", 1993 1994 (unsigned long long)blk_rq_pos(req)); ··· 2075 2074 req->buffer = bio_data(req->bio); 2076 2075 2077 2076 /* update sector only for requests with clear definition of sector */ 2078 - if (blk_fs_request(req) || blk_discard_rq(req)) 2077 + if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD)) 2079 2078 req->__sector += total_bytes >> 9; 2080 2079 2081 2080 /* mixed attributes always follow the first bio */ ··· 2128 2127 2129 2128 BUG_ON(blk_queued_rq(req)); 2130 2129 2131 - if (unlikely(laptop_mode) && blk_fs_request(req)) 2130 + if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) 2132 2131 laptop_io_completion(&req->q->backing_dev_info); 2133 2132 2134 2133 blk_delete_timer(req);
+1 -1
block/blk-exec.c
··· 57 57 __elv_add_request(q, rq, where, 1); 58 58 __generic_unplug_device(q); 59 59 /* the queue is stopped so it won't be plugged+unplugged */ 60 - if (blk_pm_resume_request(rq)) 60 + if (rq->cmd_type == REQ_TYPE_PM_RESUME) 61 61 q->request_fn(q); 62 62 spin_unlock_irq(q->queue_lock); 63 63 }
+2 -2
block/blk-merge.c
··· 226 226 { 227 227 unsigned short max_sectors; 228 228 229 - if (unlikely(blk_pc_request(req))) 229 + if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) 230 230 max_sectors = queue_max_hw_sectors(q); 231 231 else 232 232 max_sectors = queue_max_sectors(q); ··· 250 250 { 251 251 unsigned short max_sectors; 252 252 253 - if (unlikely(blk_pc_request(req))) 253 + if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) 254 254 max_sectors = queue_max_hw_sectors(q); 255 255 else 256 256 max_sectors = queue_max_sectors(q);
+4 -2
block/blk.h
··· 161 161 */ 162 162 static inline int blk_do_io_stat(struct request *rq) 163 163 { 164 - return rq->rq_disk && blk_rq_io_stat(rq) && 165 - (blk_fs_request(rq) || blk_discard_rq(rq)); 164 + return rq->rq_disk && 165 + (rq->cmd_flags & REQ_IO_STAT) && 166 + (rq->cmd_type == REQ_TYPE_FS || 167 + (rq->cmd_flags & REQ_DISCARD)); 166 168 } 167 169 168 170 #endif
+11 -8
block/cfq-iosched.c
··· 646 646 return rq1; 647 647 else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) 648 648 return rq2; 649 - if (rq_is_meta(rq1) && !rq_is_meta(rq2)) 649 + if ((rq1->cmd_flags & REQ_RW_META) && !(rq2->cmd_flags & REQ_RW_META)) 650 650 return rq1; 651 - else if (rq_is_meta(rq2) && !rq_is_meta(rq1)) 651 + else if ((rq2->cmd_flags & REQ_RW_META) && 652 + !(rq1->cmd_flags & REQ_RW_META)) 652 653 return rq2; 653 654 654 655 s1 = blk_rq_pos(rq1); ··· 1485 1484 cfqq->cfqd->rq_queued--; 1486 1485 cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, 1487 1486 rq_data_dir(rq), rq_is_sync(rq)); 1488 - if (rq_is_meta(rq)) { 1487 + if (rq->cmd_flags & REQ_RW_META) { 1489 1488 WARN_ON(!cfqq->meta_pending); 1490 1489 cfqq->meta_pending--; 1491 1490 } ··· 3177 3176 * So both queues are sync. Let the new request get disk time if 3178 3177 * it's a metadata request and the current queue is doing regular IO. 3179 3178 */ 3180 - if (rq_is_meta(rq) && !cfqq->meta_pending) 3179 + if ((rq->cmd_flags & REQ_RW_META) && !cfqq->meta_pending) 3181 3180 return true; 3182 3181 3183 3182 /* ··· 3231 3230 struct cfq_io_context *cic = RQ_CIC(rq); 3232 3231 3233 3232 cfqd->rq_queued++; 3234 - if (rq_is_meta(rq)) 3233 + if (rq->cmd_flags & REQ_RW_META) 3235 3234 cfqq->meta_pending++; 3236 3235 3237 3236 cfq_update_io_thinktime(cfqd, cic); ··· 3366 3365 unsigned long now; 3367 3366 3368 3367 now = jiffies; 3369 - cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq)); 3368 + cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", 3369 + !!(rq->cmd_flags & REQ_NOIDLE)); 3370 3370 3371 3371 cfq_update_hw_tag(cfqd); 3372 3372 ··· 3421 3419 cfq_slice_expired(cfqd, 1); 3422 3420 else if (sync && cfqq_empty && 3423 3421 !cfq_close_cooperator(cfqd, cfqq)) { 3424 - cfqd->noidle_tree_requires_idle |= !rq_noidle(rq); 3422 + cfqd->noidle_tree_requires_idle |= 3423 + !(rq->cmd_flags & REQ_NOIDLE); 3425 3424 /* 3426 3425 * Idling is enabled for SYNC_WORKLOAD. 
3427 3426 * SYNC_NOIDLE_WORKLOAD idles at the end of the tree 3428 - * only if we processed at least one !rq_noidle request 3427 + * only if we processed at least one !REQ_NOIDLE request 3429 3428 */ 3430 3429 if (cfqd->serving_type == SYNC_WORKLOAD 3431 3430 || cfqd->noidle_tree_requires_idle
+10 -6
block/elevator.c
··· 428 428 list_for_each_prev(entry, &q->queue_head) { 429 429 struct request *pos = list_entry_rq(entry); 430 430 431 - if (blk_discard_rq(rq) != blk_discard_rq(pos)) 431 + if ((rq->cmd_flags & REQ_DISCARD) != 432 + (pos->cmd_flags & REQ_DISCARD)) 432 433 break; 433 434 if (rq_data_dir(rq) != rq_data_dir(pos)) 434 435 break; ··· 559 558 */ 560 559 if (blk_account_rq(rq)) { 561 560 q->in_flight[rq_is_sync(rq)]--; 562 - if (blk_sorted_rq(rq)) 561 + if (rq->cmd_flags & REQ_SORTED) 563 562 elv_deactivate_rq(q, rq); 564 563 } 565 564 ··· 645 644 break; 646 645 647 646 case ELEVATOR_INSERT_SORT: 648 - BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq)); 647 + BUG_ON(rq->cmd_type != REQ_TYPE_FS && 648 + !(rq->cmd_flags & REQ_DISCARD)); 649 649 rq->cmd_flags |= REQ_SORTED; 650 650 q->nr_sorted++; 651 651 if (rq_mergeable(rq)) { ··· 718 716 /* 719 717 * toggle ordered color 720 718 */ 721 - if (blk_barrier_rq(rq)) 719 + if (rq->cmd_flags & REQ_HARDBARRIER) 722 720 q->ordcolor ^= 1; 723 721 724 722 /* ··· 731 729 * this request is scheduling boundary, update 732 730 * end_sector 733 731 */ 734 - if (blk_fs_request(rq) || blk_discard_rq(rq)) { 732 + if (rq->cmd_type == REQ_TYPE_FS || 733 + (rq->cmd_flags & REQ_DISCARD)) { 735 734 q->end_sector = rq_end_sector(rq); 736 735 q->boundary_rq = rq; 737 736 } ··· 846 843 */ 847 844 if (blk_account_rq(rq)) { 848 845 q->in_flight[rq_is_sync(rq)]--; 849 - if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn) 846 + if ((rq->cmd_flags & REQ_SORTED) && 847 + e->ops->elevator_completed_req_fn) 850 848 e->ops->elevator_completed_req_fn(q, rq); 851 849 } 852 850
+1 -1
drivers/ata/libata-scsi.c
··· 1111 1111 */ 1112 1112 static int atapi_drain_needed(struct request *rq) 1113 1113 { 1114 - if (likely(!blk_pc_request(rq))) 1114 + if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC)) 1115 1115 return 0; 1116 1116 1117 1117 if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
+30 -19
drivers/block/cciss.c
··· 1783 1783 #endif /* CCISS_DEBUG */ 1784 1784 1785 1785 /* set the residual count for pc requests */ 1786 - if (blk_pc_request(rq)) 1786 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) 1787 1787 rq->resid_len = cmd->err_info->ResidualCnt; 1788 1788 1789 1789 blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO); ··· 2983 2983 driver_byte = DRIVER_OK; 2984 2984 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */ 2985 2985 2986 - if (blk_pc_request(cmd->rq)) 2986 + if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) 2987 2987 host_byte = DID_PASSTHROUGH; 2988 2988 else 2989 2989 host_byte = DID_OK; ··· 2992 2992 host_byte, driver_byte); 2993 2993 2994 2994 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) { 2995 - if (!blk_pc_request(cmd->rq)) 2995 + if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) 2996 2996 printk(KERN_WARNING "cciss: cmd %p " 2997 2997 "has SCSI Status 0x%x\n", 2998 2998 cmd, cmd->err_info->ScsiStatus); ··· 3002 3002 /* check the sense key */ 3003 3003 sense_key = 0xf & cmd->err_info->SenseInfo[2]; 3004 3004 /* no status or recovered error */ 3005 - if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq)) 3005 + if (((sense_key == 0x0) || (sense_key == 0x1)) && 3006 + (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)) 3006 3007 error_value = 0; 3007 3008 3008 3009 if (check_for_unit_attention(h, cmd)) { 3009 - *retry_cmd = !blk_pc_request(cmd->rq); 3010 + *retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC); 3010 3011 return 0; 3011 3012 } 3012 3013 3013 - if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */ 3014 + /* Not SG_IO or similar? 
*/ 3015 + if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) { 3014 3016 if (error_value != 0) 3015 3017 printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION" 3016 3018 " sense key = 0x%x\n", cmd, sense_key); ··· 3054 3052 rq->errors = evaluate_target_status(h, cmd, &retry_cmd); 3055 3053 break; 3056 3054 case CMD_DATA_UNDERRUN: 3057 - if (blk_fs_request(cmd->rq)) { 3055 + if (cmd->rq->cmd_type == REQ_TYPE_FS) { 3058 3056 printk(KERN_WARNING "cciss: cmd %p has" 3059 3057 " completed with data underrun " 3060 3058 "reported\n", cmd); ··· 3062 3060 } 3063 3061 break; 3064 3062 case CMD_DATA_OVERRUN: 3065 - if (blk_fs_request(cmd->rq)) 3063 + if (cmd->rq->cmd_type == REQ_TYPE_FS) 3066 3064 printk(KERN_WARNING "cciss: cmd %p has" 3067 3065 " completed with data overrun " 3068 3066 "reported\n", cmd); ··· 3072 3070 "reported invalid\n", cmd); 3073 3071 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3074 3072 cmd->err_info->CommandStatus, DRIVER_OK, 3075 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); 3073 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3074 + DID_PASSTHROUGH : DID_ERROR); 3076 3075 break; 3077 3076 case CMD_PROTOCOL_ERR: 3078 3077 printk(KERN_WARNING "cciss: cmd %p has " 3079 3078 "protocol error \n", cmd); 3080 3079 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3081 3080 cmd->err_info->CommandStatus, DRIVER_OK, 3082 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); 3081 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3082 + DID_PASSTHROUGH : DID_ERROR); 3083 3083 break; 3084 3084 case CMD_HARDWARE_ERR: 3085 3085 printk(KERN_WARNING "cciss: cmd %p had " 3086 3086 " hardware error\n", cmd); 3087 3087 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3088 3088 cmd->err_info->CommandStatus, DRIVER_OK, 3089 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); 3089 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 
3090 + DID_PASSTHROUGH : DID_ERROR); 3090 3091 break; 3091 3092 case CMD_CONNECTION_LOST: 3092 3093 printk(KERN_WARNING "cciss: cmd %p had " 3093 3094 "connection lost\n", cmd); 3094 3095 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3095 3096 cmd->err_info->CommandStatus, DRIVER_OK, 3096 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); 3097 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3098 + DID_PASSTHROUGH : DID_ERROR); 3097 3099 break; 3098 3100 case CMD_ABORTED: 3099 3101 printk(KERN_WARNING "cciss: cmd %p was " 3100 3102 "aborted\n", cmd); 3101 3103 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3102 3104 cmd->err_info->CommandStatus, DRIVER_OK, 3103 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT); 3105 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3106 + DID_PASSTHROUGH : DID_ABORT); 3104 3107 break; 3105 3108 case CMD_ABORT_FAILED: 3106 3109 printk(KERN_WARNING "cciss: cmd %p reports " 3107 3110 "abort failed\n", cmd); 3108 3111 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3109 3112 cmd->err_info->CommandStatus, DRIVER_OK, 3110 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); 3113 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3114 + DID_PASSTHROUGH : DID_ERROR); 3111 3115 break; 3112 3116 case CMD_UNSOLICITED_ABORT: 3113 3117 printk(KERN_WARNING "cciss%d: unsolicited " ··· 3129 3121 "many times\n", h->ctlr, cmd); 3130 3122 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3131 3123 cmd->err_info->CommandStatus, DRIVER_OK, 3132 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT); 3124 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3125 + DID_PASSTHROUGH : DID_ABORT); 3133 3126 break; 3134 3127 case CMD_TIMEOUT: 3135 3128 printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd); 3136 3129 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3137 3130 cmd->err_info->CommandStatus, DRIVER_OK, 3138 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); 3131 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 
3132 + DID_PASSTHROUGH : DID_ERROR); 3139 3133 break; 3140 3134 default: 3141 3135 printk(KERN_WARNING "cciss: cmd %p returned " ··· 3145 3135 cmd->err_info->CommandStatus); 3146 3136 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3147 3137 cmd->err_info->CommandStatus, DRIVER_OK, 3148 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); 3138 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3139 + DID_PASSTHROUGH : DID_ERROR); 3149 3140 } 3150 3141 3151 3142 after_error_processing: ··· 3305 3294 c->Header.SGList = h->max_cmd_sgentries; 3306 3295 set_performant_mode(h, c); 3307 3296 3308 - if (likely(blk_fs_request(creq))) { 3297 + if (likely(creq->cmd_type == REQ_TYPE_FS)) { 3309 3298 if(h->cciss_read == CCISS_READ_10) { 3310 3299 c->Request.CDB[1] = 0; 3311 3300 c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */ ··· 3335 3324 c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff; 3336 3325 c->Request.CDB[14] = c->Request.CDB[15] = 0; 3337 3326 } 3338 - } else if (blk_pc_request(creq)) { 3327 + } else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) { 3339 3328 c->Request.CDBLen = creq->cmd_len; 3340 3329 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB); 3341 3330 } else {
+1 -1
drivers/block/hd.c
··· 627 627 req_data_dir(req) == READ ? "read" : "writ", 628 628 cyl, head, sec, nsect, req->buffer); 629 629 #endif 630 - if (blk_fs_request(req)) { 630 + if (req->cmd_type == REQ_TYPE_FS) { 631 631 switch (rq_data_dir(req)) { 632 632 case READ: 633 633 hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
+2 -2
drivers/block/mg_disk.c
··· 670 670 break; 671 671 } 672 672 673 - if (unlikely(!blk_fs_request(host->req))) { 673 + if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) { 674 674 mg_end_request_cur(host, -EIO); 675 675 continue; 676 676 } ··· 756 756 continue; 757 757 } 758 758 759 - if (unlikely(!blk_fs_request(req))) { 759 + if (unlikely(req->cmd_type != REQ_TYPE_FS)) { 760 760 mg_end_request_cur(host, -EIO); 761 761 continue; 762 762 }
+1 -1
drivers/block/nbd.c
··· 448 448 449 449 static void nbd_handle_req(struct nbd_device *lo, struct request *req) 450 450 { 451 - if (!blk_fs_request(req)) 451 + if (req->cmd_type != REQ_TYPE_FS) 452 452 goto error_out; 453 453 454 454 nbd_cmd(req) = NBD_CMD_READ;
+2 -1
drivers/block/osdblk.c
··· 310 310 break; 311 311 312 312 /* filter out block requests we don't understand */ 313 - if (!blk_fs_request(rq) && !blk_barrier_rq(rq)) { 313 + if (rq->cmd_type != REQ_TYPE_FS && 314 + !(rq->cmd_flags & REQ_HARDBARRIER)) { 314 315 blk_end_request_all(rq, 0); 315 316 continue; 316 317 }
+1 -1
drivers/block/paride/pd.c
··· 439 439 440 440 static enum action do_pd_io_start(void) 441 441 { 442 - if (blk_special_request(pd_req)) { 442 + if (pd_req->cmd_type == REQ_TYPE_SPECIAL) { 443 443 phase = pd_special; 444 444 return pd_special(); 445 445 }
+1 -1
drivers/block/ps3disk.c
··· 196 196 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); 197 197 198 198 while ((req = blk_fetch_request(q))) { 199 - if (blk_fs_request(req)) { 199 + if (req->cmd_type == REQ_TYPE_FS) { 200 200 if (ps3disk_submit_request_sg(dev, req)) 201 201 break; 202 202 } else if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
+4 -4
drivers/block/ub.c
··· 648 648 return 0; 649 649 } 650 650 651 - if (lun->changed && !blk_pc_request(rq)) { 651 + if (lun->changed && rq->cmd_type != REQ_TYPE_BLOCK_PC) { 652 652 blk_start_request(rq); 653 653 ub_end_rq(rq, SAM_STAT_CHECK_CONDITION); 654 654 return 0; ··· 684 684 } 685 685 urq->nsg = n_elem; 686 686 687 - if (blk_pc_request(rq)) { 687 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 688 688 ub_cmd_build_packet(sc, lun, cmd, urq); 689 689 } else { 690 690 ub_cmd_build_block(sc, lun, cmd, urq); ··· 781 781 rq = urq->rq; 782 782 783 783 if (cmd->error == 0) { 784 - if (blk_pc_request(rq)) { 784 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 785 785 if (cmd->act_len >= rq->resid_len) 786 786 rq->resid_len = 0; 787 787 else ··· 795 795 } 796 796 } 797 797 } else { 798 - if (blk_pc_request(rq)) { 798 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 799 799 /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ 800 800 memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); 801 801 rq->sense_len = UB_SENSE_SIZE;
+1 -1
drivers/block/viodasd.c
··· 361 361 if (req == NULL) 362 362 return; 363 363 /* check that request contains a valid command */ 364 - if (!blk_fs_request(req)) { 364 + if (req->cmd_type != REQ_TYPE_FS) { 365 365 viodasd_end_request(req, -EIO, blk_rq_sectors(req)); 366 366 continue; 367 367 }
+9 -6
drivers/block/virtio_blk.c
··· 65 65 break; 66 66 } 67 67 68 - if (blk_pc_request(vbr->req)) { 68 + switch (vbr->req->cmd_type) { 69 + case REQ_TYPE_BLOCK_PC: 69 70 vbr->req->resid_len = vbr->in_hdr.residual; 70 71 vbr->req->sense_len = vbr->in_hdr.sense_len; 71 72 vbr->req->errors = vbr->in_hdr.errors; 72 - } 73 - if (blk_special_request(vbr->req)) 73 + break; 74 + case REQ_TYPE_SPECIAL: 74 75 vbr->req->errors = (error != 0); 76 + break; 77 + } 75 78 76 79 __blk_end_request_all(vbr->req, error); 77 80 list_del(&vbr->list); ··· 126 123 BUG(); 127 124 } 128 125 129 - if (blk_barrier_rq(vbr->req)) 126 + if (vbr->req->cmd_flags & REQ_HARDBARRIER) 130 127 vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; 131 128 132 129 sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); ··· 137 134 * block, and before the normal inhdr we put the sense data and the 138 135 * inhdr with additional status information before the normal inhdr. 139 136 */ 140 - if (blk_pc_request(vbr->req)) 137 + if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) 141 138 sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len); 142 139 143 140 num = blk_rq_map_sg(q, vbr->req, vblk->sg + out); 144 141 145 - if (blk_pc_request(vbr->req)) { 142 + if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) { 146 143 sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96); 147 144 sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr, 148 145 sizeof(vbr->in_hdr));
+1 -1
drivers/block/xd.c
··· 322 322 int res = -EIO; 323 323 int retry; 324 324 325 - if (!blk_fs_request(req)) 325 + if (req->cmd_type != REQ_TYPE_FS) 326 326 goto done; 327 327 if (block + count > get_capacity(req->rq_disk)) 328 328 goto done;
+2 -2
drivers/block/xen-blkfront.c
··· 238 238 239 239 ring_req->operation = rq_data_dir(req) ? 240 240 BLKIF_OP_WRITE : BLKIF_OP_READ; 241 - if (blk_barrier_rq(req)) 241 + if (req->cmd_flags & REQ_HARDBARRIER) 242 242 ring_req->operation = BLKIF_OP_WRITE_BARRIER; 243 243 244 244 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); ··· 309 309 310 310 blk_start_request(req); 311 311 312 - if (!blk_fs_request(req)) { 312 + if (req->cmd_type != REQ_TYPE_FS) { 313 313 __blk_end_request_all(req, -EIO); 314 314 continue; 315 315 }
+1 -1
drivers/block/xsysace.c
··· 465 465 struct request *req; 466 466 467 467 while ((req = blk_peek_request(q)) != NULL) { 468 - if (blk_fs_request(req)) 468 + if (req->cmd_type == REQ_TYPE_FS) 469 469 break; 470 470 blk_start_request(req); 471 471 __blk_end_request_all(req, -EIO);
+1 -1
drivers/cdrom/gdrom.c
··· 643 643 struct request *req; 644 644 645 645 while ((req = blk_fetch_request(rq)) != NULL) { 646 - if (!blk_fs_request(req)) { 646 + if (req->cmd_type != REQ_TYPE_FS) { 647 647 printk(KERN_DEBUG "GDROM: Non-fs request ignored\n"); 648 648 __blk_end_request_all(req, -EIO); 649 649 continue;
+1 -1
drivers/cdrom/viocd.c
··· 298 298 struct request *req; 299 299 300 300 while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) { 301 - if (!blk_fs_request(req)) 301 + if (req->cmd_type != REQ_TYPE_FS) 302 302 __blk_end_request_all(req, -EIO); 303 303 else if (send_request(req) < 0) { 304 304 printk(VIOCD_KERN_WARNING
+10 -7
drivers/ide/ide-atapi.c
··· 190 190 191 191 BUG_ON(sense_len > sizeof(*sense)); 192 192 193 - if (blk_sense_request(rq) || drive->sense_rq_armed) 193 + if (rq->cmd_type == REQ_TYPE_SENSE || drive->sense_rq_armed) 194 194 return; 195 195 196 196 memset(sense, 0, sizeof(*sense)); ··· 307 307 308 308 int ide_cd_get_xferlen(struct request *rq) 309 309 { 310 - if (blk_fs_request(rq)) 310 + switch (rq->cmd_type) { 311 + case REQ_TYPE_FS: 311 312 return 32768; 312 - else if (blk_sense_request(rq) || blk_pc_request(rq) || 313 - rq->cmd_type == REQ_TYPE_ATA_PC) 313 + case REQ_TYPE_SENSE: 314 + case REQ_TYPE_BLOCK_PC: 315 + case REQ_TYPE_ATA_PC: 314 316 return blk_rq_bytes(rq); 315 - else 317 + default: 316 318 return 0; 319 + } 317 320 } 318 321 EXPORT_SYMBOL_GPL(ide_cd_get_xferlen); 319 322 ··· 477 474 if (uptodate == 0) 478 475 drive->failed_pc = NULL; 479 476 480 - if (blk_special_request(rq)) { 477 + if (rq->cmd_type == REQ_TYPE_SPECIAL) { 481 478 rq->errors = 0; 482 479 error = 0; 483 480 } else { 484 481 485 - if (blk_fs_request(rq) == 0 && uptodate <= 0) { 482 + if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) { 486 483 if (rq->errors == 0) 487 484 rq->errors = -EIO; 488 485 }
+36 -30
drivers/ide/ide-cd.c
··· 176 176 if (!sense->valid) 177 177 break; 178 178 if (failed_command == NULL || 179 - !blk_fs_request(failed_command) 179 + failed_command->cmd_type != REQ_TYPE_FS) 180 180 break; 181 181 sector = (sense->information[0] << 24) | 182 182 (sense->information[1] << 16) | ··· 292 292 "stat 0x%x", 293 293 rq->cmd[0], rq->cmd_type, err, stat); 294 294 295 - if (blk_sense_request(rq)) { 295 + if (rq->cmd_type == REQ_TYPE_SENSE) { 296 296 /* 297 297 * We got an error trying to get sense info from the drive 298 298 * (probably while trying to recover from a former error). ··· 303 303 } 304 304 305 305 /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */ 306 - if (blk_pc_request(rq) && !rq->errors) 306 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors) 307 307 rq->errors = SAM_STAT_CHECK_CONDITION; 308 308 309 309 if (blk_noretry_request(rq)) ··· 311 311 312 312 switch (sense_key) { 313 313 case NOT_READY: 314 - if (blk_fs_request(rq) && rq_data_dir(rq) == WRITE) { 314 + if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) { 315 315 if (ide_cd_breathe(drive, rq)) 316 316 return 1; 317 317 } else { 318 318 cdrom_saw_media_change(drive); 319 319 320 - if (blk_fs_request(rq) && !blk_rq_quiet(rq)) 320 + if (rq->cmd_type == REQ_TYPE_FS && 321 + !(rq->cmd_flags & REQ_QUIET)) 321 322 printk(KERN_ERR PFX "%s: tray open\n", 322 323 drive->name); 323 324 } ··· 327 326 case UNIT_ATTENTION: 328 327 cdrom_saw_media_change(drive); 329 328 330 - if (blk_fs_request(rq) == 0) 329 + if (rq->cmd_type != REQ_TYPE_FS) 331 330 return 0; 332 331 333 332 /* ··· 353 352 * No point in retrying after an illegal request or data 354 353 * protect error. 355 354 */ 356 - if (!blk_rq_quiet(rq)) 355 + if (!(rq->cmd_flags & REQ_QUIET)) 357 356 ide_dump_status(drive, "command error", stat); 358 357 do_end_request = 1; 359 358 break; ··· 362 361 * No point in re-trying a zillion times on a bad sector. 363 362 * If we got here the error is not correctable. 
364 363 */ 365 - if (!blk_rq_quiet(rq)) 364 + if (!(rq->cmd_flags & REQ_QUIET)) 366 365 ide_dump_status(drive, "media error " 367 366 "(bad sector)", stat); 368 367 do_end_request = 1; 369 368 break; 370 369 case BLANK_CHECK: 371 370 /* disk appears blank? */ 372 - if (!blk_rq_quiet(rq)) 371 + if (!(rq->cmd_flags & REQ_QUIET)) 373 372 ide_dump_status(drive, "media error (blank)", 374 373 stat); 375 374 do_end_request = 1; 376 375 break; 377 376 default: 378 - if (blk_fs_request(rq) == 0) 377 + if (rq->cmd_type != REQ_TYPE_FS) 379 378 break; 380 379 if (err & ~ATA_ABORTED) { 381 380 /* go to the default handler for other errors */ ··· 386 385 do_end_request = 1; 387 386 } 388 387 389 - if (blk_fs_request(rq) == 0) { 388 + if (rq->cmd_type != REQ_TYPE_FS) { 390 389 rq->cmd_flags |= REQ_FAILED; 391 390 do_end_request = 1; 392 391 } ··· 526 525 ide_expiry_t *expiry = NULL; 527 526 int dma_error = 0, dma, thislen, uptodate = 0; 528 527 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0; 529 - int sense = blk_sense_request(rq); 528 + int sense = (rq->cmd_type == REQ_TYPE_SENSE); 530 529 unsigned int timeout; 531 530 u16 len; 532 531 u8 ireason, stat; ··· 569 568 570 569 ide_read_bcount_and_ireason(drive, &len, &ireason); 571 570 572 - thislen = blk_fs_request(rq) ? len : cmd->nleft; 571 + thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft; 573 572 if (thislen > len) 574 573 thislen = len; 575 574 ··· 578 577 579 578 /* If DRQ is clear, the command has completed. */ 580 579 if ((stat & ATA_DRQ) == 0) { 581 - if (blk_fs_request(rq)) { 580 + if (rq->cmd_type == REQ_TYPE_FS) { 582 581 /* 583 582 * If we're not done reading/writing, complain. 584 583 * Otherwise, complete the command normally. ··· 592 591 rq->cmd_flags |= REQ_FAILED; 593 592 uptodate = 0; 594 593 } 595 - } else if (!blk_pc_request(rq)) { 594 + } else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { 596 595 ide_cd_request_sense_fixup(drive, cmd); 597 596 598 597 uptodate = cmd->nleft ? 
0 : 1; ··· 641 640 642 641 /* pad, if necessary */ 643 642 if (len > 0) { 644 - if (blk_fs_request(rq) == 0 || write == 0) 643 + if (rq->cmd_type != REQ_TYPE_FS || write == 0) 645 644 ide_pad_transfer(drive, write, len); 646 645 else { 647 646 printk(KERN_ERR PFX "%s: confused, missing data\n", ··· 650 649 } 651 650 } 652 651 653 - if (blk_pc_request(rq)) { 652 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 654 653 timeout = rq->timeout; 655 654 } else { 656 655 timeout = ATAPI_WAIT_PC; 657 - if (!blk_fs_request(rq)) 656 + if (rq->cmd_type != REQ_TYPE_FS) 658 657 expiry = ide_cd_expiry; 659 658 } 660 659 ··· 663 662 return ide_started; 664 663 665 664 out_end: 666 - if (blk_pc_request(rq) && rc == 0) { 665 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) { 667 666 rq->resid_len = 0; 668 667 blk_end_request_all(rq, 0); 669 668 hwif->rq = NULL; ··· 671 670 if (sense && uptodate) 672 671 ide_cd_complete_failed_rq(drive, rq); 673 672 674 - if (blk_fs_request(rq)) { 673 + if (rq->cmd_type == REQ_TYPE_FS) { 675 674 if (cmd->nleft == 0) 676 675 uptodate = 1; 677 676 } else { ··· 683 682 ide_cd_error_cmd(drive, cmd); 684 683 685 684 /* make sure it's fully ended */ 686 - if (blk_fs_request(rq) == 0) { 685 + if (rq->cmd_type != REQ_TYPE_FS) { 687 686 rq->resid_len -= cmd->nbytes - cmd->nleft; 688 687 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) 689 688 rq->resid_len += cmd->last_xfer_len; ··· 743 742 ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x", 744 743 rq->cmd[0], rq->cmd_type); 745 744 746 - if (blk_pc_request(rq)) 745 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) 747 746 rq->cmd_flags |= REQ_QUIET; 748 747 else 749 748 rq->cmd_flags &= ~REQ_FAILED; ··· 784 783 if (drive->debug_mask & IDE_DBG_RQ) 785 784 blk_dump_rq_flags(rq, "ide_cd_do_request"); 786 785 787 - if (blk_fs_request(rq)) { 786 + switch (rq->cmd_type) { 787 + case REQ_TYPE_FS: 788 788 if (cdrom_start_rw(drive, rq) == ide_stopped) 789 789 goto out_end; 790 - } else if 
(blk_sense_request(rq) || blk_pc_request(rq) || 791 - rq->cmd_type == REQ_TYPE_ATA_PC) { 790 + break; 791 + case REQ_TYPE_SENSE: 792 + case REQ_TYPE_BLOCK_PC: 793 + case REQ_TYPE_ATA_PC: 792 794 if (!rq->timeout) 793 795 rq->timeout = ATAPI_WAIT_PC; 794 796 795 797 cdrom_do_block_pc(drive, rq); 796 - } else if (blk_special_request(rq)) { 798 + break; 799 + case REQ_TYPE_SPECIAL: 797 800 /* right now this can only be a reset... */ 798 801 uptodate = 1; 799 802 goto out_end; 800 - } else 803 + default: 801 804 BUG(); 805 + } 802 806 803 807 /* prepare sense request for this command */ 804 808 ide_prep_sense(drive, rq); ··· 815 809 816 810 cmd.rq = rq; 817 811 818 - if (blk_fs_request(rq) || blk_rq_bytes(rq)) { 812 + if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { 819 813 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); 820 814 ide_map_sg(drive, &cmd); 821 815 } ··· 1371 1365 1372 1366 static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq) 1373 1367 { 1374 - if (blk_fs_request(rq)) 1368 + if (rq->cmd_type == REQ_TYPE_FS) 1375 1369 return ide_cdrom_prep_fs(q, rq); 1376 - else if (blk_pc_request(rq)) 1370 + else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) 1377 1371 return ide_cdrom_prep_pc(rq); 1378 1372 1379 1373 return 0;
+1 -1
drivers/ide/ide-disk.c
··· 184 184 ide_hwif_t *hwif = drive->hwif; 185 185 186 186 BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED); 187 - BUG_ON(!blk_fs_request(rq)); 187 + BUG_ON(rq->cmd_type != REQ_TYPE_FS); 188 188 189 189 ledtrig_ide_activity(); 190 190
+3 -2
drivers/ide/ide-eh.c
··· 122 122 return ide_stopped; 123 123 124 124 /* retry only "normal" I/O: */ 125 - if (!blk_fs_request(rq)) { 125 + if (rq->cmd_type != REQ_TYPE_FS) { 126 126 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 127 127 struct ide_cmd *cmd = rq->special; 128 128 ··· 146 146 { 147 147 struct request *rq = drive->hwif->rq; 148 148 149 - if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) { 149 + if (rq && rq->cmd_type == REQ_TYPE_SPECIAL && 150 + rq->cmd[0] == REQ_DRIVE_RESET) { 150 151 if (err <= 0 && rq->errors == 0) 151 152 rq->errors = -EIO; 152 153 ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
+16 -9
drivers/ide/ide-floppy.c
··· 73 73 drive->failed_pc = NULL; 74 74 75 75 if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 || 76 - (rq && blk_pc_request(rq))) 76 + (rq && rq->cmd_type == REQ_TYPE_BLOCK_PC)) 77 77 uptodate = 1; /* FIXME */ 78 78 else if (pc->c[0] == GPCMD_REQUEST_SENSE) { 79 79 ··· 98 98 "Aborting request!\n"); 99 99 } 100 100 101 - if (blk_special_request(rq)) 101 + if (rq->cmd_type == REQ_TYPE_SPECIAL) 102 102 rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL; 103 103 104 104 return uptodate; ··· 247 247 } else 248 248 printk(KERN_ERR PFX "%s: I/O error\n", drive->name); 249 249 250 - if (blk_special_request(rq)) { 250 + if (rq->cmd_type == REQ_TYPE_SPECIAL) { 251 251 rq->errors = 0; 252 252 ide_complete_rq(drive, 0, blk_rq_bytes(rq)); 253 253 return ide_stopped; 254 254 } else 255 255 goto out_end; 256 256 } 257 - if (blk_fs_request(rq)) { 257 + 258 + switch (rq->cmd_type) { 259 + case REQ_TYPE_FS: 258 260 if (((long)blk_rq_pos(rq) % floppy->bs_factor) || 259 261 (blk_rq_sectors(rq) % floppy->bs_factor)) { 260 262 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n", ··· 265 263 } 266 264 pc = &floppy->queued_pc; 267 265 idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block); 268 - } else if (blk_special_request(rq) || blk_sense_request(rq)) { 266 + break; 267 + case REQ_TYPE_SPECIAL: 268 + case REQ_TYPE_SENSE: 269 269 pc = (struct ide_atapi_pc *)rq->special; 270 - } else if (blk_pc_request(rq)) { 270 + break; 271 + case REQ_TYPE_BLOCK_PC: 271 272 pc = &floppy->queued_pc; 272 273 idefloppy_blockpc_cmd(floppy, pc, rq); 273 - } else 274 + break; 275 + default: 274 276 BUG(); 277 + } 275 278 276 279 ide_prep_sense(drive, rq); 277 280 ··· 287 280 288 281 cmd.rq = rq; 289 282 290 - if (blk_fs_request(rq) || blk_rq_bytes(rq)) { 283 + if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { 291 284 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); 292 285 ide_map_sg(drive, &cmd); 293 286 } ··· 297 290 return ide_floppy_issue_pc(drive, &cmd, pc); 298 291 out_end: 299 292 
drive->failed_pc = NULL; 300 - if (blk_fs_request(rq) == 0 && rq->errors == 0) 293 + if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) 301 294 rq->errors = -EIO; 302 295 ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); 303 296 return ide_stopped;
+4 -4
drivers/ide/ide-io.c
··· 135 135 136 136 void ide_kill_rq(ide_drive_t *drive, struct request *rq) 137 137 { 138 - u8 drv_req = blk_special_request(rq) && rq->rq_disk; 138 + u8 drv_req = (rq->cmd_type == REQ_TYPE_SPECIAL) && rq->rq_disk; 139 139 u8 media = drive->media; 140 140 141 141 drive->failed_pc = NULL; ··· 145 145 } else { 146 146 if (media == ide_tape) 147 147 rq->errors = IDE_DRV_ERROR_GENERAL; 148 - else if (blk_fs_request(rq) == 0 && rq->errors == 0) 148 + else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) 149 149 rq->errors = -EIO; 150 150 } 151 151 ··· 307 307 { 308 308 ide_startstop_t startstop; 309 309 310 - BUG_ON(!blk_rq_started(rq)); 310 + BUG_ON(!(rq->cmd_flags & REQ_STARTED)); 311 311 312 312 #ifdef DEBUG 313 313 printk("%s: start_request: current=0x%08lx\n", ··· 353 353 pm->pm_step == IDE_PM_COMPLETED) 354 354 ide_complete_pm_rq(drive, rq); 355 355 return startstop; 356 - } else if (!rq->rq_disk && blk_special_request(rq)) 356 + } else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_SPECIAL) { 357 357 /* 358 358 * TODO: Once all ULDs have been modified to 359 359 * check for specific op codes rather than
+4 -4
drivers/ide/ide-pm.c
··· 191 191 192 192 #ifdef DEBUG_PM 193 193 printk("%s: completing PM request, %s\n", drive->name, 194 - blk_pm_suspend_request(rq) ? "suspend" : "resume"); 194 + (rq->cmd_type == REQ_TYPE_PM_SUSPEND) ? "suspend" : "resume"); 195 195 #endif 196 196 spin_lock_irqsave(q->queue_lock, flags); 197 - if (blk_pm_suspend_request(rq)) 197 + if (rq->cmd_type == REQ_TYPE_PM_SUSPEND) 198 198 blk_stop_queue(q); 199 199 else 200 200 drive->dev_flags &= ~IDE_DFLAG_BLOCKED; ··· 210 210 { 211 211 struct request_pm_state *pm = rq->special; 212 212 213 - if (blk_pm_suspend_request(rq) && 213 + if (rq->cmd_type == REQ_TYPE_PM_SUSPEND && 214 214 pm->pm_step == IDE_PM_START_SUSPEND) 215 215 /* Mark drive blocked when starting the suspend sequence. */ 216 216 drive->dev_flags |= IDE_DFLAG_BLOCKED; 217 - else if (blk_pm_resume_request(rq) && 217 + else if (rq->cmd_type == REQ_TYPE_PM_RESUME && 218 218 pm->pm_step == IDE_PM_START_RESUME) { 219 219 /* 220 220 * The first thing we do on wakeup is to wait for BSY bit to
+2 -1
drivers/ide/ide-tape.c
··· 577 577 rq->cmd[0], (unsigned long long)blk_rq_pos(rq), 578 578 blk_rq_sectors(rq)); 579 579 580 - BUG_ON(!(blk_special_request(rq) || blk_sense_request(rq))); 580 + BUG_ON(!(rq->cmd_type == REQ_TYPE_SPECIAL || 581 + rq->cmd_type == REQ_TYPE_SENSE)); 581 582 582 583 /* Retry a failed packet command */ 583 584 if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) {
+5 -5
drivers/md/dm.c
··· 792 792 { 793 793 int rw = rq_data_dir(clone); 794 794 int run_queue = 1; 795 - bool is_barrier = blk_barrier_rq(clone); 795 + bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER; 796 796 struct dm_rq_target_io *tio = clone->end_io_data; 797 797 struct mapped_device *md = tio->md; 798 798 struct request *rq = tio->orig; 799 799 800 - if (blk_pc_request(rq) && !is_barrier) { 800 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) { 801 801 rq->errors = clone->errors; 802 802 rq->resid_len = clone->resid_len; 803 803 ··· 844 844 struct request_queue *q = rq->q; 845 845 unsigned long flags; 846 846 847 - if (unlikely(blk_barrier_rq(clone))) { 847 + if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) { 848 848 /* 849 849 * Barrier clones share an original request. 850 850 * Leave it to dm_end_request(), which handles this special ··· 943 943 struct dm_rq_target_io *tio = clone->end_io_data; 944 944 struct request *rq = tio->orig; 945 945 946 - if (unlikely(blk_barrier_rq(clone))) { 946 + if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) { 947 947 /* 948 948 * Barrier clones share an original request. So can't use 949 949 * softirq_done with the original. ··· 972 972 struct dm_rq_target_io *tio = clone->end_io_data; 973 973 struct request *rq = tio->orig; 974 974 975 - if (unlikely(blk_barrier_rq(clone))) { 975 + if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) { 976 976 /* 977 977 * Barrier clones share an original request. 978 978 * Leave it to dm_end_request(), which handles this special
+2 -1
drivers/memstick/core/mspro_block.c
··· 805 805 806 806 static int mspro_block_prepare_req(struct request_queue *q, struct request *req) 807 807 { 808 - if (!blk_fs_request(req) && !blk_pc_request(req)) { 808 + if (req->cmd_type != REQ_TYPE_FS && 809 + req->cmd_type != REQ_TYPE_BLOCK_PC) { 809 810 blk_dump_rq_flags(req, "MSPro unsupported request"); 810 811 return BLKPREP_KILL; 811 812 }
+1 -1
drivers/message/i2o/i2o_block.c
··· 883 883 if (!req) 884 884 break; 885 885 886 - if (blk_fs_request(req)) { 886 + if (req->cmd_type == REQ_TYPE_FS) { 887 887 struct i2o_block_delayed_request *dreq; 888 888 struct i2o_block_request *ireq = req->special; 889 889 unsigned int queue_depth;
+1 -1
drivers/mmc/card/queue.c
··· 32 32 /* 33 33 * We only like normal block requests. 34 34 */ 35 - if (!blk_fs_request(req)) { 35 + if (req->cmd_type != REQ_TYPE_FS) { 36 36 blk_dump_rq_flags(req, "MMC bad request"); 37 37 return BLKPREP_KILL; 38 38 }
+2 -2
drivers/mtd/mtd_blkdevs.c
··· 73 73 74 74 buf = req->buffer; 75 75 76 - if (!blk_fs_request(req)) 76 + if (req->cmd_type != REQ_TYPE_FS) 77 77 return -EIO; 78 78 79 79 if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > 80 80 get_capacity(req->rq_disk)) 81 81 return -EIO; 82 82 83 - if (blk_discard_rq(req)) 83 + if (req->cmd_flags & REQ_DISCARD) 84 84 return tr->discard(dev, block, nsect); 85 85 86 86 switch(rq_data_dir(req)) {
+5 -5
drivers/scsi/scsi_error.c
··· 307 307 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) 308 308 return FAILED; 309 309 310 - if (blk_barrier_rq(scmd->request)) 310 + if (scmd->request->cmd_flags & REQ_HARDBARRIER) 311 311 /* 312 312 * barrier requests should always retry on UA 313 313 * otherwise block will get a spurious error ··· 1318 1318 case DID_OK: 1319 1319 break; 1320 1320 case DID_BUS_BUSY: 1321 - return blk_failfast_transport(scmd->request); 1321 + return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT); 1322 1322 case DID_PARITY: 1323 - return blk_failfast_dev(scmd->request); 1323 + return (scmd->request->cmd_flags & REQ_FAILFAST_DEV); 1324 1324 case DID_ERROR: 1325 1325 if (msg_byte(scmd->result) == COMMAND_COMPLETE && 1326 1326 status_byte(scmd->result) == RESERVATION_CONFLICT) 1327 1327 return 0; 1328 1328 /* fall through */ 1329 1329 case DID_SOFT_ERROR: 1330 - return blk_failfast_driver(scmd->request); 1330 + return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER); 1331 1331 } 1332 1332 1333 1333 switch (status_byte(scmd->result)) { ··· 1336 1336 * assume caller has checked sense and determinted 1337 1337 * the check condition was retryable. 1338 1338 */ 1339 - return blk_failfast_dev(scmd->request); 1339 + return (scmd->request->cmd_flags & REQ_FAILFAST_DEV); 1340 1340 } 1341 1341 1342 1342 return 0;
+3 -2
drivers/scsi/scsi_lib.c
··· 722 722 sense_deferred = scsi_sense_is_deferred(&sshdr); 723 723 } 724 724 725 - if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ 725 + if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ 726 726 req->errors = result; 727 727 if (result) { 728 728 if (sense_valid && req->sense) { ··· 757 757 } 758 758 } 759 759 760 - BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ 760 + /* no bidi support for !REQ_TYPE_BLOCK_PC yet */ 761 + BUG_ON(blk_bidi_rq(req)); 761 762 762 763 /* 763 764 * Next deal with any sectors which we were able to correctly
+6 -6
drivers/scsi/sd.c
··· 485 485 * Discard request come in as REQ_TYPE_FS but we turn them into 486 486 * block PC requests to make life easier. 487 487 */ 488 - if (blk_discard_rq(rq)) 488 + if (rq->cmd_flags & REQ_DISCARD) 489 489 ret = sd_prepare_discard(rq); 490 490 491 491 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { ··· 636 636 SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD; 637 637 SCpnt->cmnd[7] = 0x18; 638 638 SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32; 639 - SCpnt->cmnd[10] = protect | (blk_fua_rq(rq) ? 0x8 : 0); 639 + SCpnt->cmnd[10] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); 640 640 641 641 /* LBA */ 642 642 SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; ··· 661 661 SCpnt->cmnd[31] = (unsigned char) this_count & 0xff; 662 662 } else if (block > 0xffffffff) { 663 663 SCpnt->cmnd[0] += READ_16 - READ_6; 664 - SCpnt->cmnd[1] = protect | (blk_fua_rq(rq) ? 0x8 : 0); 664 + SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); 665 665 SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; 666 666 SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0; 667 667 SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0; ··· 682 682 this_count = 0xffff; 683 683 684 684 SCpnt->cmnd[0] += READ_10 - READ_6; 685 - SCpnt->cmnd[1] = protect | (blk_fua_rq(rq) ? 0x8 : 0); 685 + SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); 686 686 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; 687 687 SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; 688 688 SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; ··· 691 691 SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; 692 692 SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; 693 693 } else { 694 - if (unlikely(blk_fua_rq(rq))) { 694 + if (unlikely(rq->cmd_flags & REQ_FUA)) { 695 695 /* 696 696 * This happens only if this drive failed 697 697 * 10byte rw command with ILLEGAL_REQUEST ··· 1112 1112 u64 bad_lba; 1113 1113 int info_valid; 1114 1114 1115 - if (!blk_fs_request(scmd->request)) 1115 + if (scmd->request->cmd_type != REQ_TYPE_FS) 1116 1116 return 0; 1117 1117 1118 1118 info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
+1 -1
drivers/scsi/sun3_NCR5380.c
··· 2022 2022 if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done 2023 2023 != cmd)) 2024 2024 { 2025 - if(blk_fs_request(cmd->request)) { 2025 + if (cmd->request->cmd_type == REQ_TYPE_FS) { 2026 2026 sun3scsi_dma_setup(d, count, 2027 2027 rq_data_dir(cmd->request)); 2028 2028 sun3_dma_setup_done = cmd;
+1 -1
drivers/scsi/sun3_scsi.c
··· 524 524 struct scsi_cmnd *cmd, 525 525 int write_flag) 526 526 { 527 - if(blk_fs_request(cmd->request)) 527 + if (cmd->request->cmd_type == REQ_TYPE_FS) 528 528 return wanted; 529 529 else 530 530 return 0;
+1 -1
drivers/scsi/sun3_scsi_vme.c
··· 458 458 struct scsi_cmnd *cmd, 459 459 int write_flag) 460 460 { 461 - if(blk_fs_request(cmd->request)) 461 + if (cmd->request->cmd_type == REQ_TYPE_FS) 462 462 return wanted; 463 463 else 464 464 return 0;
+5 -3
drivers/staging/hv/blkvsc_drv.c
··· 823 823 blkvsc_req->cmnd[0] = READ_16; 824 824 } 825 825 826 - blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0; 826 + blkvsc_req->cmnd[1] |= 827 + (blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0; 827 828 828 829 *(unsigned long long *)&blkvsc_req->cmnd[2] = 829 830 cpu_to_be64(blkvsc_req->sector_start); ··· 840 839 blkvsc_req->cmnd[0] = READ_10; 841 840 } 842 841 843 - blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0; 842 + blkvsc_req->cmnd[1] |= 843 + (blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0; 844 844 845 845 *(unsigned int *)&blkvsc_req->cmnd[2] = 846 846 cpu_to_be32(blkvsc_req->sector_start); ··· 1288 1286 DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req); 1289 1287 1290 1288 blkdev = req->rq_disk->private_data; 1291 - if (blkdev->shutting_down || !blk_fs_request(req) || 1289 + if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS || 1292 1290 blkdev->media_not_present) { 1293 1291 __blk_end_request_cur(req, 0); 1294 1292 continue;
+11 -26
include/linux/blkdev.h
··· 604 604 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) 605 605 #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) 606 606 607 - #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) 608 - #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) 609 - #define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL) 610 - #define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE) 607 + #define blk_noretry_request(rq) \ 608 + ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ 609 + REQ_FAILFAST_DRIVER)) 611 610 612 - #define blk_failfast_dev(rq) ((rq)->cmd_flags & REQ_FAILFAST_DEV) 613 - #define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT) 614 - #define blk_failfast_driver(rq) ((rq)->cmd_flags & REQ_FAILFAST_DRIVER) 615 - #define blk_noretry_request(rq) (blk_failfast_dev(rq) || \ 616 - blk_failfast_transport(rq) || \ 617 - blk_failfast_driver(rq)) 618 - #define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) 619 - #define blk_rq_io_stat(rq) ((rq)->cmd_flags & REQ_IO_STAT) 620 - #define blk_rq_quiet(rq) ((rq)->cmd_flags & REQ_QUIET) 611 + #define blk_account_rq(rq) \ 612 + (((rq)->cmd_flags & REQ_STARTED) && \ 613 + ((rq)->cmd_type == REQ_TYPE_FS || \ 614 + ((rq)->cmd_flags & REQ_DISCARD))) 621 615 622 - #define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) 623 - 624 - #define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND) 625 - #define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME) 626 616 #define blk_pm_request(rq) \ 627 - (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq)) 617 + ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \ 618 + (rq)->cmd_type == REQ_TYPE_PM_RESUME) 628 619 629 620 #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) 630 - #define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED) 631 - #define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) 632 - #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) 633 - #define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD) 634 621 #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) 635 622 /* rq->queuelist of dequeued request must be list_empty() */ 636 623 #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) ··· 638 651 { 639 652 return rw_is_sync(rq->cmd_flags); 640 653 } 641 - 642 - #define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META) 643 - #define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE) 644 654 645 655 static inline int blk_queue_full(struct request_queue *q, int sync) 646 656 { ··· 671 687 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) 672 688 #define rq_mergeable(rq) \ 673 689 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ 674 - (blk_discard_rq(rq) || blk_fs_request((rq)))) 690 + (((rq)->cmd_flags & REQ_DISCARD) || \ 691 + (rq)->cmd_type == REQ_TYPE_FS)) 675 692 676 693 /* 677 694 * q->prep_rq_fn return values
+1 -1
include/linux/blktrace_api.h
··· 224 224 225 225 static inline int blk_cmd_buf_len(struct request *rq) 226 226 { 227 - return blk_pc_request(rq) ? rq->cmd_len * 3 : 1; 227 + return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1; 228 228 } 229 229 230 230 extern void blk_dump_cmd(char *buf, struct request *rq);
+10 -5
include/trace/events/block.h
··· 25 25 26 26 TP_fast_assign( 27 27 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 28 - __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); 29 - __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); 28 + __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 29 + 0 : blk_rq_pos(rq); 30 + __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 31 + 0 : blk_rq_sectors(rq); 30 32 __entry->errors = rq->errors; 31 33 32 34 blk_fill_rwbs_rq(__entry->rwbs, rq); ··· 111 109 112 110 TP_fast_assign( 113 111 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 114 - __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); 115 - __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); 116 - __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0; 112 + __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 113 + 0 : blk_rq_pos(rq); 114 + __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 115 + 0 : blk_rq_sectors(rq); 116 + __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 117 + blk_rq_bytes(rq) : 0; 117 118 118 119 blk_fill_rwbs_rq(__entry->rwbs, rq); 119 120 blk_dump_cmd(__get_str(cmd), rq);
+5 -5
kernel/trace/blktrace.c
··· 661 661 if (likely(!bt)) 662 662 return; 663 663 664 - if (blk_discard_rq(rq)) 664 + if (rq->cmd_flags & REQ_DISCARD) 665 665 rw |= (1 << BIO_RW_DISCARD); 666 666 667 - if (blk_pc_request(rq)) { 667 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 668 668 what |= BLK_TC_ACT(BLK_TC_PC); 669 669 __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw, 670 670 what, rq->errors, rq->cmd_len, rq->cmd); ··· 925 925 if (likely(!bt)) 926 926 return; 927 927 928 - if (blk_pc_request(rq)) 928 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) 929 929 __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, 930 930 BLK_TA_DRV_DATA, rq->errors, len, data); 931 931 else ··· 1730 1730 int len = rq->cmd_len; 1731 1731 unsigned char *cmd = rq->cmd; 1732 1732 1733 - if (!blk_pc_request(rq)) { 1733 + if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { 1734 1734 buf[0] = '\0'; 1735 1735 return; 1736 1736 } ··· 1779 1779 int rw = rq->cmd_flags & 0x03; 1780 1780 int bytes; 1781 1781 1782 - if (blk_discard_rq(rq)) 1782 + if (rq->cmd_flags & REQ_DISCARD) 1783 1783 rw |= (1 << BIO_RW_DISCARD); 1784 1784 1785 1785 bytes = blk_rq_bytes(rq);