Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] Split struct request ->flags into two parts

Right now ->flags is a bit of a mess: some are request types, and
others are just modifiers. Clean this up by splitting it into
->cmd_type and ->cmd_flags. This allows introduction of generic
Linux block message types, useful for sending generic Linux commands
to block devices.

Signed-off-by: Jens Axboe <axboe@suse.de>

Authored by Jens Axboe; committed by Jens Axboe
4aff5e23 77ed74da

+295 -301
+1 -1
block/as-iosched.c
··· 1335 1335 arq->state = AS_RQ_NEW; 1336 1336 1337 1337 if (rq_data_dir(arq->request) == READ 1338 - || (arq->request->flags & REQ_RW_SYNC)) 1338 + || (arq->request->cmd_flags & REQ_RW_SYNC)) 1339 1339 arq->is_sync = 1; 1340 1340 else 1341 1341 arq->is_sync = 0;
+13 -13
block/elevator.c
··· 242 242 list_for_each_prev(entry, &q->queue_head) { 243 243 struct request *pos = list_entry_rq(entry); 244 244 245 - if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED)) 245 + if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED)) 246 246 break; 247 247 if (rq->sector >= boundary) { 248 248 if (pos->sector < boundary) ··· 313 313 e->ops->elevator_deactivate_req_fn(q, rq); 314 314 } 315 315 316 - rq->flags &= ~REQ_STARTED; 316 + rq->cmd_flags &= ~REQ_STARTED; 317 317 318 318 elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE); 319 319 } ··· 344 344 345 345 switch (where) { 346 346 case ELEVATOR_INSERT_FRONT: 347 - rq->flags |= REQ_SOFTBARRIER; 347 + rq->cmd_flags |= REQ_SOFTBARRIER; 348 348 349 349 list_add(&rq->queuelist, &q->queue_head); 350 350 break; 351 351 352 352 case ELEVATOR_INSERT_BACK: 353 - rq->flags |= REQ_SOFTBARRIER; 353 + rq->cmd_flags |= REQ_SOFTBARRIER; 354 354 elv_drain_elevator(q); 355 355 list_add_tail(&rq->queuelist, &q->queue_head); 356 356 /* ··· 369 369 370 370 case ELEVATOR_INSERT_SORT: 371 371 BUG_ON(!blk_fs_request(rq)); 372 - rq->flags |= REQ_SORTED; 372 + rq->cmd_flags |= REQ_SORTED; 373 373 q->nr_sorted++; 374 374 if (q->last_merge == NULL && rq_mergeable(rq)) 375 375 q->last_merge = rq; ··· 387 387 * insertion; otherwise, requests should be requeued 388 388 * in ordseq order. 
389 389 */ 390 - rq->flags |= REQ_SOFTBARRIER; 390 + rq->cmd_flags |= REQ_SOFTBARRIER; 391 391 392 392 if (q->ordseq == 0) { 393 393 list_add(&rq->queuelist, &q->queue_head); ··· 429 429 int plug) 430 430 { 431 431 if (q->ordcolor) 432 - rq->flags |= REQ_ORDERED_COLOR; 432 + rq->cmd_flags |= REQ_ORDERED_COLOR; 433 433 434 - if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) { 434 + if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) { 435 435 /* 436 436 * toggle ordered color 437 437 */ ··· 452 452 q->end_sector = rq_end_sector(rq); 453 453 q->boundary_rq = rq; 454 454 } 455 - } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT) 455 + } else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT) 456 456 where = ELEVATOR_INSERT_BACK; 457 457 458 458 if (plug) ··· 493 493 int ret; 494 494 495 495 while ((rq = __elv_next_request(q)) != NULL) { 496 - if (!(rq->flags & REQ_STARTED)) { 496 + if (!(rq->cmd_flags & REQ_STARTED)) { 497 497 elevator_t *e = q->elevator; 498 498 499 499 /* ··· 510 510 * it, a request that has been delayed should 511 511 * not be passed by new incoming requests 512 512 */ 513 - rq->flags |= REQ_STARTED; 513 + rq->cmd_flags |= REQ_STARTED; 514 514 blk_add_trace_rq(q, rq, BLK_TA_ISSUE); 515 515 } 516 516 ··· 519 519 q->boundary_rq = NULL; 520 520 } 521 521 522 - if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn) 522 + if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn) 523 523 break; 524 524 525 525 ret = q->prep_rq_fn(q, rq); ··· 541 541 nr_bytes = rq->data_len; 542 542 543 543 blkdev_dequeue_request(rq); 544 - rq->flags |= REQ_QUIET; 544 + rq->cmd_flags |= REQ_QUIET; 545 545 end_that_request_chunk(rq, 0, nr_bytes); 546 546 end_that_request_last(rq, 0); 547 547 } else {
+35 -66
block/ll_rw_blk.c
··· 382 382 if (rq == &q->post_flush_rq) 383 383 return QUEUE_ORDSEQ_POSTFLUSH; 384 384 385 - if ((rq->flags & REQ_ORDERED_COLOR) == 386 - (q->orig_bar_rq->flags & REQ_ORDERED_COLOR)) 385 + if ((rq->cmd_flags & REQ_ORDERED_COLOR) == 386 + (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR)) 387 387 return QUEUE_ORDSEQ_DRAIN; 388 388 else 389 389 return QUEUE_ORDSEQ_DONE; ··· 446 446 end_io = post_flush_end_io; 447 447 } 448 448 449 + rq->cmd_flags = REQ_HARDBARRIER; 449 450 rq_init(q, rq); 450 - rq->flags = REQ_HARDBARRIER; 451 451 rq->elevator_private = NULL; 452 452 rq->rq_disk = q->bar_rq.rq_disk; 453 453 rq->rl = NULL; ··· 471 471 blkdev_dequeue_request(rq); 472 472 q->orig_bar_rq = rq; 473 473 rq = &q->bar_rq; 474 + rq->cmd_flags = 0; 474 475 rq_init(q, rq); 475 - rq->flags = bio_data_dir(q->orig_bar_rq->bio); 476 - rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0; 476 + if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) 477 + rq->cmd_flags |= REQ_RW; 478 + rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? 
REQ_FUA : 0; 477 479 rq->elevator_private = NULL; 478 480 rq->rl = NULL; 479 481 init_request_from_bio(rq, q->orig_bar_rq->bio); ··· 1126 1124 } 1127 1125 1128 1126 list_del_init(&rq->queuelist); 1129 - rq->flags &= ~REQ_QUEUED; 1127 + rq->cmd_flags &= ~REQ_QUEUED; 1130 1128 rq->tag = -1; 1131 1129 1132 1130 if (unlikely(bqt->tag_index[tag] == NULL)) ··· 1162 1160 struct blk_queue_tag *bqt = q->queue_tags; 1163 1161 int tag; 1164 1162 1165 - if (unlikely((rq->flags & REQ_QUEUED))) { 1163 + if (unlikely((rq->cmd_flags & REQ_QUEUED))) { 1166 1164 printk(KERN_ERR 1167 1165 "%s: request %p for device [%s] already tagged %d", 1168 1166 __FUNCTION__, rq, ··· 1176 1174 1177 1175 __set_bit(tag, bqt->tag_map); 1178 1176 1179 - rq->flags |= REQ_QUEUED; 1177 + rq->cmd_flags |= REQ_QUEUED; 1180 1178 rq->tag = tag; 1181 1179 bqt->tag_index[tag] = rq; 1182 1180 blkdev_dequeue_request(rq); ··· 1212 1210 printk(KERN_ERR 1213 1211 "%s: bad tag found on list\n", __FUNCTION__); 1214 1212 list_del_init(&rq->queuelist); 1215 - rq->flags &= ~REQ_QUEUED; 1213 + rq->cmd_flags &= ~REQ_QUEUED; 1216 1214 } else 1217 1215 blk_queue_end_tag(q, rq); 1218 1216 1219 - rq->flags &= ~REQ_STARTED; 1217 + rq->cmd_flags &= ~REQ_STARTED; 1220 1218 __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0); 1221 1219 } 1222 1220 } 1223 1221 1224 1222 EXPORT_SYMBOL(blk_queue_invalidate_tags); 1225 1223 1226 - static const char * const rq_flags[] = { 1227 - "REQ_RW", 1228 - "REQ_FAILFAST", 1229 - "REQ_SORTED", 1230 - "REQ_SOFTBARRIER", 1231 - "REQ_HARDBARRIER", 1232 - "REQ_FUA", 1233 - "REQ_CMD", 1234 - "REQ_NOMERGE", 1235 - "REQ_STARTED", 1236 - "REQ_DONTPREP", 1237 - "REQ_QUEUED", 1238 - "REQ_ELVPRIV", 1239 - "REQ_PC", 1240 - "REQ_BLOCK_PC", 1241 - "REQ_SENSE", 1242 - "REQ_FAILED", 1243 - "REQ_QUIET", 1244 - "REQ_SPECIAL", 1245 - "REQ_DRIVE_CMD", 1246 - "REQ_DRIVE_TASK", 1247 - "REQ_DRIVE_TASKFILE", 1248 - "REQ_PREEMPT", 1249 - "REQ_PM_SUSPEND", 1250 - "REQ_PM_RESUME", 1251 - "REQ_PM_SHUTDOWN", 1252 - 
"REQ_ORDERED_COLOR", 1253 - }; 1254 - 1255 1224 void blk_dump_rq_flags(struct request *rq, char *msg) 1256 1225 { 1257 1226 int bit; 1258 1227 1259 - printk("%s: dev %s: flags = ", msg, 1260 - rq->rq_disk ? rq->rq_disk->disk_name : "?"); 1261 - bit = 0; 1262 - do { 1263 - if (rq->flags & (1 << bit)) 1264 - printk("%s ", rq_flags[bit]); 1265 - bit++; 1266 - } while (bit < __REQ_NR_BITS); 1228 + printk("%s: dev %s: type=%x, flags=%x\n", msg, 1229 + rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, 1230 + rq->cmd_flags); 1267 1231 1268 1232 printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector, 1269 1233 rq->nr_sectors, 1270 1234 rq->current_nr_sectors); 1271 1235 printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len); 1272 1236 1273 - if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) { 1237 + if (blk_pc_request(rq)) { 1274 1238 printk("cdb: "); 1275 1239 for (bit = 0; bit < sizeof(rq->cmd); bit++) 1276 1240 printk("%02x ", rq->cmd[bit]); ··· 1409 1441 int nr_phys_segs = bio_phys_segments(q, bio); 1410 1442 1411 1443 if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { 1412 - req->flags |= REQ_NOMERGE; 1444 + req->cmd_flags |= REQ_NOMERGE; 1413 1445 if (req == q->last_merge) 1414 1446 q->last_merge = NULL; 1415 1447 return 0; ··· 1432 1464 1433 1465 if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments 1434 1466 || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { 1435 - req->flags |= REQ_NOMERGE; 1467 + req->cmd_flags |= REQ_NOMERGE; 1436 1468 if (req == q->last_merge) 1437 1469 q->last_merge = NULL; 1438 1470 return 0; ··· 1459 1491 max_sectors = q->max_sectors; 1460 1492 1461 1493 if (req->nr_sectors + bio_sectors(bio) > max_sectors) { 1462 - req->flags |= REQ_NOMERGE; 1494 + req->cmd_flags |= REQ_NOMERGE; 1463 1495 if (req == q->last_merge) 1464 1496 q->last_merge = NULL; 1465 1497 return 0; ··· 1498 1530 1499 1531 1500 1532 if (req->nr_sectors + 
bio_sectors(bio) > max_sectors) { 1501 - req->flags |= REQ_NOMERGE; 1533 + req->cmd_flags |= REQ_NOMERGE; 1502 1534 if (req == q->last_merge) 1503 1535 q->last_merge = NULL; 1504 1536 return 0; ··· 1997 2029 1998 2030 static inline void blk_free_request(request_queue_t *q, struct request *rq) 1999 2031 { 2000 - if (rq->flags & REQ_ELVPRIV) 2032 + if (rq->cmd_flags & REQ_ELVPRIV) 2001 2033 elv_put_request(q, rq); 2002 2034 mempool_free(rq, q->rq.rq_pool); 2003 2035 } ··· 2012 2044 return NULL; 2013 2045 2014 2046 /* 2015 - * first three bits are identical in rq->flags and bio->bi_rw, 2047 + * first three bits are identical in rq->cmd_flags and bio->bi_rw, 2016 2048 * see bio.h and blkdev.h 2017 2049 */ 2018 - rq->flags = rw; 2050 + rq->cmd_flags = rw; 2019 2051 2020 2052 if (priv) { 2021 2053 if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) { 2022 2054 mempool_free(rq, q->rq.rq_pool); 2023 2055 return NULL; 2024 2056 } 2025 - rq->flags |= REQ_ELVPRIV; 2057 + rq->cmd_flags |= REQ_ELVPRIV; 2026 2058 } 2027 2059 2028 2060 return rq; ··· 2319 2351 * must not attempt merges on this) and that it acts as a soft 2320 2352 * barrier 2321 2353 */ 2322 - rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER; 2354 + rq->cmd_type = REQ_TYPE_SPECIAL; 2355 + rq->cmd_flags |= REQ_SOFTBARRIER; 2323 2356 2324 2357 rq->special = data; 2325 2358 ··· 2527 2558 int where = at_head ? 
ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; 2528 2559 2529 2560 rq->rq_disk = bd_disk; 2530 - rq->flags |= REQ_NOMERGE; 2561 + rq->cmd_flags |= REQ_NOMERGE; 2531 2562 rq->end_io = done; 2532 2563 WARN_ON(irqs_disabled()); 2533 2564 spin_lock_irq(q->queue_lock); ··· 2697 2728 */ 2698 2729 if (rl) { 2699 2730 int rw = rq_data_dir(req); 2700 - int priv = req->flags & REQ_ELVPRIV; 2731 + int priv = req->cmd_flags & REQ_ELVPRIV; 2701 2732 2702 2733 BUG_ON(!list_empty(&req->queuelist)); 2703 2734 ··· 2859 2890 2860 2891 static void init_request_from_bio(struct request *req, struct bio *bio) 2861 2892 { 2862 - req->flags |= REQ_CMD; 2893 + req->cmd_type = REQ_TYPE_FS; 2863 2894 2864 2895 /* 2865 2896 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST) 2866 2897 */ 2867 2898 if (bio_rw_ahead(bio) || bio_failfast(bio)) 2868 - req->flags |= REQ_FAILFAST; 2899 + req->cmd_flags |= REQ_FAILFAST; 2869 2900 2870 2901 /* 2871 2902 * REQ_BARRIER implies no merging, but lets make it explicit 2872 2903 */ 2873 2904 if (unlikely(bio_barrier(bio))) 2874 - req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE); 2905 + req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE); 2875 2906 2876 2907 if (bio_sync(bio)) 2877 - req->flags |= REQ_RW_SYNC; 2908 + req->cmd_flags |= REQ_RW_SYNC; 2878 2909 2879 2910 req->errors = 0; 2880 2911 req->hard_sector = req->sector = bio->bi_sector; ··· 3275 3306 req->errors = 0; 3276 3307 3277 3308 if (!uptodate) { 3278 - if (blk_fs_request(req) && !(req->flags & REQ_QUIET)) 3309 + if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET)) 3279 3310 printk("end_request: I/O error, dev %s, sector %llu\n", 3280 3311 req->rq_disk ? 
req->rq_disk->disk_name : "?", 3281 3312 (unsigned long long)req->sector); ··· 3538 3569 3539 3570 void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio) 3540 3571 { 3541 - /* first two bits are identical in rq->flags and bio->bi_rw */ 3542 - rq->flags |= (bio->bi_rw & 3); 3572 + /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ 3573 + rq->cmd_flags |= (bio->bi_rw & 3); 3543 3574 3544 3575 rq->nr_phys_segments = bio_phys_segments(q, bio); 3545 3576 rq->nr_hw_segments = bio_hw_segments(q, bio);
+3 -3
block/scsi_ioctl.c
··· 294 294 rq->sense = sense; 295 295 rq->sense_len = 0; 296 296 297 - rq->flags |= REQ_BLOCK_PC; 297 + rq->cmd_type = REQ_TYPE_BLOCK_PC; 298 298 bio = rq->bio; 299 299 300 300 /* ··· 470 470 memset(sense, 0, sizeof(sense)); 471 471 rq->sense = sense; 472 472 rq->sense_len = 0; 473 - rq->flags |= REQ_BLOCK_PC; 473 + rq->cmd_type = REQ_TYPE_BLOCK_PC; 474 474 475 475 blk_execute_rq(q, disk, rq, 0); 476 476 ··· 502 502 int err; 503 503 504 504 rq = blk_get_request(q, WRITE, __GFP_WAIT); 505 - rq->flags |= REQ_BLOCK_PC; 505 + rq->cmd_type = REQ_TYPE_BLOCK_PC; 506 506 rq->data = NULL; 507 507 rq->data_len = 0; 508 508 rq->timeout = BLK_DEFAULT_TIMEOUT;
+2 -2
drivers/block/floppy.c
··· 2991 2991 if (usage_count == 0) { 2992 2992 printk("warning: usage count=0, current_req=%p exiting\n", 2993 2993 current_req); 2994 - printk("sect=%ld flags=%lx\n", (long)current_req->sector, 2995 - current_req->flags); 2994 + printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector, 2995 + current_req->cmd_type, current_req->cmd_flags); 2996 2996 return; 2997 2997 } 2998 2998 if (test_bit(0, &fdc_busy)) {
+4 -4
drivers/block/nbd.c
··· 407 407 struct nbd_device *lo; 408 408 409 409 blkdev_dequeue_request(req); 410 - dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%lx)\n", 411 - req->rq_disk->disk_name, req, req->flags); 410 + dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", 411 + req->rq_disk->disk_name, req, req->cmd_type); 412 412 413 - if (!(req->flags & REQ_CMD)) 413 + if (!blk_fs_request(req)) 414 414 goto error_out; 415 415 416 416 lo = req->rq_disk->private_data; ··· 489 489 switch (cmd) { 490 490 case NBD_DISCONNECT: 491 491 printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name); 492 - sreq.flags = REQ_SPECIAL; 492 + sreq.cmd_type = REQ_TYPE_SPECIAL; 493 493 nbd_cmd(&sreq) = NBD_CMD_DISC; 494 494 /* 495 495 * Set these to sane values in case server implementation
+1 -1
drivers/block/paride/pd.c
··· 437 437 438 438 static enum action do_pd_io_start(void) 439 439 { 440 - if (pd_req->flags & REQ_SPECIAL) { 440 + if (blk_special_request(pd_req)) { 441 441 phase = pd_special; 442 442 return pd_special(); 443 443 }
+3 -3
drivers/block/pktcdvd.c
··· 365 365 rq->sense = sense; 366 366 memset(sense, 0, sizeof(sense)); 367 367 rq->sense_len = 0; 368 - rq->flags |= REQ_BLOCK_PC | REQ_HARDBARRIER; 368 + rq->cmd_type = REQ_TYPE_BLOCK_PC; 369 + rq->cmd_flags |= REQ_HARDBARRIER; 369 370 if (cgc->quiet) 370 - rq->flags |= REQ_QUIET; 371 + rq->cmd_flags |= REQ_QUIET; 371 372 memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE); 372 373 if (sizeof(rq->cmd) > CDROM_PACKET_SIZE) 373 374 memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE); 374 375 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]); 375 376 376 377 rq->ref_count++; 377 - rq->flags |= REQ_NOMERGE; 378 378 rq->waiting = &wait; 379 379 rq->end_io = blk_end_sync_rq; 380 380 elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
+1 -1
drivers/block/xd.c
··· 313 313 int res = 0; 314 314 int retry; 315 315 316 - if (!(req->flags & REQ_CMD)) { 316 + if (!blk_fs_request(req)) { 317 317 end_request(req, 0); 318 318 continue; 319 319 }
+1 -1
drivers/cdrom/cdrom.c
··· 2129 2129 rq->cmd[9] = 0xf8; 2130 2130 2131 2131 rq->cmd_len = 12; 2132 - rq->flags |= REQ_BLOCK_PC; 2132 + rq->cmd_type = REQ_TYPE_BLOCK_PC; 2133 2133 rq->timeout = 60 * HZ; 2134 2134 bio = rq->bio; 2135 2135
+3 -1
drivers/cdrom/cdu31a.c
··· 1338 1338 } 1339 1339 1340 1340 /* WTF??? */ 1341 - if (!(req->flags & REQ_CMD)) 1341 + if (!blk_fs_request(req)) { 1342 + end_request(req, 0); 1342 1343 continue; 1344 + } 1343 1345 if (rq_data_dir(req) == WRITE) { 1344 1346 end_request(req, 0); 1345 1347 continue;
+35 -34
drivers/ide/ide-cd.c
··· 372 372 { 373 373 int log = 0; 374 374 375 - if (!sense || !rq || (rq->flags & REQ_QUIET)) 375 + if (!sense || !rq || (rq->cmd_flags & REQ_QUIET)) 376 376 return 0; 377 377 378 378 switch (sense->sense_key) { ··· 597 597 struct cdrom_info *cd = drive->driver_data; 598 598 599 599 ide_init_drive_cmd(rq); 600 - rq->flags = REQ_PC; 600 + rq->cmd_type = REQ_TYPE_BLOCK_PC; 601 601 rq->rq_disk = cd->disk; 602 602 } 603 603 ··· 617 617 rq->cmd[0] = GPCMD_REQUEST_SENSE; 618 618 rq->cmd[4] = rq->data_len = 18; 619 619 620 - rq->flags = REQ_SENSE; 620 + rq->cmd_type = REQ_TYPE_SENSE; 621 621 622 622 /* NOTE! Save the failed command in "rq->buffer" */ 623 623 rq->buffer = (void *) failed_command; ··· 630 630 struct request *rq = HWGROUP(drive)->rq; 631 631 int nsectors = rq->hard_cur_sectors; 632 632 633 - if ((rq->flags & REQ_SENSE) && uptodate) { 633 + if (blk_sense_request(rq) && uptodate) { 634 634 /* 635 - * For REQ_SENSE, "rq->buffer" points to the original failed 636 - * request 635 + * For REQ_TYPE_SENSE, "rq->buffer" points to the original 636 + * failed request 637 637 */ 638 638 struct request *failed = (struct request *) rq->buffer; 639 639 struct cdrom_info *info = drive->driver_data; ··· 706 706 return 1; 707 707 } 708 708 709 - if (rq->flags & REQ_SENSE) { 709 + if (blk_sense_request(rq)) { 710 710 /* We got an error trying to get sense info 711 711 from the drive (probably while trying 712 712 to recover from a former error). Just give up. */ 713 713 714 - rq->flags |= REQ_FAILED; 714 + rq->cmd_flags |= REQ_FAILED; 715 715 cdrom_end_request(drive, 0); 716 716 ide_error(drive, "request sense failure", stat); 717 717 return 1; 718 718 719 - } else if (rq->flags & (REQ_PC | REQ_BLOCK_PC)) { 719 + } else if (blk_pc_request(rq)) { 720 720 /* All other functions, except for READ. 
*/ 721 721 unsigned long flags; 722 722 ··· 724 724 * if we have an error, pass back CHECK_CONDITION as the 725 725 * scsi status byte 726 726 */ 727 - if ((rq->flags & REQ_BLOCK_PC) && !rq->errors) 727 + if (!rq->errors) 728 728 rq->errors = SAM_STAT_CHECK_CONDITION; 729 729 730 730 /* Check for tray open. */ ··· 735 735 cdrom_saw_media_change (drive); 736 736 /*printk("%s: media changed\n",drive->name);*/ 737 737 return 0; 738 - } else if (!(rq->flags & REQ_QUIET)) { 738 + } else if (!(rq->cmd_flags & REQ_QUIET)) { 739 739 /* Otherwise, print an error. */ 740 740 ide_dump_status(drive, "packet command error", stat); 741 741 } 742 742 743 - rq->flags |= REQ_FAILED; 743 + rq->cmd_flags |= REQ_FAILED; 744 744 745 745 /* 746 746 * instead of playing games with moving completions around, ··· 881 881 wait = ATAPI_WAIT_PC; 882 882 break; 883 883 default: 884 - if (!(rq->flags & REQ_QUIET)) 884 + if (!(rq->cmd_flags & REQ_QUIET)) 885 885 printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n", rq->cmd[0]); 886 886 wait = 0; 887 887 break; ··· 1124 1124 if (rq->current_nr_sectors > 0) { 1125 1125 printk (KERN_ERR "%s: cdrom_read_intr: data underrun (%d blocks)\n", 1126 1126 drive->name, rq->current_nr_sectors); 1127 - rq->flags |= REQ_FAILED; 1127 + rq->cmd_flags |= REQ_FAILED; 1128 1128 cdrom_end_request(drive, 0); 1129 1129 } else 1130 1130 cdrom_end_request(drive, 1); ··· 1456 1456 printk ("%s: cdrom_pc_intr: data underrun %d\n", 1457 1457 drive->name, pc->buflen); 1458 1458 */ 1459 - rq->flags |= REQ_FAILED; 1459 + rq->cmd_flags |= REQ_FAILED; 1460 1460 cdrom_end_request(drive, 0); 1461 1461 } 1462 1462 return ide_stopped; ··· 1509 1509 rq->data += thislen; 1510 1510 rq->data_len -= thislen; 1511 1511 1512 - if (rq->flags & REQ_SENSE) 1512 + if (blk_sense_request(rq)) 1513 1513 rq->sense_len += thislen; 1514 1514 } else { 1515 1515 confused: ··· 1517 1517 "appears confused (ireason = 0x%02x). 
" 1518 1518 "Trying to recover by ending request.\n", 1519 1519 drive->name, ireason); 1520 - rq->flags |= REQ_FAILED; 1520 + rq->cmd_flags |= REQ_FAILED; 1521 1521 cdrom_end_request(drive, 0); 1522 1522 return ide_stopped; 1523 1523 } ··· 1546 1546 struct cdrom_info *info = drive->driver_data; 1547 1547 1548 1548 info->dma = 0; 1549 - rq->flags &= ~REQ_FAILED; 1549 + rq->cmd_flags &= ~REQ_FAILED; 1550 1550 len = rq->data_len; 1551 1551 1552 1552 /* Start sending the command to the drive. */ ··· 1558 1558 { 1559 1559 struct request_sense sense; 1560 1560 int retries = 10; 1561 - unsigned int flags = rq->flags; 1561 + unsigned int flags = rq->cmd_flags; 1562 1562 1563 1563 if (rq->sense == NULL) 1564 1564 rq->sense = &sense; ··· 1567 1567 do { 1568 1568 int error; 1569 1569 unsigned long time = jiffies; 1570 - rq->flags = flags; 1570 + rq->cmd_flags = flags; 1571 1571 1572 1572 error = ide_do_drive_cmd(drive, rq, ide_wait); 1573 1573 time = jiffies - time; 1574 1574 1575 1575 /* FIXME: we should probably abort/retry or something 1576 1576 * in case of failure */ 1577 - if (rq->flags & REQ_FAILED) { 1577 + if (rq->cmd_flags & REQ_FAILED) { 1578 1578 /* The request failed. Retry if it was due to a unit 1579 1579 attention status 1580 1580 (usually means media was changed). */ ··· 1596 1596 } 1597 1597 1598 1598 /* End of retry loop. */ 1599 - } while ((rq->flags & REQ_FAILED) && retries >= 0); 1599 + } while ((rq->cmd_flags & REQ_FAILED) && retries >= 0); 1600 1600 1601 1601 /* Return an error if the command failed. */ 1602 - return (rq->flags & REQ_FAILED) ? -EIO : 0; 1602 + return (rq->cmd_flags & REQ_FAILED) ? 
-EIO : 0; 1603 1603 } 1604 1604 1605 1605 /* ··· 1963 1963 { 1964 1964 struct cdrom_info *info = drive->driver_data; 1965 1965 1966 - rq->flags |= REQ_QUIET; 1966 + rq->cmd_flags |= REQ_QUIET; 1967 1967 1968 1968 info->dma = 0; 1969 1969 ··· 2023 2023 } 2024 2024 info->last_block = block; 2025 2025 return action; 2026 - } else if (rq->flags & (REQ_PC | REQ_SENSE)) { 2026 + } else if (rq->cmd_type == REQ_TYPE_SENSE) { 2027 2027 return cdrom_do_packet_command(drive); 2028 - } else if (rq->flags & REQ_BLOCK_PC) { 2028 + } else if (blk_pc_request(rq)) { 2029 2029 return cdrom_do_block_pc(drive, rq); 2030 - } else if (rq->flags & REQ_SPECIAL) { 2030 + } else if (blk_special_request(rq)) { 2031 2031 /* 2032 2032 * right now this can only be a reset... 2033 2033 */ ··· 2105 2105 2106 2106 req.sense = sense; 2107 2107 req.cmd[0] = GPCMD_TEST_UNIT_READY; 2108 - req.flags |= REQ_QUIET; 2108 + req.cmd_flags |= REQ_QUIET; 2109 2109 2110 2110 #if ! STANDARD_ATAPI 2111 2111 /* the Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to ··· 2207 2207 req.cmd[0] = GPCMD_READ_CDVD_CAPACITY; 2208 2208 req.data = (char *)&capbuf; 2209 2209 req.data_len = sizeof(capbuf); 2210 - req.flags |= REQ_QUIET; 2210 + req.cmd_flags |= REQ_QUIET; 2211 2211 2212 2212 stat = cdrom_queue_packet_command(drive, &req); 2213 2213 if (stat == 0) { ··· 2230 2230 req.sense = sense; 2231 2231 req.data = buf; 2232 2232 req.data_len = buflen; 2233 - req.flags |= REQ_QUIET; 2233 + req.cmd_flags |= REQ_QUIET; 2234 2234 req.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; 2235 2235 req.cmd[6] = trackno; 2236 2236 req.cmd[7] = (buflen >> 8); ··· 2531 2531 req.timeout = cgc->timeout; 2532 2532 2533 2533 if (cgc->quiet) 2534 - req.flags |= REQ_QUIET; 2534 + req.cmd_flags |= REQ_QUIET; 2535 2535 2536 2536 req.sense = cgc->sense; 2537 2537 cgc->stat = cdrom_queue_packet_command(drive, &req); ··· 2629 2629 int ret; 2630 2630 2631 2631 cdrom_prepare_request(drive, &req); 2632 - req.flags = REQ_SPECIAL | REQ_QUIET; 2632 + req.cmd_type = 
REQ_TYPE_SPECIAL; 2633 + req.cmd_flags = REQ_QUIET; 2633 2634 ret = ide_do_drive_cmd(drive, &req, ide_wait); 2634 2635 2635 2636 /* ··· 3117 3116 3118 3117 static int ide_cdrom_prep_fn(request_queue_t *q, struct request *rq) 3119 3118 { 3120 - if (rq->flags & REQ_CMD) 3119 + if (blk_fs_request(rq)) 3121 3120 return ide_cdrom_prep_fs(q, rq); 3122 - else if (rq->flags & REQ_BLOCK_PC) 3121 + else if (blk_pc_request(rq)) 3123 3122 return ide_cdrom_prep_pc(rq); 3124 3123 3125 3124 return 0;
+3 -2
drivers/ide/ide-disk.c
··· 699 699 rq->cmd[0] = WIN_FLUSH_CACHE; 700 700 701 701 702 - rq->flags |= REQ_DRIVE_TASK; 702 + rq->cmd_type = REQ_TYPE_ATA_TASK; 703 + rq->cmd_flags |= REQ_SOFTBARRIER; 703 704 rq->buffer = rq->cmd; 704 705 } 705 706 ··· 741 740 if (drive->special.b.set_multmode) 742 741 return -EBUSY; 743 742 ide_init_drive_cmd (&rq); 744 - rq.flags = REQ_DRIVE_CMD; 743 + rq.cmd_type = REQ_TYPE_ATA_CMD; 745 744 drive->mult_req = arg; 746 745 drive->special.b.set_multmode = 1; 747 746 (void) ide_do_drive_cmd (drive, &rq, ide_wait);
+1 -1
drivers/ide/ide-dma.c
··· 205 205 ide_hwif_t *hwif = HWIF(drive); 206 206 struct scatterlist *sg = hwif->sg_table; 207 207 208 - BUG_ON((rq->flags & REQ_DRIVE_TASKFILE) && rq->nr_sectors > 256); 208 + BUG_ON((rq->cmd_type == REQ_TYPE_ATA_TASKFILE) && rq->nr_sectors > 256); 209 209 210 210 ide_map_sg(drive, rq); 211 211
+7 -7
drivers/ide/ide-floppy.c
··· 588 588 /* Why does this happen? */ 589 589 if (!rq) 590 590 return 0; 591 - if (!(rq->flags & REQ_SPECIAL)) { //if (!IDEFLOPPY_RQ_CMD (rq->cmd)) { 591 + if (!blk_special_request(rq)) { 592 592 /* our real local end request function */ 593 593 ide_end_request(drive, uptodate, nsecs); 594 594 return 0; ··· 689 689 690 690 ide_init_drive_cmd(rq); 691 691 rq->buffer = (char *) pc; 692 - rq->flags = REQ_SPECIAL; //rq->cmd = IDEFLOPPY_PC_RQ; 692 + rq->cmd_type = REQ_TYPE_SPECIAL; 693 693 rq->rq_disk = floppy->disk; 694 694 (void) ide_do_drive_cmd(drive, rq, ide_preempt); 695 695 } ··· 1250 1250 pc->callback = &idefloppy_rw_callback; 1251 1251 pc->rq = rq; 1252 1252 pc->b_count = cmd == READ ? 0 : rq->bio->bi_size; 1253 - if (rq->flags & REQ_RW) 1253 + if (rq->cmd_flags & REQ_RW) 1254 1254 set_bit(PC_WRITING, &pc->flags); 1255 1255 pc->buffer = NULL; 1256 1256 pc->request_transfer = pc->buffer_size = blocks * floppy->block_size; ··· 1303 1303 idefloppy_do_end_request(drive, 0, 0); 1304 1304 return ide_stopped; 1305 1305 } 1306 - if (rq->flags & REQ_CMD) { 1306 + if (blk_fs_request(rq)) { 1307 1307 if (((long)rq->sector % floppy->bs_factor) || 1308 1308 (rq->nr_sectors % floppy->bs_factor)) { 1309 1309 printk("%s: unsupported r/w request size\n", ··· 1313 1313 } 1314 1314 pc = idefloppy_next_pc_storage(drive); 1315 1315 idefloppy_create_rw_cmd(floppy, pc, rq, block); 1316 - } else if (rq->flags & REQ_SPECIAL) { 1316 + } else if (blk_special_request(rq)) { 1317 1317 pc = (idefloppy_pc_t *) rq->buffer; 1318 - } else if (rq->flags & REQ_BLOCK_PC) { 1318 + } else if (blk_pc_request(rq)) { 1319 1319 pc = idefloppy_next_pc_storage(drive); 1320 1320 if (idefloppy_blockpc_cmd(floppy, pc, rq)) { 1321 1321 idefloppy_do_end_request(drive, 0, 0); ··· 1343 1343 1344 1344 ide_init_drive_cmd (&rq); 1345 1345 rq.buffer = (char *) pc; 1346 - rq.flags = REQ_SPECIAL; // rq.cmd = IDEFLOPPY_PC_RQ; 1346 + rq.cmd_type = REQ_TYPE_SPECIAL; 1347 1347 rq.rq_disk = floppy->disk; 1348 1348 1349 
1349 return ide_do_drive_cmd(drive, &rq, ide_wait);
+18 -18
drivers/ide/ide-io.c
··· 59 59 { 60 60 int ret = 1; 61 61 62 - BUG_ON(!(rq->flags & REQ_STARTED)); 62 + BUG_ON(!blk_rq_started(rq)); 63 63 64 64 /* 65 65 * if failfast is set on a request, override number of sectors and ··· 244 244 245 245 spin_lock_irqsave(&ide_lock, flags); 246 246 247 - BUG_ON(!(rq->flags & REQ_STARTED)); 247 + BUG_ON(!blk_rq_started(rq)); 248 248 249 249 /* 250 250 * if failfast is set on a request, override number of sectors and ··· 366 366 rq = HWGROUP(drive)->rq; 367 367 spin_unlock_irqrestore(&ide_lock, flags); 368 368 369 - if (rq->flags & REQ_DRIVE_CMD) { 369 + if (rq->cmd_type == REQ_TYPE_ATA_CMD) { 370 370 u8 *args = (u8 *) rq->buffer; 371 371 if (rq->errors == 0) 372 372 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); ··· 376 376 args[1] = err; 377 377 args[2] = hwif->INB(IDE_NSECTOR_REG); 378 378 } 379 - } else if (rq->flags & REQ_DRIVE_TASK) { 379 + } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) { 380 380 u8 *args = (u8 *) rq->buffer; 381 381 if (rq->errors == 0) 382 382 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); ··· 390 390 args[5] = hwif->INB(IDE_HCYL_REG); 391 391 args[6] = hwif->INB(IDE_SELECT_REG); 392 392 } 393 - } else if (rq->flags & REQ_DRIVE_TASKFILE) { 393 + } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 394 394 ide_task_t *args = (ide_task_t *) rq->special; 395 395 if (rq->errors == 0) 396 396 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); ··· 587 587 return ide_stopped; 588 588 589 589 /* retry only "normal" I/O: */ 590 - if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE)) { 590 + if (!blk_fs_request(rq)) { 591 591 rq->errors = 1; 592 592 ide_end_drive_cmd(drive, stat, err); 593 593 return ide_stopped; ··· 638 638 return ide_stopped; 639 639 640 640 /* retry only "normal" I/O: */ 641 - if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE)) { 641 + if (!blk_fs_request(rq)) { 642 642 rq->errors = 1; 643 643 ide_end_drive_cmd(drive, BUSY_STAT, 0); 644 644 return ide_stopped; ··· 808 808 if 
(hwif->sg_mapped) /* needed by ide-scsi */ 809 809 return; 810 810 811 - if ((rq->flags & REQ_DRIVE_TASKFILE) == 0) { 811 + if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) { 812 812 hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg); 813 813 } else { 814 814 sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE); ··· 844 844 struct request *rq) 845 845 { 846 846 ide_hwif_t *hwif = HWIF(drive); 847 - if (rq->flags & REQ_DRIVE_TASKFILE) { 847 + if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 848 848 ide_task_t *args = rq->special; 849 849 850 850 if (!args) ··· 866 866 if (args->tf_out_flags.all != 0) 867 867 return flagged_taskfile(drive, args); 868 868 return do_rw_taskfile(drive, args); 869 - } else if (rq->flags & REQ_DRIVE_TASK) { 869 + } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) { 870 870 u8 *args = rq->buffer; 871 871 u8 sel; 872 872 ··· 892 892 hwif->OUTB(sel, IDE_SELECT_REG); 893 893 ide_cmd(drive, args[0], args[2], &drive_cmd_intr); 894 894 return ide_started; 895 - } else if (rq->flags & REQ_DRIVE_CMD) { 895 + } else if (rq->cmd_type == REQ_TYPE_ATA_CMD) { 896 896 u8 *args = rq->buffer; 897 897 898 898 if (!args) ··· 980 980 ide_startstop_t startstop; 981 981 sector_t block; 982 982 983 - BUG_ON(!(rq->flags & REQ_STARTED)); 983 + BUG_ON(!blk_rq_started(rq)); 984 984 985 985 #ifdef DEBUG 986 986 printk("%s: start_request: current=0x%08lx\n", ··· 1013 1013 if (!drive->special.all) { 1014 1014 ide_driver_t *drv; 1015 1015 1016 - if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK)) 1017 - return execute_drive_cmd(drive, rq); 1018 - else if (rq->flags & REQ_DRIVE_TASKFILE) 1016 + if (rq->cmd_type == REQ_TYPE_ATA_CMD || 1017 + rq->cmd_type == REQ_TYPE_ATA_TASK || 1018 + rq->cmd_type == REQ_TYPE_ATA_TASKFILE) 1019 1019 return execute_drive_cmd(drive, rq); 1020 1020 else if (blk_pm_request(rq)) { 1021 1021 struct request_pm_state *pm = rq->end_io_data; ··· 1264 1264 * We count how many times we loop here to make sure we service 1265 1265 * all drives in the hwgroup 
without looping for ever 1266 1266 */ 1267 - if (drive->blocked && !blk_pm_request(rq) && !(rq->flags & REQ_PREEMPT)) { 1267 + if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) { 1268 1268 drive = drive->next ? drive->next : hwgroup->drive; 1269 1269 if (loops++ < 4 && !blk_queue_plugged(drive->queue)) 1270 1270 goto again; ··· 1670 1670 void ide_init_drive_cmd (struct request *rq) 1671 1671 { 1672 1672 memset(rq, 0, sizeof(*rq)); 1673 - rq->flags = REQ_DRIVE_CMD; 1673 + rq->cmd_type = REQ_TYPE_ATA_CMD; 1674 1674 rq->ref_count = 1; 1675 1675 } 1676 1676 ··· 1727 1727 hwgroup->rq = NULL; 1728 1728 if (action == ide_preempt || action == ide_head_wait) { 1729 1729 where = ELEVATOR_INSERT_FRONT; 1730 - rq->flags |= REQ_PREEMPT; 1730 + rq->cmd_flags |= REQ_PREEMPT; 1731 1731 } 1732 1732 __elv_add_request(drive->queue, rq, where, 0); 1733 1733 ide_do_request(hwgroup, IDE_NO_IRQ);
+3 -2
drivers/ide/ide-lib.c
··· 456 456 spin_unlock(&ide_lock); 457 457 if (!rq) 458 458 return; 459 - if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK)) { 459 + if (rq->cmd_type == REQ_TYPE_ATA_CMD || 460 + rq->cmd_type == REQ_TYPE_ATA_TASK) { 460 461 char *args = rq->buffer; 461 462 if (args) { 462 463 opcode = args[0]; 463 464 found = 1; 464 465 } 465 - } else if (rq->flags & REQ_DRIVE_TASKFILE) { 466 + } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 466 467 ide_task_t *args = rq->special; 467 468 if (args) { 468 469 task_struct_t *tf = (task_struct_t *) args->tfRegister;
+4 -4
drivers/ide/ide-tape.c
··· 1776 1776 static void idetape_init_rq(struct request *rq, u8 cmd) 1777 1777 { 1778 1778 memset(rq, 0, sizeof(*rq)); 1779 - rq->flags = REQ_SPECIAL; 1779 + rq->cmd_type = REQ_TYPE_SPECIAL; 1780 1780 rq->cmd[0] = cmd; 1781 1781 } 1782 1782 ··· 2433 2433 rq->sector, rq->nr_sectors, rq->current_nr_sectors); 2434 2434 #endif /* IDETAPE_DEBUG_LOG */ 2435 2435 2436 - if ((rq->flags & REQ_SPECIAL) == 0) { 2436 + if (!blk_special_request(rq)) { 2437 2437 /* 2438 2438 * We do not support buffer cache originated requests. 2439 2439 */ 2440 2440 printk(KERN_NOTICE "ide-tape: %s: Unsupported request in " 2441 - "request queue (%ld)\n", drive->name, rq->flags); 2441 + "request queue (%d)\n", drive->name, rq->cmd_type); 2442 2442 ide_end_request(drive, 0, 0); 2443 2443 return ide_stopped; 2444 2444 } ··· 2768 2768 idetape_tape_t *tape = drive->driver_data; 2769 2769 2770 2770 #if IDETAPE_DEBUG_BUGS 2771 - if (rq == NULL || (rq->flags & REQ_SPECIAL) == 0) { 2771 + if (rq == NULL || !blk_special_request(rq)) { 2772 2772 printk (KERN_ERR "ide-tape: bug: Trying to sleep on non-valid request\n"); 2773 2773 return; 2774 2774 }
+4 -4
drivers/ide/ide-taskfile.c
··· 363 363 364 364 static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat) 365 365 { 366 - if (rq->flags & REQ_DRIVE_TASKFILE) { 366 + if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 367 367 ide_task_t *task = rq->special; 368 368 369 369 if (task->tf_out_flags.all) { ··· 474 474 struct request rq; 475 475 476 476 memset(&rq, 0, sizeof(rq)); 477 - rq.flags = REQ_DRIVE_TASKFILE; 477 + rq.cmd_type = REQ_TYPE_ATA_TASKFILE; 478 478 rq.buffer = buf; 479 479 480 480 /* ··· 499 499 rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors; 500 500 501 501 if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE) 502 - rq.flags |= REQ_RW; 502 + rq.cmd_flags |= REQ_RW; 503 503 } 504 504 505 505 rq.special = args; ··· 737 737 struct request rq; 738 738 739 739 ide_init_drive_cmd(&rq); 740 - rq.flags = REQ_DRIVE_TASK; 740 + rq.cmd_type = REQ_TYPE_ATA_TASK; 741 741 rq.buffer = buf; 742 742 return ide_do_drive_cmd(drive, &rq, ide_wait); 743 743 }
+2 -2
drivers/ide/ide.c
··· 1217 1217 memset(&rq, 0, sizeof(rq)); 1218 1218 memset(&rqpm, 0, sizeof(rqpm)); 1219 1219 memset(&args, 0, sizeof(args)); 1220 - rq.flags = REQ_PM_SUSPEND; 1220 + rq.cmd_type = REQ_TYPE_PM_SUSPEND; 1221 1221 rq.special = &args; 1222 1222 rq.end_io_data = &rqpm; 1223 1223 rqpm.pm_step = ide_pm_state_start_suspend; ··· 1238 1238 memset(&rq, 0, sizeof(rq)); 1239 1239 memset(&rqpm, 0, sizeof(rqpm)); 1240 1240 memset(&args, 0, sizeof(args)); 1241 - rq.flags = REQ_PM_RESUME; 1241 + rq.cmd_type = REQ_TYPE_PM_RESUME; 1242 1242 rq.special = &args; 1243 1243 rq.end_io_data = &rqpm; 1244 1244 rqpm.pm_step = ide_pm_state_start_resume;
+1 -1
drivers/ide/legacy/hd.c
··· 626 626 req->rq_disk->disk_name, (req->cmd == READ)?"read":"writ", 627 627 cyl, head, sec, nsect, req->buffer); 628 628 #endif 629 - if (req->flags & REQ_CMD) { 629 + if (blk_fs_request(req)) { 630 630 switch (rq_data_dir(req)) { 631 631 case READ: 632 632 hd_out(disk,nsect,sec,head,cyl,WIN_READ,&read_intr);
+2 -1
drivers/md/dm-emc.c
··· 126 126 memset(&rq->cmd, 0, BLK_MAX_CDB); 127 127 128 128 rq->timeout = EMC_FAILOVER_TIMEOUT; 129 - rq->flags |= (REQ_BLOCK_PC | REQ_FAILFAST | REQ_NOMERGE); 129 + rq->cmd_type = REQ_TYPE_BLOCK_PC; 130 + rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE; 130 131 131 132 return rq; 132 133 }
+4 -3
drivers/message/i2o/i2o_block.c
··· 390 390 } 391 391 392 392 /* request is already processed by us, so return */ 393 - if (req->flags & REQ_SPECIAL) { 393 + if (blk_special_request(req)) { 394 394 osm_debug("REQ_SPECIAL already set!\n"); 395 - req->flags |= REQ_DONTPREP; 395 + req->cmd_flags |= REQ_DONTPREP; 396 396 return BLKPREP_OK; 397 397 } 398 398 ··· 411 411 ireq = req->special; 412 412 413 413 /* do not come back here */ 414 - req->flags |= REQ_DONTPREP | REQ_SPECIAL; 414 + req->cmd_type = REQ_TYPE_SPECIAL; 415 + req->cmd_flags |= REQ_DONTPREP; 415 416 416 417 return BLKPREP_OK; 417 418 };
+3 -3
drivers/mmc/mmc_queue.c
··· 28 28 struct mmc_queue *mq = q->queuedata; 29 29 int ret = BLKPREP_KILL; 30 30 31 - if (req->flags & REQ_SPECIAL) { 31 + if (blk_special_request(req)) { 32 32 /* 33 33 * Special commands already have the command 34 34 * blocks already setup in req->special. ··· 36 36 BUG_ON(!req->special); 37 37 38 38 ret = BLKPREP_OK; 39 - } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { 39 + } else if (blk_fs_request(req) || blk_pc_request(req)) { 40 40 /* 41 41 * Block I/O requests need translating according 42 42 * to the protocol. ··· 50 50 } 51 51 52 52 if (ret == BLKPREP_OK) 53 - req->flags |= REQ_DONTPREP; 53 + req->cmd_flags |= REQ_DONTPREP; 54 54 55 55 return ret; 56 56 }
+1 -1
drivers/mtd/mtd_blkdevs.c
··· 46 46 nsect = req->current_nr_sectors; 47 47 buf = req->buffer; 48 48 49 - if (!(req->flags & REQ_CMD)) 49 + if (!blk_fs_request(req)) 50 50 return 0; 51 51 52 52 if (block + nsect > get_capacity(req->rq_disk))
+1 -1
drivers/s390/block/dasd_diag.c
··· 529 529 } 530 530 cqr->retries = DIAG_MAX_RETRIES; 531 531 cqr->buildclk = get_clock(); 532 - if (req->flags & REQ_FAILFAST) 532 + if (req->cmd_flags & REQ_FAILFAST) 533 533 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 534 534 cqr->device = device; 535 535 cqr->expires = DIAG_TIMEOUT;
+1 -1
drivers/s390/block/dasd_eckd.c
··· 1266 1266 recid++; 1267 1267 } 1268 1268 } 1269 - if (req->flags & REQ_FAILFAST) 1269 + if (req->cmd_flags & REQ_FAILFAST) 1270 1270 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1271 1271 cqr->device = device; 1272 1272 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
+1 -1
drivers/s390/block/dasd_fba.c
··· 344 344 recid++; 345 345 } 346 346 } 347 - if (req->flags & REQ_FAILFAST) 347 + if (req->cmd_flags & REQ_FAILFAST) 348 348 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 349 349 cqr->device = device; 350 350 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
+2 -2
drivers/scsi/aic7xxx_old.c
··· 2862 2862 aic_dev->r_total++; 2863 2863 ptr = aic_dev->r_bins; 2864 2864 } 2865 - if(cmd->device->simple_tags && cmd->request->flags & REQ_HARDBARRIER) 2865 + if(cmd->device->simple_tags && cmd->request->cmd_flags & REQ_HARDBARRIER) 2866 2866 { 2867 2867 aic_dev->barrier_total++; 2868 2868 if(scb->tag_action == MSG_ORDERED_Q_TAG) ··· 10158 10158 /* We always force TEST_UNIT_READY to untagged */ 10159 10159 if (cmd->cmnd[0] != TEST_UNIT_READY && sdptr->simple_tags) 10160 10160 { 10161 - if (req->flags & REQ_HARDBARRIER) 10161 + if (req->cmd_flags & REQ_HARDBARRIER) 10162 10162 { 10163 10163 if(sdptr->ordered_tags) 10164 10164 {
+7 -7
drivers/scsi/ide-scsi.c
··· 344 344 pc->buffer = buf; 345 345 pc->c[0] = REQUEST_SENSE; 346 346 pc->c[4] = pc->request_transfer = pc->buffer_size = SCSI_SENSE_BUFFERSIZE; 347 - rq->flags = REQ_SENSE; 347 + rq->cmd_type = REQ_TYPE_SENSE; 348 348 pc->timeout = jiffies + WAIT_READY; 349 349 /* NOTE! Save the failed packet command in "rq->buffer" */ 350 350 rq->buffer = (void *) failed_command->special; ··· 398 398 int errors = rq->errors; 399 399 unsigned long flags; 400 400 401 - if (!(rq->flags & (REQ_SPECIAL|REQ_SENSE))) { 401 + if (!blk_special_request(rq) && !blk_sense_request(rq)) { 402 402 ide_end_request(drive, uptodate, nrsecs); 403 403 return 0; 404 404 } 405 405 ide_end_drive_cmd (drive, 0, 0); 406 - if (rq->flags & REQ_SENSE) { 406 + if (blk_sense_request(rq)) { 407 407 idescsi_pc_t *opc = (idescsi_pc_t *) rq->buffer; 408 408 if (log) { 409 409 printk ("ide-scsi: %s: wrap up check %lu, rst = ", drive->name, opc->scsi_cmd->serial_number); ··· 712 712 printk (KERN_INFO "sector: %ld, nr_sectors: %ld, current_nr_sectors: %d\n",rq->sector,rq->nr_sectors,rq->current_nr_sectors); 713 713 #endif /* IDESCSI_DEBUG_LOG */ 714 714 715 - if (rq->flags & (REQ_SPECIAL|REQ_SENSE)) { 715 + if (blk_sense_request(rq) || blk_special_request(rq)) { 716 716 return idescsi_issue_pc (drive, (idescsi_pc_t *) rq->special); 717 717 } 718 718 blk_dump_rq_flags(rq, "ide-scsi: unsup command"); ··· 938 938 939 939 ide_init_drive_cmd (rq); 940 940 rq->special = (char *) pc; 941 - rq->flags = REQ_SPECIAL; 941 + rq->cmd_type = REQ_TYPE_SPECIAL; 942 942 spin_unlock_irq(host->host_lock); 943 943 rq->rq_disk = scsi->disk; 944 944 (void) ide_do_drive_cmd (drive, rq, ide_end); ··· 992 992 */ 993 993 printk (KERN_ERR "ide-scsi: cmd aborted!\n"); 994 994 995 - if (scsi->pc->rq->flags & REQ_SENSE) 995 + if (blk_sense_request(scsi->pc->rq)) 996 996 kfree(scsi->pc->buffer); 997 997 kfree(scsi->pc->rq); 998 998 kfree(scsi->pc); ··· 1042 1042 /* kill current request */ 1043 1043 blkdev_dequeue_request(req); 1044 1044 
end_that_request_last(req, 0); 1045 - if (req->flags & REQ_SENSE) 1045 + if (blk_sense_request(req)) 1046 1046 kfree(scsi->pc->buffer); 1047 1047 kfree(scsi->pc); 1048 1048 scsi->pc = NULL;
+3 -3
drivers/scsi/pluto.c
··· 67 67 68 68 static void __init pluto_detect_scsi_done(Scsi_Cmnd *SCpnt) 69 69 { 70 - SCpnt->request->rq_status = RQ_SCSI_DONE; 71 70 PLND(("Detect done %08lx\n", (long)SCpnt)) 72 71 if (atomic_dec_and_test (&fcss)) 73 72 up(&fc_sem); ··· 165 166 166 167 SCpnt->cmd_len = COMMAND_SIZE(INQUIRY); 167 168 168 - SCpnt->request->rq_status = RQ_SCSI_BUSY; 169 + SCpnt->request->cmd_flags &= ~REQ_STARTED; 169 170 170 171 SCpnt->done = pluto_detect_done; 171 172 SCpnt->request_bufflen = 256; ··· 177 178 for (retry = 0; retry < 5; retry++) { 178 179 for (i = 0; i < fcscount; i++) { 179 180 if (!fcs[i].fc) break; 180 - if (fcs[i].cmd.request->rq_status != RQ_SCSI_DONE) { 181 + if (!(fcs[i].cmd.request->cmd_flags & REQ_STARTED)) { 182 + fcs[i].cmd.request->cmd_flags |= REQ_STARTED; 181 183 disable_irq(fcs[i].fc->irq); 182 184 PLND(("queuecommand %d %d\n", retry, i)) 183 185 fcp_scsi_queuecommand (&(fcs[i].cmd),
+19 -18
drivers/scsi/scsi_lib.c
··· 82 82 { 83 83 struct scsi_cmnd *cmd = req->special; 84 84 85 - req->flags &= ~REQ_DONTPREP; 85 + req->cmd_flags &= ~REQ_DONTPREP; 86 86 req->special = NULL; 87 87 88 88 scsi_put_command(cmd); ··· 196 196 req->sense_len = 0; 197 197 req->retries = retries; 198 198 req->timeout = timeout; 199 - req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET; 199 + req->cmd_type = REQ_TYPE_BLOCK_PC; 200 + req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT; 200 201 201 202 /* 202 203 * head injection *required* here otherwise quiesce won't work ··· 398 397 req = blk_get_request(sdev->request_queue, write, gfp); 399 398 if (!req) 400 399 goto free_sense; 401 - req->flags |= REQ_BLOCK_PC | REQ_QUIET; 400 + req->cmd_type = REQ_TYPE_BLOCK_PC; 401 + req->cmd_flags |= REQ_QUIET; 402 402 403 403 if (use_sg) 404 404 err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp); ··· 935 933 break; 936 934 } 937 935 } 938 - if (!(req->flags & REQ_QUIET)) { 936 + if (!(req->cmd_flags & REQ_QUIET)) { 939 937 scmd_printk(KERN_INFO, cmd, 940 938 "Device not ready: "); 941 939 scsi_print_sense_hdr("", &sshdr); ··· 943 941 scsi_end_request(cmd, 0, this_count, 1); 944 942 return; 945 943 case VOLUME_OVERFLOW: 946 - if (!(req->flags & REQ_QUIET)) { 944 + if (!(req->cmd_flags & REQ_QUIET)) { 947 945 scmd_printk(KERN_INFO, cmd, 948 946 "Volume overflow, CDB: "); 949 947 __scsi_print_command(cmd->cmnd); ··· 965 963 return; 966 964 } 967 965 if (result) { 968 - if (!(req->flags & REQ_QUIET)) { 966 + if (!(req->cmd_flags & REQ_QUIET)) { 969 967 scmd_printk(KERN_INFO, cmd, 970 968 "SCSI error: return code = 0x%08x\n", 971 969 result); ··· 997 995 /* 998 996 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer 999 997 */ 1000 - if ((req->flags & REQ_BLOCK_PC) && !req->bio) { 998 + if (blk_pc_request(req) && !req->bio) { 1001 999 cmd->request_bufflen = req->data_len; 1002 1000 cmd->request_buffer = req->data; 1003 1001 req->buffer = req->data; ··· 1141 1139 * these two cases 
differently. We differentiate by looking 1142 1140 * at request->cmd, as this tells us the real story. 1143 1141 */ 1144 - if (req->flags & REQ_SPECIAL && req->special) { 1142 + if (blk_special_request(req) && req->special) 1145 1143 cmd = req->special; 1146 - } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { 1147 - 1148 - if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) { 1149 - if(specials_only == SDEV_QUIESCE || 1150 - specials_only == SDEV_BLOCK) 1144 + else if (blk_pc_request(req) || blk_fs_request(req)) { 1145 + if (unlikely(specials_only) && !(req->cmd_flags & REQ_PREEMPT)){ 1146 + if (specials_only == SDEV_QUIESCE || 1147 + specials_only == SDEV_BLOCK) 1151 1148 goto defer; 1152 1149 1153 1150 sdev_printk(KERN_ERR, sdev, 1154 1151 "rejecting I/O to device being removed\n"); 1155 1152 goto kill; 1156 1153 } 1157 - 1158 1154 1159 1155 /* 1160 1156 * Now try and find a command block that we can use. ··· 1184 1184 * lock. We hope REQ_STARTED prevents anything untoward from 1185 1185 * happening now. 1186 1186 */ 1187 - if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) { 1187 + if (blk_fs_request(req) || blk_pc_request(req)) { 1188 1188 int ret; 1189 1189 1190 1190 /* ··· 1216 1216 /* 1217 1217 * Initialize the actual SCSI command for this request. 
1218 1218 */ 1219 - if (req->flags & REQ_BLOCK_PC) { 1219 + if (blk_pc_request(req)) { 1220 1220 scsi_setup_blk_pc_cmnd(cmd); 1221 1221 } else if (req->rq_disk) { 1222 1222 struct scsi_driver *drv; ··· 1233 1233 /* 1234 1234 * The request is now prepped, no need to come back here 1235 1235 */ 1236 - req->flags |= REQ_DONTPREP; 1236 + req->cmd_flags |= REQ_DONTPREP; 1237 1237 return BLKPREP_OK; 1238 1238 1239 1239 defer: ··· 1454 1454 if (unlikely(cmd == NULL)) { 1455 1455 printk(KERN_CRIT "impossible request in %s.\n" 1456 1456 "please mail a stack trace to " 1457 - "linux-scsi@vger.kernel.org", 1457 + "linux-scsi@vger.kernel.org\n", 1458 1458 __FUNCTION__); 1459 + blk_dump_rq_flags(req, "foo"); 1459 1460 BUG(); 1460 1461 } 1461 1462 spin_lock(shost->host_lock);
+2 -3
drivers/scsi/sd.c
··· 443 443 SCpnt->cmnd[0] = READ_6; 444 444 SCpnt->sc_data_direction = DMA_FROM_DEVICE; 445 445 } else { 446 - printk(KERN_ERR "sd: Unknown command %lx\n", rq->flags); 447 - /* overkill panic("Unknown sd command %lx\n", rq->flags); */ 446 + printk(KERN_ERR "sd: Unknown command %x\n", rq->cmd_flags); 448 447 return 0; 449 448 } 450 449 ··· 839 840 static void sd_prepare_flush(request_queue_t *q, struct request *rq) 840 841 { 841 842 memset(rq->cmd, 0, sizeof(rq->cmd)); 842 - rq->flags |= REQ_BLOCK_PC; 843 + rq->cmd_type = REQ_TYPE_BLOCK_PC; 843 844 rq->timeout = SD_TIMEOUT; 844 845 rq->cmd[0] = SYNCHRONIZE_CACHE; 845 846 rq->cmd_len = 10;
+1 -1
drivers/scsi/sun3_NCR5380.c
··· 2017 2017 if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done 2018 2018 != cmd)) 2019 2019 { 2020 - if(cmd->request->flags & REQ_CMD) { 2020 + if(blk_fs_request(cmd->request)) { 2021 2021 sun3scsi_dma_setup(d, count, 2022 2022 rq_data_dir(cmd->request)); 2023 2023 sun3_dma_setup_done = cmd;
+1 -1
drivers/scsi/sun3_scsi.c
··· 524 524 static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, Scsi_Cmnd *cmd, 525 525 int write_flag) 526 526 { 527 - if(cmd->request->flags & REQ_CMD) 527 + if(blk_fs_request(cmd->request)) 528 528 return wanted; 529 529 else 530 530 return 0;
+1 -1
drivers/scsi/sun3_scsi_vme.c
··· 458 458 static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, Scsi_Cmnd *cmd, 459 459 int write_flag) 460 460 { 461 - if(cmd->request->flags & REQ_CMD) 461 + if(blk_fs_request(cmd->request)) 462 462 return wanted; 463 463 else 464 464 return 0;
+99 -81
include/linux/blkdev.h
··· 120 120 wait_queue_head_t wait[2]; 121 121 }; 122 122 123 + /* 124 + * request command types 125 + */ 126 + enum rq_cmd_type_bits { 127 + REQ_TYPE_FS = 1, /* fs request */ 128 + REQ_TYPE_BLOCK_PC, /* scsi command */ 129 + REQ_TYPE_SENSE, /* sense request */ 130 + REQ_TYPE_PM_SUSPEND, /* suspend request */ 131 + REQ_TYPE_PM_RESUME, /* resume request */ 132 + REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ 133 + REQ_TYPE_FLUSH, /* flush request */ 134 + REQ_TYPE_SPECIAL, /* driver defined type */ 135 + REQ_TYPE_LINUX_BLOCK, /* generic block layer message */ 136 + /* 137 + * for ATA/ATAPI devices. this really doesn't belong here, ide should 138 + * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver 139 + * private REQ_LB opcodes to differentiate what type of request this is 140 + */ 141 + REQ_TYPE_ATA_CMD, 142 + REQ_TYPE_ATA_TASK, 143 + REQ_TYPE_ATA_TASKFILE, 144 + }; 145 + 146 + /* 147 + * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being 148 + * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a 149 + * SCSI cdb. 150 + * 151 + * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need, 152 + * typically to differentiate REQ_TYPE_SPECIAL requests. 153 + * 154 + */ 155 + enum { 156 + /* 157 + * just examples for now 158 + */ 159 + REQ_LB_OP_EJECT = 0x40, /* eject request */ 160 + REQ_LB_OP_FLUSH = 0x41, /* flush device */ 161 + }; 162 + 163 + /* 164 + * request type modified bits. first three bits match BIO_RW* bits, important 165 + */ 166 + enum rq_flag_bits { 167 + __REQ_RW, /* not set, read. 
set, write */ 168 + __REQ_FAILFAST, /* no low level driver retries */ 169 + __REQ_SORTED, /* elevator knows about this request */ 170 + __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ 171 + __REQ_HARDBARRIER, /* may not be passed by drive either */ 172 + __REQ_FUA, /* forced unit access */ 173 + __REQ_NOMERGE, /* don't touch this for merging */ 174 + __REQ_STARTED, /* drive already may have started this one */ 175 + __REQ_DONTPREP, /* don't call prep for this one */ 176 + __REQ_QUEUED, /* uses queueing */ 177 + __REQ_ELVPRIV, /* elevator private data attached */ 178 + __REQ_FAILED, /* set if the request failed */ 179 + __REQ_QUIET, /* don't worry about errors */ 180 + __REQ_PREEMPT, /* set for "ide_preempt" requests */ 181 + __REQ_ORDERED_COLOR, /* is before or after barrier */ 182 + __REQ_RW_SYNC, /* request is sync (O_DIRECT) */ 183 + __REQ_NR_BITS, /* stops here */ 184 + }; 185 + 186 + #define REQ_RW (1 << __REQ_RW) 187 + #define REQ_FAILFAST (1 << __REQ_FAILFAST) 188 + #define REQ_SORTED (1 << __REQ_SORTED) 189 + #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) 190 + #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) 191 + #define REQ_FUA (1 << __REQ_FUA) 192 + #define REQ_NOMERGE (1 << __REQ_NOMERGE) 193 + #define REQ_STARTED (1 << __REQ_STARTED) 194 + #define REQ_DONTPREP (1 << __REQ_DONTPREP) 195 + #define REQ_QUEUED (1 << __REQ_QUEUED) 196 + #define REQ_ELVPRIV (1 << __REQ_ELVPRIV) 197 + #define REQ_FAILED (1 << __REQ_FAILED) 198 + #define REQ_QUIET (1 << __REQ_QUIET) 199 + #define REQ_PREEMPT (1 << __REQ_PREEMPT) 200 + #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) 201 + #define REQ_RW_SYNC (1 << __REQ_RW_SYNC) 202 + 123 203 #define BLK_MAX_CDB 16 124 204 125 205 /* ··· 209 129 struct list_head queuelist; 210 130 struct list_head donelist; 211 131 212 - unsigned long flags; /* see REQ_ bits below */ 132 + unsigned int cmd_flags; 133 + enum rq_cmd_type_bits cmd_type; 213 134 214 135 /* Maintain bio traversal state for part by part I/O 
submission. 215 136 * hard_* are block layer internals, no driver should touch them! ··· 283 202 }; 284 203 285 204 /* 286 - * first three bits match BIO_RW* bits, important 287 - */ 288 - enum rq_flag_bits { 289 - __REQ_RW, /* not set, read. set, write */ 290 - __REQ_FAILFAST, /* no low level driver retries */ 291 - __REQ_SORTED, /* elevator knows about this request */ 292 - __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ 293 - __REQ_HARDBARRIER, /* may not be passed by drive either */ 294 - __REQ_FUA, /* forced unit access */ 295 - __REQ_CMD, /* is a regular fs rw request */ 296 - __REQ_NOMERGE, /* don't touch this for merging */ 297 - __REQ_STARTED, /* drive already may have started this one */ 298 - __REQ_DONTPREP, /* don't call prep for this one */ 299 - __REQ_QUEUED, /* uses queueing */ 300 - __REQ_ELVPRIV, /* elevator private data attached */ 301 - /* 302 - * for ATA/ATAPI devices 303 - */ 304 - __REQ_PC, /* packet command (special) */ 305 - __REQ_BLOCK_PC, /* queued down pc from block layer */ 306 - __REQ_SENSE, /* sense retrival */ 307 - 308 - __REQ_FAILED, /* set if the request failed */ 309 - __REQ_QUIET, /* don't worry about errors */ 310 - __REQ_SPECIAL, /* driver suplied command */ 311 - __REQ_DRIVE_CMD, 312 - __REQ_DRIVE_TASK, 313 - __REQ_DRIVE_TASKFILE, 314 - __REQ_PREEMPT, /* set for "ide_preempt" requests */ 315 - __REQ_PM_SUSPEND, /* suspend request */ 316 - __REQ_PM_RESUME, /* resume request */ 317 - __REQ_PM_SHUTDOWN, /* shutdown request */ 318 - __REQ_ORDERED_COLOR, /* is before or after barrier */ 319 - __REQ_RW_SYNC, /* request is sync (O_DIRECT) */ 320 - __REQ_NR_BITS, /* stops here */ 321 - }; 322 - 323 - #define REQ_RW (1 << __REQ_RW) 324 - #define REQ_FAILFAST (1 << __REQ_FAILFAST) 325 - #define REQ_SORTED (1 << __REQ_SORTED) 326 - #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) 327 - #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) 328 - #define REQ_FUA (1 << __REQ_FUA) 329 - #define REQ_CMD (1 << __REQ_CMD) 330 - #define 
REQ_NOMERGE (1 << __REQ_NOMERGE) 331 - #define REQ_STARTED (1 << __REQ_STARTED) 332 - #define REQ_DONTPREP (1 << __REQ_DONTPREP) 333 - #define REQ_QUEUED (1 << __REQ_QUEUED) 334 - #define REQ_ELVPRIV (1 << __REQ_ELVPRIV) 335 - #define REQ_PC (1 << __REQ_PC) 336 - #define REQ_BLOCK_PC (1 << __REQ_BLOCK_PC) 337 - #define REQ_SENSE (1 << __REQ_SENSE) 338 - #define REQ_FAILED (1 << __REQ_FAILED) 339 - #define REQ_QUIET (1 << __REQ_QUIET) 340 - #define REQ_SPECIAL (1 << __REQ_SPECIAL) 341 - #define REQ_DRIVE_CMD (1 << __REQ_DRIVE_CMD) 342 - #define REQ_DRIVE_TASK (1 << __REQ_DRIVE_TASK) 343 - #define REQ_DRIVE_TASKFILE (1 << __REQ_DRIVE_TASKFILE) 344 - #define REQ_PREEMPT (1 << __REQ_PREEMPT) 345 - #define REQ_PM_SUSPEND (1 << __REQ_PM_SUSPEND) 346 - #define REQ_PM_RESUME (1 << __REQ_PM_RESUME) 347 - #define REQ_PM_SHUTDOWN (1 << __REQ_PM_SHUTDOWN) 348 - #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) 349 - #define REQ_RW_SYNC (1 << __REQ_RW_SYNC) 350 - 351 - /* 352 - * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME 205 + * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME 353 206 * requests. Some step values could eventually be made generic. 
354 207 */ 355 208 struct request_pm_state ··· 505 490 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 506 491 #define blk_queue_flushing(q) ((q)->ordseq) 507 492 508 - #define blk_fs_request(rq) ((rq)->flags & REQ_CMD) 509 - #define blk_pc_request(rq) ((rq)->flags & REQ_BLOCK_PC) 510 - #define blk_noretry_request(rq) ((rq)->flags & REQ_FAILFAST) 511 - #define blk_rq_started(rq) ((rq)->flags & REQ_STARTED) 493 + #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) 494 + #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) 495 + #define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL) 496 + #define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE) 497 + 498 + #define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST) 499 + #define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) 512 500 513 501 #define blk_account_rq(rq) (blk_rq_started(rq) && blk_fs_request(rq)) 514 502 515 - #define blk_pm_suspend_request(rq) ((rq)->flags & REQ_PM_SUSPEND) 516 - #define blk_pm_resume_request(rq) ((rq)->flags & REQ_PM_RESUME) 503 + #define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND) 504 + #define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME) 517 505 #define blk_pm_request(rq) \ 518 - ((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME)) 506 + (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq)) 519 507 520 - #define blk_sorted_rq(rq) ((rq)->flags & REQ_SORTED) 521 - #define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER) 522 - #define blk_fua_rq(rq) ((rq)->flags & REQ_FUA) 508 + #define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED) 509 + #define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) 510 + #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) 523 511 524 512 #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) 525 513 526 - #define rq_data_dir(rq) ((rq)->flags & 1) 514 + #define rq_data_dir(rq) ((rq)->cmd_flags & 1) 527 515 528 
516 static inline int blk_queue_full(struct request_queue *q, int rw) 529 517 { ··· 559 541 #define RQ_NOMERGE_FLAGS \ 560 542 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) 561 543 #define rq_mergeable(rq) \ 562 - (!((rq)->flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq))) 544 + (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq))) 563 545 564 546 /* 565 547 * noop, requests are automagically marked as active/inactive by I/O ··· 755 737 */ 756 738 #define blk_queue_tag_depth(q) ((q)->queue_tags->busy) 757 739 #define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth) 758 - #define blk_rq_tagged(rq) ((rq)->flags & REQ_QUEUED) 740 + #define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) 759 741 extern int blk_queue_start_tag(request_queue_t *, struct request *); 760 742 extern struct request *blk_queue_find_tag(request_queue_t *, int); 761 743 extern void blk_queue_end_tag(request_queue_t *, struct request *);
+1 -1
include/linux/blktrace_api.h
··· 148 148 u32 what) 149 149 { 150 150 struct blk_trace *bt = q->blk_trace; 151 - int rw = rq->flags & 0x03; 151 + int rw = rq->cmd_flags & 0x03; 152 152 153 153 if (likely(!bt)) 154 154 return;
+1 -1
include/scsi/scsi_tcq.h
··· 100 100 struct scsi_device *sdev = cmd->device; 101 101 102 102 if (blk_rq_tagged(req)) { 103 - if (sdev->ordered_tags && req->flags & REQ_HARDBARRIER) 103 + if (sdev->ordered_tags && req->cmd_flags & REQ_HARDBARRIER) 104 104 *msg++ = MSG_ORDERED_TAG; 105 105 else 106 106 *msg++ = MSG_SIMPLE_TAG;