Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[BLOCK] update SCSI to use new blk_ordered for barriers

All ordered request related stuff delegated to HLD. Midlayer
now doesn't deal with ordered setting or prepare_flush
callback. sd.c updated to deal with blk_queue_ordered
setting. Currently, ordered tag isn't used as SCSI midlayer
cannot guarantee request ordering.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>

authored by

Tejun Heo and committed by
Jens Axboe
461d4e90 797e7dbb

+20 -95
-9
drivers/scsi/hosts.c
··· 347 347 shost->cmd_per_lun = sht->cmd_per_lun; 348 348 shost->unchecked_isa_dma = sht->unchecked_isa_dma; 349 349 shost->use_clustering = sht->use_clustering; 350 - shost->ordered_flush = sht->ordered_flush; 351 350 shost->ordered_tag = sht->ordered_tag; 352 - 353 - /* 354 - * hosts/devices that do queueing must support ordered tags 355 - */ 356 - if (shost->can_queue > 1 && shost->ordered_flush) { 357 - printk(KERN_ERR "scsi: ordered flushes don't support queueing\n"); 358 - shost->ordered_flush = 0; 359 - } 360 351 361 352 if (sht->max_host_blocked) 362 353 shost->max_host_blocked = sht->max_host_blocked;
-46
drivers/scsi/scsi_lib.c
··· 932 932 int sense_valid = 0; 933 933 int sense_deferred = 0; 934 934 935 - if (blk_complete_barrier_rq(q, req, good_bytes >> 9)) 936 - return; 937 - 938 935 /* 939 936 * Free up any indirection buffers we allocated for DMA purposes. 940 937 * For the case of a READ, we need to copy the data out of the ··· 1194 1197 scsi_release_buffers(cmd); 1195 1198 scsi_put_command(cmd); 1196 1199 return BLKPREP_KILL; 1197 - } 1198 - 1199 - static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq) 1200 - { 1201 - struct scsi_device *sdev = q->queuedata; 1202 - struct scsi_driver *drv; 1203 - 1204 - if (sdev->sdev_state == SDEV_RUNNING) { 1205 - drv = *(struct scsi_driver **) rq->rq_disk->private_data; 1206 - 1207 - if (drv->prepare_flush) 1208 - return drv->prepare_flush(q, rq); 1209 - } 1210 - 1211 - return 0; 1212 - } 1213 - 1214 - static void scsi_end_flush_fn(request_queue_t *q, struct request *rq) 1215 - { 1216 - struct scsi_device *sdev = q->queuedata; 1217 - struct request *flush_rq = rq->end_io_data; 1218 - struct scsi_driver *drv; 1219 - 1220 - if (flush_rq->errors) { 1221 - printk("scsi: barrier error, disabling flush support\n"); 1222 - blk_queue_ordered(q, QUEUE_ORDERED_NONE); 1223 - } 1224 - 1225 - if (sdev->sdev_state == SDEV_RUNNING) { 1226 - drv = *(struct scsi_driver **) rq->rq_disk->private_data; 1227 - drv->end_flush(q, rq); 1228 - } 1229 1200 } 1230 1201 1231 1202 static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk, ··· 1667 1702 blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); 1668 1703 blk_queue_segment_boundary(q, shost->dma_boundary); 1669 1704 blk_queue_issue_flush_fn(q, scsi_issue_flush_fn); 1670 - 1671 - /* 1672 - * ordered tags are superior to flush ordering 1673 - */ 1674 - if (shost->ordered_tag) 1675 - blk_queue_ordered(q, QUEUE_ORDERED_TAG); 1676 - else if (shost->ordered_flush) { 1677 - blk_queue_ordered(q, QUEUE_ORDERED_FLUSH); 1678 - q->prepare_flush_fn = scsi_prepare_flush_fn; 1679 - 
q->end_flush_fn = scsi_end_flush_fn; 1680 - } 1681 1705 1682 1706 if (!shost->use_clustering) 1683 1707 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+20 -38
drivers/scsi/sd.c
··· 121 121 static void sd_rescan(struct device *); 122 122 static int sd_init_command(struct scsi_cmnd *); 123 123 static int sd_issue_flush(struct device *, sector_t *); 124 - static void sd_end_flush(request_queue_t *, struct request *); 125 - static int sd_prepare_flush(request_queue_t *, struct request *); 124 + static void sd_prepare_flush(request_queue_t *, struct request *); 126 125 static void sd_read_capacity(struct scsi_disk *sdkp, char *diskname, 127 126 unsigned char *buffer); 128 127 ··· 136 137 .rescan = sd_rescan, 137 138 .init_command = sd_init_command, 138 139 .issue_flush = sd_issue_flush, 139 - .prepare_flush = sd_prepare_flush, 140 - .end_flush = sd_end_flush, 141 140 }; 142 141 143 142 /* ··· 726 729 return ret; 727 730 } 728 731 729 - static void sd_end_flush(request_queue_t *q, struct request *flush_rq) 732 + static void sd_prepare_flush(request_queue_t *q, struct request *rq) 730 733 { 731 - struct request *rq = flush_rq->end_io_data; 732 - struct scsi_cmnd *cmd = rq->special; 733 - unsigned int bytes = rq->hard_nr_sectors << 9; 734 - 735 - if (!flush_rq->errors) { 736 - spin_unlock(q->queue_lock); 737 - scsi_io_completion(cmd, bytes, 0); 738 - spin_lock(q->queue_lock); 739 - } else if (blk_barrier_postflush(rq)) { 740 - spin_unlock(q->queue_lock); 741 - scsi_io_completion(cmd, 0, bytes); 742 - spin_lock(q->queue_lock); 743 - } else { 744 - /* 745 - * force journal abort of barriers 746 - */ 747 - end_that_request_first(rq, -EOPNOTSUPP, rq->hard_nr_sectors); 748 - end_that_request_last(rq, -EOPNOTSUPP); 749 - } 750 - } 751 - 752 - static int sd_prepare_flush(request_queue_t *q, struct request *rq) 753 - { 754 - struct scsi_device *sdev = q->queuedata; 755 - struct scsi_disk *sdkp = dev_get_drvdata(&sdev->sdev_gendev); 756 - 757 - if (!sdkp || !sdkp->WCE) 758 - return 0; 759 - 760 734 memset(rq->cmd, 0, sizeof(rq->cmd)); 761 - rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER; 735 + rq->flags |= REQ_BLOCK_PC; 762 736 rq->timeout = SD_TIMEOUT; 763 
737 rq->cmd[0] = SYNCHRONIZE_CACHE; 764 - return 1; 738 + rq->cmd_len = 10; 765 739 } 766 740 767 741 static void sd_rescan(struct device *dev) ··· 1430 1462 struct scsi_disk *sdkp = scsi_disk(disk); 1431 1463 struct scsi_device *sdp = sdkp->device; 1432 1464 unsigned char *buffer; 1465 + unsigned ordered; 1433 1466 1434 1467 SCSI_LOG_HLQUEUE(3, printk("sd_revalidate_disk: disk=%s\n", disk->disk_name)); 1435 1468 ··· 1467 1498 sd_read_write_protect_flag(sdkp, disk->disk_name, buffer); 1468 1499 sd_read_cache_type(sdkp, disk->disk_name, buffer); 1469 1500 } 1470 - 1501 + 1502 + /* 1503 + * We now have all cache related info, determine how we deal 1504 + * with ordered requests. Note that as the current SCSI 1505 + * dispatch function can alter request order, we cannot use 1506 + * QUEUE_ORDERED_TAG_* even when ordered tag is supported. 1507 + */ 1508 + if (sdkp->WCE) 1509 + ordered = QUEUE_ORDERED_DRAIN_FLUSH; 1510 + else 1511 + ordered = QUEUE_ORDERED_DRAIN; 1512 + 1513 + blk_queue_ordered(sdkp->disk->queue, ordered, sd_prepare_flush); 1514 + 1471 1515 set_capacity(disk, sdkp->capacity); 1472 1516 kfree(buffer); 1473 1517 ··· 1580 1598 strcpy(gd->devfs_name, sdp->devfs_name); 1581 1599 1582 1600 gd->private_data = &sdkp->driver; 1601 + gd->queue = sdkp->device->request_queue; 1583 1602 1584 1603 sd_revalidate_disk(gd); 1585 1604 ··· 1588 1605 gd->flags = GENHD_FL_DRIVERFS; 1589 1606 if (sdp->removable) 1590 1607 gd->flags |= GENHD_FL_REMOVABLE; 1591 - gd->queue = sdkp->device->request_queue; 1592 1608 1593 1609 dev_set_drvdata(dev, sdkp); 1594 1610 add_disk(gd);
-1
include/scsi/scsi_driver.h
··· 15 15 void (*rescan)(struct device *); 16 16 int (*issue_flush)(struct device *, sector_t *); 17 17 int (*prepare_flush)(struct request_queue *, struct request *); 18 - void (*end_flush)(struct request_queue *, struct request *); 19 18 }; 20 19 #define to_scsi_driver(drv) \ 21 20 container_of((drv), struct scsi_driver, gendrv)
-1
include/scsi/scsi_host.h
··· 392 392 /* 393 393 * ordered write support 394 394 */ 395 - unsigned ordered_flush:1; 396 395 unsigned ordered_tag:1; 397 396 398 397 /*