[SCSI] Kill the SCSI softirq handling

This patch moves the SCSI softirq handling over to the block layer's softirq
completion (BLOCK_SOFTIRQ), replacing the private SCSI_SOFTIRQ path.
There should be no functional changes.
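
For reference, the block layer completion pattern this converts to looks roughly
like the sketch below. It is an illustration only, not part of the patch:
blk_queue_softirq_done(), blk_complete_request() and rq->completion_data are the
block layer interfaces the diff actually uses, while everything prefixed my_ is a
hypothetical stand-in for a driver's own command structure and handlers.

/*
 * Illustration only (not part of this patch): the generic block layer
 * softirq completion pattern.  The my_* names are hypothetical; the
 * blk_* calls and rq->completion_data are the interfaces used below.
 */
#include <linux/blkdev.h>
#include <linux/kernel.h>

struct my_cmd {
        struct request *request;        /* request this command was built from */
        int result;
};

/* Runs later in BLOCK_SOFTIRQ context, not in the hardware interrupt. */
static void my_softirq_done(struct request *rq)
{
        struct my_cmd *cmd = rq->completion_data;

        /* heavier post-processing (retries, error handling, ...) goes here */
        printk(KERN_DEBUG "my_cmd done, result %d\n", cmd->result);
}

/* One-time queue setup: register the per-queue softirq completion hook. */
static void my_init_queue(struct request_queue *q)
{
        blk_queue_softirq_done(q, my_softirq_done);
}

/* Hard IRQ context: just hand the request off to the block layer. */
static void my_irq_complete(struct my_cmd *cmd)
{
        struct request *rq = cmd->request;

        rq->completion_data = cmd;
        blk_complete_request(rq);       /* defers completion to BLOCK_SOFTIRQ */
}

The scsi.c and scsi_lib.c hunks below follow exactly this shape: the struct
scsi_cmnd rides in rq->completion_data, and scsi_softirq_done() takes over the
disposition handling that used to live in scsi_softirq().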

Signed-off-by: Jens Axboe <axboe@suse.de>

4 files changed, 45 insertions(+), 102 deletions(-)

drivers/scsi/scsi.c | +8 -101
···
 #include "scsi_logging.h"

 static void scsi_done(struct scsi_cmnd *cmd);
-static int scsi_retry_command(struct scsi_cmnd *cmd);

 /*
  * Definitions and constants.
···
  * isn't running --- used by scsi_times_out */
 void __scsi_done(struct scsi_cmnd *cmd)
 {
-        unsigned long flags;
+        struct request *rq = cmd->request;

         /*
          * Set the serial numbers back to zero
···
         if (cmd->result)
                 atomic_inc(&cmd->device->ioerr_cnt);

+        BUG_ON(!rq);
+
         /*
-         * Next, enqueue the command into the done queue.
-         * It is a per-CPU queue, so we just disable local interrupts
-         * and need no spinlock.
+         * The uptodate/nbytes values don't matter, as we allow partial
+         * completes and thus will check this in the softirq callback
          */
-        local_irq_save(flags);
-        list_add_tail(&cmd->eh_entry, &__get_cpu_var(scsi_done_q));
-        raise_softirq_irqoff(SCSI_SOFTIRQ);
-        local_irq_restore(flags);
-}
-
-/**
- * scsi_softirq - Perform post-interrupt processing of finished SCSI commands.
- *
- * This is the consumer of the done queue.
- *
- * This is called with all interrupts enabled. This should reduce
- * interrupt latency, stack depth, and reentrancy of the low-level
- * drivers.
- */
-static void scsi_softirq(struct softirq_action *h)
-{
-        int disposition;
-        LIST_HEAD(local_q);
-
-        local_irq_disable();
-        list_splice_init(&__get_cpu_var(scsi_done_q), &local_q);
-        local_irq_enable();
-
-        while (!list_empty(&local_q)) {
-                struct scsi_cmnd *cmd = list_entry(local_q.next,
-                                                   struct scsi_cmnd, eh_entry);
-                /* The longest time any command should be outstanding is the
-                 * per command timeout multiplied by the number of retries.
-                 *
-                 * For a typical command, this is 2.5 minutes */
-                unsigned long wait_for
-                        = cmd->allowed * cmd->timeout_per_command;
-                list_del_init(&cmd->eh_entry);
-
-                disposition = scsi_decide_disposition(cmd);
-                if (disposition != SUCCESS &&
-                    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
-                        sdev_printk(KERN_ERR, cmd->device,
-                                    "timing out command, waited %lus\n",
-                                    wait_for/HZ);
-                        disposition = SUCCESS;
-                }
-
-                scsi_log_completion(cmd, disposition);
-                switch (disposition) {
-                case SUCCESS:
-                        scsi_finish_command(cmd);
-                        break;
-                case NEEDS_RETRY:
-                        scsi_retry_command(cmd);
-                        break;
-                case ADD_TO_MLQUEUE:
-                        scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
-                        break;
-                default:
-                        if (!scsi_eh_scmd_add(cmd, 0))
-                                scsi_finish_command(cmd);
-                }
-        }
+        rq->completion_data = cmd;
+        blk_complete_request(rq);
 }

 /*
···
  * level drivers should not become re-entrant as a result of
  * this.
  */
-static int scsi_retry_command(struct scsi_cmnd *cmd)
+int scsi_retry_command(struct scsi_cmnd *cmd)
 {
         /*
          * Restore the SCSI command state.
···
 }
 EXPORT_SYMBOL(scsi_device_cancel);

-#ifdef CONFIG_HOTPLUG_CPU
-static int scsi_cpu_notify(struct notifier_block *self,
-                           unsigned long action, void *hcpu)
-{
-        int cpu = (unsigned long)hcpu;
-
-        switch(action) {
-        case CPU_DEAD:
-                /* Drain scsi_done_q. */
-                local_irq_disable();
-                list_splice_init(&per_cpu(scsi_done_q, cpu),
-                                 &__get_cpu_var(scsi_done_q));
-                raise_softirq_irqoff(SCSI_SOFTIRQ);
-                local_irq_enable();
-                break;
-        default:
-                break;
-        }
-        return NOTIFY_OK;
-}
-
-static struct notifier_block __devinitdata scsi_cpu_nb = {
-        .notifier_call = scsi_cpu_notify,
-};
-
-#define register_scsi_cpu()   register_cpu_notifier(&scsi_cpu_nb)
-#define unregister_scsi_cpu() unregister_cpu_notifier(&scsi_cpu_nb)
-#else
-#define register_scsi_cpu()
-#define unregister_scsi_cpu()
-#endif /* CONFIG_HOTPLUG_CPU */
-
 MODULE_DESCRIPTION("SCSI core");
 MODULE_LICENSE("GPL");
···
                 INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));

         devfs_mk_dir("scsi");
-        open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL);
-        register_scsi_cpu();
         printk(KERN_NOTICE "SCSI subsystem initialized\n");
         return 0;
···
         devfs_remove("scsi");
         scsi_exit_procfs();
         scsi_exit_queue();
-        unregister_scsi_cpu();
 }

 subsys_initcall(init_scsi);

drivers/scsi/scsi_lib.c | +36
···
         __scsi_done(cmd);
 }

+static void scsi_softirq_done(struct request *rq)
+{
+        struct scsi_cmnd *cmd = rq->completion_data;
+        unsigned long wait_for = cmd->allowed * cmd->timeout_per_command;
+        int disposition;
+
+        INIT_LIST_HEAD(&cmd->eh_entry);
+
+        disposition = scsi_decide_disposition(cmd);
+        if (disposition != SUCCESS &&
+            time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+                sdev_printk(KERN_ERR, cmd->device,
+                            "timing out command, waited %lus\n",
+                            wait_for/HZ);
+                disposition = SUCCESS;
+        }
+
+        scsi_log_completion(cmd, disposition);
+
+        switch (disposition) {
+                case SUCCESS:
+                        scsi_finish_command(cmd);
+                        break;
+                case NEEDS_RETRY:
+                        scsi_retry_command(cmd);
+                        break;
+                case ADD_TO_MLQUEUE:
+                        scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+                        break;
+                default:
+                        if (!scsi_eh_scmd_add(cmd, 0))
+                                scsi_finish_command(cmd);
+        }
+}
+
 /*
  * Function: scsi_request_fn()
  *
···
         blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
         blk_queue_segment_boundary(q, shost->dma_boundary);
         blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
+        blk_queue_softirq_done(q, scsi_softirq_done);

         if (!shost->use_clustering)
                 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);

drivers/scsi/scsi_priv.h | +1
···
                                    struct scsi_request *sreq);
 extern void __scsi_release_request(struct scsi_request *sreq);
 extern void __scsi_done(struct scsi_cmnd *cmd);
+extern int scsi_retry_command(struct scsi_cmnd *cmd);
 #ifdef CONFIG_SCSI_LOGGING
 void scsi_log_send(struct scsi_cmnd *cmd);
 void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);

include/linux/interrupt.h | -1
···
         NET_TX_SOFTIRQ,
         NET_RX_SOFTIRQ,
         BLOCK_SOFTIRQ,
-        SCSI_SOFTIRQ,
         TASKLET_SOFTIRQ
 };