···26#include <linux/slab.h>27#include <linux/swap.h>28#include <linux/writeback.h>002930/*31 * for max sense size···63/*64 * Controlling structure to kblockd65 */66-static struct workqueue_struct *kblockd_workqueue; 6768unsigned long blk_max_low_pfn, blk_max_pfn;6970EXPORT_SYMBOL(blk_max_low_pfn);71EXPORT_SYMBOL(blk_max_pfn);007273/* Amount of time in which a process may batch requests */74#define BLK_BATCH_TIME (HZ/50UL)···210211EXPORT_SYMBOL(blk_queue_merge_bvec);2120000000213/**214 * blk_queue_make_request - define an alternate make_request function for a device215 * @q: the request queue for the device to be affected···280static inline void rq_init(request_queue_t *q, struct request *rq)281{282 INIT_LIST_HEAD(&rq->queuelist);0283284 rq->errors = 0;285 rq->rq_status = RQ_ACTIVE;···297 rq->sense = NULL;298 rq->end_io = NULL;299 rq->end_io_data = NULL;0300}301302/**···3275EXPORT_SYMBOL(end_that_request_chunk);32763277/*0000000000000000000000000000000000000000000000000000000000000000000000000000000003278 * queue lock must be held3279 */3280void end_that_request_last(struct request *req, int uptodate)···34333434int __init blk_dev_init(void)3435{003436 kblockd_workqueue = create_workqueue("kblockd");3437 if (!kblockd_workqueue)3438 panic("Failed to create kblockd\n");···34473448 iocontext_cachep = kmem_cache_create("blkdev_ioc",3449 sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);0000000034503451 blk_max_low_pfn = max_low_pfn;3452 blk_max_pfn = max_pfn;
···26#include <linux/slab.h>27#include <linux/swap.h>28#include <linux/writeback.h>29+#include <linux/interrupt.h>30+#include <linux/cpu.h>3132/*33 * for max sense size···61/*62 * Controlling structure to kblockd63 */64+static struct workqueue_struct *kblockd_workqueue;6566unsigned long blk_max_low_pfn, blk_max_pfn;6768EXPORT_SYMBOL(blk_max_low_pfn);69EXPORT_SYMBOL(blk_max_pfn);70+71+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);7273/* Amount of time in which a process may batch requests */74#define BLK_BATCH_TIME (HZ/50UL)···206207EXPORT_SYMBOL(blk_queue_merge_bvec);208209+void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)210+{211+ q->softirq_done_fn = fn;212+}213+214+EXPORT_SYMBOL(blk_queue_softirq_done);215+216/**217 * blk_queue_make_request - define an alternate make_request function for a device218 * @q: the request queue for the device to be affected···269static inline void rq_init(request_queue_t *q, struct request *rq)270{271 INIT_LIST_HEAD(&rq->queuelist);272+ INIT_LIST_HEAD(&rq->donelist);273274 rq->errors = 0;275 rq->rq_status = RQ_ACTIVE;···285 rq->sense = NULL;286 rq->end_io = NULL;287 rq->end_io_data = NULL;288+ rq->completion_data = NULL;289}290291/**···3262EXPORT_SYMBOL(end_that_request_chunk);32633264/*3265+ * splice the completion data to a local structure and hand off to3266+ * process_completion_queue() to complete the requests3267+ */3268+static void blk_done_softirq(struct softirq_action *h)3269+{3270+ struct list_head *cpu_list;3271+ LIST_HEAD(local_list);3272+3273+ local_irq_disable();3274+ cpu_list = &__get_cpu_var(blk_cpu_done);3275+ list_splice_init(cpu_list, &local_list);3276+ local_irq_enable();3277+3278+ while (!list_empty(&local_list)) {3279+ struct request *rq = list_entry(local_list.next, struct request, donelist);3280+3281+ list_del_init(&rq->donelist);3282+ rq->q->softirq_done_fn(rq);3283+ }3284+}3285+3286+#ifdef CONFIG_HOTPLUG_CPU3287+3288+static int blk_cpu_notify(struct notifier_block *self, unsigned 
long action,3289+ void *hcpu)3290+{3291+ /*3292+ * If a CPU goes away, splice its entries to the current CPU3293+ * and trigger a run of the softirq3294+ */3295+ if (action == CPU_DEAD) {3296+ int cpu = (unsigned long) hcpu;3297+3298+ local_irq_disable();3299+ list_splice_init(&per_cpu(blk_cpu_done, cpu),3300+ &__get_cpu_var(blk_cpu_done));3301+ raise_softirq_irqoff(BLOCK_SOFTIRQ);3302+ local_irq_enable();3303+ }3304+3305+ return NOTIFY_OK;3306+}3307+3308+3309+static struct notifier_block __devinitdata blk_cpu_notifier = {3310+ .notifier_call = blk_cpu_notify,3311+};3312+3313+#endif /* CONFIG_HOTPLUG_CPU */3314+3315+/**3316+ * blk_complete_request - end I/O on a request3317+ * @req: the request being processed3318+ *3319+ * Description:3320+ * Ends all I/O on a request. It does not handle partial completions,3321+ * unless the driver actually implements this in its completion callback3322+ * through requeueing. The actual completion happens out-of-order,3323+ * through a softirq handler. 
The user must have registered a completion3324+ * callback through blk_queue_softirq_done().3325+ **/3326+3327+void blk_complete_request(struct request *req)3328+{3329+ struct list_head *cpu_list;3330+ unsigned long flags;3331+3332+ BUG_ON(!req->q->softirq_done_fn);3333+3334+ local_irq_save(flags);3335+3336+ cpu_list = &__get_cpu_var(blk_cpu_done);3337+ list_add_tail(&req->donelist, cpu_list);3338+ raise_softirq_irqoff(BLOCK_SOFTIRQ);3339+3340+ local_irq_restore(flags);3341+}3342+3343+EXPORT_SYMBOL(blk_complete_request);3344+3345+/*3346 * queue lock must be held3347 */3348void end_that_request_last(struct request *req, int uptodate)···33393340int __init blk_dev_init(void)3341{3342+ int i;3343+3344 kblockd_workqueue = create_workqueue("kblockd");3345 if (!kblockd_workqueue)3346 panic("Failed to create kblockd\n");···33513352 iocontext_cachep = kmem_cache_create("blkdev_ioc",3353 sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);3354+3355+ for (i = 0; i < NR_CPUS; i++)3356+ INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));3357+3358+ open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);3359+#ifdef CONFIG_HOTPLUG_CPU3360+ register_cpu_notifier(&blk_cpu_notifier);3361+#endif33623363 blk_max_low_pfn = max_low_pfn;3364 blk_max_pfn = max_pfn;
+46-26
drivers/block/cciss.c
···21782179 start_io(h);2180}0000000000000000000000000000000002181/* checks the status of the job and calls complete buffers to mark all 2182- * buffers for the completed job. 02183 */ 2184static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,2185 int timeout)2186{2187 int status = 1;2188- int i;2189 int retry_cmd = 0;2190- u64bit temp64;21912192 if (timeout)2193 status = 0; ···2327 resend_cciss_cmd(h,cmd);2328 return;2329 } 2330- /* command did not need to be retried */2331- /* unmap the DMA mapping for all the scatter gather elements */2332- for(i=0; i<cmd->Header.SGList; i++) {2333- temp64.val32.lower = cmd->SG[i].Addr.lower;2334- temp64.val32.upper = cmd->SG[i].Addr.upper;2335- pci_unmap_page(hba[cmd->ctlr]->pdev,2336- temp64.val, cmd->SG[i].Len,2337- (cmd->Request.Type.Direction == XFER_READ) ?2338- PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);2339- }2340- complete_buffers(cmd->rq->bio, status);23412342-#ifdef CCISS_DEBUG2343- printk("Done with %p\n", cmd->rq);2344-#endif /* CCISS_DEBUG */ 2345-2346- end_that_request_last(cmd->rq, status ? 1 : -EIO);2347- cmd_free(h,cmd,1);2348}23492350/* ···3217 drv->queue = q;32183219 q->backing_dev_info.ra_pages = READ_AHEAD;3220- blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);32213222- /* This is a hardware imposed limit. */3223- blk_queue_max_hw_segments(q, MAXSGENTRIES);32243225- /* This is a limit in the driver and could be eliminated. */3226- blk_queue_max_phys_segments(q, MAXSGENTRIES);32273228- blk_queue_max_sectors(q, 512);0032293230 q->queuedata = hba[i];3231 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
···21782179 start_io(h);2180}2181+2182+static void cciss_softirq_done(struct request *rq)2183+{2184+ CommandList_struct *cmd = rq->completion_data;2185+ ctlr_info_t *h = hba[cmd->ctlr];2186+ u64bit temp64;2187+ int i, ddir;2188+2189+ if (cmd->Request.Type.Direction == XFER_READ)2190+ ddir = PCI_DMA_FROMDEVICE;2191+ else2192+ ddir = PCI_DMA_TODEVICE;2193+2194+ /* command did not need to be retried */2195+ /* unmap the DMA mapping for all the scatter gather elements */2196+ for(i=0; i<cmd->Header.SGList; i++) {2197+ temp64.val32.lower = cmd->SG[i].Addr.lower;2198+ temp64.val32.upper = cmd->SG[i].Addr.upper;2199+ pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);2200+ }2201+2202+ complete_buffers(rq->bio, rq->errors);2203+2204+#ifdef CCISS_DEBUG2205+ printk("Done with %p\n", rq);2206+#endif /* CCISS_DEBUG */ 2207+2208+ spin_lock_irq(&h->lock);2209+ end_that_request_last(rq, rq->errors);2210+ cmd_free(h, cmd,1);2211+ spin_unlock_irq(&h->lock);2212+}2213+2214/* checks the status of the job and calls complete buffers to mark all 2215+ * buffers for the completed job. Note that this function does not need2216+ * to hold the hba/queue lock.2217 */ 2218static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,2219 int timeout)2220{2221 int status = 1;02222 int retry_cmd = 0;022232224 if (timeout)2225 status = 0; ···2295 resend_cciss_cmd(h,cmd);2296 return;2297 } 0000000000022982299+ cmd->rq->completion_data = cmd;2300+ cmd->rq->errors = status;2301+ blk_complete_request(cmd->rq);0002302}23032304/* ···3199 drv->queue = q;32003201 q->backing_dev_info.ra_pages = READ_AHEAD;3202+ blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);32033204+ /* This is a hardware imposed limit. */3205+ blk_queue_max_hw_segments(q, MAXSGENTRIES);32063207+ /* This is a limit in the driver and could be eliminated. 
*/3208+ blk_queue_max_phys_segments(q, MAXSGENTRIES);32093210+ blk_queue_max_sectors(q, 512);3211+3212+ blk_queue_softirq_done(q, cciss_softirq_done);32133214 q->queuedata = hba[i];3215 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
+35-7
drivers/ide/ide-io.c
···55#include <asm/io.h>56#include <asm/bitops.h>5700000000000058int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,59 int nr_sectors)60{061 int ret = 1;6263 BUG_ON(!(rq->flags & REQ_STARTED));···94 HWGROUP(drive)->hwif->ide_dma_on(drive);95 }9697- if (!end_that_request_first(rq, uptodate, nr_sectors)) {98- add_disk_randomness(rq->rq_disk);99-100- if (blk_rq_tagged(rq))101- blk_queue_end_tag(drive->queue, rq);102-00103 blkdev_dequeue_request(rq);104 HWGROUP(drive)->rq = NULL;105- end_that_request_last(rq, uptodate);106 ret = 0;00000000107 }0108 return ret;109}110EXPORT_SYMBOL(__ide_end_request);···137 unsigned long flags;138 int ret = 1;1390000140 spin_lock_irqsave(&ide_lock, flags);141 rq = HWGROUP(drive)->rq;142
···55#include <asm/io.h>56#include <asm/bitops.h>5758+void ide_softirq_done(struct request *rq)59+{60+ request_queue_t *q = rq->q;61+62+ add_disk_randomness(rq->rq_disk);63+ end_that_request_chunk(rq, rq->errors, rq->data_len);64+65+ spin_lock_irq(q->queue_lock);66+ end_that_request_last(rq, rq->errors);67+ spin_unlock_irq(q->queue_lock);68+}69+70int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,71 int nr_sectors)72{73+ unsigned int nbytes;74 int ret = 1;7576 BUG_ON(!(rq->flags & REQ_STARTED));···81 HWGROUP(drive)->hwif->ide_dma_on(drive);82 }8384+ /*85+ * For partial completions (or non fs/pc requests), use the regular86+ * direct completion path.87+ */88+ nbytes = nr_sectors << 9;89+ if (rq_all_done(rq, nbytes)) {90+ rq->errors = uptodate;91+ rq->data_len = nbytes;92 blkdev_dequeue_request(rq);93 HWGROUP(drive)->rq = NULL;94+ blk_complete_request(rq);95 ret = 0;96+ } else {97+ if (!end_that_request_first(rq, uptodate, nr_sectors)) {98+ add_disk_randomness(rq->rq_disk);99+ blkdev_dequeue_request(rq);100+ HWGROUP(drive)->rq = NULL;101+ end_that_request_last(rq, uptodate);102+ ret = 0;103+ }104 }105+106 return ret;107}108EXPORT_SYMBOL(__ide_end_request);···113 unsigned long flags;114 int ret = 1;115116+ /*117+ * room for locking improvements here, the calls below don't118+ * need the queue lock held at all119+ */120 spin_lock_irqsave(&ide_lock, flags);121 rq = HWGROUP(drive)->rq;122
···69#include "scsi_logging.h"7071static void scsi_done(struct scsi_cmnd *cmd);72-static int scsi_retry_command(struct scsi_cmnd *cmd);7374/*75 * Definitions and constants.···751 * isn't running --- used by scsi_times_out */752void __scsi_done(struct scsi_cmnd *cmd)753{754- unsigned long flags;755756 /*757 * Set the serial numbers back to zero···762 if (cmd->result)763 atomic_inc(&cmd->device->ioerr_cnt);76400765 /*766- * Next, enqueue the command into the done queue.767- * It is a per-CPU queue, so we just disable local interrupts768- * and need no spinlock.769 */770- local_irq_save(flags);771- list_add_tail(&cmd->eh_entry, &__get_cpu_var(scsi_done_q));772- raise_softirq_irqoff(SCSI_SOFTIRQ);773- local_irq_restore(flags);774-}775-776-/**777- * scsi_softirq - Perform post-interrupt processing of finished SCSI commands.778- *779- * This is the consumer of the done queue.780- *781- * This is called with all interrupts enabled. This should reduce782- * interrupt latency, stack depth, and reentrancy of the low-level783- * drivers.784- */785-static void scsi_softirq(struct softirq_action *h)786-{787- int disposition;788- LIST_HEAD(local_q);789-790- local_irq_disable();791- list_splice_init(&__get_cpu_var(scsi_done_q), &local_q);792- local_irq_enable();793-794- while (!list_empty(&local_q)) {795- struct scsi_cmnd *cmd = list_entry(local_q.next,796- struct scsi_cmnd, eh_entry);797- /* The longest time any command should be outstanding is the798- * per command timeout multiplied by the number of retries.799- *800- * For a typical command, this is 2.5 minutes */801- unsigned long wait_for 802- = cmd->allowed * cmd->timeout_per_command;803- list_del_init(&cmd->eh_entry);804-805- disposition = scsi_decide_disposition(cmd);806- if (disposition != SUCCESS &&807- time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {808- sdev_printk(KERN_ERR, cmd->device,809- "timing out command, waited %lus\n",810- wait_for/HZ);811- disposition = SUCCESS;812- }813-814- 
scsi_log_completion(cmd, disposition);815- switch (disposition) {816- case SUCCESS:817- scsi_finish_command(cmd);818- break;819- case NEEDS_RETRY:820- scsi_retry_command(cmd);821- break;822- case ADD_TO_MLQUEUE:823- scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);824- break;825- default:826- if (!scsi_eh_scmd_add(cmd, 0))827- scsi_finish_command(cmd);828- }829- }830}831832/*···782 * level drivers should not become re-entrant as a result of783 * this.784 */785-static int scsi_retry_command(struct scsi_cmnd *cmd)786{787 /*788 * Restore the SCSI command state.···1215}1216EXPORT_SYMBOL(scsi_device_cancel);12171218-#ifdef CONFIG_HOTPLUG_CPU1219-static int scsi_cpu_notify(struct notifier_block *self,1220- unsigned long action, void *hcpu)1221-{1222- int cpu = (unsigned long)hcpu;1223-1224- switch(action) {1225- case CPU_DEAD:1226- /* Drain scsi_done_q. */1227- local_irq_disable();1228- list_splice_init(&per_cpu(scsi_done_q, cpu),1229- &__get_cpu_var(scsi_done_q));1230- raise_softirq_irqoff(SCSI_SOFTIRQ);1231- local_irq_enable();1232- break;1233- default:1234- break;1235- }1236- return NOTIFY_OK;1237-}1238-1239-static struct notifier_block __devinitdata scsi_cpu_nb = {1240- .notifier_call = scsi_cpu_notify,1241-};1242-1243-#define register_scsi_cpu() register_cpu_notifier(&scsi_cpu_nb)1244-#define unregister_scsi_cpu() unregister_cpu_notifier(&scsi_cpu_nb)1245-#else1246-#define register_scsi_cpu()1247-#define unregister_scsi_cpu()1248-#endif /* CONFIG_HOTPLUG_CPU */1249-1250MODULE_DESCRIPTION("SCSI core");1251MODULE_LICENSE("GPL");1252···1248 INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));12491250 devfs_mk_dir("scsi");1251- open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL);1252- register_scsi_cpu();1253 printk(KERN_NOTICE "SCSI subsystem initialized\n");1254 return 0;1255···1275 devfs_remove("scsi");1276 scsi_exit_procfs();1277 scsi_exit_queue();1278- unregister_scsi_cpu();1279}12801281subsys_initcall(init_scsi);
···69#include "scsi_logging.h"7071static void scsi_done(struct scsi_cmnd *cmd);07273/*74 * Definitions and constants.···752 * isn't running --- used by scsi_times_out */753void __scsi_done(struct scsi_cmnd *cmd)754{755+ struct request *rq = cmd->request;756757 /*758 * Set the serial numbers back to zero···763 if (cmd->result)764 atomic_inc(&cmd->device->ioerr_cnt);765766+ BUG_ON(!rq);767+768 /*769+ * The uptodate/nbytes values don't matter, as we allow partial770+ * completes and thus will check this in the softirq callback0771 */772+ rq->completion_data = cmd;773+ blk_complete_request(rq);0000000000000000000000000000000000000000000000000000000000774}775776/*···840 * level drivers should not become re-entrant as a result of841 * this.842 */843+int scsi_retry_command(struct scsi_cmnd *cmd)844{845 /*846 * Restore the SCSI command state.···1273}1274EXPORT_SYMBOL(scsi_device_cancel);1275000000000000000000000000000000001276MODULE_DESCRIPTION("SCSI core");1277MODULE_LICENSE("GPL");1278···1338 INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));13391340 devfs_mk_dir("scsi");001341 printk(KERN_NOTICE "SCSI subsystem initialized\n");1342 return 0;1343···1367 devfs_remove("scsi");1368 scsi_exit_procfs();1369 scsi_exit_queue();01370}13711372subsys_initcall(init_scsi);
···118 * try to put the fields that are referenced together in the same cacheline119 */120struct request {121- struct list_head queuelist; /* looking for ->queue? you must _not_122- * access it directly, use123- * blkdev_dequeue_request! */124 unsigned long flags; /* see REQ_ bits below */125126 /* Maintain bio traversal state for part by part I/O submission.···141 struct bio *biotail;142143 void *elevator_private;0144145 unsigned short ioprio;146···292typedef void (activity_fn) (void *data, int rw);293typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);294typedef void (prepare_flush_fn) (request_queue_t *, struct request *);0295296enum blk_queue_state {297 Queue_down,···334 activity_fn *activity_fn;335 issue_flush_fn *issue_flush_fn;336 prepare_flush_fn *prepare_flush_fn;0337338 /*339 * Dispatch queue sorting···648extern int end_that_request_chunk(struct request *, int, int);649extern void end_that_request_last(struct request *, int);650extern void end_request(struct request *req, int uptodate);00000000000651652/*653 * end_that_request_first/chunk() takes an uptodate argument. we account···707extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);708extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);709extern void blk_queue_dma_alignment(request_queue_t *, int);0710extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);711extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);712extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
···118 * try to put the fields that are referenced together in the same cacheline119 */120struct request {121+ struct list_head queuelist;122+ struct list_head donelist;123+124 unsigned long flags; /* see REQ_ bits below */125126 /* Maintain bio traversal state for part by part I/O submission.···141 struct bio *biotail;142143 void *elevator_private;144+ void *completion_data;145146 unsigned short ioprio;147···291typedef void (activity_fn) (void *data, int rw);292typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);293typedef void (prepare_flush_fn) (request_queue_t *, struct request *);294+typedef void (softirq_done_fn)(struct request *);295296enum blk_queue_state {297 Queue_down,···332 activity_fn *activity_fn;333 issue_flush_fn *issue_flush_fn;334 prepare_flush_fn *prepare_flush_fn;335+ softirq_done_fn *softirq_done_fn;336337 /*338 * Dispatch queue sorting···645extern int end_that_request_chunk(struct request *, int, int);646extern void end_that_request_last(struct request *, int);647extern void end_request(struct request *req, int uptodate);648+extern void blk_complete_request(struct request *);649+650+static inline int rq_all_done(struct request *rq, unsigned int nr_bytes)651+{652+ if (blk_fs_request(rq))653+ return (nr_bytes >= (rq->hard_nr_sectors << 9));654+ else if (blk_pc_request(rq))655+ return nr_bytes >= rq->data_len;656+657+ return 0;658+}659660/*661 * end_that_request_first/chunk() takes an uptodate argument. 
we account···693extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);694extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);695extern void blk_queue_dma_alignment(request_queue_t *, int);696+extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);697extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);698extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);699extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
+1
include/linux/ide.h
···10011002extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);1003extern int __ide_end_request (ide_drive_t *drive, struct request *rq, int uptodate, int nrsecs);010041005/*1006 * This is used on exit from the driver to designate the next irq handler
···10011002extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);1003extern int __ide_end_request (ide_drive_t *drive, struct request *rq, int uptodate, int nrsecs);1004+extern void ide_softirq_done(struct request *rq);10051006/*1007 * This is used on exit from the driver to designate the next irq handler