Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[BLOCK] add @uptodate to end_that_request_last() and @error to rq_end_io_fn()

Add an @uptodate argument to end_that_request_last() and an @error
argument to rq_end_io_fn(). There's no generic way to pass an error code
to the request completion function, making generic error handling
of non-fs requests difficult (rq->errors is driver-specific and
each driver uses it differently). This patch adds @uptodate
to end_that_request_last() and @error to rq_end_io_fn().

For fs requests, this doesn't really matter, so just using the
same uptodate argument used in the last call to
end_that_request_first() should suffice. IMHO, this can also
help the generic command-carrying request Jens is working on.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>

Authored by Tejun Heo and committed by Jens Axboe
8ffdc655 64100099

+42 -34
+1 -1
block/elevator.c
··· 498 498 blkdev_dequeue_request(rq); 499 499 rq->flags |= REQ_QUIET; 500 500 end_that_request_chunk(rq, 0, nr_bytes); 501 - end_that_request_last(rq); 501 + end_that_request_last(rq, 0); 502 502 } else { 503 503 printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__, 504 504 ret);
+15 -7
block/ll_rw_blk.c
··· 344 344 /* 345 345 * Cache flushing for ordered writes handling 346 346 */ 347 - static void blk_pre_flush_end_io(struct request *flush_rq) 347 + static void blk_pre_flush_end_io(struct request *flush_rq, int error) 348 348 { 349 349 struct request *rq = flush_rq->end_io_data; 350 350 request_queue_t *q = rq->q; ··· 362 362 } 363 363 } 364 364 365 - static void blk_post_flush_end_io(struct request *flush_rq) 365 + static void blk_post_flush_end_io(struct request *flush_rq, int error) 366 366 { 367 367 struct request *rq = flush_rq->end_io_data; 368 368 request_queue_t *q = rq->q; ··· 2317 2317 */ 2318 2318 void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk, 2319 2319 struct request *rq, int at_head, 2320 - void (*done)(struct request *)) 2320 + rq_end_io_fn *done) 2321 2321 { 2322 2322 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; 2323 2323 ··· 2521 2521 * blk_end_sync_rq - executes a completion event on a request 2522 2522 * @rq: request to complete 2523 2523 */ 2524 - void blk_end_sync_rq(struct request *rq) 2524 + void blk_end_sync_rq(struct request *rq, int error) 2525 2525 { 2526 2526 struct completion *waiting = rq->waiting; 2527 2527 ··· 3183 3183 /* 3184 3184 * queue lock must be held 3185 3185 */ 3186 - void end_that_request_last(struct request *req) 3186 + void end_that_request_last(struct request *req, int uptodate) 3187 3187 { 3188 3188 struct gendisk *disk = req->rq_disk; 3189 + int error; 3190 + 3191 + /* 3192 + * extend uptodate bool to allow < 0 value to be direct io error 3193 + */ 3194 + error = 0; 3195 + if (end_io_error(uptodate)) 3196 + error = !uptodate ? -EIO : uptodate; 3189 3197 3190 3198 if (unlikely(laptop_mode) && blk_fs_request(req)) 3191 3199 laptop_io_completion(); ··· 3208 3200 disk->in_flight--; 3209 3201 } 3210 3202 if (req->end_io) 3211 - req->end_io(req); 3203 + req->end_io(req, error); 3212 3204 else 3213 3205 __blk_put_request(req->q, req); 3214 3206 } ··· 3220 3212 if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) { 3221 3213 add_disk_randomness(req->rq_disk); 3222 3214 blkdev_dequeue_request(req); 3223 - end_that_request_last(req); 3215 + end_that_request_last(req, uptodate); 3224 3216 } 3225 3217 } 3226 3218
+1 -1
drivers/block/DAC960.c
··· 3471 3471 3472 3472 if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) { 3473 3473 3474 - end_that_request_last(Request); 3474 + end_that_request_last(Request, UpToDate); 3475 3475 3476 3476 if (Command->Completion) { 3477 3477 complete(Command->Completion);
+1 -1
drivers/block/cciss.c
··· 2310 2310 printk("Done with %p\n", cmd->rq); 2311 2311 #endif /* CCISS_DEBUG */ 2312 2312 2313 - end_that_request_last(cmd->rq); 2313 + end_that_request_last(cmd->rq, status ? 1 : -EIO); 2314 2314 cmd_free(h,cmd,1); 2315 2315 } 2316 2316
+1 -1
drivers/block/cpqarray.c
··· 1036 1036 complete_buffers(cmd->rq->bio, ok); 1037 1037 1038 1038 DBGPX(printk("Done with %p\n", cmd->rq);); 1039 - end_that_request_last(cmd->rq); 1039 + end_that_request_last(cmd->rq, ok ? 1 : -EIO); 1040 1040 } 1041 1041 1042 1042 /*
+1 -1
drivers/block/floppy.c
··· 2301 2301 add_disk_randomness(req->rq_disk); 2302 2302 floppy_off((long)req->rq_disk->private_data); 2303 2303 blkdev_dequeue_request(req); 2304 - end_that_request_last(req); 2304 + end_that_request_last(req, uptodate); 2305 2305 2306 2306 /* We're done with the request */ 2307 2307 current_req = NULL;
+1 -1
drivers/block/nbd.c
··· 136 136 137 137 spin_lock_irqsave(q->queue_lock, flags); 138 138 if (!end_that_request_first(req, uptodate, req->nr_sectors)) { 139 - end_that_request_last(req); 139 + end_that_request_last(req, uptodate); 140 140 } 141 141 spin_unlock_irqrestore(q->queue_lock, flags); 142 142 }
+1 -1
drivers/block/sx8.c
··· 770 770 rc = end_that_request_first(req, uptodate, req->hard_nr_sectors); 771 771 assert(rc == 0); 772 772 773 - end_that_request_last(req); 773 + end_that_request_last(req, uptodate); 774 774 775 775 rc = carm_put_request(host, crq); 776 776 assert(rc == 0);
+1 -1
drivers/block/ub.c
··· 951 951 static void ub_end_rq(struct request *rq, int uptodate) 952 952 { 953 953 end_that_request_first(rq, uptodate, rq->hard_nr_sectors); 954 - end_that_request_last(rq); 954 + end_that_request_last(rq, uptodate); 955 955 } 956 956 957 957 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
+1 -1
drivers/block/viodasd.c
··· 305 305 if (end_that_request_first(req, uptodate, num_sectors)) 306 306 return; 307 307 add_disk_randomness(req->rq_disk); 308 - end_that_request_last(req); 308 + end_that_request_last(req, uptodate); 309 309 } 310 310 311 311 /*
+1 -1
drivers/cdrom/cdu31a.c
··· 1402 1402 if (!end_that_request_first(req, 1, nblock)) { 1403 1403 spin_lock_irq(q->queue_lock); 1404 1404 blkdev_dequeue_request(req); 1405 - end_that_request_last(req); 1405 + end_that_request_last(req, 1); 1406 1406 spin_unlock_irq(q->queue_lock); 1407 1407 } 1408 1408 continue;
+2 -2
drivers/ide/ide-cd.c
··· 614 614 */ 615 615 spin_lock_irqsave(&ide_lock, flags); 616 616 end_that_request_chunk(failed, 0, failed->data_len); 617 - end_that_request_last(failed); 617 + end_that_request_last(failed, 0); 618 618 spin_unlock_irqrestore(&ide_lock, flags); 619 619 } 620 620 ··· 1735 1735 1736 1736 spin_lock_irqsave(&ide_lock, flags); 1737 1737 blkdev_dequeue_request(rq); 1738 - end_that_request_last(rq); 1738 + end_that_request_last(rq, 1); 1739 1739 HWGROUP(drive)->rq = NULL; 1740 1740 spin_unlock_irqrestore(&ide_lock, flags); 1741 1741 return ide_stopped;
+3 -3
drivers/ide/ide-io.c
··· 89 89 90 90 blkdev_dequeue_request(rq); 91 91 HWGROUP(drive)->rq = NULL; 92 - end_that_request_last(rq); 92 + end_that_request_last(rq, uptodate); 93 93 ret = 0; 94 94 } 95 95 return ret; ··· 247 247 } 248 248 blkdev_dequeue_request(rq); 249 249 HWGROUP(drive)->rq = NULL; 250 - end_that_request_last(rq); 250 + end_that_request_last(rq, 1); 251 251 spin_unlock_irqrestore(&ide_lock, flags); 252 252 } 253 253 ··· 379 379 blkdev_dequeue_request(rq); 380 380 HWGROUP(drive)->rq = NULL; 381 381 rq->errors = err; 382 - end_that_request_last(rq); 382 + end_that_request_last(rq, !rq->errors); 383 383 spin_unlock_irqrestore(&ide_lock, flags); 384 384 } 385 385
+1 -1
drivers/message/i2o/i2o_block.c
··· 466 466 467 467 spin_lock_irqsave(q->queue_lock, flags); 468 468 469 - end_that_request_last(req); 469 + end_that_request_last(req, uptodate); 470 470 471 471 if (likely(dev)) { 472 472 dev->open_queue_depth--;
+2 -2
drivers/mmc/mmc_block.c
··· 263 263 */ 264 264 add_disk_randomness(req->rq_disk); 265 265 blkdev_dequeue_request(req); 266 - end_that_request_last(req); 266 + end_that_request_last(req, 1); 267 267 } 268 268 spin_unlock_irq(&md->lock); 269 269 } while (ret); ··· 289 289 290 290 add_disk_randomness(req->rq_disk); 291 291 blkdev_dequeue_request(req); 292 - end_that_request_last(req); 292 + end_that_request_last(req, 0); 293 293 spin_unlock_irq(&md->lock); 294 294 295 295 return 0;
+1 -1
drivers/s390/block/dasd.c
··· 1035 1035 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 1036 1036 BUG(); 1037 1037 add_disk_randomness(req->rq_disk); 1038 - end_that_request_last(req); 1038 + end_that_request_last(req, uptodate); 1039 1039 } 1040 1040 1041 1041 /*
+1 -1
drivers/s390/char/tape_block.c
··· 78 78 { 79 79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 80 80 BUG(); 81 - end_that_request_last(req); 81 + end_that_request_last(req, uptodate); 82 82 } 83 83 84 84 static void
+2 -2
drivers/scsi/ide-scsi.c
··· 1046 1046 1047 1047 /* kill current request */ 1048 1048 blkdev_dequeue_request(req); 1049 - end_that_request_last(req); 1049 + end_that_request_last(req, 0); 1050 1050 if (req->flags & REQ_SENSE) 1051 1051 kfree(scsi->pc->buffer); 1052 1052 kfree(scsi->pc); ··· 1056 1056 /* now nuke the drive queue */ 1057 1057 while ((req = elv_next_request(drive->queue))) { 1058 1058 blkdev_dequeue_request(req); 1059 - end_that_request_last(req); 1059 + end_that_request_last(req, 0); 1060 1060 } 1061 1061 1062 1062 HWGROUP(drive)->rq = NULL;
+1 -1
drivers/scsi/scsi_lib.c
··· 791 791 spin_lock_irqsave(q->queue_lock, flags); 792 792 if (blk_rq_tagged(req)) 793 793 blk_queue_end_tag(q, req); 794 - end_that_request_last(req); 794 + end_that_request_last(req, uptodate); 795 795 spin_unlock_irqrestore(q->queue_lock, flags); 796 796 797 797 /*
+1 -1
drivers/scsi/sd.c
··· 748 748 * force journal abort of barriers 749 749 */ 750 750 end_that_request_first(rq, -EOPNOTSUPP, rq->hard_nr_sectors); 751 - end_that_request_last(rq); 751 + end_that_request_last(rq, -EOPNOTSUPP); 752 752 } 753 753 } 754 754
+3 -3
include/linux/blkdev.h
··· 102 102 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2); 103 103 104 104 struct request; 105 - typedef void (rq_end_io_fn)(struct request *); 105 + typedef void (rq_end_io_fn)(struct request *, int); 106 106 107 107 struct request_list { 108 108 int count[2]; ··· 560 560 extern void generic_make_request(struct bio *bio); 561 561 extern void blk_put_request(struct request *); 562 562 extern void __blk_put_request(request_queue_t *, struct request *); 563 - extern void blk_end_sync_rq(struct request *rq); 563 + extern void blk_end_sync_rq(struct request *rq, int error); 564 564 extern void blk_attempt_remerge(request_queue_t *, struct request *); 565 565 extern struct request *blk_get_request(request_queue_t *, int, gfp_t); 566 566 extern void blk_insert_request(request_queue_t *, struct request *, int, void *); ··· 614 614 */ 615 615 extern int end_that_request_first(struct request *, int, int); 616 616 extern int end_that_request_chunk(struct request *, int, int); 617 - extern void end_that_request_last(struct request *); 617 + extern void end_that_request_last(struct request *, int); 618 618 extern void end_request(struct request *req, int uptodate); 619 619 620 620 /*