block: internal dequeue shouldn't start timer

blkdev_dequeue_request() and elv_dequeue_request() are equivalent and
both start the timeout timer. The barrier code dequeues the original
barrier request but doesn't pass the request itself to the lower level
driver; only the broken-down proxy requests are passed. However,
because the original barrier request goes through the same dequeue
path, the timeout timer is started on it. If the barrier sequence takes
long enough, this timer expires, but the low level driver has no idea
about the request and an oops follows.

The timeout timer shouldn't have been started on the original barrier
request, as it never goes through actual IO. This patch unexports
elv_dequeue_request(), which has no external user anyway, makes it
operate on the elevator proper without adding the timer, and makes
blkdev_dequeue_request() call elv_dequeue_request() and then add the
timer. Internal users which don't pass the request to the driver - the
barrier code and end_that_request_last() - are converted to use
elv_dequeue_request().
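
To summarize the resulting arrangement, here is a condensed sketch
(paraphrasing the code in the diff below, not a verbatim copy of it):

/* Internal dequeue: elevator bookkeeping only, no timeout timer.
 * Used by the barrier code and end_that_request_last().
 */
void elv_dequeue_request(struct request_queue *q, struct request *rq);

/* Driver handoff: dequeue and arm the timeout timer. */
void blkdev_dequeue_request(struct request *req)
{
        elv_dequeue_request(req->q, req);
        blk_add_timer(req);     /* the driver now owns the request */
}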

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Mike Anderson <andmike@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

Authored by Tejun Heo and committed by Jens Axboe (53a08807, bf91db18)

4 files changed, 27 insertions(+), 15 deletions(-)

block/blk-barrier.c  +2 -2

@@ -161,7 +161,7 @@
         /*
          * Prep proxy barrier request.
          */
-        blkdev_dequeue_request(rq);
+        elv_dequeue_request(q, rq);
         q->orig_bar_rq = rq;
         rq = &q->bar_rq;
         blk_rq_init(q, rq);
@@ -219,7 +219,7 @@
                  * This can happen when the queue switches to
                  * ORDERED_NONE while this request is on it.
                  */
-                blkdev_dequeue_request(rq);
+                elv_dequeue_request(q, rq);
                 if (__blk_end_request(rq, -EOPNOTSUPP,
                                       blk_rq_bytes(rq)))
                         BUG();
block/blk-core.c  +23 -1

@@ -1637,6 +1637,28 @@
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
 /**
+ * blkdev_dequeue_request - dequeue request and start timeout timer
+ * @req: request to dequeue
+ *
+ * Dequeue @req and start timeout timer on it.  This hands off the
+ * request to the driver.
+ *
+ * Block internal functions which don't want to start timer should
+ * call elv_dequeue_request().
+ */
+void blkdev_dequeue_request(struct request *req)
+{
+        elv_dequeue_request(req->q, req);
+
+        /*
+         * We are now handing the request to the hardware, add the
+         * timeout handler.
+         */
+        blk_add_timer(req);
+}
+EXPORT_SYMBOL(blkdev_dequeue_request);
+
+/**
  * __end_that_request_first - end I/O on a request
  * @req: the request being processed
  * @error: %0 for success, < %0 for error
@@ -1796,7 +1818,7 @@
                 blk_queue_end_tag(req->q, req);
 
         if (blk_queued_rq(req))
-                blkdev_dequeue_request(req);
+                elv_dequeue_request(req->q, req);
 
         if (unlikely(laptop_mode) && blk_fs_request(req))
                 laptop_io_completion();
block/elevator.c  -7

@@ -844,14 +844,7 @@
          */
         if (blk_account_rq(rq))
                 q->in_flight++;
-
-        /*
-         * We are now handing the request to the hardware, add the
-         * timeout handler.
-         */
-        blk_add_timer(rq);
 }
-EXPORT_SYMBOL(elv_dequeue_request);
 
 int elv_queue_empty(struct request_queue *q)
 {
include/linux/blkdev.h  +2 -5

@@ -786,6 +786,8 @@
                 blk_run_backing_dev(mapping->backing_dev_info, NULL);
 }
 
+extern void blkdev_dequeue_request(struct request *req);
+
 /*
  * blk_end_request() and friends.
  * __blk_end_request() and end_request() must be called with
@@ -821,11 +823,6 @@
  */
 extern unsigned int blk_rq_bytes(struct request *rq);
 extern unsigned int blk_rq_cur_bytes(struct request *rq);
-
-static inline void blkdev_dequeue_request(struct request *req)
-{
-        elv_dequeue_request(req->q, req);
-}
 
 /*
  * Access functions for manipulating queue properties
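
For context, not part of this patch: the exported blkdev_dequeue_request()
is what a low-level driver's request_fn keeps calling when it hands a
request to the hardware, so real IO still gets the timeout timer. Below is
a minimal sketch of that era's request_fn pattern; example_request_fn and
example_hw_submit are hypothetical names, and elv_next_request() is the
then-current helper for peeking at the head of the queue:

#include <linux/blkdev.h>

static void example_hw_submit(struct request *rq);      /* hypothetical hardware submit */

static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                /* Hand off to the hardware; this also starts the timeout timer. */
                blkdev_dequeue_request(rq);
                example_hw_submit(rq);
        }
}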