Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
block: fix setting of max_segment_size and seg_boundary mask
block: internal dequeue shouldn't start timer
block: set disk->node_id before it's being used
When block layer fails to map iov, it calls bio_unmap_user to undo

8 files changed, 37 insertions(+), 19 deletions(-)
block/blk-barrier.c  +2 -2

···
         /*
          * Prep proxy barrier request.
          */
-        blkdev_dequeue_request(rq);
+        elv_dequeue_request(q, rq);
         q->orig_bar_rq = rq;
         rq = &q->bar_rq;
         blk_rq_init(q, rq);
···
                          * This can happen when the queue switches to
                          * ORDERED_NONE while this request is on it.
                          */
-                        blkdev_dequeue_request(rq);
+                        elv_dequeue_request(q, rq);
                         if (__blk_end_request(rq, -EOPNOTSUPP,
                                               blk_rq_bytes(rq)))
                                 BUG();
block/blk-core.c  +24 -2

···
                                    1 << QUEUE_FLAG_STACKABLE);
         q->queue_lock = lock;

-        blk_queue_segment_boundary(q, 0xffffffff);
+        blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);

         blk_queue_make_request(q, __make_request);
         blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
···
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

 /**
+ * blkdev_dequeue_request - dequeue request and start timeout timer
+ * @req: request to dequeue
+ *
+ * Dequeue @req and start timeout timer on it.  This hands off the
+ * request to the driver.
+ *
+ * Block internal functions which don't want to start timer should
+ * call elv_dequeue_request().
+ */
+void blkdev_dequeue_request(struct request *req)
+{
+        elv_dequeue_request(req->q, req);
+
+        /*
+         * We are now handing the request to the hardware, add the
+         * timeout handler.
+         */
+        blk_add_timer(req);
+}
+EXPORT_SYMBOL(blkdev_dequeue_request);
+
+/**
  * __end_that_request_first - end I/O on a request
  * @req: the request being processed
  * @error: %0 for success, < %0 for error
···
         blk_queue_end_tag(req->q, req);

         if (blk_queued_rq(req))
-                blkdev_dequeue_request(req);
+                elv_dequeue_request(req->q, req);

         if (unlikely(laptop_mode) && blk_fs_request(req))
                 laptop_io_completion();
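The kerneldoc above defines the split this series introduces: a driver calls blkdev_dequeue_request() at the point where it actually hands a request to the hardware, which is also where the timeout timer should start, while block-internal code that merely takes a request off the queue (barrier setup, request completion) calls elv_dequeue_request() and leaves the timer alone. A minimal sketch of the usual driver-side pattern on this kernel; mydrv_request_fn() and mydrv_issue_to_hw() are hypothetical names:

static void mydrv_request_fn(struct request_queue *q)
{
        struct request *rq;

        /* peek at the head of the dispatch queue */
        while ((rq = elv_next_request(q)) != NULL) {
                /*
                 * Hand-off point: take the request off the queue and
                 * start its timeout timer.
                 */
                blkdev_dequeue_request(rq);

                /* queue it to the (hypothetical) hardware; the IRQ
                 * handler later completes it with __blk_end_request() */
                mydrv_issue_to_hw(rq);
        }
}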
block/blk-map.c  +1 -1

···
                  */
                 bio_get(bio);
                 bio_endio(bio, 0);
-                bio_unmap_user(bio);
+                __blk_rq_unmap_user(bio);
                 return -EINVAL;
         }
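The call being replaced matters because blk_rq_map_user_iov() does not always map the user pages directly: for unaligned iovecs it falls back to a bounce copy, and a copied bio must be torn down with bio_uncopy_user(), not bio_unmap_user(). __blk_rq_unmap_user() already encapsulates that choice; a sketch of its shape, assuming the blk-map.c helper of this era:

static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);        /* pages were mapped in place */
                else
                        ret = bio_uncopy_user(bio); /* bounce buffer: copy back, free */
        }

        return ret;
}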
block/blk-settings.c  +4

···
         q->nr_requests = BLKDEV_MAX_RQ;
         blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
         blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+        blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
+        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
+
         q->make_request_fn = mfn;
         q->backing_dev_info.ra_pages =
                         (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
···
         /* zero is "infinity" */
         t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
         t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);

         t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
         t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
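The two changes here work together: blk_queue_make_request() now seeds seg_boundary_mask and max_segment_size, so queues created by stacking drivers (dm, md) no longer start out with a zero boundary mask, and blk_queue_stack_limits() folds the bottom device's seg_boundary_mask into the top queue the same way it already does for the sector limits. A sketch of how a stacking driver would apply this; stackdrv_set_limits() is a hypothetical name:

/*
 * Combine the limits of all member devices into the stacked queue,
 * keeping the most restrictive non-zero value of each field.
 */
static void stackdrv_set_limits(struct request_queue *q,
                                struct block_device **bdevs, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                blk_queue_stack_limits(q, bdev_get_queue(bdevs[i]));
}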
block/elevator.c  -7

···
          */
         if (blk_account_rq(rq))
                 q->in_flight++;
-
-        /*
-         * We are now handing the request to the hardware, add the
-         * timeout handler.
-         */
-        blk_add_timer(rq);
 }
-EXPORT_SYMBOL(elv_dequeue_request);

 int elv_queue_empty(struct request_queue *q)
 {
block/genhd.c  +1 -1

···
                         kfree(disk);
                         return NULL;
                 }
+                disk->node_id = node_id;
                 if (disk_expand_part_tbl(disk, 0)) {
                         free_part_stats(&disk->part0);
                         kfree(disk);
···
                 device_initialize(disk_to_dev(disk));
                 INIT_WORK(&disk->async_notify,
                         media_change_notify_thread);
-                disk->node_id = node_id;
         }
         return disk;
 }
drivers/md/dm-table.c  +1 -1

···
         if (!rs->max_segment_size)
                 rs->max_segment_size = MAX_SEGMENT_SIZE;
         if (!rs->seg_boundary_mask)
-                rs->seg_boundary_mask = -1;
+                rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
         if (!rs->bounce_pfn)
                 rs->bounce_pfn = -1;
 }
include/linux/blkdev.h  +4 -5

···
         blk_run_backing_dev(mapping->backing_dev_info, NULL);
 }

+extern void blkdev_dequeue_request(struct request *req);
+
 /*
  * blk_end_request() and friends.
  * __blk_end_request() and end_request() must be called with
···
  */
 extern unsigned int blk_rq_bytes(struct request *rq);
 extern unsigned int blk_rq_cur_bytes(struct request *rq);
-
-static inline void blkdev_dequeue_request(struct request *req)
-{
-        elv_dequeue_request(req->q, req);
-}

 /*
  * Access functions for manipulating queue properties
···
 #define BLK_DEF_MAX_SECTORS 1024

 #define MAX_SEGMENT_SIZE 65536
+
+#define BLK_SEG_BOUNDARY_MASK 0xFFFFFFFFUL

 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
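BLK_SEG_BOUNDARY_MASK just gives the old default a name so core code and dm agree on it; a 0xFFFFFFFF mask means segments only may not cross a 4GB boundary, i.e. effectively no restriction. Hardware with a real limit still overrides it; a sketch of a hypothetical driver's queue setup, where mydrv_init_queue() and the 64KB boundary are illustrative only:

/* a controller whose DMA engine cannot cross 64KB boundaries */
static void mydrv_init_queue(struct request_queue *q)
{
        blk_queue_segment_boundary(q, 0xffff);
        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
}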