Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
block: fix setting of max_segment_size and seg_boundary mask
block: internal dequeue shouldn't start timer
block: set disk->node_id before it's being used
When block layer fails to map iov, it calls bio_unmap_user to undo

+37 -19
+2 -2
block/blk-barrier.c
···
161  161        /*
162  162         * Prep proxy barrier request.
163  163         */
164        -    blkdev_dequeue_request(rq);
     164   +   elv_dequeue_request(q, rq);
165  165        q->orig_bar_rq = rq;
166  166        rq = &q->bar_rq;
167  167        blk_rq_init(q, rq);
···
219  219         * This can happen when the queue switches to
220  220         * ORDERED_NONE while this request is on it.
221  221         */
222        -    blkdev_dequeue_request(rq);
     222   +   elv_dequeue_request(q, rq);
223  223        if (__blk_end_request(rq, -EOPNOTSUPP,
224  224                              blk_rq_bytes(rq)))
225  225                BUG();
+24 -2
block/blk-core.c
···
 592   592                         1 << QUEUE_FLAG_STACKABLE);
 593   593     q->queue_lock = lock;
 594   594 
 595         -  blk_queue_segment_boundary(q, 0xffffffff);
       595  +  blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
 596   596 
 597   597     blk_queue_make_request(q, __make_request);
 598   598     blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
···
1637  1637  EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1638  1638 
1639  1639  /**
      1640  +  * blkdev_dequeue_request - dequeue request and start timeout timer
      1641  +  * @req: request to dequeue
      1642  +  *
      1643  +  * Dequeue @req and start timeout timer on it. This hands off the
      1644  +  * request to the driver.
      1645  +  *
      1646  +  * Block internal functions which don't want to start timer should
      1647  +  * call elv_dequeue_request().
      1648  +  */
      1649  +  void blkdev_dequeue_request(struct request *req)
      1650  +  {
      1651  +          elv_dequeue_request(req->q, req);
      1652  + 
      1653  +          /*
      1654  +           * We are now handing the request to the hardware, add the
      1655  +           * timeout handler.
      1656  +           */
      1657  +          blk_add_timer(req);
      1658  +  }
      1659  +  EXPORT_SYMBOL(blkdev_dequeue_request);
      1660  + 
      1661  +  /**
1640  1662   * __end_that_request_first - end I/O on a request
1641  1663   * @req: the request being processed
1642  1664   * @error: %0 for success, < %0 for error
···
1796  1774          blk_queue_end_tag(req->q, req);
1797  1775 
1798  1776      if (blk_queued_rq(req))
1799        -       blkdev_dequeue_request(req);
      1777  +      elv_dequeue_request(req->q, req);
1800  1778 
1801  1779      if (unlikely(laptop_mode) && blk_fs_request(req))
1802  1780          laptop_io_completion();
+1 -1
block/blk-map.c
···
224  224         */
225  225        bio_get(bio);
226  226        bio_endio(bio, 0);
227        -    bio_unmap_user(bio);
     227   +   __blk_rq_unmap_user(bio);
228  228        return -EINVAL;
229  229  }
230  230 
+4
block/blk-settings.c
···
125  125        q->nr_requests = BLKDEV_MAX_RQ;
126  126        blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
127  127        blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
     128   +   blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
     129   +   blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
     130   +
128  131        q->make_request_fn = mfn;
129  132        q->backing_dev_info.ra_pages =
130  133                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
···
317  314        /* zero is "infinity" */
318  315        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
319  316        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
     317   +   t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
320  318 
321  319        t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
322  320        t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
-7
block/elevator.c
···
844  844         */
845  845        if (blk_account_rq(rq))
846  846                q->in_flight++;
847        -
848        -    /*
849        -     * We are now handing the request to the hardware, add the
850        -     * timeout handler.
851        -     */
852        -    blk_add_timer(rq);
853  847  }
854        - EXPORT_SYMBOL(elv_dequeue_request);
855  848 
856  849  int elv_queue_empty(struct request_queue *q)
+1 -1
block/genhd.c
···
1102  1102              kfree(disk);
1103  1103              return NULL;
1104  1104      }
      1105  +  disk->node_id = node_id;
1105  1106      if (disk_expand_part_tbl(disk, 0)) {
1106  1107              free_part_stats(&disk->part0);
1107  1108              kfree(disk);
···
1117  1116              device_initialize(disk_to_dev(disk));
1118  1117              INIT_WORK(&disk->async_notify,
1119  1118                        media_change_notify_thread);
1120        -          disk->node_id = node_id;
1121  1119      }
1122  1120      return disk;
1123  1121  }
+1 -1
drivers/md/dm-table.c
···
668  668        if (!rs->max_segment_size)
669  669                rs->max_segment_size = MAX_SEGMENT_SIZE;
670  670        if (!rs->seg_boundary_mask)
671        -            rs->seg_boundary_mask = -1;
     671   +           rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
672  672        if (!rs->bounce_pfn)
673  673                rs->bounce_pfn = -1;
674  674  }
+4 -5
include/linux/blkdev.h
···
786  786        blk_run_backing_dev(mapping->backing_dev_info, NULL);
787  787  }
788  788 
     789   +  extern void blkdev_dequeue_request(struct request *req);
     790   +
789  791  /*
790  792   * blk_end_request() and friends.
791  793   * __blk_end_request() and end_request() must be called with
···
821  819   */
822  820  extern unsigned int blk_rq_bytes(struct request *rq);
823  821  extern unsigned int blk_rq_cur_bytes(struct request *rq);
824        -
825        -  static inline void blkdev_dequeue_request(struct request *req)
826        -  {
827        -          elv_dequeue_request(req->q, req);
828        -  }
829  822 
830  823  /*
831  824   * Access functions for manipulating queue properties
···
917  920  #define BLK_DEF_MAX_SECTORS     1024
918  921 
919  922  #define MAX_SEGMENT_SIZE        65536
     923   +
     924   +  #define BLK_SEG_BOUNDARY_MASK   0xFFFFFFFFUL
920  925 
921  926  #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)