Merge branch 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block

* 'for-linus' of git://brick.kernel.dk/data/git/linux-2.6-block:
[PATCH] Fixup cciss error handling
[PATCH] Allow as-iosched to be unloaded
[PATCH 2/2] cciss: remove calls to pci_disable_device
[PATCH 1/2] cciss: map out more memory for config table
[PATCH] Propagate down request sync flag

Resolve trivial whitespace conflict in drivers/block/cciss.c manually.

4 files changed, 34 insertions(+), 29 deletions(-)
block/as-iosched.c | +1 -14

@@ -1462,20 +1462,7 @@
 
 static int __init as_init(void)
 {
-	int ret;
-
-	ret = elv_register(&iosched_as);
-	if (!ret) {
-		/*
-		 * don't allow AS to get unregistered, since we would have
-		 * to browse all tasks in the system and release their
-		 * as_io_context first
-		 */
-		__module_get(THIS_MODULE);
-		return 0;
-	}
-
-	return ret;
+	return elv_register(&iosched_as);
 }
 
 static void __exit as_exit(void)
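For context, the hunk above drops the __module_get(THIS_MODULE) that used to pin the AS scheduler in memory, so its module refcount can reach zero again and the scheduler can be unloaded (the "[PATCH] Allow as-iosched to be unloaded" entry above). A minimal sketch of the resulting init/exit shape for an I/O scheduler module; the "example" names are placeholders and the elevator_ops are elided, this is not code from as-iosched.c:

#include <linux/module.h>
#include <linux/elevator.h>

static struct elevator_type iosched_example = {
	/* .ops with the scheduler callbacks would go here */
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};

static int __init example_init(void)
{
	/* Register only; no __module_get(THIS_MODULE), so unloading can succeed. */
	return elv_register(&iosched_example);
}

static void __exit example_exit(void)
{
	elv_unregister(&iosched_example);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");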
block/cfq-iosched.c | +12 -6

@@ -219,9 +219,12 @@
 	return !cfqd->busy_queues;
 }
 
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
 {
-	if (rw == READ || rw == WRITE_SYNC)
+	/*
+	 * Use the per-process queue, for read requests and syncronous writes
+	 */
+	if (!(rw & REQ_RW) || is_sync)
 		return task->pid;
 
 	return CFQ_KEY_ASYNC;
@@ -476,7 +473,7 @@
 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
 	struct task_struct *tsk = current;
-	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
 	struct cfq_queue *cfqq;
 
 	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
@@ -1751,6 +1748,9 @@
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct task_struct *tsk = current;
 	struct cfq_queue *cfqq;
+	unsigned int key;
+
+	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
 
 	/*
 	 * don't force setup of a queue from here, as a call to may_queue
@@ -1761,7 +1755,7 @@
 	 * so just lookup a possibly existing queue, or return 'may queue'
 	 * if that fails
 	 */
-	cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
+	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
 	if (cfqq) {
 		cfq_init_prio_data(cfqq);
 		cfq_prio_boost(cfqq);
@@ -1804,10 +1798,10 @@
 	struct task_struct *tsk = current;
 	struct cfq_io_context *cic;
 	const int rw = rq_data_dir(rq);
-	pid_t key = cfq_queue_pid(tsk, rw);
+	const int is_sync = rq_is_sync(rq);
+	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
-	int is_sync = key != CFQ_KEY_ASYNC;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
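The new cfq_queue_pid() signature above is the heart of the change: reads and synchronous writes keep getting a per-process key (task->pid), while asynchronous writes all collapse onto the shared CFQ_KEY_ASYNC key. A small userspace sketch of that decision, using illustrative flag values rather than the kernel's REQ_* definitions:

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>

#define REQ_RW		0x01	/* illustrative: set for writes, clear for reads */
#define CFQ_KEY_ASYNC	0	/* illustrative: one shared key for async writes */

/* Mirrors the logic of the new cfq_queue_pid(task, rw, is_sync). */
static pid_t queue_key(pid_t pid, int rw, int is_sync)
{
	/* Per-process queue for reads and synchronous writes. */
	if (!(rw & REQ_RW) || is_sync)
		return pid;

	return CFQ_KEY_ASYNC;
}

int main(void)
{
	pid_t pid = getpid();

	printf("read        -> key %d\n", (int)queue_key(pid, 0, 0));
	printf("sync write  -> key %d\n", (int)queue_key(pid, REQ_RW, 1));
	printf("async write -> key %d\n", (int)queue_key(pid, REQ_RW, 0));
	return 0;
}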
block/ll_rw_blk.c | +20 -8

@@ -2058,15 +2058,16 @@
  * Returns NULL on failure, with queue_lock held.
  * Returns !NULL on success, with queue_lock *not held*.
  */
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-				   gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw_flags,
+				   struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
+	const int rw = rw_flags & 0x01;
 	int may_queue, priv;
 
-	may_queue = elv_may_queue(q, rw);
+	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
@@ -2115,7 +2114,7 @@
 
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -2163,12 +2162,13 @@
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
-static struct request *get_request_wait(request_queue_t *q, int rw,
+static struct request *get_request_wait(request_queue_t *q, int rw_flags,
 					struct bio *bio)
 {
+	const int rw = rw_flags & 0x01;
 	struct request *rq;
 
-	rq = get_request(q, rw, bio, GFP_NOIO);
+	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
 		struct request_list *rl = &q->rq;
@@ -2177,7 +2175,7 @@
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw, bio, GFP_NOIO);
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
 
 		if (!rq) {
 			struct io_context *ioc;
@@ -2912,6 +2910,7 @@
 	int el_ret, nr_sectors, barrier, err;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
+	int rw_flags;
 
 	nr_sectors = bio_sectors(bio);
 
@@ -2987,10 +2984,19 @@
 
 get_rq:
 	/*
+	 * This sync check and mask will be re-done in init_request_from_bio(),
+	 * but we need to set it earlier to expose the sync flag to the
+	 * rq allocator and io schedulers.
+	 */
+	rw_flags = bio_data_dir(bio);
+	if (sync)
+		rw_flags |= REQ_RW_SYNC;
+
+	/*
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request_wait(q, bio_data_dir(bio), bio);
+	req = get_request_wait(q, rw_flags, bio);
 
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
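The ll_rw_blk.c side of the sync-flag propagation is plumbing: __make_request() packs the data direction plus (when the bio is synchronous) REQ_RW_SYNC into one rw_flags word, and get_request()/get_request_wait() recover the plain direction with rw_flags & 0x01 while handing the full word to elv_may_queue() and blk_alloc_request(). A self-contained sketch of that packing and unpacking, again with illustrative flag values rather than the kernel's definitions:

#include <stdio.h>

#define DIR_WRITE	0x01	/* illustrative: bit 0 carries the data direction */
#define REQ_RW_SYNC	0x02	/* illustrative: sync hint carried alongside it */

/* Stand-in for get_request(): only the low bit is the direction. */
static void get_request(int rw_flags)
{
	const int rw = rw_flags & 0x01;

	printf("direction=%s sync=%s\n",
	       rw ? "write" : "read",
	       (rw_flags & REQ_RW_SYNC) ? "yes" : "no");
}

int main(void)
{
	int sync = 1;			/* as if bio_sync(bio) were true */
	int rw_flags = DIR_WRITE;	/* as if bio_data_dir(bio) were a write */

	/* Mirrors the new code in __make_request(). */
	if (sync)
		rw_flags |= REQ_RW_SYNC;

	get_request(rw_flags);
	return 0;
}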
drivers/block/cciss.c | +1 -1

@@ -3004,7 +3004,7 @@
 	}
 	return 0;
 
-err_out_free_res:
+err_out_free_res:
 	/*
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Smart Array controllers that pci_enable_device does not undo
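The surrounding context is the error-handling rework from the cciss patches listed above: on probe failure the driver backs out its setup but deliberately never calls pci_disable_device(). A rough sketch of that error-path shape; the function and helper names here are made up for illustration, and only the "skip pci_disable_device()" point is taken from the change above:

#include <linux/pci.h>

/* Stand-in for the controller-specific setup the real driver performs. */
static int example_setup(struct pci_dev *pdev)
{
	return 0;
}

static int example_pci_init(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "example");
	if (err)
		return err;

	err = example_setup(pdev);
	if (err)
		goto err_out_free_res;

	return 0;

err_out_free_res:
	/*
	 * As in cciss: release the regions, but deliberately skip
	 * pci_disable_device(), since the controller does not recover
	 * from it on a later pci_enable_device().
	 */
	pci_release_regions(pdev);
	return err;
}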