Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
cciss: Add cciss_allow_hpsa module parameter
cciss: Fix multiple calls to pci_release_regions
blk-settings: fix function parameter kernel-doc notation
writeback: kill space in debugfs item name
writeback: account IO throttling wait as iowait
elv_iosched_store(): fix strstrip() misuse
cfq-iosched: avoid probable slice overrun when idling
cfq-iosched: apply bool value where we return 0/1
cfq-iosched: fix think time allowed for seekers
cfq-iosched: fix the slice residual sign
cfq-iosched: abstract out the 'may this cfqq dispatch' logic
block: use proper BLK_RW_ASYNC in blk_queue_start_tag()
block: Separate read and write statistics of in_flight requests v2
block: get rid of kblock_schedule_delayed_work()
cfq-iosched: fix possible problem with jiffies wraparound
cfq-iosched: fix issue with rq-rq merging and fifo list ordering

+228 -201
+4 -12
block/blk-core.c
··· 70 part_stat_inc(cpu, part, merges[rw]); 71 else { 72 part_round_stats(cpu, part); 73 - part_inc_in_flight(part); 74 } 75 76 part_stat_unlock(); ··· 1030 if (now == part->stamp) 1031 return; 1032 1033 - if (part->in_flight) { 1034 __part_stat_add(cpu, part, time_in_queue, 1035 - part->in_flight * (now - part->stamp)); 1036 __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); 1037 } 1038 part->stamp = now; ··· 1739 part_stat_inc(cpu, part, ios[rw]); 1740 part_stat_add(cpu, part, ticks[rw], duration); 1741 part_round_stats(cpu, part); 1742 - part_dec_in_flight(part); 1743 1744 part_stat_unlock(); 1745 } ··· 2491 return queue_work(kblockd_workqueue, work); 2492 } 2493 EXPORT_SYMBOL(kblockd_schedule_work); 2494 - 2495 - int kblockd_schedule_delayed_work(struct request_queue *q, 2496 - struct delayed_work *work, 2497 - unsigned long delay) 2498 - { 2499 - return queue_delayed_work(kblockd_workqueue, work, delay); 2500 - } 2501 - EXPORT_SYMBOL(kblockd_schedule_delayed_work); 2502 2503 int __init blk_dev_init(void) 2504 {
··· 70 part_stat_inc(cpu, part, merges[rw]); 71 else { 72 part_round_stats(cpu, part); 73 + part_inc_in_flight(part, rw); 74 } 75 76 part_stat_unlock(); ··· 1030 if (now == part->stamp) 1031 return; 1032 1033 + if (part_in_flight(part)) { 1034 __part_stat_add(cpu, part, time_in_queue, 1035 + part_in_flight(part) * (now - part->stamp)); 1036 __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); 1037 } 1038 part->stamp = now; ··· 1739 part_stat_inc(cpu, part, ios[rw]); 1740 part_stat_add(cpu, part, ticks[rw], duration); 1741 part_round_stats(cpu, part); 1742 + part_dec_in_flight(part, rw); 1743 1744 part_stat_unlock(); 1745 } ··· 2491 return queue_work(kblockd_workqueue, work); 2492 } 2493 EXPORT_SYMBOL(kblockd_schedule_work); 2494 2495 int __init blk_dev_init(void) 2496 {
+1 -1
block/blk-merge.c
··· 351 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); 352 353 part_round_stats(cpu, part); 354 - part_dec_in_flight(part); 355 356 part_stat_unlock(); 357 }
··· 351 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); 352 353 part_round_stats(cpu, part); 354 + part_dec_in_flight(part, rq_data_dir(req)); 355 356 part_stat_unlock(); 357 }
+1 -1
block/blk-settings.c
··· 242 /** 243 * blk_queue_max_discard_sectors - set max sectors for a single discard 244 * @q: the request queue for the device 245 - * @max_discard: maximum number of sectors to discard 246 **/ 247 void blk_queue_max_discard_sectors(struct request_queue *q, 248 unsigned int max_discard_sectors)
··· 242 /** 243 * blk_queue_max_discard_sectors - set max sectors for a single discard 244 * @q: the request queue for the device 245 + * @max_discard_sectors: maximum number of sectors to discard 246 **/ 247 void blk_queue_max_discard_sectors(struct request_queue *q, 248 unsigned int max_discard_sectors)
+1 -1
block/blk-tag.c
··· 359 max_depth -= 2; 360 if (!max_depth) 361 max_depth = 1; 362 - if (q->in_flight[0] > max_depth) 363 return 1; 364 } 365
··· 359 max_depth -= 2; 360 if (!max_depth) 361 max_depth = 1; 362 + if (q->in_flight[BLK_RW_ASYNC] > max_depth) 363 return 1; 364 } 365
+142 -117
block/cfq-iosched.c
··· 150 * idle window management 151 */ 152 struct timer_list idle_slice_timer; 153 - struct delayed_work unplug_work; 154 155 struct cfq_queue *active_queue; 156 struct cfq_io_context *active_cic; ··· 230 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args) 231 232 static void cfq_dispatch_insert(struct request_queue *, struct request *); 233 - static struct cfq_queue *cfq_get_queue(struct cfq_data *, int, 234 struct io_context *, gfp_t); 235 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *, 236 struct io_context *); ··· 241 } 242 243 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic, 244 - int is_sync) 245 { 246 - return cic->cfqq[!!is_sync]; 247 } 248 249 static inline void cic_set_cfqq(struct cfq_io_context *cic, 250 - struct cfq_queue *cfqq, int is_sync) 251 { 252 - cic->cfqq[!!is_sync] = cfqq; 253 } 254 255 /* 256 * We regard a request as SYNC, if it's either a read or has the SYNC bit 257 * set (in which case it could also be direct WRITE). 258 */ 259 - static inline int cfq_bio_sync(struct bio *bio) 260 { 261 - if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO)) 262 - return 1; 263 - 264 - return 0; 265 } 266 267 /* 268 * scheduler run of queue, if there are requests pending and no one in the 269 * driver that will restart queueing 270 */ 271 - static inline void cfq_schedule_dispatch(struct cfq_data *cfqd, 272 - unsigned long delay) 273 { 274 if (cfqd->busy_queues) { 275 cfq_log(cfqd, "schedule dispatch"); 276 - kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work, 277 - delay); 278 } 279 } 280 ··· 285 * if a queue is marked sync and has sync io queued. A sync queue with async 286 * io only, should not get full sync slice length. 287 */ 288 - static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync, 289 unsigned short prio) 290 { 291 const int base_slice = cfqd->cfq_slice[sync]; ··· 313 * isn't valid until the first request from the dispatch is activated 314 * and the slice time set. 315 */ 316 - static inline int cfq_slice_used(struct cfq_queue *cfqq) 317 { 318 if (cfq_cfqq_slice_new(cfqq)) 319 return 0; ··· 488 * we will service the queues. 489 */ 490 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, 491 - int add_front) 492 { 493 struct rb_node **p, *parent; 494 struct cfq_queue *__cfqq; ··· 504 } else 505 rb_key += jiffies; 506 } else if (!add_front) { 507 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; 508 - rb_key += cfqq->slice_resid; 509 cfqq->slice_resid = 0; 510 - } else 511 - rb_key = 0; 512 513 if (!RB_EMPTY_NODE(&cfqq->rb_node)) { 514 /* ··· 551 n = &(*p)->rb_left; 552 else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq)) 553 n = &(*p)->rb_right; 554 - else if (rb_key < __cfqq->rb_key) 555 n = &(*p)->rb_left; 556 else 557 n = &(*p)->rb_right; ··· 831 * reposition in fifo if next is older than rq 832 */ 833 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && 834 - time_before(next->start_time, rq->start_time)) 835 list_move(&rq->queuelist, &next->queuelist); 836 837 cfq_remove_request(next); 838 } ··· 850 * Disallow merge of a sync bio into an async request. 851 */ 852 if (cfq_bio_sync(bio) && !rq_is_sync(rq)) 853 - return 0; 854 855 /* 856 * Lookup the cfqq that this bio will be queued with. 
Allow ··· 858 */ 859 cic = cfq_cic_lookup(cfqd, current->io_context); 860 if (!cic) 861 - return 0; 862 863 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); 864 - if (cfqq == RQ_CFQQ(rq)) 865 - return 1; 866 - 867 - return 0; 868 } 869 870 static void __cfq_set_active_queue(struct cfq_data *cfqd, ··· 889 */ 890 static void 891 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, 892 - int timed_out) 893 { 894 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out); 895 ··· 917 } 918 } 919 920 - static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out) 921 { 922 struct cfq_queue *cfqq = cfqd->active_queue; 923 ··· 1029 */ 1030 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, 1031 struct cfq_queue *cur_cfqq, 1032 - int probe) 1033 { 1034 struct cfq_queue *cfqq; 1035 ··· 1093 if (!cic || !atomic_read(&cic->ioc->nr_tasks)) 1094 return; 1095 1096 cfq_mark_cfqq_wait_request(cfqq); 1097 1098 /* ··· 1141 */ 1142 static struct request *cfq_check_fifo(struct cfq_queue *cfqq) 1143 { 1144 - struct cfq_data *cfqd = cfqq->cfqd; 1145 - struct request *rq; 1146 - int fifo; 1147 1148 if (cfq_cfqq_fifo_expire(cfqq)) 1149 return NULL; ··· 1151 if (list_empty(&cfqq->fifo)) 1152 return NULL; 1153 1154 - fifo = cfq_cfqq_sync(cfqq); 1155 rq = rq_entry_fifo(cfqq->fifo.next); 1156 - 1157 - if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) 1158 rq = NULL; 1159 1160 - cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq); 1161 return rq; 1162 } 1163 ··· 1256 return dispatched; 1257 } 1258 1259 - /* 1260 - * Dispatch a request from cfqq, moving them to the request queue 1261 - * dispatch list. 1262 - */ 1263 - static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1264 { 1265 - struct request *rq; 1266 - 1267 - BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); 1268 - 1269 - /* 1270 - * follow expired path, else get first next available 1271 - */ 1272 - rq = cfq_check_fifo(cfqq); 1273 - if (!rq) 1274 - rq = cfqq->next_rq; 1275 - 1276 - /* 1277 - * insert request into driver dispatch list 1278 - */ 1279 - cfq_dispatch_insert(cfqd->queue, rq); 1280 - 1281 - if (!cfqd->active_cic) { 1282 - struct cfq_io_context *cic = RQ_CIC(rq); 1283 - 1284 - atomic_long_inc(&cic->ioc->refcount); 1285 - cfqd->active_cic = cic; 1286 - } 1287 - } 1288 - 1289 - /* 1290 - * Find the cfqq that we need to service and move a request from that to the 1291 - * dispatch list 1292 - */ 1293 - static int cfq_dispatch_requests(struct request_queue *q, int force) 1294 - { 1295 - struct cfq_data *cfqd = q->elevator->elevator_data; 1296 - struct cfq_queue *cfqq; 1297 unsigned int max_dispatch; 1298 - 1299 - if (!cfqd->busy_queues) 1300 - return 0; 1301 - 1302 - if (unlikely(force)) 1303 - return cfq_forced_dispatch(cfqd); 1304 - 1305 - cfqq = cfq_select_queue(cfqd); 1306 - if (!cfqq) 1307 - return 0; 1308 1309 /* 1310 * Drain async requests before we start sync IO 1311 */ 1312 if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC]) 1313 - return 0; 1314 1315 /* 1316 * If this is an async queue and we have sync IO in flight, let it wait 1317 */ 1318 if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq)) 1319 - return 0; 1320 1321 max_dispatch = cfqd->cfq_quantum; 1322 if (cfq_class_idle(cfqq)) ··· 1284 * idle queue must always only have a single IO in flight 1285 */ 1286 if (cfq_class_idle(cfqq)) 1287 - return 0; 1288 1289 /* 1290 * We have other queues, don't allow more IO from this one 1291 */ 1292 if (cfqd->busy_queues > 1) 1293 - return 0; 1294 1295 /* 1296 * Sole queue 
user, allow bigger slice ··· 1314 max_dispatch = depth; 1315 } 1316 1317 - if (cfqq->dispatched >= max_dispatch) 1318 return 0; 1319 1320 /* 1321 - * Dispatch a request from this cfqq 1322 */ 1323 - cfq_dispatch_request(cfqd, cfqq); 1324 cfqq->slice_dispatch++; 1325 cfq_clear_cfqq_must_dispatch(cfqq); 1326 ··· 1420 1421 if (unlikely(cfqd->active_queue == cfqq)) { 1422 __cfq_slice_expired(cfqd, cfqq, 0); 1423 - cfq_schedule_dispatch(cfqd, 0); 1424 } 1425 1426 kmem_cache_free(cfq_pool, cfqq); ··· 1515 { 1516 if (unlikely(cfqq == cfqd->active_queue)) { 1517 __cfq_slice_expired(cfqd, cfqq, 0); 1518 - cfq_schedule_dispatch(cfqd, 0); 1519 } 1520 1521 cfq_put_queue(cfqq); ··· 1679 } 1680 1681 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1682 - pid_t pid, int is_sync) 1683 { 1684 RB_CLEAR_NODE(&cfqq->rb_node); 1685 RB_CLEAR_NODE(&cfqq->p_node); ··· 1699 } 1700 1701 static struct cfq_queue * 1702 - cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync, 1703 struct io_context *ioc, gfp_t gfp_mask) 1704 { 1705 struct cfq_queue *cfqq, *new_cfqq = NULL; ··· 1763 } 1764 1765 static struct cfq_queue * 1766 - cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc, 1767 gfp_t gfp_mask) 1768 { 1769 const int ioprio = task_ioprio(ioc); ··· 1998 (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic))) 1999 enable_idle = 0; 2000 else if (sample_valid(cic->ttime_samples)) { 2001 - if (cic->ttime_mean > cfqd->cfq_slice_idle) 2002 enable_idle = 0; 2003 else 2004 enable_idle = 1; ··· 2020 * Check if new_cfqq should preempt the currently active queue. Return 0 for 2021 * no or if we aren't sure, a 1 will cause a preempt. 2022 */ 2023 - static int 2024 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, 2025 struct request *rq) 2026 { ··· 2028 2029 cfqq = cfqd->active_queue; 2030 if (!cfqq) 2031 - return 0; 2032 2033 if (cfq_slice_used(cfqq)) 2034 - return 1; 2035 2036 if (cfq_class_idle(new_cfqq)) 2037 - return 0; 2038 2039 if (cfq_class_idle(cfqq)) 2040 - return 1; 2041 2042 /* 2043 * if the new request is sync, but the currently running queue is 2044 * not, let the sync request have priority. 2045 */ 2046 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) 2047 - return 1; 2048 2049 /* 2050 * So both queues are sync. Let the new request get disk time if 2051 * it's a metadata request and the current queue is doing regular IO. 2052 */ 2053 if (rq_is_meta(rq) && !cfqq->meta_pending) 2054 - return 1; 2055 2056 /* 2057 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. 
2058 */ 2059 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) 2060 - return 1; 2061 2062 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) 2063 - return 0; 2064 2065 /* 2066 * if this request is as-good as one we would expect from the 2067 * current cfqq, let it preempt 2068 */ 2069 if (cfq_rq_close(cfqd, rq)) 2070 - return 1; 2071 2072 - return 0; 2073 } 2074 2075 /* ··· 2154 2155 cfq_add_rq_rb(rq); 2156 2157 list_add_tail(&rq->queuelist, &cfqq->fifo); 2158 2159 cfq_rq_enqueued(cfqd, cfqq, rq); ··· 2236 } 2237 2238 if (!rq_in_driver(cfqd)) 2239 - cfq_schedule_dispatch(cfqd, 0); 2240 } 2241 2242 /* ··· 2334 struct cfq_data *cfqd = q->elevator->elevator_data; 2335 struct cfq_io_context *cic; 2336 const int rw = rq_data_dir(rq); 2337 - const int is_sync = rq_is_sync(rq); 2338 struct cfq_queue *cfqq; 2339 unsigned long flags; 2340 ··· 2366 if (cic) 2367 put_io_context(cic->ioc); 2368 2369 - cfq_schedule_dispatch(cfqd, 0); 2370 spin_unlock_irqrestore(q->queue_lock, flags); 2371 cfq_log(cfqd, "set_request fail"); 2372 return 1; ··· 2375 static void cfq_kick_queue(struct work_struct *work) 2376 { 2377 struct cfq_data *cfqd = 2378 - container_of(work, struct cfq_data, unplug_work.work); 2379 struct request_queue *q = cfqd->queue; 2380 2381 spin_lock_irq(q->queue_lock); ··· 2429 expire: 2430 cfq_slice_expired(cfqd, timed_out); 2431 out_kick: 2432 - cfq_schedule_dispatch(cfqd, 0); 2433 out_cont: 2434 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 2435 } ··· 2437 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) 2438 { 2439 del_timer_sync(&cfqd->idle_slice_timer); 2440 - cancel_delayed_work_sync(&cfqd->unplug_work); 2441 } 2442 2443 static void cfq_put_async_queues(struct cfq_data *cfqd) ··· 2519 cfqd->idle_slice_timer.function = cfq_idle_slice_timer; 2520 cfqd->idle_slice_timer.data = (unsigned long) cfqd; 2521 2522 - INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue); 2523 2524 cfqd->cfq_quantum = cfq_quantum; 2525 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
··· 150 * idle window management 151 */ 152 struct timer_list idle_slice_timer; 153 + struct work_struct unplug_work; 154 155 struct cfq_queue *active_queue; 156 struct cfq_io_context *active_cic; ··· 230 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args) 231 232 static void cfq_dispatch_insert(struct request_queue *, struct request *); 233 + static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool, 234 struct io_context *, gfp_t); 235 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *, 236 struct io_context *); ··· 241 } 242 243 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic, 244 + bool is_sync) 245 { 246 + return cic->cfqq[is_sync]; 247 } 248 249 static inline void cic_set_cfqq(struct cfq_io_context *cic, 250 + struct cfq_queue *cfqq, bool is_sync) 251 { 252 + cic->cfqq[is_sync] = cfqq; 253 } 254 255 /* 256 * We regard a request as SYNC, if it's either a read or has the SYNC bit 257 * set (in which case it could also be direct WRITE). 258 */ 259 + static inline bool cfq_bio_sync(struct bio *bio) 260 { 261 + return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO); 262 } 263 264 /* 265 * scheduler run of queue, if there are requests pending and no one in the 266 * driver that will restart queueing 267 */ 268 + static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) 269 { 270 if (cfqd->busy_queues) { 271 cfq_log(cfqd, "schedule dispatch"); 272 + kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); 273 } 274 } 275 ··· 290 * if a queue is marked sync and has sync io queued. A sync queue with async 291 * io only, should not get full sync slice length. 292 */ 293 + static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync, 294 unsigned short prio) 295 { 296 const int base_slice = cfqd->cfq_slice[sync]; ··· 318 * isn't valid until the first request from the dispatch is activated 319 * and the slice time set. 320 */ 321 + static inline bool cfq_slice_used(struct cfq_queue *cfqq) 322 { 323 if (cfq_cfqq_slice_new(cfqq)) 324 return 0; ··· 493 * we will service the queues. 494 */ 495 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, 496 + bool add_front) 497 { 498 struct rb_node **p, *parent; 499 struct cfq_queue *__cfqq; ··· 509 } else 510 rb_key += jiffies; 511 } else if (!add_front) { 512 + /* 513 + * Get our rb key offset. Subtract any residual slice 514 + * value carried from last service. A negative resid 515 + * count indicates slice overrun, and this should position 516 + * the next service time further away in the tree. 517 + */ 518 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; 519 + rb_key -= cfqq->slice_resid; 520 cfqq->slice_resid = 0; 521 + } else { 522 + rb_key = -HZ; 523 + __cfqq = cfq_rb_first(&cfqd->service_tree); 524 + rb_key += __cfqq ? __cfqq->rb_key : jiffies; 525 + } 526 527 if (!RB_EMPTY_NODE(&cfqq->rb_node)) { 528 /* ··· 547 n = &(*p)->rb_left; 548 else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq)) 549 n = &(*p)->rb_right; 550 + else if (time_before(rb_key, __cfqq->rb_key)) 551 n = &(*p)->rb_left; 552 else 553 n = &(*p)->rb_right; ··· 827 * reposition in fifo if next is older than rq 828 */ 829 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && 830 + time_before(rq_fifo_time(next), rq_fifo_time(rq))) { 831 list_move(&rq->queuelist, &next->queuelist); 832 + rq_set_fifo_time(rq, rq_fifo_time(next)); 833 + } 834 835 cfq_remove_request(next); 836 } ··· 844 * Disallow merge of a sync bio into an async request. 
845 */ 846 if (cfq_bio_sync(bio) && !rq_is_sync(rq)) 847 + return false; 848 849 /* 850 * Lookup the cfqq that this bio will be queued with. Allow ··· 852 */ 853 cic = cfq_cic_lookup(cfqd, current->io_context); 854 if (!cic) 855 + return false; 856 857 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); 858 + return cfqq == RQ_CFQQ(rq); 859 } 860 861 static void __cfq_set_active_queue(struct cfq_data *cfqd, ··· 886 */ 887 static void 888 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, 889 + bool timed_out) 890 { 891 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out); 892 ··· 914 } 915 } 916 917 + static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out) 918 { 919 struct cfq_queue *cfqq = cfqd->active_queue; 920 ··· 1026 */ 1027 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, 1028 struct cfq_queue *cur_cfqq, 1029 + bool probe) 1030 { 1031 struct cfq_queue *cfqq; 1032 ··· 1090 if (!cic || !atomic_read(&cic->ioc->nr_tasks)) 1091 return; 1092 1093 + /* 1094 + * If our average think time is larger than the remaining time 1095 + * slice, then don't idle. This avoids overrunning the allotted 1096 + * time slice. 1097 + */ 1098 + if (sample_valid(cic->ttime_samples) && 1099 + (cfqq->slice_end - jiffies < cic->ttime_mean)) 1100 + return; 1101 + 1102 cfq_mark_cfqq_wait_request(cfqq); 1103 1104 /* ··· 1129 */ 1130 static struct request *cfq_check_fifo(struct cfq_queue *cfqq) 1131 { 1132 + struct request *rq = NULL; 1133 1134 if (cfq_cfqq_fifo_expire(cfqq)) 1135 return NULL; ··· 1141 if (list_empty(&cfqq->fifo)) 1142 return NULL; 1143 1144 rq = rq_entry_fifo(cfqq->fifo.next); 1145 + if (time_before(jiffies, rq_fifo_time(rq))) 1146 rq = NULL; 1147 1148 + cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); 1149 return rq; 1150 } 1151 ··· 1248 return dispatched; 1249 } 1250 1251 + static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1252 { 1253 unsigned int max_dispatch; 1254 1255 /* 1256 * Drain async requests before we start sync IO 1257 */ 1258 if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC]) 1259 + return false; 1260 1261 /* 1262 * If this is an async queue and we have sync IO in flight, let it wait 1263 */ 1264 if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq)) 1265 + return false; 1266 1267 max_dispatch = cfqd->cfq_quantum; 1268 if (cfq_class_idle(cfqq)) ··· 1322 * idle queue must always only have a single IO in flight 1323 */ 1324 if (cfq_class_idle(cfqq)) 1325 + return false; 1326 1327 /* 1328 * We have other queues, don't allow more IO from this one 1329 */ 1330 if (cfqd->busy_queues > 1) 1331 + return false; 1332 1333 /* 1334 * Sole queue user, allow bigger slice ··· 1352 max_dispatch = depth; 1353 } 1354 1355 + /* 1356 + * If we're below the current max, allow a dispatch 1357 + */ 1358 + return cfqq->dispatched < max_dispatch; 1359 + } 1360 + 1361 + /* 1362 + * Dispatch a request from cfqq, moving them to the request queue 1363 + * dispatch list. 
1364 + */ 1365 + static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) 1366 + { 1367 + struct request *rq; 1368 + 1369 + BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); 1370 + 1371 + if (!cfq_may_dispatch(cfqd, cfqq)) 1372 + return false; 1373 + 1374 + /* 1375 + * follow expired path, else get first next available 1376 + */ 1377 + rq = cfq_check_fifo(cfqq); 1378 + if (!rq) 1379 + rq = cfqq->next_rq; 1380 + 1381 + /* 1382 + * insert request into driver dispatch list 1383 + */ 1384 + cfq_dispatch_insert(cfqd->queue, rq); 1385 + 1386 + if (!cfqd->active_cic) { 1387 + struct cfq_io_context *cic = RQ_CIC(rq); 1388 + 1389 + atomic_long_inc(&cic->ioc->refcount); 1390 + cfqd->active_cic = cic; 1391 + } 1392 + 1393 + return true; 1394 + } 1395 + 1396 + /* 1397 + * Find the cfqq that we need to service and move a request from that to the 1398 + * dispatch list 1399 + */ 1400 + static int cfq_dispatch_requests(struct request_queue *q, int force) 1401 + { 1402 + struct cfq_data *cfqd = q->elevator->elevator_data; 1403 + struct cfq_queue *cfqq; 1404 + 1405 + if (!cfqd->busy_queues) 1406 + return 0; 1407 + 1408 + if (unlikely(force)) 1409 + return cfq_forced_dispatch(cfqd); 1410 + 1411 + cfqq = cfq_select_queue(cfqd); 1412 + if (!cfqq) 1413 return 0; 1414 1415 /* 1416 + * Dispatch a request from this cfqq, if it is allowed 1417 */ 1418 + if (!cfq_dispatch_request(cfqd, cfqq)) 1419 + return 0; 1420 + 1421 cfqq->slice_dispatch++; 1422 cfq_clear_cfqq_must_dispatch(cfqq); 1423 ··· 1399 1400 if (unlikely(cfqd->active_queue == cfqq)) { 1401 __cfq_slice_expired(cfqd, cfqq, 0); 1402 + cfq_schedule_dispatch(cfqd); 1403 } 1404 1405 kmem_cache_free(cfq_pool, cfqq); ··· 1494 { 1495 if (unlikely(cfqq == cfqd->active_queue)) { 1496 __cfq_slice_expired(cfqd, cfqq, 0); 1497 + cfq_schedule_dispatch(cfqd); 1498 } 1499 1500 cfq_put_queue(cfqq); ··· 1658 } 1659 1660 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, 1661 + pid_t pid, bool is_sync) 1662 { 1663 RB_CLEAR_NODE(&cfqq->rb_node); 1664 RB_CLEAR_NODE(&cfqq->p_node); ··· 1678 } 1679 1680 static struct cfq_queue * 1681 + cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, 1682 struct io_context *ioc, gfp_t gfp_mask) 1683 { 1684 struct cfq_queue *cfqq, *new_cfqq = NULL; ··· 1742 } 1743 1744 static struct cfq_queue * 1745 + cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc, 1746 gfp_t gfp_mask) 1747 { 1748 const int ioprio = task_ioprio(ioc); ··· 1977 (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic))) 1978 enable_idle = 0; 1979 else if (sample_valid(cic->ttime_samples)) { 1980 + unsigned int slice_idle = cfqd->cfq_slice_idle; 1981 + if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic)) 1982 + slice_idle = msecs_to_jiffies(CFQ_MIN_TT); 1983 + if (cic->ttime_mean > slice_idle) 1984 enable_idle = 0; 1985 else 1986 enable_idle = 1; ··· 1996 * Check if new_cfqq should preempt the currently active queue. Return 0 for 1997 * no or if we aren't sure, a 1 will cause a preempt. 1998 */ 1999 + static bool 2000 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, 2001 struct request *rq) 2002 { ··· 2004 2005 cfqq = cfqd->active_queue; 2006 if (!cfqq) 2007 + return false; 2008 2009 if (cfq_slice_used(cfqq)) 2010 + return true; 2011 2012 if (cfq_class_idle(new_cfqq)) 2013 + return false; 2014 2015 if (cfq_class_idle(cfqq)) 2016 + return true; 2017 2018 /* 2019 * if the new request is sync, but the currently running queue is 2020 * not, let the sync request have priority. 
2021 */ 2022 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) 2023 + return true; 2024 2025 /* 2026 * So both queues are sync. Let the new request get disk time if 2027 * it's a metadata request and the current queue is doing regular IO. 2028 */ 2029 if (rq_is_meta(rq) && !cfqq->meta_pending) 2030 + return false; 2031 2032 /* 2033 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. 2034 */ 2035 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) 2036 + return true; 2037 2038 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) 2039 + return false; 2040 2041 /* 2042 * if this request is as-good as one we would expect from the 2043 * current cfqq, let it preempt 2044 */ 2045 if (cfq_rq_close(cfqd, rq)) 2046 + return true; 2047 2048 + return false; 2049 } 2050 2051 /* ··· 2130 2131 cfq_add_rq_rb(rq); 2132 2133 + rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); 2134 list_add_tail(&rq->queuelist, &cfqq->fifo); 2135 2136 cfq_rq_enqueued(cfqd, cfqq, rq); ··· 2211 } 2212 2213 if (!rq_in_driver(cfqd)) 2214 + cfq_schedule_dispatch(cfqd); 2215 } 2216 2217 /* ··· 2309 struct cfq_data *cfqd = q->elevator->elevator_data; 2310 struct cfq_io_context *cic; 2311 const int rw = rq_data_dir(rq); 2312 + const bool is_sync = rq_is_sync(rq); 2313 struct cfq_queue *cfqq; 2314 unsigned long flags; 2315 ··· 2341 if (cic) 2342 put_io_context(cic->ioc); 2343 2344 + cfq_schedule_dispatch(cfqd); 2345 spin_unlock_irqrestore(q->queue_lock, flags); 2346 cfq_log(cfqd, "set_request fail"); 2347 return 1; ··· 2350 static void cfq_kick_queue(struct work_struct *work) 2351 { 2352 struct cfq_data *cfqd = 2353 + container_of(work, struct cfq_data, unplug_work); 2354 struct request_queue *q = cfqd->queue; 2355 2356 spin_lock_irq(q->queue_lock); ··· 2404 expire: 2405 cfq_slice_expired(cfqd, timed_out); 2406 out_kick: 2407 + cfq_schedule_dispatch(cfqd); 2408 out_cont: 2409 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 2410 } ··· 2412 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) 2413 { 2414 del_timer_sync(&cfqd->idle_slice_timer); 2415 + cancel_work_sync(&cfqd->unplug_work); 2416 } 2417 2418 static void cfq_put_async_queues(struct cfq_data *cfqd) ··· 2494 cfqd->idle_slice_timer.function = cfq_idle_slice_timer; 2495 cfqd->idle_slice_timer.data = (unsigned long) cfqd; 2496 2497 + INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); 2498 2499 cfqd->cfq_quantum = cfq_quantum; 2500 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
+1 -3
block/elevator.c
··· 1059 return count; 1060 1061 strlcpy(elevator_name, name, sizeof(elevator_name)); 1062 - strstrip(elevator_name); 1063 - 1064 - e = elevator_get(elevator_name); 1065 if (!e) { 1066 printk(KERN_ERR "elevator: type %s not found\n", elevator_name); 1067 return -EINVAL;
··· 1059 return count; 1060 1061 strlcpy(elevator_name, name, sizeof(elevator_name)); 1062 + e = elevator_get(strstrip(elevator_name)); 1063 if (!e) { 1064 printk(KERN_ERR "elevator: type %s not found\n", elevator_name); 1065 return -EINVAL;
+3 -1
block/genhd.c
··· 869 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); 870 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); 871 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); 872 #ifdef CONFIG_FAIL_MAKE_REQUEST 873 static struct device_attribute dev_attr_fail = 874 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); ··· 889 &dev_attr_alignment_offset.attr, 890 &dev_attr_capability.attr, 891 &dev_attr_stat.attr, 892 #ifdef CONFIG_FAIL_MAKE_REQUEST 893 &dev_attr_fail.attr, 894 #endif ··· 1055 part_stat_read(hd, merges[1]), 1056 (unsigned long long)part_stat_read(hd, sectors[1]), 1057 jiffies_to_msecs(part_stat_read(hd, ticks[1])), 1058 - hd->in_flight, 1059 jiffies_to_msecs(part_stat_read(hd, io_ticks)), 1060 jiffies_to_msecs(part_stat_read(hd, time_in_queue)) 1061 );
··· 869 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); 870 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); 871 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); 872 + static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); 873 #ifdef CONFIG_FAIL_MAKE_REQUEST 874 static struct device_attribute dev_attr_fail = 875 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); ··· 888 &dev_attr_alignment_offset.attr, 889 &dev_attr_capability.attr, 890 &dev_attr_stat.attr, 891 + &dev_attr_inflight.attr, 892 #ifdef CONFIG_FAIL_MAKE_REQUEST 893 &dev_attr_fail.attr, 894 #endif ··· 1053 part_stat_read(hd, merges[1]), 1054 (unsigned long long)part_stat_read(hd, sectors[1]), 1055 jiffies_to_msecs(part_stat_read(hd, ticks[1])), 1056 + part_in_flight(hd), 1057 jiffies_to_msecs(part_stat_read(hd, io_ticks)), 1058 jiffies_to_msecs(part_stat_read(hd, time_in_queue)) 1059 );
+37 -42
drivers/block/cciss.c
··· 68 MODULE_VERSION("3.6.20"); 69 MODULE_LICENSE("GPL"); 70 71 #include "cciss_cmd.h" 72 #include "cciss.h" 73 #include <linux/cciss_ioctl.h> ··· 107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, 108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, 109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, 110 - {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 111 - PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, 112 {0,} 113 }; 114 ··· 127 {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, 128 {0x40910E11, "Smart Array 6i", &SA5_access}, 129 {0x3225103C, "Smart Array P600", &SA5_access}, 130 - {0x3223103C, "Smart Array P800", &SA5_access}, 131 - {0x3234103C, "Smart Array P400", &SA5_access}, 132 {0x3235103C, "Smart Array P400i", &SA5_access}, 133 {0x3211103C, "Smart Array E200i", &SA5_access}, 134 {0x3212103C, "Smart Array E200", &SA5_access}, ··· 134 {0x3214103C, "Smart Array E200i", &SA5_access}, 135 {0x3215103C, "Smart Array E200i", &SA5_access}, 136 {0x3237103C, "Smart Array E500", &SA5_access}, 137 {0x323D103C, "Smart Array P700m", &SA5_access}, 138 {0x3241103C, "Smart Array P212", &SA5_access}, 139 {0x3243103C, "Smart Array P410", &SA5_access}, ··· 146 {0x3249103C, "Smart Array P812", &SA5_access}, 147 {0x324A103C, "Smart Array P712m", &SA5_access}, 148 {0x324B103C, "Smart Array P711m", &SA5_access}, 149 - {0xFFFF103C, "Unknown Smart Array", &SA5_access}, 150 }; 151 152 /* How long to wait (in milliseconds) for board to go into simple mode */ ··· 3759 __u64 cfg_offset; 3760 __u32 cfg_base_addr; 3761 __u64 cfg_base_addr_index; 3762 - int i, err; 3763 3764 /* check to see if controller has been disabled */ 3765 /* BEFORE trying to enable it */ ··· 3802 "aborting\n"); 3803 return err; 3804 } 3805 - 3806 - subsystem_vendor_id = pdev->subsystem_vendor; 3807 - subsystem_device_id = pdev->subsystem_device; 3808 - board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) | 3809 - subsystem_vendor_id); 3810 3811 #ifdef CCISS_DEBUG 3812 printk("command = %x\n", command); ··· 3888 * leave a little room for ioctl calls. 3889 */ 3890 c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); 3891 - for (i = 0; i < ARRAY_SIZE(products); i++) { 3892 - if (board_id == products[i].board_id) { 3893 - c->product_name = products[i].product_name; 3894 - c->access = *(products[i].access); 3895 - c->nr_cmds = c->max_commands - 4; 3896 - break; 3897 - } 3898 - } 3899 if ((readb(&c->cfgtable->Signature[0]) != 'C') || 3900 (readb(&c->cfgtable->Signature[1]) != 'I') || 3901 (readb(&c->cfgtable->Signature[2]) != 'S') || ··· 3898 printk("Does not appear to be a valid CISS config table\n"); 3899 err = -ENODEV; 3900 goto err_out_free_res; 3901 - } 3902 - /* We didn't find the controller in our list. We know the 3903 - * signature is valid. If it's an HP device let's try to 3904 - * bind to the device and fire it up. Otherwise we bail. 
3905 - */ 3906 - if (i == ARRAY_SIZE(products)) { 3907 - if (subsystem_vendor_id == PCI_VENDOR_ID_HP) { 3908 - c->product_name = products[i-1].product_name; 3909 - c->access = *(products[i-1].access); 3910 - c->nr_cmds = c->max_commands - 4; 3911 - printk(KERN_WARNING "cciss: This is an unknown " 3912 - "Smart Array controller.\n" 3913 - "cciss: Please update to the latest driver " 3914 - "available from www.hp.com.\n"); 3915 - } else { 3916 - printk(KERN_WARNING "cciss: Sorry, I don't know how" 3917 - " to access the Smart Array controller %08lx\n" 3918 - , (unsigned long)board_id); 3919 - err = -ENODEV; 3920 - goto err_out_free_res; 3921 - } 3922 } 3923 #ifdef CONFIG_X86 3924 { ··· 4248 mutex_init(&hba[i]->busy_shutting_down); 4249 4250 if (cciss_pci_init(hba[i], pdev) != 0) 4251 - goto clean0; 4252 4253 sprintf(hba[i]->devname, "cciss%d", i); 4254 hba[i]->ctlr = i; ··· 4385 clean1: 4386 cciss_destroy_hba_sysfs_entry(hba[i]); 4387 clean0: 4388 hba[i]->busy_initializing = 0; 4389 4390 /* 4391 * Deliberately omit pci_disable_device(): it does something nasty to 4392 * Smart Array controllers that pci_enable_device does not undo 4393 */ 4394 - pci_release_regions(pdev); 4395 pci_set_drvdata(pdev, NULL); 4396 free_hba(i); 4397 return -1;
··· 68 MODULE_VERSION("3.6.20"); 69 MODULE_LICENSE("GPL"); 70 71 + static int cciss_allow_hpsa; 72 + module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR); 73 + MODULE_PARM_DESC(cciss_allow_hpsa, 74 + "Prevent cciss driver from accessing hardware known to be " 75 + " supported by the hpsa driver"); 76 + 77 #include "cciss_cmd.h" 78 #include "cciss.h" 79 #include <linux/cciss_ioctl.h> ··· 101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, 102 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, 103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, 104 {0,} 105 }; 106 ··· 123 {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, 124 {0x40910E11, "Smart Array 6i", &SA5_access}, 125 {0x3225103C, "Smart Array P600", &SA5_access}, 126 {0x3235103C, "Smart Array P400i", &SA5_access}, 127 {0x3211103C, "Smart Array E200i", &SA5_access}, 128 {0x3212103C, "Smart Array E200", &SA5_access}, ··· 132 {0x3214103C, "Smart Array E200i", &SA5_access}, 133 {0x3215103C, "Smart Array E200i", &SA5_access}, 134 {0x3237103C, "Smart Array E500", &SA5_access}, 135 + /* controllers below this line are also supported by the hpsa driver. */ 136 + #define HPSA_BOUNDARY 0x3223103C 137 + {0x3223103C, "Smart Array P800", &SA5_access}, 138 + {0x3234103C, "Smart Array P400", &SA5_access}, 139 {0x323D103C, "Smart Array P700m", &SA5_access}, 140 {0x3241103C, "Smart Array P212", &SA5_access}, 141 {0x3243103C, "Smart Array P410", &SA5_access}, ··· 140 {0x3249103C, "Smart Array P812", &SA5_access}, 141 {0x324A103C, "Smart Array P712m", &SA5_access}, 142 {0x324B103C, "Smart Array P711m", &SA5_access}, 143 }; 144 145 /* How long to wait (in milliseconds) for board to go into simple mode */ ··· 3754 __u64 cfg_offset; 3755 __u32 cfg_base_addr; 3756 __u64 cfg_base_addr_index; 3757 + int i, prod_index, err; 3758 + 3759 + subsystem_vendor_id = pdev->subsystem_vendor; 3760 + subsystem_device_id = pdev->subsystem_device; 3761 + board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) | 3762 + subsystem_vendor_id); 3763 + 3764 + for (i = 0; i < ARRAY_SIZE(products); i++) { 3765 + /* Stand aside for hpsa driver on request */ 3766 + if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY) 3767 + return -ENODEV; 3768 + if (board_id == products[i].board_id) 3769 + break; 3770 + } 3771 + prod_index = i; 3772 + if (prod_index == ARRAY_SIZE(products)) { 3773 + dev_warn(&pdev->dev, 3774 + "unrecognized board ID: 0x%08lx, ignoring.\n", 3775 + (unsigned long) board_id); 3776 + return -ENODEV; 3777 + } 3778 3779 /* check to see if controller has been disabled */ 3780 /* BEFORE trying to enable it */ ··· 3777 "aborting\n"); 3778 return err; 3779 } 3780 3781 #ifdef CCISS_DEBUG 3782 printk("command = %x\n", command); ··· 3868 * leave a little room for ioctl calls. 
3869 */ 3870 c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); 3871 + c->product_name = products[prod_index].product_name; 3872 + c->access = *(products[prod_index].access); 3873 + c->nr_cmds = c->max_commands - 4; 3874 if ((readb(&c->cfgtable->Signature[0]) != 'C') || 3875 (readb(&c->cfgtable->Signature[1]) != 'I') || 3876 (readb(&c->cfgtable->Signature[2]) != 'S') || ··· 3883 printk("Does not appear to be a valid CISS config table\n"); 3884 err = -ENODEV; 3885 goto err_out_free_res; 3886 } 3887 #ifdef CONFIG_X86 3888 { ··· 4254 mutex_init(&hba[i]->busy_shutting_down); 4255 4256 if (cciss_pci_init(hba[i], pdev) != 0) 4257 + goto clean_no_release_regions; 4258 4259 sprintf(hba[i]->devname, "cciss%d", i); 4260 hba[i]->ctlr = i; ··· 4391 clean1: 4392 cciss_destroy_hba_sysfs_entry(hba[i]); 4393 clean0: 4394 + pci_release_regions(pdev); 4395 + clean_no_release_regions: 4396 hba[i]->busy_initializing = 0; 4397 4398 /* 4399 * Deliberately omit pci_disable_device(): it does something nasty to 4400 * Smart Array controllers that pci_enable_device does not undo 4401 */ 4402 pci_set_drvdata(pdev, NULL); 4403 free_hba(i); 4404 return -1;
+10 -6
drivers/md/dm.c
··· 130 /* 131 * A list of ios that arrived while we were suspended. 132 */ 133 - atomic_t pending; 134 wait_queue_head_t wait; 135 struct work_struct work; 136 struct bio_list deferred; ··· 453 { 454 struct mapped_device *md = io->md; 455 int cpu; 456 457 io->start_time = jiffies; 458 459 cpu = part_stat_lock(); 460 part_round_stats(cpu, &dm_disk(md)->part0); 461 part_stat_unlock(); 462 - dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending); 463 } 464 465 static void end_io_acct(struct dm_io *io) ··· 480 * After this is decremented the bio must not be touched if it is 481 * a barrier. 482 */ 483 - dm_disk(md)->part0.in_flight = pending = 484 - atomic_dec_return(&md->pending); 485 486 /* nudge anyone waiting on suspend queue */ 487 if (!pending) ··· 1787 if (!md->disk) 1788 goto bad_disk; 1789 1790 - atomic_set(&md->pending, 0); 1791 init_waitqueue_head(&md->wait); 1792 INIT_WORK(&md->work, dm_wq_work); 1793 init_waitqueue_head(&md->eventq); ··· 2091 break; 2092 } 2093 spin_unlock_irqrestore(q->queue_lock, flags); 2094 - } else if (!atomic_read(&md->pending)) 2095 break; 2096 2097 if (interruptible == TASK_INTERRUPTIBLE &&
··· 130 /* 131 * A list of ios that arrived while we were suspended. 132 */ 133 + atomic_t pending[2]; 134 wait_queue_head_t wait; 135 struct work_struct work; 136 struct bio_list deferred; ··· 453 { 454 struct mapped_device *md = io->md; 455 int cpu; 456 + int rw = bio_data_dir(io->bio); 457 458 io->start_time = jiffies; 459 460 cpu = part_stat_lock(); 461 part_round_stats(cpu, &dm_disk(md)->part0); 462 part_stat_unlock(); 463 + dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]); 464 } 465 466 static void end_io_acct(struct dm_io *io) ··· 479 * After this is decremented the bio must not be touched if it is 480 * a barrier. 481 */ 482 + dm_disk(md)->part0.in_flight[rw] = pending = 483 + atomic_dec_return(&md->pending[rw]); 484 + pending += atomic_read(&md->pending[rw^0x1]); 485 486 /* nudge anyone waiting on suspend queue */ 487 if (!pending) ··· 1785 if (!md->disk) 1786 goto bad_disk; 1787 1788 + atomic_set(&md->pending[0], 0); 1789 + atomic_set(&md->pending[1], 0); 1790 init_waitqueue_head(&md->wait); 1791 INIT_WORK(&md->work, dm_wq_work); 1792 init_waitqueue_head(&md->eventq); ··· 2088 break; 2089 } 2090 spin_unlock_irqrestore(q->queue_lock, flags); 2091 + } else if (!atomic_read(&md->pending[0]) && 2092 + !atomic_read(&md->pending[1])) 2093 break; 2094 2095 if (interruptible == TASK_INTERRUPTIBLE &&
+11 -1
fs/partitions/check.c
··· 248 part_stat_read(p, merges[WRITE]), 249 (unsigned long long)part_stat_read(p, sectors[WRITE]), 250 jiffies_to_msecs(part_stat_read(p, ticks[WRITE])), 251 - p->in_flight, 252 jiffies_to_msecs(part_stat_read(p, io_ticks)), 253 jiffies_to_msecs(part_stat_read(p, time_in_queue))); 254 } 255 256 #ifdef CONFIG_FAIL_MAKE_REQUEST ··· 289 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); 290 static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); 291 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); 292 #ifdef CONFIG_FAIL_MAKE_REQUEST 293 static struct device_attribute dev_attr_fail = 294 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); ··· 301 &dev_attr_size.attr, 302 &dev_attr_alignment_offset.attr, 303 &dev_attr_stat.attr, 304 #ifdef CONFIG_FAIL_MAKE_REQUEST 305 &dev_attr_fail.attr, 306 #endif
··· 248 part_stat_read(p, merges[WRITE]), 249 (unsigned long long)part_stat_read(p, sectors[WRITE]), 250 jiffies_to_msecs(part_stat_read(p, ticks[WRITE])), 251 + part_in_flight(p), 252 jiffies_to_msecs(part_stat_read(p, io_ticks)), 253 jiffies_to_msecs(part_stat_read(p, time_in_queue))); 254 + } 255 + 256 + ssize_t part_inflight_show(struct device *dev, 257 + struct device_attribute *attr, char *buf) 258 + { 259 + struct hd_struct *p = dev_to_part(dev); 260 + 261 + return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]); 262 } 263 264 #ifdef CONFIG_FAIL_MAKE_REQUEST ··· 281 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); 282 static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); 283 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); 284 + static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); 285 #ifdef CONFIG_FAIL_MAKE_REQUEST 286 static struct device_attribute dev_attr_fail = 287 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); ··· 292 &dev_attr_size.attr, 293 &dev_attr_alignment_offset.attr, 294 &dev_attr_stat.attr, 295 + &dev_attr_inflight.attr, 296 #ifdef CONFIG_FAIL_MAKE_REQUEST 297 &dev_attr_fail.attr, 298 #endif
-4
include/linux/blkdev.h
··· 1172 } 1173 1174 struct work_struct; 1175 - struct delayed_work; 1176 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); 1177 - int kblockd_schedule_delayed_work(struct request_queue *q, 1178 - struct delayed_work *work, 1179 - unsigned long delay); 1180 1181 #define MODULE_ALIAS_BLOCKDEV(major,minor) \ 1182 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
··· 1172 } 1173 1174 struct work_struct; 1175 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); 1176 1177 #define MODULE_ALIAS_BLOCKDEV(major,minor) \ 1178 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
+14 -7
include/linux/genhd.h
··· 98 int make_it_fail; 99 #endif 100 unsigned long stamp; 101 - int in_flight; 102 #ifdef CONFIG_SMP 103 struct disk_stats *dkstats; 104 #else ··· 322 #define part_stat_sub(cpu, gendiskp, field, subnd) \ 323 part_stat_add(cpu, gendiskp, field, -subnd) 324 325 - static inline void part_inc_in_flight(struct hd_struct *part) 326 { 327 - part->in_flight++; 328 if (part->partno) 329 - part_to_disk(part)->part0.in_flight++; 330 } 331 332 - static inline void part_dec_in_flight(struct hd_struct *part) 333 { 334 - part->in_flight--; 335 if (part->partno) 336 - part_to_disk(part)->part0.in_flight--; 337 } 338 339 /* block/blk-core.c */ ··· 550 extern ssize_t part_size_show(struct device *dev, 551 struct device_attribute *attr, char *buf); 552 extern ssize_t part_stat_show(struct device *dev, 553 struct device_attribute *attr, char *buf); 554 #ifdef CONFIG_FAIL_MAKE_REQUEST 555 extern ssize_t part_fail_show(struct device *dev,
··· 98 int make_it_fail; 99 #endif 100 unsigned long stamp; 101 + int in_flight[2]; 102 #ifdef CONFIG_SMP 103 struct disk_stats *dkstats; 104 #else ··· 322 #define part_stat_sub(cpu, gendiskp, field, subnd) \ 323 part_stat_add(cpu, gendiskp, field, -subnd) 324 325 + static inline void part_inc_in_flight(struct hd_struct *part, int rw) 326 { 327 + part->in_flight[rw]++; 328 if (part->partno) 329 + part_to_disk(part)->part0.in_flight[rw]++; 330 } 331 332 + static inline void part_dec_in_flight(struct hd_struct *part, int rw) 333 { 334 + part->in_flight[rw]--; 335 if (part->partno) 336 + part_to_disk(part)->part0.in_flight[rw]--; 337 + } 338 + 339 + static inline int part_in_flight(struct hd_struct *part) 340 + { 341 + return part->in_flight[0] + part->in_flight[1]; 342 } 343 344 /* block/blk-core.c */ ··· 545 extern ssize_t part_size_show(struct device *dev, 546 struct device_attribute *attr, char *buf); 547 extern ssize_t part_stat_show(struct device *dev, 548 + struct device_attribute *attr, char *buf); 549 + extern ssize_t part_inflight_show(struct device *dev, 550 struct device_attribute *attr, char *buf); 551 #ifdef CONFIG_FAIL_MAKE_REQUEST 552 extern ssize_t part_fail_show(struct device *dev,
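
The genhd.h hunk above replaces the single in_flight counter with a two-element array indexed by request direction. As a quick illustration of that pattern only — a standalone, simplified sketch in plain C with made-up names, not the kernel's implementation (no per-CPU stats, no locking) — the added helpers boil down to:

/* Sketch only: separate read/write in-flight counters plus a combined
 * accessor, mirroring part_inc_in_flight()/part_dec_in_flight()/
 * part_in_flight() added in include/linux/genhd.h above. */
#include <stdio.h>

struct part_counters {
	int in_flight[2];		/* [0] = reads, [1] = writes */
};

static void inc_in_flight(struct part_counters *p, int rw)
{
	p->in_flight[rw]++;		/* rw: 0 for READ, 1 for WRITE */
}

static void dec_in_flight(struct part_counters *p, int rw)
{
	p->in_flight[rw]--;
}

static int total_in_flight(struct part_counters *p)
{
	return p->in_flight[0] + p->in_flight[1];
}

int main(void)
{
	struct part_counters p = { { 0, 0 } };

	inc_in_flight(&p, 0);	/* a read is issued */
	inc_in_flight(&p, 1);	/* a write is issued */
	dec_in_flight(&p, 0);	/* the read completes */

	/* prints: in_flight reads=0 writes=1 total=1 */
	printf("in_flight reads=%d writes=%d total=%d\n",
	       p.in_flight[0], p.in_flight[1], total_in_flight(&p));
	return 0;
}

The real helpers differ in that, when called on a partition, they also propagate the per-direction count to the whole-disk part0, as the hunk above shows.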
-3
kernel/sched.c
··· 6718 /* 6719 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 6720 * that process accounting knows that this is a task in IO wait state. 6721 - * 6722 - * But don't do that if it is a deliberate, throttling IO wait (this task 6723 - * has set its backing_dev_info: the queue against which it should throttle) 6724 */ 6725 void __sched io_schedule(void) 6726 {
··· 6718 /* 6719 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 6720 * that process accounting knows that this is a task in IO wait state. 6721 */ 6722 void __sched io_schedule(void) 6723 {
+1 -1
mm/backing-dev.c
··· 92 "BdiDirtyThresh: %8lu kB\n" 93 "DirtyThresh: %8lu kB\n" 94 "BackgroundThresh: %8lu kB\n" 95 - "WriteBack threads:%8lu\n" 96 "b_dirty: %8lu\n" 97 "b_io: %8lu\n" 98 "b_more_io: %8lu\n"
··· 92 "BdiDirtyThresh: %8lu kB\n" 93 "DirtyThresh: %8lu kB\n" 94 "BackgroundThresh: %8lu kB\n" 95 + "WritebackThreads: %8lu\n" 96 "b_dirty: %8lu\n" 97 "b_io: %8lu\n" 98 "b_more_io: %8lu\n"
+2 -1
mm/page-writeback.c
··· 566 if (pages_written >= write_chunk) 567 break; /* We've done our duty */ 568 569 - schedule_timeout_interruptible(pause); 570 571 /* 572 * Increase the delay for each loop, up to our previous
··· 566 if (pages_written >= write_chunk) 567 break; /* We've done our duty */ 568 569 + __set_current_state(TASK_INTERRUPTIBLE); 570 + io_schedule_timeout(pause); 571 572 /* 573 * Increase the delay for each loop, up to our previous