Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
cciss: Add cciss_allow_hpsa module parameter
cciss: Fix multiple calls to pci_release_regions
blk-settings: fix function parameter kernel-doc notation
writeback: kill space in debugfs item name
writeback: account IO throttling wait as iowait
elv_iosched_store(): fix strstrip() misuse
cfq-iosched: avoid probable slice overrun when idling
cfq-iosched: apply bool value where we return 0/1
cfq-iosched: fix think time allowed for seekers
cfq-iosched: fix the slice residual sign
cfq-iosched: abstract out the 'may this cfqq dispatch' logic
block: use proper BLK_RW_ASYNC in blk_queue_start_tag()
block: Seperate read and write statistics of in_flight requests v2
block: get rid of kblock_schedule_delayed_work()
cfq-iosched: fix possible problem with jiffies wraparound
cfq-iosched: fix issue with rq-rq merging and fifo list ordering

 15 files changed, 228 insertions(+), 201 deletions(-)

block/blk-core.c   +4 -12

···
         part_stat_inc(cpu, part, merges[rw]);
     else {
         part_round_stats(cpu, part);
-        part_inc_in_flight(part);
+        part_inc_in_flight(part, rw);
     }
 
     part_stat_unlock();
···
     if (now == part->stamp)
         return;
 
-    if (part->in_flight) {
+    if (part_in_flight(part)) {
         __part_stat_add(cpu, part, time_in_queue,
-                part->in_flight * (now - part->stamp));
+                part_in_flight(part) * (now - part->stamp));
         __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
     }
     part->stamp = now;
···
     part_stat_inc(cpu, part, ios[rw]);
     part_stat_add(cpu, part, ticks[rw], duration);
     part_round_stats(cpu, part);
-    part_dec_in_flight(part);
+    part_dec_in_flight(part, rw);
 
     part_stat_unlock();
 }
···
     return queue_work(kblockd_workqueue, work);
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
-
-int kblockd_schedule_delayed_work(struct request_queue *q,
-                  struct delayed_work *work,
-                  unsigned long delay)
-{
-    return queue_delayed_work(kblockd_workqueue, work, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
 
 int __init blk_dev_init(void)
 {

block/blk-merge.c   +1 -1

···
     part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
     part_round_stats(cpu, part);
-    part_dec_in_flight(part);
+    part_dec_in_flight(part, rq_data_dir(req));
 
     part_stat_unlock();
 }

block/blk-settings.c   +1 -1

···
 /**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
  * @q:  the request queue for the device
- * @max_discard: maximum number of sectors to discard
+ * @max_discard_sectors: maximum number of sectors to discard
  **/
 void blk_queue_max_discard_sectors(struct request_queue *q,
                    unsigned int max_discard_sectors)

block/blk-tag.c   +1 -1

···
         max_depth -= 2;
         if (!max_depth)
             max_depth = 1;
-        if (q->in_flight[0] > max_depth)
+        if (q->in_flight[BLK_RW_ASYNC] > max_depth)
             return 1;
     }
 

block/cfq-iosched.c   +142 -117

···
      * idle window management
      */
     struct timer_list idle_slice_timer;
-    struct delayed_work unplug_work;
+    struct work_struct unplug_work;
 
     struct cfq_queue *active_queue;
     struct cfq_io_context *active_cic;
···
     blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
                        struct io_context *, gfp_t);
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
                          struct io_context *);
···
 }
 
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
-                        int is_sync)
+                        bool is_sync)
 {
-    return cic->cfqq[!!is_sync];
+    return cic->cfqq[is_sync];
 }
 
 static inline void cic_set_cfqq(struct cfq_io_context *cic,
-                struct cfq_queue *cfqq, int is_sync)
+                struct cfq_queue *cfqq, bool is_sync)
 {
-    cic->cfqq[!!is_sync] = cfqq;
+    cic->cfqq[is_sync] = cfqq;
 }
 
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
  */
-static inline int cfq_bio_sync(struct bio *bio)
+static inline bool cfq_bio_sync(struct bio *bio)
 {
-    if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
-        return 1;
-
-    return 0;
+    return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
 }
 
 /*
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
-                     unsigned long delay)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
     if (cfqd->busy_queues) {
         cfq_log(cfqd, "schedule dispatch");
-        kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
-                        delay);
+        kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
     }
 }
···
  * if a queue is marked sync and has sync io queued. A sync queue with async
  * io only, should not get full sync slice length.
  */
-static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
                  unsigned short prio)
 {
     const int base_slice = cfqd->cfq_slice[sync];
···
  * isn't valid until the first request from the dispatch is activated
  * and the slice time set.
  */
-static inline int cfq_slice_used(struct cfq_queue *cfqq)
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
     if (cfq_cfqq_slice_new(cfqq))
         return 0;
···
  * we will service the queues.
  */
 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                 int add_front)
+                 bool add_front)
 {
     struct rb_node **p, *parent;
     struct cfq_queue *__cfqq;
···
         } else
             rb_key += jiffies;
     } else if (!add_front) {
+        /*
+         * Get our rb key offset. Subtract any residual slice
+         * value carried from last service. A negative resid
+         * count indicates slice overrun, and this should position
+         * the next service time further away in the tree.
+         */
         rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
-        rb_key += cfqq->slice_resid;
+        rb_key -= cfqq->slice_resid;
         cfqq->slice_resid = 0;
-    } else
-        rb_key = 0;
+    } else {
+        rb_key = -HZ;
+        __cfqq = cfq_rb_first(&cfqd->service_tree);
+        rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+    }
 
     if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
         /*
···
             n = &(*p)->rb_left;
         else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
             n = &(*p)->rb_right;
-        else if (rb_key < __cfqq->rb_key)
+        else if (time_before(rb_key, __cfqq->rb_key))
             n = &(*p)->rb_left;
         else
             n = &(*p)->rb_right;
···
      * reposition in fifo if next is older than rq
      */
     if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-        time_before(next->start_time, rq->start_time))
+        time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
         list_move(&rq->queuelist, &next->queuelist);
+        rq_set_fifo_time(rq, rq_fifo_time(next));
+    }
 
     cfq_remove_request(next);
 }
···
      * Disallow merge of a sync bio into an async request.
      */
     if (cfq_bio_sync(bio) && !rq_is_sync(rq))
-        return 0;
+        return false;
 
     /*
      * Lookup the cfqq that this bio will be queued with. Allow
···
      */
     cic = cfq_cic_lookup(cfqd, current->io_context);
     if (!cic)
-        return 0;
+        return false;
 
     cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
-    if (cfqq == RQ_CFQQ(rq))
-        return 1;
-
-    return 0;
+    return cfqq == RQ_CFQQ(rq);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
···
  */
 static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-            int timed_out)
+            bool timed_out)
 {
     cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 
···
     }
 }
 
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 {
     struct cfq_queue *cfqq = cfqd->active_queue;
 
···
  */
 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
                           struct cfq_queue *cur_cfqq,
-                          int probe)
+                          bool probe)
 {
     struct cfq_queue *cfqq;
 
···
     if (!cic || !atomic_read(&cic->ioc->nr_tasks))
         return;
 
+    /*
+     * If our average think time is larger than the remaining time
+     * slice, then don't idle. This avoids overrunning the allotted
+     * time slice.
+     */
+    if (sample_valid(cic->ttime_samples) &&
+        (cfqq->slice_end - jiffies < cic->ttime_mean))
+        return;
+
     cfq_mark_cfqq_wait_request(cfqq);
 
     /*
···
  */
 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 {
-    struct cfq_data *cfqd = cfqq->cfqd;
-    struct request *rq;
-    int fifo;
+    struct request *rq = NULL;
 
     if (cfq_cfqq_fifo_expire(cfqq))
         return NULL;
···
     if (list_empty(&cfqq->fifo))
         return NULL;
 
-    fifo = cfq_cfqq_sync(cfqq);
     rq = rq_entry_fifo(cfqq->fifo.next);
-
-    if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
+    if (time_before(jiffies, rq_fifo_time(rq)))
         rq = NULL;
 
-    cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
+    cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
     return rq;
 }
···
     return dispatched;
 }
 
-/*
- * Dispatch a request from cfqq, moving them to the request queue
- * dispatch list.
- */
-static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-    struct request *rq;
-
-    BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
-    /*
-     * follow expired path, else get first next available
-     */
-    rq = cfq_check_fifo(cfqq);
-    if (!rq)
-        rq = cfqq->next_rq;
-
-    /*
-     * insert request into driver dispatch list
-     */
-    cfq_dispatch_insert(cfqd->queue, rq);
-
-    if (!cfqd->active_cic) {
-        struct cfq_io_context *cic = RQ_CIC(rq);
-
-        atomic_long_inc(&cic->ioc->refcount);
-        cfqd->active_cic = cic;
-    }
-}
-
-/*
- * Find the cfqq that we need to service and move a request from that to the
- * dispatch list
- */
-static int cfq_dispatch_requests(struct request_queue *q, int force)
-{
-    struct cfq_data *cfqd = q->elevator->elevator_data;
-    struct cfq_queue *cfqq;
     unsigned int max_dispatch;
-
-    if (!cfqd->busy_queues)
-        return 0;
-
-    if (unlikely(force))
-        return cfq_forced_dispatch(cfqd);
-
-    cfqq = cfq_select_queue(cfqd);
-    if (!cfqq)
-        return 0;
 
     /*
      * Drain async requests before we start sync IO
      */
     if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
-        return 0;
+        return false;
 
     /*
      * If this is an async queue and we have sync IO in flight, let it wait
      */
     if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-        return 0;
+        return false;
 
     max_dispatch = cfqd->cfq_quantum;
     if (cfq_class_idle(cfqq))
···
          * idle queue must always only have a single IO in flight
          */
         if (cfq_class_idle(cfqq))
-            return 0;
+            return false;
 
         /*
          * We have other queues, don't allow more IO from this one
          */
         if (cfqd->busy_queues > 1)
-            return 0;
+            return false;
 
         /*
          * Sole queue user, allow bigger slice
···
             max_dispatch = depth;
     }
 
-    if (cfqq->dispatched >= max_dispatch)
+    /*
+     * If we're below the current max, allow a dispatch
+     */
+    return cfqq->dispatched < max_dispatch;
+}
+
+/*
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+    struct request *rq;
+
+    BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+    if (!cfq_may_dispatch(cfqd, cfqq))
+        return false;
+
+    /*
+     * follow expired path, else get first next available
+     */
+    rq = cfq_check_fifo(cfqq);
+    if (!rq)
+        rq = cfqq->next_rq;
+
+    /*
+     * insert request into driver dispatch list
+     */
+    cfq_dispatch_insert(cfqd->queue, rq);
+
+    if (!cfqd->active_cic) {
+        struct cfq_io_context *cic = RQ_CIC(rq);
+
+        atomic_long_inc(&cic->ioc->refcount);
+        cfqd->active_cic = cic;
+    }
+
+    return true;
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
+static int cfq_dispatch_requests(struct request_queue *q, int force)
+{
+    struct cfq_data *cfqd = q->elevator->elevator_data;
+    struct cfq_queue *cfqq;
+
+    if (!cfqd->busy_queues)
+        return 0;
+
+    if (unlikely(force))
+        return cfq_forced_dispatch(cfqd);
+
+    cfqq = cfq_select_queue(cfqd);
+    if (!cfqq)
         return 0;
 
     /*
-     * Dispatch a request from this cfqq
+     * Dispatch a request from this cfqq, if it is allowed
      */
-    cfq_dispatch_request(cfqd, cfqq);
+    if (!cfq_dispatch_request(cfqd, cfqq))
+        return 0;
+
     cfqq->slice_dispatch++;
     cfq_clear_cfqq_must_dispatch(cfqq);
···
 
     if (unlikely(cfqd->active_queue == cfqq)) {
         __cfq_slice_expired(cfqd, cfqq, 0);
-        cfq_schedule_dispatch(cfqd, 0);
+        cfq_schedule_dispatch(cfqd);
     }
 
     kmem_cache_free(cfq_pool, cfqq);
···
 {
     if (unlikely(cfqq == cfqd->active_queue)) {
         __cfq_slice_expired(cfqd, cfqq, 0);
-        cfq_schedule_dispatch(cfqd, 0);
+        cfq_schedule_dispatch(cfqd);
     }
 
     cfq_put_queue(cfqq);
···
 }
 
 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-              pid_t pid, int is_sync)
+              pid_t pid, bool is_sync)
 {
     RB_CLEAR_NODE(&cfqq->rb_node);
     RB_CLEAR_NODE(&cfqq->p_node);
···
 }
 
 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
              struct io_context *ioc, gfp_t gfp_mask)
 {
     struct cfq_queue *cfqq, *new_cfqq = NULL;
···
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
           gfp_t gfp_mask)
 {
     const int ioprio = task_ioprio(ioc);
···
         (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
         enable_idle = 0;
     else if (sample_valid(cic->ttime_samples)) {
-        if (cic->ttime_mean > cfqd->cfq_slice_idle)
+        unsigned int slice_idle = cfqd->cfq_slice_idle;
+        if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+            slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
+        if (cic->ttime_mean > slice_idle)
             enable_idle = 0;
         else
             enable_idle = 1;
···
  * Check if new_cfqq should preempt the currently active queue. Return 0 for
  * no or if we aren't sure, a 1 will cause a preempt.
  */
-static int
+static bool
 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
            struct request *rq)
 {
···
 
     cfqq = cfqd->active_queue;
     if (!cfqq)
-        return 0;
+        return false;
 
     if (cfq_slice_used(cfqq))
-        return 1;
+        return true;
 
     if (cfq_class_idle(new_cfqq))
-        return 0;
+        return false;
 
     if (cfq_class_idle(cfqq))
-        return 1;
+        return true;
 
     /*
      * if the new request is sync, but the currently running queue is
      * not, let the sync request have priority.
      */
     if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
-        return 1;
+        return true;
 
     /*
      * So both queues are sync. Let the new request get disk time if
      * it's a metadata request and the current queue is doing regular IO.
      */
     if (rq_is_meta(rq) && !cfqq->meta_pending)
-        return 1;
+        return false;
 
     /*
      * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
      */
     if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
-        return 1;
+        return true;
 
     if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
-        return 0;
+        return false;
 
     /*
      * if this request is as-good as one we would expect from the
      * current cfqq, let it preempt
      */
     if (cfq_rq_close(cfqd, rq))
-        return 1;
+        return true;
 
-    return 0;
+    return false;
 }
 
···
 
     cfq_add_rq_rb(rq);
 
+    rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
     list_add_tail(&rq->queuelist, &cfqq->fifo);
 
     cfq_rq_enqueued(cfqd, cfqq, rq);
···
     }
 
     if (!rq_in_driver(cfqd))
-        cfq_schedule_dispatch(cfqd, 0);
+        cfq_schedule_dispatch(cfqd);
 }
 
 /*
···
     struct cfq_data *cfqd = q->elevator->elevator_data;
     struct cfq_io_context *cic;
     const int rw = rq_data_dir(rq);
-    const int is_sync = rq_is_sync(rq);
+    const bool is_sync = rq_is_sync(rq);
     struct cfq_queue *cfqq;
     unsigned long flags;
 
···
     if (cic)
         put_io_context(cic->ioc);
 
-    cfq_schedule_dispatch(cfqd, 0);
+    cfq_schedule_dispatch(cfqd);
     spin_unlock_irqrestore(q->queue_lock, flags);
     cfq_log(cfqd, "set_request fail");
     return 1;
···
 static void cfq_kick_queue(struct work_struct *work)
 {
     struct cfq_data *cfqd =
-        container_of(work, struct cfq_data, unplug_work.work);
+        container_of(work, struct cfq_data, unplug_work);
     struct request_queue *q = cfqd->queue;
 
     spin_lock_irq(q->queue_lock);
···
 expire:
     cfq_slice_expired(cfqd, timed_out);
 out_kick:
-    cfq_schedule_dispatch(cfqd, 0);
+    cfq_schedule_dispatch(cfqd);
 out_cont:
     spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
···
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
     del_timer_sync(&cfqd->idle_slice_timer);
-    cancel_delayed_work_sync(&cfqd->unplug_work);
+    cancel_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
···
     cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
     cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
-    INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
+    INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
     cfqd->cfq_quantum = cfq_quantum;
     cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
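
A note on the fifo and rb-key changes above: a request's expiry is now stamped once at insert time with rq_set_fifo_time() and compared later with time_before(), which stays correct across a jiffies wraparound because it looks at the signed difference of the two counters. The fragment below is a minimal userspace sketch of that pattern, not kernel code; mock_rq, enqueue() and fifo_expired() are illustrative names.

#include <stdio.h>
#include <stdbool.h>

typedef unsigned long tick_t;

/* wraparound-safe "a is earlier than b", same idea as the kernel's time_before() */
#define time_before(a, b)    ((long)((a) - (b)) < 0)

struct mock_rq {
    tick_t fifo_time;            /* absolute expiry stamp, set once at enqueue */
};

static void enqueue(struct mock_rq *rq, tick_t now, tick_t fifo_expire)
{
    rq->fifo_time = now + fifo_expire;    /* like rq_set_fifo_time() */
}

static bool fifo_expired(const struct mock_rq *rq, tick_t now)
{
    return !time_before(now, rq->fifo_time);    /* expired once "now" reaches the stamp */
}

int main(void)
{
    struct mock_rq rq;
    tick_t now = (tick_t)-100;    /* counter is about to wrap around */

    enqueue(&rq, now, 200);       /* expiry stamp lands after the wrap */
    printf("just queued:     expired=%d\n", fifo_expired(&rq, now));       /* 0 */
    printf("300 ticks later: expired=%d\n", fifo_expired(&rq, now + 300)); /* 1 */
    return 0;
}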

block/elevator.c   +1 -3

···
         return count;
 
     strlcpy(elevator_name, name, sizeof(elevator_name));
-    strstrip(elevator_name);
-
-    e = elevator_get(elevator_name);
+    e = elevator_get(strstrip(elevator_name));
     if (!e) {
         printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
         return -EINVAL;
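
The elv_iosched_store() fix above hinges on how strstrip() behaves: trailing whitespace is removed in place, but leading whitespace is only skipped through the pointer it returns, so discarding the return value (as the removed lines did) can hand a name with leading blanks to elevator_get(). Below is a small userspace sketch of that pitfall with a simplified stand-in for the kernel helper; mock_strstrip() is an illustrative name.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* simplified model of the kernel strstrip() behaviour */
static char *mock_strstrip(char *s)
{
    char *end = s + strlen(s);

    while (end > s && isspace((unsigned char)end[-1]))
        *--end = '\0';            /* trailing whitespace: fixed up in place */
    while (isspace((unsigned char)*s))
        s++;                      /* leading whitespace: only via the return value */
    return s;
}

int main(void)
{
    char name[16];

    strcpy(name, "  cfq\n");
    mock_strstrip(name);                        /* buggy: result discarded */
    printf("return ignored: \"%s\"\n", name);   /* leading spaces survive */

    strcpy(name, "  cfq\n");
    printf("return used:    \"%s\"\n", mock_strstrip(name));   /* "cfq" */
    return 0;
}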

block/genhd.c   +3 -1

···
 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
+static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
     __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
···
     &dev_attr_alignment_offset.attr,
     &dev_attr_capability.attr,
     &dev_attr_stat.attr,
+    &dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
     &dev_attr_fail.attr,
 #endif
···
            part_stat_read(hd, merges[1]),
            (unsigned long long)part_stat_read(hd, sectors[1]),
            jiffies_to_msecs(part_stat_read(hd, ticks[1])),
-           hd->in_flight,
+           part_in_flight(hd),
            jiffies_to_msecs(part_stat_read(hd, io_ticks)),
            jiffies_to_msecs(part_stat_read(hd, time_in_queue))
            );

drivers/block/cciss.c   +37 -42

···
 MODULE_VERSION("3.6.20");
 MODULE_LICENSE("GPL");
 
+static int cciss_allow_hpsa;
+module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cciss_allow_hpsa,
+    "Prevent cciss driver from accessing hardware known to be "
+    " supported by the hpsa driver");
+
 #include "cciss_cmd.h"
 #include "cciss.h"
 #include <linux/cciss_ioctl.h>
···
     {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
     {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
     {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
-    {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
-        PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
     {0,}
 };
···
     {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
     {0x40910E11, "Smart Array 6i", &SA5_access},
     {0x3225103C, "Smart Array P600", &SA5_access},
-    {0x3223103C, "Smart Array P800", &SA5_access},
-    {0x3234103C, "Smart Array P400", &SA5_access},
     {0x3235103C, "Smart Array P400i", &SA5_access},
     {0x3211103C, "Smart Array E200i", &SA5_access},
     {0x3212103C, "Smart Array E200", &SA5_access},
···
     {0x3214103C, "Smart Array E200i", &SA5_access},
     {0x3215103C, "Smart Array E200i", &SA5_access},
     {0x3237103C, "Smart Array E500", &SA5_access},
+/* controllers below this line are also supported by the hpsa driver. */
+#define HPSA_BOUNDARY 0x3223103C
+    {0x3223103C, "Smart Array P800", &SA5_access},
+    {0x3234103C, "Smart Array P400", &SA5_access},
     {0x323D103C, "Smart Array P700m", &SA5_access},
     {0x3241103C, "Smart Array P212", &SA5_access},
     {0x3243103C, "Smart Array P410", &SA5_access},
···
     {0x3249103C, "Smart Array P812", &SA5_access},
     {0x324A103C, "Smart Array P712m", &SA5_access},
     {0x324B103C, "Smart Array P711m", &SA5_access},
-    {0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 
 /* How long to wait (in milliseconds) for board to go into simple mode */
···
     __u64 cfg_offset;
     __u32 cfg_base_addr;
     __u64 cfg_base_addr_index;
-    int i, err;
+    int i, prod_index, err;
+
+    subsystem_vendor_id = pdev->subsystem_vendor;
+    subsystem_device_id = pdev->subsystem_device;
+    board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
+            subsystem_vendor_id);
+
+    for (i = 0; i < ARRAY_SIZE(products); i++) {
+        /* Stand aside for hpsa driver on request */
+        if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
+            return -ENODEV;
+        if (board_id == products[i].board_id)
+            break;
+    }
+    prod_index = i;
+    if (prod_index == ARRAY_SIZE(products)) {
+        dev_warn(&pdev->dev,
+            "unrecognized board ID: 0x%08lx, ignoring.\n",
+            (unsigned long) board_id);
+        return -ENODEV;
+    }
 
     /* check to see if controller has been disabled */
     /* BEFORE trying to enable it */
···
                "aborting\n");
         return err;
     }
-
-    subsystem_vendor_id = pdev->subsystem_vendor;
-    subsystem_device_id = pdev->subsystem_device;
-    board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
-            subsystem_vendor_id);
 
 #ifdef CCISS_DEBUG
     printk("command = %x\n", command);
···
      * leave a little room for ioctl calls.
      */
     c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
-    for (i = 0; i < ARRAY_SIZE(products); i++) {
-        if (board_id == products[i].board_id) {
-            c->product_name = products[i].product_name;
-            c->access = *(products[i].access);
-            c->nr_cmds = c->max_commands - 4;
-            break;
-        }
-    }
+    c->product_name = products[prod_index].product_name;
+    c->access = *(products[prod_index].access);
+    c->nr_cmds = c->max_commands - 4;
     if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
         (readb(&c->cfgtable->Signature[1]) != 'I') ||
         (readb(&c->cfgtable->Signature[2]) != 'S') ||
···
         printk("Does not appear to be a valid CISS config table\n");
         err = -ENODEV;
         goto err_out_free_res;
-    }
-    /* We didn't find the controller in our list. We know the
-     * signature is valid. If it's an HP device let's try to
-     * bind to the device and fire it up. Otherwise we bail.
-     */
-    if (i == ARRAY_SIZE(products)) {
-        if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
-            c->product_name = products[i-1].product_name;
-            c->access = *(products[i-1].access);
-            c->nr_cmds = c->max_commands - 4;
-            printk(KERN_WARNING "cciss: This is an unknown "
-                "Smart Array controller.\n"
-                "cciss: Please update to the latest driver "
-                "available from www.hp.com.\n");
-        } else {
-            printk(KERN_WARNING "cciss: Sorry, I don't know how"
-                " to access the Smart Array controller %08lx\n"
-                    , (unsigned long)board_id);
-            err = -ENODEV;
-            goto err_out_free_res;
-        }
     }
 #ifdef CONFIG_X86
     {
···
     mutex_init(&hba[i]->busy_shutting_down);
 
     if (cciss_pci_init(hba[i], pdev) != 0)
-        goto clean0;
+        goto clean_no_release_regions;
 
     sprintf(hba[i]->devname, "cciss%d", i);
     hba[i]->ctlr = i;
···
 clean1:
     cciss_destroy_hba_sysfs_entry(hba[i]);
 clean0:
+    pci_release_regions(pdev);
+clean_no_release_regions:
     hba[i]->busy_initializing = 0;
 
     /*
      * Deliberately omit pci_disable_device(): it does something nasty to
      * Smart Array controllers that pci_enable_device does not undo
      */
-    pci_release_regions(pdev);
     pci_set_drvdata(pdev, NULL);
     free_hba(i);
     return -1;
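
For the cciss change above, the board table is split by an HPSA_BOUNDARY marker: when the cciss_allow_hpsa module parameter is set, the probe walks products[] and bails out with -ENODEV as soon as it reaches the boundary entry, so any board listed at or after it is left for the hpsa driver to claim. The sketch below models only that table walk in userspace; the table contents are trimmed and illustrative.

#include <stdio.h>

#define ARRAY_SIZE(a)    (sizeof(a) / sizeof((a)[0]))
#define HPSA_BOUNDARY    0x3223103C    /* first entry also supported by hpsa */

static const struct {
    unsigned int board_id;
    const char *product_name;
} products[] = {
    { 0x3225103C, "Smart Array P600" },
    { 0x3237103C, "Smart Array E500" },
    { 0x3223103C, "Smart Array P800" },    /* boundary: hpsa-capable from here on */
    { 0x3241103C, "Smart Array P212" },
};

static int lookup(unsigned int board_id, int allow_hpsa)
{
    size_t i;

    for (i = 0; i < ARRAY_SIZE(products); i++) {
        /* stand aside for hpsa before any newer board can match */
        if (allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
            return -1;
        if (products[i].board_id == board_id)
            return (int)i;
    }
    return -1;    /* unrecognized board */
}

int main(void)
{
    printf("P212, allow_hpsa=0 -> %d\n", lookup(0x3241103C, 0));  /* found (3)   */
    printf("P212, allow_hpsa=1 -> %d\n", lookup(0x3241103C, 1));  /* refused (-1) */
    printf("E500, allow_hpsa=1 -> %d\n", lookup(0x3237103C, 1));  /* found (1)   */
    return 0;
}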

drivers/md/dm.c   +10 -6

···
     /*
      * A list of ios that arrived while we were suspended.
      */
-    atomic_t pending;
+    atomic_t pending[2];
     wait_queue_head_t wait;
     struct work_struct work;
     struct bio_list deferred;
···
 {
     struct mapped_device *md = io->md;
     int cpu;
+    int rw = bio_data_dir(io->bio);
 
     io->start_time = jiffies;
 
     cpu = part_stat_lock();
     part_round_stats(cpu, &dm_disk(md)->part0);
     part_stat_unlock();
-    dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
+    dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
 }
 
 static void end_io_acct(struct dm_io *io)
···
      * After this is decremented the bio must not be touched if it is
      * a barrier.
      */
-    dm_disk(md)->part0.in_flight = pending =
-        atomic_dec_return(&md->pending);
+    dm_disk(md)->part0.in_flight[rw] = pending =
+        atomic_dec_return(&md->pending[rw]);
+    pending += atomic_read(&md->pending[rw^0x1]);
 
     /* nudge anyone waiting on suspend queue */
     if (!pending)
···
     if (!md->disk)
         goto bad_disk;
 
-    atomic_set(&md->pending, 0);
+    atomic_set(&md->pending[0], 0);
+    atomic_set(&md->pending[1], 0);
     init_waitqueue_head(&md->wait);
     INIT_WORK(&md->work, dm_wq_work);
     init_waitqueue_head(&md->eventq);
···
                 break;
             }
             spin_unlock_irqrestore(q->queue_lock, flags);
-        } else if (!atomic_read(&md->pending))
+        } else if (!atomic_read(&md->pending[0]) &&
+               !atomic_read(&md->pending[1]))
             break;
 
         if (interruptible == TASK_INTERRUPTIBLE &&

fs/partitions/check.c   +11 -1

···
            part_stat_read(p, merges[WRITE]),
            (unsigned long long)part_stat_read(p, sectors[WRITE]),
            jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
-           p->in_flight,
+           part_in_flight(p),
            jiffies_to_msecs(part_stat_read(p, io_ticks)),
            jiffies_to_msecs(part_stat_read(p, time_in_queue)));
+}
+
+ssize_t part_inflight_show(struct device *dev,
+            struct device_attribute *attr, char *buf)
+{
+    struct hd_struct *p = dev_to_part(dev);
+
+    return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]);
 }
 
 #ifdef CONFIG_FAIL_MAKE_REQUEST
···
 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
+static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 static struct device_attribute dev_attr_fail =
     __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
···
     &dev_attr_size.attr,
     &dev_attr_alignment_offset.attr,
     &dev_attr_stat.attr,
+    &dev_attr_inflight.attr,
 #ifdef CONFIG_FAIL_MAKE_REQUEST
     &dev_attr_fail.attr,
 #endif

include/linux/blkdev.h   -4

···
 }
 
 struct work_struct;
-struct delayed_work;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q,
-                  struct delayed_work *work,
-                  unsigned long delay);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
     MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))

include/linux/genhd.h   +14 -7

···
     int make_it_fail;
 #endif
     unsigned long stamp;
-    int in_flight;
+    int in_flight[2];
 #ifdef CONFIG_SMP
     struct disk_stats *dkstats;
 #else
···
 #define part_stat_sub(cpu, gendiskp, field, subnd) \
     part_stat_add(cpu, gendiskp, field, -subnd)
 
-static inline void part_inc_in_flight(struct hd_struct *part)
+static inline void part_inc_in_flight(struct hd_struct *part, int rw)
 {
-    part->in_flight++;
+    part->in_flight[rw]++;
     if (part->partno)
-        part_to_disk(part)->part0.in_flight++;
+        part_to_disk(part)->part0.in_flight[rw]++;
 }
 
-static inline void part_dec_in_flight(struct hd_struct *part)
+static inline void part_dec_in_flight(struct hd_struct *part, int rw)
 {
-    part->in_flight--;
+    part->in_flight[rw]--;
     if (part->partno)
-        part_to_disk(part)->part0.in_flight--;
+        part_to_disk(part)->part0.in_flight[rw]--;
+}
+
+static inline int part_in_flight(struct hd_struct *part)
+{
+    return part->in_flight[0] + part->in_flight[1];
 }
 
 /* block/blk-core.c */
···
 extern ssize_t part_size_show(struct device *dev,
                   struct device_attribute *attr, char *buf);
 extern ssize_t part_stat_show(struct device *dev,
+                  struct device_attribute *attr, char *buf);
+extern ssize_t part_inflight_show(struct device *dev,
                   struct device_attribute *attr, char *buf);
 #ifdef CONFIG_FAIL_MAKE_REQUEST
 extern ssize_t part_fail_show(struct device *dev,
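
The in_flight counter above becomes a two-element array indexed by data direction (0 for reads, 1 for writes), with part_in_flight() summing both halves for callers that still want a single number, such as the stat output and part_round_stats(). Below is a minimal userspace model of those helpers, with mock names mirroring the kernel ones; it is a sketch of the bookkeeping, not the kernel implementation.

#include <stdio.h>

enum { MOCK_READ = 0, MOCK_WRITE = 1 };

struct mock_part {
    int in_flight[2];    /* [0] = reads in flight, [1] = writes in flight */
};

static void part_inc_in_flight(struct mock_part *part, int rw)
{
    part->in_flight[rw]++;
}

static void part_dec_in_flight(struct mock_part *part, int rw)
{
    part->in_flight[rw]--;
}

static int part_in_flight(const struct mock_part *part)
{
    /* total for consumers that don't care about the direction */
    return part->in_flight[0] + part->in_flight[1];
}

int main(void)
{
    struct mock_part p = { { 0, 0 } };

    part_inc_in_flight(&p, MOCK_READ);
    part_inc_in_flight(&p, MOCK_WRITE);
    part_inc_in_flight(&p, MOCK_WRITE);
    part_dec_in_flight(&p, MOCK_READ);

    /* same "reads writes" split the new inflight sysfs file reports */
    printf("%8u %8u\n", (unsigned)p.in_flight[0], (unsigned)p.in_flight[1]);
    printf("total: %d\n", part_in_flight(&p));
    return 0;
}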

kernel/sched.c   -3

···
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
  */
 void __sched io_schedule(void)
 {

mm/backing-dev.c   +1 -1

···
            "BdiDirtyThresh: %8lu kB\n"
            "DirtyThresh: %8lu kB\n"
            "BackgroundThresh: %8lu kB\n"
-           "WriteBack threads:%8lu\n"
+           "WritebackThreads: %8lu\n"
            "b_dirty: %8lu\n"
            "b_io: %8lu\n"
            "b_more_io: %8lu\n"

mm/page-writeback.c   +2 -1

···
         if (pages_written >= write_chunk)
             break;    /* We've done our duty */
 
-        schedule_timeout_interruptible(pause);
+        __set_current_state(TASK_INTERRUPTIBLE);
+        io_schedule_timeout(pause);
 
         /*
          * Increase the delay for each loop, up to our previous