Merge branch 'cfq-merge' of git://brick.kernel.dk/data/git/linux-2.6-block

* 'cfq-merge' of git://brick.kernel.dk/data/git/linux-2.6-block:
  [BLOCK] cfq-iosched: seek and async performance fixes
  [PATCH] ll_rw_blk: fix 80-col offender in put_io_context()
  [PATCH] cfq-iosched: small cfq_choose_req() optimization
  [PATCH] [BLOCK] cfq-iosched: change cfq io context linking from list to tree

Total: +222 -182

block/cfq-iosched.c  (+194 -167)
···
 static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
-static int cfq_slice_idle = HZ / 100;
+static int cfq_slice_idle = HZ / 70;

 #define CFQ_IDLE_GRACE	(HZ / 10)
 #define CFQ_SLICE_SCALE	(5)

 #define CFQ_KEY_ASYNC	(0)
-#define CFQ_KEY_ANY	(0xffff)
-
-/*
- * disable queueing at the driver/hardware level
- */
-static const int cfq_max_depth = 2;

 static DEFINE_RWLOCK(cfq_exit_lock);

···

 #define cfq_cfqq_sync(cfqq)	\
 	(cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
+
+#define sample_valid(samples)	((samples) > 80)

 /*
  * Per block device queue structure
···
 	unsigned int cfq_slice[2];
 	unsigned int cfq_slice_async_rq;
 	unsigned int cfq_slice_idle;
-	unsigned int cfq_max_depth;

 	struct list_head cic_list;
 };
···
 	return !cfqd->busy_queues;
 }

+static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
+{
+	if (rw == READ || process_sync(task))
+		return task->pid;
+
+	return CFQ_KEY_ASYNC;
+}
+
 /*
  * Lifted from AS - choose which of crq1 and crq2 that is best served now.
  * We choose the request that is closest to the head right now. Distance
- * behind the head are penalized and only allowed to a certain extent.
+ * behind the head is penalized and only allowed to a certain extent.
  */
 static struct cfq_rq *
 cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
 {
 	sector_t last, s1, s2, d1 = 0, d2 = 0;
-	int r1_wrap = 0, r2_wrap = 0; /* requests are behind the disk head */
 	unsigned long back_max;
+#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
+#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
+	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

 	if (crq1 == NULL || crq1 == crq2)
 		return crq2;
···
 	else if (s1 + back_max >= last)
 		d1 = (last - s1) * cfqd->cfq_back_penalty;
 	else
-		r1_wrap = 1;
+		wrap |= CFQ_RQ1_WRAP;

 	if (s2 >= last)
 		d2 = s2 - last;
 	else if (s2 + back_max >= last)
 		d2 = (last - s2) * cfqd->cfq_back_penalty;
 	else
-		r2_wrap = 1;
+		wrap |= CFQ_RQ2_WRAP;

 	/* Found required data */
-	if (!r1_wrap && r2_wrap)
-		return crq1;
-	else if (!r2_wrap && r1_wrap)
-		return crq2;
-	else if (r1_wrap && r2_wrap) {
-		/* both behind the head */
-		if (s1 <= s2)
-			return crq1;
-		else
-			return crq2;
-	}

-	/* Both requests in front of the head */
-	if (d1 < d2)
+	/*
+	 * By doing switch() on the bit mask "wrap" we avoid having to
+	 * check two variables for all permutations: --> faster!
+	 */
+	switch (wrap) {
+	case 0: /* common case for CFQ: crq1 and crq2 not wrapped */
+		if (d1 < d2)
+			return crq1;
+		else if (d2 < d1)
+			return crq2;
+		else {
+			if (s1 >= s2)
+				return crq1;
+			else
+				return crq2;
+		}
+
+	case CFQ_RQ2_WRAP:
 		return crq1;
-	else if (d2 < d1)
+	case CFQ_RQ1_WRAP:
 		return crq2;
-	else {
-		if (s1 >= s2)
+	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both crqs wrapped */
+	default:
+		/*
+		 * Since both rqs are wrapped,
+		 * start with the one that's further behind head
+		 * (--> only *one* back seek required),
+		 * since back seek takes more time than forward.
+		 */
+		if (s1 <= s2)
 			return crq1;
 		else
 			return crq2;
···
 	cfq_add_crq_rb(crq);
 }

-static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
-
+static struct request *
+cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
 {
-	struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY);
+	struct task_struct *tsk = current;
+	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
+	struct cfq_queue *cfqq;
 	struct rb_node *n;
+	sector_t sector;

+	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
 	if (!cfqq)
 		goto out;

+	sector = bio->bi_sector + bio_sectors(bio);
 	n = cfqq->sort_list.rb_node;
 	while (n) {
 		struct cfq_rq *crq = rb_entry_crq(n);
···
 		goto out;
 	}

-	__rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
+	__rq = cfq_find_rq_fmerge(cfqd, bio);
 	if (__rq && elv_rq_merge_ok(__rq, bio)) {
 		ret = ELEVATOR_FRONT_MERGE;
 		goto out;
···
 static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)

 {
+	struct cfq_io_context *cic;
 	unsigned long sl;

 	WARN_ON(!RB_EMPTY(&cfqq->sort_list));
···
 	/*
 	 * task has exited, don't wait
 	 */
-	if (cfqd->active_cic && !cfqd->active_cic->ioc->task)
+	cic = cfqd->active_cic;
+	if (!cic || !cic->ioc->task)
 		return 0;

 	cfq_mark_cfqq_must_dispatch(cfqq);
 	cfq_mark_cfqq_wait_request(cfqq);

 	sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
+
+	/*
+	 * we don't want to idle for seeks, but we do want to allow
+	 * fair distribution of slice time for a process doing back-to-back
+	 * seeks. so allow a little bit of time for him to submit a new rq
+	 */
+	if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072)
+		sl = 2;
+
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
 	return 1;
 }
···
 	if (cfqq) {
 		int max_dispatch;

-		/*
-		 * if idle window is disabled, allow queue buildup
-		 */
-		if (!cfq_cfqq_idle_window(cfqq) &&
-		    cfqd->rq_in_driver >= cfqd->cfq_max_depth)
-			return 0;
-
 		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_wait_request(cfqq);
 		del_timer(&cfqd->idle_slice_timer);
···
 		    const int hashval)
 {
 	struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
-	struct hlist_node *entry, *next;
+	struct hlist_node *entry;
+	struct cfq_queue *__cfqq;

-	hlist_for_each_safe(entry, next, hash_list) {
-		struct cfq_queue *__cfqq = list_entry_qhash(entry);
+	hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
 		const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);

-		if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY))
+		if (__cfqq->key == key && (__p == prio || !prio))
 			return __cfqq;
 	}

···
 	return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
 }

-static void cfq_free_io_context(struct cfq_io_context *cic)
+static void cfq_free_io_context(struct io_context *ioc)
 {
 	struct cfq_io_context *__cic;
-	struct list_head *entry, *next;
-	int freed = 1;
+	struct rb_node *n;
+	int freed = 0;

-	list_for_each_safe(entry, next, &cic->list) {
-		__cic = list_entry(entry, struct cfq_io_context, list);
+	while ((n = rb_first(&ioc->cic_root)) != NULL) {
+		__cic = rb_entry(n, struct cfq_io_context, rb_node);
+		rb_erase(&__cic->rb_node, &ioc->cic_root);
 		kmem_cache_free(cfq_ioc_pool, __cic);
 		freed++;
 	}

-	kmem_cache_free(cfq_ioc_pool, cic);
 	if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone)
 		complete(ioc_gone);
 }
···
 static void cfq_trim(struct io_context *ioc)
 {
 	ioc->set_ioprio = NULL;
-	if (ioc->cic)
-		cfq_free_io_context(ioc->cic);
+	cfq_free_io_context(ioc);
 }

 /*
···
 	spin_unlock(q->queue_lock);
 }

-static void cfq_exit_io_context(struct cfq_io_context *cic)
+static void cfq_exit_io_context(struct io_context *ioc)
 {
 	struct cfq_io_context *__cic;
-	struct list_head *entry;
 	unsigned long flags;
-
-	local_irq_save(flags);
+	struct rb_node *n;

 	/*
 	 * put the reference this task is holding to the various queues
 	 */
-	read_lock(&cfq_exit_lock);
-	list_for_each(entry, &cic->list) {
-		__cic = list_entry(entry, struct cfq_io_context, list);
+	read_lock_irqsave(&cfq_exit_lock, flags);
+
+	n = rb_first(&ioc->cic_root);
+	while (n != NULL) {
+		__cic = rb_entry(n, struct cfq_io_context, rb_node);
+
 		cfq_exit_single_io_context(__cic);
+		n = rb_next(n);
 	}

-	cfq_exit_single_io_context(cic);
-	read_unlock(&cfq_exit_lock);
-	local_irq_restore(flags);
+	read_unlock_irqrestore(&cfq_exit_lock, flags);
 }

 static struct cfq_io_context *
···
 	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);

 	if (cic) {
-		INIT_LIST_HEAD(&cic->list);
+		RB_CLEAR(&cic->rb_node);
+		cic->key = NULL;
 		cic->cfqq[ASYNC] = NULL;
 		cic->cfqq[SYNC] = NULL;
-		cic->key = NULL;
 		cic->last_end_request = jiffies;
 		cic->ttime_total = 0;
 		cic->ttime_samples = 0;
···
 static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
 {
 	struct cfq_io_context *cic;
+	struct rb_node *n;

 	write_lock(&cfq_exit_lock);

-	cic = ioc->cic;
-
-	changed_ioprio(cic);
-
-	list_for_each_entry(cic, &cic->list, list)
+	n = rb_first(&ioc->cic_root);
+	while (n != NULL) {
+		cic = rb_entry(n, struct cfq_io_context, rb_node);
+
 		changed_ioprio(cic);
+		n = rb_next(n);
+	}

 	write_unlock(&cfq_exit_lock);

···
 	return cfqq;
 }

+static struct cfq_io_context *
+cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
+{
+	struct rb_node *n = ioc->cic_root.rb_node;
+	struct cfq_io_context *cic;
+	void *key = cfqd;
+
+	while (n) {
+		cic = rb_entry(n, struct cfq_io_context, rb_node);
+
+		if (key < cic->key)
+			n = n->rb_left;
+		else if (key > cic->key)
+			n = n->rb_right;
+		else
+			return cic;
+	}
+
+	return NULL;
+}
+
+static inline void
+cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
+	     struct cfq_io_context *cic)
+{
+	struct rb_node **p = &ioc->cic_root.rb_node;
+	struct rb_node *parent = NULL;
+	struct cfq_io_context *__cic;
+
+	read_lock(&cfq_exit_lock);
+
+	cic->ioc = ioc;
+	cic->key = cfqd;
+
+	ioc->set_ioprio = cfq_ioc_set_ioprio;
+
+	while (*p) {
+		parent = *p;
+		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
+
+		if (cic->key < __cic->key)
+			p = &(*p)->rb_left;
+		else if (cic->key > __cic->key)
+			p = &(*p)->rb_right;
+		else
+			BUG();
+	}
+
+	rb_link_node(&cic->rb_node, parent, p);
+	rb_insert_color(&cic->rb_node, &ioc->cic_root);
+	list_add(&cic->queue_list, &cfqd->cic_list);
+	read_unlock(&cfq_exit_lock);
+}
+
 /*
  * Setup general io context and cfq io context. There can be several cfq
  * io contexts per general io context, if this process is doing io to more
- * than one device managed by cfq. Note that caller is holding a reference to
- * cfqq, so we don't need to worry about it disappearing
+ * than one device managed by cfq.
  */
 static struct cfq_io_context *
-cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
+cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
 	struct io_context *ioc = NULL;
 	struct cfq_io_context *cic;
···
 	if (!ioc)
 		return NULL;

-restart:
-	if ((cic = ioc->cic) == NULL) {
-		cic = cfq_alloc_io_context(cfqd, gfp_mask);
+	cic = cfq_cic_rb_lookup(cfqd, ioc);
+	if (cic)
+		goto out;

-		if (cic == NULL)
-			goto err;
+	cic = cfq_alloc_io_context(cfqd, gfp_mask);
+	if (cic == NULL)
+		goto err;

-		/*
-		 * manually increment generic io_context usage count, it
-		 * cannot go away since we are already holding one ref to it
-		 */
-		cic->ioc = ioc;
-		cic->key = cfqd;
-		read_lock(&cfq_exit_lock);
-		ioc->set_ioprio = cfq_ioc_set_ioprio;
-		ioc->cic = cic;
-		list_add(&cic->queue_list, &cfqd->cic_list);
-		read_unlock(&cfq_exit_lock);
-	} else {
-		struct cfq_io_context *__cic;
-
-		/*
-		 * the first cic on the list is actually the head itself
-		 */
-		if (cic->key == cfqd)
-			goto out;
-
-		if (unlikely(!cic->key)) {
-			read_lock(&cfq_exit_lock);
-			if (list_empty(&cic->list))
-				ioc->cic = NULL;
-			else
-				ioc->cic = list_entry(cic->list.next,
-						      struct cfq_io_context,
-						      list);
-			read_unlock(&cfq_exit_lock);
-			kmem_cache_free(cfq_ioc_pool, cic);
-			atomic_dec(&ioc_count);
-			goto restart;
-		}
-
-		/*
-		 * cic exists, check if we already are there. linear search
-		 * should be ok here, the list will usually not be more than
-		 * 1 or a few entries long
-		 */
-		list_for_each_entry(__cic, &cic->list, list) {
-			/*
-			 * this process is already holding a reference to
-			 * this queue, so no need to get one more
-			 */
-			if (__cic->key == cfqd) {
-				cic = __cic;
-				goto out;
-			}
-			if (unlikely(!__cic->key)) {
-				read_lock(&cfq_exit_lock);
-				list_del(&__cic->list);
-				read_unlock(&cfq_exit_lock);
-				kmem_cache_free(cfq_ioc_pool, __cic);
-				atomic_dec(&ioc_count);
-				goto restart;
-			}
-		}
-
-		/*
-		 * nope, process doesn't have a cic assoicated with this
-		 * cfqq yet. get a new one and add to list
-		 */
-		__cic = cfq_alloc_io_context(cfqd, gfp_mask);
-		if (__cic == NULL)
-			goto err;
-
-		__cic->ioc = ioc;
-		__cic->key = cfqd;
-		read_lock(&cfq_exit_lock);
-		list_add(&__cic->list, &cic->list);
-		list_add(&__cic->queue_list, &cfqd->cic_list);
-		read_unlock(&cfq_exit_lock);
-		cic = __cic;
-	}
-
+	cfq_cic_link(cfqd, ioc, cic);
 out:
 	return cic;
 err:
···
 	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
 }

-#define sample_valid(samples)	((samples) > 80)
+static void
+cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
+		       struct cfq_rq *crq)
+{
+	sector_t sdist;
+	u64 total;
+
+	if (cic->last_request_pos < crq->request->sector)
+		sdist = crq->request->sector - cic->last_request_pos;
+	else
+		sdist = cic->last_request_pos - crq->request->sector;
+
+	/*
+	 * Don't allow the seek distance to get too large from the
+	 * odd fragment, pagein, etc
+	 */
+	if (cic->seek_samples <= 60) /* second&third seek */
+		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
+	else
+		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
+
+	cic->seek_samples = (7*cic->seek_samples + 256) / 8;
+	cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
+	total = cic->seek_total + (cic->seek_samples/2);
+	do_div(total, cic->seek_samples);
+	cic->seek_mean = (sector_t)total;
+}

 /*
  * Disable idle window if the process thinks too long or seeks so much that
···
 	cic = crq->io_context;

 	cfq_update_io_thinktime(cfqd, cic);
+	cfq_update_io_seektime(cfqd, cic, crq);
 	cfq_update_idle_window(cfqd, cfqq, cic);

 	cic->last_queue = jiffies;
+	cic->last_request_pos = crq->request->sector + crq->request->nr_sectors;

 	if (cfqq == cfqd->active_queue) {
 		/*
···
 	if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
 	    cfq_cfqq_on_rr(cfqq))
 		cfq_resort_rr_list(cfqq, 0);
-}
-
-static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
-{
-	if (rw == READ || process_sync(task))
-		return task->pid;
-
-	return CFQ_KEY_ASYNC;
 }

 static inline int
···

 	might_sleep_if(gfp_mask & __GFP_WAIT);

-	cic = cfq_get_io_context(cfqd, key, gfp_mask);
+	cic = cfq_get_io_context(cfqd, gfp_mask);

 	spin_lock_irqsave(q->queue_lock, flags);

···
 	request_queue_t *q = cfqd->queue;

 	cfq_shutdown_timer_wq(cfqd);
+
 	write_lock(&cfq_exit_lock);
 	spin_lock_irq(q->queue_lock);
+
 	if (cfqd->active_queue)
 		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
-	while(!list_empty(&cfqd->cic_list)) {
+
+	while (!list_empty(&cfqd->cic_list)) {
 		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
 							struct cfq_io_context,
 							queue_list);
···
 		cic->key = NULL;
 		list_del_init(&cic->queue_list);
 	}
+
 	spin_unlock_irq(q->queue_lock);
 	write_unlock(&cfq_exit_lock);

···
 	cfqd->cfq_slice[1] = cfq_slice_sync;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
-	cfqd->cfq_max_depth = cfq_max_depth;

 	return 0;
 out_crqpool:
···
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
-SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
 #undef SHOW_FUNCTION

 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)	\
···
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
 #undef STORE_FUNCTION

 #define CFQ_ATTR(name) \
···
 	CFQ_ATTR(slice_async),
 	CFQ_ATTR(slice_async_rq),
 	CFQ_ATTR(slice_idle),
-	CFQ_ATTR(max_depth),
 	__ATTR_NULL
 };

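
The cfq_choose_req() hunk above replaces the old pair of wrap flags and their if/else permutations with a single switch() on a bit mask. A minimal user-space sketch of that dispatch pattern follows; the names pick_request(), RQ1_WRAP and RQ2_WRAP are illustrative stand-ins, not the kernel symbols, and the sector/distance values in main() are made up.

#include <stdio.h>

#define RQ1_WRAP 0x01	/* request 1 lies behind the disk head */
#define RQ2_WRAP 0x02	/* request 2 lies behind the disk head */

/*
 * Encode the two "wrapped" flags as bits and switch() on the combined
 * mask instead of testing two separate variables for every permutation.
 * Returns 1 or 2 for whichever request should be served first.
 */
static int pick_request(unsigned long long d1, unsigned long long d2,
			unsigned long long s1, unsigned long long s2,
			unsigned wrap)
{
	switch (wrap) {
	case 0:			/* common case: neither request wrapped */
		if (d1 < d2)
			return 1;
		if (d2 < d1)
			return 2;
		return s1 >= s2 ? 1 : 2;	/* equal distance: higher sector */
	case RQ2_WRAP:		/* only request 2 wrapped: prefer request 1 */
		return 1;
	case RQ1_WRAP:		/* only request 1 wrapped: prefer request 2 */
		return 2;
	default:		/* both wrapped: start with the lower sector */
		return s1 <= s2 ? 1 : 2;
	}
}

int main(void)
{
	/* request 1 is 100 sectors ahead of the head, request 2 has wrapped */
	printf("chose request %d\n", pick_request(100, 0, 1100, 500, RQ2_WRAP));
	return 0;
}

In the kernel hunk the mask is built with wrap |= CFQ_RQ1_WRAP / CFQ_RQ2_WRAP while d1 and d2 are computed, so the common unwrapped case falls straight into case 0.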
block/ll_rw_blk.c  (+15 -6)
···
 	BUG_ON(atomic_read(&ioc->refcount) == 0);

 	if (atomic_dec_and_test(&ioc->refcount)) {
+		struct cfq_io_context *cic;
+
 		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
-		if (ioc->cic && ioc->cic->dtor)
-			ioc->cic->dtor(ioc->cic);
+		if (ioc->cic_root.rb_node != NULL) {
+			struct rb_node *n = rb_first(&ioc->cic_root);
+
+			cic = rb_entry(n, struct cfq_io_context, rb_node);
+			cic->dtor(ioc);
+		}
 		rcu_read_unlock();

 		kmem_cache_free(iocontext_cachep, ioc);
···
 {
 	unsigned long flags;
 	struct io_context *ioc;
+	struct cfq_io_context *cic;

 	local_irq_save(flags);
 	task_lock(current);
···

 	if (ioc->aic && ioc->aic->exit)
 		ioc->aic->exit(ioc->aic);
-	if (ioc->cic && ioc->cic->exit)
-		ioc->cic->exit(ioc->cic);
-
+	if (ioc->cic_root.rb_node != NULL) {
+		cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
+		cic->exit(ioc);
+	}
+
 	put_io_context(ioc);

···
 	ret->last_waited = jiffies; /* doesn't matter... */
 	ret->nr_batch_requests = 0; /* because this is 0 */
 	ret->aic = NULL;
-	ret->cic = NULL;
+	ret->cic_root.rb_node = NULL;
 	tsk->io_context = ret;

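
put_io_context() above keeps its existing shape: drop a reference with atomic_dec_and_test(), and only on the final put invoke the CFQ destructor (now reached via rb_first(&ioc->cic_root) and handed the whole io_context) before freeing. A stripped-down user-space sketch of that release pattern is below; io_ctx, io_ctx_put() and the dtor hook are hypothetical stand-ins for illustration, not the block-layer API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct io_context: a refcount plus a dtor hook. */
struct io_ctx {
	atomic_int refcount;
	void (*dtor)(struct io_ctx *);	/* per-subsystem cleanup, may be NULL */
};

static void io_ctx_put(struct io_ctx *ioc)
{
	/* last put: run subsystem cleanup, then free the context itself */
	if (atomic_fetch_sub(&ioc->refcount, 1) == 1) {
		if (ioc->dtor)
			ioc->dtor(ioc);
		free(ioc);
	}
}

static void cfq_like_dtor(struct io_ctx *ioc)
{
	/* in the kernel this tears down every cfq_io_context in ioc->cic_root */
	printf("tearing down per-device contexts for %p\n", (void *)ioc);
}

int main(void)
{
	struct io_ctx *ioc = calloc(1, sizeof(*ioc));

	atomic_init(&ioc->refcount, 2);	/* e.g. the task plus an in-flight request */
	ioc->dtor = cfq_like_dtor;

	io_ctx_put(ioc);	/* refcount 2 -> 1, nothing happens */
	io_ctx_put(ioc);	/* final put runs the destructor and frees */
	return 0;
}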
include/linux/blkdev.h  (+13 -9)
···

 struct cfq_queue;
 struct cfq_io_context {
-	/*
-	 * circular list of cfq_io_contexts belonging to a process io context
-	 */
-	struct list_head list;
-	struct cfq_queue *cfqq[2];
+	struct rb_node rb_node;
 	void *key;
+
+	struct cfq_queue *cfqq[2];

 	struct io_context *ioc;

 	unsigned long last_end_request;
-	unsigned long last_queue;
+	sector_t last_request_pos;
+	unsigned long last_queue;
+
 	unsigned long ttime_total;
 	unsigned long ttime_samples;
 	unsigned long ttime_mean;

+	unsigned int seek_samples;
+	u64 seek_total;
+	sector_t seek_mean;
+
 	struct list_head queue_list;

-	void (*dtor)(struct cfq_io_context *);
-	void (*exit)(struct cfq_io_context *);
+	void (*dtor)(struct io_context *); /* destructor */
+	void (*exit)(struct io_context *); /* called on task exit */
 };

 /*
···
 	int nr_batch_requests;	/* Number of requests left in the batch */

 	struct as_io_context *aic;
-	struct cfq_io_context *cic;
+	struct rb_root cic_root;
 };

 void put_io_context(struct io_context *ioc);
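
With blkdev.h now embedding an rb_node in each cfq_io_context and an rb_root in the io_context, cfq finds a process's per-device context by comparing the queue's cfq_data pointer against cic->key (see cfq_cic_rb_lookup() in the first diff). The sketch below shows that pointer-keyed lookup in isolation, using a plain unbalanced binary search tree in place of the kernel's lib/rbtree; cic_node, cic_lookup() and cic_insert() are made-up names for illustration.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for cfq_io_context: one node per (process, block device) pair. */
struct cic_node {
	void *key;			/* the per-device cfq_data pointer */
	struct cic_node *left, *right;
};

/* Walk the tree the way cfq_cic_rb_lookup() does: compare raw pointer values. */
static struct cic_node *cic_lookup(struct cic_node *root, void *key)
{
	while (root) {
		if ((uintptr_t)key < (uintptr_t)root->key)
			root = root->left;
		else if ((uintptr_t)key > (uintptr_t)root->key)
			root = root->right;
		else
			return root;
	}
	return NULL;
}

/* Insert a node; the kernel version BUG()s on a duplicate key, this just demos. */
static struct cic_node *cic_insert(struct cic_node *root, struct cic_node *node)
{
	struct cic_node **p = &root;

	while (*p) {
		if ((uintptr_t)node->key < (uintptr_t)(*p)->key)
			p = &(*p)->left;
		else
			p = &(*p)->right;
	}
	*p = node;
	return root;
}

int main(void)
{
	int dev_a, dev_b;	/* pretend these are two cfq_data instances */
	struct cic_node a = { .key = &dev_a }, b = { .key = &dev_b };
	struct cic_node *root = NULL;

	root = cic_insert(root, &a);
	root = cic_insert(root, &b);
	printf("lookup(dev_b) found: %s\n", cic_lookup(root, &dev_b) ? "yes" : "no");
	return 0;
}

The kernel version additionally rebalances through rb_link_node()/rb_insert_color() and takes cfq_exit_lock around the insert (see cfq_cic_link()); the sketch skips both.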