Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[BLOCK] as-iosched: update alias handling

Unlike other ioscheds, as-iosched handles aliases by chaining them using
rq->queuelist. As aliased requests are very rare in the first place,
this complicates merge/dispatch handling without meaningful
performance improvement. This patch updates as-iosched to dump
aliased requests into the dispatch queue as other ioscheds do.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jens Axboe <axboe@suse.de>

authored by

Tejun Heo and committed by
Jens Axboe
ef9be1d3 9f155b98

+25 -119
+25 -119
block/as-iosched.c
··· 182 182 183 183 static kmem_cache_t *arq_pool; 184 184 185 + static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq); 186 + static void as_antic_stop(struct as_data *ad); 187 + 185 188 /* 186 189 * IO Context helper functions 187 190 */ ··· 373 370 * existing request against the same sector), which can happen when using 374 371 * direct IO, then return the alias. 375 372 */ 376 - static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq) 373 + static struct as_rq *__as_add_arq_rb(struct as_data *ad, struct as_rq *arq) 377 374 { 378 375 struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node; 379 376 struct rb_node *parent = NULL; ··· 398 395 rb_insert_color(&arq->rb_node, ARQ_RB_ROOT(ad, arq)); 399 396 400 397 return NULL; 398 + } 399 + 400 + static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq) 401 + { 402 + struct as_rq *alias; 403 + 404 + while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) { 405 + as_move_to_dispatch(ad, alias); 406 + as_antic_stop(ad); 407 + } 401 408 } 402 409 403 410 static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq) ··· 1146 1133 /* 1147 1134 * take it off the sort and fifo list, add to dispatch queue 1148 1135 */ 1149 - while (!list_empty(&rq->queuelist)) { 1150 - struct request *__rq = list_entry_rq(rq->queuelist.next); 1151 - struct as_rq *__arq = RQ_DATA(__rq); 1152 - 1153 - list_del(&__rq->queuelist); 1154 - 1155 - elv_dispatch_add_tail(ad->q, __rq); 1156 - 1157 - if (__arq->io_context && __arq->io_context->aic) 1158 - atomic_inc(&__arq->io_context->aic->nr_dispatched); 1159 - 1160 - WARN_ON(__arq->state != AS_RQ_QUEUED); 1161 - __arq->state = AS_RQ_DISPATCHED; 1162 - 1163 - ad->nr_dispatched++; 1164 - } 1165 - 1166 1136 as_remove_queued_request(ad->q, rq); 1167 1137 WARN_ON(arq->state != AS_RQ_QUEUED); 1168 1138 ··· 1322 1326 } 1323 1327 1324 1328 /* 1325 - * Add arq to a list behind alias 1326 - */ 1327 - static inline void 1328 - as_add_aliased_request(struct as_data *ad, 
struct as_rq *arq, 1329 - struct as_rq *alias) 1330 - { 1331 - struct request *req = arq->request; 1332 - struct list_head *insert = alias->request->queuelist.prev; 1333 - 1334 - /* 1335 - * Transfer list of aliases 1336 - */ 1337 - while (!list_empty(&req->queuelist)) { 1338 - struct request *__rq = list_entry_rq(req->queuelist.next); 1339 - struct as_rq *__arq = RQ_DATA(__rq); 1340 - 1341 - list_move_tail(&__rq->queuelist, &alias->request->queuelist); 1342 - 1343 - WARN_ON(__arq->state != AS_RQ_QUEUED); 1344 - } 1345 - 1346 - /* 1347 - * Another request with the same start sector on the rbtree. 1348 - * Link this request to that sector. They are untangled in 1349 - * as_move_to_dispatch 1350 - */ 1351 - list_add(&arq->request->queuelist, insert); 1352 - 1353 - /* 1354 - * Don't want to have to handle merges. 1355 - */ 1356 - as_del_arq_hash(arq); 1357 - arq->request->flags |= REQ_NOMERGE; 1358 - } 1359 - 1360 - /* 1361 1329 * add arq to rbtree and fifo 1362 1330 */ 1363 1331 static void as_add_request(request_queue_t *q, struct request *rq) 1364 1332 { 1365 1333 struct as_data *ad = q->elevator->elevator_data; 1366 1334 struct as_rq *arq = RQ_DATA(rq); 1367 - struct as_rq *alias; 1368 1335 int data_dir; 1369 1336 1370 1337 arq->state = AS_RQ_NEW; ··· 1346 1387 atomic_inc(&arq->io_context->aic->nr_queued); 1347 1388 } 1348 1389 1349 - alias = as_add_arq_rb(ad, arq); 1350 - if (!alias) { 1351 - /* 1352 - * set expire time (only used for reads) and add to fifo list 1353 - */ 1354 - arq->expires = jiffies + ad->fifo_expire[data_dir]; 1355 - list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]); 1390 + as_add_arq_rb(ad, arq); 1391 + if (rq_mergeable(arq->request)) 1392 + as_add_arq_hash(ad, arq); 1356 1393 1357 - if (rq_mergeable(arq->request)) 1358 - as_add_arq_hash(ad, arq); 1359 - as_update_arq(ad, arq); /* keep state machine up to date */ 1394 + /* 1395 + * set expire time (only used for reads) and add to fifo list 1396 + */ 1397 + arq->expires = jiffies + 
ad->fifo_expire[data_dir]; 1398 + list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]); 1360 1399 1361 - } else { 1362 - as_add_aliased_request(ad, arq, alias); 1363 - 1364 - /* 1365 - * have we been anticipating this request? 1366 - * or does it come from the same process as the one we are 1367 - * anticipating for? 1368 - */ 1369 - if (ad->antic_status == ANTIC_WAIT_REQ 1370 - || ad->antic_status == ANTIC_WAIT_NEXT) { 1371 - if (as_can_break_anticipation(ad, arq)) 1372 - as_antic_stop(ad); 1373 - } 1374 - } 1375 - 1400 + as_update_arq(ad, arq); /* keep state machine up to date */ 1376 1401 arq->state = AS_RQ_QUEUED; 1377 1402 } 1378 1403 ··· 1479 1536 * if the merge was a front merge, we need to reposition request 1480 1537 */ 1481 1538 if (rq_rb_key(req) != arq->rb_key) { 1482 - struct as_rq *alias, *next_arq = NULL; 1483 - 1484 - if (ad->next_arq[arq->is_sync] == arq) 1485 - next_arq = as_find_next_arq(ad, arq); 1486 - 1487 - /* 1488 - * Note! We should really be moving any old aliased requests 1489 - * off this request and try to insert them into the rbtree. We 1490 - * currently don't bother. Ditto the next function. 1491 - */ 1492 1539 as_del_arq_rb(ad, arq); 1493 - if ((alias = as_add_arq_rb(ad, arq))) { 1494 - list_del_init(&arq->fifo); 1495 - as_add_aliased_request(ad, arq, alias); 1496 - if (next_arq) 1497 - ad->next_arq[arq->is_sync] = next_arq; 1498 - } 1540 + as_add_arq_rb(ad, arq); 1499 1541 /* 1500 1542 * Note! 
At this stage of this and the next function, our next 1501 1543 * request may not be optimal - eg the request may have "grown" ··· 1507 1579 as_add_arq_hash(ad, arq); 1508 1580 1509 1581 if (rq_rb_key(req) != arq->rb_key) { 1510 - struct as_rq *alias, *next_arq = NULL; 1511 - 1512 - if (ad->next_arq[arq->is_sync] == arq) 1513 - next_arq = as_find_next_arq(ad, arq); 1514 - 1515 1582 as_del_arq_rb(ad, arq); 1516 - if ((alias = as_add_arq_rb(ad, arq))) { 1517 - list_del_init(&arq->fifo); 1518 - as_add_aliased_request(ad, arq, alias); 1519 - if (next_arq) 1520 - ad->next_arq[arq->is_sync] = next_arq; 1521 - } 1583 + as_add_arq_rb(ad, arq); 1522 1584 } 1523 1585 1524 1586 /* ··· 1525 1607 */ 1526 1608 swap_io_context(&arq->io_context, &anext->io_context); 1527 1609 } 1528 - } 1529 - 1530 - /* 1531 - * Transfer list of aliases 1532 - */ 1533 - while (!list_empty(&next->queuelist)) { 1534 - struct request *__rq = list_entry_rq(next->queuelist.next); 1535 - struct as_rq *__arq = RQ_DATA(__rq); 1536 - 1537 - list_move_tail(&__rq->queuelist, &req->queuelist); 1538 - 1539 - WARN_ON(__arq->state != AS_RQ_QUEUED); 1540 1610 } 1541 1611 1542 1612 /*