Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cfq: pass around cfq_io_cq instead of io_context

Now that io_cq is managed by block core and guaranteed to exist for
any in-flight request, it is easier and carries more information to
pass around cfq_io_cq than io_context.

This patch updates cfq_init_prio_data(), cfq_find_alloc_queue() and
cfq_get_queue() to take @cic instead of @ioc. This change removes a
duplicate cfq_cic_lookup() from cfq_find_alloc_queue().

This change enables the use of cic-cached ioprio in the next patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Tejun Heo; committed by Jens Axboe
Commits: abede6da 9a9e8a26

+17 -22
block/cfq-iosched.c
@@ -468,7 +468,7 @@
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
 static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
-				       struct io_context *ioc, struct bio *bio,
+				       struct cfq_io_cq *cic, struct bio *bio,
 				       gfp_t gfp_mask);
 
 static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
@@ -2560,7 +2560,7 @@
 	}
 }
 
-static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
+static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
 {
 	struct task_struct *tsk = current;
 	int ioprio_class;
@@ -2568,7 +2568,7 @@
 	if (!cfq_cfqq_prio_changed(cfqq))
 		return;
 
-	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
+	ioprio_class = IOPRIO_PRIO_CLASS(cic->icq.ioc->ioprio);
 	switch (ioprio_class) {
 	default:
 		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
@@ -2580,11 +2580,11 @@
 		cfqq->ioprio_class = task_nice_ioclass(tsk);
 		break;
 	case IOPRIO_CLASS_RT:
-		cfqq->ioprio = task_ioprio(ioc);
+		cfqq->ioprio = task_ioprio(cic->icq.ioc);
 		cfqq->ioprio_class = IOPRIO_CLASS_RT;
 		break;
 	case IOPRIO_CLASS_BE:
-		cfqq->ioprio = task_ioprio(ioc);
+		cfqq->ioprio = task_ioprio(cic->icq.ioc);
 		cfqq->ioprio_class = IOPRIO_CLASS_BE;
 		break;
 	case IOPRIO_CLASS_IDLE:
@@ -2613,8 +2613,8 @@
 	cfqq = cic->cfqq[BLK_RW_ASYNC];
 	if (cfqq) {
 		struct cfq_queue *new_cfqq;
-		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
-					 bio, GFP_ATOMIC);
+		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
+					 GFP_ATOMIC);
 		if (new_cfqq) {
 			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
 			cfq_put_queue(cfqq);
@@ -2671,23 +2671,18 @@
 #endif /* CONFIG_CFQ_GROUP_IOSCHED */
 
 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
-		     struct io_context *ioc, struct bio *bio, gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
+		     struct bio *bio, gfp_t gfp_mask)
 {
 	struct blkio_cgroup *blkcg;
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
-	struct cfq_io_cq *cic;
 	struct cfq_group *cfqg;
 
 retry:
 	rcu_read_lock();
 
 	blkcg = bio_blkio_cgroup(bio);
-
 	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
-
-	cic = cfq_cic_lookup(cfqd, ioc);
-	/* cic always exists here */
 	cfqq = cic_to_cfqq(cic, is_sync);
 
 	/*
@@ -2711,7 +2716,7 @@
 
 	if (cfqq) {
 		cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
-		cfq_init_prio_data(cfqq, ioc);
+		cfq_init_prio_data(cfqq, cic);
 		cfq_link_cfqq_cfqg(cfqq, cfqg);
 		cfq_log_cfqq(cfqd, cfqq, "alloced");
 	} else
@@ -2741,11 +2746,11 @@
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 	      struct bio *bio, gfp_t gfp_mask)
 {
-	const int ioprio = task_ioprio(ioc);
-	const int ioprio_class = task_ioprio_class(ioc);
+	const int ioprio = task_ioprio(cic->icq.ioc);
+	const int ioprio_class = task_ioprio_class(cic->icq.ioc);
 	struct cfq_queue **async_cfqq = NULL;
 	struct cfq_queue *cfqq = NULL;
@@ -2755,7 +2760,7 @@
 	}
 
 	if (!cfqq)
-		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, bio, gfp_mask);
+		cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
 
 	/*
 	 * pin the queue now that it's allocated, scheduler exit will prune it
@@ -3025,7 +3030,7 @@
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
 
 	cfq_log_cfqq(cfqd, cfqq, "insert_request");
-	cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
+	cfq_init_prio_data(cfqq, RQ_CIC(rq));
 
 	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
@@ -3229,7 +3234,7 @@
 
 	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
 	if (cfqq) {
-		cfq_init_prio_data(cfqq, cic->icq.ioc);
+		cfq_init_prio_data(cfqq, cic);
 
 		return __cfq_may_queue(cfqq);
 	}
@@ -3321,7 +3326,7 @@
 new_queue:
 	cfqq = cic_to_cfqq(cic, is_sync);
 	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
-		cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, bio, gfp_mask);
+		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
 		cic_set_cfqq(cic, cfqq, is_sync);
 	} else {
 		/*